-rw-r--r--Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads5
-rw-r--r--Documentation/ABI/stable/sysfs-bus-firewire11
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci12
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd10
-rw-r--r--Documentation/ABI/testing/sysfs-devices-edac140
-rw-r--r--Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb44
-rw-r--r--Documentation/ABI/testing/sysfs-platform-asus-wmi7
-rw-r--r--Documentation/ABI/testing/sysfs-platform-ideapad-laptop11
-rw-r--r--Documentation/DMA-attributes.txt42
-rw-r--r--Documentation/DocBook/filesystems.tmpl4
-rw-r--r--Documentation/DocBook/media/v4l/biblio.xml2
-rw-r--r--Documentation/DocBook/media/v4l/common.xml17
-rw-r--r--Documentation/DocBook/media/v4l/compat.xml42
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml5
-rw-r--r--Documentation/DocBook/media/v4l/dev-subdev.xml36
-rw-r--r--Documentation/DocBook/media/v4l/io.xml19
-rw-r--r--Documentation/DocBook/media/v4l/selection-api.xml34
-rw-r--r--Documentation/DocBook/media/v4l/selections-common.xml164
-rw-r--r--Documentation/DocBook/media/v4l/v4l2.xml11
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-create-bufs.xml12
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml14
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml179
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-frequency.xml13
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-selection.xml86
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-tuner.xml38
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-qbuf.xml9
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-querycap.xml13
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml68
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml79
-rw-r--r--Documentation/IRQ-domain.txt5
-rw-r--r--Documentation/block/00-INDEX10
-rw-r--r--Documentation/block/cfq-iosched.txt77
-rw-r--r--Documentation/block/queue-sysfs.txt71
-rw-r--r--Documentation/cgroups/hugetlb.txt45
-rw-r--r--Documentation/cgroups/memory.txt12
-rw-r--r--Documentation/device-mapper/dm-raid.txt26
-rw-r--r--Documentation/device-mapper/striped.txt7
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/mrvl/intc.txt20
-rw-r--r--Documentation/devicetree/bindings/ata/cavium-compact-flash.txt30
-rw-r--r--Documentation/devicetree/bindings/ata/marvell.txt16
-rw-r--r--Documentation/devicetree/bindings/gpio/cavium-octeon-gpio.txt49
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-samsung.txt9
-rw-r--r--Documentation/devicetree/bindings/gpio/mrvl-gpio.txt23
-rw-r--r--Documentation/devicetree/bindings/i2c/cavium-i2c.txt34
-rw-r--r--Documentation/devicetree/bindings/i2c/gpio-i2c.txt (renamed from Documentation/devicetree/bindings/gpio/gpio_i2c.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mxs.txt3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-ocores.txt33
-rw-r--r--Documentation/devicetree/bindings/i2c/mrvl-i2c.txt19
-rw-r--r--Documentation/devicetree/bindings/mfd/ab8500.txt123
-rw-r--r--Documentation/devicetree/bindings/mfd/max77686.txt59
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65910.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt2
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/bootbus.txt126
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/ciu.txt26
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/ciu2.txt27
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/dma-engine.txt21
-rw-r--r--Documentation/devicetree/bindings/mips/cavium/uctl.txt46
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt8
-rw-r--r--Documentation/devicetree/bindings/mtd/orion-nand.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cavium-mdio.txt27
-rw-r--r--Documentation/devicetree/bindings/net/cavium-mix.txt39
-rw-r--r--Documentation/devicetree/bindings/net/cavium-pip.txt98
-rw-r--r--Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt12
-rw-r--r--Documentation/devicetree/bindings/pwm/mxs-pwm.txt17
-rw-r--r--Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt18
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.txt57
-rw-r--r--Documentation/devicetree/bindings/regulator/tps6586x.txt12
-rw-r--r--Documentation/devicetree/bindings/serial/cavium-uart.txt19
-rw-r--r--Documentation/devicetree/bindings/thermal/spear-thermal.txt14
-rw-r--r--Documentation/devicetree/bindings/tty/serial/of-serial.txt1
-rw-r--r--Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt28
-rw-r--r--Documentation/devicetree/bindings/watchdog/marvel.txt14
-rw-r--r--Documentation/dontdiff1
-rwxr-xr-xDocumentation/dvb/get_dvb_firmware42
-rw-r--r--Documentation/edac.txt112
-rw-r--r--Documentation/fault-injection/fault-injection.txt27
-rw-r--r--Documentation/fault-injection/notifier-error-inject.txt99
-rw-r--r--Documentation/feature-removal-schedule.txt75
-rw-r--r--Documentation/filesystems/Locking21
-rw-r--r--Documentation/filesystems/porting5
-rw-r--r--Documentation/filesystems/vfat.txt11
-rw-r--r--Documentation/filesystems/vfs.txt16
-rw-r--r--Documentation/input/edt-ft5x06.txt54
-rw-r--r--Documentation/ioctl/ioctl-number.txt1
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/laptops/laptop-mode.txt12
-rw-r--r--Documentation/leds/00-INDEX2
-rw-r--r--Documentation/leds/leds-blinkm.txt80
-rw-r--r--Documentation/leds/leds-lm3556.txt85
-rw-r--r--Documentation/leds/ledtrig-oneshot.txt59
-rw-r--r--Documentation/networking/ip-sysctl.txt6
-rw-r--r--Documentation/networking/netconsole.txt19
-rw-r--r--Documentation/pinctrl.txt6
-rw-r--r--Documentation/power/power_supply_class.txt10
-rw-r--r--Documentation/printk-formats.txt15
-rw-r--r--Documentation/pwm.txt76
-rw-r--r--Documentation/remoteproc.txt58
-rw-r--r--Documentation/security/Yama.txt14
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt5
-rw-r--r--Documentation/sysctl/fs.txt60
-rw-r--r--Documentation/sysctl/vm.txt44
-rw-r--r--Documentation/thermal/sysfs-api.txt30
-rw-r--r--Documentation/vfio.txt314
-rw-r--r--Documentation/video4linux/CARDLIST.au08282
-rw-r--r--Documentation/video4linux/CARDLIST.bttv1
-rw-r--r--Documentation/video4linux/CARDLIST.cx238854
-rw-r--r--Documentation/video4linux/CARDLIST.saa71341
-rw-r--r--Documentation/video4linux/v4l2-framework.txt73
-rw-r--r--Documentation/vm/hugetlbpage.txt10
-rw-r--r--Documentation/w1/slaves/w1_therm2
-rw-r--r--Documentation/watchdog/src/watchdog-test.c2
-rw-r--r--MAINTAINERS152
-rw-r--r--Makefile28
-rw-r--r--arch/Kconfig7
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/include/asm/atomic.h4
-rw-r--r--arch/alpha/include/asm/fpu.h2
-rw-r--r--arch/alpha/include/asm/ptrace.h5
-rw-r--r--arch/alpha/include/asm/socket.h2
-rw-r--r--arch/alpha/include/asm/uaccess.h34
-rw-r--r--arch/alpha/include/asm/unistd.h5
-rw-r--r--arch/alpha/include/asm/word-at-a-time.h55
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c3
-rw-r--r--arch/alpha/kernel/entry.S161
-rw-r--r--arch/alpha/kernel/osf_sys.c49
-rw-r--r--arch/alpha/kernel/process.c19
-rw-r--r--arch/alpha/kernel/smc37c669.c12
-rw-r--r--arch/alpha/kernel/systbls.S4
-rw-r--r--arch/alpha/lib/Makefile2
-rw-r--r--arch/alpha/lib/ev6-strncpy_from_user.S424
-rw-r--r--arch/alpha/lib/ev67-strlen_user.S107
-rw-r--r--arch/alpha/lib/strlen_user.S91
-rw-r--r--arch/alpha/lib/strncpy_from_user.S339
-rw-r--r--arch/alpha/mm/fault.c36
-rw-r--r--arch/alpha/oprofile/common.c1
-rw-r--r--arch/arm/Kconfig35
-rw-r--r--arch/arm/Kconfig.debug9
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/compressed/atags_to_fdt.c62
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi5
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9g25ek.dts2
-rw-r--r--arch/arm/boot/dts/highbank.dts12
-rw-r--r--arch/arm/boot/dts/imx23.dtsi52
-rw-r--r--arch/arm/boot/dts/imx27-3ds.dts2
-rw-r--r--arch/arm/boot/dts/imx27.dtsi6
-rw-r--r--arch/arm/boot/dts/imx28.dtsi76
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts6
-rw-r--r--arch/arm/boot/dts/imx51.dtsi12
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts22
-rw-r--r--arch/arm/boot/dts/imx53.dtsi21
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts1
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi21
-rw-r--r--arch/arm/boot/dts/kirkwood-dns320.dts64
-rw-r--r--arch/arm/boot/dts/kirkwood-dns325.dts70
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi69
-rw-r--r--arch/arm/boot/dts/kirkwood-dreamplug.dts52
-rw-r--r--arch/arm/boot/dts/kirkwood-goflexnet.dts99
-rw-r--r--arch/arm/boot/dts/kirkwood-ib62x0.dts40
-rw-r--r--arch/arm/boot/dts/kirkwood-iconnect.dts48
-rw-r--r--arch/arm/boot/dts/kirkwood-lschlv2.dts20
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxhl.dts20
-rw-r--r--arch/arm/boot/dts/kirkwood-lsxl.dtsi95
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219-6281.dts21
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219-6282.dts21
-rw-r--r--arch/arm/boot/dts/kirkwood-ts219.dtsi78
-rw-r--r--arch/arm/boot/dts/kirkwood.dtsi66
-rw-r--r--arch/arm/boot/dts/r8a7740.dtsi21
-rw-r--r--arch/arm/boot/dts/sh7377.dtsi21
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts2
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi6
-rw-r--r--arch/arm/boot/dts/twl6030.dtsi3
-rw-r--r--arch/arm/common/dmabounce.c1
-rw-r--r--arch/arm/configs/armadillo800eva_defconfig27
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/kzm9d_defconfig89
-rw-r--r--arch/arm/configs/kzm9g_defconfig8
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/tct_hammer_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/configs/u8500_defconfig1
-rw-r--r--arch/arm/include/asm/arch_timer.h3
-rw-r--r--arch/arm/include/asm/cacheflush.h8
-rw-r--r--arch/arm/include/asm/delay.h32
-rw-r--r--arch/arm/include/asm/dma-mapping.h24
-rw-r--r--arch/arm/include/asm/kmap_types.h26
-rw-r--r--arch/arm/include/asm/locks.h274
-rw-r--r--arch/arm/include/asm/memory.h2
-rw-r--r--arch/arm/include/asm/mutex.h119
-rw-r--r--arch/arm/include/asm/perf_event.h17
-rw-r--r--arch/arm/include/asm/pgtable.h40
-rw-r--r--arch/arm/include/asm/pmu.h3
-rw-r--r--arch/arm/include/asm/sched_clock.h2
-rw-r--r--arch/arm/include/asm/setup.h4
-rw-r--r--arch/arm/include/asm/spinlock.h76
-rw-r--r--arch/arm/include/asm/spinlock_types.h17
-rw-r--r--arch/arm/include/asm/timex.h10
-rw-r--r--arch/arm/include/asm/uaccess.h27
-rw-r--r--arch/arm/include/asm/unistd.h1
-rw-r--r--arch/arm/include/asm/word-at-a-time.h96
-rw-r--r--arch/arm/kernel/arch_timer.c13
-rw-r--r--arch/arm/kernel/armksyms.c7
-rw-r--r--arch/arm/kernel/entry-armv.S111
-rw-r--r--arch/arm/kernel/entry-common.S44
-rw-r--r--arch/arm/kernel/ftrace.c17
-rw-r--r--arch/arm/kernel/head.S59
-rw-r--r--arch/arm/kernel/perf_event.c15
-rw-r--r--arch/arm/kernel/perf_event_v6.c2
-rw-r--r--arch/arm/kernel/perf_event_v7.c5
-rw-r--r--arch/arm/kernel/perf_event_xscale.c2
-rw-r--r--arch/arm/kernel/process.c2
-rw-r--r--arch/arm/kernel/ptrace.c35
-rw-r--r--arch/arm/kernel/sched_clock.c24
-rw-r--r--arch/arm/kernel/setup.c6
-rw-r--r--arch/arm/kernel/signal.c114
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/smp.c5
-rw-r--r--arch/arm/kernel/topology.c241
-rw-r--r--arch/arm/kernel/traps.c88
-rw-r--r--arch/arm/lib/Makefile26
-rw-r--r--arch/arm/lib/delay-loop.S (renamed from arch/arm/lib/delay.S)20
-rw-r--r--arch/arm/lib/delay.c71
-rw-r--r--arch/arm/lib/io-acorn.S3
-rw-r--r--arch/arm/lib/io-readsw-armv3.S106
-rw-r--r--arch/arm/lib/io-writesw-armv3.S126
-rw-r--r--arch/arm/lib/strncpy_from_user.S43
-rw-r--r--arch/arm/lib/strnlen_user.S40
-rw-r--r--arch/arm/lib/uaccess.S564
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c10
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c6
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c6
-rw-r--r--arch/arm/mach-at91/clock.c12
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c39
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c1
-rw-r--r--arch/arm/mach-dove/common.c5
-rw-r--r--arch/arm/mach-dove/irq.c58
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c41
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.h3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c37
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c41
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c4
-rw-r--r--arch/arm/mach-exynos/mach-origen.c8
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c7
-rw-r--r--arch/arm/mach-exynos/pm_domains.c2
-rw-r--r--arch/arm/mach-gemini/irq.c1
-rw-r--r--arch/arm/mach-imx/Makefile10
-rw-r--r--arch/arm/mach-imx/clk-imx27.c8
-rw-r--r--arch/arm/mach-imx/clk-imx31.c2
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c1
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c8
-rw-r--r--arch/arm/mach-imx/headsmp.S (renamed from arch/arm/mach-imx/head-v7.S)0
-rw-r--r--arch/arm/mach-imx/hotplug.c23
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c4
-rw-r--r--arch/arm/mach-imx/mm-imx25.c10
-rw-r--r--arch/arm/mach-imx/mm-imx3.c7
-rw-r--r--arch/arm/mach-imx/mm-imx5.c40
-rw-r--r--arch/arm/mach-integrator/core.c1
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c2
-rw-r--r--arch/arm/mach-kirkwood/Kconfig29
-rw-r--r--arch/arm/mach-kirkwood/Makefile3
-rw-r--r--arch/arm/mach-kirkwood/Makefile.boot5
-rw-r--r--arch/arm/mach-kirkwood/board-dnskw.c143
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c80
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c29
-rw-r--r--arch/arm/mach-kirkwood/board-goflexnet.c71
-rw-r--r--arch/arm/mach-kirkwood/board-ib62x0.c72
-rw-r--r--arch/arm/mach-kirkwood/board-iconnect.c56
-rw-r--r--arch/arm/mach-kirkwood/board-lsxl.c135
-rw-r--r--arch/arm/mach-kirkwood/board-ts219.c82
-rw-r--r--arch/arm/mach-kirkwood/common.c76
-rw-r--r--arch/arm/mach-kirkwood/common.h17
-rw-r--r--arch/arm/mach-kirkwood/db88f6281-bp-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/irq.c38
-rw-r--r--arch/arm/mach-mmp/gplugd.c1
-rw-r--r--arch/arm/mach-mmp/sram.c2
-rw-r--r--arch/arm/mach-msm/platsmp.c2
-rw-r--r--arch/arm/mach-mv78xx0/addr-map.c2
-rw-r--r--arch/arm/mach-mv78xx0/common.c6
-rw-r--r--arch/arm/mach-mv78xx0/irq.c22
-rw-r--r--arch/arm/mach-mxs/Kconfig6
-rw-r--r--arch/arm/mach-mxs/Makefile3
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c8
-rw-r--r--arch/arm/mach-netx/fb.c23
-rw-r--r--arch/arm/mach-omap1/board-h2-mmc.c1
-rw-r--r--arch/arm/mach-omap1/board-h3-mmc.c1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c3
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c11
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h1
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c148
-rw-r--r--arch/arm/mach-omap2/display.c25
-rw-r--r--arch/arm/mach-omap2/hsmmc.c1
-rw-r--r--arch/arm/mach-omap2/mux.h1
-rw-r--r--arch/arm/mach-omap2/omap-smp.c2
-rw-r--r--arch/arm/mach-omap2/opp4xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c19
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S8
-rw-r--r--arch/arm/mach-omap2/timer.c4
-rw-r--r--arch/arm/mach-omap2/twl-common.c1
-rw-r--r--arch/arm/mach-orion5x/common.c3
-rw-r--r--arch/arm/mach-orion5x/irq.c22
-rw-r--r--arch/arm/mach-prima2/timer.c6
-rw-r--r--arch/arm/mach-pxa/eseries.h14
-rw-r--r--arch/arm/mach-pxa/hx4700.c56
-rw-r--r--arch/arm/mach-pxa/include/mach/regs-ost.h22
-rw-r--r--arch/arm/mach-pxa/lubbock.c2
-rw-r--r--arch/arm/mach-pxa/magician.c5
-rw-r--r--arch/arm/mach-pxa/raumfeld.c2
-rw-r--r--arch/arm/mach-pxa/reset.c7
-rw-r--r--arch/arm/mach-pxa/time.c52
-rw-r--r--arch/arm/mach-pxa/trizeps4.c4
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig4
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h3
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/pm-core.h4
-rw-r--r--arch/arm/mach-sa1100/assabet.c2
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1100.c1
-rw-r--r--arch/arm/mach-sa1100/cpu-sa1110.c1
-rw-r--r--arch/arm/mach-sa1100/include/mach/SA-1100.h16
-rw-r--r--arch/arm/mach-sa1100/include/mach/gpio.h1
-rw-r--r--arch/arm/mach-sa1100/include/mach/hardware.h6
-rw-r--r--arch/arm/mach-sa1100/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-sa1100/irq.c1
-rw-r--r--arch/arm/mach-sa1100/jornada720_ssp.c1
-rw-r--r--arch/arm/mach-sa1100/leds-cerf.c1
-rw-r--r--arch/arm/mach-sa1100/leds-hackkit.c1
-rw-r--r--arch/arm/mach-sa1100/leds-lart.c1
-rw-r--r--arch/arm/mach-sa1100/pm.c1
-rw-r--r--arch/arm/mach-sa1100/sleep.S8
-rw-r--r--arch/arm/mach-sa1100/time.c48
-rw-r--r--arch/arm/mach-shmobile/Kconfig13
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c82
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c62
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c466
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c10
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c52
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c30
-rw-r--r--arch/arm/mach-shmobile/board-kzm9d.c10
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c320
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c86
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c12
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7740.c150
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7779.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh7367.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh7377.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c12
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h1
-rw-r--r--arch/arm/mach-shmobile/include/mach/dma-register.h84
-rw-r--r--arch/arm/mach-shmobile/include/mach/gpio.h32
-rw-r--r--arch/arm/mach-shmobile/include/mach/pm-rmobile.h44
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7740.h33
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h45
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh73a0.h7
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7740.c13
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c4
-rw-r--r--arch/arm/mach-shmobile/pfc-r8a7740.c24
-rw-r--r--arch/arm/mach-shmobile/pm-r8a7740.c54
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c167
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c297
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c360
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c209
-rw-r--r--arch/arm/mach-shmobile/setup-sh7377.c47
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c152
-rw-r--r--arch/arm/mach-spear3xx/spear300.c26
-rw-r--r--arch/arm/mach-spear3xx/spear310.c26
-rw-r--r--arch/arm/mach-spear3xx/spear320.c26
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c3
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c51
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c3
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c3
-rw-r--r--arch/arm/mach-tegra/board-harmony-power.c35
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-msp.c10
-rw-r--r--arch/arm/mach-ux500/board-mop500.c36
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c7
-rw-r--r--arch/arm/mach-ux500/devices-common.h24
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h3
-rw-r--r--arch/arm/mach-vt8500/Makefile2
-rw-r--r--arch/arm/mach-vt8500/pwm.c265
-rw-r--r--arch/arm/mm/context.c35
-rw-r--r--arch/arm/mm/dma-mapping.c573
-rw-r--r--arch/arm/mm/flush.c2
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/ioremap.c2
-rw-r--r--arch/arm/mm/mm.h3
-rw-r--r--arch/arm/mm/mmu.c8
-rw-r--r--arch/arm/mm/proc-v6.S6
-rw-r--r--arch/arm/mm/proc-v7-2level.S5
-rw-r--r--arch/arm/mm/tlb-v7.S12
-rw-r--r--arch/arm/oprofile/common.c45
-rw-r--r--arch/arm/plat-mxc/Kconfig6
-rw-r--r--arch/arm/plat-mxc/Makefile1
-rw-r--r--arch/arm/plat-mxc/include/mach/i2c.h2
-rw-r--r--arch/arm/plat-mxc/tzic.c1
-rw-r--r--arch/arm/plat-omap/dmtimer.c6
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h3
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h2
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h9
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-orion/common.c9
-rw-r--r--arch/arm/plat-orion/gpio.c166
-rw-r--r--arch/arm/plat-orion/include/plat/common.h6
-rw-r--r--arch/arm/plat-orion/include/plat/gpio.h16
-rw-r--r--arch/arm/plat-orion/include/plat/irq.h3
-rw-r--r--arch/arm/plat-orion/irq.c40
-rw-r--r--arch/arm/plat-pxa/Makefile1
-rw-r--r--arch/arm/plat-pxa/pwm.c304
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-samsung/Kconfig3
-rw-r--r--arch/arm/plat-samsung/Makefile4
-rw-r--r--arch/arm/plat-samsung/devs.c29
-rw-r--r--arch/arm/plat-samsung/include/plat/hdmi.h16
-rw-r--r--arch/arm/plat-samsung/pm.c2
-rw-r--r--arch/arm/plat-spear/include/plat/pl080.h6
-rw-r--r--arch/arm/plat-spear/pl080.c10
-rw-r--r--arch/arm/plat-versatile/platsmp.c2
-rw-r--r--arch/arm/vfp/entry.S16
-rw-r--r--arch/arm/vfp/vfphw.S26
-rw-r--r--arch/arm/vfp/vfpmodule.c10
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/avr32/boards/atstk1000/atstk1002.c2
-rw-r--r--arch/avr32/include/asm/kmap_types.h24
-rw-r--r--arch/avr32/include/asm/unistd.h1
-rw-r--r--arch/avr32/mm/fault.c33
-rw-r--r--arch/blackfin/Kconfig11
-rw-r--r--arch/blackfin/include/asm/unistd.h1
-rw-r--r--arch/blackfin/kernel/Makefile1
-rw-r--r--arch/blackfin/kernel/pwm.c100
-rw-r--r--arch/blackfin/kernel/setup.c1
-rw-r--r--arch/c6x/Kconfig1
-rw-r--r--arch/c6x/include/asm/cache.h16
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/include/asm/unistd.h1
-rw-r--r--arch/frv/Kconfig1
-rw-r--r--arch/frv/include/asm/cpumask.h6
-rw-r--r--arch/frv/include/asm/highmem.h34
-rw-r--r--arch/frv/include/asm/kmap_types.h24
-rw-r--r--arch/frv/include/asm/unistd.h1
-rw-r--r--arch/frv/kernel/kernel_thread.S2
-rw-r--r--arch/frv/mb93090-mb00/pci-dma.c4
-rw-r--r--arch/frv/mm/cache-page.c8
-rw-r--r--arch/frv/mm/highmem.c20
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/include/asm/unistd.h1
-rw-r--r--arch/hexagon/include/asm/Kbuild2
-rw-r--r--arch/ia64/Kconfig3
-rw-r--r--arch/ia64/configs/generic_defconfig1
-rw-r--r--arch/ia64/configs/gensparse_defconfig1
-rw-r--r--arch/ia64/include/asm/atomic.h4
-rw-r--r--arch/ia64/include/asm/machvec.h2
-rw-r--r--arch/ia64/include/asm/machvec_dig.h2
-rw-r--r--arch/ia64/include/asm/machvec_dig_vtd.h2
-rw-r--r--arch/ia64/include/asm/machvec_hpsim.h2
-rw-r--r--arch/ia64/include/asm/machvec_hpzx1.h2
-rw-r--r--arch/ia64/include/asm/machvec_hpzx1_swiotlb.h2
-rw-r--r--arch/ia64/include/asm/machvec_sn2.h2
-rw-r--r--arch/ia64/include/asm/machvec_uv.h2
-rw-r--r--arch/ia64/include/asm/machvec_xen.h2
-rw-r--r--arch/ia64/include/asm/processor.h2
-rw-r--r--arch/ia64/kernel/acpi.c5
-rw-r--r--arch/ia64/kernel/irq_ia64.c1
-rw-r--r--arch/ia64/kernel/perfmon.c1
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/ia64/pci/fixup.c4
-rw-r--r--arch/m32r/Kconfig1
-rw-r--r--arch/m32r/include/asm/unistd.h1
-rw-r--r--arch/m68k/Kconfig14
-rw-r--r--arch/m68k/Kconfig.cpu19
-rw-r--r--arch/m68k/apollo/config.c16
-rw-r--r--arch/m68k/include/asm/Kbuild25
-rw-r--r--arch/m68k/include/asm/MC68332.h152
-rw-r--r--arch/m68k/include/asm/apollodma.h248
-rw-r--r--arch/m68k/include/asm/apollohw.h2
-rw-r--r--arch/m68k/include/asm/bitsperlong.h1
-rw-r--r--arch/m68k/include/asm/cputime.h6
-rw-r--r--arch/m68k/include/asm/delay.h2
-rw-r--r--arch/m68k/include/asm/device.h7
-rw-r--r--arch/m68k/include/asm/emergency-restart.h6
-rw-r--r--arch/m68k/include/asm/errno.h6
-rw-r--r--arch/m68k/include/asm/futex.h6
-rw-r--r--arch/m68k/include/asm/ioctl.h1
-rw-r--r--arch/m68k/include/asm/ipcbuf.h1
-rw-r--r--arch/m68k/include/asm/irq_regs.h1
-rw-r--r--arch/m68k/include/asm/kdebug.h1
-rw-r--r--arch/m68k/include/asm/kmap_types.h6
-rw-r--r--arch/m68k/include/asm/kvm_para.h1
-rw-r--r--arch/m68k/include/asm/local.h6
-rw-r--r--arch/m68k/include/asm/local64.h1
-rw-r--r--arch/m68k/include/asm/mac_mouse.h23
-rw-r--r--arch/m68k/include/asm/mcfmbus.h77
-rw-r--r--arch/m68k/include/asm/mman.h1
-rw-r--r--arch/m68k/include/asm/mutex.h9
-rw-r--r--arch/m68k/include/asm/percpu.h6
-rw-r--r--arch/m68k/include/asm/resource.h6
-rw-r--r--arch/m68k/include/asm/sbus.h45
-rw-r--r--arch/m68k/include/asm/scatterlist.h6
-rw-r--r--arch/m68k/include/asm/sections.h8
-rw-r--r--arch/m68k/include/asm/shm.h31
-rw-r--r--arch/m68k/include/asm/siginfo.h6
-rw-r--r--arch/m68k/include/asm/statfs.h6
-rw-r--r--arch/m68k/include/asm/topology.h6
-rw-r--r--arch/m68k/include/asm/types.h22
-rw-r--r--arch/m68k/include/asm/unaligned.h4
-rw-r--r--arch/m68k/include/asm/unistd.h1
-rw-r--r--arch/m68k/include/asm/xor.h1
-rw-r--r--arch/m68k/kernel/setup_no.c11
-rw-r--r--arch/m68k/kernel/sys_m68k.c8
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds2
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds2
-rw-r--r--arch/m68k/lib/muldi3.c2
-rw-r--r--arch/m68k/mm/init_mm.c2
-rw-r--r--arch/m68k/mm/init_no.c2
-rw-r--r--arch/m68k/platform/68328/head-de2.S8
-rw-r--r--arch/m68k/platform/68328/head-pilot.S10
-rw-r--r--arch/m68k/platform/68328/head-ram.S4
-rw-r--r--arch/m68k/platform/68328/head-rom.S6
-rw-r--r--arch/m68k/platform/68360/head-ram.S6
-rw-r--r--arch/m68k/platform/68360/head-rom.S8
-rw-r--r--arch/m68k/platform/coldfire/head.S10
-rw-r--r--arch/m68k/sun3/prom/init.c48
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/sections.h4
-rw-r--r--arch/microblaze/include/asm/unistd.h1
-rw-r--r--arch/microblaze/kernel/microblaze_ksyms.c3
-rw-r--r--arch/microblaze/kernel/setup.c4
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/Kbuild.platforms1
-rw-r--r--arch/mips/Kconfig43
-rw-r--r--arch/mips/alchemy/board-mtx1.c6
-rw-r--r--arch/mips/alchemy/common/platform.c10
-rw-r--r--arch/mips/alchemy/devboards/Makefile2
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c5
-rw-r--r--arch/mips/alchemy/devboards/pb1100.c4
-rw-r--r--arch/mips/alchemy/devboards/pb1500.c4
-rw-r--r--arch/mips/alchemy/devboards/platform.c30
-rw-r--r--arch/mips/alchemy/devboards/prom.c69
-rw-r--r--arch/mips/ath79/dev-usb.c2
-rw-r--r--arch/mips/ath79/gpio.c6
-rw-r--r--arch/mips/bcm63xx/Kconfig4
-rw-r--r--arch/mips/bcm63xx/Makefile3
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c107
-rw-r--r--arch/mips/bcm63xx/clk.c26
-rw-r--r--arch/mips/bcm63xx/cpu.c63
-rw-r--r--arch/mips/bcm63xx/dev-dsp.c2
-rw-r--r--arch/mips/bcm63xx/dev-flash.c123
-rw-r--r--arch/mips/bcm63xx/dev-rng.c40
-rw-r--r--arch/mips/bcm63xx/dev-spi.c123
-rw-r--r--arch/mips/bcm63xx/dev-wdt.c2
-rw-r--r--arch/mips/bcm63xx/irq.c21
-rw-r--r--arch/mips/bcm63xx/prom.c4
-rw-r--r--arch/mips/bcm63xx/setup.c13
-rw-r--r--arch/mips/boot/compressed/Makefile4
-rw-r--r--arch/mips/boot/compressed/uart-16550.c5
-rw-r--r--arch/mips/cavium-octeon/.gitignore2
-rw-r--r--arch/mips/cavium-octeon/Makefile16
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-fpa.c183
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-fpa.c243
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c436
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S16
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c699
-rw-r--r--arch/mips/cavium-octeon/octeon_3xxx.dts571
-rw-r--r--arch/mips/cavium-octeon/octeon_68xx.dts625
-rw-r--r--arch/mips/cavium-octeon/serial.c134
-rw-r--r--arch/mips/cavium-octeon/setup.c45
-rw-r--r--arch/mips/configs/ls1b_defconfig109
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig4
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/include/asm/clock.h11
-rw-r--r--arch/mips/include/asm/cpu.h3
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h3
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h150
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h12
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h91
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h8
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h293
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/ioremap.h1
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h55
-rw-r--r--arch/mips/include/asm/mach-jz4740/jz4740_nand.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/irq.h73
-rw-r--r--arch/mips/include/asm/mach-loongson1/loongson1.h44
-rw-r--r--arch/mips/include/asm/mach-loongson1/platform.h23
-rw-r--r--arch/mips/include/asm/mach-loongson1/prom.h24
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-clk.h33
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-wdt.h22
-rw-r--r--arch/mips/include/asm/mach-loongson1/war.h25
-rw-r--r--arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h1
-rw-r--r--arch/mips/include/asm/mach-tx49xx/mangle-port.h2
-rw-r--r--arch/mips/include/asm/mipsmtregs.h13
-rw-r--r--arch/mips/include/asm/module.h3
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/iomap.h5
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pcibus.h76
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/usb.h64
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/xlp.h17
-rw-r--r--arch/mips/include/asm/netlogic/xlr/bridge.h104
-rw-r--r--arch/mips/include/asm/netlogic/xlr/flash.h55
-rw-r--r--arch/mips/include/asm/netlogic/xlr/gpio.h59
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-fpa.h64
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper.h2
-rw-r--r--arch/mips/include/asm/octeon/octeon.h5
-rw-r--r--arch/mips/include/asm/prom.h3
-rw-r--r--arch/mips/include/asm/r4k-timer.h8
-rw-r--r--arch/mips/include/asm/smtc.h6
-rw-r--r--arch/mips/include/asm/uaccess.h6
-rw-r--r--arch/mips/include/asm/uasm.h100
-rw-r--r--arch/mips/include/asm/unistd.h1
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c1
-rw-r--r--arch/mips/jz4740/platform.c20
-rw-r--r--arch/mips/jz4740/reset.c49
-rw-r--r--arch/mips/kernel/cpu-probe.c299
-rw-r--r--arch/mips/kernel/cpufreq/Makefile2
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c21
-rw-r--r--arch/mips/kernel/kspd.c2
-rw-r--r--arch/mips/kernel/module.c43
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c5
-rw-r--r--arch/mips/kernel/prom.c29
-rw-r--r--arch/mips/kernel/smp.c8
-rw-r--r--arch/mips/kernel/smtc.c76
-rw-r--r--arch/mips/kernel/sync-r4k.c26
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/lantiq/clk.c5
-rw-r--r--arch/mips/lantiq/prom.c22
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c49
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/memcpy-inatomic.S451
-rw-r--r--arch/mips/lib/memcpy.S11
-rw-r--r--arch/mips/loongson/Kconfig1
-rw-r--r--arch/mips/loongson/lemote-2f/Makefile2
-rw-r--r--arch/mips/loongson/lemote-2f/clock.c (renamed from arch/mips/kernel/cpufreq/loongson2_clock.c)46
-rw-r--r--arch/mips/loongson1/Kconfig22
-rw-r--r--arch/mips/loongson1/Makefile11
-rw-r--r--arch/mips/loongson1/Platform7
-rw-r--r--arch/mips/loongson1/common/Makefile5
-rw-r--r--arch/mips/loongson1/common/clock.c181
-rw-r--r--arch/mips/loongson1/common/irq.c147
-rw-r--r--arch/mips/loongson1/common/platform.c124
-rw-r--r--arch/mips/loongson1/common/prom.c87
-rw-r--r--arch/mips/loongson1/common/reset.c45
-rw-r--r--arch/mips/loongson1/common/setup.c29
-rw-r--r--arch/mips/loongson1/ls1b/Makefile5
-rw-r--r--arch/mips/loongson1/ls1b/board.c33
-rw-r--r--arch/mips/mm/uasm.c62
-rw-r--r--arch/mips/mti-malta/malta-pci.c13
-rw-r--r--arch/mips/netlogic/common/earlycons.c2
-rw-r--r--arch/mips/netlogic/common/smpboot.S157
-rw-r--r--arch/mips/netlogic/xlp/Makefile2
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c52
-rw-r--r--arch/mips/netlogic/xlp/of.c34
-rw-r--r--arch/mips/netlogic/xlp/platform.c2
-rw-r--r--arch/mips/netlogic/xlp/setup.c16
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c124
-rw-r--r--arch/mips/netlogic/xlr/Makefile2
-rw-r--r--arch/mips/netlogic/xlr/platform-flash.c220
-rw-r--r--arch/mips/netlogic/xlr/platform.c140
-rw-r--r--arch/mips/netlogic/xlr/setup.c2
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c10
-rw-r--r--arch/mips/pci/Makefile1
-rw-r--r--arch/mips/pci/fixup-cobalt.c8
-rw-r--r--arch/mips/pci/fixup-malta.c14
-rw-r--r--arch/mips/pci/fixup-rc32434.c2
-rw-r--r--arch/mips/pci/ops-bcm63xx.c63
-rw-r--r--arch/mips/pci/pci-ar724x.c22
-rw-r--r--arch/mips/pci/pci-bcm63xx.c133
-rw-r--r--arch/mips/pci/pci-bcm63xx.h5
-rw-r--r--arch/mips/pci/pci-xlp.c248
-rw-r--r--arch/mips/pci/pci-xlr.c4
-rw-r--r--arch/mips/pnx833x/stb22x/board.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c1
-rw-r--r--arch/mips/txx9/Kconfig1
-rw-r--r--arch/mips/txx9/generic/pci.c6
-rw-r--r--arch/mips/txx9/generic/setup.c12
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c2
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c11
-rw-r--r--arch/mn10300/Kconfig1
-rw-r--r--arch/mn10300/include/asm/ipc.h1
-rw-r--r--arch/mn10300/include/asm/unistd.h1
-rw-r--r--arch/openrisc/include/asm/Kbuild2
-rw-r--r--arch/parisc/include/asm/atomic.h4
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/sys_parisc.c8
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi7
-rw-r--r--arch/powerpc/boot/dts/p2020rdb-pc_32b.dts4
-rw-r--r--arch/powerpc/boot/dts/p2020rdb-pc_36b.dts4
-rw-r--r--arch/powerpc/boot/dts/p3041ds.dts2
-rw-r--r--arch/powerpc/configs/85xx/p1023rds_defconfig31
-rw-r--r--arch/powerpc/configs/chroma_defconfig4
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig29
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig1
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig18
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig33
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig32
-rw-r--r--arch/powerpc/configs/ppc64_defconfig6
-rw-r--r--arch/powerpc/configs/pseries_defconfig6
-rw-r--r--arch/powerpc/include/asm/cputable.h2
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h8
-rw-r--r--arch/powerpc/include/asm/kmap_types.h31
-rw-r--r--arch/powerpc/include/asm/kvm_host.h1
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h12
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h1
-rw-r--r--arch/powerpc/include/asm/processor.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/dma-iommu.c10
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c1
-rw-r--r--arch/powerpc/kernel/dma.c36
-rw-r--r--arch/powerpc/kernel/entry_32.S12
-rw-r--r--arch/powerpc/kernel/entry_64.S23
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S3
-rw-r--r--arch/powerpc/kernel/ftrace.c11
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/kgdb.c27
-rw-r--r--arch/powerpc/kernel/process.c12
-rw-r--r--arch/powerpc/kernel/rtas_flash.c2
-rw-r--r--arch/powerpc/kernel/smp.c11
-rw-r--r--arch/powerpc/kernel/syscalls.c8
-rw-r--r--arch/powerpc/kernel/sysfs.c10
-rw-r--r--arch/powerpc/kernel/time.c9
-rw-r--r--arch/powerpc/kernel/traps.c3
-rw-r--r--arch/powerpc/kernel/vio.c1
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S12
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S1
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S77
-rw-r--r--arch/powerpc/kvm/e500_tlb.c11
-rw-r--r--arch/powerpc/lib/code-patching.c2
-rw-r--r--arch/powerpc/lib/copyuser_power7.S35
-rw-r--r--arch/powerpc/lib/memcpy_power7.S4
-rw-r--r--arch/powerpc/mm/mem.c1
-rw-r--r--arch/powerpc/mm/numa.c7
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c122
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c77
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c2
-rw-r--r--arch/powerpc/platforms/powernv/smp.c10
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h4
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c39
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c13
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c3
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c8
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c2
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c3
-rw-r--r--arch/powerpc/xmon/xmon.c84
-rw-r--r--arch/s390/Kconfig4
-rw-r--r--arch/s390/defconfig7
-rw-r--r--arch/s390/include/asm/elf.h3
-rw-r--r--arch/s390/include/asm/mmu_context.h16
-rw-r--r--arch/s390/include/asm/posix_types.h3
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/setup.h2
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/sparsemem.h2
-rw-r--r--arch/s390/include/asm/syscall.h10
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/kernel/compat_linux.c2
-rw-r--r--arch/s390/kernel/compat_wrapper.S4
-rw-r--r--arch/s390/kernel/debug.c70
-rw-r--r--arch/s390/kernel/dis.c4
-rw-r--r--arch/s390/kernel/early.c1
-rw-r--r--arch/s390/kernel/ipl.c12
-rw-r--r--arch/s390/kernel/ptrace.c7
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/sys_s390.c9
-rw-r--r--arch/s390/kernel/traps.c16
-rw-r--r--arch/s390/kernel/vdso.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/mm/fault.c35
-rw-r--r--arch/s390/mm/mmap.c12
-rw-r--r--arch/s390/mm/pgtable.c7
-rw-r--r--arch/s390/oprofile/backtrace.c2
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/boards/Kconfig13
-rw-r--r--arch/sh/boards/board-apsh4a3a.c10
-rw-r--r--arch/sh/boards/board-apsh4ad0a.c10
-rw-r--r--arch/sh/boards/board-magicpanelr2.c10
-rw-r--r--arch/sh/boards/board-polaris.c10
-rw-r--r--arch/sh/boards/board-sh2007.c12
-rw-r--r--arch/sh/boards/board-sh7757lcr.c14
-rw-r--r--arch/sh/boards/mach-ap325rxa/setup.c21
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c125
-rw-r--r--arch/sh/boards/mach-kfr2r09/setup.c12
-rw-r--r--arch/sh/boards/mach-migor/setup.c13
-rw-r--r--arch/sh/boards/mach-rsk/setup.c10
-rw-r--r--arch/sh/boards/mach-sdk7786/setup.c10
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c15
-rw-r--r--arch/sh/configs/apsh4ad0a_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig4
-rw-r--r--arch/sh/configs/se7206_defconfig2
-rw-r--r--arch/sh/configs/shx3_defconfig2
-rw-r--r--arch/sh/configs/urquell_defconfig4
-rw-r--r--arch/sh/drivers/dma/dma-sh.c2
-rw-r--r--arch/sh/include/asm/sections.h1
-rw-r--r--arch/sh/include/asm/unistd.h1
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/sh7269.h36
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7757.h2
-rw-r--r--arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c195
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c4
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c1
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c14
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c1
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sh/lib/mcount.S8
-rw-r--r--arch/sh/mm/fault.c8
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/fixmap.h110
-rw-r--r--arch/sparc/include/asm/highmem.h3
-rw-r--r--arch/sparc/include/asm/leon.h1
-rw-r--r--arch/sparc/include/asm/mmu_context_32.h8
-rw-r--r--arch/sparc/include/asm/page_32.h3
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h29
-rw-r--r--arch/sparc/include/asm/pgtable_32.h44
-rw-r--r--arch/sparc/include/asm/unistd.h1
-rw-r--r--arch/sparc/include/asm/vaddrs.h22
-rw-r--r--arch/sparc/kernel/head_32.S2
-rw-r--r--arch/sparc/kernel/ldc.c6
-rw-r--r--arch/sparc/kernel/leon_kernel.c16
-rw-r--r--arch/sparc/kernel/process_32.c4
-rw-r--r--arch/sparc/kernel/setup_32.c1
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c29
-rw-r--r--arch/sparc/lib/NG2memcpy.S72
-rw-r--r--arch/sparc/lib/U1memcpy.S4
-rw-r--r--arch/sparc/lib/copy_page.S56
-rw-r--r--arch/sparc/mm/fault_32.c18
-rw-r--r--arch/sparc/mm/highmem.c42
-rw-r--r--arch/sparc/mm/init_32.c58
-rw-r--r--arch/sparc/mm/init_64.c28
-rw-r--r--arch/sparc/mm/srmmu.c332
-rw-r--r--arch/sparc/prom/init_32.c7
-rw-r--r--arch/sparc/prom/init_64.c4
-rw-r--r--arch/tile/configs/tilegx_defconfig4
-rw-r--r--arch/tile/configs/tilepro_defconfig4
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/kmap_types.h31
-rw-r--r--arch/tile/kernel/pci.c2
-rw-r--r--arch/tile/kernel/pci_gx.c2
-rw-r--r--arch/tile/mm/highmem.c2
-rw-r--r--arch/um/defconfig10
-rw-r--r--arch/um/drivers/chan_kern.c4
-rw-r--r--arch/um/drivers/line.c231
-rw-r--r--arch/um/drivers/line.h12
-rw-r--r--arch/um/drivers/mconsole_kern.c3
-rw-r--r--arch/um/drivers/port_kern.c6
-rw-r--r--arch/um/drivers/random.c3
-rw-r--r--arch/um/drivers/ssl.c42
-rw-r--r--arch/um/drivers/stdio_console.c21
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/drivers/xterm_kern.c3
-rw-r--r--arch/um/include/asm/kmap_types.h18
-rw-r--r--arch/um/include/asm/ptrace-generic.h2
-rw-r--r--arch/um/include/shared/as-layout.h3
-rw-r--r--arch/um/include/shared/irq_user.h3
-rw-r--r--arch/um/include/shared/kern_util.h13
-rw-r--r--arch/um/kernel/irq.c2
-rw-r--r--arch/um/kernel/process.c13
-rw-r--r--arch/um/kernel/ptrace.c71
-rw-r--r--arch/um/kernel/sigio.c3
-rw-r--r--arch/um/kernel/skas/syscall.c6
-rw-r--r--arch/um/kernel/time.c2
-rw-r--r--arch/um/kernel/trap.c39
-rw-r--r--arch/um/os-Linux/internal.h2
-rw-r--r--arch/um/os-Linux/signal.c26
-rw-r--r--arch/um/os-Linux/skas/process.c16
-rw-r--r--arch/um/os-Linux/time.c4
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/include/asm/mce.h8
-rw-r--r--arch/x86/include/asm/olpc.h19
-rw-r--r--arch/x86/include/asm/perf_event.h13
-rw-r--r--arch/x86/include/asm/spinlock.h3
-rw-r--r--arch/x86/include/asm/unistd.h1
-rw-r--r--arch/x86/kernel/acpi/sleep.c4
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_32.S4
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S4
-rw-r--r--arch/x86/kernel/alternative.c4
-rw-r--r--arch/x86/kernel/apic/io_apic.c14
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c7
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c43
-rw-r--r--arch/x86/kernel/cpu/perf_event.c89
-rw-r--r--arch/x86/kernel/cpu/perf_event.h22
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c102
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c7
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c1333
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h235
-rw-r--r--arch/x86/kernel/e820.c2
-rw-r--r--arch/x86/kernel/irq.c3
-rw-r--r--arch/x86/kernel/kdebugfs.c6
-rw-r--r--arch/x86/kernel/microcode_amd.c7
-rw-r--r--arch/x86/kvm/emulate.c30
-rw-r--r--arch/x86/kvm/i8259.c17
-rw-r--r--arch/x86/kvm/mmu.c13
-rw-r--r--arch/x86/kvm/vmx.c20
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--arch/x86/mm/hugetlbpage.c21
-rw-r--r--arch/x86/mm/pageattr.c10
-rw-r--r--arch/x86/mm/srat.c15
-rw-r--r--arch/x86/platform/efi/efi.c30
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-pm.c16
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c1
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c1
-rw-r--r--arch/x86/platform/olpc/olpc.c190
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/syscalls/syscall_64.tbl8
-rw-r--r--arch/x86/um/asm/ptrace.h6
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/include/asm/cpumask.h16
-rw-r--r--arch/xtensa/include/asm/rmap.h16
-rw-r--r--arch/xtensa/kernel/syscall.c2
-rw-r--r--arch/xtensa/mm/fault.c29
-rw-r--r--block/blk-cgroup.c139
-rw-r--r--block/blk-cgroup.h128
-rw-r--r--block/blk-core.c253
-rw-r--r--block/blk-ioc.c1
-rw-r--r--block/blk-lib.c41
-rw-r--r--block/blk-merge.c117
-rw-r--r--block/blk-settings.c3
-rw-r--r--block/blk-sysfs.c34
-rw-r--r--block/blk-throttle.c3
-rw-r--r--block/blk.h4
-rw-r--r--block/bsg-lib.c53
-rw-r--r--block/genhd.c22
-rw-r--r--block/ioctl.c59
-rw-r--r--block/partition-generic.c4
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/ac.c8
-rw-r--r--drivers/acpi/acpi_memhotplug.c43
-rw-r--r--drivers/acpi/acpi_pad.c4
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h19
-rw-r--r--drivers/acpi/acpica/achware.h12
-rw-r--r--drivers/acpi/acpica/aclocal.h43
-rw-r--r--drivers/acpi/acpica/acmacros.h6
-rw-r--r--drivers/acpi/acpica/acobject.h15
-rw-r--r--drivers/acpi/acpica/acpredef.h8
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h4
-rw-r--r--drivers/acpi/acpica/amlresrc.h4
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c4
-rw-r--r--drivers/acpi/acpica/dsfield.c103
-rw-r--r--drivers/acpi/acpica/dsinit.c4
-rw-r--r--drivers/acpi/acpica/dsmethod.c6
-rw-r--r--drivers/acpi/acpica/dsmthdat.c32
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c12
-rw-r--r--drivers/acpi/acpica/dsutils.c6
-rw-r--r--drivers/acpi/acpica/dswscope.c4
-rw-r--r--drivers/acpi/acpica/dswstate.c20
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c4
-rw-r--r--drivers/acpi/acpica/evgpe.c22
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c22
-rw-r--r--drivers/acpi/acpica/evmisc.c191
-rw-r--r--drivers/acpi/acpica/evregion.c24
-rw-r--r--drivers/acpi/acpica/evrgnini.c28
-rw-r--r--drivers/acpi/acpica/evsci.c4
-rw-r--r--drivers/acpi/acpica/evxface.c474
-rw-r--r--drivers/acpi/acpica/evxfevnt.c8
-rw-r--r--drivers/acpi/acpica/evxfgpe.c112
-rw-r--r--drivers/acpi/acpica/evxfregn.c18
-rw-r--r--drivers/acpi/acpica/exconfig.c8
-rw-r--r--drivers/acpi/acpica/exconvrt.c10
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c4
-rw-r--r--drivers/acpi/acpica/exdump.c51
-rw-r--r--drivers/acpi/acpica/exfldio.c14
-rw-r--r--drivers/acpi/acpica/exmisc.c26
-rw-r--r--drivers/acpi/acpica/exmutex.c6
-rw-r--r--drivers/acpi/acpica/exprep.c6
-rw-r--r--drivers/acpi/acpica/exregion.c38
-rw-r--r--drivers/acpi/acpica/exresolv.c4
-rw-r--r--drivers/acpi/acpica/exresop.c10
-rw-r--r--drivers/acpi/acpica/exstore.c6
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c8
-rw-r--r--drivers/acpi/acpica/exutils.c10
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c30
-rw-r--r--drivers/acpi/acpica/hwregs.c22
-rw-r--r--drivers/acpi/acpica/hwsleep.c20
-rw-r--r--drivers/acpi/acpica/hwtimer.c4
-rw-r--r--drivers/acpi/acpica/hwvalid.c4
-rw-r--r--drivers/acpi/acpica/hwxface.c12
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c26
-rw-r--r--drivers/acpi/acpica/nsaccess.c8
-rw-r--r--drivers/acpi/acpica/nsalloc.c10
-rw-r--r--drivers/acpi/acpica/nsdump.c18
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c6
-rw-r--r--drivers/acpi/acpica/nseval.c10
-rw-r--r--drivers/acpi/acpica/nsinit.c6
-rw-r--r--drivers/acpi/acpica/nsload.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c10
-rw-r--r--drivers/acpi/acpica/nsobject.c28
-rw-r--r--drivers/acpi/acpica/nspredef.c38
-rw-r--r--drivers/acpi/acpica/nsrepair.c14
-rw-r--r--drivers/acpi/acpica/nsrepair2.c26
-rw-r--r--drivers/acpi/acpica/nssearch.c12
-rw-r--r--drivers/acpi/acpica/nsutils.c26
-rw-r--r--drivers/acpi/acpica/nswalk.c8
-rw-r--r--drivers/acpi/acpica/nsxfeval.c26
-rw-r--r--drivers/acpi/acpica/nsxfname.c16
-rw-r--r--drivers/acpi/acpica/nsxfobj.c8
-rw-r--r--drivers/acpi/acpica/psargs.c6
-rw-r--r--drivers/acpi/acpica/psloop.c16
-rw-r--r--drivers/acpi/acpica/psopcode.c4
-rw-r--r--drivers/acpi/acpica/psparse.c6
-rw-r--r--drivers/acpi/acpica/psscope.c6
-rw-r--r--drivers/acpi/acpica/pstree.c14
-rw-r--r--drivers/acpi/acpica/psutils.c8
-rw-r--r--drivers/acpi/acpica/psxface.c16
-rw-r--r--drivers/acpi/acpica/rsaddr.c14
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c6
-rw-r--r--drivers/acpi/acpica/rsdump.c10
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c22
-rw-r--r--drivers/acpi/acpica/rsutils.c44
-rw-r--r--drivers/acpi/acpica/rsxface.c14
-rw-r--r--drivers/acpi/acpica/tbfadt.c144
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c23
-rw-r--r--drivers/acpi/acpica/tbutils.c58
-rw-r--r--drivers/acpi/acpica/tbxface.c226
-rw-r--r--drivers/acpi/acpica/tbxfload.c389
-rw-r--r--drivers/acpi/acpica/tbxfroot.c12
-rw-r--r--drivers/acpi/acpica/utaddress.c10
-rw-r--r--drivers/acpi/acpica/utalloc.c20
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c32
-rw-r--r--drivers/acpi/acpica/utdecode.c70
-rw-r--r--drivers/acpi/acpica/utdelete.c38
-rw-r--r--drivers/acpi/acpica/uteval.c6
-rw-r--r--drivers/acpi/acpica/utexcep.c153
-rw-r--r--drivers/acpi/acpica/utglobal.c11
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utlock.c6
-rw-r--r--drivers/acpi/acpica/utmath.c4
-rw-r--r--drivers/acpi/acpica/utmisc.c104
-rw-r--r--drivers/acpi/acpica/utmutex.c8
-rw-r--r--drivers/acpi/acpica/utobject.c14
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c34
-rw-r--r--drivers/acpi/acpica/utstate.c16
-rw-r--r--drivers/acpi/acpica/utxface.c18
-rw-r--r--drivers/acpi/acpica/utxferror.c90
-rw-r--r--drivers/acpi/acpica/utxfmutex.c14
-rw-r--r--drivers/acpi/apei/apei-base.c5
-rw-r--r--drivers/acpi/battery.c11
-rw-r--r--drivers/acpi/bus.c4
-rw-r--r--drivers/acpi/button.c4
-rw-r--r--drivers/acpi/container.c43
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/numa.c12
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/pci_root.c11
-rw-r--r--drivers/acpi/power.c4
-rw-r--r--drivers/acpi/processor_driver.c30
-rw-r--r--drivers/acpi/processor_idle.c12
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/scan.c58
-rw-r--r--drivers/acpi/sleep.c77
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c14
-rw-r--r--drivers/acpi/utils.c42
-rw-r--r--drivers/acpi/video_detect.c60
-rw-r--r--drivers/amba/bus.c2
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c8
-rw-r--r--drivers/ata/ahci.h1
-rw-r--r--drivers/ata/ata_piix.c8
-rw-r--r--drivers/ata/libahci.c3
-rw-r--r--drivers/ata/libata-acpi.c15
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/pata_arasan_cf.c14
-rw-r--r--drivers/ata/pata_atiixp.c16
-rw-r--r--drivers/ata/sata_mv.c42
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/base/Kconfig1
-rw-r--r--drivers/base/core.c9
-rw-r--r--drivers/base/devtmpfs.c9
-rw-r--r--drivers/base/dma-mapping.c49
-rw-r--r--drivers/base/power/clock_ops.c3
-rw-r--r--drivers/base/power/common.c4
-rw-r--r--drivers/base/power/runtime.c13
-rw-r--r--drivers/bcma/driver_mips.c6
-rw-r--r--drivers/bcma/host_pci.c1
-rw-r--r--drivers/bcma/scan.c15
-rw-r--r--drivers/bcma/sprom.c4
-rw-r--r--drivers/block/cciss_scsi.c11
-rw-r--r--drivers/block/drbd/drbd_actlog.c8
-rw-r--r--drivers/block/drbd/drbd_bitmap.c19
-rw-r--r--drivers/block/drbd/drbd_int.h45
-rw-r--r--drivers/block/drbd/drbd_main.c97
-rw-r--r--drivers/block/drbd/drbd_nl.c40
-rw-r--r--drivers/block/drbd/drbd_proc.c3
-rw-r--r--drivers/block/drbd/drbd_receiver.c38
-rw-r--r--drivers/block/drbd/drbd_req.c45
-rw-r--r--drivers/block/drbd/drbd_worker.c12
-rw-r--r--drivers/block/floppy.c24
-rw-r--r--drivers/block/nbd.c8
-rw-r--r--drivers/block/rbd.c816
-rw-r--r--drivers/block/rbd_types.h1
-rw-r--r--drivers/block/umem.c37
-rw-r--r--drivers/block/virtio_blk.c115
-rw-r--r--drivers/block/xen-blkfront.c5
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/char/agp/intel-agp.c16
-rw-r--r--drivers/char/agp/intel-agp.h43
-rw-r--r--drivers/char/agp/intel-gtt.c198
-rw-r--r--drivers/char/hw_random/Kconfig14
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c175
-rw-r--r--drivers/char/hw_random/omap-rng.c2
-rw-r--r--drivers/char/hw_random/virtio-rng.c37
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/random.c355
-rw-r--r--drivers/char/tpm/tpm_tis.c2
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clocksource/cs5535-clockevt.c4
-rw-r--r--drivers/cpufreq/exynos5250-cpufreq.c55
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c1
-rw-r--r--drivers/cpuidle/Kconfig3
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/coupled.c727
-rw-r--r--drivers/cpuidle/cpuidle.c85
-rw-r--r--drivers/cpuidle/cpuidle.h32
-rw-r--r--drivers/crypto/caam/jr.c10
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/crypto/mv_cesa.c4
-rw-r--r--drivers/crypto/n2_core.c3
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/amba-pl08x.c941
-rw-r--r--drivers/dma/imx-dma.c36
-rw-r--r--drivers/dma/omap-dma.c669
-rw-r--r--drivers/dma/sa11x0-dma.c388
-rw-r--r--drivers/dma/sh/shdma-base.c9
-rw-r--r--drivers/dma/sh/shdma.c12
-rw-r--r--drivers/dma/tegra20-apb-dma.c18
-rw-r--r--drivers/dma/virt-dma.c123
-rw-r--r--drivers/dma/virt-dma.h152
-rw-r--r--drivers/edac/Kconfig24
-rw-r--r--drivers/edac/Makefile3
-rw-r--r--drivers/edac/amd64_edac.c376
-rw-r--r--drivers/edac/amd64_edac.h29
-rw-r--r--drivers/edac/amd64_edac_dbg.c89
-rw-r--r--drivers/edac/amd64_edac_inj.c134
-rw-r--r--drivers/edac/amd76x_edac.c34
-rw-r--r--drivers/edac/cell_edac.c28
-rw-r--r--drivers/edac/cpc925_edac.c96
-rw-r--r--drivers/edac/e752x_edac.c92
-rw-r--r--drivers/edac/e7xxx_edac.c89
-rw-r--r--drivers/edac/edac_core.h39
-rw-r--r--drivers/edac/edac_device.c47
-rw-r--r--drivers/edac/edac_device_sysfs.c71
-rw-r--r--drivers/edac/edac_mc.c395
-rw-r--r--drivers/edac/edac_mc_sysfs.c1355
-rw-r--r--drivers/edac/edac_module.c20
-rw-r--r--drivers/edac/edac_module.h26
-rw-r--r--drivers/edac/edac_pci.c26
-rw-r--r--drivers/edac/edac_pci_sysfs.c49
-rw-r--r--drivers/edac/highbank_l2_edac.c149
-rw-r--r--drivers/edac/highbank_mc_edac.c264
-rw-r--r--drivers/edac/i3000_edac.c47
-rw-r--r--drivers/edac/i3200_edac.c48
-rw-r--r--drivers/edac/i5000_edac.c207
-rw-r--r--drivers/edac/i5100_edac.c14
-rw-r--r--drivers/edac/i5400_edac.c201
-rw-r--r--drivers/edac/i7300_edac.c173
-rw-r--r--drivers/edac/i7core_edac.c520
-rw-r--r--drivers/edac/i82443bxgx_edac.c51
-rw-r--r--drivers/edac/i82860_edac.c45
-rw-r--r--drivers/edac/i82875p_edac.c53
-rw-r--r--drivers/edac/i82975x_edac.c55
-rw-r--r--drivers/edac/mpc85xx_edac.c131
-rw-r--r--drivers/edac/mv64x60_edac.c40
-rw-r--r--drivers/edac/pasemi_edac.c22
-rw-r--r--drivers/edac/ppc4xx_edac.c16
-rw-r--r--drivers/edac/r82600_edac.c48
-rw-r--r--drivers/edac/sb_edac.c257
-rw-r--r--drivers/edac/tile_edac.c12
-rw-r--r--drivers/edac/x38_edac.c48
-rw-r--r--drivers/extcon/Kconfig2
-rw-r--r--drivers/extcon/extcon-max8997.c29
-rw-r--r--drivers/extcon/extcon_gpio.c3
-rw-r--r--drivers/firewire/core-device.c9
-rw-r--r--drivers/firewire/core-iso.c2
-rw-r--r--drivers/firewire/core-transaction.c23
-rw-r--r--drivers/firewire/ohci.c30
-rw-r--r--drivers/firmware/dmi_scan.c3
-rw-r--r--drivers/firmware/memmap.c8
-rw-r--r--drivers/firmware/pcdp.c4
-rw-r--r--drivers/gpio/Kconfig27
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/gpio-amd8111.c246
-rw-r--r--drivers/gpio/gpio-arizona.c163
-rw-r--r--drivers/gpio/gpio-em.c6
-rw-r--r--drivers/gpio/gpio-langwell.c7
-rw-r--r--drivers/gpio/gpio-lpc32xx.c74
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-mxc.c76
-rw-r--r--drivers/gpio/gpio-omap.c10
-rw-r--r--drivers/gpio/gpio-pca953x.c67
-rw-r--r--drivers/gpio/gpio-pcf857x.c93
-rw-r--r--drivers/gpio/gpio-pxa.c30
-rw-r--r--drivers/gpio/gpio-rdc321x.c1
-rw-r--r--drivers/gpio/gpio-samsung.c19
-rw-r--r--drivers/gpio/gpio-sch.c3
-rw-r--r--drivers/gpio/gpio-tps6586x.c158
-rw-r--r--drivers/gpio/gpio-wm8994.c17
-rw-r--r--drivers/gpio/gpiolib-of.c11
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c16
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c1
-rw-r--r--drivers/gpu/drm/drm_dma.c5
-rw-r--r--drivers/gpu/drm/drm_drv.c13
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_edid_load.c8
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_fops.c78
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_info.c38
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_lock.c4
-rw-r--r--drivers/gpu/drm/drm_mm.c169
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_pci.c49
-rw-r--r--drivers/gpu/drm/drm_proc.c5
-rw-r--r--drivers/gpu/drm/drm_sysfs.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c296
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h31
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c125
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c40
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c49
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c246
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c53
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c48
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.h2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c6
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h2
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c2
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c17
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h6
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/dvo.h2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c68
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c61
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c307
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h70
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c351
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c535
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c93
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c270
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h192
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c139
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h28
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c126
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c522
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c98
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h54
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c6
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c375
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c10
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c66
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c49
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c60
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c478
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c128
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c66
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c35
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c13
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c20
-rw-r--r--drivers/gpu/drm/nouveau/Makefile3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c245
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h83
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c95
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpuobj.c (renamed from drivers/gpu/drm/nouveau/nouveau_object.c)60
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_software.h23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c81
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c25
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_software.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c18
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fifo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc4
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc.h94
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.fuc.h87
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c10
-rw-r--r--drivers/gpu/drm/nouveau/nve0_fifo.c37
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c97
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c41
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c140
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c341
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c43
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h6
-rw-r--r--drivers/gpu/drm/radeon/ni.c212
-rw-r--r--drivers/gpu/drm/radeon/nid.h4
-rw-r--r--drivers/gpu/drm/radeon/r100.c1191
-rw-r--r--drivers/gpu/drm/radeon/r200.c4
-rw-r--r--drivers/gpu/drm/radeon/r300.c21
-rw-r--r--drivers/gpu/drm/radeon/r420.c21
-rw-r--r--drivers/gpu/drm/radeon/r520.c18
-rw-r--r--drivers/gpu/drm/radeon/r600.c213
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c70
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c196
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c6
-rw-r--r--drivers/gpu/drm/radeon/r600d.h31
-rw-r--r--drivers/gpu/drm/radeon/radeon.h180
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h24
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c56
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c138
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c111
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c390
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c398
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c417
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c283
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c173
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c411
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c84
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c95
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c48
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6009
-rw-r--r--drivers/gpu/drm/radeon/rs400.c21
-rw-r--r--drivers/gpu/drm/radeon/rs600.c44
-rw-r--r--drivers/gpu/drm/radeon/rs690.c21
-rw-r--r--drivers/gpu/drm/radeon/rv515.c33
-rw-r--r--drivers/gpu/drm/radeon/rv770.c38
-rw-r--r--drivers/gpu/drm/radeon/si.c211
-rw-r--r--drivers/gpu/drm/radeon/sid.h4
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c9
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c2
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c19
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c1
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/udl/udl_encoder.c2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c49
-rw-r--r--drivers/gpu/drm/via/via_drv.c4
-rw-r--r--drivers/gpu/drm/via/via_mm.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c61
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/applesmc.c70
-rw-r--r--drivers/hwmon/asus_atk0110.c6
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/jc42.c26
-rw-r--r--drivers/hwmon/via-cputemp.c2
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/i2c/busses/Kconfig7
-rw-r--r--drivers/i2c/busses/i2c-at91.c13
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c147
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c1
-rw-r--r--drivers/i2c/busses/i2c-imx.c75
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c133
-rw-r--r--drivers/i2c/busses/i2c-mxs.c68
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c207
-rw-r--r--drivers/i2c/busses/i2c-ocores.c113
-rw-r--r--drivers/i2c/busses/i2c-octeon.c92
-rw-r--r--drivers/i2c/busses/i2c-omap.c155
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c3
-rw-r--r--drivers/i2c/busses/i2c-pnx.c19
-rw-r--r--drivers/i2c/busses/i2c-puv3.c15
-rw-r--r--drivers/i2c/busses/i2c-pxa.c7
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-stu300.c102
-rw-r--r--drivers/i2c/busses/i2c-tegra.c124
-rw-r--r--drivers/i2c/i2c-core.c44
-rw-r--r--drivers/ide/ide-pm.c4
-rw-r--r--drivers/idle/intel_idle.c40
-rw-r--r--drivers/iio/frequency/adf4350.c24
-rw-r--r--drivers/iio/light/adjd_s311.c7
-rw-r--r--drivers/iio/light/lm3533-als.c4
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/ucma.c21
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c16
-rw-r--r--drivers/infiniband/hw/mlx4/main.c5
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib.h10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h56
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c19
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c646
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c57
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c87
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c2
-rw-r--r--drivers/input/misc/88pm80x_onkey.c168
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ab8500-ponkey.c4
-rw-r--r--drivers/input/mouse/synaptics.c22
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c21
-rw-r--r--drivers/input/tablet/wacom_wac.h3
-rw-r--r--drivers/input/touchscreen/Kconfig13
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c898
-rw-r--r--drivers/input/touchscreen/eeti_ts.c21
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c1
-rw-r--r--drivers/iommu/amd_iommu.c25
-rw-r--r--drivers/iommu/amd_iommu_init.c8
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel-iommu.c26
-rw-r--r--drivers/iommu/intel_irq_remapping.c18
-rw-r--r--drivers/iommu/tegra-smmu.c17
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c7
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c12
-rw-r--r--drivers/isdn/mISDN/layer2.c2
-rw-r--r--drivers/leds/Kconfig39
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class.c27
-rw-r--r--drivers/leds/led-core.c66
-rw-r--r--drivers/leds/led-triggers.c116
-rw-r--r--drivers/leds/leds-88pm860x.c9
-rw-r--r--drivers/leds/leds-adp5520.c8
-rw-r--r--drivers/leds/leds-asic3.c16
-rw-r--r--drivers/leds/leds-atmel-pwm.c5
-rw-r--r--drivers/leds/leds-bd2802.c8
-rw-r--r--drivers/leds/leds-blinkm.c815
-rw-r--r--drivers/leds/leds-da903x.c9
-rw-r--r--drivers/leds/leds-dac124s085.c4
-rw-r--r--drivers/leds/leds-gpio.c11
-rw-r--r--drivers/leds/leds-lm3530.c24
-rw-r--r--drivers/leds/leds-lm3556.c512
-rw-r--r--drivers/leds/leds-lp3944.c9
-rw-r--r--drivers/leds/leds-lp5521.c20
-rw-r--r--drivers/leds/leds-lp5523.c30
-rw-r--r--drivers/leds/leds-lp8788.c193
-rw-r--r--drivers/leds/leds-lt3593.c9
-rw-r--r--drivers/leds/leds-max8997.c93
-rw-r--r--drivers/leds/leds-mc13783.c9
-rw-r--r--drivers/leds/leds-netxbig.c10
-rw-r--r--drivers/leds/leds-ns2.c19
-rw-r--r--drivers/leds/leds-pca9532.c10
-rw-r--r--drivers/leds/leds-pca955x.c18
-rw-r--r--drivers/leds/leds-pca9633.c6
-rw-r--r--drivers/leds/leds-pwm.c7
-rw-r--r--drivers/leds/leds-regulator.c9
-rw-r--r--drivers/leds/leds-renesas-tpu.c25
-rw-r--r--drivers/leds/leds-s3c24xx.c44
-rw-r--r--drivers/leds/leds-sunfire.c21
-rw-r--r--drivers/leds/leds-tca6507.c17
-rw-r--r--drivers/leds/leds.h2
-rw-r--r--drivers/leds/ledtrig-backlight.c8
-rw-r--r--drivers/leds/ledtrig-default-on.c2
-rw-r--r--drivers/leds/ledtrig-gpio.c6
-rw-r--r--drivers/leds/ledtrig-heartbeat.c2
-rw-r--r--drivers/leds/ledtrig-ide-disk.c25
-rw-r--r--drivers/leds/ledtrig-oneshot.c204
-rw-r--r--drivers/leds/ledtrig-timer.c2
-rw-r--r--drivers/leds/ledtrig-transient.c8
-rw-r--r--drivers/md/Kconfig14
-rw-r--r--drivers/md/bitmap.c2
-rw-r--r--drivers/md/dm-crypt.c219
-rw-r--r--drivers/md/dm-delay.c2
-rw-r--r--drivers/md/dm-exception-store.c13
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-ioctl.c5
-rw-r--r--drivers/md/dm-linear.c2
-rw-r--r--drivers/md/dm-log.c13
-rw-r--r--drivers/md/dm-mpath.c49
-rw-r--r--drivers/md/dm-raid.c147
-rw-r--r--drivers/md/dm-raid1.c10
-rw-r--r--drivers/md/dm-snap.c34
-rw-r--r--drivers/md/dm-stripe.c87
-rw-r--r--drivers/md/dm-table.c3
-rw-r--r--drivers/md/dm-thin-metadata.c769
-rw-r--r--drivers/md/dm-thin-metadata.h25
-rw-r--r--drivers/md/dm-thin.c542
-rw-r--r--drivers/md/dm-verity.c2
-rw-r--r--drivers/md/dm.c40
-rw-r--r--drivers/md/dm.h5
-rw-r--r--drivers/md/md.c76
-rw-r--r--drivers/md/md.h11
-rw-r--r--drivers/md/persistent-data/Makefile1
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c105
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h21
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c446
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.h26
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c12
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c34
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c91
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h11
-rw-r--r--drivers/md/raid1.c224
-rw-r--r--drivers/md/raid1.h30
-rw-r--r--drivers/md/raid10.c125
-rw-r--r--drivers/md/raid10.h25
-rw-r--r--drivers/md/raid5.c317
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/Kconfig114
-rw-r--r--drivers/media/common/tuners/Kconfig64
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c249
-rw-r--r--drivers/media/common/tuners/xc5000.c14
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c1
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.h1
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig3
-rw-r--r--drivers/media/dvb/dvb-usb/az6007.c6
-rw-r--r--drivers/media/dvb/dvb-usb/dvb-usb-ids.h3
-rw-r--r--drivers/media/dvb/dvb-usb/rtl28xxu.c516
-rw-r--r--drivers/media/dvb/frontends/Kconfig8
-rw-r--r--drivers/media/dvb/frontends/Makefile1
-rw-r--r--drivers/media/dvb/frontends/a8293.c37
-rw-r--r--drivers/media/dvb/frontends/dib8000.c4
-rw-r--r--drivers/media/dvb/frontends/drxk.h11
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.c350
-rw-r--r--drivers/media/dvb/frontends/drxk_hard.h17
-rw-r--r--drivers/media/dvb/frontends/lgs8gxx.c5
-rw-r--r--drivers/media/dvb/frontends/rtl2832.c789
-rw-r--r--drivers/media/dvb/frontends/rtl2832.h74
-rw-r--r--drivers/media/dvb/frontends/rtl2832_priv.h260
-rw-r--r--drivers/media/dvb/frontends/s5h1420.c20
-rw-r--r--drivers/media/dvb/frontends/stb0899_drv.c22
-rw-r--r--drivers/media/dvb/frontends/stv0367.c5
-rw-r--r--drivers/media/dvb/frontends/stv090x.c4
-rw-r--r--drivers/media/dvb/frontends/tda10071.c351
-rw-r--r--drivers/media/dvb/frontends/tda10071_priv.h15
-rw-r--r--drivers/media/dvb/ngene/ngene-cards.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.c39
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/radio/Kconfig34
-rw-r--r--drivers/media/radio/Makefile4
-rw-r--r--drivers/media/radio/lm7000.h43
-rw-r--r--drivers/media/radio/radio-aimslab.c66
-rw-r--r--drivers/media/radio/radio-cadet.c388
-rw-r--r--drivers/media/radio/radio-mr800.c5
-rw-r--r--drivers/media/radio/radio-sf16fmi.c61
-rw-r--r--drivers/media/radio/radio-shark.c381
-rw-r--r--drivers/media/radio/radio-shark2.c355
-rw-r--r--drivers/media/radio/radio-tea5777.c491
-rw-r--r--drivers/media/radio/radio-tea5777.h87
-rw-r--r--drivers/media/radio/radio-wl1273.c3
-rw-r--r--drivers/media/radio/si470x/radio-si470x-common.c292
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c11
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c49
-rw-r--r--drivers/media/radio/si470x/radio-si470x.h7
-rw-r--r--drivers/media/radio/wl128x/fmdrv_rx.c2
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c4
-rw-r--r--drivers/media/rc/Kconfig61
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/ati_remote.c133
-rw-r--r--drivers/media/rc/ene_ir.c3
-rw-r--r--drivers/media/rc/fintek-cir.c32
-rw-r--r--drivers/media/rc/gpio-ir-recv.c26
-rw-r--r--drivers/media/rc/iguanair.c639
-rw-r--r--drivers/media/rc/mceusb.c20
-rw-r--r--drivers/media/rc/nuvoton-cir.c145
-rw-r--r--drivers/media/rc/rc-main.c5
-rw-r--r--drivers/media/video/Kconfig85
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/adv7180.c235
-rw-r--r--drivers/media/video/adv7393.c487
-rw-r--r--drivers/media/video/adv7393_regs.h188
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c11
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c6
-rw-r--r--drivers/media/video/bt8xx/bttv.h2
-rw-r--r--drivers/media/video/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/video/cs8420.h50
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.c18
-rw-r--r--drivers/media/video/cx18/cx18-ioctl.h2
-rw-r--r--drivers/media/video/cx18/cx18-streams.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-avcore.c56
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c17
-rw-r--r--drivers/media/video/cx231xx/cx231xx-i2c.c8
-rw-r--r--drivers/media/video/cx231xx/cx231xx.h2
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c10
-rw-r--r--drivers/media/video/cx23885/cx23885.h2
-rw-r--r--drivers/media/video/cx25821/cx25821-i2c.c10
-rw-r--r--drivers/media/video/cx25821/cx25821-medusa-video.c2
-rw-r--r--drivers/media/video/cx25821/cx25821.h2
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c31
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c234
-rw-r--r--drivers/media/video/cx88/cx88-cards.c20
-rw-r--r--drivers/media/video/cx88/cx88-core.c7
-rw-r--r--drivers/media/video/cx88/cx88-video.c901
-rw-r--r--drivers/media/video/cx88/cx88.h68
-rw-r--r--drivers/media/video/davinci/Kconfig30
-rw-r--r--drivers/media/video/davinci/Makefile8
-rw-r--r--drivers/media/video/davinci/vpbe_display.c4
-rw-r--r--drivers/media/video/davinci/vpif.c45
-rw-r--r--drivers/media/video/davinci/vpif.h45
-rw-r--r--drivers/media/video/davinci/vpif_capture.c694
-rw-r--r--drivers/media/video/davinci/vpif_capture.h16
-rw-r--r--drivers/media/video/davinci/vpif_display.c684
-rw-r--r--drivers/media/video/davinci/vpif_display.h23
-rw-r--r--drivers/media/video/em28xx/em28xx-audio.c27
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c7
-rw-r--r--drivers/media/video/em28xx/em28xx-core.c33
-rw-r--r--drivers/media/video/em28xx/em28xx-dvb.c95
-rw-r--r--drivers/media/video/em28xx/em28xx-i2c.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-reg.h51
-rw-r--r--drivers/media/video/gspca/benq.c7
-rw-r--r--drivers/media/video/gspca/conex.c208
-rw-r--r--drivers/media/video/gspca/cpia1.c486
-rw-r--r--drivers/media/video/gspca/etoms.c221
-rw-r--r--drivers/media/video/gspca/finepix.c1
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c1
-rw-r--r--drivers/media/video/gspca/gspca.c50
-rw-r--r--drivers/media/video/gspca/jeilinj.c219
-rw-r--r--drivers/media/video/gspca/jl2005bcd.c5
-rw-r--r--drivers/media/video/gspca/kinect.c10
-rw-r--r--drivers/media/video/gspca/konica.c289
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c1
-rw-r--r--drivers/media/video/gspca/mars.c48
-rw-r--r--drivers/media/video/gspca/mr97310a.c439
-rw-r--r--drivers/media/video/gspca/nw80x.c203
-rw-r--r--drivers/media/video/gspca/ov519.c600
-rw-r--r--drivers/media/video/gspca/ov534.c570
-rw-r--r--drivers/media/video/gspca/ov534_9.c294
-rw-r--r--drivers/media/video/gspca/pac207.c1
-rw-r--r--drivers/media/video/gspca/pac7302.c372
-rw-r--r--drivers/media/video/gspca/pac7311.c1
-rw-r--r--drivers/media/video/gspca/se401.c184
-rw-r--r--drivers/media/video/gspca/sn9c2028.c7
-rw-r--r--drivers/media/video/gspca/sonixb.c622
-rw-r--r--drivers/media/video/gspca/sonixj.c1
-rw-r--r--drivers/media/video/gspca/spca1528.c271
-rw-r--r--drivers/media/video/gspca/spca500.c201
-rw-r--r--drivers/media/video/gspca/spca501.c257
-rw-r--r--drivers/media/video/gspca/spca505.c77
-rw-r--r--drivers/media/video/gspca/spca506.c211
-rw-r--r--drivers/media/video/gspca/spca508.c71
-rw-r--r--drivers/media/video/gspca/spca561.c393
-rw-r--r--drivers/media/video/gspca/sq905.c1
-rw-r--r--drivers/media/video/gspca/sq905c.c1
-rw-r--r--drivers/media/video/gspca/sq930x.c110
-rw-r--r--drivers/media/video/gspca/stk014.c188
-rw-r--r--drivers/media/video/gspca/stv0680.c7
-rw-r--r--drivers/media/video/gspca/sunplus.c237
-rw-r--r--drivers/media/video/gspca/t613.c824
-rw-r--r--drivers/media/video/gspca/topro.c459
-rw-r--r--drivers/media/video/gspca/tv8532.c125
-rw-r--r--drivers/media/video/gspca/vc032x.c694
-rw-r--r--drivers/media/video/gspca/vicam.c68
-rw-r--r--drivers/media/video/gspca/w996Xcf.c15
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c473
-rw-r--r--drivers/media/video/ibmmpeg2.h94
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c12
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h1
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/m5mols/Kconfig1
-rw-r--r--drivers/media/video/m5mols/m5mols_controls.c4
-rw-r--r--drivers/media/video/mem2mem_testdev.c332
-rw-r--r--drivers/media/video/mt9m001.c2
-rw-r--r--drivers/media/video/mt9m032.c13
-rw-r--r--drivers/media/video/mt9m111.c1
-rw-r--r--drivers/media/video/mt9p031.c5
-rw-r--r--drivers/media/video/mt9t001.c13
-rw-r--r--drivers/media/video/mt9v022.c2
-rw-r--r--drivers/media/video/mx1_camera.c4
-rw-r--r--drivers/media/video/mx2_camera.c47
-rw-r--r--drivers/media/video/mx2_emmaprp.c10
-rw-r--r--drivers/media/video/mx3_camera.c22
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/video/omap3isp/isppreview.c8
-rw-r--r--drivers/media/video/omap3isp/ispresizer.c6
-rw-r--r--drivers/media/video/ov2640.c6
-rw-r--r--drivers/media/video/ov772x.c8
-rw-r--r--drivers/media/video/ov9640.c1
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/media/video/pvrusb2/Kconfig1
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c12
-rw-r--r--drivers/media/video/pwc/pwc-if.c171
-rw-r--r--drivers/media/video/pwc/pwc-v4l.c165
-rw-r--r--drivers/media/video/pwc/pwc.h3
-rw-r--r--drivers/media/video/s2255drv.c1
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c112
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.h3
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite-reg.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.c15
-rw-r--r--drivers/media/video/s5p-fimc/fimc-m2m.c52
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c7
-rw-r--r--drivers/media/video/s5p-fimc/fimc-reg.c20
-rw-r--r--drivers/media/video/s5p-g2d/g2d.c9
-rw-r--r--drivers/media/video/s5p-jpeg/jpeg-core.c38
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c10
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c11
-rw-r--r--drivers/media/video/s5p-tv/mixer_video.c8
-rw-r--r--drivers/media/video/s5p-tv/sii9234_drv.c12
-rw-r--r--drivers/media/video/saa7121.h132
-rw-r--r--drivers/media/video/saa7134/saa7134-dvb.c82
-rw-r--r--drivers/media/video/saa7146.h112
-rw-r--r--drivers/media/video/saa7146reg.h283
-rw-r--r--drivers/media/video/saa7164/saa7164-api.c14
-rw-r--r--drivers/media/video/saa7164/saa7164-i2c.c20
-rw-r--r--drivers/media/video/saa7164/saa7164.h2
-rw-r--r--drivers/media/video/smiapp/Kconfig1
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c41
-rw-r--r--drivers/media/video/sn9c102/sn9c102.h2
-rw-r--r--drivers/media/video/soc_camera.c12
-rw-r--r--drivers/media/video/soc_mediabus.c6
-rw-r--r--drivers/media/video/tlg2300/pd-main.c4
-rw-r--r--drivers/media/video/tuner-core.c15
-rw-r--r--drivers/media/video/tvaudio.c291
-rw-r--r--drivers/media/video/tvp5150.c97
-rw-r--r--drivers/media/video/tw9910.c8
-rw-r--r--drivers/media/video/uvc/Kconfig1
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c5
-rw-r--r--drivers/media/video/uvc/uvc_queue.c1
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/video/uvc/uvc_video.c8
-rw-r--r--drivers/media/video/v4l2-compat-ioctl32.c10
-rw-r--r--drivers/media/video/v4l2-ctrls.c3
-rw-r--r--drivers/media/video/v4l2-dev.c70
-rw-r--r--drivers/media/video/v4l2-ioctl.c3365
-rw-r--r--drivers/media/video/v4l2-mem2mem.c18
-rw-r--r--drivers/media/video/v4l2-subdev.c4
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/media/video/videobuf-core.c16
-rw-r--r--drivers/media/video/videobuf-dma-contig.c57
-rw-r--r--drivers/media/video/videobuf2-core.c417
-rw-r--r--drivers/media/video/vivi.c220
-rw-r--r--drivers/media/video/zr364xx.c484
-rw-r--r--drivers/message/i2o/i2o_config.c7
-rw-r--r--drivers/message/i2o/i2o_proc.c37
-rw-r--r--drivers/mfd/88pm800.c596
-rw-r--r--drivers/mfd/88pm805.c301
-rw-r--r--drivers/mfd/88pm80x.c145
-rw-r--r--drivers/mfd/88pm860x-core.c23
-rw-r--r--drivers/mfd/Kconfig102
-rw-r--r--drivers/mfd/Makefile15
-rw-r--r--drivers/mfd/ab3100-core.c23
-rw-r--r--drivers/mfd/ab8500-core.c242
-rw-r--r--drivers/mfd/ab8500-debugfs.c12
-rw-r--r--drivers/mfd/ab8500-gpadc.c9
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/anatop-mfd.c2
-rw-r--r--drivers/mfd/arizona-core.c566
-rw-r--r--drivers/mfd/arizona-i2c.c97
-rw-r--r--drivers/mfd/arizona-irq.c275
-rw-r--r--drivers/mfd/arizona-spi.c97
-rw-r--r--drivers/mfd/arizona.h40
-rw-r--r--drivers/mfd/asic3.c1
-rw-r--r--drivers/mfd/da9052-core.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c92
-rw-r--r--drivers/mfd/dbx500-prcmu-regs.h1
-rw-r--r--drivers/mfd/ezx-pcap.c2
-rw-r--r--drivers/mfd/max77686-irq.c319
-rw-r--r--drivers/mfd/max77686.c187
-rw-r--r--drivers/mfd/max77693.c11
-rw-r--r--drivers/mfd/max8925-core.c8
-rw-r--r--drivers/mfd/max8997-irq.c62
-rw-r--r--drivers/mfd/max8997.c9
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mc13xxx-i2c.c12
-rw-r--r--drivers/mfd/mc13xxx-spi.c15
-rw-r--r--drivers/mfd/mfd-core.c30
-rw-r--r--drivers/mfd/pcf50633-core.c9
-rw-r--r--drivers/mfd/s5m-core.c206
-rw-r--r--drivers/mfd/s5m-irq.c495
-rw-r--r--drivers/mfd/sec-core.c216
-rw-r--r--drivers/mfd/sec-irq.c317
-rw-r--r--drivers/mfd/tc3589x.c9
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/tps65010.c3
-rw-r--r--drivers/mfd/tps65090.c4
-rw-r--r--drivers/mfd/tps6586x.c296
-rw-r--r--drivers/mfd/tps65910.c23
-rw-r--r--drivers/mfd/twl-core.c12
-rw-r--r--drivers/mfd/twl6040-core.c24
-rw-r--r--drivers/mfd/wm5102-tables.c2399
-rw-r--r--drivers/mfd/wm5110-tables.c2281
-rw-r--r--drivers/mfd/wm831x-otp.c8
-rw-r--r--drivers/mfd/wm8350-core.c354
-rw-r--r--drivers/mfd/wm8350-i2c.c5
-rw-r--r--drivers/mfd/wm8350-irq.c8
-rw-r--r--drivers/mfd/wm8350-regmap.c3222
-rw-r--r--drivers/mfd/wm8994-core.c17
-rw-r--r--drivers/mfd/wm8994-irq.c10
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/misc/lkdtm.c2
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c27
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c84
-rw-r--r--drivers/misc/ti-st/st_core.c5
-rw-r--r--drivers/misc/ti-st/st_ll.c2
-rw-r--r--drivers/mmc/card/block.c26
-rw-r--r--drivers/mmc/host/atmel-mci.c6
-rw-r--r--drivers/mmc/host/bfin_sdh.c7
-rw-r--r--drivers/mmc/host/dw_mmc.c85
-rw-r--r--drivers/mmc/host/mvsdio.c4
-rw-r--r--drivers/mmc/host/mxs-mmc.c14
-rw-r--r--drivers/mmc/host/omap.c382
-rw-r--r--drivers/mmc/host/omap_hsmmc.c204
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h6
-rw-r--r--drivers/mtd/maps/uclinux.c5
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c228
-rw-r--r--drivers/mtd/nand/omap2.c105
-rw-r--r--drivers/mtd/nand/orion_nand.c6
-rw-r--r--drivers/mtd/ubi/vtbl.c4
-rw-r--r--drivers/net/appletalk/cops.c4
-rw-r--r--drivers/net/appletalk/ltpc.c4
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/caif/caif_serial.c3
-rw-r--r--drivers/net/can/c_can/c_can_platform.c8
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c4
-rw-r--r--drivers/net/can/softing/softing_fw.c7
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c93
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c71
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c8
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c84
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c14
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c312
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c70
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c1
-rw-r--r--drivers/net/ethernet/renesas/Kconfig4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c11
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c20
-rw-r--r--drivers/net/ethernet/sfc/tx.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h47
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h4
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c3
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c4
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig1
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c1
-rw-r--r--drivers/net/fddi/skfp/pmf.c2
-rw-r--r--drivers/net/hyperv/netvsc.c7
-rw-r--r--drivers/net/hyperv/netvsc_drv.c7
-rw-r--r--drivers/net/hyperv/rndis_filter.c22
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c12
-rw-r--r--drivers/net/macvtap.c3
-rw-r--r--drivers/net/netconsole.c6
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux.c2
-rw-r--r--drivers/net/phy/mdio-octeon.c92
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/team/team.c18
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/cdc-phonet.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c88
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c255
-rw-r--r--drivers/net/usb/sierra_net.c52
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/dscc4.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c6
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c13
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c30
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c1
-rw-r--r--drivers/net/wireless/libertas/main.c5
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c3
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/xen-netfront.c39
-rw-r--r--drivers/of/base.c27
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c13
-rw-r--r--drivers/pci/pci-sysfs.c42
-rw-r--r--drivers/pci/pci.c1
-rw-r--r--drivers/pci/pcie/portdrv_pci.c14
-rw-r--r--drivers/pci/probe.c31
-rw-r--r--drivers/pcmcia/sa1100_shannon.c1
-rw-r--r--drivers/pinctrl/core.c13
-rw-r--r--drivers/pinctrl/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx51.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c7
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c3
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c1
-rw-r--r--drivers/pinctrl/pinctrl-u300.c8
-rw-r--r--drivers/platform/Makefile1
-rw-r--r--drivers/platform/olpc/Makefile4
-rw-r--r--drivers/platform/olpc/olpc-ec.c336
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/platform/x86/acer-wmi.c153
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c428
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c108
-rw-r--r--drivers/platform/x86/asus-wmi.c44
-rw-r--r--drivers/platform/x86/asus-wmi.h2
-rw-r--r--drivers/platform/x86/classmate-laptop.c421
-rw-r--r--drivers/platform/x86/dell-laptop.c54
-rw-r--r--drivers/platform/x86/eeepc-wmi.c25
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c2
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/ideapad-laptop.c110
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c4
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c41
-rw-r--r--drivers/platform/x86/sony-laptop.c12
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c14
-rw-r--r--drivers/platform/x86/toshiba_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c4
-rw-r--r--drivers/platform/x86/xo1-rfkill.c3
-rw-r--r--drivers/platform/x86/xo15-ebook.c2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/bq27x00_battery.c155
-rw-r--r--drivers/power/charger-manager.c152
-rw-r--r--drivers/power/ds2781_battery.c2
-rw-r--r--drivers/power/gpio-charger.c2
-rw-r--r--drivers/power/lp8727_charger.c2
-rw-r--r--drivers/power/max17042_battery.c8
-rw-r--r--drivers/power/olpc_battery.c63
-rw-r--r--drivers/power/pda_power.c10
-rw-r--r--drivers/power/power_supply_core.c65
-rw-r--r--drivers/power/power_supply_sysfs.c8
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c123
-rw-r--r--drivers/power/test_power.c75
-rw-r--r--drivers/power/twl4030_charger.c80
-rw-r--r--drivers/pps/pps.c4
-rw-r--r--drivers/pwm/Kconfig127
-rw-r--r--drivers/pwm/Makefile11
-rw-r--r--drivers/pwm/core.c713
-rw-r--r--drivers/pwm/pwm-bfin.c162
-rw-r--r--drivers/pwm/pwm-imx.c (renamed from arch/arm/plat-mxc/pwm.c)204
-rw-r--r--drivers/pwm/pwm-lpc32xx.c148
-rw-r--r--drivers/pwm/pwm-mxs.c203
-rw-r--r--drivers/pwm/pwm-pxa.c218
-rw-r--r--drivers/pwm/pwm-samsung.c (renamed from arch/arm/plat-samsung/pwm.c)237
-rw-r--r--drivers/pwm/pwm-tegra.c259
-rw-r--r--drivers/pwm/pwm-tiecap.c230
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c409
-rw-r--r--drivers/pwm/pwm-vt8500.c177
-rw-r--r--drivers/rapidio/devices/tsi721.c12
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/ab3100.c1
-rw-r--r--drivers/regulator/ab8500.c6
-rw-r--r--drivers/regulator/anatop-regulator.c5
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/db8500-prcmu.c6
-rw-r--r--drivers/regulator/gpio-regulator.c38
-rw-r--r--drivers/regulator/palmas-regulator.c23
-rw-r--r--drivers/regulator/s5m8767.c78
-rw-r--r--drivers/regulator/tps6586x-regulator.c8
-rw-r--r--drivers/regulator/twl-regulator.c5
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/omap_remoteproc.c26
-rw-r--r--drivers/remoteproc/remoteproc_core.c726
-rw-r--r--drivers/remoteproc/remoteproc_debugfs.c4
-rw-r--r--drivers/remoteproc/remoteproc_elf_loader.c295
-rw-r--r--drivers/remoteproc/remoteproc_internal.h62
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c34
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c3
-rw-r--r--drivers/rtc/Kconfig11
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-88pm80x.c369
-rw-r--r--drivers/rtc/rtc-ab8500.c42
-rw-r--r--drivers/rtc/rtc-at91sam9.c22
-rw-r--r--drivers/rtc/rtc-cmos.c1
-rw-r--r--drivers/rtc/rtc-coh901331.c61
-rw-r--r--drivers/rtc/rtc-da9052.c5
-rw-r--r--drivers/rtc/rtc-max8925.c13
-rw-r--r--drivers/rtc/rtc-mc13xxx.c6
-rw-r--r--drivers/rtc/rtc-mv.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-pcf8563.c11
-rw-r--r--drivers/rtc/rtc-pl031.c95
-rw-r--r--drivers/rtc/rtc-r9701.c6
-rw-r--r--drivers/rtc/rtc-rs5c348.c7
-rw-r--r--drivers/rtc/rtc-s3c.c4
-rw-r--r--drivers/rtc/rtc-wm831x.c24
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c7
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/s390/net/netiucv.c34
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c38
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/sh/intc/Kconfig4
-rw-r--r--drivers/sh/intc/Makefile2
-rw-r--r--drivers/sh/intc/core.c38
-rw-r--r--drivers/sh/intc/internals.h5
-rw-r--r--drivers/sh/intc/irqdomain.c68
-rw-r--r--drivers/sh/pfc/pinctrl.c34
-rw-r--r--drivers/spi/Kconfig9
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-bcm63xx.c35
-rw-r--r--drivers/spi/spi-coldfire-qspi.c5
-rw-r--r--drivers/spi/spi-falcon.c469
-rw-r--r--drivers/spi/spi-omap2-mcspi.c235
-rw-r--r--drivers/spi/spi-pl022.c1
-rw-r--r--drivers/spi/spi-s3c64xx.c14
-rw-r--r--drivers/staging/bcm/Misc.c31
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c3
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c2
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c17
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c6
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c26
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c4
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c4
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c4
-rw-r--r--drivers/staging/csr/Kconfig2
-rw-r--r--drivers/staging/gdm72xx/sdio_boot.c7
-rw-r--r--drivers/staging/gdm72xx/usb_boot.c22
-rw-r--r--drivers/staging/iio/adc/ad7192.c48
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c2
-rw-r--r--drivers/staging/iio/adc/ad7780.c10
-rw-r--r--drivers/staging/iio/adc/ad7793.c99
-rw-r--r--drivers/staging/media/dt3155v4l/dt3155v4l.c15
-rw-r--r--drivers/staging/media/easycap/easycap_main.c1
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c2
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c60
-rw-r--r--drivers/staging/media/solo6x10/TODO2
-rw-r--r--drivers/staging/media/solo6x10/core.c13
-rw-r--r--drivers/staging/media/solo6x10/i2c.c2
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c28
-rw-r--r--drivers/staging/octeon/ethernet.c153
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h3
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c1
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c2
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c2
-rw-r--r--drivers/staging/winbond/wbusb.c2
-rw-r--r--drivers/target/target_core_file.c32
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/target/target_core_transport.c15
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c8
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c4
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/spear_thermal.c28
-rw-r--r--drivers/thermal/thermal_sys.c225
-rw-r--r--drivers/tty/serial/8250/8250.c8
-rw-r--r--drivers/tty/serial/Kconfig10
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c14
-rw-r--r--drivers/tty/serial/of_serial.c1
-rw-r--r--drivers/tty/serial/pch_uart.c59
-rw-r--r--drivers/tty/serial/pmac_zilog.c12
-rw-r--r--drivers/tty/serial/samsung.c4
-rw-r--r--drivers/tty/serial/serial_core.c6
-rw-r--r--drivers/tty/serial/sh-sci.c5
-rw-r--r--drivers/tty/serial/uartlite.c3
-rw-r--r--drivers/tty/tty_ldisc.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c47
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/chipidea/Kconfig9
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c9
-rw-r--r--drivers/usb/early/ehci-dbgp.c2
-rw-r--r--drivers/usb/gadget/f_phonet.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/m66592-udc.c9
-rw-r--r--drivers/usb/gadget/m66592-udc.h5
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c12
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c11
-rw-r--r--drivers/usb/gadget/r8a66597-udc.h5
-rw-r--r--drivers/usb/gadget/storage_common.c12
-rw-r--r--drivers/usb/gadget/u_ether.c6
-rw-r--r--drivers/usb/gadget/u_uac1.c6
-rw-r--r--drivers/usb/host/ehci-omap.c175
-rw-r--r--drivers/usb/host/ehci-orion.c4
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-tegra.c3
-rw-r--r--drivers/usb/host/isp1362-hcd.c8
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/pci-quirks.c7
-rw-r--r--drivers/usb/host/pci-quirks.h1
-rw-r--r--drivers/usb/host/r8a66597-hcd.c12
-rw-r--r--drivers/usb/host/r8a66597.h5
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci-ring.c40
-rw-r--r--drivers/usb/host/xhci.c8
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/musb_core.h8
-rw-r--r--drivers/usb/musb/musb_dsps.c19
-rw-r--r--drivers/usb/otg/isp1301_omap.c1
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c8
-rw-r--r--drivers/usb/serial/bus.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h7
-rw-r--r--drivers/usb/serial/ipw.c3
-rw-r--r--drivers/usb/serial/mos7840.c16
-rw-r--r--drivers/usb/serial/option.c285
-rw-r--r--drivers/usb/serial/qcserial.c47
-rw-r--r--drivers/usb/serial/usb-wwan.h3
-rw-r--r--drivers/usb/serial/usb_wwan.c68
-rw-r--r--drivers/vfio/Kconfig16
-rw-r--r--drivers/vfio/Makefile3
-rw-r--r--drivers/vfio/pci/Kconfig8
-rw-r--r--drivers/vfio/pci/Makefile4
-rw-r--r--drivers/vfio/pci/vfio_pci.c579
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c1540
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c740
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h91
-rw-r--r--drivers/vfio/pci/vfio_pci_rdwr.c269
-rw-r--r--drivers/vfio/vfio.c1415
-rw-r--r--drivers/vfio/vfio_iommu_type1.c753
-rw-r--r--drivers/vhost/Kconfig3
-rw-r--r--drivers/vhost/Kconfig.tcm6
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/tcm_vhost.c1649
-rw-r--r--drivers/vhost/tcm_vhost.h103
-rw-r--r--drivers/video/aty/aty128fb.c180
-rw-r--r--drivers/video/aty/radeon_monitor.c35
-rw-r--r--drivers/video/auo_k190x.c2
-rw-r--r--drivers/video/backlight/Kconfig2
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c19
-rw-r--r--drivers/video/backlight/corgi_lcd.c19
-rw-r--r--drivers/video/backlight/l4f00242t03.c29
-rw-r--r--drivers/video/backlight/lm3533_bl.c8
-rw-r--r--drivers/video/backlight/lms283gf05.c24
-rw-r--r--drivers/video/backlight/lp855x_bl.c10
-rw-r--r--drivers/video/backlight/ot200_bl.c21
-rw-r--r--drivers/video/backlight/pwm_bl.c159
-rw-r--r--drivers/video/backlight/tosa_bl.c8
-rw-r--r--drivers/video/backlight/tosa_lcd.c7
-rw-r--r--drivers/video/console/bitblit.c2
-rw-r--r--drivers/video/console/fbcon.c11
-rw-r--r--drivers/video/da8xx-fb.c78
-rw-r--r--drivers/video/epson1355fb.c8
-rw-r--r--drivers/video/exynos/exynos_dp_core.c23
-rw-r--r--drivers/video/exynos/exynos_dp_core.h4
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c4
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c2
-rw-r--r--drivers/video/exynos/s6e8ax0.h21
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fb_draw.h7
-rw-r--r--drivers/video/grvga.c47
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/mx3fb.c55
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c10
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c179
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c8
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c1
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c9
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c9
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c9
-rw-r--r--drivers/video/omap2/displays/panel-taal.c1
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c7
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c8
-rw-r--r--drivers/video/omap2/dss/Kconfig4
-rw-r--r--drivers/video/omap2/dss/apply.c91
-rw-r--r--drivers/video/omap2/dss/dispc.c494
-rw-r--r--drivers/video/omap2/dss/dispc.h28
-rw-r--r--drivers/video/omap2/dss/display.c40
-rw-r--r--drivers/video/omap2/dss/dpi.c64
-rw-r--r--drivers/video/omap2/dss/dsi.c152
-rw-r--r--drivers/video/omap2/dss/dss.c19
-rw-r--r--drivers/video/omap2/dss/dss.h54
-rw-r--r--drivers/video/omap2/dss/dss_features.h5
-rw-r--r--drivers/video/omap2/dss/hdmi.c246
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c9
-rw-r--r--drivers/video/omap2/dss/manager.c51
-rw-r--r--drivers/video/omap2/dss/overlay.c33
-rw-r--r--drivers/video/omap2/dss/rfbi.c40
-rw-r--r--drivers/video/omap2/dss/sdi.c56
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h21
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c26
-rw-r--r--drivers/video/omap2/dss/venc.c8
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c53
-rw-r--r--drivers/video/s3fb.c31
-rw-r--r--drivers/video/sh_mipi_dsi.c7
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c1117
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h5
-rw-r--r--drivers/video/sh_mobile_meram.c235
-rw-r--r--drivers/video/smscufx.c2
-rw-r--r--drivers/video/w100fb.c12
-rw-r--r--drivers/w1/slaves/w1_therm.c9
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/watchdog/booke_wdt.c7
-rw-r--r--drivers/watchdog/da9052_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c8
-rw-r--r--drivers/watchdog/sa1100_wdt.c14
-rw-r--r--drivers/zorro/zorro.c2
-rw-r--r--fs/9p/vfs_file.c3
-rw-r--r--fs/affs/bitmap.c28
-rw-r--r--fs/autofs4/expire.c36
-rw-r--r--fs/bio.c13
-rw-r--r--fs/block_dev.c3
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/async-thread.c9
-rw-r--r--fs/btrfs/backref.c44
-rw-r--r--fs/btrfs/backref.h7
-rw-r--r--fs/btrfs/btrfs_inode.h14
-rw-r--r--fs/btrfs/check-integrity.c7
-rw-r--r--fs/btrfs/compression.c1
-rw-r--r--fs/btrfs/ctree.c772
-rw-r--r--fs/btrfs/ctree.h382
-rw-r--r--fs/btrfs/delayed-inode.c35
-rw-r--r--fs/btrfs/delayed-inode.h2
-rw-r--r--fs/btrfs/delayed-ref.c203
-rw-r--r--fs/btrfs/delayed-ref.h66
-rw-r--r--fs/btrfs/disk-io.c204
-rw-r--r--fs/btrfs/disk-io.h8
-rw-r--r--fs/btrfs/extent-tree.c455
-rw-r--r--fs/btrfs/extent_io.c75
-rw-r--r--fs/btrfs/file-item.c8
-rw-r--r--fs/btrfs/file.c3
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/inode.c377
-rw-r--r--fs/btrfs/ioctl.c477
-rw-r--r--fs/btrfs/ioctl.h97
-rw-r--r--fs/btrfs/locking.c16
-rw-r--r--fs/btrfs/ordered-data.c2
-rw-r--r--fs/btrfs/qgroup.c1577
-rw-r--r--fs/btrfs/relocation.c5
-rw-r--r--fs/btrfs/root-tree.c107
-rw-r--r--fs/btrfs/send.c4572
-rw-r--r--fs/btrfs/send.h133
-rw-r--r--fs/btrfs/struct-funcs.c196
-rw-r--r--fs/btrfs/super.c88
-rw-r--r--fs/btrfs/transaction.c109
-rw-r--r--fs/btrfs/transaction.h12
-rw-r--r--fs/btrfs/tree-log.c4
-rw-r--r--fs/btrfs/volumes.c62
-rw-r--r--fs/btrfs/volumes.h6
-rw-r--r--fs/buffer.c94
-rw-r--r--fs/cachefiles/rdwr.c2
-rw-r--r--fs/ceph/addr.c3
-rw-r--r--fs/ceph/debugfs.c1
-rw-r--r--fs/ceph/dir.c45
-rw-r--r--fs/ceph/file.c62
-rw-r--r--fs/ceph/inode.c15
-rw-r--r--fs/ceph/ioctl.c3
-rw-r--r--fs/ceph/mds_client.c23
-rw-r--r--fs/ceph/snap.c18
-rw-r--r--fs/ceph/super.c1
-rw-r--r--fs/ceph/super.h10
-rw-r--r--fs/ceph/xattr.c1
-rw-r--r--fs/cifs/Makefile3
-rw-r--r--fs/cifs/cache.c2
-rw-r--r--fs/cifs/cifs_debug.c62
-rw-r--r--fs/cifs/cifs_dfs_ref.c7
-rw-r--r--fs/cifs/cifs_unicode.c60
-rw-r--r--fs/cifs/cifs_unicode.h6
-rw-r--r--fs/cifs/cifsacl.c27
-rw-r--r--fs/cifs/cifsencrypt.c52
-rw-r--r--fs/cifs/cifsfs.c24
-rw-r--r--fs/cifs/cifsglob.h202
-rw-r--r--fs/cifs/cifsproto.h262
-rw-r--r--fs/cifs/cifssmb.c397
-rw-r--r--fs/cifs/connect.c446
-rw-r--r--fs/cifs/dir.c60
-rw-r--r--fs/cifs/file.c193
-rw-r--r--fs/cifs/inode.c548
-rw-r--r--fs/cifs/ioctl.c6
-rw-r--r--fs/cifs/link.c46
-rw-r--r--fs/cifs/misc.c31
-rw-r--r--fs/cifs/nterr.c6
-rw-r--r--fs/cifs/nterr.h22
-rw-r--r--fs/cifs/ntlmssp.h10
-rw-r--r--fs/cifs/readdir.c31
-rw-r--r--fs/cifs/sess.c12
-rw-r--r--fs/cifs/smb1ops.c423
-rw-r--r--fs/cifs/smb2glob.h44
-rw-r--r--fs/cifs/smb2inode.c163
-rw-r--r--fs/cifs/smb2maperror.c2477
-rw-r--r--fs/cifs/smb2misc.c349
-rw-r--r--fs/cifs/smb2ops.c307
-rw-r--r--fs/cifs/smb2pdu.c1125
-rw-r--r--fs/cifs/smb2pdu.h579
-rw-r--r--fs/cifs/smb2proto.h86
-rw-r--r--fs/cifs/smb2status.h1782
-rw-r--r--fs/cifs/smb2transport.c172
-rw-r--r--fs/cifs/smbencrypt.c14
-rw-r--r--fs/cifs/transport.c104
-rw-r--r--fs/cifs/xattr.c24
-rw-r--r--fs/compat.c10
-rw-r--r--fs/direct-io.c5
-rw-r--r--fs/dlm/config.c7
-rw-r--r--fs/dlm/config.h1
-rw-r--r--fs/dlm/debug_fs.c103
-rw-r--r--fs/dlm/dir.c287
-rw-r--r--fs/dlm/dir.h7
-rw-r--r--fs/dlm/dlm_internal.h62
-rw-r--r--fs/dlm/lock.c1292
-rw-r--r--fs/dlm/lock.h5
-rw-r--r--fs/dlm/lockspace.c45
-rw-r--r--fs/dlm/rcom.c147
-rw-r--r--fs/dlm/rcom.h1
-rw-r--r--fs/dlm/recover.c295
-rw-r--r--fs/dlm/recover.h2
-rw-r--r--fs/dlm/recoverd.c14
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h24
-rw-r--r--fs/ecryptfs/file.c90
-rw-r--r--fs/ecryptfs/inode.c95
-rw-r--r--fs/ecryptfs/main.c22
-rw-r--r--fs/ecryptfs/messaging.c136
-rw-r--r--fs/ecryptfs/miscdev.c98
-rw-r--r--fs/ecryptfs/mmap.c39
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/exec.c63
-rw-r--r--fs/exofs/inode.c27
-rw-r--r--fs/exofs/ore.c14
-rw-r--r--fs/exofs/super.c11
-rw-r--r--fs/ext2/balloc.c14
-rw-r--r--fs/ext2/ialloc.c1
-rw-r--r--fs/ext2/inode.c5
-rw-r--r--fs/ext2/super.c33
-rw-r--r--fs/ext3/balloc.c2
-rw-r--r--fs/ext3/bitmap.c12
-rw-r--r--fs/ext3/inode.c8
-rw-r--r--fs/ext3/super.c11
-rw-r--r--fs/ext4/balloc.c65
-rw-r--r--fs/ext4/bitmap.c18
-rw-r--r--fs/ext4/ext4.h26
-rw-r--r--fs/ext4/ext4_jbd2.c8
-rw-r--r--fs/ext4/ext4_jbd2.h25
-rw-r--r--fs/ext4/extents.c52
-rw-r--r--fs/ext4/file.c121
-rw-r--r--fs/ext4/ialloc.c5
-rw-r--r--fs/ext4/inode.c151
-rw-r--r--fs/ext4/mballoc.c26
-rw-r--r--fs/ext4/mmp.c6
-rw-r--r--fs/ext4/namei.c15
-rw-r--r--fs/ext4/resize.c7
-rw-r--r--fs/ext4/super.c371
-rw-r--r--fs/ext4/xattr.c11
-rw-r--r--fs/fat/dir.c255
-rw-r--r--fs/fat/fat.h15
-rw-r--r--fs/fat/file.c15
-rw-r--r--fs/fat/inode.c12
-rw-r--r--fs/fat/namei_msdos.c11
-rw-r--r--fs/fat/namei_vfat.c11
-rw-r--r--fs/fcntl.c29
-rw-r--r--fs/file_table.c4
-rw-r--r--fs/fs-writeback.c9
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/fuse/file.c19
-rw-r--r--fs/fuse/fuse_i.h3
-rw-r--r--fs/fuse/inode.c32
-rw-r--r--fs/gfs2/file.c18
-rw-r--r--fs/gfs2/meta_io.c2
-rw-r--r--fs/gfs2/trans.c4
-rw-r--r--fs/hfs/mdb.c4
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hugetlbfs/inode.c4
-rw-r--r--fs/inode.c14
-rw-r--r--fs/internal.h4
-rw-r--r--fs/jbd/journal.c9
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c7
-rw-r--r--fs/lockd/clntproc.c14
-rw-r--r--fs/lockd/grace.c16
-rw-r--r--fs/lockd/host.c92
-rw-r--r--fs/lockd/netns.h7
-rw-r--r--fs/lockd/svc.c43
-rw-r--r--fs/lockd/svc4proc.c14
-rw-r--r--fs/lockd/svclock.c17
-rw-r--r--fs/lockd/svcproc.c16
-rw-r--r--fs/lockd/svcsubs.c19
-rw-r--r--fs/locks.c34
-rw-r--r--fs/logfs/dev_bdev.c15
-rw-r--r--fs/logfs/inode.c18
-rw-r--r--fs/logfs/journal.c2
-rw-r--r--fs/logfs/readwrite.c1
-rw-r--r--fs/logfs/segment.c2
-rw-r--r--fs/minix/itree_v2.c3
-rw-r--r--fs/namei.c323
-rw-r--r--fs/namespace.c97
-rw-r--r--fs/nfs/Kconfig23
-rw-r--r--fs/nfs/Makefile24
-rw-r--r--fs/nfs/blocklayout/blocklayout.c39
-rw-r--r--fs/nfs/callback.c28
-rw-r--r--fs/nfs/callback.h2
-rw-r--r--fs/nfs/callback_xdr.c4
-rw-r--r--fs/nfs/client.c874
-rw-r--r--fs/nfs/delegation.c7
-rw-r--r--fs/nfs/delegation.h21
-rw-r--r--fs/nfs/dir.c127
-rw-r--r--fs/nfs/direct.c90
-rw-r--r--fs/nfs/dns_resolve.c4
-rw-r--r--fs/nfs/file.c238
-rw-r--r--fs/nfs/getroot.c50
-rw-r--r--fs/nfs/idmap.c89
-rw-r--r--fs/nfs/inode.c110
-rw-r--r--fs/nfs/internal.h126
-rw-r--r--fs/nfs/namespace.c17
-rw-r--r--fs/nfs/netns.h2
-rw-r--r--fs/nfs/nfs.h29
-rw-r--r--fs/nfs/nfs2super.c31
-rw-r--r--fs/nfs/nfs2xdr.c28
-rw-r--r--fs/nfs/nfs3client.c65
-rw-r--r--fs/nfs/nfs3proc.c51
-rw-r--r--fs/nfs/nfs3super.c31
-rw-r--r--fs/nfs/nfs3xdr.c43
-rw-r--r--fs/nfs/nfs4_fs.h37
-rw-r--r--fs/nfs/nfs4client.c656
-rw-r--r--fs/nfs/nfs4file.c126
-rw-r--r--fs/nfs/nfs4filelayout.c10
-rw-r--r--fs/nfs/nfs4filelayoutdev.c2
-rw-r--r--fs/nfs/nfs4getroot.c49
-rw-r--r--fs/nfs/nfs4proc.c388
-rw-r--r--fs/nfs/nfs4state.c114
-rw-r--r--fs/nfs/nfs4super.c357
-rw-r--r--fs/nfs/nfs4sysctl.c68
-rw-r--r--fs/nfs/nfs4xdr.c114
-rw-r--r--fs/nfs/objlayout/objio_osd.c55
-rw-r--r--fs/nfs/pagelist.c10
-rw-r--r--fs/nfs/pnfs.c96
-rw-r--r--fs/nfs/pnfs.h33
-rw-r--r--fs/nfs/proc.c41
-rw-r--r--fs/nfs/read.c27
-rw-r--r--fs/nfs/super.c609
-rw-r--r--fs/nfs/sysctl.c26
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfs/write.c157
-rw-r--r--fs/nfsd/export.c10
-rw-r--r--fs/nfsd/netns.h4
-rw-r--r--fs/nfsd/nfs4callback.c5
-rw-r--r--fs/nfsd/nfs4idmap.c4
-rw-r--r--fs/nfsd/nfs4proc.c18
-rw-r--r--fs/nfsd/nfs4recover.c9
-rw-r--r--fs/nfsd/nfs4state.c201
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/nfsd/nfsctl.c8
-rw-r--r--fs/nfsd/nfsd.h13
-rw-r--r--fs/nfsd/nfsfh.c1
-rw-r--r--fs/nfsd/nfsproc.c9
-rw-r--r--fs/nfsd/nfssvc.c24
-rw-r--r--fs/nfsd/state.h6
-rw-r--r--fs/nfsd/vfs.c89
-rw-r--r--fs/nfsd/vfs.h11
-rw-r--r--fs/nilfs2/alloc.h14
-rw-r--r--fs/nilfs2/bmap.h7
-rw-r--r--fs/nilfs2/btnode.h8
-rw-r--r--fs/nilfs2/cpfile.c10
-rw-r--r--fs/nilfs2/dat.c6
-rw-r--r--fs/nilfs2/export.h8
-rw-r--r--fs/nilfs2/file.c18
-rw-r--r--fs/nilfs2/ifile.c6
-rw-r--r--fs/nilfs2/inode.c7
-rw-r--r--fs/nilfs2/ioctl.c6
-rw-r--r--fs/nilfs2/mdt.h7
-rw-r--r--fs/nilfs2/nilfs.h17
-rw-r--r--fs/nilfs2/segment.c5
-rw-r--r--fs/nilfs2/sufile.c8
-rw-r--r--fs/nilfs2/super.c10
-rw-r--r--fs/nilfs2/the_nilfs.c1
-rw-r--r--fs/nilfs2/the_nilfs.h8
-rw-r--r--fs/ntfs/file.c3
-rw-r--r--fs/ntfs/super.c17
-rw-r--r--fs/ocfs2/file.c11
-rw-r--r--fs/ocfs2/ioctl.c14
-rw-r--r--fs/ocfs2/journal.c7
-rw-r--r--fs/ocfs2/localalloc.c8
-rw-r--r--fs/ocfs2/mmap.c2
-rw-r--r--fs/ocfs2/refcounttree.c11
-rw-r--r--fs/open.c24
-rw-r--r--fs/pipe.c77
-rw-r--r--fs/proc/base.c22
-rw-r--r--fs/qnx4/bitmap.c24
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/reiserfs/bitmap.c2
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/select.c10
-rw-r--r--fs/splice.c3
-rw-r--r--fs/super.c296
-rw-r--r--fs/sysfs/bin.c2
-rw-r--r--fs/ubifs/debug.h2
-rw-r--r--fs/ubifs/file.c10
-rw-r--r--fs/ubifs/lpt.c5
-rw-r--r--fs/ubifs/recovery.c2
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/ubifs/super.c5
-rw-r--r--fs/udf/inode.c5
-rw-r--r--fs/udf/super.c7
-rw-r--r--fs/xattr.c16
-rw-r--r--fs/xfs/xfs_alloc_btree.h14
-rw-r--r--fs/xfs/xfs_aops.c97
-rw-r--r--fs/xfs/xfs_aops.h14
-rw-r--r--fs/xfs/xfs_attr.c78
-rw-r--r--fs/xfs/xfs_attr_leaf.c255
-rw-r--r--fs/xfs/xfs_attr_leaf.h21
-rw-r--r--fs/xfs/xfs_bmap.c2
-rw-r--r--fs/xfs/xfs_buf.c240
-rw-r--r--fs/xfs/xfs_buf.h116
-rw-r--r--fs/xfs/xfs_buf_item.c345
-rw-r--r--fs/xfs/xfs_buf_item.h38
-rw-r--r--fs/xfs/xfs_da_btree.c823
-rw-r--r--fs/xfs/xfs_da_btree.h38
-rw-r--r--fs/xfs/xfs_dinode.h2
-rw-r--r--fs/xfs/xfs_dir2.c4
-rw-r--r--fs/xfs/xfs_dir2_block.c118
-rw-r--r--fs/xfs/xfs_dir2_data.c50
-rw-r--r--fs/xfs/xfs_dir2_leaf.c621
-rw-r--r--fs/xfs/xfs_dir2_node.c236
-rw-r--r--fs/xfs/xfs_dir2_priv.h46
-rw-r--r--fs/xfs/xfs_dir2_sf.c4
-rw-r--r--fs/xfs/xfs_discard.c6
-rw-r--r--fs/xfs/xfs_file.c29
-rw-r--r--fs/xfs/xfs_ialloc.c447
-rw-r--r--fs/xfs/xfs_ialloc.h2
-rw-r--r--fs/xfs/xfs_iget.c15
-rw-r--r--fs/xfs/xfs_inode.c208
-rw-r--r--fs/xfs/xfs_inode.h13
-rw-r--r--fs/xfs/xfs_ioctl.c55
-rw-r--r--fs/xfs/xfs_ioctl32.c12
-rw-r--r--fs/xfs/xfs_iomap.c10
-rw-r--r--fs/xfs/xfs_iops.c45
-rw-r--r--fs/xfs/xfs_itable.c2
-rw-r--r--fs/xfs/xfs_log.c223
-rw-r--r--fs/xfs/xfs_log_priv.h18
-rw-r--r--fs/xfs/xfs_log_recover.c142
-rw-r--r--fs/xfs/xfs_mount.c13
-rw-r--r--fs/xfs/xfs_mount.h6
-rw-r--r--fs/xfs/xfs_qm.c2
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--fs/xfs/xfs_super.c88
-rw-r--r--fs/xfs/xfs_sync.c15
-rw-r--r--fs/xfs/xfs_trace.h2
-rw-r--r--fs/xfs/xfs_trans.c17
-rw-r--r--fs/xfs/xfs_trans.h52
-rw-r--r--fs/xfs/xfs_trans_ail.c35
-rw-r--r--fs/xfs/xfs_trans_buf.c68
-rw-r--r--fs/xfs/xfs_trans_priv.h1
-rw-r--r--fs/xfs/xfs_types.h14
-rw-r--r--fs/xfs/xfs_utils.c17
-rw-r--r--fs/xfs/xfs_vnodeops.c285
-rw-r--r--include/acpi/acexcep.h7
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h12
-rw-r--r--include/acpi/acpiosxf.h4
-rw-r--r--include/acpi/acpixf.h29
-rw-r--r--include/acpi/acrestyp.h6
-rw-r--r--include/acpi/actbl.h10
-rw-r--r--include/acpi/actbl1.h10
-rw-r--r--include/acpi/actbl2.h10
-rw-r--r--include/acpi/actbl3.h2
-rw-r--r--include/acpi/actypes.h15
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/asm-generic/dma-coherent.h1
-rw-r--r--include/asm-generic/dma-mapping-common.h55
-rw-r--r--include/asm-generic/fcntl.h4
-rw-r--r--include/asm-generic/kmap_types.h34
-rw-r--r--include/asm-generic/mutex-xchg.h11
-rw-r--r--include/asm-generic/sizes.h49
-rw-r--r--include/drm/drmP.h19
-rw-r--r--include/drm/drm_crtc.h7
-rw-r--r--include/drm/drm_crtc_helper.h4
-rw-r--r--include/drm/drm_encoder_slave.h2
-rw-r--r--include/drm/drm_mm.h93
-rw-r--r--include/drm/drm_mode.h5
-rw-r--r--include/drm/drm_pciids.h45
-rw-r--r--include/drm/exynos_drm.h9
-rw-r--r--include/drm/i915_drm.h34
-rw-r--r--include/drm/intel-gtt.h8
-rw-r--r--include/drm/nouveau_drm.h94
-rw-r--r--include/drm/radeon_drm.h2
-rw-r--r--include/drm/sis_drm.h8
-rw-r--r--include/drm/ttm/ttm_bo_driver.h3
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/acpi.h52
-rw-r--r--include/linux/aio.h38
-rw-r--r--include/linux/amba/pl08x.h156
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/backing-dev.h8
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h6
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h75
-rw-r--r--include/linux/blkpg.h1
-rw-r--r--include/linux/bsg-lib.h1
-rw-r--r--include/linux/can.h25
-rw-r--r--include/linux/ceph/ceph_features.h27
-rw-r--r--include/linux/ceph/ceph_fs.h14
-rw-r--r--include/linux/ceph/decode.h49
-rw-r--r--include/linux/ceph/libceph.h10
-rw-r--r--include/linux/ceph/messenger.h60
-rw-r--r--include/linux/ceph/mon_client.h2
-rw-r--r--include/linux/ceph/msgpool.h3
-rw-r--r--include/linux/cgroup_subsys.h8
-rw-r--r--include/linux/clk.h168
-rw-r--r--include/linux/compaction.h8
-rw-r--r--include/linux/compat.h4
-rw-r--r--include/linux/cpuidle.h15
-rw-r--r--include/linux/cpumask.h15
-rw-r--r--include/linux/crush/crush.h8
-rw-r--r--include/linux/device-mapper.h29
-rw-r--r--include/linux/dm-ioctl.h6
-rw-r--r--include/linux/dma-attrs.h2
-rw-r--r--include/linux/dma-mapping.h3
-rw-r--r--include/linux/edac.h208
-rw-r--r--include/linux/efi.h2
-rw-r--r--include/linux/firewire.h4
-rw-r--r--include/linux/flex_proportions.h101
-rw-r--r--include/linux/fs.h171
-rw-r--r--include/linux/ftrace_event.h5
-rw-r--r--include/linux/fuse.h19
-rw-r--r--include/linux/genhd.h57
-rw-r--r--include/linux/gfp.h13
-rw-r--r--include/linux/hardirq.h2
-rw-r--r--include/linux/highmem.h48
-rw-r--r--include/linux/hugetlb.h50
-rw-r--r--include/linux/hugetlb_cgroup.h126
-rw-r--r--include/linux/i2c-ocores.h3
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/i2c/pca953x.h2
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/if_team.h30
-rw-r--r--include/linux/iio/frequency/adf4350.h2
-rw-r--r--include/linux/init.h3
-rw-r--r--include/linux/input/edt-ft5x06.h24
-rw-r--r--include/linux/input/eeti_ts.h1
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqdesc.h1
-rw-r--r--include/linux/irqdomain.h28
-rw-r--r--include/linux/jbd2.h1
-rw-r--r--include/linux/jiffies.h29
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kern_levels.h25
-rw-r--r--include/linux/kernel.h12
-rw-r--r--include/linux/key-type.h1
-rw-r--r--include/linux/kref.h18
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/leds.h29
-rw-r--r--include/linux/libfdt.h8
-rw-r--r--include/linux/libfdt_env.h13
-rw-r--r--include/linux/lockd/lockd.h6
-rw-r--r--include/linux/memcontrol.h34
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mempool.h3
-rw-r--r--include/linux/mfd/88pm80x.h369
-rw-r--r--include/linux/mfd/88pm860x.h1
-rw-r--r--include/linux/mfd/abx500/ab8500.h5
-rw-r--r--include/linux/mfd/arizona/core.h114
-rw-r--r--include/linux/mfd/arizona/pdata.h119
-rw-r--r--include/linux/mfd/arizona/registers.h6594
-rw-r--r--include/linux/mfd/core.h5
-rw-r--r--include/linux/mfd/db8500-prcmu.h7
-rw-r--r--include/linux/mfd/dbx500-prcmu.h7
-rw-r--r--include/linux/mfd/ezx-pcap.h1
-rw-r--r--include/linux/mfd/max77686-private.h246
-rw-r--r--include/linux/mfd/max77686.h114
-rw-r--r--include/linux/mfd/max77693-private.h1
-rw-r--r--include/linux/mfd/max8997-private.h4
-rw-r--r--include/linux/mfd/max8997.h1
-rw-r--r--include/linux/mfd/s5m87xx/s5m-core.h379
-rw-r--r--include/linux/mfd/s5m87xx/s5m-pmic.h129
-rw-r--r--include/linux/mfd/samsung/core.h159
-rw-r--r--include/linux/mfd/samsung/irq.h152
-rw-r--r--include/linux/mfd/samsung/rtc.h (renamed from include/linux/mfd/s5m87xx/s5m-rtc.h)69
-rw-r--r--include/linux/mfd/samsung/s2mps11.h196
-rw-r--r--include/linux/mfd/samsung/s5m8763.h96
-rw-r--r--include/linux/mfd/samsung/s5m8767.h188
-rw-r--r--include/linux/mfd/tps65910.h1
-rw-r--r--include/linux/mfd/twl6040.h6
-rw-r--r--include/linux/mfd/wm8350/core.h26
-rw-r--r--include/linux/mfd/wm8994/pdata.h1
-rw-r--r--include/linux/migrate.h4
-rw-r--r--include/linux/mm.h32
-rw-r--r--include/linux/mm_types.h20
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmzone.h26
-rw-r--r--include/linux/mv643xx_eth.h2
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netpoll.h42
-rw-r--r--include/linux/nfs_fs.h18
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/nfs_idmap.h2
-rw-r--r--include/linux/nfs_page.h3
-rw-r--r--include/linux/nfs_xdr.h26
-rw-r--r--include/linux/nfsd/nfsfh.h1
-rw-r--r--include/linux/nilfs2_fs.h63
-rw-r--r--include/linux/of.h38
-rw-r--r--include/linux/olpc-ec.h41
-rw-r--r--include/linux/omap-dma.h22
-rw-r--r--include/linux/oom.h21
-rw-r--r--include/linux/page-flags.h29
-rw-r--r--include/linux/page-isolation.h13
-rw-r--r--include/linux/page_cgroup.h10
-rw-r--r--include/linux/pagemap.h5
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pci_regs.h5
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/pinctrl/consumer.h1
-rw-r--r--include/linux/pipe_fs_i.h10
-rw-r--r--include/linux/platform_data/i2c-nomadik.h (renamed from arch/arm/plat-nomadik/include/plat/i2c.h)6
-rw-r--r--include/linux/platform_data/leds-lm3556.h50
-rw-r--r--include/linux/platform_data/lp855x.h (renamed from include/linux/lp855x.h)6
-rw-r--r--include/linux/platform_data/lp8727.h (renamed from include/linux/lp8727.h)0
-rw-r--r--include/linux/platform_data/mv_usb.h9
-rw-r--r--include/linux/platform_data/spear_thermal.h26
-rw-r--r--include/linux/posix_types.h18
-rw-r--r--include/linux/power/charger-manager.h67
-rw-r--r--include/linux/power_supply.h13
-rw-r--r--include/linux/printk.h41
-rw-r--r--include/linux/pwm.h117
-rw-r--r--include/linux/pwm_backlight.h1
-rw-r--r--include/linux/random.h6
-rw-r--r--include/linux/remoteproc.h20
-rw-r--r--include/linux/scatterlist.h4
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/security.h2
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/shdma-base.h2
-rw-r--r--include/linux/shm.h6
-rw-r--r--include/linux/shrinker.h1
-rw-r--r--include/linux/sizes.h47
-rw-r--r--include/linux/skbuff.h80
-rw-r--r--include/linux/slab.h24
-rw-r--r--include/linux/slab_def.h12
-rw-r--r--include/linux/slub_def.h3
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/cache.h34
-rw-r--r--include/linux/sunrpc/gss_api.h3
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/xdr.h6
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/swap.h14
-rw-r--r--include/linux/thermal.h20
-rw-r--r--include/linux/time.h37
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/topology.h1
-rw-r--r--include/linux/uvcvideo.h3
-rw-r--r--include/linux/v4l2-common.h71
-rw-r--r--include/linux/v4l2-subdev.h20
-rw-r--r--include/linux/vfio.h445
-rw-r--r--include/linux/videodev2.h79
-rw-r--r--include/linux/virtio_blk.h10
-rw-r--r--include/linux/virtio_ids.h2
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmalloc.h9
-rw-r--r--include/linux/vmstat.h5
-rw-r--r--include/linux/writeback.h6
-rw-r--r--include/media/adv7393.h28
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/gpio-ir-recv.h6
-rw-r--r--include/media/mt9t001.h1
-rw-r--r--include/media/v4l2-chip-ident.h3
-rw-r--r--include/media/v4l2-dev.h3
-rw-r--r--include/media/v4l2-ioctl.h27
-rw-r--r--include/media/videobuf-core.h2
-rw-r--r--include/media/videobuf2-core.h54
-rw-r--r--include/media/videobuf2-dma-contig.h6
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/codel.h8
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/inet6_hashtables.h13
-rw-r--r--include/net/inet_connection_sock.h1
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip_fib.h6
-rw-r--r--include/net/llc.h2
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h1
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/protocol.h2
-rw-r--r--include/net/route.h22
-rw-r--r--include/net/scm.h4
-rw-r--r--include/net/sock.h42
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/net/xfrm.h6
-rw-r--r--include/ras/ras_event.h102
-rw-r--r--include/sound/es1688.h1
-rw-r--r--include/sound/pcm.h3
-rw-r--r--include/sound/tea575x-tuner.h5
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/trace/events/gfpflags.h1
-rw-r--r--include/trace/events/random.h134
-rw-r--r--include/trace/events/sched.h4
-rw-r--r--include/trace/ftrace.h6
-rw-r--r--include/video/da8xx-fb.h3
-rw-r--r--include/video/omapdss.h51
-rw-r--r--include/video/sh_mobile_lcdc.h7
-rw-r--r--include/video/sh_mobile_meram.h71
-rw-r--r--init/Kconfig31
-rw-r--r--init/main.c11
-rw-r--r--ipc/compat.c18
-rw-r--r--ipc/mqueue.c61
-rw-r--r--ipc/shm.c9
-rw-r--r--ipc/syscall.c2
-rw-r--r--ipc/util.c4
-rw-r--r--ipc/util.h2
-rw-r--r--kernel/audit.c21
-rw-r--r--kernel/audit_tree.c19
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/debug/kdb/kdb_debugger.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c11
-rw-r--r--kernel/debug/kdb/kdb_main.c15
-rw-r--r--kernel/events/callchain.c9
-rw-r--r--kernel/events/core.c30
-rw-r--r--kernel/events/internal.h3
-rw-r--r--kernel/events/uprobes.c213
-rw-r--r--kernel/exit.c2
-rw-r--r--kernel/fork.c50
-rw-r--r--kernel/futex.c17
-rw-r--r--kernel/irq/handle.c7
-rw-r--r--kernel/irq/irqdomain.c362
-rw-r--r--kernel/irq/manage.c32
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/kmod.c37
-rw-r--r--kernel/panic.c8
-rw-r--r--kernel/printk.c34
-rw-r--r--kernel/resource.c24
-rw-r--r--kernel/sched/core.c41
-rw-r--r--kernel/sched/cpupri.c10
-rw-r--r--kernel/sched/fair.c40
-rw-r--r--kernel/sched/rt.c13
-rw-r--r--kernel/sched/sched.h8
-rw-r--r--kernel/sched/stop_task.c22
-rw-r--r--kernel/softirq.c9
-rw-r--r--kernel/sys.c57
-rw-r--r--kernel/sysctl.c69
-rw-r--r--kernel/sysctl_binary.c2
-rw-r--r--kernel/task_work.c1
-rw-r--r--kernel/taskstats.c5
-rw-r--r--kernel/time/jiffies.c2
-rw-r--r--kernel/time/ntp.c2
-rw-r--r--kernel/time/timekeeping.c442
-rw-r--r--kernel/timer.c9
-rw-r--r--kernel/trace/trace_event_perf.c2
-rw-r--r--kernel/trace/trace_kprobe.c6
-rw-r--r--kernel/trace/trace_syscalls.c8
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug91
-rw-r--r--lib/Makefile14
-rw-r--r--lib/atomic64_test.c5
-rw-r--r--lib/cpu-notifier-error-inject.c63
-rw-r--r--lib/crc32.c2
-rw-r--r--lib/fdt.c2
-rw-r--r--lib/fdt_ro.c2
-rw-r--r--lib/fdt_rw.c2
-rw-r--r--lib/fdt_strerror.c2
-rw-r--r--lib/fdt_sw.c2
-rw-r--r--lib/fdt_wip.c2
-rw-r--r--lib/flex_proportions.c272
-rw-r--r--lib/memory-notifier-error-inject.c48
-rw-r--r--lib/memweight.c38
-rw-r--r--lib/notifier-error-inject.c112
-rw-r--r--lib/notifier-error-inject.h24
-rw-r--r--lib/pSeries-reconfig-notifier-error-inject.c51
-rw-r--r--lib/percpu_counter.c14
-rw-r--r--lib/pm-notifier-error-inject.c49
-rw-r--r--lib/scatterlist.c72
-rw-r--r--lib/spinlock_debug.c2
-rw-r--r--lib/vsprintf.c83
-rw-r--r--mm/Kconfig5
-rw-r--r--mm/Makefile7
-rw-r--r--mm/backing-dev.c78
-rw-r--r--mm/compaction.c167
-rw-r--r--mm/fadvise.c18
-rw-r--r--mm/filemap.c38
-rw-r--r--mm/filemap_xip.c6
-rw-r--r--mm/highmem.c12
-rw-r--r--mm/hugetlb.c195
-rw-r--r--mm/hugetlb_cgroup.c418
-rw-r--r--mm/hwpoison-inject.c2
-rw-r--r--mm/internal.h9
-rw-r--r--mm/memblock.c35
-rw-r--r--mm/memcontrol.c390
-rw-r--r--mm/memory-failure.c21
-rw-r--r--mm/memory.c23
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mempool.c12
-rw-r--r--mm/migrate.c81
-rw-r--r--mm/mmap.c18
-rw-r--r--mm/mmu_notifier.c45
-rw-r--r--mm/mmzone.c2
-rw-r--r--mm/mremap.c2
-rw-r--r--mm/oom_kill.c223
-rw-r--r--mm/page-writeback.c108
-rw-r--r--mm/page_alloc.c336
-rw-r--r--mm/page_cgroup.c2
-rw-r--r--mm/page_io.c145
-rw-r--r--mm/page_isolation.c93
-rw-r--r--mm/shmem.c6
-rw-r--r--mm/slab.c623
-rw-r--r--mm/slab.h33
-rw-r--r--mm/slab_common.c120
-rw-r--r--mm/slob.c152
-rw-r--r--mm/slub.c464
-rw-r--r--mm/sparse.c29
-rw-r--r--mm/swap.c52
-rw-r--r--mm/swap_state.c7
-rw-r--r--mm/swapfile.c145
-rw-r--r--mm/vmalloc.c52
-rw-r--r--mm/vmscan.c175
-rw-r--r--mm/vmstat.c1
-rw-r--r--net/8021q/vlan_dev.c52
-rw-r--r--net/atm/common.c1
-rw-r--r--net/atm/pvc.c1
-rw-r--r--net/batman-adv/gateway_client.c6
-rw-r--r--net/batman-adv/translation-table.c1
-rw-r--r--net/bluetooth/hci_event.c28
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap_core.c1
-rw-r--r--net/bluetooth/l2cap_sock.c3
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bluetooth/sco.c19
-rw-r--r--net/bluetooth/smp.c5
-rw-r--r--net/bridge/br_device.c30
-rw-r--r--net/bridge/br_forward.c2
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/br_sysfs_if.c6
-rw-r--r--net/caif/caif_socket.c2
-rw-r--r--net/caif/chnl_net.c4
-rw-r--r--net/ceph/ceph_common.c26
-rw-r--r--net/ceph/crush/mapper.c13
-rw-r--r--net/ceph/crypto.c1
-rw-r--r--net/ceph/crypto.h3
-rw-r--r--net/ceph/debugfs.c4
-rw-r--r--net/ceph/messenger.c932
-rw-r--r--net/ceph/mon_client.c127
-rw-r--r--net/ceph/msgpool.c7
-rw-r--r--net/ceph/osd_client.c77
-rw-r--r--net/ceph/osdmap.c59
-rw-r--r--net/core/dev.c84
-rw-r--r--net/core/dst.c10
-rw-r--r--net/core/filter.c8
-rw-r--r--net/core/netpoll.c99
-rw-r--r--net/core/netprio_cgroup.c30
-rw-r--r--net/core/rtnetlink.c17
-rw-r--r--net/core/scm.c4
-rw-r--r--net/core/skbuff.c124
-rw-r--r--net/core/sock.c60
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/dccp/ccids/ccid3.c1
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/fib_semantics.c42
-rw-r--r--net/ipv4/fib_trie.c55
-rw-r--r--net/ipv4/inet_connection_sock.c7
-rw-r--r--net/ipv4/ip_fragment.c4
-rw-r--r--net/ipv4/ip_input.c12
-rw-r--r--net/ipv4/ip_output.c10
-rw-r--r--net/ipv4/ipmr.c14
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c14
-rw-r--r--net/ipv4/route.c242
-rw-r--r--net/ipv4/sysctl_net_ipv4.c15
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_cong.c3
-rw-r--r--net/ipv4/tcp_input.c44
-rw-r--r--net/ipv4/tcp_ipv4.c48
-rw-r--r--net/ipv4/tcp_metrics.c12
-rw-r--r--net/ipv4/tcp_minisocks.c2
-rw-r--r--net/ipv4/tcp_output.c49
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_input.c4
-rw-r--r--net/ipv4/xfrm4_policy.c1
-rw-r--r--net/ipv6/addrconf.c4
-rw-r--r--net/ipv6/esp6.c6
-rw-r--r--net/ipv6/ip6_input.c11
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/tcp_ipv6.c73
-rw-r--r--net/ipv6/xfrm6_policy.c8
-rw-r--r--net/l2tp/l2tp_core.c3
-rw-r--r--net/l2tp/l2tp_core.h1
-rw-r--r--net/l2tp/l2tp_ip6.c1
-rw-r--r--net/llc/af_llc.c8
-rw-r--r--net/llc/llc_input.c21
-rw-r--r--net/llc/llc_station.c29
-rw-r--r--net/mac80211/led.c2
-rw-r--r--net/mac80211/mesh.c3
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/scan.c3
-rw-r--r--net/mac80211/tx.c38
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c5
-rw-r--r--net/netfilter/nf_conntrack_core.c16
-rw-r--r--net/netfilter/nf_conntrack_expect.c29
-rw-r--r--net/netfilter/nf_conntrack_netlink.c10
-rw-r--r--net/netfilter/nf_conntrack_sip.c92
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/openvswitch/actions.c3
-rw-r--r--net/packet/af_packet.c12
-rw-r--r--net/sched/act_gact.c14
-rw-r--r--net/sched/act_ipt.c7
-rw-r--r--net/sched/act_mirred.c11
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/act_simple.c5
-rw-r--r--net/sched/sch_qfq.c95
-rw-r--r--net/sctp/ulpevent.c3
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/Kconfig5
-rw-r--r--net/sunrpc/auth.c54
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c1
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c20
-rw-r--r--net/sunrpc/cache.c5
-rw-r--r--net/sunrpc/clnt.c12
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/sched.c14
-rw-r--r--net/sunrpc/svc_xprt.c10
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/xdr.c127
-rw-r--r--net/sunrpc/xprtrdma/transport.c3
-rw-r--r--net/sunrpc/xprtsock.c53
-rw-r--r--net/unix/af_unix.c97
-rw-r--r--net/wanrouter/wanmain.c51
-rw-r--r--net/wireless/core.c5
-rw-r--r--net/wireless/core.h1
-rw-r--r--net/wireless/reg.c19
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c25
-rw-r--r--scripts/Makefile.fwinst2
-rwxr-xr-xscripts/checkpatch.pl72
-rw-r--r--scripts/coccinelle/iterators/use_after_iter.cocci147
-rw-r--r--scripts/coccinelle/misc/irqf_oneshot.cocci65
-rwxr-xr-xscripts/config62
-rwxr-xr-xscripts/decodecode2
-rw-r--r--scripts/kconfig/.gitignore1
-rw-r--r--scripts/kconfig/Makefile41
-rw-r--r--scripts/kconfig/confdata.c61
-rw-r--r--scripts/kconfig/lxdialog/check-lxdialog.sh8
-rw-r--r--scripts/kconfig/lxdialog/textbox.c3
-rw-r--r--scripts/kconfig/mconf.c6
-rw-r--r--scripts/kconfig/nconf.c10
-rw-r--r--scripts/kconfig/nconf.gui.c8
-rw-r--r--scripts/kconfig/streamline_config.pl175
-rwxr-xr-xscripts/kernel-doc1
-rw-r--r--scripts/link-vmlinux.sh4
-rw-r--r--scripts/package/builddeb7
-rw-r--r--scripts/sortextable.c1
-rwxr-xr-xscripts/tags.sh6
-rw-r--r--security/selinux/avc.c2
-rw-r--r--security/selinux/hooks.c18
-rw-r--r--security/smack/smackfs.c8
-rw-r--r--security/yama/yama_lsm.c43
-rw-r--r--sound/arm/pxa2xx-ac97.c4
-rw-r--r--sound/atmel/abdac.c3
-rw-r--r--sound/atmel/ac97c.c14
-rw-r--r--sound/core/misc.c13
-rw-r--r--sound/core/sgbuf.c2
-rw-r--r--sound/drivers/aloop.c2
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/drivers/mpu401/mpu401_uart.c1
-rw-r--r--sound/drivers/pcsp/pcsp.c4
-rw-r--r--sound/i2c/other/tea575x-tuner.c45
-rw-r--r--sound/isa/als100.c2
-rw-r--r--sound/isa/es1688/es1688_lib.c34
-rw-r--r--sound/oss/sb_audio.c4
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/ctxfi/ctatc.c4
-rw-r--r--sound/pci/emu10k1/memory.c5
-rw-r--r--sound/pci/hda/hda_auto_parser.c5
-rw-r--r--sound/pci/hda/hda_beep.c29
-rw-r--r--sound/pci/hda/hda_codec.c83
-rw-r--r--sound/pci/hda/hda_codec.h2
-rw-r--r--sound/pci/hda/hda_intel.c9
-rw-r--r--sound/pci/hda/hda_proc.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c174
-rw-r--r--sound/pci/hda/patch_conexant.c6
-rw-r--r--sound/pci/hda/patch_hdmi.c12
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/pci/hda/patch_sigmatel.c35
-rw-r--r--sound/pci/hda/patch_via.c15
-rw-r--r--sound/pci/lx6464es/lx6464es.c2
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/pci/sis7019.c5
-rw-r--r--sound/ppc/powermac.c2
-rw-r--r--sound/ppc/snd_ps3.c1
-rw-r--r--sound/soc/blackfin/bf6xx-sport.c7
-rw-r--r--sound/soc/codecs/ab8500-codec.c4
-rw-r--r--sound/soc/codecs/ad1980.c1
-rw-r--r--sound/soc/codecs/mc13783.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c3
-rw-r--r--sound/soc/codecs/stac9766.c1
-rw-r--r--sound/soc/codecs/twl6040.c2
-rw-r--r--sound/soc/codecs/wm5102.c25
-rw-r--r--sound/soc/codecs/wm5110.c12
-rw-r--r--sound/soc/codecs/wm8962.c18
-rw-r--r--sound/soc/codecs/wm8994.c17
-rw-r--r--sound/soc/codecs/wm9712.c22
-rw-r--r--sound/soc/codecs/wm9713.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c10
-rw-r--r--sound/soc/fsl/imx-ssi.c5
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c8
-rw-r--r--sound/soc/mxs/Kconfig2
-rw-r--r--sound/soc/mxs/mxs-saif.c24
-rw-r--r--sound/soc/omap/mcbsp.c2
-rw-r--r--sound/soc/omap/omap-mcbsp.c1
-rw-r--r--sound/soc/omap/omap-pcm.c1
-rw-r--r--sound/soc/samsung/pcm.c2
-rw-r--r--sound/soc/soc-core.c12
-rw-r--r--sound/soc/soc-jack.c2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_wm8903.c10
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.h2
-rw-r--r--sound/sound_firmware.c8
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/clock.c3
-rw-r--r--sound/usb/endpoint.c24
-rw-r--r--sound/usb/endpoint.h3
-rw-r--r--sound/usb/pcm.c61
-rw-r--r--tools/lib/traceevent/.gitignore1
-rw-r--r--tools/lib/traceevent/Makefile14
-rw-r--r--tools/perf/Makefile11
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-report.c5
-rw-r--r--tools/perf/builtin-test.c23
-rw-r--r--tools/perf/builtin-top.c25
-rw-r--r--tools/perf/ui/browsers/hists.c4
-rw-r--r--tools/perf/util/annotate.c15
-rw-r--r--tools/perf/util/dso-test-data.c153
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evlist.c9
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c15
-rw-r--r--tools/perf/util/evsel.h10
-rw-r--r--tools/perf/util/header.c15
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/intlist.c101
-rw-r--r--tools/perf/util/intlist.h75
-rw-r--r--tools/perf/util/map.c41
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/perf/util/parse-events-test.c12
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/parse-options.c3
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--tools/perf/util/python.c6
-rw-r--r--tools/perf/util/rblist.c107
-rw-r--r--tools/perf/util/rblist.h47
-rw-r--r--tools/perf/util/session.c51
-rw-r--r--tools/perf/util/session.h24
-rw-r--r--tools/perf/util/strlist.c130
-rw-r--r--tools/perf/util/strlist.h11
-rw-r--r--tools/perf/util/symbol.c457
-rw-r--r--tools/perf/util/symbol.h54
-rw-r--r--tools/perf/util/target.c11
-rw-r--r--tools/power/x86/turbostat/Makefile1
-rw-r--r--tools/power/x86/turbostat/turbostat.877
-rw-r--r--tools/power/x86/turbostat/turbostat.c1333
-rw-r--r--tools/testing/fault-injection/failcmd.sh219
-rwxr-xr-xtools/testing/ktest/ktest.pl167
-rw-r--r--tools/testing/ktest/sample.conf52
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/cpu-hotplug/Makefile6
-rw-r--r--tools/testing/selftests/cpu-hotplug/on-off-test.sh221
-rw-r--r--tools/testing/selftests/memory-hotplug/Makefile6
-rw-r--r--tools/testing/selftests/memory-hotplug/on-off-test.sh230
-rw-r--r--tools/vm/slabinfo.c14
-rw-r--r--virt/kvm/kvm_main.c7
3351 files changed, 141197 insertions, 63355 deletions
diff --git a/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads b/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads
new file mode 100644
index 000000000000..b0b0eeb20fe3
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-sys-vm-nr_pdflush_threads
@@ -0,0 +1,5 @@
+What: /proc/sys/vm/nr_pdflush_threads
+Date: June 2012
+Contact: Wanpeng Li <liwp@linux.vnet.ibm.com>
+Description:	Since pdflush has been replaced by per-BDI flusher threads, the old
+		pdflush interface exported in /proc/sys/vm/ should be removed.
diff --git a/Documentation/ABI/stable/sysfs-bus-firewire b/Documentation/ABI/stable/sysfs-bus-firewire
index 3d484e5dc846..41e5a0cd1e3e 100644
--- a/Documentation/ABI/stable/sysfs-bus-firewire
+++ b/Documentation/ABI/stable/sysfs-bus-firewire
@@ -39,6 +39,17 @@ Users: udev rules to set ownership and access permissions or ACLs of
/dev/fw[0-9]+ character device files
+What: /sys/bus/firewire/devices/fw[0-9]+/is_local
+Date: July 2012
+KernelVersion: 3.6
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ IEEE 1394 node device attribute.
+ Read-only and immutable.
+Values: 1: The sysfs entry represents a local node (a controller card).
+ 0: The sysfs entry represents a remote node.
+
+
What: /sys/bus/firewire/devices/fw[0-9]+[.][0-9]+/
Date: May 2007
KernelVersion: 2.6.22
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 34f51100f029..dff1f48d252d 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -210,3 +210,15 @@ Users:
firmware assigned instance number of the PCI
device that can help in understanding the firmware
intended order of the PCI device.
+
+What: /sys/bus/pci/devices/.../d3cold_allowed
+Date: July 2012
+Contact: Huang Ying <ying.huang@intel.com>
+Description:
+	d3cold_allowed is a bit that controls whether the corresponding PCI
+ device can be put into D3Cold state. If it is cleared, the
+ device will never be put into D3Cold state. If it is set, the
+ device may be put into D3Cold state if other requirements are
+ satisfied too. Reading this attribute will show the current
+ value of d3cold_allowed bit. Writing this attribute will set
+ the value of d3cold_allowed bit.
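A minimal user-space sketch of driving the attribute documented above; it is not
part of the patch, and the device address 0000:00:1c.0 is only a placeholder:

    /* Read d3cold_allowed, then clear it so the device never enters D3cold. */
    #include <stdio.h>

    int main(void)
    {
        const char *path =
            "/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed";
        FILE *f;
        int allowed;

        f = fopen(path, "r");
        if (!f || fscanf(f, "%d", &allowed) != 1)
            return 1;               /* attribute missing or unreadable */
        fclose(f);
        printf("d3cold_allowed is currently %d\n", allowed);

        f = fopen(path, "w");       /* writing sets or clears the bit */
        if (!f)
            return 1;
        fputs("0\n", f);
        fclose(f);
        return 0;
    }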
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
index bcd88eb7ebcd..3c17b62899f6 100644
--- a/Documentation/ABI/testing/sysfs-bus-rbd
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -35,8 +35,14 @@ name
pool
- The pool where this rbd image resides. The pool-name pair is unique
- per rados system.
+ The name of the storage pool where this rbd image resides.
+ An rbd image name is unique within its pool.
+
+pool_id
+
+ The unique identifier for the rbd image's pool. This is
+ a permanent attribute of the pool. A pool's id will never
+ change.
size
diff --git a/Documentation/ABI/testing/sysfs-devices-edac b/Documentation/ABI/testing/sysfs-devices-edac
new file mode 100644
index 000000000000..30ee78aaed75
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-edac
@@ -0,0 +1,140 @@
+What: /sys/devices/system/edac/mc/mc*/reset_counters
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This write-only control file will zero all the statistical
+ counters for UE and CE errors on the given memory controller.
+ Zeroing the counters will also reset the timer indicating how
+ long since the last counter were reset. This is useful for
+ computing errors/time. Since the counters are always reset
+ at driver initialization time, no module/kernel parameter
+		is available.
+
+What: /sys/devices/system/edac/mc/mc*/seconds_since_reset
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays how many seconds have elapsed
+ since the last counter reset. This can be used with the error
+ counters to measure error rates.
+
+What: /sys/devices/system/edac/mc/mc*/mc_name
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays the type of memory controller
+ that is being utilized.
+
+What: /sys/devices/system/edac/mc/mc*/size_mb
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description:	This attribute file displays, in megabytes, the amount of
+		memory that this memory controller manages.
+
+What: /sys/devices/system/edac/mc/mc*/ue_count
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays the total count of uncorrectable
+ errors that have occurred on this memory controller. If
+ panic_on_ue is set, this counter will not have a chance to
+		increment, since EDAC will panic the system.
+
+What: /sys/devices/system/edac/mc/mc*/ue_noinfo_count
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays the number of UEs that have
+ occurred on this memory controller with no information as to
+ which DIMM slot is having errors.
+
+What: /sys/devices/system/edac/mc/mc*/ce_count
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays the total count of correctable
+ errors that have occurred on this memory controller. This
+ count is very important to examine. CEs provide early
+ indications that a DIMM is beginning to fail. This count
+ field should be monitored for non-zero values and report
+ such information to the system administrator.
+
+What: /sys/devices/system/edac/mc/mc*/ce_noinfo_count
+Date: January 2006
+Contact: linux-edac@vger.kernel.org
+Description: This attribute file displays the number of CEs that
+		have occurred on this memory controller with no information
+		as to which DIMM slot is having errors. The memory is
+		degraded but operational, yet no information is available
+		to indicate which slot the failing memory is in. This count
+		field should also be monitored for non-zero values.
+
+What: /sys/devices/system/edac/mc/mc*/sdram_scrub_rate
+Date: February 2007
+Contact: linux-edac@vger.kernel.org
+Description: Read/Write attribute file that controls memory scrubbing.
+ The scrubbing rate used by the memory controller is set by
+ writing a minimum bandwidth in bytes/sec to the attribute file.
+ The rate will be translated to an internal value that gives at
+ least the specified rate.
+ Reading the file will return the actual scrubbing rate employed.
+ If configuration fails or memory scrubbing is not implemented,
+ the value of the attribute file will be -1.
+
+What: /sys/devices/system/edac/mc/mc*/max_location
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This attribute file displays the information about the last
+ available memory slot in this memory controller. It is used by
+ userspace tools in order to display the memory filling layout.
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/size
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description:	This attribute file will display the size of the dimm or rank.
+		For dimm*/size, this is the size, in MB, of the DIMM memory
+		stick. For rank*/size, this is the size, in MB, of one rank
+		of the DIMM memory stick. On single rank memories (1R), this
+		is also the total size of the dimm. On dual rank (2R) memories,
+		this is half the total size of the DIMM.
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_dev_type
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This attribute file will display what type of DRAM device is
+ being utilized on this DIMM (x1, x2, x4, x8, ...).
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_edac_mode
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This attribute file will display what type of Error detection
+ and correction is being utilized. For example: S4ECD4ED would
+ mean a Chipkill with x4 DRAM.
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_label
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This control file allows this DIMM to have a label assigned
+ to it. With this label in the module, when errors occur
+ the output can provide the DIMM label in the system log.
+ This becomes vital for panic events to isolate the
+ cause of the UE event.
+ DIMM Labels must be assigned after booting, with information
+ that correctly identifies the physical slot with its
+ silk screen label. This information is currently very
+ motherboard specific and determination of this information
+ must occur in userland at this time.
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_location
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This attribute file will display the location (csrow/channel,
+ branch/channel/slot or channel/slot) of the dimm or rank.
+
+What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_mem_type
+Date: April 2012
+Contact: Mauro Carvalho Chehab <mchehab@redhat.com>
+ linux-edac@vger.kernel.org
+Description: This attribute file will display what type of memory is
+ currently on this csrow. Normally, either buffered or
+ unbuffered memory (for example, Unbuffered-DDR3).
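The counter files described above are meant to be polled from user space; the
following sketch is not part of the patch and assumes controller index mc0. It
reads the CE/UE counters and then zeroes them via the write-only reset_counters
file:

    #include <stdio.h>

    /* Read one decimal counter from the mc0 memory controller directory. */
    static long read_counter(const char *name)
    {
        char path[128];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/edac/mc/mc0/%s", name);
        f = fopen(path, "r");
        if (f) {
            if (fscanf(f, "%ld", &val) != 1)
                val = -1;
            fclose(f);
        }
        return val;
    }

    int main(void)
    {
        FILE *f;

        printf("CE: %ld  UE: %ld  seconds since reset: %ld\n",
               read_counter("ce_count"),
               read_counter("ue_count"),
               read_counter("seconds_since_reset"));

        /* Any write to reset_counters zeroes the counters and the timer. */
        f = fopen("/sys/devices/system/edac/mc/mc0/reset_counters", "w");
        if (f) {
            fputs("1\n", f);
            fclose(f);
        }
        return 0;
    }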
diff --git a/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb
new file mode 100644
index 000000000000..2107082426da
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-platform-sh_mobile_lcdc_fb
@@ -0,0 +1,44 @@
+What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_alpha
+Date: May 2012
+Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Description:
+ This file is only available on fb[0-9] devices corresponding
+ to overlay planes.
+
+ Stores the alpha blending value for the overlay. Values range
+ from 0 (transparent) to 255 (opaque). The value is ignored if
+ the mode is not set to Alpha Blending.
+
+What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_mode
+Date: May 2012
+Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Description:
+ This file is only available on fb[0-9] devices corresponding
+ to overlay planes.
+
+ Selects the composition mode for the overlay. Possible values
+ are
+
+ 0 - Alpha Blending
+ 1 - ROP3
+
+What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_position
+Date: May 2012
+Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Description:
+ This file is only available on fb[0-9] devices corresponding
+ to overlay planes.
+
+ Stores the x,y overlay position on the display in pixels. The
+ position format is `[0-9]+,[0-9]+'.
+
+What: /sys/devices/platform/sh_mobile_lcdc_fb.[0-3]/graphics/fb[0-9]/ovl_rop3
+Date: May 2012
+Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Description:
+ This file is only available on fb[0-9] devices corresponding
+ to overlay planes.
+
+ Stores the raster operation (ROP3) for the overlay. Values
+ range from 0 to 255. The value is ignored if the mode is not
+ set to ROP3.
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
index 2e7df91620de..019e1e29370e 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
+++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
@@ -29,3 +29,10 @@ KernelVersion: 2.6.39
Contact: "Corentin Chary" <corentincj@iksaif.net>
Description:
Control the card touchpad. 1 means on, 0 means off.
+
+What: /sys/devices/platform/<platform>/lid_resume
+Date: May 2012
+KernelVersion: 3.5
+Contact: "AceLan Kao" <acelan.kao@canonical.com>
+Description:
+ Resume on lid open. 1 means on, 0 means off.
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index 814b01354c41..b31e782bd985 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -5,4 +5,15 @@ Contact: "Ike Panhc <ike.pan@canonical.com>"
Description:
Control the power of camera module. 1 means on, 0 means off.
+What: /sys/devices/platform/ideapad/fan_mode
+Date: June 2012
+KernelVersion: 3.6
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Change the fan mode.
+ There are four available modes:
+ * 0 -> Super Silent Mode
+ * 1 -> Standard Mode
+ * 2 -> Dust Cleaning
+ * 4 -> Efficient Thermal Dissipation Mode
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 5c72eed89563..f50309081ac7 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -49,3 +49,45 @@ DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
consistent or non-consistent memory as it sees fit. By using this API,
you are guaranteeing to the platform that you have all the correct and
necessary sync points for this memory in the driver.
+
+DMA_ATTR_NO_KERNEL_MAPPING
+--------------------------
+
+DMA_ATTR_NO_KERNEL_MAPPING lets the platform avoid creating a kernel
+virtual mapping for the allocated buffer. On some architectures creating
+such a mapping is a non-trivial task and consumes very limited resources
+(like kernel virtual address space or dma consistent address space).
+Buffers allocated with this attribute can only be passed to user space
+by calling dma_mmap_attrs(). By using this API, you are guaranteeing
+that you won't dereference the pointer returned by dma_alloc_attrs(). You
+can treat it as a cookie that must be passed to dma_mmap_attrs() and
+dma_free_attrs(). Make sure that both of these also get this attribute
+set on each call.
+
+Since it is optional for platforms to implement
+DMA_ATTR_NO_KERNEL_MAPPING, those that do not will simply ignore the
+attribute and exhibit default behavior.
+
+DMA_ATTR_SKIP_CPU_SYNC
+----------------------
+
+By default the dma_map_{single,page,sg} family of functions transfers a
+given buffer from the CPU domain to the device domain. Some advanced use
+cases might require sharing a buffer between more than one device. This
+requires having a mapping created separately for each device and is
+usually performed by calling a dma_map_{single,page,sg} function more
+than once for the given buffer, with the device pointer of each device
+taking part in the buffer sharing. The first call transfers the buffer
+from the 'CPU' domain to the 'device' domain, which synchronizes the CPU
+caches for the given region (usually this means that the cache has been
+flushed or invalidated depending on the dma direction). However,
+subsequent calls to dma_map_{single,page,sg}() for other devices will
+perform exactly the same synchronization operation on the CPU cache. CPU
+cache synchronization might be a time-consuming operation, especially if
+the buffers are large, so it is highly recommended to avoid it if
+possible. DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip
+synchronization of the CPU cache for the given buffer, assuming that it
+has already been transferred to the 'device' domain. This attribute can
+also be used with the dma_unmap_{single,page,sg} family of functions to
+force the buffer to stay in the device domain after a mapping for it is
+released. Use this attribute with care!
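To make the two attributes more concrete, here is a hedged driver-side sketch;
it is not part of the patch, it assumes the struct dma_attrs helpers
(DEFINE_DMA_ATTRS, dma_set_attr) of this kernel generation, and the device
pointers, buffer size and DMA direction are placeholders:

    #include <linux/device.h>
    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define EXAMPLE_BUF_SIZE (1 << 20)   /* placeholder size */

    static void *cookie;                 /* opaque handle, never dereferenced */
    static dma_addr_t dma_handle;

    static int example_alloc(struct device *dev)
    {
        DEFINE_DMA_ATTRS(attrs);

        /* No kernel virtual mapping: the return value is only a cookie
         * to hand to dma_mmap_attrs() and dma_free_attrs(). */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        cookie = dma_alloc_attrs(dev, EXAMPLE_BUF_SIZE, &dma_handle,
                                 GFP_KERNEL, &attrs);
        return cookie ? 0 : -ENOMEM;
    }

    static dma_addr_t example_map_second_dev(struct device *second_dev,
                                             void *buf, size_t len)
    {
        DEFINE_DMA_ATTRS(attrs);

        /* The buffer was already moved to the 'device' domain by a prior
         * dma_map call for another device; skip the redundant cache sync. */
        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
        return dma_map_single_attrs(second_dev, buf, len,
                                    DMA_BIDIRECTIONAL, &attrs);
    }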
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 3fca32c41927..25b58efd955d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -224,8 +224,8 @@ all your transactions.
</para>
<para>
-Then at umount time , in your put_super() (2.4) or write_super() (2.5)
-you can then call journal_destroy() to clean up your in-core journal object.
+Then at umount time, in your put_super() you can call journal_destroy()
+to clean up your in-core journal object.
</para>
<para>
diff --git a/Documentation/DocBook/media/v4l/biblio.xml b/Documentation/DocBook/media/v4l/biblio.xml
index 7c49facecd25..1078e45f189f 100644
--- a/Documentation/DocBook/media/v4l/biblio.xml
+++ b/Documentation/DocBook/media/v4l/biblio.xml
@@ -194,7 +194,7 @@ in the frequency range from 87,5 to 108,0 MHz</title>
<corpauthor>National Radio Systems Committee
(<ulink url="http://www.nrscstandards.org">http://www.nrscstandards.org</ulink>)</corpauthor>
</authorgroup>
- <title>NTSC-4: United States RBDS Standard</title>
+ <title>NRSC-4: United States RBDS Standard</title>
</biblioentry>
<biblioentry id="iso12232">
diff --git a/Documentation/DocBook/media/v4l/common.xml b/Documentation/DocBook/media/v4l/common.xml
index 4101aeb56540..b91d25313b63 100644
--- a/Documentation/DocBook/media/v4l/common.xml
+++ b/Documentation/DocBook/media/v4l/common.xml
@@ -464,14 +464,14 @@ The <structfield>type</structfield> field of the respective
<structfield>tuner</structfield> field contains the index number of
the tuner.</para>
- <para>Radio devices have exactly one tuner with index zero, no
+ <para>Radio input devices have exactly one tuner with index zero, no
video inputs.</para>
<para>To query and change tuner properties applications use the
&VIDIOC-G-TUNER; and &VIDIOC-S-TUNER; ioctl, respectively. The
&v4l2-tuner; returned by <constant>VIDIOC_G_TUNER</constant> also
contains signal status information applicable when the tuner of the
-current video input, or a radio tuner is queried. Note that
+current video or radio input is queried. Note that
<constant>VIDIOC_S_TUNER</constant> does not switch the current tuner,
when there is more than one at all. The tuner is solely determined by
the current video input. Drivers must support both ioctls and set the
@@ -491,8 +491,17 @@ the modulator. The <structfield>type</structfield> field of the
respective &v4l2-output; returned by the &VIDIOC-ENUMOUTPUT; ioctl is
set to <constant>V4L2_OUTPUT_TYPE_MODULATOR</constant> and its
<structfield>modulator</structfield> field contains the index number
-of the modulator. This specification does not define radio output
-devices.</para>
+of the modulator.</para>
+
+ <para>Radio output devices have exactly one modulator with index
+zero, no video outputs.</para>
+
+ <para>A video or radio device cannot support both a tuner and a
+modulator. Two separate device nodes will have to be used for such
+hardware, one that supports the tuner functionality and one that supports
+the modulator functionality. The reason is a limitation with the
+&VIDIOC-S-FREQUENCY; ioctl where you cannot specify whether the frequency
+is for a tuner or a modulator.</para>
<para>To query and change modulator properties applications use
the &VIDIOC-G-MODULATOR; and &VIDIOC-S-MODULATOR; ioctl. Note that
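The tuner/modulator split described in the hunk above can be exercised from
user space roughly as follows (a sketch, not part of the patch; the /dev/radio0
and /dev/radio1 node names are assumptions):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_tuner tuner;
        struct v4l2_modulator mod;
        int rx = open("/dev/radio0", O_RDWR);  /* receiver: tuner only */
        int tx = open("/dev/radio1", O_RDWR);  /* transmitter: modulator only */

        memset(&tuner, 0, sizeof(tuner));
        tuner.index = 0;            /* radio receivers have exactly one tuner */
        if (rx >= 0 && ioctl(rx, VIDIOC_G_TUNER, &tuner) == 0)
            printf("tuner '%s', signal %u\n",
                   (char *)tuner.name, tuner.signal);

        memset(&mod, 0, sizeof(mod));
        mod.index = 0;              /* radio transmitters have one modulator */
        if (tx >= 0 && ioctl(tx, VIDIOC_G_MODULATOR, &mod) == 0)
            printf("modulator '%s', range %u-%u\n",
                   (char *)mod.name, mod.rangelow, mod.rangehigh);
        return 0;
    }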
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index ea42ef824948..faa0fd14666a 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2377,10 +2377,11 @@ that used it. It was originally scheduled for removal in 2.6.35.
<para>V4L2_CTRL_FLAG_VOLATILE was added to signal volatile controls to userspace.</para>
</listitem>
<listitem>
- <para>Add selection API for extended control over cropping and
-composing. Does not affect the compatibility of current drivers and
-applications. See <link linkend="selection-api"> selection API </link> for
-details.</para>
+ <para>Add selection API for extended control over cropping
+ and composing. Does not affect the compatibility of current
+ drivers and applications. See <link
+ linkend="selection-api"> selection API </link> for
+ details.</para>
</listitem>
</orderedlist>
</section>
@@ -2458,6 +2459,36 @@ details.</para>
</orderedlist>
</section>
+ <section>
+ <title>V4L2 in Linux 3.6</title>
+ <orderedlist>
+ <listitem>
+ <para>Replaced <structfield>input</structfield> in
+ <structname>v4l2_buffer</structname> by
+ <structfield>reserved2</structfield> and removed
+ <constant>V4L2_BUF_FLAG_INPUT</constant>.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
+ <section>
+ <title>V4L2 in Linux 3.6</title>
+ <orderedlist>
+ <listitem>
+ <para>Added V4L2_CAP_VIDEO_M2M and V4L2_CAP_VIDEO_M2M_MPLANE capabilities.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
+ <section>
+ <title>V4L2 in Linux 3.6</title>
+ <orderedlist>
+ <listitem>
+ <para>Added support for frequency band enumerations: &VIDIOC-ENUM-FREQ-BANDS;.</para>
+ </listitem>
+ </orderedlist>
+ </section>
+
<section id="other">
<title>Relation of V4L2 to other Linux multimedia APIs</title>
@@ -2587,6 +2618,9 @@ ioctls.</para>
<para><link linkend="v4l2-auto-focus-area"><constant>
V4L2_CID_AUTO_FOCUS_AREA</constant></link> control.</para>
</listitem>
+ <listitem>
+ <para>Support for frequency band enumeration: &VIDIOC-ENUM-FREQ-BANDS; ioctl.</para>
+ </listitem>
</itemizedlist>
</section>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index cda0dfb6769a..b0964fb4e834 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -373,6 +373,11 @@ minimum value disables backlight compensation.</entry>
</entry>
</row>
<row>
+ <entry><constant>V4L2_CID_AUTOBRIGHTNESS</constant></entry>
+ <entry>boolean</entry>
+ <entry>Enable Automatic Brightness.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CID_ROTATE</constant></entry>
<entry>integer</entry>
<entry>Rotates the image by specified angle. Common angles are 90,
diff --git a/Documentation/DocBook/media/v4l/dev-subdev.xml b/Documentation/DocBook/media/v4l/dev-subdev.xml
index 4afcbbec5eda..a3d9dd093268 100644
--- a/Documentation/DocBook/media/v4l/dev-subdev.xml
+++ b/Documentation/DocBook/media/v4l/dev-subdev.xml
@@ -276,7 +276,7 @@
</para>
</section>
- <section>
+ <section id="v4l2-subdev-selections">
<title>Selections: cropping, scaling and composition</title>
<para>Many sub-devices support cropping frames on their input or output
@@ -290,8 +290,8 @@
size. Both the coordinates and sizes are expressed in pixels.</para>
<para>As for pad formats, drivers store try and active
- rectangles for the selection targets of ACTUAL type <xref
- linkend="v4l2-subdev-selection-targets">.</xref></para>
+ rectangles for the selection targets <xref
+ linkend="v4l2-selections-common" />.</para>
<para>On sink pads, cropping is applied relative to the
current pad format. The pad format represents the image size as
@@ -308,7 +308,7 @@
<para>Scaling support is optional. When supported by a subdev,
the crop rectangle on the subdev's sink pad is scaled to the
size configured using the &VIDIOC-SUBDEV-S-SELECTION; IOCTL
- using <constant>V4L2_SUBDEV_SEL_COMPOSE_ACTUAL</constant>
+ using <constant>V4L2_SEL_TGT_COMPOSE</constant>
selection target on the same pad. If the subdev supports scaling
but not composing, the top and left values are not used and must
always be set to zero.</para>
@@ -323,32 +323,32 @@
<para>The drivers should always use the closest possible
rectangle the user requests on all selection targets, unless
specifically told otherwise.
- <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_GE</constant> and
- <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant> flags may be
+ <constant>V4L2_SEL_FLAG_GE</constant> and
+ <constant>V4L2_SEL_FLAG_LE</constant> flags may be
used to round the image size either up or down. <xref
- linkend="v4l2-subdev-selection-flags"></xref></para>
+ linkend="v4l2-selection-flags" /></para>
</section>
<section>
<title>Types of selection targets</title>
<section>
- <title>ACTUAL targets</title>
+ <title>Actual targets</title>
- <para>ACTUAL targets reflect the actual hardware configuration
- at any point of time. There is a BOUNDS target
- corresponding to every ACTUAL.</para>
+ <para>Actual targets (without a postfix) reflect the actual
+	  hardware configuration at any point in time. There is a BOUNDS
+ target corresponding to every actual target.</para>
</section>
<section>
<title>BOUNDS targets</title>
- <para>BOUNDS targets is the smallest rectangle that contains
- all valid ACTUAL rectangles. It may not be possible to set the
- ACTUAL rectangle as large as the BOUNDS rectangle, however.
- This may be because e.g. a sensor's pixel array is not
- rectangular but cross-shaped or round. The maximum size may
- also be smaller than the BOUNDS rectangle.</para>
+	  <para>The BOUNDS target is the smallest rectangle that contains all
+ valid actual rectangles. It may not be possible to set the actual
+ rectangle as large as the BOUNDS rectangle, however. This may be
+ because e.g. a sensor's pixel array is not rectangular but
+ cross-shaped or round. The maximum size may also be smaller than the
+ BOUNDS rectangle.</para>
</section>
</section>
@@ -362,7 +362,7 @@
performed by the user: the changes made will be propagated to
any subsequent stages. If this behaviour is not desired, the
user must set
- <constant>V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG</constant> flag. This
+ <constant>V4L2_SEL_FLAG_KEEP_CONFIG</constant> flag. This
flag causes no propagation of the changes are allowed in any
circumstances. This may also cause the accessed rectangle to be
adjusted by the driver, depending on the properties of the
diff --git a/Documentation/DocBook/media/v4l/io.xml b/Documentation/DocBook/media/v4l/io.xml
index fd6aca2922b6..1885cc0755cb 100644
--- a/Documentation/DocBook/media/v4l/io.xml
+++ b/Documentation/DocBook/media/v4l/io.xml
@@ -683,14 +683,12 @@ memory, set by the application. See <xref linkend="userp" /> for details.
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>input</structfield></entry>
+ <entry><structfield>reserved2</structfield></entry>
<entry></entry>
- <entry>Some video capture drivers support rapid and
-synchronous video input changes, a function useful for example in
-video surveillance applications. For this purpose applications set the
-<constant>V4L2_BUF_FLAG_INPUT</constant> flag, and this field to the
-number of a video input as in &v4l2-input; field
-<structfield>index</structfield>.</entry>
+	  <entry>A placeholder for future extensions and custom
+(driver defined) buffer types
+<constant>V4L2_BUF_TYPE_PRIVATE</constant> and higher. Applications
+should set this to 0.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -923,13 +921,6 @@ Drivers set or clear this flag when the <constant>VIDIOC_DQBUF</constant>
ioctl is called.</entry>
</row>
<row>
- <entry><constant>V4L2_BUF_FLAG_INPUT</constant></entry>
- <entry>0x0200</entry>
- <entry>The <structfield>input</structfield> field is valid.
-Applications set or clear this flag before calling the
-<constant>VIDIOC_QBUF</constant> ioctl.</entry>
- </row>
- <row>
<entry><constant>V4L2_BUF_FLAG_PREPARED</constant></entry>
<entry>0x0400</entry>
<entry>The buffer has been prepared for I/O and can be queued by the
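Since the former input field is now reserved2 and must be zero, the usual
pattern is simply to clear the whole structure before queueing; a minimal
sketch follows (not part of the patch, fd and index are placeholders):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int queue_buffer(int fd, unsigned int index)
    {
        struct v4l2_buffer buf;

        memset(&buf, 0, sizeof(buf));   /* zeroes reserved2 and reserved */
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = index;

        return ioctl(fd, VIDIOC_QBUF, &buf);
    }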
diff --git a/Documentation/DocBook/media/v4l/selection-api.xml b/Documentation/DocBook/media/v4l/selection-api.xml
index b299e4779354..e7ed5077834d 100644
--- a/Documentation/DocBook/media/v4l/selection-api.xml
+++ b/Documentation/DocBook/media/v4l/selection-api.xml
@@ -53,11 +53,11 @@ cropping and composing rectangles have the same size.</para>
</mediaobject>
</figure>
-For complete list of the available selection targets see table <xref
-linkend="v4l2-sel-target"/>
-
</section>
+ See <xref linkend="v4l2-selection-targets" /> for more
+ information.
+
<section>
<title>Configuration</title>
@@ -74,7 +74,7 @@ cropping/composing rectangles may have to be aligned, and both the source and
the sink may have arbitrary upper and lower size limits. Therefore, as usual,
drivers are expected to adjust the requested parameters and return the actual
values selected. An application can control the rounding behaviour using <link
-linkend="v4l2-sel-flags"> constraint flags </link>.</para>
+linkend="v4l2-selection-flags"> constraint flags </link>.</para>
<section>
@@ -91,7 +91,7 @@ top/left corner at position <constant> (0,0) </constant>. The rectangle's
coordinates are expressed in pixels.</para>
<para>The top left corner, width and height of the source rectangle, that is
-the area actually sampled, is given by the <constant> V4L2_SEL_TGT_CROP_ACTIVE
+the area actually sampled, is given by the <constant> V4L2_SEL_TGT_CROP
</constant> target. It uses the same coordinate system as <constant>
V4L2_SEL_TGT_CROP_BOUNDS </constant>. The active cropping area must lie
completely inside the capture boundaries. The driver may further adjust the
@@ -111,13 +111,13 @@ height are equal to the image size set by <constant> VIDIOC_S_FMT </constant>.
</para>
<para>The part of a buffer into which the image is inserted by the hardware is
-controlled by the <constant> V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> target.
+controlled by the <constant> V4L2_SEL_TGT_COMPOSE </constant> target.
The rectangle's coordinates are also expressed in the same coordinate system as
the bounds rectangle. The composing rectangle must lie completely inside bounds
rectangle. The driver must adjust the composing rectangle to fit to the
bounding limits. Moreover, the driver can perform other adjustments according
to hardware limitations. The application can control rounding behaviour using
-<link linkend="v4l2-sel-flags"> constraint flags </link>.</para>
+<link linkend="v4l2-selection-flags"> constraint flags </link>.</para>
<para>For capture devices the default composing rectangle is queried using
<constant> V4L2_SEL_TGT_COMPOSE_DEFAULT </constant>. It is usually equal to the
@@ -125,7 +125,7 @@ bounding rectangle.</para>
<para>The part of a buffer that is modified by the hardware is given by
<constant> V4L2_SEL_TGT_COMPOSE_PADDED </constant>. It contains all pixels
-defined using <constant> V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> plus all
+defined using <constant> V4L2_SEL_TGT_COMPOSE </constant> plus all
padding data modified by hardware during insertion process. All pixels outside
this rectangle <emphasis>must not</emphasis> be changed by the hardware. The
content of pixels that lie inside the padded area but outside active area is
@@ -153,7 +153,7 @@ specified using <constant> VIDIOC_S_FMT </constant> ioctl.</para>
<para>The top left corner, width and height of the source rectangle, that is
the area from which image date are processed by the hardware, is given by the
-<constant> V4L2_SEL_TGT_CROP_ACTIVE </constant>. Its coordinates are expressed
+<constant> V4L2_SEL_TGT_CROP </constant>. Its coordinates are expressed
in in the same coordinate system as the bounds rectangle. The active cropping
area must lie completely inside the crop boundaries and the driver may further
adjust the requested size and/or position according to hardware
@@ -165,7 +165,7 @@ bounding rectangle.</para>
<para>The part of a video signal or graphics display where the image is
inserted by the hardware is controlled by <constant>
-V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> target. The rectangle's coordinates
+V4L2_SEL_TGT_COMPOSE </constant> target. The rectangle's coordinates
are expressed in pixels. The composing rectangle must lie completely inside the
bounds rectangle. The driver must adjust the area to fit to the bounding
limits. Moreover, the driver can perform other adjustments according to
@@ -184,7 +184,7 @@ such a padded area is driver-dependent feature not covered by this document.
Driver developers are encouraged to keep padded rectangle equal to active one.
The padded target is accessed by the <constant> V4L2_SEL_TGT_COMPOSE_PADDED
</constant> identifier. It must contain all pixels from the <constant>
-V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> target.</para>
+V4L2_SEL_TGT_COMPOSE </constant> target.</para>
</section>
@@ -193,8 +193,8 @@ V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> target.</para>
<title>Scaling control</title>
<para>An application can detect if scaling is performed by comparing the width
-and the height of rectangles obtained using <constant> V4L2_SEL_TGT_CROP_ACTIVE
-</constant> and <constant> V4L2_SEL_TGT_COMPOSE_ACTIVE </constant> targets. If
+and the height of rectangles obtained using <constant> V4L2_SEL_TGT_CROP
+</constant> and <constant> V4L2_SEL_TGT_COMPOSE </constant> targets. If
these are not equal then the scaling is applied. The application can compute
the scaling ratios using these values.</para>
@@ -252,7 +252,7 @@ area)</para>
ret = ioctl(fd, &VIDIOC-G-SELECTION;, &amp;sel);
if (ret)
exit(-1);
- sel.target = V4L2_SEL_TGT_CROP_ACTIVE;
+ sel.target = V4L2_SEL_TGT_CROP;
ret = ioctl(fd, &VIDIOC-S-SELECTION;, &amp;sel);
if (ret)
exit(-1);
@@ -281,7 +281,7 @@ area)</para>
r.left = sel.r.width / 4;
r.top = sel.r.height / 4;
sel.r = r;
- sel.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ sel.target = V4L2_SEL_TGT_COMPOSE;
sel.flags = V4L2_SEL_FLAG_LE;
ret = ioctl(fd, &VIDIOC-S-SELECTION;, &amp;sel);
if (ret)
@@ -298,11 +298,11 @@ V4L2_BUF_TYPE_VIDEO_OUTPUT </constant> for other devices</para>
&v4l2-selection; compose = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
- .target = V4L2_SEL_TGT_COMPOSE_ACTIVE,
+ .target = V4L2_SEL_TGT_COMPOSE,
};
&v4l2-selection; crop = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
- .target = V4L2_SEL_TGT_CROP_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
};
double hscale, vscale;
diff --git a/Documentation/DocBook/media/v4l/selections-common.xml b/Documentation/DocBook/media/v4l/selections-common.xml
new file mode 100644
index 000000000000..7502f784b8cc
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/selections-common.xml
@@ -0,0 +1,164 @@
+<section id="v4l2-selections-common">
+
+ <title>Common selection definitions</title>
+
+ <para>While the <link linkend="selection-api">V4L2 selection
+ API</link> and <link linkend="v4l2-subdev-selections">V4L2 subdev
+ selection APIs</link> are very similar; there is one fundamental
+ difference between the two. On the sub-device API, the selection
+ rectangle refers to the media bus format, and is bound to a
+ sub-device's pad. On the V4L2 interface the selection rectangles
+ refer to the in-memory pixel format.</para>
+
+ <para>This section provides the definitions that are common to the
+ selection interfaces of the two APIs.</para>
+
+ <section id="v4l2-selection-targets">
+
+ <title>Selection targets</title>
+
+ <para>The precise meaning of the selection targets may
+ depend on which of the two interfaces they are used with.</para>
+
+ <table pgwide="1" frame="none" id="v4l2-selection-targets-table">
+ <title>Selection target definitions</title>
+ <tgroup cols="5">
+ <colspec colname="c1" />
+ <colspec colname="c2" />
+ <colspec colname="c3" />
+ <colspec colname="c4" />
+ <colspec colname="c5" />
+ &cs-def;
+ <thead>
+ <row rowsep="1">
+ <entry align="left">Target name</entry>
+ <entry align="left">id</entry>
+ <entry align="left">Definition</entry>
+ <entry align="left">Valid for V4L2</entry>
+ <entry align="left">Valid for V4L2 subdev</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_SEL_TGT_CROP</constant></entry>
+ <entry>0x0000</entry>
+ <entry>Crop rectangle. Defines the cropped area.</entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_CROP_DEFAULT</constant></entry>
+ <entry>0x0001</entry>
+ <entry>Suggested cropping rectangle that covers the "whole picture".</entry>
+ <entry>Yes</entry>
+ <entry>No</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_CROP_BOUNDS</constant></entry>
+ <entry>0x0002</entry>
+ <entry>Bounds of the crop rectangle. All valid crop
+ rectangles fit inside the crop bounds rectangle.
+ </entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_COMPOSE</constant></entry>
+ <entry>0x0100</entry>
+ <entry>Compose rectangle. Used to configure scaling
+ and composition.</entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_COMPOSE_DEFAULT</constant></entry>
+ <entry>0x0101</entry>
+ <entry>Suggested composition rectangle that covers the "whole picture".</entry>
+ <entry>Yes</entry>
+ <entry>No</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_COMPOSE_BOUNDS</constant></entry>
+ <entry>0x0102</entry>
+ <entry>Bounds of the compose rectangle. All valid compose
+ rectangles fit inside the compose bounds rectangle.</entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_TGT_COMPOSE_PADDED</constant></entry>
+ <entry>0x0103</entry>
+ <entry>The active area and all padding pixels that are inserted or
+ modified by hardware.</entry>
+ <entry>Yes</entry>
+ <entry>No</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </section>
+
+ <section id="v4l2-selection-flags">
+
+ <title>Selection flags</title>
+
+ <table pgwide="1" frame="none" id="v4l2-selection-flags-table">
+ <title>Selection flag definitions</title>
+ <tgroup cols="5">
+ <colspec colname="c1" />
+ <colspec colname="c2" />
+ <colspec colname="c3" />
+ <colspec colname="c4" />
+ <colspec colname="c5" />
+ &cs-def;
+ <thead>
+ <row rowsep="1">
+ <entry align="left">Flag name</entry>
+ <entry align="left">id</entry>
+ <entry align="left">Definition</entry>
+ <entry align="left">Valid for V4L2</entry>
+ <entry align="left">Valid for V4L2 subdev</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_SEL_FLAG_GE</constant></entry>
+ <entry>(1 &lt;&lt; 0)</entry>
+ <entry>Suggests that the driver should choose a rectangle
+ greater than or equal in size to the requested one. The driver
+ may still choose a lesser size, but it will only do so due to
+ hardware limitations. Without this flag (and
+ <constant>V4L2_SEL_FLAG_LE</constant>) the
+ behaviour is to choose the closest possible
+ rectangle.</entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_FLAG_LE</constant></entry>
+ <entry>(1 &lt;&lt; 1)</entry>
+ <entry>Suggests that the driver
+ should choose a rectangle lesser than or equal in size to the one
+ requested. The driver may still choose a greater size, but it
+ will only do so due to hardware limitations.</entry>
+ <entry>Yes</entry>
+ <entry>Yes</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_SEL_FLAG_KEEP_CONFIG</constant></entry>
+ <entry>(1 &lt;&lt; 2)</entry>
+ <entry>The configuration must not be propagated to any
+ further processing steps. If this flag is not given, the
+ configuration is propagated inside the subdevice to all
+ further processing steps.</entry>
+ <entry>No</entry>
+ <entry>Yes</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </section>
+
+</section>
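
A minimal userspace sketch of how the compose target and constraint flags
above are typically exercised (the file descriptor fd, the capture buffer
type and the requested 640x480 geometry are assumptions, not part of the
interface definition):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask for a compose rectangle that is at most 640x480; with
 * V4L2_SEL_FLAG_LE the driver may only shrink the request. */
static int set_compose_le(int fd)
{
	struct v4l2_selection sel;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_COMPOSE;
	sel.flags = V4L2_SEL_FLAG_LE;
	sel.r.left = 0;
	sel.r.top = 0;
	sel.r.width = 640;
	sel.r.height = 480;

	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0) {
		perror("VIDIOC_S_SELECTION");
		return -1;
	}
	printf("granted %dx%d at (%d,%d)\n",
	       (int)sel.r.width, (int)sel.r.height,
	       (int)sel.r.left, (int)sel.r.top);
	return 0;
}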
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index 008c2d73a484..eee6908c749f 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -140,6 +140,11 @@ structs, ioctls) must be noted in more detail in the history chapter
applications. -->
<revision>
+ <revnumber>3.6</revnumber>
+ <date>2012-07-02</date>
+ <authorinitials>hv</authorinitials>
+ <revremark>Added VIDIOC_ENUM_FREQ_BANDS.
+ </revremark>
<revnumber>3.5</revnumber>
<date>2012-05-07</date>
<authorinitials>sa, sn</authorinitials>
@@ -534,6 +539,7 @@ and discussions on the V4L mailing list.</revremark>
&sub-enum-fmt;
&sub-enum-framesizes;
&sub-enum-frameintervals;
+ &sub-enum-freq-bands;
&sub-enuminput;
&sub-enumoutput;
&sub-enumstd;
@@ -589,6 +595,11 @@ and discussions on the V4L mailing list.</revremark>
&sub-write;
</appendix>
+ <appendix>
+ <title>Common definitions for V4L2 and V4L2 subdev interfaces</title>
+ &sub-selections-common;
+ </appendix>
+
<appendix id="videodev">
<title>Video For Linux Two Header File</title>
&sub-videodev2-h;
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index a2474ecb574a..a8cda1acacd9 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -64,7 +64,7 @@ different sizes.</para>
<para>To allocate device buffers applications initialize relevant fields of
the <structname>v4l2_create_buffers</structname> structure. They set the
<structfield>type</structfield> field in the
-<structname>v4l2_format</structname> structure, embedded in this
+&v4l2-format; structure, embedded in this
structure, to the respective stream or buffer type.
<structfield>count</structfield> must be set to the number of required buffers.
<structfield>memory</structfield> specifies the required I/O method. The
@@ -97,7 +97,13 @@ information.</para>
<row>
<entry>__u32</entry>
<entry><structfield>count</structfield></entry>
- <entry>The number of buffers requested or granted.</entry>
+ <entry>The number of buffers requested or granted. If count == 0, then
+ <constant>VIDIOC_CREATE_BUFS</constant> will set <structfield>index</structfield>
+ to the current number of created buffers, and it will check the validity of
+ <structfield>memory</structfield> and <structfield>format.type</structfield>.
+ If those are invalid -1 is returned and errno is set to &EINVAL;,
+ otherwise <constant>VIDIOC_CREATE_BUFS</constant> returns 0. It will
+ never set errno to &EBUSY; in this particular case.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -108,7 +114,7 @@ information.</para>
/></entry>
</row>
<row>
- <entry>struct&nbsp;v4l2_format</entry>
+ <entry>&v4l2-format;</entry>
<entry><structfield>format</structfield></entry>
<entry>Filled in by the application, preserved by the driver.</entry>
</row>
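
The count == 0 query behaviour described above can be used to probe how many
buffers currently exist; a minimal sketch (fd, the memory type and the buffer
type are assumptions):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns the current number of created buffers, or -1 on error. */
static int count_created_buffers(int fd)
{
	struct v4l2_create_buffers create;

	memset(&create, 0, sizeof(create));
	create.count = 0;			/* query only */
	create.memory = V4L2_MEMORY_MMAP;
	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) < 0) {
		perror("VIDIOC_CREATE_BUFS");	/* EINVAL: bad memory/type */
		return -1;
	}
	return create.index;	/* set to the number of created buffers */
}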
diff --git a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
index 6673ce582050..cd7720d404ea 100644
--- a/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-dv-timings-cap.xml
@@ -54,15 +54,9 @@
interface and may change in the future.</para>
</note>
- <para>To query the available timings, applications initialize the
-<structfield>index</structfield> field and zero the reserved array of &v4l2-dv-timings-cap;
-and call the <constant>VIDIOC_DV_TIMINGS_CAP</constant> ioctl with a pointer to this
-structure. Drivers fill the rest of the structure or return an
-&EINVAL; when the index is out of bounds. To enumerate all supported DV timings,
-applications shall begin at index zero, incrementing by one until the
-driver returns <errorcode>EINVAL</errorcode>. Note that drivers may enumerate a
-different set of DV timings after switching the video input or
-output.</para>
+ <para>To query the capabilities of the DV receiver/transmitter, applications can call
+this ioctl and the driver will fill in the structure. Note that drivers may return
+different values after switching the video input or output.</para>
<table pgwide="1" frame="none" id="v4l2-bt-timings-cap">
<title>struct <structname>v4l2_bt_timings_cap</structname></title>
@@ -115,7 +109,7 @@ output.</para>
<row>
<entry>__u32</entry>
<entry><structfield>reserved</structfield>[16]</entry>
- <entry></entry>
+ <entry>Reserved for future extensions. Drivers must set the array to zero.</entry>
</row>
</tbody>
</tgroup>
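
A minimal sketch of the query described above; fd is assumed to be an opened
video device node, and the bt union member with its min/max fields is taken
from struct v4l2_dv_timings_cap / v4l2_bt_timings_cap:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Print the BT.656/1120 timing limits of the current input/output. */
static int print_dv_timings_cap(int fd)
{
	struct v4l2_dv_timings_cap cap;

	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_DV_TIMINGS_CAP, &cap) < 0) {
		perror("VIDIOC_DV_TIMINGS_CAP");
		return -1;
	}
	printf("width %u..%u, height %u..%u, pixelclock %llu..%llu\n",
	       cap.bt.min_width, cap.bt.max_width,
	       cap.bt.min_height, cap.bt.max_height,
	       (unsigned long long)cap.bt.min_pixelclock,
	       (unsigned long long)cap.bt.max_pixelclock);
	return 0;
}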
diff --git a/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml b/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
new file mode 100644
index 000000000000..6541ba0175ed
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/vidioc-enum-freq-bands.xml
@@ -0,0 +1,179 @@
+<refentry id="vidioc-enum-freq-bands">
+ <refmeta>
+ <refentrytitle>ioctl VIDIOC_ENUM_FREQ_BANDS</refentrytitle>
+ &manvol;
+ </refmeta>
+
+ <refnamediv>
+ <refname>VIDIOC_ENUM_FREQ_BANDS</refname>
+ <refpurpose>Enumerate supported frequency bands</refpurpose>
+ </refnamediv>
+
+ <refsynopsisdiv>
+ <funcsynopsis>
+ <funcprototype>
+ <funcdef>int <function>ioctl</function></funcdef>
+ <paramdef>int <parameter>fd</parameter></paramdef>
+ <paramdef>int <parameter>request</parameter></paramdef>
+ <paramdef>struct v4l2_frequency_band
+*<parameter>argp</parameter></paramdef>
+ </funcprototype>
+ </funcsynopsis>
+ </refsynopsisdiv>
+
+ <refsect1>
+ <title>Arguments</title>
+
+ <variablelist>
+ <varlistentry>
+ <term><parameter>fd</parameter></term>
+ <listitem>
+ <para>&fd;</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>request</parameter></term>
+ <listitem>
+ <para>VIDIOC_ENUM_FREQ_BANDS</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><parameter>argp</parameter></term>
+ <listitem>
+ <para></para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+
+ <refsect1>
+ <title>Description</title>
+
+ <note>
+ <title>Experimental</title>
+ <para>This is an <link linkend="experimental"> experimental </link>
+ interface and may change in the future.</para>
+ </note>
+
+ <para>Enumerates the frequency bands that a tuner or modulator supports.
+To do this applications initialize the <structfield>tuner</structfield>,
+<structfield>type</structfield> and <structfield>index</structfield> fields,
+and zero out the <structfield>reserved</structfield> array of a &v4l2-frequency-band; and
+call the <constant>VIDIOC_ENUM_FREQ_BANDS</constant> ioctl with a pointer
+to this structure.</para>
+
+ <para>This ioctl is supported if the <constant>V4L2_TUNER_CAP_FREQ_BANDS</constant> capability
+ of the corresponding tuner/modulator is set.</para>
+
+ <table pgwide="1" frame="none" id="v4l2-frequency-band">
+ <title>struct <structname>v4l2_frequency_band</structname></title>
+ <tgroup cols="3">
+ &cs-str;
+ <tbody valign="top">
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>tuner</structfield></entry>
+ <entry>The tuner or modulator index number. This is the
+same value as in the &v4l2-input; <structfield>tuner</structfield>
+field and the &v4l2-tuner; <structfield>index</structfield> field, or
+the &v4l2-output; <structfield>modulator</structfield> field and the
+&v4l2-modulator; <structfield>index</structfield> field.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>type</structfield></entry>
+ <entry>The tuner type. This is the same value as in the
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
+device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
+for all others. Set this field to <constant>V4L2_TUNER_RADIO</constant> for
+modulators (currently only radio modulators are supported).
+See <xref linkend="v4l2-tuner-type" /></entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>index</structfield></entry>
+ <entry>Identifies the frequency band, set by the application.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>capability</structfield></entry>
+ <entry spanname="hspan">The tuner/modulator capability flags for
+this frequency band, see <xref linkend="tuner-capability" />. The <constant>V4L2_TUNER_CAP_LOW</constant>
+capability must be the same for all frequency bands of the selected tuner/modulator.
+So either all bands have that capability set, or none of them have that capability.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>rangelow</structfield></entry>
+ <entry spanname="hspan">The lowest tunable frequency in
+units of 62.5 kHz, or if the <structfield>capability</structfield>
+flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
+Hz, for this frequency band.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>rangehigh</structfield></entry>
+ <entry spanname="hspan">The highest tunable frequency in
+units of 62.5 kHz, or if the <structfield>capability</structfield>
+flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
+Hz, for this frequency band.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>modulation</structfield></entry>
+ <entry spanname="hspan">The supported modulation systems of this frequency band.
+ See <xref linkend="band-modulation" />. Note that currently only one
+ modulation system per frequency band is supported. More work will need to
+ be done if multiple modulation systems are possible. Contact the
+ linux-media mailing list (&v4l-ml;) if you need that functionality.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[9]</entry>
+ <entry>Reserved for future extensions. Applications and drivers
+ must set the array to zero.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ <table pgwide="1" frame="none" id="band-modulation">
+ <title>Band Modulation Systems</title>
+ <tgroup cols="3">
+ &cs-def;
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_BAND_MODULATION_VSB</constant></entry>
+ <entry>0x02</entry>
+ <entry>Vestigial Sideband modulation, used for analog TV.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BAND_MODULATION_FM</constant></entry>
+ <entry>0x04</entry>
+ <entry>Frequency Modulation, commonly used for analog radio.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_BAND_MODULATION_AM</constant></entry>
+ <entry>0x08</entry>
+ <entry>Amplitude Modulation, commonly used for analog radio.</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+
+ <refsect1>
+ &return-value;
+
+ <variablelist>
+ <varlistentry>
+ <term><errorcode>EINVAL</errorcode></term>
+ <listitem>
+ <para>The <structfield>tuner</structfield> or <structfield>index</structfield>
+is out of bounds or the <structfield>type</structfield> field is wrong.</para>
+ </listitem>
+ </varlistentry>
+ </variablelist>
+ </refsect1>
+</refentry>
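
The usual way to use this ioctl is to increment index from zero until the
driver returns EINVAL; a minimal sketch for a radio tuner (fd and tuner 0
are assumptions):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Enumerate all frequency bands of radio tuner 0. */
static void enum_freq_bands(int fd)
{
	struct v4l2_frequency_band band;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&band, 0, sizeof(band));
		band.tuner = 0;
		band.type = V4L2_TUNER_RADIO;
		band.index = i;
		if (ioctl(fd, VIDIOC_ENUM_FREQ_BANDS, &band) < 0)
			break;	/* EINVAL: no more bands */
		printf("band %u: %u..%u (units of 62.5 %s), modulation 0x%x\n",
		       i, band.rangelow, band.rangehigh,
		       (band.capability & V4L2_TUNER_CAP_LOW) ? "Hz" : "kHz",
		       band.modulation);
	}
}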
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
index 69c178a4d205..c7a1c462e724 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
@@ -98,11 +98,12 @@ the &v4l2-output; <structfield>modulator</structfield> field and the
<entry>__u32</entry>
<entry><structfield>type</structfield></entry>
<entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. See The type must be set
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
-for all others. The field is not applicable to modulators, &ie; ignored
-by drivers. See <xref linkend="v4l2-tuner-type" /></entry>
+for all others. Set this field to <constant>V4L2_TUNER_RADIO</constant> for
+modulators (currently only radio modulators are supported).
+See <xref linkend="v4l2-tuner-type" /></entry>
</row>
<row>
<entry>__u32</entry>
@@ -135,6 +136,12 @@ bounds or the value in the <structfield>type</structfield> field is
wrong.</para>
</listitem>
</varlistentry>
+ <varlistentry>
+ <term><errorcode>EBUSY</errorcode></term>
+ <listitem>
+ <para>A hardware seek is in progress.</para>
+ </listitem>
+ </varlistentry>
</variablelist>
</refsect1>
</refentry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
index bb04eff75f45..f76d8a6d9b92 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-selection.xml
@@ -65,9 +65,9 @@ Do not use multiplanar buffers. Use <constant> V4L2_BUF_TYPE_VIDEO_CAPTURE
</constant>. Use <constant> V4L2_BUF_TYPE_VIDEO_OUTPUT </constant> instead of
<constant> V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE </constant>. The next step is
setting the value of &v4l2-selection; <structfield>target</structfield> field
-to <constant> V4L2_SEL_TGT_CROP_ACTIVE </constant> (<constant>
-V4L2_SEL_TGT_COMPOSE_ACTIVE </constant>). Please refer to table <xref
-linkend="v4l2-sel-target" /> or <xref linkend="selection-api" /> for additional
+to <constant> V4L2_SEL_TGT_CROP </constant> (<constant>
+V4L2_SEL_TGT_COMPOSE </constant>). Please refer to table <xref
+linkend="v4l2-selections-common" /> or <xref linkend="selection-api" /> for additional
targets. The <structfield>flags</structfield> and <structfield>reserved
</structfield> fields of &v4l2-selection; are ignored and they must be filled
with zeros. The driver fills the rest of the structure or
@@ -86,9 +86,9 @@ use multiplanar buffers. Use <constant> V4L2_BUF_TYPE_VIDEO_CAPTURE
</constant>. Use <constant> V4L2_BUF_TYPE_VIDEO_OUTPUT </constant> instead of
<constant> V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE </constant>. The next step is
setting the value of &v4l2-selection; <structfield>target</structfield> to
-<constant>V4L2_SEL_TGT_CROP_ACTIVE</constant> (<constant>
-V4L2_SEL_TGT_COMPOSE_ACTIVE </constant>). Please refer to table <xref
-linkend="v4l2-sel-target" /> or <xref linkend="selection-api" /> for additional
+<constant>V4L2_SEL_TGT_CROP</constant> (<constant>
+V4L2_SEL_TGT_COMPOSE </constant>). Please refer to table <xref
+linkend="v4l2-selections-common" /> or <xref linkend="selection-api" /> for additional
targets. The &v4l2-rect; <structfield>r</structfield> rectangle needs to be
set to the desired active area. Field &v4l2-selection; <structfield> reserved
</structfield> is ignored and must be filled with zeros. The driver may adjust
@@ -154,74 +154,8 @@ exist no rectangle </emphasis> that satisfies the constraints.</para>
</refsect1>
- <refsect1>
- <table frame="none" pgwide="1" id="v4l2-sel-target">
- <title>Selection targets.</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP_ACTIVE</constant></entry>
- <entry>0x0000</entry>
- <entry>The area that is currently cropped by hardware.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP_DEFAULT</constant></entry>
- <entry>0x0001</entry>
- <entry>Suggested cropping rectangle that covers the "whole picture".</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_CROP_BOUNDS</constant></entry>
- <entry>0x0002</entry>
- <entry>Limits for the cropping rectangle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_ACTIVE</constant></entry>
- <entry>0x0100</entry>
- <entry>The area to which data is composed by hardware.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_DEFAULT</constant></entry>
- <entry>0x0101</entry>
- <entry>Suggested composing rectangle that covers the "whole picture".</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_BOUNDS</constant></entry>
- <entry>0x0102</entry>
- <entry>Limits for the composing rectangle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_TGT_COMPOSE_PADDED</constant></entry>
- <entry>0x0103</entry>
- <entry>The active area and all padding pixels that are inserted or modified by hardware.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
-
- <refsect1>
- <table frame="none" pgwide="1" id="v4l2-sel-flags">
- <title>Selection constraint flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SEL_FLAG_GE</constant></entry>
- <entry>0x00000001</entry>
- <entry>Indicates that the adjusted rectangle must contain the original
- &v4l2-selection; <structfield>r</structfield> rectangle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SEL_FLAG_LE</constant></entry>
- <entry>0x00000002</entry>
- <entry>Indicates that the adjusted rectangle must be inside the original
- &v4l2-rect; <structfield>r</structfield> rectangle.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
- </refsect1>
+ <para>Selection targets and flags are documented in <xref
+ linkend="v4l2-selections-common"/>.</para>
<section>
<figure id="sel-const-adjust">
@@ -252,14 +186,14 @@ exist no rectangle </emphasis> that satisfies the constraints.</para>
<row>
<entry>__u32</entry>
<entry><structfield>target</structfield></entry>
- <entry>Used to select between <link linkend="v4l2-sel-target"> cropping
+ <entry>Used to select between <link linkend="v4l2-selections-common"> cropping
and composing rectangles</link>.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
<entry>Flags controlling the selection rectangle adjustments, refer to
- <link linkend="v4l2-sel-flags">selection flags</link>.</entry>
+ <link linkend="v4l2-selection-flags">selection flags</link>.</entry>
</row>
<row>
<entry>&v4l2-rect;</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
index 62a1aa200a36..701138f1209d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml
@@ -119,10 +119,14 @@ field is not quite clear.--></para></entry>
<xref linkend="tuner-capability" />. Audio flags indicate the ability
to decode audio subprograms. They will <emphasis>not</emphasis>
change, for example with the current video standard.</para><para>When
-the structure refers to a radio tuner only the
-<constant>V4L2_TUNER_CAP_LOW</constant>,
-<constant>V4L2_TUNER_CAP_STEREO</constant> and
-<constant>V4L2_TUNER_CAP_RDS</constant> flags can be set.</para></entry>
+the structure refers to a radio tuner the
+<constant>V4L2_TUNER_CAP_LANG1</constant>,
+<constant>V4L2_TUNER_CAP_LANG2</constant> and
+<constant>V4L2_TUNER_CAP_NORM</constant> flags can't be used.</para>
+<para>If multiple frequency bands are supported, then
+<structfield>capability</structfield> is the union of all
+<structfield>capability</structfield> fields of each &v4l2-frequency-band;.
+</para></entry>
</row>
<row>
<entry>__u32</entry>
@@ -130,7 +134,9 @@ the structure refers to a radio tuner only the
<entry spanname="hspan">The lowest tunable frequency in
units of 62.5 kHz, or if the <structfield>capability</structfield>
flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz.</entry>
+Hz. If multiple frequency bands are supported, then
+<structfield>rangelow</structfield> is the lowest frequency
+of all the frequency bands.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -138,7 +144,9 @@ Hz.</entry>
<entry spanname="hspan">The highest tunable frequency in
units of 62.5 kHz, or if the <structfield>capability</structfield>
flag <constant>V4L2_TUNER_CAP_LOW</constant> is set, in units of 62.5
-Hz.</entry>
+Hz. If multiple frequency bands are supported, then
+<structfield>rangehigh</structfield> is the highest frequency
+of all the frequency bands.</entry>
</row>
<row>
<entry>__u32</entry>
@@ -276,6 +284,18 @@ can or must be switched. (B/G PAL tuners for example are typically not
<constant>V4L2_TUNER_ANALOG_TV</constant> tuners can have this capability.</entry>
</row>
<row>
+ <entry><constant>V4L2_TUNER_CAP_HWSEEK_BOUNDED</constant></entry>
+ <entry>0x0004</entry>
+ <entry>If set, then this tuner supports the hardware seek functionality
+ where the seek stops when it reaches the end of the frequency range.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_TUNER_CAP_HWSEEK_WRAP</constant></entry>
+ <entry>0x0008</entry>
+ <entry>If set, then this tuner supports the hardware seek functionality
+ where the seek wraps around when it reaches the end of the frequency range.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_TUNER_CAP_STEREO</constant></entry>
<entry>0x0010</entry>
<entry>Stereo audio reception is supported.</entry>
@@ -328,6 +348,12 @@ radio tuners.</entry>
<entry>0x0200</entry>
<entry>The RDS data is parsed by the hardware and set via controls.</entry>
</row>
+ <row>
+ <entry><constant>V4L2_TUNER_CAP_FREQ_BANDS</constant></entry>
+ <entry>0x0400</entry>
+ <entry>The &VIDIOC-ENUM-FREQ-BANDS; ioctl can be used to enumerate
+ the available frequency bands.</entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml b/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
index 9caa49af580f..77ff5be0809d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-qbuf.xml
@@ -71,12 +71,9 @@ initialize the <structfield>bytesused</structfield>,
<structfield>field</structfield> and
<structfield>timestamp</structfield> fields, see <xref
linkend="buffer" /> for details.
-Applications must also set <structfield>flags</structfield> to 0. If a driver
-supports capturing from specific video inputs and you want to specify a video
-input, then <structfield>flags</structfield> should be set to
-<constant>V4L2_BUF_FLAG_INPUT</constant> and the field
-<structfield>input</structfield> must be initialized to the desired input.
-The <structfield>reserved</structfield> field must be set to 0. When using
+Applications must also set <structfield>flags</structfield> to 0.
+The <structfield>reserved2</structfield> and
+<structfield>reserved</structfield> fields must be set to 0. When using
the <link linkend="planar-apis">multi-planar API</link>, the
<structfield>m.planes</structfield> field must contain a userspace pointer
to a filled-in array of &v4l2-plane; and the <structfield>length</structfield>
diff --git a/Documentation/DocBook/media/v4l/vidioc-querycap.xml b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
index 4643505cd4ca..f33dd746b66b 100644
--- a/Documentation/DocBook/media/v4l/vidioc-querycap.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-querycap.xml
@@ -192,6 +192,19 @@ linkend="output">Video Output</link> interface.</entry>
<link linkend="output">Video Output</link> interface.</entry>
</row>
<row>
+ <entry><constant>V4L2_CAP_VIDEO_M2M</constant></entry>
+ <entry>0x00004000</entry>
+ <entry>The device supports the single-planar API through the
+ Video Memory-To-Memory interface.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant></entry>
+ <entry>0x00008000</entry>
+ <entry>The device supports the
+ <link linkend="planar-apis">multi-planar API</link> through the
+ Video Memory-To-Memory interface.</entry>
+ </row>
+ <row>
<entry><constant>V4L2_CAP_VIDEO_OVERLAY</constant></entry>
<entry>0x00000004</entry>
<entry>The device supports the <link
diff --git a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml b/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
index 407dfceb71f0..3dd1bec6d3c7 100644
--- a/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-s-hw-freq-seek.xml
@@ -52,11 +52,26 @@
<para>Start a hardware frequency seek from the current frequency.
To do this applications initialize the <structfield>tuner</structfield>,
<structfield>type</structfield>, <structfield>seek_upward</structfield>,
-<structfield>spacing</structfield> and
-<structfield>wrap_around</structfield> fields, and zero out the
-<structfield>reserved</structfield> array of a &v4l2-hw-freq-seek; and
-call the <constant>VIDIOC_S_HW_FREQ_SEEK</constant> ioctl with a pointer
-to this structure.</para>
+<structfield>wrap_around</structfield>, <structfield>spacing</structfield>,
+<structfield>rangelow</structfield> and <structfield>rangehigh</structfield>
+fields, and zero out the <structfield>reserved</structfield> array of a
+&v4l2-hw-freq-seek; and call the <constant>VIDIOC_S_HW_FREQ_SEEK</constant>
+ioctl with a pointer to this structure.</para>
+
+ <para>The <structfield>rangelow</structfield> and
+<structfield>rangehigh</structfield> fields can be set to a non-zero value to
+tell the driver to search a specific band. If the &v4l2-tuner;
+<structfield>capability</structfield> field has the
+<constant>V4L2_TUNER_CAP_HWSEEK_PROG_LIM</constant> flag set, these values
+must fall within one of the bands returned by &VIDIOC-ENUM-FREQ-BANDS;. If
+the <constant>V4L2_TUNER_CAP_HWSEEK_PROG_LIM</constant> flag is not set,
+then these values must exactly match those of one of the bands returned by
+&VIDIOC-ENUM-FREQ-BANDS;. If the current frequency of the tuner does not fall
+within the selected band it will be clamped to fit in the band before the
+seek is started.</para>
+
+ <para>If an error is returned, then the original frequency will
+ be restored.</para>
<para>This ioctl is supported if the <constant>V4L2_CAP_HW_FREQ_SEEK</constant> capability is set.</para>
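
A minimal sketch of a band-limited upward seek (fd, tuner 0 and the band
limits are assumptions; real code would take rangelow/rangehigh from
VIDIOC_ENUM_FREQ_BANDS):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int seek_up(int fd, __u32 rangelow, __u32 rangehigh)
{
	struct v4l2_hw_freq_seek seek;

	memset(&seek, 0, sizeof(seek));
	seek.tuner = 0;
	seek.type = V4L2_TUNER_RADIO;
	seek.seek_upward = 1;
	seek.wrap_around = 0;	/* stop at the end of the band */
	seek.spacing = 0;	/* let the driver pick a step size */
	seek.rangelow = rangelow;
	seek.rangehigh = rangehigh;

	if (ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek) < 0) {
		if (errno == ENODATA)
			fprintf(stderr, "no channel found\n");
		else
			perror("VIDIOC_S_HW_FREQ_SEEK");
		return -1;
	}
	return 0;
}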
@@ -87,7 +102,10 @@ field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
<row>
<entry>__u32</entry>
<entry><structfield>wrap_around</structfield></entry>
- <entry>If non-zero, wrap around when at the end of the frequency range, else stop seeking.</entry>
+ <entry>If non-zero, wrap around when at the end of the frequency range, else stop seeking.
+ The &v4l2-tuner; <structfield>capability</structfield> field will tell you what the
+ hardware supports.
+ </entry>
</row>
<row>
<entry>__u32</entry>
@@ -96,7 +114,27 @@ field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
</row>
<row>
<entry>__u32</entry>
- <entry><structfield>reserved</structfield>[7]</entry>
+ <entry><structfield>rangelow</structfield></entry>
+ <entry>If non-zero, the lowest tunable frequency of the band to
+search in units of 62.5 kHz, or if the &v4l2-tuner;
+<structfield>capability</structfield> field has the
+<constant>V4L2_TUNER_CAP_LOW</constant> flag set, in units of 62.5 Hz.
+If <structfield>rangelow</structfield> is zero a reasonable default value
+is used.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>rangehigh</structfield></entry>
+ <entry>If non-zero, the highest tunable frequency of the band to
+search in units of 62.5 kHz, or if the &v4l2-tuner;
+<structfield>capability</structfield> field has the
+<constant>V4L2_TUNER_CAP_LOW</constant> flag set, in units of 62.5 Hz.
+If <structfield>rangehigh</structfield> is zero a reasonable default value
+is used.</entry>
+ </row>
+ <row>
+ <entry>__u32</entry>
+ <entry><structfield>reserved</structfield>[5]</entry>
<entry>Reserved for future extensions. Applications
must set the array to zero.</entry>
</row>
@@ -113,14 +151,22 @@ field and the &v4l2-tuner; <structfield>index</structfield> field.</entry>
<term><errorcode>EINVAL</errorcode></term>
<listitem>
<para>The <structfield>tuner</structfield> index is out of
-bounds, the wrap_around value is not supported or the value in the <structfield>type</structfield> field is
-wrong.</para>
+bounds, the <structfield>wrap_around</structfield> value is not supported or
+one of the values in the <structfield>type</structfield>,
+<structfield>rangelow</structfield> or <structfield>rangehigh</structfield>
+fields is wrong.</para>
+ </listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorcode>ENODATA</errorcode></term>
+ <listitem>
+ <para>The hardware seek found no channels.</para>
</listitem>
</varlistentry>
<varlistentry>
- <term><errorcode>EAGAIN</errorcode></term>
+ <term><errorcode>EBUSY</errorcode></term>
<listitem>
- <para>The ioctl timed-out. Try again.</para>
+ <para>Another hardware seek is already in progress.</para>
</listitem>
</varlistentry>
</variablelist>
diff --git a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
index 208e9f0da3f3..f33cc814a01d 100644
--- a/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-subdev-g-selection.xml
@@ -72,10 +72,10 @@
<section>
<title>Types of selection targets</title>
- <para>There are two types of selection targets: actual and bounds.
- The ACTUAL targets are the targets which configure the hardware.
- The BOUNDS target will return a rectangle that contain all
- possible ACTUAL rectangles.</para>
+ <para>There are two types of selection targets: actual and bounds. The
+ actual targets are the targets which configure the hardware. The BOUNDS
+ target will return a rectangle that contains all possible actual
+ rectangles.</para>
</section>
<section>
@@ -87,71 +87,8 @@
<constant>EINVAL</constant>.</para>
</section>
- <table pgwide="1" frame="none" id="v4l2-subdev-selection-targets">
- <title>V4L2 subdev selection targets</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL</constant></entry>
- <entry>0x0000</entry>
- <entry>Actual crop. Defines the cropping
- performed by the processing step.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS</constant></entry>
- <entry>0x0002</entry>
- <entry>Bounds of the crop rectangle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL</constant></entry>
- <entry>0x0100</entry>
- <entry>Actual compose rectangle. Used to configure scaling
- on sink pads and composition on source pads.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS</constant></entry>
- <entry>0x0102</entry>
- <entry>Bounds of the compose rectangle.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
-
- <table pgwide="1" frame="none" id="v4l2-subdev-selection-flags">
- <title>V4L2 subdev selection flags</title>
- <tgroup cols="3">
- &cs-def;
- <tbody valign="top">
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_FLAG_SIZE_GE</constant></entry>
- <entry>(1 &lt;&lt; 0)</entry> <entry>Suggest the driver it
- should choose greater or equal rectangle (in size) than
- was requested. Albeit the driver may choose a lesser size,
- it will only do so due to hardware limitations. Without
- this flag (and
- <constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant>) the
- behaviour is to choose the closest possible
- rectangle.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_FLAG_SIZE_LE</constant></entry>
- <entry>(1 &lt;&lt; 1)</entry> <entry>Suggest the driver it
- should choose lesser or equal rectangle (in size) than was
- requested. Albeit the driver may choose a greater size, it
- will only do so due to hardware limitations.</entry>
- </row>
- <row>
- <entry><constant>V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG</constant></entry>
- <entry>(1 &lt;&lt; 2)</entry>
- <entry>The configuration should not be propagated to any
- further processing steps. If this flag is not given, the
- configuration is propagated inside the subdevice to all
- further processing steps.</entry>
- </row>
- </tbody>
- </tgroup>
- </table>
+ <para>Selection targets and flags are documented in <xref
+ linkend="v4l2-selections-common"/>.</para>
<table pgwide="1" frame="none" id="v4l2-subdev-selection">
<title>struct <structname>v4l2_subdev_selection</structname></title>
@@ -173,13 +110,13 @@
<entry>__u32</entry>
<entry><structfield>target</structfield></entry>
<entry>Target selection rectangle. See
- <xref linkend="v4l2-subdev-selection-targets">.</xref>.</entry>
+ <xref linkend="v4l2-selections-common" />.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
<entry>Flags. See
- <xref linkend="v4l2-subdev-selection-flags">.</xref></entry>
+ <xref linkend="v4l2-selection-flags" />.</entry>
</row>
<row>
<entry>&v4l2-rect;</entry>
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
index 27dcaabfb4db..1401cece745a 100644
--- a/Documentation/IRQ-domain.txt
+++ b/Documentation/IRQ-domain.txt
@@ -93,6 +93,7 @@ Linux IRQ number into the hardware.
Most drivers cannot use this mapping.
==== Legacy ====
+irq_domain_add_simple()
irq_domain_add_legacy()
irq_domain_add_legacy_isa()
@@ -115,3 +116,7 @@ The legacy map should only be used if fixed IRQ mappings must be
supported. For example, ISA controllers would use the legacy map for
mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
numbers.
+
+Most users of legacy mappings should use irq_domain_add_simple() which
+will use a legacy domain only if an IRQ range is supplied by the
+system and will otherwise use a linear domain mapping.
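
A hedged sketch of that pattern (my_intc_init, my_irq_map, the 32-interrupt
size and the use of dummy_irq_chip are illustrative assumptions, not a
prescribed driver structure):

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical per-IRQ setup callback. */
static int my_irq_map(struct irq_domain *d, unsigned int virq,
		      irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops my_irq_ops = {
	.map	= my_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int my_intc_init(struct device_node *node, unsigned int first_irq)
{
	/* first_irq > 0 requests a legacy mapping starting at that Linux
	 * IRQ number; first_irq == 0 falls back to a linear domain. */
	struct irq_domain *d;

	d = irq_domain_add_simple(node, 32, first_irq, &my_irq_ops, NULL);
	return d ? 0 : -ENOMEM;
}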
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index d111e3b23db0..d18ecd827c40 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -3,15 +3,21 @@
biodoc.txt
- Notes on the Generic Block Layer Rewrite in Linux 2.5
capability.txt
- - Generic Block Device Capability (/sys/block/<disk>/capability)
+ - Generic Block Device Capability (/sys/block/<device>/capability)
+cfq-iosched.txt
+ - CFQ IO scheduler tunables
+data-integrity.txt
+ - Block data integrity
deadline-iosched.txt
- Deadline IO scheduler tunables
ioprio.txt
- Block io priorities (in CFQ scheduler)
+queue-sysfs.txt
+ - Queue's sysfs entries
request.txt
- The members of struct request (in include/linux/blkdev.h)
stat.txt
- - Block layer statistics in /sys/block/<dev>/stat
+ - Block layer statistics in /sys/block/<device>/stat
switching-sched.txt
- Switching I/O schedulers at runtime
writeback_cache_control.txt
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
index 6d670f570451..d89b4fe724d7 100644
--- a/Documentation/block/cfq-iosched.txt
+++ b/Documentation/block/cfq-iosched.txt
@@ -1,3 +1,14 @@
+CFQ (Complete Fairness Queueing)
+================================
+
+The main aim of the CFQ scheduler is to provide a fair allocation of the disk
+I/O bandwidth for all the processes which request an I/O operation.
+
+CFQ maintains a per-process queue for the processes which request I/O
+operations (synchronous requests). In case of asynchronous requests, all the
+requests from all the processes are batched together according to their
+process's I/O priority.
+
CFQ ioscheduler tunables
========================
@@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID
controller or for storage arrays), setting slice_idle=0 might end up in better
throughput and acceptable latencies.
+back_seek_max
+-------------
+This specifies, in Kbytes, the maximum "distance" for backward seeking.
+The distance is the amount of space from the current head location to the
+sectors that lie behind it.
+
+This parameter allows the scheduler to anticipate requests in the "backward"
+direction and consider them as being the "next" if they are within this
+distance from the current head location.
+
+back_seek_penalty
+-----------------
+This parameter is used to compute the cost of backward seeking. If the
+backward distance of a request is just 1/back_seek_penalty of the distance
+of a "front" request, then the seek cost of the two requests is considered
+equivalent.
+
+The scheduler will then not bias toward either request (otherwise it would
+bias toward the front request). The default value of back_seek_penalty is 2.
+
+fifo_expire_async
+-----------------
+This parameter is used to set the timeout of asynchronous requests. The
+default value is 248ms.
+
+fifo_expire_sync
+----------------
+This parameter is used to set the timeout of synchronous requests. The
+default value is 124ms. To favor synchronous requests over asynchronous
+ones, decrease this value relative to fifo_expire_async.
+
+slice_async
+-----------
+This parameter is the same as slice_sync but for the asynchronous queue. The
+default value is 40ms.
+
+slice_async_rq
+--------------
+This parameter limits the number of asynchronous requests dispatched to the
+device request queue within a queue's slice time. The maximum number of
+requests that are allowed to be dispatched also depends upon the I/O priority.
+The default value is 2.
+
+slice_sync
+----------
+When a queue is selected for execution, the queue's IO requests are only
+executed for a certain amount of time (time_slice) before switching to another
+queue. This parameter is used to calculate the time slice of the synchronous
+queue.
+
+time_slice is computed using the following equation:
+
+  time_slice = slice_sync + (slice_sync/5 * (4 - prio))
+
+To increase the time_slice of the synchronous queue, increase the value of
+slice_sync. The default value is 100ms.
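
For illustration only, a sketch of the arithmetic above using the default
slice_sync of 100ms (not code taken from the scheduler):

/* time_slice formula from above; all values in milliseconds,
 * prio is the I/O priority level (0 = highest, 7 = lowest). */
static int cfq_time_slice(int slice_sync, int prio)
{
	return slice_sync + slice_sync / 5 * (4 - prio);
}

/* cfq_time_slice(100, 0) == 180, cfq_time_slice(100, 4) == 100,
 * cfq_time_slice(100, 7) == 40. */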
+
+quantum
+-------
+This specifies the number of requests dispatched to the device queue. In a
+queue's time slice, a request will not be dispatched if the number of requests
+in the device exceeds this parameter. This parameter is used for synchronous
+requests.
+
+In case of storage with several disks, this setting can limit the parallel
+processing of requests. Therefore, increasing the value can improve the
+performance, although this can cause the latency of some I/O to increase due
+to the larger number of outstanding requests.
+
CFQ IOPS Mode for group scheduling
===================================
Basic CFQ design is to provide priority based time slices. Higher priority
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index d8147b336c35..e54ac1d53403 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory.
Files denoted with a RO postfix are readonly and the RW postfix means
read-write.
+add_random (RW)
+----------------
+This file allows one to turn off the disk entropy contribution. The default
+value of this file is '1' (on).
+
+discard_granularity (RO)
+-----------------------
+This shows the size of internal allocation of the device in bytes, if
+reported by the device. A value of '0' means the device does not support
+the discard functionality.
+
+discard_max_bytes (RO)
+----------------------
+Devices that support discard functionality may have internal limits on
+the number of bytes that can be trimmed or unmapped in a single operation.
+The discard_max_bytes parameter is set by the device driver to the maximum
+number of bytes that can be discarded in a single operation. Discard
+requests issued to the device must not exceed this limit. A discard_max_bytes
+value of 0 means that the device does not support discard functionality.
+
+discard_zeroes_data (RO)
+------------------------
+When read, this file will show if the discarded blocks are zeroed by the
+device or not. If its value is '1' the blocks are zeroed, otherwise they
+are not.
+
hw_sector_size (RO)
-------------------
This is the hardware sector size of the device, in bytes.
+iostats (RW)
+-------------
+This file is used to control (on/off) the iostats accounting of the
+disk.
+
+logical_block_size (RO)
+-----------------------
+This is the logical block size of the device, in bytes.
+
max_hw_sectors_kb (RO)
----------------------
This is the maximum number of kilobytes supported in a single data transfer.
+max_integrity_segments (RO)
+---------------------------
+When read, this file shows the maximum number of integrity segments, as
+set by the block layer, that the hardware controller can handle.
+
max_sectors_kb (RW)
-------------------
This is the maximum number of kilobytes that the block layer will allow
for a filesystem request. Must be smaller than or equal to the maximum
size allowed by the hardware.
+max_segments (RO)
+-----------------
+Maximum number of segments of the device.
+
+max_segment_size (RO)
+---------------------
+Maximum segment size of the device.
+
+minimum_io_size (RO)
+--------------------
+This is the smallest preferred io size reported by the device.
+
nomerges (RW)
-------------
This enables the user to disable the lookup logic involved with IO
@@ -38,11 +89,31 @@ read or write requests. Note that the total allocated number may be twice
this amount, since it applies only to reads or writes (not the accumulated
sum).
+To avoid priority inversion through request starvation, a request
+queue maintains a separate request pool per cgroup when
+CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such
+per-block-cgroup request pool. IOW, if there are N block cgroups,
+each request queue may have up to N request pools, each independently
+regulated by nr_requests.
+
+optimal_io_size (RO)
+--------------------
+This is the optimal io size reported by the device.
+
+physical_block_size (RO)
+------------------------
+This is the physical block size of device, in bytes.
+
read_ahead_kb (RW)
------------------
Maximum number of kilobytes to read-ahead for filesystems on this block
device.
+rotational (RW)
+---------------
+This file is used to indicate whether the device is of rotational or
+non-rotational type.
+
rq_affinity (RW)
----------------
If this option is '1', the block layer will migrate request completions to the
diff --git a/Documentation/cgroups/hugetlb.txt b/Documentation/cgroups/hugetlb.txt
new file mode 100644
index 000000000000..a9faaca1f029
--- /dev/null
+++ b/Documentation/cgroups/hugetlb.txt
@@ -0,0 +1,45 @@
+HugeTLB Controller
+-------------------
+
+The HugeTLB controller allows one to limit HugeTLB usage per control group and
+enforces the controller limit during page fault. Since HugeTLB doesn't
+support page reclaim, enforcing the limit at page fault time implies that
+the application will get a SIGBUS signal if it tries to access HugeTLB pages
+beyond its limit. This requires the application to know beforehand how many
+HugeTLB pages it would require for its use.
+
+HugeTLB controller can be created by first mounting the cgroup filesystem.
+
+# mount -t cgroup -o hugetlb none /sys/fs/cgroup
+
+With the above step, the initial or the parent HugeTLB group becomes
+visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+
+New groups can be created under the parent group /sys/fs/cgroup.
+
+# cd /sys/fs/cgroup
+# mkdir g1
+# echo $$ > g1/tasks
+
+The above steps create a new group g1 and move the current shell
+process (bash) into it.
+
+Brief summary of control files
+
+ hugetlb.<hugepagesize>.limit_in_bytes # set/show limit of "hugepagesize" hugetlb usage
+ hugetlb.<hugepagesize>.max_usage_in_bytes # show max "hugepagesize" hugetlb usage recorded
+ hugetlb.<hugepagesize>.usage_in_bytes # show current res_counter usage for "hugepagesize" hugetlb
+ hugetlb.<hugepagesize>.failcnt # show the number of allocation failures due to HugeTLB limit
+
+For a system supporting two hugepage sizes (16M and 16G) the control
+files include:
+
+hugetlb.16GB.limit_in_bytes
+hugetlb.16GB.max_usage_in_bytes
+hugetlb.16GB.usage_in_bytes
+hugetlb.16GB.failcnt
+hugetlb.16MB.limit_in_bytes
+hugetlb.16MB.max_usage_in_bytes
+hugetlb.16MB.usage_in_bytes
+hugetlb.16MB.failcnt
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index dd88540bb995..4372e6b8a353 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -73,6 +73,8 @@ Brief summary of control files.
memory.kmem.tcp.limit_in_bytes # set/show hard limit for tcp buf memory
memory.kmem.tcp.usage_in_bytes # show current tcp buf memory allocation
+ memory.kmem.tcp.failcnt # show the number of tcp buf memory usage hits limits
+ memory.kmem.tcp.max_usage_in_bytes # show max tcp buf memory usage recorded
1. History
@@ -187,12 +189,12 @@ the cgroup that brought it in -- this will happen on memory pressure).
But see section 8.2: when moving a task to another cgroup, its pages may
be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
-Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
+Exception: If CONFIG_MEMCG_SWAP is not used.
When you do swapoff and make swapped-out pages of shmem(tmpfs) to
be backed into memory in force, charges for pages are accounted against the
caller of swapoff rather than the users of shmem.
-2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
+2.4 Swap Extension (CONFIG_MEMCG_SWAP)
Swap Extension allows you to record charge for swap. A swapped-in page is
charged back to original page allocator if possible.
@@ -259,7 +261,7 @@ When oom event notifier is registered, event will be delivered.
per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by
zone->lru_lock, it has no lock of its own.
-2.7 Kernel Memory Extension (CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM)
With the Kernel memory extension, the Memory Controller is able to limit
the amount of kernel memory used by the system. Kernel memory is fundamentally
@@ -286,8 +288,8 @@ per cgroup, instead of globally.
a. Enable CONFIG_CGROUPS
b. Enable CONFIG_RESOURCE_COUNTERS
-c. Enable CONFIG_CGROUP_MEM_RES_CTLR
-d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
+c. Enable CONFIG_MEMCG
+d. Enable CONFIG_MEMCG_SWAP (to use swap extension)
1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
# mount -t tmpfs none /sys/fs/cgroup
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 946c73342cde..1c1844957166 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -27,6 +27,10 @@ The target is named "raid" and it accepts the following parameters:
- rotating parity N (right-to-left) with data restart
raid6_nc RAID6 N continue
- rotating parity N (right-to-left) with data continuation
+ raid10 Various RAID10 inspired algorithms chosen by additional params
+ - RAID10: Striped Mirrors (aka 'Striping on top of mirrors')
+ - RAID1E: Integrated Adjacent Stripe Mirroring
+ - and other similar RAID10 variants
Reference: Chapter 4 of
http://www.snia.org/sites/default/files/SNIA_DDF_Technical_Position_v2.0.pdf
@@ -59,6 +63,28 @@ The target is named "raid" and it accepts the following parameters:
logical size of the array. The bitmap records the device
synchronisation state for each region.
+ [raid10_copies <# copies>]
+ [raid10_format near]
+ These two options are used to alter the default layout of
+ a RAID10 configuration. The number of copies can be
+ specified, but the default is 2. There are other variations
+ to how the copies are laid down - the default and only current
+ option is "near". Near copies are what most people think of
+ with respect to mirroring. If these options are left
+ unspecified, or 'raid10_copies 2' and/or 'raid10_format near'
+ are given, then the layouts for 2, 3 and 4 devices are:
+ 2 drives 3 drives 4 drives
+ -------- ---------- --------------
+ A1 A1 A1 A1 A2 A1 A1 A2 A2
+ A2 A2 A2 A3 A3 A3 A3 A4 A4
+ A3 A3 A4 A4 A5 A5 A5 A6 A6
+ A4 A4 A5 A6 A6 A7 A7 A8 A8
+ .. .. .. .. .. .. .. .. ..
+ The 2-device layout is equivalent to 2-way RAID1. The 4-device
+ layout is what a traditional RAID10 would look like. The
+ 3-device layout is what might be called a 'RAID1E - Integrated
+ Adjacent Stripe Mirroring'.
+
<#raid_devs>: The number of devices composing the array.
Each device consists of two entries. The first is the device
containing the metadata (if any); the second is the one containing the
diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt
index f34d3236b9da..45f3b91ea4c3 100644
--- a/Documentation/device-mapper/striped.txt
+++ b/Documentation/device-mapper/striped.txt
@@ -9,15 +9,14 @@ devices in parallel.
Parameters: <num devs> <chunk size> [<dev path> <offset>]+
<num devs>: Number of underlying devices.
- <chunk size>: Size of each chunk of data. Must be a power-of-2 and at
- least as large as the system's PAGE_SIZE.
+ <chunk size>: Size of each chunk of data. Must be at least as
+ large as the system's PAGE_SIZE.
<dev path>: Full pathname to the underlying block-device, or a
"major:minor" device-number.
<offset>: Starting sector within the device.
One or more underlying devices can be specified. The striped device size must
-be a multiple of the chunk size and a multiple of the number of underlying
-devices.
+be a multiple of the chunk size multiplied by the number of underlying devices.
Example scripts
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index f5cfc62b7ad3..30b8b83bd333 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -231,6 +231,9 @@ i) Constructor
no_discard_passdown: Don't pass discards down to the underlying
data device, but just remove the mapping.
+ read_only: Don't allow any changes to be made to the pool
+ metadata.
+
Data block size must be between 64KB (128 sectors) and 1GB
(2097152 sectors) inclusive.
@@ -239,7 +242,7 @@ ii) Status
<transaction id> <used metadata blocks>/<total metadata blocks>
<used data blocks>/<total data blocks> <held metadata root>
-
+ [no_]discard_passdown ro|rw
transaction id:
A 64-bit number used by userspace to help synchronise with metadata
@@ -257,6 +260,21 @@ ii) Status
held root. This feature is not yet implemented so '-' is
always returned.
+ discard_passdown|no_discard_passdown
+ Whether or not discards are actually being passed down to the
+ underlying device. If this is enabled when the table is loaded,
+ it can still get disabled if the underlying device doesn't support it.
+
+ ro|rw
+ If the pool encounters certain types of device failures it will
+ drop into a read-only metadata mode in which no changes to
+ the pool metadata (like allocating new blocks) are permitted.
+
+ In serious cases where even a read-only mode is deemed unsafe
+ no further I/O will be permitted and the status will just
+ contain the string 'Fail'. The userspace recovery tools
+ should then be used.
+
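+ For illustration only (all numbers below are made up), a healthy pool
+ that passes discards down might report a status line like:
+
+   1 100/4161600 949/1048576 - discard_passdown rw
+
+ while a pool that has dropped to read-only metadata mode ends the line
+ with "ro", and a failed pool reports just "Fail".
+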
iii) Messages
create_thin <dev id>
@@ -329,3 +347,7 @@ regain some space then send the 'trim' message to the pool.
ii) Status
<nr mapped sectors> <highest mapped sector>
+
+ If the pool has encountered device errors and failed, the status
+ will just contain the string 'Fail'. The userspace recovery
+ tools should then be used.
diff --git a/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt
new file mode 100644
index 000000000000..94e642a33db0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt
@@ -0,0 +1,15 @@
+Calxeda Highbank L2 cache ECC
+
+Properties:
+- compatible : Should be "calxeda,hb-sregs-l2-ecc"
+- reg : Address and size for ECC error interrupt clear registers.
+- interrupts : Should be single bit error interrupt, then double bit error
+ interrupt.
+
+Example:
+
+ sregs@fff3c200 {
+ compatible = "calxeda,hb-sregs-l2-ecc";
+ reg = <0xfff3c200 0x100>;
+ interrupts = <0 71 4 0 72 4>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt
new file mode 100644
index 000000000000..f770ac0893d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt
@@ -0,0 +1,14 @@
+Calxeda DDR memory controller
+
+Properties:
+- compatible : Should be "calxeda,hb-ddr-ctrl"
+- reg : Address and size for DDR controller registers.
+- interrupts : Interrupt for DDR controller.
+
+Example:
+
+ memory-controller@fff00000 {
+ compatible = "calxeda,hb-ddr-ctrl";
+ reg = <0xfff00000 0x1000>;
+ interrupts = <0 91 4>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/mrvl/intc.txt b/Documentation/devicetree/bindings/arm/mrvl/intc.txt
index 80b9a94d9a23..8b53273cb22f 100644
--- a/Documentation/devicetree/bindings/arm/mrvl/intc.txt
+++ b/Documentation/devicetree/bindings/arm/mrvl/intc.txt
@@ -38,3 +38,23 @@ Example:
reg-names = "mux status", "mux mask";
mrvl,intc-nr-irqs = <2>;
};
+
+* Marvell Orion Interrupt controller
+
+Required properties
+- compatible : Should be "marvell,orion-intc".
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. Supported value is <1>.
+- interrupt-controller : Declare this node to be an interrupt controller.
+- reg : Interrupt mask address. A list of 4 byte ranges, one per controller.
+ One entry in the list represents 32 interrupts.
+
+Example:
+
+ intc: interrupt-controller {
+ compatible = "marvell,orion-intc", "marvell,intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0xfed20204 0x04>,
+ <0xfed20214 0x04>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt b/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
new file mode 100644
index 000000000000..93986a5a8018
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/cavium-compact-flash.txt
@@ -0,0 +1,30 @@
+* Compact Flash
+
+The Cavium Compact Flash device is connected to the Octeon Boot Bus,
+and is thus a child of the Boot Bus device. It can read and write
+industry standard compact flash devices.
+
+Properties:
+- compatible: "cavium,ebt3000-compact-flash";
+
+ Compatibility with many Cavium evaluation boards.
+
+- reg: The base address of the CF chip select banks. Depending on
+ the device configuration, there may be one or two banks.
+
+- cavium,bus-width: The width of the connection to the CF devices. Valid
+ values are 8 and 16.
+
+- cavium,true-ide: Optional, if present the CF connection is in True IDE mode.
+
+- cavium,dma-engine-handle: Optional, a phandle for the DMA Engine connected
+ to this device.
+
+Example:
+ compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt
new file mode 100644
index 000000000000..b5cdd20cde9c
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/marvell.txt
@@ -0,0 +1,16 @@
+* Marvell Orion SATA
+
+Required Properties:
+- compatible : "marvell,orion-sata"
+- reg : Address range of controller
+- interrupts : Interrupt number used by the controller
+- nr-ports : Number of SATA ports in use.
+
+Example:
+
+ sata@80000 {
+ compatible = "marvell,orion-sata";
+ reg = <0x80000 0x5000>;
+ interrupts = <21>;
+ nr-ports = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/cavium-octeon-gpio.txt b/Documentation/devicetree/bindings/gpio/cavium-octeon-gpio.txt
new file mode 100644
index 000000000000..9d6dcd3fe7f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/cavium-octeon-gpio.txt
@@ -0,0 +1,49 @@
+* General Purpose Input Output (GPIO) bus.
+
+Properties:
+- compatible: "cavium,octeon-3860-gpio"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the GPIO unit's register bank.
+
+- gpio-controller: This is a GPIO controller.
+
+- #gpio-cells: Must be <2>. The first cell is the GPIO pin.
+
+- interrupt-controller: The GPIO controller is also an interrupt
+ controller; many of its pins may be configured as an interrupt
+ source.
+
+- #interrupt-cells: Must be <2>. The first cell is the GPIO pin
+ connected to the interrupt source. The second cell is the interrupt
+ triggering protocol and may have one of four values:
+ 1 - edge triggered on the rising edge.
+ 2 - edge triggered on the falling edge
+ 4 - level triggered active high.
+ 8 - level triggered active low.
+
+- interrupts: Interrupt routing for each pin.
+
+Example:
+
+ gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ /* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
+ <0 20>, <0 21>, <0 22>, <0 23>,
+ <0 24>, <0 25>, <0 26>, <0 27>,
+ <0 28>, <0 29>, <0 30>, <0 31>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
index 4f3929713ae4..dbd22e0df21e 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
@@ -22,7 +22,7 @@ Required properties:
Example:
gpio0: gpio@73f84000 {
- compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f84000 0x4000>;
interrupts = <50 51>;
gpio-controller;
diff --git a/Documentation/devicetree/bindings/gpio/gpio-samsung.txt b/Documentation/devicetree/bindings/gpio/gpio-samsung.txt
index 8f50fe5e6c42..5375625e8cd2 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-samsung.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-samsung.txt
@@ -11,14 +11,15 @@ Required properties:
<[phandle of the gpio controller node]
[pin number within the gpio controller]
[mux function]
- [pull up/down]
+ [flags and pull up/down]
[drive strength]>
Values for gpio specifier:
- Pin number: is a value between 0 to 7.
- - Pull Up/Down: 0 - Pull Up/Down Disabled.
- 1 - Pull Down Enabled.
- 3 - Pull Up Enabled.
+ - Flags and Pull Up/Down: 0 - Pull Up/Down Disabled.
+ 1 - Pull Down Enabled.
+ 3 - Pull Up Enabled.
+ Bit 16 (0x00010000) - Input is active low.
- Drive Strength: 0 - 1x,
1 - 3x,
2 - 2x,
diff --git a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
index 05428f39d9ac..e13787498bcf 100644
--- a/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/mrvl-gpio.txt
@@ -27,3 +27,26 @@ Example:
interrupt-controller;
#interrupt-cells = <1>;
};
+
+* Marvell Orion GPIO Controller
+
+Required properties:
+- compatible : Should be "marvell,orion-gpio"
+- reg : Address and length of the register set for controller.
+- gpio-controller : So we know this is a gpio controller.
+- ngpio : How many gpios this controller has.
+- interrupts : Up to 4 Interrupts for the controller.
+
+Optional properties:
+- mask-offset : For SMP Orions, offset for Nth CPU
+
+Example:
+
+ gpio0: gpio@10100 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0x10100 0x40>;
+ ngpio = <32>;
+ interrupts = <35>, <36>, <37>, <38>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/cavium-i2c.txt b/Documentation/devicetree/bindings/i2c/cavium-i2c.txt
new file mode 100644
index 000000000000..dced82ebe31d
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/cavium-i2c.txt
@@ -0,0 +1,34 @@
+* Two Wire Serial Interface (TWSI) / I2C
+
+- compatible: "cavium,octeon-3860-twsi"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the TWSI/I2C bus controller register bank.
+
+- #address-cells: Must be <1>.
+
+- #size-cells: Must be <0>. I2C addresses have no size component.
+
+- interrupts: A single interrupt specifier.
+
+- clock-frequency: The I2C bus clock rate in Hz.
+
+Example:
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <0 45>;
+ clock-frequency = <100000>;
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ tmp@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio_i2c.txt b/Documentation/devicetree/bindings/i2c/gpio-i2c.txt
index 4f8ec947c6bd..4f8ec947c6bd 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/gpio-i2c.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
index 1bfc02de1b0c..30ac3a0557f7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
@@ -4,6 +4,8 @@ Required properties:
- compatible: Should be "fsl,<chip>-i2c"
- reg: Should contain registers location and length
- interrupts: Should contain ERROR and DMA interrupts
+- clock-frequency: Desired I2C bus clock frequency in Hz.
+ Only 100000Hz and 400000Hz modes are supported.
Examples:
@@ -13,4 +15,5 @@ i2c0: i2c@80058000 {
compatible = "fsl,imx28-i2c";
reg = <0x80058000 2000>;
interrupts = <111 68>;
+ clock-frequency = <100000>;
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
new file mode 100644
index 000000000000..c15781f4dc8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt
@@ -0,0 +1,33 @@
+Device tree configuration for i2c-ocores
+
+Required properties:
+- compatible : "opencores,i2c-ocores"
+- reg : bus address start and address range size of device
+- interrupts : interrupt number
+- clock-frequency : frequency of bus clock in Hz
+- #address-cells : should be <1>
+- #size-cells : should be <0>
+
+Optional properties:
+- reg-shift : device register offsets are shifted by this value
+- reg-io-width : io register width in bytes (1, 2 or 4)
+- regstep : deprecated, use reg-shift above
+
+Example:
+
+ i2c0: ocores@a0000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "opencores,i2c-ocores";
+ reg = <0xa0000000 0x8>;
+ interrupts = <10>;
+ clock-frequency = <20000000>;
+
+ reg-shift = <0>; /* 8 bit registers */
+ reg-io-width = <1>; /* 8 bit read/write */
+
+ dummy@60 {
+ compatible = "dummy";
+ reg = <0x60>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt b/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt
index b891ee218354..0f7945019f6f 100644
--- a/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt
@@ -1,4 +1,4 @@
-* I2C
+* Marvell MMP I2C controller
Required properties :
@@ -32,3 +32,20 @@ Examples:
interrupts = <58>;
};
+* Marvell MV64XXX I2C controller
+
+Required properties :
+
+ - reg : Offset and length of the register set for the device
+ - compatible : Should be "marvell,mv64xxx-i2c"
+ - interrupts : The interrupt number
+ - clock-frequency : Desired I2C bus clock frequency in Hz.
+
+Examples:
+
+ i2c@11000 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x20>;
+ interrupts = <29>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/ab8500.txt b/Documentation/devicetree/bindings/mfd/ab8500.txt
new file mode 100644
index 000000000000..69e757a657a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/ab8500.txt
@@ -0,0 +1,123 @@
+* AB8500 Multi-Functional Device (MFD)
+
+Required parent device properties:
+- compatible : contains "stericsson,ab8500";
+- interrupts : contains the IRQ line for the AB8500
+- interrupt-controller : describes the AB8500 as an Interrupt Controller (has its own domain)
+- #interrupt-cells : should be 2, for 2-cell format
+ - The first cell is the AB8500 local IRQ number
+ - The second cell is used to specify optional parameters
+ - bits[3:0] trigger type and level flags:
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+
+Optional parent device properties:
+- reg : contains the PRCMU mailbox address for the AB8500 i2c port
+
+The AB8500 consists of a large and varied group of sub-devices:
+
+Device IRQ Names Supply Names Description
+------ --------- ------------ -----------
+ab8500-bm : : : Battery Manager
+ab8500-btemp : : : Battery Temperature
+ab8500-charger : : : Battery Charger
+ab8500-fg : : : Fuel Gauge
+ab8500-gpadc : HW_CONV_END : vddadc : Analogue to Digital Converter
+ SW_CONV_END : :
+ab8500-gpio : : : GPIO Controller
+ab8500-ponkey : ONKEY_DBF : : Power-on Key
+ ONKEY_DBR : :
+ab8500-pwm : : : Pulse Width Modulator
+ab8500-regulator : : : Regulators
+ab8500-rtc : 60S : : Real Time Clock
+ : ALARM : :
+ab8500-sysctrl : : : System Control
+ab8500-usb : ID_WAKEUP_R : vddulpivio18 : Universal Serial Bus
+ : ID_WAKEUP_F : v-ape :
+ : VBUS_DET_F : musb_1v8 :
+ : VBUS_DET_R : :
+ : USB_LINK_STATUS : :
+ : USB_ADP_PROBE_PLUG : :
+ : USB_ADP_PROBE_UNPLUG : :
+
+Required child device properties:
+- compatible : "stericsson,ab8500-[bm|btemp|charger|fg|gpadc|gpio|ponkey|
+ pwm|regulator|rtc|sysctrl|usb]";
+
+Optional child device properties:
+- interrupts : contains the device IRQ(s) using the 2-cell format (see above)
+- interrupt-names : contains names of IRQ resource in the order in which they were
+ supplied in the interrupts property
+- <supply_name>-supply : contains a phandle to the regulator supply node in Device Tree
+
+ab8500@5 {
+ compatible = "stericsson,ab8500";
+ reg = <5>; /* mailbox 5 is i2c */
+ interrupts = <0 40 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ab8500-rtc {
+ compatible = "stericsson,ab8500-rtc";
+ interrupts = <17 0x4
+ 18 0x4>;
+ interrupt-names = "60S", "ALARM";
+ };
+
+ ab8500-gpadc {
+ compatible = "stericsson,ab8500-gpadc";
+ interrupts = <32 0x4
+ 39 0x4>;
+ interrupt-names = "HW_CONV_END", "SW_CONV_END";
+ vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ };
+
+ ab8500-usb {
+ compatible = "stericsson,ab8500-usb";
+ interrupts = < 90 0x4
+ 96 0x4
+ 14 0x4
+ 15 0x4
+ 79 0x4
+ 74 0x4
+ 75 0x4>;
+ interrupt-names = "ID_WAKEUP_R",
+ "ID_WAKEUP_F",
+ "VBUS_DET_F",
+ "VBUS_DET_R",
+ "USB_LINK_STATUS",
+ "USB_ADP_PROBE_PLUG",
+ "USB_ADP_PROBE_UNPLUG";
+ vddulpivio18-supply = <&ab8500_ldo_initcore_reg>;
+ v-ape-supply = <&db8500_vape_reg>;
+ musb_1v8-supply = <&db8500_vsmps2_reg>;
+ };
+
+ ab8500-ponkey {
+ compatible = "stericsson,ab8500-ponkey";
+ interrupts = <6 0x4
+ 7 0x4>;
+ interrupt-names = "ONKEY_DBF", "ONKEY_DBR";
+ };
+
+ ab8500-sysctrl {
+ compatible = "stericsson,ab8500-sysctrl";
+ };
+
+ ab8500-pwm {
+ compatible = "stericsson,ab8500-pwm";
+ };
+
+ ab8500-regulators {
+ compatible = "stericsson,ab8500-regulator";
+
+ ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+ /*
+ * See: Documentation/devicetree/bindings/regulator/regulator.txt
+ * for more information on regulators
+ */
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
new file mode 100644
index 000000000000..c6a3469d3436
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -0,0 +1,59 @@
+Maxim MAX77686 multi-function device
+
+MAX77686 is a multifunction device with a PMIC, an RTC and a charger on one
+chip. It is interfaced to the host controller over an I2C interface. The PMIC
+and charger submodules are addressed at the same I2C slave address, whereas
+the RTC submodule uses a different I2C slave address, for which an I2C client
+is currently created statically at probe time. This document describes the
+binding for the mfd device and the PMIC submodule.
+
+Required properties:
+- compatible : Must be "maxim,max77686";
+- reg : Specifies the i2c slave address of PMIC block.
+- interrupts : This i2c device has an IRQ line connected to the main SoC.
+- interrupt-parent : The parent interrupt controller.
+
+Optional node:
+- voltage-regulators : The regulators of max77686 have to be instantiated
+ under a subnode named "voltage-regulators" using the following format.
+
+ regulator_name {
+ regulator-compatible = LDOn/BUCKn
+ standard regulator constraints....
+ };
+ Refer to Documentation/devicetree/bindings/regulator/regulator.txt
+
+ The regulator-compatible property of each regulator should be initialized
+with a string that matches its hardware counterpart, as follows:
+
+ -LDOn : for LDOs, where n can lie in range 1 to 26.
+ example: LDO1, LDO2, LDO26.
+ -BUCKn : for BUCKs, where n can lie in range 1 to 9.
+ example: BUCK1, BUCK5, BUCK9.
+
+Example:
+
+ max77686@09 {
+ compatible = "maxim,max77686";
+ interrupt-parent = <&wakeup_eint>;
+ interrupts = <26 0>;
+ reg = <0x09>;
+
+ voltage-regulators {
+ ldo11_reg {
+ regulator-compatible = "LDO11";
+ regulator-name = "vdd_ldo11";
+ regulator-min-microvolt = <1900000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-always-on;
+ };
+
+ buck1_reg {
+ regulator-compatible = "BUCK1";
+ regulator-name = "vdd_mif";
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index d2802d4717bc..db03599ae4dc 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -81,7 +81,7 @@ Example:
ti,vmbch-threshold = 0;
ti,vmbch2-threshold = 0;
-
+ ti,en-ck32k-xtal;
ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
vcc1-supply = <&reg_parent>;
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
index bc67c6f424aa..c855240f3a0e 100644
--- a/Documentation/devicetree/bindings/mfd/twl6040.txt
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -6,7 +6,7 @@ They are connected ot the host processor via i2c for commands, McPDM for audio
data and commands.
Required properties:
-- compatible : Must be "ti,twl6040";
+- compatible : "ti,twl6040" for twl6040, "ti,twl6041" for twl6041
- reg: must be 0x4b for i2c address
- interrupts: twl6040 has one interrupt line connected to the main SoC
- interrupt-parent: The parent interrupt controller
diff --git a/Documentation/devicetree/bindings/mips/cavium/bootbus.txt b/Documentation/devicetree/bindings/mips/cavium/bootbus.txt
new file mode 100644
index 000000000000..6581478225a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/bootbus.txt
@@ -0,0 +1,126 @@
+* Boot Bus
+
+The Octeon Boot Bus is a configurable parallel bus with 8 chip
+selects. Each chip select is independently configurable.
+
+Properties:
+- compatible: "cavium,octeon-3860-bootbus"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the Boot Bus' register bank.
+
+- #address-cells: Must be <2>. The first cell is the chip select
+ within the bootbus. The second cell is the offset from the chip select.
+
+- #size-cells: Must be <1>.
+
+- ranges: There must be one triplet of (child-bus-address,
+ parent-bus-address, length) for each active chip select. If the
+ length element for any triplet is zero, the chip select is disabled,
+ making it inactive.
+
+The configuration parameters for each chip select are stored in child
+nodes.
+
+Configuration Properties:
+- compatible: "cavium,octeon-3860-bootbus-config"
+
+- cavium,cs-index: A single cell indicating the chip select that
+ corresponds to this configuration.
+
+- cavium,t-adr: A cell specifying the ADR timing (in nS).
+
+- cavium,t-ce: A cell specifying the CE timing (in nS).
+
+- cavium,t-oe: A cell specifying the OE timing (in nS).
+
+- cavium,t-we: A cell specifying the WE timing (in nS).
+
+- cavium,t-rd-hld: A cell specifying the RD_HLD timing (in nS).
+
+- cavium,t-wr-hld: A cell specifying the WR_HLD timing (in nS).
+
+- cavium,t-pause: A cell specifying the PAUSE timing (in nS).
+
+- cavium,t-wait: A cell specifying the WAIT timing (in nS).
+
+- cavium,t-page: A cell specifying the PAGE timing (in nS).
+
+- cavium,t-rd-dly: A cell specifying the RD_DLY timing (in nS).
+
+- cavium,pages: A cell specifying the PAGES parameter (0 = 8 bytes, 1
+ = 2 bytes, 2 = 4 bytes, 3 = 8 bytes).
+
+- cavium,wait-mode: Optional. If present, wait mode (WAITM) is selected.
+
+- cavium,page-mode: Optional. If present, page mode (PAGEM) is selected.
+
+- cavium,bus-width: A cell specifying the WIDTH parameter (in bits) of
+ the bus for this chip select.
+
+- cavium,ale-mode: Optional. If present, ALE mode is selected.
+
+- cavium,sam-mode: Optional. If present, SAM mode is selected.
+
+- cavium,or-mode: Optional. If present, OR mode is selected.
+
+Example:
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0x0 0x1d020000 0x10000>,
+ <5 0 0x0 0x1d040000 0x10000>,
+ <6 0 0x0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <20>;
+ cavium,t-ce = <60>;
+ cavium,t-oe = <60>;
+ cavium,t-we = <45>;
+ cavium,t-rd-hld = <35>;
+ cavium,t-wr-hld = <45>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ .
+ .
+ .
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <270>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <70>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+ .
+ .
+ .
+ };
diff --git a/Documentation/devicetree/bindings/mips/cavium/ciu.txt b/Documentation/devicetree/bindings/mips/cavium/ciu.txt
new file mode 100644
index 000000000000..2c2d0746b43d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/ciu.txt
@@ -0,0 +1,26 @@
+* Central Interrupt Unit
+
+Properties:
+- compatible: "cavium,octeon-3860-ciu"
+
+ Compatibility with all cn3XXX, cn5XXX and cn63XX SOCs.
+
+- interrupt-controller: This is an interrupt controller.
+
+- reg: The base address of the CIU's register bank.
+
+- #interrupt-cells: Must be <2>. The first cell is the bank within
+ the CIU and may have a value of 0 or 1. The second cell is the bit
+ within the bank and may have a value between 0 and 63.
+
+Example:
+ interrupt-controller@1070000000000 {
+ compatible = "cavium,octeon-3860-ciu";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 1)
+ * 2) Bit within the register (0..63)
+ */
+ #interrupt-cells = <2>;
+ reg = <0x10700 0x00000000 0x0 0x7000>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/cavium/ciu2.txt b/Documentation/devicetree/bindings/mips/cavium/ciu2.txt
new file mode 100644
index 000000000000..0ec7ba8bbbcb
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/ciu2.txt
@@ -0,0 +1,27 @@
+* Central Interrupt Unit
+
+Properties:
+- compatible: "cavium,octeon-6880-ciu2"
+
+ Compatibility with 68XX SOCs.
+
+- interrupt-controller: This is an interrupt controller.
+
+- reg: The base address of the CIU's register bank.
+
+- #interrupt-cells: Must be <2>. The first cell is the bank within
+ the CIU and may have a value between 0 and 63. The second cell is
+ the bit within the bank and may also have a value between 0 and 63.
+
+Example:
+ interrupt-controller@1070100000000 {
+ compatible = "cavium,octeon-6880-ciu2";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0..63)
+ * 2) Bit within the register (0..63)
+ */
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x10701 0x00000000 0x0 0x4000000>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt b/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt
new file mode 100644
index 000000000000..cb4291e3b1d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/dma-engine.txt
@@ -0,0 +1,21 @@
+* DMA Engine.
+
+The Octeon DMA Engine transfers data between the Boot Bus and main memory.
+The DMA Engine will be referred to by phandle by any device that is
+connected to it.
+
+Properties:
+- compatible: "cavium,octeon-5750-bootbus-dma"
+
+ Compatibility with all cn52XX, cn56XX and cn6XXX SOCs.
+
+- reg: The base address of the DMA Engine's register bank.
+
+- interrupts: A single interrupt specifier.
+
+Example:
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
diff --git a/Documentation/devicetree/bindings/mips/cavium/uctl.txt b/Documentation/devicetree/bindings/mips/cavium/uctl.txt
new file mode 100644
index 000000000000..aa66b9b8d801
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cavium/uctl.txt
@@ -0,0 +1,46 @@
+* UCTL USB controller glue
+
+Properties:
+- compatible: "cavium,octeon-6335-uctl"
+
+ Compatibility with all cn6XXX SOCs.
+
+- reg: The base address of the UCTL register bank.
+
+- #address-cells: Must be <2>.
+
+- #size-cells: Must be <2>.
+
+- ranges: Empty to signify direct mapping of the children.
+
+- refclk-frequency: A single cell containing the reference clock
+ frequency in Hz.
+
+- refclk-type: A string describing the reference clock connection:
+ either "crystal" or "external".
+
+Example:
+ uctl@118006f000000 {
+ compatible = "cavium,octeon-6335-uctl";
+ reg = <0x11800 0x6f000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <24000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+
+ ehci@16f0000000000 {
+ compatible = "cavium,octeon-6335-ehci","usb-ehci";
+ reg = <0x16f00 0x00000000 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ ohci@16f0000000400 {
+ compatible = "cavium,octeon-6335-ohci","usb-ohci";
+ reg = <0x16f00 0x00000400 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 70cd49b1caa8..1dd622546d06 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -10,8 +10,8 @@ Required properties:
- compatible : Should be "fsl,<chip>-esdhc"
Optional properties:
-- fsl,cd-internal : Indicate to use controller internal card detection
-- fsl,wp-internal : Indicate to use controller internal write protection
+- fsl,cd-controller : Indicate to use controller internal card detection
+- fsl,wp-controller : Indicate to use controller internal write protection
Examples:
@@ -19,8 +19,8 @@ esdhc@70004000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70004000 0x4000>;
interrupts = <1>;
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
};
esdhc@70008000 {
diff --git a/Documentation/devicetree/bindings/mtd/orion-nand.txt b/Documentation/devicetree/bindings/mtd/orion-nand.txt
index b2356b7d2fa4..2d6ab660e603 100644
--- a/Documentation/devicetree/bindings/mtd/orion-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/orion-nand.txt
@@ -1,7 +1,7 @@
NAND support for Marvell Orion SoC platforms
Required properties:
-- compatible : "mrvl,orion-nand".
+- compatible : "marvell,orion-nand".
- reg : Base physical address of the NAND and length of memory mapped
region
@@ -24,7 +24,7 @@ nand@f4000000 {
ale = <1>;
bank-width = <1>;
chip-delay = <25>;
- compatible = "mrvl,orion-nand";
+ compatible = "marvell,orion-nand";
reg = <0xf4000000 0x400>;
partition@0 {
diff --git a/Documentation/devicetree/bindings/net/cavium-mdio.txt b/Documentation/devicetree/bindings/net/cavium-mdio.txt
new file mode 100644
index 000000000000..04cb7491d232
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cavium-mdio.txt
@@ -0,0 +1,27 @@
+* System Management Interface (SMI) / MDIO
+
+Properties:
+- compatible: "cavium,octeon-3860-mdio"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the MDIO bus controller register bank.
+
+- #address-cells: Must be <1>.
+
+- #size-cells: Must be <0>. MDIO addresses have no size component.
+
+Typically an MDIO bus might have several children.
+
+Example:
+ mdio@1180000001800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001800 0x0 0x40>;
+
+ ethernet-phy@0 {
+ ...
+ reg = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/cavium-mix.txt b/Documentation/devicetree/bindings/net/cavium-mix.txt
new file mode 100644
index 000000000000..5da628db68bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cavium-mix.txt
@@ -0,0 +1,39 @@
+* MIX Ethernet controller.
+
+Properties:
+- compatible: "cavium,octeon-5750-mix"
+
+ Compatibility with all cn5XXX and cn6XXX SOCs populated with MIX
+ devices.
+
+- reg: The base addresses of four separate register banks. The first
+ bank contains the MIX registers. The second bank contains the
+ corresponding AGL registers. The third bank contains the AGL registers
+ shared by all MIX devices present. The fourth bank is the AGL_PRT_CTL
+ register shared by all MIX devices present.
+
+- cell-index: A single cell specifying which portion of the shared
+ register banks corresponds to this MIX device.
+
+- interrupts: Two interrupt specifiers. The first is the MIX
+ interrupt routing and the second the routing for the AGL interrupts.
+
+- mac-address: Optional, the MAC address to assign to the device.
+
+- local-mac-address: Optional, the MAC address to assign to the device
+ if mac-address is not specified.
+
+- phy-handle: Optional, a phandle for the PHY device connected to this device.
+
+Example:
+ ethernet@1070000100800 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100800 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000800 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002008 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <1>;
+ interrupts = <1 18>, < 1 46>;
+ local-mac-address = [ 00 0f b7 10 63 54 ];
+ phy-handle = <&phy1>;
+ };
diff --git a/Documentation/devicetree/bindings/net/cavium-pip.txt b/Documentation/devicetree/bindings/net/cavium-pip.txt
new file mode 100644
index 000000000000..d4c53ba04b3b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cavium-pip.txt
@@ -0,0 +1,98 @@
+* PIP Ethernet nexus.
+
+The PIP Ethernet nexus can control several data packet input/output
+devices. The devices have a two level grouping scheme. There may be
+several interfaces, and each interface may have several ports. Each
+port might be an individual Ethernet PHY.
+
+
+Properties for the PIP nexus:
+- compatible: "cavium,octeon-3860-pip"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the PIP's register bank.
+
+- #address-cells: Must be <1>.
+
+- #size-cells: Must be <0>.
+
+Properties for a PIP interface, which is a child of the PIP nexus:
+- compatible: "cavium,octeon-3860-pip-interface"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The interface number.
+
+- #address-cells: Must be <1>.
+
+- #size-cells: Must be <0>.
+
+Properties for a PIP port, which is a child of the PIP interface:
+- compatible: "cavium,octeon-3860-pip-port"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The port number within the interface group.
+
+- mac-address: Optional, the MAC address to assign to the device.
+
+- local-mac-address: Optional, the MAC address to assign to the device
+ if mac-address is not specified.
+
+- phy-handle: Optional, a phandle for the PHY device connected to this device.
+
+Example:
+
+ pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 0f b7 10 63 60 ];
+ phy-handle = <&phy2>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 0f b7 10 63 61 ];
+ phy-handle = <&phy3>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 0f b7 10 63 62 ];
+ phy-handle = <&phy4>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 0f b7 10 63 63 ];
+ phy-handle = <&phy5>;
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 0f b7 10 63 64 ];
+ phy-handle = <&phy6>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt b/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt
new file mode 100644
index 000000000000..cfe1db3bb6e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt
@@ -0,0 +1,12 @@
+LPC32XX PWM controller
+
+Required properties:
+- compatible: should be "nxp,lpc3220-pwm"
+- reg: physical base address and length of the controller's registers
+
+Examples:
+
+pwm@0x4005C000 {
+ compatible = "nxp,lpc3220-pwm";
+ reg = <0x4005C000 0x8>;
+};
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
new file mode 100644
index 000000000000..b16f4a57d111
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
@@ -0,0 +1,17 @@
+Freescale MXS PWM controller
+
+Required properties:
+- compatible: should be "fsl,imx23-pwm"
+- reg: physical base address and length of the controller's registers
+- #pwm-cells: should be 2. The first cell specifies the per-chip index
+ of the PWM to use and the second cell is the PWM period in nanoseconds.
+- fsl,pwm-number: the number of PWM devices
+
+Example:
+
+pwm: pwm@80064000 {
+ compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
+ reg = <0x80064000 2000>;
+ #pwm-cells = <2>;
+ fsl,pwm-number = <8>;
+};
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
new file mode 100644
index 000000000000..bbbeedb4ec05
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -0,0 +1,18 @@
+Tegra SoC PWFM controller
+
+Required properties:
+- compatible: should be one of:
+ - "nvidia,tegra20-pwm"
+ - "nvidia,tegra30-pwm"
+- reg: physical base address and length of the controller's registers
+- #pwm-cells: On Tegra the number of cells used to specify a PWM is 2. The
+ first cell specifies the per-chip index of the PWM to use and the second
+ cell is the PWM period in nanoseconds.
+
+Example:
+
+ pwm: pwm@7000a000 {
+ compatible = "nvidia,tegra20-pwm";
+ reg = <0x7000a000 0x100>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt
new file mode 100644
index 000000000000..73ec962bfe8c
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/pwm.txt
@@ -0,0 +1,57 @@
+Specifying PWM information for devices
+======================================
+
+1) PWM user nodes
+-----------------
+
+PWM users should specify a list of PWM devices that they want to use
+with a property containing a 'pwm-list':
+
+ pwm-list ::= <single-pwm> [pwm-list]
+ single-pwm ::= <pwm-phandle> <pwm-specifier>
+ pwm-phandle : phandle to PWM controller node
+ pwm-specifier : array of #pwm-cells specifying the given PWM
+ (controller specific)
+
+PWM properties should be named "pwms". The exact meaning of each pwms
+property must be documented in the device tree binding for each device.
+An optional property "pwm-names" may contain a list of strings to label
+each of the PWM devices listed in the "pwms" property. If no "pwm-names"
+property is given, the name of the user node will be used as fallback.
+
+Drivers for devices that use more than a single PWM device can use the
+"pwm-names" property to map the name of the PWM device requested by the
+pwm_get() call to an index into the list given by the "pwms" property.
+
+The following example could be used to describe a PWM-based backlight
+device:
+
+ pwm: pwm {
+ #pwm-cells = <2>;
+ };
+
+ [...]
+
+ bl: backlight {
+ pwms = <&pwm 0 5000000>;
+ pwm-names = "backlight";
+ };
+
+pwm-specifier typically encodes the chip-relative PWM number and the PWM
+period in nanoseconds. Note that in the example above, specifying the
+"pwm-names" is redundant because the name "backlight" would be used as
+fallback anyway.
+
+2) PWM controller nodes
+-----------------------
+
+PWM controller nodes must specify the number of cells used for the
+specifier using the '#pwm-cells' property.
+
+An example PWM controller might look like this:
+
+ pwm: pwm@7000a000 {
+ compatible = "nvidia,tegra20-pwm";
+ reg = <0x7000a000 0x100>;
+ #pwm-cells = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt
index d156e1b5db12..da80c2ae0915 100644
--- a/Documentation/devicetree/bindings/regulator/tps6586x.txt
+++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt
@@ -9,9 +9,9 @@ Required properties:
- regulators: list of regulators provided by this controller, must have
property "regulator-compatible" to match their hardware counterparts:
sm[0-2], ldo[0-9] and ldo_rtc
-- sm0-supply: The input supply for the SM0.
-- sm1-supply: The input supply for the SM1.
-- sm2-supply: The input supply for the SM2.
+- vin-sm0-supply: The input supply for the SM0.
+- vin-sm1-supply: The input supply for the SM1.
+- vin-sm2-supply: The input supply for the SM2.
- vinldo01-supply: The input supply for the LDO1 and LDO2
- vinldo23-supply: The input supply for the LDO2 and LDO3
- vinldo4-supply: The input supply for the LDO4
@@ -30,9 +30,9 @@ Example:
#gpio-cells = <2>;
gpio-controller;
- sm0-supply = <&some_reg>;
- sm1-supply = <&some_reg>;
- sm2-supply = <&some_reg>;
+ vin-sm0-supply = <&some_reg>;
+ vin-sm1-supply = <&some_reg>;
+ vin-sm2-supply = <&some_reg>;
vinldo01-supply = <...>;
vinldo23-supply = <...>;
vinldo4-supply = <...>;
diff --git a/Documentation/devicetree/bindings/serial/cavium-uart.txt b/Documentation/devicetree/bindings/serial/cavium-uart.txt
new file mode 100644
index 000000000000..87a6c375cd44
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/cavium-uart.txt
@@ -0,0 +1,19 @@
+* Universal Asynchronous Receiver/Transmitter (UART)
+
+- compatible: "cavium,octeon-3860-uart"
+
+ Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs.
+
+- reg: The base address of the UART register bank.
+
+- interrupts: A single interrupt specifier.
+
+- current-speed: Optional, the current bit rate in bits per second.
+
+Example:
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ current-speed = <115200>;
+ interrupts = <0 35>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/spear-thermal.txt b/Documentation/devicetree/bindings/thermal/spear-thermal.txt
new file mode 100644
index 000000000000..93e3b67c102d
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/spear-thermal.txt
@@ -0,0 +1,14 @@
+* SPEAr Thermal
+
+Required properties:
+- compatible : "st,thermal-spear1340"
+- reg : Address range of the thermal registers
+- st,thermal-flags: flags used to enable thermal sensor
+
+Example:
+
+ thermal@fc000000 {
+ compatible = "st,thermal-spear1340";
+ reg = <0xfc000000 0x1000>;
+ st,thermal-flags = <0x7000>;
+ };
diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
index b8b27b0aca10..0847fdeee11a 100644
--- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
@@ -9,6 +9,7 @@ Required properties:
- "ns16750"
- "ns16850"
- "nvidia,tegra20-uart"
+ - "nxp,lpc3220-uart"
- "ibm,qpace-nwp-serial"
- "serial" if the port type is unknown.
- reg : offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt
new file mode 100644
index 000000000000..1e4fc727f3b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt
@@ -0,0 +1,28 @@
+pwm-backlight bindings
+
+Required properties:
+ - compatible: "pwm-backlight"
+ - pwms: OF device-tree PWM specification (see PWM binding[0])
+ - brightness-levels: Array of distinct brightness levels. Typically these
+ are in the range from 0 to 255, but any range starting at 0 will do.
+ The actual brightness level (PWM duty cycle) will be interpolated
+ from these values. 0 means a 0% duty cycle (darkest/off), while the
+ last value in the array represents a 100% duty cycle (brightest).
+ - default-brightness-level: the default brightness level (index into the
+ array defined by the "brightness-levels" property)
+
+Optional properties:
+ - pwm-names: a list of names for the PWM devices specified in the
+ "pwms" property (see PWM binding[0])
+
+[0]: Documentation/devicetree/bindings/pwm/pwm.txt
+
+Example:
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 5000000>;
+
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/marvel.txt b/Documentation/devicetree/bindings/watchdog/marvel.txt
new file mode 100644
index 000000000000..0b2503ab0a05
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/marvel.txt
@@ -0,0 +1,14 @@
+* Marvell Orion Watchdog Timer
+
+Required Properties:
+
+- Compatibility : "marvell,orion-wdt"
+- reg : Address of the timer registers
+
+Example:
+
+ wdt@20300 {
+ compatible = "marvell,orion-wdt";
+ reg = <0x20300 0x28>;
+ status = "okay";
+ };
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b4a898f43c37..39462cf35cd4 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -150,7 +150,6 @@ keywords.c
ksym.c*
ksym.h*
kxgettext
-lkc_defs.h
lex.c
lex.*.c
linux
diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware
index fbb241174486..12d3952e83d5 100755
--- a/Documentation/dvb/get_dvb_firmware
+++ b/Documentation/dvb/get_dvb_firmware
@@ -29,7 +29,7 @@ use IO::Handle;
"af9015", "ngene", "az6027", "lme2510_lg", "lme2510c_s7395",
"lme2510c_s7395_old", "drxk", "drxk_terratec_h5",
"drxk_hauppauge_hvr930c", "tda10071", "it9135", "it9137",
- "drxk_pctv");
+ "drxk_pctv", "drxk_terratec_htc_stick", "sms1xxx_hcw");
# Check args
syntax() if (scalar(@ARGV) != 1);
@@ -676,6 +676,24 @@ sub drxk_terratec_h5 {
"$fwfile"
}
+sub drxk_terratec_htc_stick {
+ my $url = "http://ftp.terratec.de/Receiver/Cinergy_HTC_Stick/Updates/";
+ my $zipfile = "Cinergy_HTC_Stick_Drv_5.09.1202.00_XP_Vista_7.exe";
+ my $hash = "6722a2442a05423b781721fbc069ed5e";
+ my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 0);
+ my $drvfile = "Cinergy HTC Stick/BDA Driver 5.09.1202.00/Windows 32 Bit/emOEM.sys";
+ my $fwfile = "dvb-usb-terratec-htc-stick-drxk.fw";
+
+ checkstandard();
+
+ wgetfile($zipfile, $url . $zipfile);
+ verify($zipfile, $hash);
+ unzip($zipfile, $tmpdir);
+ extract("$tmpdir/$drvfile", 0x4e5c0, 42692, "$fwfile");
+
+ "$fwfile"
+}
+
sub it9135 {
my $sourcefile = "dvb-usb-it9135.zip";
my $url = "http://www.ite.com.tw/uploads/firmware/v3.6.0.0/$sourcefile";
@@ -748,6 +766,28 @@ sub drxk_pctv {
"$fwfile";
}
+sub sms1xxx_hcw {
+ my $url = "http://steventoth.net/linux/sms1xxx/";
+ my %files = (
+ 'sms1xxx-hcw-55xxx-dvbt-01.fw' => "afb6f9fb9a71d64392e8564ef9577e5a",
+ 'sms1xxx-hcw-55xxx-dvbt-02.fw' => "b44807098ba26e52cbedeadc052ba58f",
+ 'sms1xxx-hcw-55xxx-isdbt-02.fw' => "dae934eeea85225acbd63ce6cfe1c9e4",
+ );
+
+ checkstandard();
+
+ my $allfiles;
+ foreach my $fwfile (keys %files) {
+ wgetfile($fwfile, "$url/$fwfile");
+ verify($fwfile, $files{$fwfile});
+ $allfiles .= " $fwfile";
+ }
+
+ $allfiles =~ s/^\s//;
+
+ $allfiles;
+}
+
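+# Illustrative usage of the new entries (the component name is the
+# script's single argument; run from this directory):
+#
+#   ./get_dvb_firmware drxk_terratec_htc_stick
+#   ./get_dvb_firmware sms1xxx_hcw
+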
# ---------------------------------------------------------------
# Utilities
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 03df2b020332..56c7e936430f 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -232,116 +232,20 @@ EDAC control and attribute files.
In 'mcX' directories are EDAC control and attribute files for
-this 'X' instance of the memory controllers:
-
-
-Counter reset control file:
-
- 'reset_counters'
-
- This write-only control file will zero all the statistical counters
- for UE and CE errors. Zeroing the counters will also reset the timer
- indicating how long since the last counter zero. This is useful
- for computing errors/time. Since the counters are always reset at
- driver initialization time, no module/kernel parameter is available.
-
- RUN TIME: echo "anything" >/sys/devices/system/edac/mc/mc0/counter_reset
-
- This resets the counters on memory controller 0
-
-
-Seconds since last counter reset control file:
-
- 'seconds_since_reset'
-
- This attribute file displays how many seconds have elapsed since the
- last counter reset. This can be used with the error counters to
- measure error rates.
-
-
-
-Memory Controller name attribute file:
-
- 'mc_name'
-
- This attribute file displays the type of memory controller
- that is being utilized.
-
-
-Total memory managed by this memory controller attribute file:
-
- 'size_mb'
-
- This attribute file displays, in count of megabytes, of memory
- that this instance of memory controller manages.
-
-
-Total Uncorrectable Errors count attribute file:
-
- 'ue_count'
-
- This attribute file displays the total count of uncorrectable
- errors that have occurred on this memory controller. If panic_on_ue
- is set this counter will not have a chance to increment,
- since EDAC will panic the system.
-
-
-Total UE count that had no information attribute fileY:
-
- 'ue_noinfo_count'
-
- This attribute file displays the number of UEs that have occurred
- with no information as to which DIMM slot is having errors.
-
-
-Total Correctable Errors count attribute file:
-
- 'ce_count'
-
- This attribute file displays the total count of correctable
- errors that have occurred on this memory controller. This
- count is very important to examine. CEs provide early
- indications that a DIMM is beginning to fail. This count
- field should be monitored for non-zero values and report
- such information to the system administrator.
-
-
-Total Correctable Errors count attribute file:
-
- 'ce_noinfo_count'
-
- This attribute file displays the number of CEs that
- have occurred wherewith no information as to which DIMM slot
- is having errors. Memory is handicapped, but operational,
- yet no information is available to indicate which slot
- the failing memory is in. This count field should be also
- be monitored for non-zero values.
-
-Device Symlink:
-
- 'device'
-
- Symlink to the memory controller device.
-
-Sdram memory scrubbing rate:
-
- 'sdram_scrub_rate'
-
- Read/Write attribute file that controls memory scrubbing. The scrubbing
- rate is set by writing a minimum bandwidth in bytes/sec to the attribute
- file. The rate will be translated to an internal value that gives at
- least the specified rate.
-
- Reading the file will return the actual scrubbing rate employed.
-
- If configuration fails or memory scrubbing is not implemented, accessing
- that attribute will fail.
+this 'X' instance of the memory controllers.
+For a description of the sysfs API, please see:
+ Documentation/ABI/testing/sysfs-devices-edac
============================================================================
'csrowX' DIRECTORIES
+When CONFIG_EDAC_LEGACY_SYSFS is enabled, sysfs will contain the
+csrowX directories. As this API doesn't work properly for Rambus, FB-DIMMs
+and modern Intel Memory Controllers, this is being deprecated in favor
+of dimmX directories.
+
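+As a purely illustrative example (the number of controllers, DIMMs and
+csrows varies from system to system), with CONFIG_EDAC_LEGACY_SYSFS
+enabled a controller directory may then expose both the new per-DIMM and
+the legacy per-csrow nodes:
+
+	# ls /sys/devices/system/edac/mc/mc0
+	ce_count  ce_noinfo_count  csrow0  csrow1  dimm0  dimm1  mc_name
+	reset_counters  sdram_scrub_rate  seconds_since_reset  size_mb
+	ue_count  ue_noinfo_count
+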
In the 'csrowX' directories are EDAC control and attribute files for
this 'X' instance of csrow:
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index ba4be8b77093..4cf1a2a6bd72 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -240,3 +240,30 @@ trap "echo 0 > /sys/kernel/debug/$FAILTYPE/probability" SIGINT SIGTERM EXIT
echo "Injecting errors into the module $module... (interrupt to stop)"
sleep 1000000
+Tool to run command with failslab or fail_page_alloc
+----------------------------------------------------
+To make it easier to accomplish the tasks mentioned above, we can use
+tools/testing/fault-injection/failcmd.sh. Run
+"./tools/testing/fault-injection/failcmd.sh --help" for more information and
+see the following examples.
+
+Examples:
+
+Run the command "make -C tools/testing/selftests/ run_tests" while injecting
+slab allocation failures.
+
+ # ./tools/testing/fault-injection/failcmd.sh \
+ -- make -C tools/testing/selftests/ run_tests
+
+Same as above, except that at most 100 failures are injected instead of the
+default of at most one.
+
+ # ./tools/testing/fault-injection/failcmd.sh --times=100 \
+ -- make -C tools/testing/selftests/ run_tests
+
+Same as above, except that page allocation failures are injected instead of
+slab allocation failures.
+
+ # env FAILCMD_TYPE=fail_page_alloc \
+ ./tools/testing/fault-injection/failcmd.sh --times=100 \
+ -- make -C tools/testing/selftests/ run_tests
diff --git a/Documentation/fault-injection/notifier-error-inject.txt b/Documentation/fault-injection/notifier-error-inject.txt
new file mode 100644
index 000000000000..c83526c364e5
--- /dev/null
+++ b/Documentation/fault-injection/notifier-error-inject.txt
@@ -0,0 +1,99 @@
+Notifier error injection
+========================
+
+Notifier error injection provides the ability to inject artificial errors into
+specified notifier chain callbacks. It is useful for testing the error handling
+of notifier call chains, which is rarely exercised. There are kernel
+modules that can be used to test the following notifiers.
+
+ * CPU notifier
+ * PM notifier
+ * Memory hotplug notifier
+ * powerpc pSeries reconfig notifier
+
+CPU notifier error injection module
+-----------------------------------
+This feature can be used to test the error handling of the CPU notifiers by
+injecting artificial errors into CPU notifier chain callbacks.
+
+To make the notifier call chain fail when a given event is notified, write
+the desired error code to the debugfs interface
+/sys/kernel/debug/notifier-error-inject/cpu/actions/<notifier event>/error
+
+Possible CPU notifier events to be failed are:
+
+ * CPU_UP_PREPARE
+ * CPU_UP_PREPARE_FROZEN
+ * CPU_DOWN_PREPARE
+ * CPU_DOWN_PREPARE_FROZEN
+
+Example 1: Inject CPU offline error (-1 == -EPERM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/cpu
+ # echo -1 > actions/CPU_DOWN_PREPARE/error
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ bash: echo: write error: Operation not permitted
+
+Example 2: Inject CPU online error (-2 == -ENOENT)
+
+ # echo -2 > actions/CPU_UP_PREPARE/error
+ # echo 1 > /sys/devices/system/cpu/cpu1/online
+ bash: echo: write error: No such file or directory
+
+PM notifier error injection module
+----------------------------------
+This feature is controlled through debugfs interface
+/sys/kernel/debug/notifier-error-inject/pm/actions/<notifier event>/error
+
+Possible PM notifier events to be failed are:
+
+ * PM_HIBERNATION_PREPARE
+ * PM_SUSPEND_PREPARE
+ * PM_RESTORE_PREPARE
+
+Example: Inject PM suspend error (-12 == -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/pm/
+ # echo -12 > actions/PM_SUSPEND_PREPARE/error
+ # echo mem > /sys/power/state
+ bash: echo: write error: Cannot allocate memory
+
+Memory hotplug notifier error injection module
+----------------------------------------------
+This feature is controlled through debugfs interface
+/sys/kernel/debug/notifier-error-inject/memory/actions/<notifier event>/error
+
+Possible memory notifier events to be failed are:
+
+ * MEM_GOING_ONLINE
+ * MEM_GOING_OFFLINE
+
+Example: Inject memory hotplug offline error (-12 == -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/memory
+ # echo -12 > actions/MEM_GOING_OFFLINE/error
+ # echo offline > /sys/devices/system/memory/memoryXXX/state
+ bash: echo: write error: Cannot allocate memory
+
+powerpc pSeries reconfig notifier error injection module
+--------------------------------------------------------
+This feature is controlled through debugfs interface
+/sys/kernel/debug/notifier-error-inject/pSeries-reconfig/actions/<notifier event>/error
+
+Possible pSeries reconfig notifier events to be failed are:
+
+ * PSERIES_RECONFIG_ADD
+ * PSERIES_RECONFIG_REMOVE
+ * PSERIES_DRCONF_MEM_ADD
+ * PSERIES_DRCONF_MEM_REMOVE
+
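+By analogy with the modules above, a hypothetical session (error code and
+event chosen only for illustration) would look like:
+
+ # cd /sys/kernel/debug/notifier-error-inject/pSeries-reconfig
+ # echo -12 > actions/PSERIES_DRCONF_MEM_REMOVE/error
+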
+For more usage examples
+-----------------------
+There are scripts under tools/testing/selftests that use the notifier error
+injection features for CPU and memory notifiers.
+
+ * tools/testing/selftests/cpu-hotplug/on-off-test.sh
+ * tools/testing/selftests/memory-hotplug/on-off-test.sh
+
+These scripts first do simple online and offline tests and then do fault
+injection tests if the notifier error injection module is available.
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 61d1a89baeaf..f4d8c7105fcd 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -13,6 +13,14 @@ Who: Jim Cromie <jim.cromie@gmail.com>, Jason Baron <jbaron@redhat.com>
---------------------------
+What: /proc/sys/vm/nr_pdflush_threads
+When: 2012
+Why: Since pdflush is deprecated, the interface exported in /proc/sys/vm/
+ should be removed.
+Who: Wanpeng Li <liwp@linux.vnet.ibm.com>
+
+---------------------------
+
What: CONFIG_APM_CPU_IDLE, and its ability to call APM BIOS in idle
When: 2012
Why: This optional sub-feature of APM is of dubious reliability,
@@ -70,20 +78,6 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
-What: IRQF_SAMPLE_RANDOM
-Check: IRQF_SAMPLE_RANDOM
-When: July 2009
-
-Why: Many of IRQF_SAMPLE_RANDOM users are technically bogus as entropy
- sources in the kernel's current entropy model. To resolve this, every
- input point to the kernel's entropy pool needs to better document the
- type of entropy source it actually is. This will be replaced with
- additional add_*_randomness functions in drivers/char/random.c
-
-Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
-
----------------------------
-
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
@@ -512,14 +506,6 @@ Who: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
----------------------------
-What: kmap_atomic(page, km_type)
-When: 3.5
-Why: The old kmap_atomic() with two arguments is deprecated, we only
- keep it for backward compatibility for few cycles and then drop it.
-Who: Cong Wang <amwang@redhat.com>
-
-----------------------------
-
What: get_robust_list syscall
When: 2013
Why: There appear to be no production users of the get_robust_list syscall,
@@ -593,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more
----------------------------
What: at91-mci driver ("CONFIG_MMC_AT91")
-When: 3.7
+When: 3.8
Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
was added to atmel-mci as a first step to support more chips.
Then at91-mci was kept only for old IP versions (on at91rm9200 and
@@ -608,3 +594,46 @@ When: June 2013
Why: Unsupported/unmaintained/unused since 2.6
----------------------------
+
+What: V4L2 selections API target rectangle and flags unification, the
+ following definitions will be removed: V4L2_SEL_TGT_CROP_ACTIVE,
+ V4L2_SEL_TGT_COMPOSE_ACTIVE, V4L2_SUBDEV_SEL_*, V4L2_SUBDEV_SEL_FLAG_*
+ in favor of common V4L2_SEL_TGT_* and V4L2_SEL_FLAG_* definitions.
+ For more details see include/linux/v4l2-common.h.
+When: 3.8
+Why: The regular V4L2 selections and the subdev selection API originally
+ defined distinct names for the target rectangles and flags - V4L2_SEL_*
+ and V4L2_SUBDEV_SEL_*. However, it turned out that the meaning of these
+ target rectangles is virtually identical and the APIs were consolidated
+ to use a single set of names - V4L2_SEL_*. This didn't involve any ABI
+ changes. Alias definitions were created for the original ones to avoid
+ any instabilities in the user space interface. After a few cycles these
+ backward compatibility definitions will be removed.
+Who: Sylwester Nawrocki <sylvester.nawrocki@gmail.com>
+
+----------------------------
+
+What: Using V4L2_CAP_VIDEO_CAPTURE and V4L2_CAP_VIDEO_OUTPUT flags
+ to indicate a V4L2 memory-to-memory device capability
+When: 3.8
+Why: New drivers should use the new V4L2_CAP_VIDEO_M2M capability flag
+ to indicate a V4L2 video memory-to-memory (M2M) device, and
+ applications can now identify an M2M video device by checking
+ for V4L2_CAP_VIDEO_M2M with the VIDIOC_QUERYCAP ioctl. Using ORed
+ V4L2_CAP_VIDEO_CAPTURE and V4L2_CAP_VIDEO_OUTPUT flags for M2M
+ devices is ambiguous and may lead, for example, to identifying
+ an M2M device as a video capture or output device.
+Who: Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+----------------------------
+
+What: OMAP private DMA implementation
+When: 2013
+Why: We have a DMA engine implementation; all users should be updated
+ to use this rather than persisting with the old APIs. The old APIs
+ block merging the old DMA engine implementation into the DMA
+ engine driver.
+Who: Russell King <linux@arm.linux.org.uk>,
+ Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index e0cce2a5f820..e540a24e5d06 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -114,7 +114,6 @@ prototypes:
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -136,10 +135,9 @@ write_inode:
drop_inode: !!!inode->i_lock!!!
evict_inode:
put_super: write
-write_super: read
sync_fs: read
-freeze_fs: read
-unfreeze_fs: read
+freeze_fs: write
+unfreeze_fs: write
statfs: maybe(read) (see below)
remount_fs: write
umount_begin: no
@@ -206,6 +204,8 @@ prototypes:
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
+ int (*swap_activate)(struct file *);
+ int (*swap_deactivate)(struct file *);
locking rules:
All except set_page_dirty and freepage may block
@@ -229,6 +229,8 @@ migratepage: yes (both)
launder_page: yes
is_partially_uptodate: yes
error_remove_page: yes
+swap_activate: no
+swap_deactivate: no
->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).
@@ -330,6 +332,15 @@ cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
+ ->swap_activate will be called with a non-zero argument on
+files backing (non block device backed) swapfiles. A return value
+of zero indicates success, in which case this file can be used for
+backing swapspace. The swapspace operations will be proxied to the
+address space operations.
+
+ ->swap_deactivate() will be called in the sys_swapoff()
+path after ->swap_activate() returned success.
+
----------------------- file_lock_operations ------------------------------
prototypes:
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
@@ -346,7 +357,6 @@ prototypes:
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
- void (*lm_release_private)(struct file_lock *);
void (*lm_break)(struct file_lock *); /* break_lease callback */
int (*lm_change)(struct file_lock **, int);
@@ -355,7 +365,6 @@ locking rules:
lm_compare_owner: yes no
lm_notify: yes no
lm_grant: no no
-lm_release_private: maybe no
lm_break: yes no
lm_change yes no
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 2bef2b3843d1..0742feebc6e2 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -94,9 +94,8 @@ protected.
---
[mandatory]
-BKL is also moved from around sb operations. ->write_super() Is now called
-without BKL held. BKL should have been shifted into individual fs sb_op
-functions. If you don't need it, remove it.
+BKL is also moved from around sb operations. BKL should have been shifted into
+individual fs sb_op functions. If you don't need it, remove it.
---
[informational]
diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt
index ead764b2728f..de1e6c4dccff 100644
--- a/Documentation/filesystems/vfat.txt
+++ b/Documentation/filesystems/vfat.txt
@@ -137,6 +137,17 @@ errors=panic|continue|remount-ro
without doing anything or remount the partition in
read-only mode (default behavior).
+discard -- If set, issues discard/TRIM commands to the block
+ device when blocks are freed. This is useful for SSD devices
+ and sparse/thinly-provisioned LUNs.
+
+nfs -- This option maintains an index (cache) of directory
+ inodes by i_logstart, which is used by the nfs-related code to
+ improve look-ups.
+
+ Enable this only if you want to export the FAT filesystem
+ over NFS.
+
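+ A hypothetical mount invocation combining the options above (the
+ device and mount point are placeholders only):
+
+ # mount -t vfat -o discard,nfs /dev/sdb1 /mnt/flash
+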
<bool>: 0,1,yes,no,true,false
TODO
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index aa754e01464e..2ee133e030c3 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -216,7 +216,6 @@ struct super_operations {
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -273,9 +272,6 @@ or bottom half).
put_super: called when the VFS wishes to free the superblock
(i.e. unmount). This is called with the superblock lock held
- write_super: called when the VFS superblock needs to be written to
- disc. This method is optional
-
sync_fs: called when VFS is writing out all dirty data associated with
a superblock. The second parameter indicates whether the method
should wait until the write out has been completed. Optional.
@@ -592,6 +588,8 @@ struct address_space_operations {
int (*migratepage) (struct page *, struct page *);
int (*launder_page) (struct page *);
int (*error_remove_page) (struct mapping *mapping, struct page *page);
+ int (*swap_activate)(struct file *);
+ int (*swap_deactivate)(struct file *);
};
writepage: called by the VM to write a dirty page to backing store.
@@ -760,6 +758,16 @@ struct address_space_operations {
Setting this implies you deal with pages going away under you,
unless you have them locked or reference counts increased.
+ swap_activate: Called when swapon is used on a file to allocate
+ space if necessary and pin the block lookup information in
+ memory. A return value of zero indicates success,
+ in which case this file can be used to back swapspace. The
+ swapspace operations will be proxied to this address space's
+ ->swap_{out,in} methods.
+
+ swap_deactivate: Called during swapoff on files where swap_activate
+ was successful.
+
The File Object
===============
diff --git a/Documentation/input/edt-ft5x06.txt b/Documentation/input/edt-ft5x06.txt
new file mode 100644
index 000000000000..2032f0b7a8fa
--- /dev/null
+++ b/Documentation/input/edt-ft5x06.txt
@@ -0,0 +1,54 @@
+EDT ft5x06 based Polytouch devices
+----------------------------------
+
+The edt-ft5x06 driver is useful for the EDT "Polytouch" family of capacitive
+touch screens. Note that it is *not* suitable for other devices based on the
+FocalTech ft5x06 chips, since they contain vendor-specific firmware. In
+particular, this driver is not suitable for the Nook tablet.
+
+It has been tested with the following devices:
+ * EP0350M06
+ * EP0430M06
+ * EP0570M06
+ * EP0700M06
+
+The driver allows configuration of the touch screen via a set of sysfs files:
+
+/sys/class/input/eventX/device/device/threshold:
+ allows setting the "click"-threshold in the range from 20 to 80.
+
+/sys/class/input/eventX/device/device/gain:
+ allows setting the sensitivity in the range from 0 to 31. Note that
+ lower values indicate higher sensitivity.
+
+/sys/class/input/eventX/device/device/offset:
+ allows setting the edge compensation in the range from 0 to 31.
+
+/sys/class/input/eventX/device/device/report_rate:
+ allows setting the report rate in the range from 3 to 14.
+
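+A hypothetical tuning session using the attributes above (event2 is only
+a placeholder for the actual eventX node):
+
+ # cd /sys/class/input/event2/device/device
+ # echo 50 > threshold
+ # echo 8 > gain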
+
+For debugging purposes the driver provides a few files in the debug
+filesystem (if available in the kernel). In /sys/kernel/debug/edt_ft5x06
+you'll find the following files:
+
+num_x, num_y:
+ (readonly) contains the number of sensor fields in X- and
+ Y-direction.
+
+mode:
+ allows switching the sensor between "factory mode" and "operation
+ mode" by writing "1" or "0" to it. In factory mode (1) it is
+ possible to get the raw data from the sensor. Note that in factory
+ mode regular events don't get delivered and the options described
+ above are unavailable.
+
+raw_data:
+ contains num_x * num_y big-endian 16-bit values describing the raw
+ values for each sensor field. Note that each read() call on this
+ file triggers a new readout. It is recommended to provide a buffer
+ big enough to contain num_x * num_y * 2 bytes.
+
+Note that reading raw_data gives an I/O error when the device is not in factory
+mode. The same happens when reading/writing to the parameter files when the
+device is not in regular operation mode.
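+
+A hypothetical raw-data readout session (choose a dd block size of at
+least num_x * num_y * 2 bytes; 4096 is only an illustrative value):
+
+ # cd /sys/kernel/debug/edt_ft5x06
+ # echo 1 > mode
+ # dd if=raw_data of=/tmp/raw.bin bs=4096 count=1
+ # echo 0 > mode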
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 915f28c470e9..849b771c5e03 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -88,6 +88,7 @@ Code Seq#(hex) Include File Comments
and kernel/power/user.c
'8' all SNP8023 advanced NIC card
<mailto:mcr@solidum.com>
+';' 64-7F linux/vfio.h
'@' 00-0F linux/radeonfb.h conflict!
'@' 00-0F drivers/video/aty/aty128fb.c conflict!
'A' 00-1F linux/apm_bios.h conflict!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c2619ef44a72..ad7e2e5088c1 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
coherent_pool=nn[KMG] [ARM,KNL]
Sets the size of memory pool for coherent, atomic dma
- allocations if Contiguous Memory Allocator (CMA) is used.
+ allocations, by default set to 256K.
code_bytes [X86] How many bytes of object code to print
in an oops report.
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 0bf25eebce94..4ebbfc3f1c6e 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio. Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
#
#DIRTY_BACKGROUND_RATIO=5
@@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'}
#
# Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been
-# exceeded, the kernel will wake pdflush which will then reduce the amount
-# of dirty memory to dirty_background_ratio. Set this nice and low, so once
-# some writeout has commenced, we do a lot of it.
+# exceeded, the kernel will wake flusher threads which will then reduce the
+# amount of dirty memory to dirty_background_ratio. Set this nice and low,
+# so once some writeout has commenced, we do a lot of it.
#
DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'}
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 29f481df32c7..5fefe374892f 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -6,3 +6,5 @@ leds-lp5521.txt
- notes on how to use the leds-lp5521 driver.
leds-lp5523.txt
- notes on how to use the leds-lp5523 driver.
+leds-lm3556.txt
+ - notes on how to use the leds-lm3556 driver.
diff --git a/Documentation/leds/leds-blinkm.txt b/Documentation/leds/leds-blinkm.txt
new file mode 100644
index 000000000000..9dd92f4cf4e1
--- /dev/null
+++ b/Documentation/leds/leds-blinkm.txt
@@ -0,0 +1,80 @@
+The leds-blinkm driver supports the devices of the BlinkM family.
+
+They are RGB-LED modules driven by a (AT)tiny microcontroller and
+communicate through I2C. The default address of these modules is
+0x09 but this can be changed through a command. This allows you to
+daisy-chain up to 127 BlinkMs on an I2C bus.
+
+The device accepts RGB and HSB color values through separate commands.
+You can also store blinking sequences as "scripts" in
+the controller and run them. Fading is supported as well.
+
+The interface this driver provides is 2-fold:
+
+a) LED class interface for use with triggers
+############################################
+
+The registration follows the scheme:
+blinkm-<i2c-bus-nr>-<i2c-device-nr>-<color>
+
+$ ls -h /sys/class/leds/blinkm-6-*
+/sys/class/leds/blinkm-6-9-blue:
+brightness device max_brightness power subsystem trigger uevent
+
+/sys/class/leds/blinkm-6-9-green:
+brightness device max_brightness power subsystem trigger uevent
+
+/sys/class/leds/blinkm-6-9-red:
+brightness device max_brightness power subsystem trigger uevent
+
+(the same can be found under /sys/bus/i2c/devices/6-0009/leds)
+
+The red, green and blue components can be controlled separately, and
+triggers can be assigned to each color.
+
+E.g.:
+
+$ cat blinkm-6-9-blue/brightness
+05
+
+$ echo 200 > blinkm-6-9-blue/brightness
+$
+
+$ modprobe ledtrig-heartbeat
+$ echo heartbeat > blinkm-6-9-green/trigger
+$
+
+
+b) Sysfs group to control rgb, fade, hsb, scripts ...
+#####################################################
+
+This extended interface is available as the folder "blinkm"
+in the sysfs directory of the I2C device,
+e.g. below /sys/bus/i2c/devices/6-0009/blinkm.
+
+$ ls -h /sys/bus/i2c/devices/6-0009/blinkm/
+blue green red test
+
+Currently only setting the red, green and blue values and
+starting a test sequence are supported.
+
+E.g.:
+
+$ cat *
+00
+00
+00
+#Write into test to start test sequence!#
+
+$ echo 1 > test
+$
+
+$ echo 255 > red
+$
+
+
+
+as of 6/2012
+
+dl9pf <at> gmx <dot> de
+
diff --git a/Documentation/leds/leds-lm3556.txt b/Documentation/leds/leds-lm3556.txt
new file mode 100644
index 000000000000..d9eb91b51913
--- /dev/null
+++ b/Documentation/leds/leds-lm3556.txt
@@ -0,0 +1,85 @@
+Kernel driver for lm3556
+========================
+
+*Texas Instruments:
+ 1.5 A Synchronous Boost LED Flash Driver w/ High-Side Current Source
+* Datasheet: http://www.national.com/ds/LM/LM3556.pdf
+
+Authors:
+ Daniel Jeong
+ Contact: Daniel Jeong (daniel.jeong-at-ti.com, gshark.jeong-at-gmail.com)
+
+Description
+-----------
+The LM3556 provides three functions: Flash, Torch and Indicator.
+
+FLASH MODE
+In Flash Mode, the LED current source (LED) provides 16 target current levels
+from 93.75 mA to 1500 mA. The Flash currents are adjusted via the CURRENT
+CONTROL REGISTER (0x09). Flash mode is activated by the ENABLE REGISTER (0x0A),
+or by pulling the STROBE pin HIGH.
+LM3556 Flash can be controlled through the /sys/class/leds/flash/brightness file.
+* If the STROBE pin is enabled, the example below controls brightness only;
+ON / OFF will be controlled by the STROBE pin.
+
+Flash Example:
+OFF : # echo 0 > /sys/class/leds/flash/brightness
+93.75 mA: # echo 1 > /sys/class/leds/flash/brightness
+... .....
+1500 mA: # echo 16 > /sys/class/leds/flash/brightness
+
+TORCH MODE
+In Torch Mode, the current source (LED) is programmed via the CURRENT CONTROL
+REGISTER (0x09). Torch Mode is activated by the ENABLE REGISTER (0x0A) or by the
+hardware TORCH input.
+LM3556 torch can be controlled through the /sys/class/leds/torch/brightness file.
+* If the TORCH pin is enabled, the example below controls brightness only,
+and ON / OFF will be controlled by the TORCH pin.
+
+Torch Example:
+OFF : # echo 0 > /sys/class/leds/torch/brightness
+46.88 mA: # echo 1 > /sys/class/leds/torch/brightness
+... .....
+375 mA : # echo 8 > /sys/class/leds/torch/brightness
+
+INDICATOR MODE
+Indicator pattern can be set through the /sys/class/leds/indicator/pattern file,
+and 4 patterns are pre-defined in the indicator_pattern array.
+According to the N-Blank, Pulse time and N Period values, different patterns will
+be generated. If you want new patterns for your own device, change the
+indicator_pattern array with your own values and INDIC_PATTERN_SIZE.
+Please refer to the datasheet for more details about N-Blank, Pulse time and N Period.
+
+Indicator pattern example:
+pattern 0: # echo 0 > /sys/class/leds/indicator/pattern
+....
+pattern 3: # echo 3 > /sys/class/leds/indicator/pattern
+
+Indicator brightness can be controlled through the
+/sys/class/leds/indicator/brightness file.
+
+Example:
+OFF : # echo 0 > /sys/class/leds/indicator/brightness
+5.86 mA : # echo 1 > /sys/class/leds/indicator/brightness
+........
+46.875 mA : # echo 8 > /sys/class/leds/indicator/brightness
+
+Notes
+-----
+The driver expects to be registered using the i2c_board_info mechanism.
+To register the chip at address 0x63 on a specific adapter, set the platform data
+according to include/linux/platform_data/leds-lm3556.h and set the I2C board info:
+
+Example:
+ static struct i2c_board_info __initdata board_i2c_ch4[] = {
+ {
+ I2C_BOARD_INFO(LM3556_NAME, 0x63),
+ .platform_data = &lm3556_pdata,
+ },
+ };
+
+and register it in the platform init function
+
+Example:
+ board_register_i2c_bus(4, 400,
+ board_i2c_ch4, ARRAY_SIZE(board_i2c_ch4));
diff --git a/Documentation/leds/ledtrig-oneshot.txt b/Documentation/leds/ledtrig-oneshot.txt
new file mode 100644
index 000000000000..07cd1fa41a3a
--- /dev/null
+++ b/Documentation/leds/ledtrig-oneshot.txt
@@ -0,0 +1,59 @@
+One-shot LED Trigger
+====================
+
+This is a LED trigger useful for signaling the user of an event where there are
+no clear trap points to put standard led-on and led-off settings. Using this
+trigger, the application needs only to signal the trigger when an event has
+happened; the trigger then turns the LED on and then keeps it off for a
+specified amount of time.
+
+This trigger is meant to be usable both for sporadic and dense events. In the
+first case, the trigger produces a clear single controlled blink for each
+event, while in the latter it keeps blinking at a constant rate, so as to
+signal that the events are arriving continuously.
+
+A one-shot LED only stays in a constant state when there are no events. An
+additional "invert" property specifies if the LED has to stay off (normal) or
+on (inverted) when not rearmed.
+
+The trigger can be activated from user space on led class devices as shown
+below:
+
+ echo oneshot > trigger
+
+This adds the following sysfs attributes to the LED:
+
+ delay_on - specifies for how many milliseconds the LED has to stay at
+ LED_FULL brightness after it has been armed.
+ Defaults to 100 ms.
+
+ delay_off - specifies for how many milliseconds the LED has to stay at
+ LED_OFF brightness after it has been armed.
+ Defaults to 100 ms.
+
+ invert - reverse the blink logic. If set to 0 (default) blink on for delay_on
+ ms, then blink off for delay_off ms, leaving the LED normally off. If
+ set to 1, blink off for delay_off ms, then blink on for delay_on ms,
+ leaving the LED normally on.
+ Setting this value also immediately changes the LED state.
+
+ shot - write any non-empty string to signal an event; this starts a blink
+ sequence if not already running.
+
+Example use-case: network devices, initialization:
+
+ echo oneshot > trigger # set trigger for this led
+ echo 33 > delay_on # blink at 1 / (33 + 33) Hz on continuous traffic
+ echo 33 > delay_off
+
+interface goes up:
+
+ echo 1 > invert # set led as normally-on, turn the led on
+
+packet received/transmitted:
+
+ echo 1 > shot # led starts blinking, ignored if already blinking
+
+interface goes down
+
+ echo 0 > invert # set led as normally-off, turn the led off
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 406a5226220d..ca447b35b833 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -48,12 +48,6 @@ min_adv_mss - INTEGER
The advertised MSS depends on the first hop route MTU, but will
never be lower than this setting.
-rt_cache_rebuild_count - INTEGER
- The per net-namespace route cache emergency rebuild threshold.
- Any net-namespace having its route cache rebuilt due to
- a hash bucket chain being too long more than this many times
- will have its route caching disabled
-
IP Fragmentation:
ipfrag_high_thresh - INTEGER
diff --git a/Documentation/networking/netconsole.txt b/Documentation/networking/netconsole.txt
index 8d022073e3ef..2e9e0ae2cd45 100644
--- a/Documentation/networking/netconsole.txt
+++ b/Documentation/networking/netconsole.txt
@@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is
initialized and attempts to bring up the supplied dev at the supplied
address.
-The remote host can run either 'netcat -u -l -p <port>',
-'nc -l -u <port>' or syslogd.
+The remote host has several options to receive the kernel messages,
+for example:
+
+1) syslogd
+
+2) netcat
+
+ On distributions using a BSD-based netcat version (e.g. Fedora,
+ openSUSE and Ubuntu) the listening port must be specified without
+ the -p switch:
+
+ 'nc -u -l -p <port>' / 'nc -u -l <port>' or
+ 'netcat -u -l -p <port>' / 'netcat -u -l <port>'
+
+3) socat
+
+ 'socat udp-recv:<port> -'
Dynamic reconfiguration:
========================
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index e40f4b4e1977..1479aca23744 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = {
static struct pinctrl_map __initdata mapping[] = {
PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"),
- PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
- PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
- PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
+ PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
+ PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
+ PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
};
Finally, some devices expect the mapping table to contain certain specific
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 211831d4095f..2f0ddc15b5ac 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -112,14 +112,24 @@ CHARGE_COUNTER - the current charge counter (in µAh). This could easily
be negative; there is no empty or full value. It is only useful for
relative, time-based measurements.
+CONSTANT_CHARGE_CURRENT - constant charge current programmed by charger.
+
+CONSTANT_CHARGE_VOLTAGE - constant charge voltage programmed by charger.
+
ENERGY_FULL, ENERGY_EMPTY - same as above but for energy.
CAPACITY - capacity in percents.
+CAPACITY_ALERT_MIN - minimum capacity alert value in percents.
+CAPACITY_ALERT_MAX - maximum capacity alert value in percents.
CAPACITY_LEVEL - capacity level. This corresponds to
POWER_SUPPLY_CAPACITY_LEVEL_*.
TEMP - temperature of the power supply.
+TEMP_ALERT_MIN - minimum battery temperature alert value in milli centigrade.
+TEMP_ALERT_MAX - maximum battery temperature alert value in milli centigrade.
TEMP_AMBIENT - ambient temperature.
+TEMP_AMBIENT_ALERT_MIN - minimum ambient temperature alert value in milli centigrade.
+TEMP_AMBIENT_ALERT_MAX - maximum ambient temperature alert value in milli centigrade.
TIME_TO_EMPTY - seconds left for battery to be considered empty (i.e.
while battery powers a load)
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5df176ed59b8..7561d7ed8e11 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -53,9 +53,20 @@ Struct Resources:
For printing struct resources. The 'R' and 'r' specifiers result in a
printed resource with ('R') or without ('r') a decoded flags member.
+Raw buffer as a hex string:
+ %*ph 00 01 02 ... 3f
+ %*phC 00:01:02: ... :3f
+ %*phD 00-01-02- ... -3f
+ %*phN 000102 ... 3f
+
+ For printing small buffers (up to 64 bytes long) as a hex string with a
+ certain separator. For larger buffers consider using
+ print_hex_dump().
+
MAC/FDDI addresses:
%pM 00:01:02:03:04:05
+ %pMR 05:04:03:02:01:00
%pMF 00-01-02-03-04-05
%pm 000102030405
@@ -67,6 +78,10 @@ MAC/FDDI addresses:
the 'M' specifier to use dash ('-') separators instead of the default
separator.
+ For Bluetooth addresses the 'R' specifier shall be used after the 'M'
+ specifier to use reversed byte order suitable for visual interpretation
+ of Bluetooth addresses, which are stored in little-endian order.
+
IPv4 addresses:
%pI4 1.2.3.4
diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt
new file mode 100644
index 000000000000..554290ebab94
--- /dev/null
+++ b/Documentation/pwm.txt
@@ -0,0 +1,76 @@
+Pulse Width Modulation (PWM) interface
+
+This provides an overview of the Linux PWM interface.
+
+PWMs are commonly used for controlling LEDs, fans or vibrators in
+cell phones. PWMs with a fixed purpose have no need to implement
+the Linux PWM API (although they could). However, PWMs are often
+found as discrete devices on SoCs which have no fixed purpose. It's
+up to the board designer to connect them to LEDs or fans. To provide
+this kind of flexibility the generic PWM API exists.
+
+Identifying PWMs
+----------------
+
+Users of the legacy PWM API use unique IDs to refer to PWM devices.
+
+Instead of referring to a PWM device via its unique ID, board setup code
+should instead register a static mapping that can be used to match PWM
+consumers to providers, as given in the following example:
+
+ static struct pwm_lookup board_pwm_lookup[] = {
+ PWM_LOOKUP("tegra-pwm", 0, "pwm-backlight", NULL),
+ };
+
+ static void __init board_init(void)
+ {
+ ...
+ pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
+ ...
+ }
+
+Using PWMs
+----------
+
+Legacy users can request a PWM device using pwm_request() and free it
+after usage with pwm_free().
+
+New users should use the pwm_get() function and pass to it the consumer
+device or a consumer name. pwm_put() is used to free the PWM device.
+
+After being requested a PWM has to be configured using:
+
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
+
+To start/stop toggling the PWM output use pwm_enable()/pwm_disable().
+
+Implementing a PWM driver
+-------------------------
+
+Currently there are two ways to implement PWM drivers. Traditionally
+there has only been the barebone API, meaning that each driver has
+to implement the pwm_*() functions itself. This means that it's impossible
+to have multiple PWM drivers in the system. For this reason it's mandatory
+for new drivers to use the generic PWM framework.
+
+A new PWM controller/chip can be added using pwmchip_add() and removed
+again with pwmchip_remove(). pwmchip_add() takes a filled-in struct
+pwm_chip as its argument, which provides a description of the PWM chip, the
+number of PWM devices provided by the chip and the chip-specific
+implementation of the supported PWM operations to the framework.
+
+Locking
+-------
+
+The PWM core list manipulations are protected by a mutex, so pwm_request()
+and pwm_free() may not be called from an atomic context. Currently the
+PWM core does not enforce any locking to pwm_enable(), pwm_disable() and
+pwm_config(), so the calling context is currently driver specific. This
+is an issue derived from the former barebone API and should be fixed soon.
+
+Helpers
+-------
+
+Currently a PWM can only be configured with period_ns and duty_ns. For several
+use cases freq_hz and duty_percent might be better. Instead of calculating
+this in your driver please consider adding appropriate helpers to the framework.
diff --git a/Documentation/remoteproc.txt b/Documentation/remoteproc.txt
index 70a048cd3fa3..23a09b884bc7 100644
--- a/Documentation/remoteproc.txt
+++ b/Documentation/remoteproc.txt
@@ -36,8 +36,7 @@ cost.
Note: to use this function you should already have a valid rproc
handle. There are several ways to achieve that cleanly (devres, pdata,
the way remoteproc_rpmsg.c does this, or, if this becomes prevalent, we
- might also consider using dev_archdata for this). See also
- rproc_get_by_name() below.
+ might also consider using dev_archdata for this).
void rproc_shutdown(struct rproc *rproc)
- Power off a remote processor (previously booted with rproc_boot()).
@@ -51,30 +50,6 @@ cost.
which means that the @rproc handle stays valid even after
rproc_shutdown() returns, and users can still use it with a subsequent
rproc_boot(), if needed.
- - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
- because rproc_shutdown() _does not_ decrement the refcount of @rproc.
- To decrement the refcount of @rproc, use rproc_put() (but _only_ if
- you acquired @rproc using rproc_get_by_name()).
-
- struct rproc *rproc_get_by_name(const char *name)
- - Find an rproc handle using the remote processor's name, and then
- boot it. If it's already powered on, then just immediately return
- (successfully). Returns the rproc handle on success, and NULL on failure.
- This function increments the remote processor's refcount, so always
- use rproc_put() to decrement it back once rproc isn't needed anymore.
- Note: currently rproc_get_by_name() and rproc_put() are not used anymore
- by the rpmsg bus and its drivers. We need to scrutinize the use cases
- that still need them, and see if we can migrate them to use the non
- name-based boot/shutdown interface.
-
- void rproc_put(struct rproc *rproc)
- - Decrement @rproc's power refcount and shut it down if it reaches zero
- (essentially by just calling rproc_shutdown), and then decrement @rproc's
- validity refcount too.
- After this function returns, @rproc may _not_ be used anymore, and its
- handle should be considered invalid.
- This function should be called _iff_ the @rproc handle was grabbed by
- calling rproc_get_by_name().
3. Typical usage
@@ -115,21 +90,21 @@ int dummy_rproc_example(struct rproc *my_rproc)
This function should be used by rproc implementations during
initialization of the remote processor.
After creating an rproc handle using this function, and when ready,
- implementations should then call rproc_register() to complete
+ implementations should then call rproc_add() to complete
the registration of the remote processor.
On success, the new rproc is returned, and on failure, NULL.
Note: _never_ directly deallocate @rproc, even if it was not registered
- yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
+ yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
- void rproc_free(struct rproc *rproc)
+ void rproc_put(struct rproc *rproc)
- Free an rproc handle that was allocated by rproc_alloc.
- This function should _only_ be used if @rproc was only allocated,
- but not registered yet.
- If @rproc was already successfully registered (by calling
- rproc_register()), then use rproc_unregister() instead.
+ This function essentially unrolls rproc_alloc(), by decrementing the
+ rproc's refcount. It doesn't directly free rproc; that would happen
+ only if there are no other references to rproc and its refcount now
+ dropped to zero.
- int rproc_register(struct rproc *rproc)
+ int rproc_add(struct rproc *rproc)
- Register @rproc with the remoteproc framework, after it has been
allocated with rproc_alloc().
This is called by the platform-specific rproc implementation, whenever
@@ -142,20 +117,15 @@ int dummy_rproc_example(struct rproc *my_rproc)
of registering this remote processor, additional virtio drivers might get
probed.
- int rproc_unregister(struct rproc *rproc)
- - Unregister a remote processor, and decrement its refcount.
- If its refcount drops to zero, then @rproc will be freed. If not,
- it will be freed later once the last reference is dropped.
-
+ int rproc_del(struct rproc *rproc)
+ - Unroll rproc_add().
This function should be called when the platform specific rproc
implementation decides to remove the rproc device. it should
- _only_ be called if a previous invocation of rproc_register()
+ _only_ be called if a previous invocation of rproc_add()
has completed successfully.
- After rproc_unregister() returns, @rproc is _not_ valid anymore and
- it shouldn't be used. More specifically, don't call rproc_free()
- or try to directly free @rproc after rproc_unregister() returns;
- none of these are needed, and calling them is a bug.
+ After rproc_del() returns, @rproc is still valid, and its
+ last refcount should be decremented by calling rproc_put().
Returns 0 on success and -EINVAL if @rproc isn't valid.
diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt
index e369de2d48cd..dd908cf64ecf 100644
--- a/Documentation/security/Yama.txt
+++ b/Documentation/security/Yama.txt
@@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...)
so that any otherwise allowed process (even those in external pid namespaces)
may attach.
-These restrictions do not change how ptrace via PTRACE_TRACEME operates.
-
-The sysctl settings are:
+The sysctl settings (writable only with CAP_SYS_PTRACE) are:
0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other
process running under the same uid, as long as it is dumpable (i.e.
did not transition uids, start privileged, or have called
- prctl(PR_SET_DUMPABLE...) already).
+ prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is
+ unchanged.
1 - restricted ptrace: a process must have a predefined relationship
with the inferior it wants to call PTRACE_ATTACH on. By default,
@@ -61,12 +60,13 @@ The sysctl settings are:
classic criteria is also met. To change the relationship, an
inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare
an allowed debugger PID to call PTRACE_ATTACH on the inferior.
+ Using PTRACE_TRACEME is unchanged.
2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace
- with PTRACE_ATTACH.
+ with PTRACE_ATTACH, or through children calling PTRACE_TRACEME.
-3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set,
- this sysctl cannot be changed to a lower value.
+3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via
+ PTRACE_TRACEME. Once set, this sysctl value cannot be changed.
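+
+For example, an administrator could restrict attaching at runtime with
+something like the following (assuming the usual Yama sysctl path):
+
+ # echo 2 > /proc/sys/kernel/yama/ptrace_scope
+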
The original children-only logic was based on the restrictions in grsecurity.
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 7456360e161c..a92bba816843 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -53,6 +53,7 @@ ALC882/883/885/888/889
acer-aspire-8930g Acer Aspire 8330G/6935G
acer-aspire Acer Aspire others
inv-dmic Inverted internal mic workaround
+ no-primary-hp VAIO Z workaround (for fixed speaker DAC)
ALC861/660
==========
@@ -273,6 +274,10 @@ STAC92HD83*
dell-s14 Dell laptop
dell-vostro-3500 Dell Vostro 3500 laptop
hp-dv7-4000 HP dv-7 4000
+ hp_cNB11_intquad HP CNB models with 4 speakers
+ hp-zephyr HP Zephyr
+ hp-led HP with broken BIOS for mute LED
+ hp-inv-led HP with broken BIOS for inverted mute LED
auto BIOS setup (default)
STAC9872
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 13d6166d7a27..88152f214f48 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
- nr_open
- overflowuid
- overflowgid
+- protected_hardlinks
+- protected_symlinks
- suid_dumpable
- super-max
- super-nr
@@ -157,22 +159,68 @@ The default is 65534.
==============================================================
+protected_hardlinks:
+
+A long-standing class of security issues is the hardlink-based
+time-of-check-time-of-use race, most commonly seen in world-writable
+directories like /tmp. The common method of exploitation of this flaw
+is to cross privilege boundaries when following a given hardlink (i.e. a
+root process follows a hardlink created by another user). Additionally,
+on systems without separated partitions, this stops unauthorized users
+from "pinning" vulnerable setuid/setgid files against being upgraded by
+the administrator, or linking to special files.
+
+When set to "0", hardlink creation behavior is unrestricted.
+
+When set to "1" hardlinks cannot be created by users if they do not
+already own the source file, or do not have read/write access to it.
+
+This protection is based on the restrictions in Openwall and grsecurity.
+
+==============================================================
+
+protected_symlinks:
+
+A long-standing class of security issues is the symlink-based
+time-of-check-time-of-use race, most commonly seen in world-writable
+directories like /tmp. The common method of exploitation of this flaw
+is to cross privilege boundaries when following a given symlink (i.e. a
+root process follows a symlink belonging to another user). For a likely
+incomplete list of hundreds of examples across the years, please see:
+http://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=/tmp
+
+When set to "0", symlink following behavior is unrestricted.
+
+When set to "1" symlinks are permitted to be followed only when outside
+a sticky world-writable directory, or when the uid of the symlink and
+follower match, or when the directory owner matches the symlink's owner.
+
+This protection is based on the restrictions in Openwall and grsecurity.
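+
+For example, both protections could be enabled at runtime with:
+
+ # echo 1 > /proc/sys/fs/protected_hardlinks
+ # echo 1 > /proc/sys/fs/protected_symlinks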
+
+==============================================================
+
suid_dumpable:
This value can be used to query and set the core dump mode for setuid
or otherwise protected/tainted binaries. The modes are
0 - (default) - traditional behaviour. Any process which has changed
- privilege levels or is execute only will not be dumped
+ privilege levels or is execute only will not be dumped.
1 - (debug) - all processes dump core when possible. The core dump is
owned by the current user and no security is applied. This is
intended for system debugging situations only. Ptrace is unchecked.
+ This is insecure as it allows regular users to examine the memory
+ contents of privileged processes.
2 - (suidsafe) - any binary which normally would not be dumped is dumped
- readable by root only. This allows the end user to remove
- such a dump but not access it directly. For security reasons
- core dumps in this mode will not overwrite one another or
- other files. This mode is appropriate when administrators are
- attempting to debug problems in a normal environment.
+ anyway, but only if the "core_pattern" kernel sysctl is set to
+ either a pipe handler or a fully qualified path. (For more details
+ on this limitation, see CVE-2006-2451.) This mode is appropriate
+ when administrators are attempting to debug problems in a normal
+ environment, and either have a core dump pipe handler that knows
+ to treat privileged core dumps with care, or a specific directory
+ defined for catching core dumps. If a core dump happens without
+ a pipe handler or fully qualified path, a message will be emitted
+ to syslog warning about the lack of a correct setting.
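+
+ A hypothetical setup for mode 2 using a fully qualified core pattern
+ (the path is only an example):
+
+ # echo "/var/crash/core.%e.%p" > /proc/sys/kernel/core_pattern
+ # echo 2 > /proc/sys/fs/suid_dumpable
+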
==============================================================
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 96f0ee825bed..078701fdbd4d 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -42,7 +42,6 @@ Currently, these files are in /proc/sys/vm:
- mmap_min_addr
- nr_hugepages
- nr_overcommit_hugepages
-- nr_pdflush_threads
- nr_trim_pages (only if CONFIG_MMU=n)
- numa_zonelist_order
- oom_dump_tasks
@@ -77,8 +76,8 @@ huge pages although processes will also directly compact memory as required.
dirty_background_bytes
-Contains the amount of dirty memory at which the pdflush background writeback
-daemon will start writeback.
+Contains the amount of dirty memory at which the background kernel
+flusher threads will start writeback.
Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only
one of them may be specified at a time. When one sysctl is written it is
@@ -90,7 +89,7 @@ other appears as 0 when read.
dirty_background_ratio
Contains, as a percentage of total system memory, the number of pages at which
-the pdflush background writeback daemon will start writing out dirty data.
+the background kernel flusher threads will start writing out dirty data.
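+
+For example, a hypothetical, fairly low setting of 5 percent:
+
+ # echo 5 > /proc/sys/vm/dirty_background_ratio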
==============================================================
@@ -113,9 +112,9 @@ retained.
dirty_expire_centisecs
This tunable is used to define when dirty data is old enough to be eligible
-for writeout by the pdflush daemons. It is expressed in 100'ths of a second.
-Data which has been dirty in-memory for longer than this interval will be
-written out next time a pdflush daemon wakes up.
+for writeout by the kernel flusher threads. It is expressed in 100'ths
+of a second. Data which has been dirty in-memory for longer than this
+interval will be written out next time a flusher thread wakes up.
==============================================================
@@ -129,7 +128,7 @@ data.
dirty_writeback_centisecs
-The pdflush writeback daemons will periodically wake up and write `old' data
+The kernel flusher threads will periodically wake up and write `old' data
out to disk. This tunable expresses the interval between those wakeups, in
100'ths of a second.
@@ -426,16 +425,6 @@ See Documentation/vm/hugetlbpage.txt
==============================================================
-nr_pdflush_threads
-
-The current number of pdflush threads. This value is read-only.
-The value changes according to the number of dirty pages in the system.
-
-When necessary, additional pdflush threads are created, one per second, up to
-nr_pdflush_threads_max.
-
-==============================================================
-
nr_trim_pages
This is available only on NOMMU kernels.
@@ -502,9 +491,10 @@ oom_dump_tasks
Enables a system-wide task dump (excluding kernel threads) to be
produced when the kernel performs an OOM-killing and includes such
-information as pid, uid, tgid, vm size, rss, cpu, oom_adj score, and
-name. This is helpful to determine why the OOM killer was invoked
-and to identify the rogue task that caused it.
+information as pid, uid, tgid, vm size, rss, nr_ptes, swapents,
+oom_score_adj score, and name. This is helpful to determine why the
+OOM killer was invoked, to identify the rogue task that caused it,
+and to determine why the OOM killer chose the task it did to kill.
If this is set to zero, this information is suppressed. On very
large systems with thousands of tasks it may not be feasible to dump
@@ -574,16 +564,24 @@ of physical RAM. See above.
page-cluster
-page-cluster controls the number of pages which are written to swap in
-a single attempt. The swap I/O size.
+page-cluster controls the number of pages up to which consecutive pages
+are read in from swap in a single attempt. This is the swap counterpart
+to page cache readahead.
+The pages are consecutive not in terms of virtual/physical addresses,
+but consecutive in swap space - that means they were swapped out together.
It is a logarithmic value - setting it to zero means "1 page", setting
it to 1 means "2 pages", setting it to 2 means "4 pages", etc.
+Zero disables swap readahead completely.
The default value is three (eight pages at a time). There may be some
small benefits in tuning this to a different value if your workload is
swap-intensive.
+Lower values mean lower latencies for initial faults, but at the same time
+extra faults and I/O delays for following faults if they would have been part
+of the consecutive pages that readahead would have brought in.
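+
+For example, swap readahead can be disabled entirely with:
+
+ # echo 0 > /proc/sys/vm/page-cluster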
+
=============================================================
panic_on_oom
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index 1733ab947a95..c087dbcf3535 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -32,7 +32,8 @@ temperature) and throttle appropriate devices.
1.1 thermal zone device interface
1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name,
- int trips, void *devdata, struct thermal_zone_device_ops *ops)
+ int trips, int mask, void *devdata,
+ struct thermal_zone_device_ops *ops)
This interface function adds a new thermal zone device (sensor) to
/sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the
@@ -40,16 +41,17 @@ temperature) and throttle appropriate devices.
name: the thermal zone name.
trips: the total number of trip points this thermal zone supports.
+ mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable.
devdata: device private data
ops: thermal zone device call-backs.
.bind: bind the thermal zone device with a thermal cooling device.
.unbind: unbind the thermal zone device with a thermal cooling device.
.get_temp: get the current temperature of the thermal zone.
- .get_mode: get the current mode (user/kernel) of the thermal zone.
- - "kernel" means thermal management is done in kernel.
- - "user" will prevent kernel thermal driver actions upon trip points
+ .get_mode: get the current mode (enabled/disabled) of the thermal zone.
+ - "enabled" means the kernel thermal management is enabled.
+ - "disabled" will prevent kernel thermal driver action upon trip points
so that user applications can take charge of thermal management.
- .set_mode: set the mode (user/kernel) of the thermal zone.
+ .set_mode: set the mode (enabled/disabled) of the thermal zone.
.get_trip_type: get the type of certain trip point.
.get_trip_temp: get the temperature above which the certain trip point
will be fired.
@@ -119,6 +121,7 @@ Thermal zone device sys I/F, created once it's registered:
|---mode: Working mode of the thermal zone
|---trip_point_[0-*]_temp: Trip point temperature
|---trip_point_[0-*]_type: Trip point type
+ |---trip_point_[0-*]_hyst: Hysteresis value for this trip point
Thermal cooling device sys I/F, created once it's registered:
/sys/class/thermal/cooling_device[0-*]:
@@ -167,14 +170,14 @@ temp
RO, Required
mode
- One of the predefined values in [kernel, user].
+ One of the predefined values in [enabled, disabled].
This file gives information about the algorithm that is currently
managing the thermal zone. It can be either default kernel based
algorithm or user space application.
- kernel = Thermal management in kernel thermal zone driver.
- user = Preventing kernel thermal zone driver actions upon
- trip points so that user application can take full
- charge of the thermal management.
+ enabled = enable kernel thermal management.
+ disabled = prevent kernel thermal zone driver actions upon
+ trip points so that the user application can take full
+ charge of the thermal management.
RW, Optional
trip_point_[0-*]_temp
@@ -188,6 +191,11 @@ trip_point_[0-*]_type
thermal zone.
RO, Optional
+trip_point_[0-*]_hyst
+ The hysteresis value for a trip point, represented as an integer
+ Unit: Celsius
+ RW, Optional
+
cdev[0-*]
Sysfs link to the thermal cooling device node where the sys I/F
for cooling device throttling control represents.
@@ -248,7 +256,7 @@ method, the sys I/F structure will be built like this:
|thermal_zone1:
|---type: acpitz
|---temp: 37000
- |---mode: kernel
+ |---mode: enabled
|---trip_point_0_temp: 100000
|---trip_point_0_type: critical
|---trip_point_1_temp: 80000
diff --git a/Documentation/vfio.txt b/Documentation/vfio.txt
new file mode 100644
index 000000000000..0cb6685c8029
--- /dev/null
+++ b/Documentation/vfio.txt
@@ -0,0 +1,314 @@
+VFIO - "Virtual Function I/O"[1]
+-------------------------------------------------------------------------------
+Many modern systems now provide DMA and interrupt remapping facilities
+to help ensure I/O devices behave within the boundaries they've been
+allotted. This includes x86 hardware with AMD-Vi and Intel VT-d,
+POWER systems with Partitionable Endpoints (PEs) and embedded PowerPC
+systems such as Freescale PAMU. The VFIO driver is an IOMMU/device
+agnostic framework for exposing direct device access to userspace, in
+a secure, IOMMU protected environment. In other words, this allows
+safe[2], non-privileged, userspace drivers.
+
+Why do we want that? Virtual machines often make use of direct device
+access ("device assignment") when configured for the highest possible
+I/O performance. From a device and host perspective, this simply
+turns the VM into a userspace driver, with the benefits of
+significantly reduced latency, higher bandwidth, and direct use of
+bare-metal device drivers[3].
+
+Some applications, particularly in the high performance computing
+field, also benefit from low-overhead, direct device access from
+userspace. Examples include network adapters (often non-TCP/IP based)
+and compute accelerators. Prior to VFIO, these drivers had to either
+go through the full development cycle to become a proper upstream
+driver, be maintained out of tree, or make use of the UIO framework,
+which has no notion of IOMMU protection, limited interrupt support,
+and requires root privileges to access things like PCI configuration
+space.
+
+The VFIO driver framework intends to unify these, replacing the
+KVM PCI-specific device assignment code and providing a more
+secure, more featureful userspace driver environment than UIO.
+
+Groups, Devices, and IOMMUs
+-------------------------------------------------------------------------------
+
+Devices are the main target of any I/O driver. Devices typically
+create a programming interface made up of I/O access, interrupts,
+and DMA. Without going into the details of each of these, DMA is
+by far the most critical aspect for maintaining a secure environment
+as allowing a device read-write access to system memory imposes the
+greatest risk to the overall system integrity.
+
+To help mitigate this risk, many modern IOMMUs now incorporate
+isolation properties into what was, in many cases, an interface only
+meant for translation (ie. solving the addressing problems of devices
+with limited address spaces). With this, devices can now be isolated
+from each other and from arbitrary memory access, thus allowing
+things like secure direct assignment of devices into virtual machines.
+
+This isolation is not always at the granularity of a single device
+though. Even when an IOMMU is capable of this, properties of devices,
+interconnects, and IOMMU topologies can each reduce this isolation.
+For instance, an individual device may be part of a larger multi-
+function enclosure. While the IOMMU may be able to distinguish
+between devices within the enclosure, the enclosure may not require
+transactions between devices to reach the IOMMU. Examples of this
+could be anything from a multi-function PCI device with backdoors
+between functions to a non-PCI-ACS (Access Control Services) capable
+bridge allowing redirection without reaching the IOMMU. Topology
+can also play a factor in terms of hiding devices. A PCIe-to-PCI
+bridge masks the devices behind it, making transactions appear as if
+they come from the bridge itself. Obviously the IOMMU design plays a major
+role as well.
+
+Therefore, while for the most part an IOMMU may have device level
+granularity, any system is susceptible to reduced granularity. The
+IOMMU API therefore supports a notion of IOMMU groups. A group is
+a set of devices which is isolatable from all other devices in the
+system. Groups are therefore the unit of ownership used by VFIO.
+
+While the group is the minimum granularity that must be used to
+ensure secure user access, it's not necessarily the preferred
+granularity. In IOMMUs which make use of page tables, it may be
+possible to share a set of page tables between different groups,
+reducing the overhead both to the platform (reduced TLB thrashing,
+reduced duplicate page tables), and to the user (programming only
+a single set of translations). For this reason, VFIO makes use of
+a container class, which may hold one or more groups. A container
+is created by simply opening the /dev/vfio/vfio character device.
+
+On its own, the container provides little functionality, with all
+but a couple version and extension query interfaces locked away.
+The user needs to add a group into the container for the next level
+of functionality. To do this, the user first needs to identify the
+group associated with the desired device. This can be done using
+the sysfs links described in the example below. By unbinding the
+device from the host driver and binding it to a VFIO driver, a new
+VFIO group will appear for the group as /dev/vfio/$GROUP, where
+$GROUP is the IOMMU group number of which the device is a member.
+If the IOMMU group contains multiple devices, each will need to
+be bound to a VFIO driver before operations on the VFIO group
+are allowed (it's also sufficient to only unbind the device from
+host drivers if a VFIO driver is unavailable; this will make the
+group available, but not that particular device). TBD - interface
+for disabling driver probing/locking a device.
+
+Once the group is ready, it may be added to the container by opening
+the VFIO group character device (/dev/vfio/$GROUP) and using the
+VFIO_GROUP_SET_CONTAINER ioctl, passing the file descriptor of the
+previously opened container file. If desired and if the IOMMU driver
+supports sharing the IOMMU context between groups, multiple groups may
+be set to the same container. If a group fails to set to a container
+with existing groups, a new empty container will need to be used
+instead.
+
+With a group (or groups) attached to a container, the remaining
+ioctls become available, enabling access to the VFIO IOMMU interfaces.
+Additionally, it now becomes possible to get file descriptors for each
+device within a group using an ioctl on the VFIO group file descriptor.
+
+The VFIO device API includes ioctls for describing the device, the I/O
+regions and their read/write/mmap offsets on the device descriptor, as
+well as mechanisms for describing and registering interrupt
+notifications.
+
+VFIO Usage Example
+-------------------------------------------------------------------------------
+
+Assume the user wants to access PCI device 0000:06:0d.0
+
+$ readlink /sys/bus/pci/devices/0000:06:0d.0/iommu_group
+../../../../kernel/iommu_groups/26
+
+This device is therefore in IOMMU group 26. This device is on the
+pci bus, therefore the user will make use of vfio-pci to manage the
+group:
+
+# modprobe vfio-pci
+
+Binding this device to the vfio-pci driver creates the VFIO group
+character devices for this group:
+
+$ lspci -n -s 0000:06:0d.0
+06:0d.0 0401: 1102:0002 (rev 08)
+# echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
+# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id
+
+Now we need to look at what other devices are in the group to free
+it for use by VFIO:
+
+$ ls -l /sys/bus/pci/devices/0000:06:0d.0/iommu_group/devices
+total 0
+lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:00:1e.0 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0
+lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.0 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.0
+lrwxrwxrwx. 1 root root 0 Apr 23 16:13 0000:06:0d.1 ->
+ ../../../../devices/pci0000:00/0000:00:1e.0/0000:06:0d.1
+
+This device is behind a PCIe-to-PCI bridge[4], so we also need to
+add device 0000:06:0d.1 to the group following the same procedure
+as above. Device 0000:00:1e.0 is a bridge that does not currently
+have a host driver, so it's not required to bind it to the
+vfio-pci driver (vfio-pci does not currently support PCI bridges).
+
+The final step is to provide the user with access to the group if
+unprivileged operation is desired (note that /dev/vfio/vfio provides
+no capabilities on its own and is therefore expected to be set to
+mode 0666 by the system).
+
+# chown user:user /dev/vfio/26
+
+The user now has full access to all the devices and the IOMMU for this
+group and can access them as follows:
+
+ int container, group, device, i;
+ struct vfio_group_status group_status =
+ { .argsz = sizeof(group_status) };
+ struct vfio_iommu_x86_info iommu_info = { .argsz = sizeof(iommu_info) };
+ struct vfio_iommu_x86_dma_map dma_map = { .argsz = sizeof(dma_map) };
+ struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
+
+ /* Create a new container */
+ container = open("/dev/vfio/vfio", O_RDWR);
+
+ if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
+ /* Unknown API version */
+
+ if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_X86_IOMMU))
+ /* Doesn't support the IOMMU driver we want. */
+
+ /* Open the group */
+ group = open("/dev/vfio/26", O_RDWR);
+
+ /* Test the group is viable and available */
+ ioctl(group, VFIO_GROUP_GET_STATUS, &group_status);
+
+ if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE))
+ /* Group is not viable (ie, not all devices bound for vfio) */
+
+ /* Add the group to the container */
+ ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
+
+ /* Enable the IOMMU model we want */
+ ioctl(container, VFIO_SET_IOMMU, VFIO_X86_IOMMU);
+
+ /* Get additional IOMMU info */
+ ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info);
+
+ /* Allocate some space and setup a DMA mapping */
+ dma_map.vaddr = mmap(0, 1024 * 1024, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ dma_map.size = 1024 * 1024;
+ dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+
+ ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
+
+ /* Get a file descriptor for the device */
+ device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
+
+ /* Test and setup the device */
+ ioctl(device, VFIO_DEVICE_GET_INFO, &device_info);
+
+ for (i = 0; i < device_info.num_regions; i++) {
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+
+ reg.index = i;
+
+ ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg);
+
+ /* Setup mappings... read/write offsets, mmaps
+ * For PCI devices, config space is a region */
+ }
+
+ for (i = 0; i < device_info.num_irqs; i++) {
+ struct vfio_irq_info irq = { .argsz = sizeof(irq) };
+
+ irq.index = i;
+
+ ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+
+ /* Setup IRQs... eventfds, VFIO_DEVICE_SET_IRQS */
+ }
+
+ /* Gratuitous device reset and go... */
+ ioctl(device, VFIO_DEVICE_RESET);
+
+VFIO User API
+-------------------------------------------------------------------------------
+
+Please see include/linux/vfio.h for complete API documentation.
+
+VFIO bus driver API
+-------------------------------------------------------------------------------
+
+VFIO bus drivers, such as vfio-pci, make use of only a few interfaces
+into the VFIO core. When devices are bound to and unbound from the
+driver, the driver should call vfio_add_group_dev() and
+vfio_del_group_dev() respectively:
+
+extern int vfio_add_group_dev(struct iommu_group *iommu_group,
+ struct device *dev,
+ const struct vfio_device_ops *ops,
+ void *device_data);
+
+extern void *vfio_del_group_dev(struct device *dev);
+
+vfio_add_group_dev() tells the VFIO core to begin tracking the
+specified iommu_group and to register the specified dev as owned by
+a VFIO bus driver. The driver provides an ops structure for callbacks,
+similar to a file operations structure:
+
+struct vfio_device_ops {
+ int (*open)(void *device_data);
+ void (*release)(void *device_data);
+ ssize_t (*read)(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *device_data, const char __user *buf,
+ size_t size, loff_t *ppos);
+ long (*ioctl)(void *device_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *device_data, struct vm_area_struct *vma);
+};
+
+Each function is passed the device_data that was originally registered
+in the vfio_add_group_dev() call above. This gives the bus driver
+an easy place to store its opaque, private data. The open/release
+callbacks are issued when a new file descriptor is created for a
+device (via VFIO_GROUP_GET_DEVICE_FD). The ioctl interface provides
+a direct pass through for VFIO_DEVICE_* ioctls. The read/write/mmap
+interfaces implement the device region access defined by the device's
+own VFIO_DEVICE_GET_REGION_INFO ioctl.
+
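+As an illustration only (this is not taken from the vfio-pci sources;
+the my_* names and the my_device_data structure are invented, and the
+iommu_group lookup via iommu_group_get() is an assumption about how a
+bus driver would obtain the group), a minimal bus driver might wire
+these pieces together roughly as follows:
+
+    #include <linux/device.h>
+    #include <linux/errno.h>
+    #include <linux/iommu.h>
+    #include <linux/slab.h>
+    #include <linux/vfio.h>
+
+    struct my_device_data {
+        struct device *dev;
+        /* ...bus specific state... */
+    };
+
+    static int my_vfio_open(void *device_data)
+    {
+        /* device_data is the pointer given to vfio_add_group_dev() */
+        return 0;
+    }
+
+    static void my_vfio_release(void *device_data)
+    {
+    }
+
+    static long my_vfio_ioctl(void *device_data, unsigned int cmd,
+                              unsigned long arg)
+    {
+        /* handle VFIO_DEVICE_GET_INFO, _GET_REGION_INFO, _SET_IRQS... */
+        return -ENOTTY;
+    }
+
+    static const struct vfio_device_ops my_vfio_ops = {
+        .open    = my_vfio_open,
+        .release = my_vfio_release,
+        .ioctl   = my_vfio_ioctl,
+    };
+
+    static int my_probe(struct device *dev)
+    {
+        struct my_device_data *data;
+        int ret;
+
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+            return -ENOMEM;
+        data->dev = dev;
+
+        /* register dev and its IOMMU group with the VFIO core
+         * (iommu_group reference handling omitted for brevity) */
+        ret = vfio_add_group_dev(iommu_group_get(dev), dev,
+                                 &my_vfio_ops, data);
+        if (ret)
+            kfree(data);
+        return ret;
+    }
+
+    static void my_remove(struct device *dev)
+    {
+        /* vfio_del_group_dev() hands back the device_data pointer */
+        kfree(vfio_del_group_dev(dev));
+    }
+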
+-------------------------------------------------------------------------------
+
+[1] VFIO was originally an acronym for "Virtual Function I/O" in its
+initial implementation by Tom Lyon while at Cisco. We've since
+outgrown the acronym, but it's catchy.
+
+[2] "safe" also depends upon a device being "well behaved". It's
+possible for multi-function devices to have backdoors between
+functions and even for single function devices to have alternative
+access to things like PCI config space through MMIO registers. To
+guard against the former we can include additional precautions in the
+IOMMU driver to group multi-function PCI devices together
+(iommu=group_mf). The latter we can't prevent, but the IOMMU should
+still provide isolation. For PCI, SR-IOV Virtual Functions are the
+best indicator of "well behaved", as these are designed for
+virtualization usage models.
+
+[3] As always there are trade-offs to virtual machine device
+assignment that are beyond the scope of VFIO. It's expected that
+future IOMMU technologies will reduce some, but maybe not all, of
+these trade-offs.
+
+[4] In this case the device is below a PCI bridge, so transactions
+from either function of the device are indistinguishable to the IOMMU:
+
+-[0000:00]-+-1e.0-[06]--+-0d.0
+                        \-0d.1
+
+00:1e.0 PCI bridge: Intel Corporation 82801 PCI Bridge (rev 90)
diff --git a/Documentation/video4linux/CARDLIST.au0828 b/Documentation/video4linux/CARDLIST.au0828
index 7b59e953c4bf..a8a65753e544 100644
--- a/Documentation/video4linux/CARDLIST.au0828
+++ b/Documentation/video4linux/CARDLIST.au0828
@@ -3,4 +3,4 @@
2 -> Hauppauge HVR850 (au0828) [2040:7240]
3 -> DViCO FusionHDTV USB (au0828) [0fe9:d620]
4 -> Hauppauge HVR950Q rev xxF8 (au0828) [2040:7201,2040:7211,2040:7281]
- 5 -> Hauppauge Woodbury (au0828) [2040:8200]
+ 5 -> Hauppauge Woodbury (au0828) [05e1:0480,2040:8200]
diff --git a/Documentation/video4linux/CARDLIST.bttv b/Documentation/video4linux/CARDLIST.bttv
index b753906c7183..581f666a76cf 100644
--- a/Documentation/video4linux/CARDLIST.bttv
+++ b/Documentation/video4linux/CARDLIST.bttv
@@ -159,3 +159,4 @@
158 -> Geovision GV-800(S) (slave) [800b:763d,800c:763d,800d:763d]
159 -> ProVideo PV183 [1830:1540,1831:1540,1832:1540,1833:1540,1834:1540,1835:1540,1836:1540,1837:1540]
160 -> Tongwei Video Technology TD-3116 [f200:3116]
+161 -> Aposonic W-DVR [0279:0228]
diff --git a/Documentation/video4linux/CARDLIST.cx23885 b/Documentation/video4linux/CARDLIST.cx23885
index f316d1816fcd..652aecd13199 100644
--- a/Documentation/video4linux/CARDLIST.cx23885
+++ b/Documentation/video4linux/CARDLIST.cx23885
@@ -18,7 +18,7 @@
17 -> NetUP Dual DVB-S2 CI [1b55:2a2c]
18 -> Hauppauge WinTV-HVR1270 [0070:2211]
19 -> Hauppauge WinTV-HVR1275 [0070:2215,0070:221d,0070:22f2]
- 20 -> Hauppauge WinTV-HVR1255 [0070:2251,0070:2259,0070:22f1]
+ 20 -> Hauppauge WinTV-HVR1255 [0070:2251,0070:22f1]
21 -> Hauppauge WinTV-HVR1210 [0070:2291,0070:2295,0070:2299,0070:229d,0070:22f0,0070:22f3,0070:22f4,0070:22f5]
22 -> Mygica X8506 DMB-TH [14f1:8651]
23 -> Magic-Pro ProHDTV Extreme 2 [14f1:8657]
@@ -33,3 +33,5 @@
32 -> MPX-885
33 -> Mygica X8507 [14f1:8502]
34 -> TerraTec Cinergy T PCIe Dual [153b:117e]
+ 35 -> TeVii S471 [d471:9022]
+ 36 -> Hauppauge WinTV-HVR1255 [0070:2259]
diff --git a/Documentation/video4linux/CARDLIST.saa7134 b/Documentation/video4linux/CARDLIST.saa7134
index 34f3b330e5f4..94d9025aa82d 100644
--- a/Documentation/video4linux/CARDLIST.saa7134
+++ b/Documentation/video4linux/CARDLIST.saa7134
@@ -188,3 +188,4 @@
187 -> Beholder BeholdTV 503 FM [5ace:5030]
188 -> Sensoray 811/911 [6000:0811,6000:0911]
189 -> Kworld PC150-U [17de:a134]
+190 -> Asus My Cinema PS3-100 [1043:48cd]
diff --git a/Documentation/video4linux/v4l2-framework.txt b/Documentation/video4linux/v4l2-framework.txt
index 1f5905270050..89318be6c1d2 100644
--- a/Documentation/video4linux/v4l2-framework.txt
+++ b/Documentation/video4linux/v4l2-framework.txt
@@ -594,6 +594,15 @@ You should also set these fields:
unlocked_ioctl file operation is called this lock will be taken by the
core and released afterwards. See the next section for more details.
+- queue: a pointer to the struct vb2_queue associated with this device node.
+ If queue is non-NULL, and queue->lock is non-NULL, then queue->lock is
+ used for the queuing ioctls (VIDIOC_REQBUFS, CREATE_BUFS, QBUF, DQBUF,
+ QUERYBUF, PREPARE_BUF, STREAMON and STREAMOFF) instead of the lock above.
+ That way the vb2 queuing framework does not have to wait for other ioctls.
+ This queue pointer is also used by the vb2 helper functions to check for
+ queuing ownership (i.e. is the filehandle calling it allowed to do the
+ operation).
+
- prio: keeps track of the priorities. Used to implement VIDIOC_G/S_PRIORITY.
If left to NULL, then it will use the struct v4l2_prio_state in v4l2_device.
If you want to have a separate priority state per (group of) device node(s),
@@ -647,47 +656,43 @@ manually set the struct media_entity type and name fields.
A reference to the entity will be automatically acquired/released when the
video device is opened/closed.
-v4l2_file_operations and locking
---------------------------------
-
-You can set a pointer to a mutex_lock in struct video_device. Usually this
-will be either a top-level mutex or a mutex per device node. By default this
-lock will be used for unlocked_ioctl, but you can disable locking for
-selected ioctls by calling:
-
- void v4l2_disable_ioctl_locking(struct video_device *vdev, unsigned int cmd);
-
-E.g.: v4l2_disable_ioctl_locking(vdev, VIDIOC_DQBUF);
+ioctls and locking
+------------------
-You have to call this before you register the video_device.
+The V4L core provides optional locking services. The main service is the
+lock field in struct video_device, which is a pointer to a mutex. If you set
+this pointer, then that will be used by unlocked_ioctl to serialize all ioctls.
-Particularly with USB drivers where certain commands such as setting controls
-can take a long time you may want to do your own locking for the buffer queuing
-ioctls.
+If you are using the videobuf2 framework, then there is a second lock that you
+can set: video_device->queue->lock. If set, then this lock will be used instead
+of video_device->lock to serialize all queuing ioctls (see the previous section
+for the full list of those ioctls).
-If you want still finer-grained locking then you have to set mutex_lock to NULL
-and do you own locking completely.
+The advantage of using a different lock for the queuing ioctls is that for some
+drivers (particularly USB drivers) certain commands such as setting controls
+can take a long time, so you want to use a separate lock for the buffer queuing
+ioctls. That way your VIDIOC_DQBUF doesn't stall because the driver is busy
+changing, e.g., the exposure of the webcam.
-It is up to the driver developer to decide which method to use. However, if
-your driver has high-latency operations (for example, changing the exposure
-of a USB webcam might take a long time), then you might be better off with
-doing your own locking if you want to allow the user to do other things with
-the device while waiting for the high-latency command to finish.
+Of course, you can always do all the locking yourself by leaving both lock
+pointers at NULL.
-If a lock is specified then all ioctl commands will be serialized on that
-lock. If you use videobuf then you must pass the same lock to the videobuf
-queue initialize function: if videobuf has to wait for a frame to arrive, then
-it will temporarily unlock the lock and relock it afterwards. If your driver
-also waits in the code, then you should do the same to allow other processes
-to access the device node while the first process is waiting for something.
+If you use the old videobuf then you must pass the video_device lock to the
+videobuf queue initialize function: if videobuf has to wait for a frame to
+arrive, then it will temporarily unlock the lock and relock it afterwards. If
+your driver also waits in the code, then you should do the same to allow other
+processes to access the device node while the first process is waiting for
+something.
In the case of videobuf2 you will need to implement the wait_prepare and
-wait_finish callbacks to unlock/lock if applicable. In particular, if you use
-the lock in struct video_device then you must unlock/lock this mutex in
-wait_prepare and wait_finish.
-
-The implementation of a hotplug disconnect should also take the lock before
-calling v4l2_device_disconnect.
+wait_finish callbacks to unlock/lock if applicable. If you use the queue->lock
+pointer, then you can use the helper functions vb2_ops_wait_prepare/finish.
+
+The implementation of a hotplug disconnect should also take the lock from
+video_device before calling v4l2_device_disconnect. If you are also using
+video_device->queue->lock, then you have to first lock video_device->queue->lock
+followed by video_device->lock. That way you can be sure no ioctl is running
+when you call v4l2_device_disconnect.
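+
+As a rough sketch only (not taken from any particular driver; the foo_*
+names are invented, and the file operations, ioctl operations and vb2
+queue/ops setup are assumed to happen elsewhere), the two locks are
+typically wired up before registering the device node:
+
+    #include <linux/mutex.h>
+    #include <media/v4l2-dev.h>
+    #include <media/videobuf2-core.h>
+
+    struct foo_dev {
+        struct video_device vdev;
+        struct vb2_queue queue;
+        struct mutex lock;       /* serializes the non-queuing ioctls */
+        struct mutex queue_lock; /* serializes the queuing ioctls */
+    };
+
+    static int foo_register(struct foo_dev *foo)
+    {
+        mutex_init(&foo->lock);
+        mutex_init(&foo->queue_lock);
+
+        /* with queue->lock set, the driver's vb2_ops can simply use
+         * vb2_ops_wait_prepare/vb2_ops_wait_finish as its callbacks */
+        foo->queue.lock = &foo->queue_lock;
+
+        foo->vdev.lock  = &foo->lock;   /* all other ioctls */
+        foo->vdev.queue = &foo->queue;  /* queuing ioctls take queue->lock */
+
+        return video_register_device(&foo->vdev, VFL_TYPE_GRABBER, -1);
+    }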
video_device registration
-------------------------
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index f8551b3879f8..4ac359b7aa17 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -299,11 +299,17 @@ map_hugetlb.c.
*******************************************************************
/*
- * hugepage-shm: see Documentation/vm/hugepage-shm.c
+ * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c
*/
*******************************************************************
/*
- * hugepage-mmap: see Documentation/vm/hugepage-mmap.c
+ * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c
+ */
+
+*******************************************************************
+
+/*
+ * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c
*/
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index 0403aaaba878..874a8ca93feb 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -3,6 +3,7 @@ Kernel driver w1_therm
Supported chips:
* Maxim ds18*20 based temperature sensors.
+ * Maxim ds1825 based temperature sensors.
Author: Evgeniy Polyakov <johnpol@2ka.mipt.ru>
@@ -15,6 +16,7 @@ supported family codes:
W1_THERM_DS18S20 0x10
W1_THERM_DS1822 0x22
W1_THERM_DS18B20 0x28
+W1_THERM_DS1825 0x3B
Support is provided through the sysfs w1_slave file. Each open and
read sequence will initiate a temperature conversion then provide two
diff --git a/Documentation/watchdog/src/watchdog-test.c b/Documentation/watchdog/src/watchdog-test.c
index 73ff5cc93e05..3da822967ee0 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/Documentation/watchdog/src/watchdog-test.c
@@ -31,7 +31,7 @@ static void keep_alive(void)
* or "-e" to enable the card.
*/
-void term(int sig)
+static void term(int sig)
{
close(fd);
fprintf(stderr, "Stopping watchdog ticks...\n");
diff --git a/MAINTAINERS b/MAINTAINERS
index 429e72cc13fc..fdc0119963e7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -242,13 +242,6 @@ W: http://www.lesswatts.org/projects/acpi/
S: Supported
F: drivers/acpi/fan.c
-ACPI PROCESSOR AGGREGATOR DRIVER
-M: Shaohua Li <shaohua.li@intel.com>
-L: linux-acpi@vger.kernel.org
-W: http://www.lesswatts.org/projects/acpi/
-S: Supported
-F: drivers/acpi/acpi_pad.c
-
ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -834,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c
ARM/INTEL IOP32X ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IOP33X ARM ARCHITECTURE
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IOP13XX ARM ARCHITECTURE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/INTEL IQ81342EX MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -876,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c
ARM/INTEL XSC3 (MANZANO) ARM CORE
M: Lennert Buytenhek <kernel@wantstofly.org>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -932,14 +925,14 @@ S: Maintained
ARM/NOMADIK ARCHITECTURE
M: Alessandro Rubini <rubini@unipv.it>
-M: Linus Walleij <linus.walleij@stericsson.com>
+M: Linus Walleij <linus.walleij@linaro.org>
M: STEricsson <STEricsson_nomadik_linux@list.st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-nomadik/
F: arch/arm/plat-nomadik/
F: drivers/i2c/busses/i2c-nomadik.c
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
M: Nelson Castillo <arhuaco@freaks-unidos.net>
@@ -1153,7 +1146,7 @@ F: drivers/usb/host/ehci-w90x900.c
F: drivers/video/nuc900fb.c
ARM/U300 MACHINE SUPPORT
-M: Linus Walleij <linus.walleij@stericsson.com>
+M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: arch/arm/mach-u300/
@@ -1168,15 +1161,20 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
-M: Linus Walleij <linus.walleij@stericsson.com>
+M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ux500/
+F: drivers/clocksource/clksrc-dbx500-prcmu.c
F: drivers/dma/ste_dma40*
+F: drivers/hwspinlock/u8500_hsem.c
F: drivers/mfd/abx500*
F: drivers/mfd/ab8500*
-F: drivers/mfd/stmpe*
+F: drivers/mfd/dbx500*
+F: drivers/mfd/db8500*
+F: drivers/pinctrl/pinctrl-nomadik*
F: drivers/rtc/rtc-ab8500.c
+F: drivers/rtc/rtc-pl031.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/VFP SUPPORT
@@ -1193,6 +1191,17 @@ S: Maintained
F: arch/arm/mach-pxa/vpac270.c
F: arch/arm/mach-pxa/include/mach/vpac270.h
+ARM/VT8500 ARM ARCHITECTURE
+M: Tony Prisk <linux@prisktech.co.nz>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-vt8500/
+F: drivers/video/vt8500lcdfb.*
+F: drivers/video/wm8505fb*
+F: drivers/video/wmt_ge_rops.*
+F: drivers/tty/serial/vt8500_serial.c
+F: drivers/rtc/rtc-vt8500.c
+
ARM/ZIPIT Z2 SUPPORT
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1223,9 +1232,9 @@ S: Maintained
F: drivers/hwmon/asb100.c
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
W: http://sourceforge.net/projects/xscaleiop
-S: Supported
+S: Maintained
F: Documentation/crypto/async-tx-api.txt
F: crypto/async_tx/
F: drivers/dma/
@@ -1536,6 +1545,11 @@ W: http://blackfin.uclinux.org/
S: Supported
F: drivers/i2c/busses/i2c-bfin-twi.c
+BLINKM RGB LED DRIVER
+M: Jan-Simon Moeller <jansimon.moeller@gmx.de>
+S: Maintained
+F: drivers/leds/leds-blinkm.c
+
BLOCK LAYER
M: Jens Axboe <axboe@kernel.dk>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
@@ -1780,15 +1794,16 @@ F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
CEPH DISTRIBUTED FILE SYSTEM CLIENT
-M: Sage Weil <sage@newdream.net>
+M: Sage Weil <sage@inktank.com>
L: ceph-devel@vger.kernel.org
-W: http://ceph.newdream.net/
+W: http://ceph.com/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
S: Supported
F: Documentation/filesystems/ceph.txt
F: fs/ceph
F: net/ceph
F: include/linux/ceph
+F: include/linux/crush
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
L: linux-usb@vger.kernel.org
@@ -2202,7 +2217,7 @@ S: Maintained
F: drivers/scsi/tmscsim.*
DC395x SCSI driver
-M: Oliver Neukum <oliver@neukum.name>
+M: Oliver Neukum <oliver@neukum.org>
M: Ali Akcaagac <aliakc@web.de>
M: Jamie Lenehan <lenehan@twibble.org>
W: http://twibble.org/dist/dc395x/
@@ -2349,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vinod.koul@intel.com>
-M: Dan Williams <dan.j.williams@intel.com>
+M: Dan Williams <djbw@fb.com>
S: Supported
F: drivers/dma/
F: include/linux/dma*
@@ -2741,6 +2756,7 @@ M: Jingoo Han <jg1.han@samsung.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/exynos/exynos_dp*
+F: include/video/exynos_dp*
EXYNOS MIPI DISPLAY DRIVERS
M: Inki Dae <inki.dae@samsung.com>
@@ -3083,7 +3099,7 @@ F: include/linux/gigaset_dev.h
GPIO SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
-M: Linus Walleij <linus.walleij@stericsson.com>
+M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://git.secretlab.ca/git/linux-2.6.git
F: Documentation/gpio.txt
@@ -3146,8 +3162,7 @@ S: Maintained
F: drivers/media/video/gspca/t613.c
GSPCA USB WEBCAM DRIVER
-M: Jean-Francois Moine <moinejf@free.fr>
-W: http://moinejf.free.fr
+M: Hans de Goede <hdegoede@redhat.com>
L: linux-media@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-media.git
S: Maintained
@@ -3537,7 +3552,6 @@ K: \b(ABS|SYN)_MT_
INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support <intel-linux-scu@intel.com>
-M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Ed Nadolski <edmund.nadolski@intel.com>
L: linux-scsi@vger.kernel.org
@@ -3580,8 +3594,8 @@ F: arch/x86/kernel/microcode_core.c
F: arch/x86/kernel/microcode_intel.c
INTEL I/OAT DMA DRIVER
-M: Dan Williams <dan.j.williams@intel.com>
-S: Supported
+M: Dan Williams <djbw@fb.com>
+S: Maintained
F: drivers/dma/ioat*
INTEL IOMMU (VT-d)
@@ -3593,8 +3607,8 @@ F: drivers/iommu/intel-iommu.c
F: include/linux/intel-iommu.h
INTEL IOP-ADMA DMA DRIVER
-M: Dan Williams <dan.j.williams@intel.com>
-S: Maintained
+M: Dan Williams <djbw@fb.com>
+S: Odd fixes
F: drivers/dma/iop-adma.c
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
@@ -4523,7 +4537,7 @@ S: Supported
F: arch/microblaze/
MICROTEK X6 SCANNER
-M: Oliver Neukum <oliver@neukum.name>
+M: Oliver Neukum <oliver@neukum.org>
S: Maintained
F: drivers/usb/image/microtek.*
@@ -4961,6 +4975,13 @@ S: Maintained
F: drivers/usb/*/*omap*
F: arch/arm/*omap*/usb*
+OMAP GPIO DRIVER
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+M: Kevin Hilman <khilman@ti.com>
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: drivers/gpio/gpio-omap.c
+
OMFS FILESYSTEM
M: Bob Copeland <me@bobcopeland.com>
L: linux-karma-devel@lists.sourceforge.net
@@ -5312,14 +5333,15 @@ PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
F: drivers/pinctrl/
+F: include/linux/pinctrl/
PIN CONTROLLER - ST SPEAR
-M: Viresh Kumar <viresh.linux@gmail.com>
+M: Viresh Kumar <viresh.linux@gmail.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: driver/pinctrl/spear/
+F: drivers/pinctrl/spear/
PKTCDVD DRIVER
M: Peter Osterlund <petero2@telia.com>
@@ -5510,6 +5532,18 @@ S: Maintained
F: Documentation/video4linux/README.pvrusb2
F: drivers/media/video/pvrusb2/
+PWM SUBSYSTEM
+M: Thierry Reding <thierry.reding@avionic-design.de>
+L: linux-kernel@vger.kernel.org
+S: Maintained
+W: http://gitorious.org/linux-pwm
+T: git git://gitorious.org/linux-pwm/linux-pwm.git
+F: Documentation/pwm.txt
+F: Documentation/devicetree/bindings/pwm/
+F: include/linux/pwm.h
+F: include/linux/of_pwm.h
+F: drivers/pwm/
+
PXA2xx/PXA3xx SUPPORT
M: Eric Miao <eric.y.miao@gmail.com>
M: Russell King <linux@arm.linux.org.uk>
@@ -5611,10 +5645,12 @@ S: Supported
F: arch/hexagon/
RADOS BLOCK DEVICE (RBD)
-F: include/linux/qnxtypes.h
-M: Yehuda Sadeh <yehuda@hq.newdream.net>
-M: Sage Weil <sage@newdream.net>
+M: Yehuda Sadeh <yehuda@inktank.com>
+M: Sage Weil <sage@inktank.com>
+M: Alex Elder <elder@inktank.com>
M: ceph-devel@vger.kernel.org
+W: http://ceph.com/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
S: Supported
F: drivers/block/rbd.c
F: drivers/block/rbd_types.h
@@ -5651,7 +5687,7 @@ F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RANDOM NUMBER DRIVER
-M: Matt Mackall <mpm@selenic.com>
+M: Theodore Ts'o <tytso@mit.edu>
S: Maintained
F: drivers/char/random.c
@@ -5725,6 +5761,7 @@ F: include/linux/regmap.h
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
S: Maintained
F: drivers/remoteproc/
F: Documentation/remoteproc.txt
@@ -5883,6 +5920,16 @@ L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/s3c-fb.c
+SAMSUNG MULTIFUNCTION DEVICE DRIVERS
+M: Sangbeom Kim <sbkim73@samsung.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: drivers/mfd/sec*.c
+F: drivers/regulator/s2m*.c
+F: drivers/regulator/s5m*.c
+F: drivers/rtc/rtc-sec.c
+F: include/linux/mfd/samsung/
+
SERIAL DRIVERS
M: Alan Cox <alan@linux.intel.com>
L: linux-serial@vger.kernel.org
@@ -7029,7 +7076,7 @@ F: include/linux/mtd/ubi.h
F: include/mtd/ubi-user.h
USB ACM DRIVER
-M: Oliver Neukum <oliver@neukum.name>
+M: Oliver Neukum <oliver@neukum.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: Documentation/usb/acm.txt
@@ -7050,7 +7097,7 @@ S: Supported
F: drivers/block/ub.c
USB CDC ETHERNET DRIVER
-M: Oliver Neukum <oliver@neukum.name>
+M: Oliver Neukum <oliver@neukum.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/net/usb/cdc_*.c
@@ -7123,7 +7170,7 @@ F: drivers/usb/host/isp116x*
F: include/linux/usb/isp116x.h
USB KAWASAKI LSI DRIVER
-M: Oliver Neukum <oliver@neukum.name>
+M: Oliver Neukum <oliver@neukum.org>
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/serial/kl5kusb105.*
@@ -7241,6 +7288,12 @@ W: http://www.connecttech.com
S: Supported
F: drivers/usb/serial/whiteheat*
+USB SMSC75XX ETHERNET DRIVER
+M: Steve Glendinning <steve.glendinning@shawell.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/usb/smsc75xx.*
+
USB SMSC95XX ETHERNET DRIVER
M: Steve Glendinning <steve.glendinning@shawell.net>
L: netdev@vger.kernel.org
@@ -7340,6 +7393,7 @@ W: http://user-mode-linux.sourceforge.net
S: Maintained
F: Documentation/virtual/uml/
F: arch/um/
+F: arch/x86/um/
F: fs/hostfs/
F: fs/hppfs/
@@ -7372,6 +7426,14 @@ S: Maintained
F: Documentation/filesystems/vfat.txt
F: fs/fat/
+VFIO DRIVER
+M: Alex Williamson <alex.williamson@redhat.com>
+L: kvm@vger.kernel.org
+S: Maintained
+F: Documentation/vfio.txt
+F: drivers/vfio/
+F: include/linux/vfio.h
+
VIDEOBUF2 FRAMEWORK
M: Pawel Osciak <pawel@osciak.com>
M: Marek Szyprowski <m.szyprowski@samsung.com>
@@ -7614,22 +7676,28 @@ S: Supported
F: Documentation/hwmon/wm83??
F: arch/arm/mach-s3c64xx/mach-crag6410*
F: drivers/clk/clk-wm83*.c
+F: drivers/extcon/extcon-arizona.c
F: drivers/leds/leds-wm83*.c
+F: drivers/gpio/gpio-*wm*.c
+F: drivers/gpio/gpio-arizona.c
F: drivers/hwmon/wm83??-hwmon.c
F: drivers/input/misc/wm831x-on.c
F: drivers/input/touchscreen/wm831x-ts.c
F: drivers/input/touchscreen/wm97*.c
-F: drivers/mfd/wm8*.c
+F: drivers/mfd/arizona*
+F: drivers/mfd/wm*.c
F: drivers/power/wm83*.c
F: drivers/rtc/rtc-wm83*.c
F: drivers/regulator/wm8*.c
F: drivers/video/backlight/wm83*_bl.c
F: drivers/watchdog/wm83*_wdt.c
+F: include/linux/mfd/arizona/
F: include/linux/mfd/wm831x/
F: include/linux/mfd/wm8350/
F: include/linux/mfd/wm8400*
F: include/linux/wm97xx.h
F: include/sound/wm????.h
+F: sound/soc/codecs/arizona.?
F: sound/soc/codecs/wm*
WORKQUEUE
diff --git a/Makefile b/Makefile
index 4bb09e1b1230..371ce8899f5c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
-PATCHLEVEL = 5
+PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc4
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
@@ -535,11 +535,11 @@ PHONY += include/config/auto.conf
include/config/auto.conf:
$(Q)test -e include/generated/autoconf.h -a -e $@ || ( \
- echo; \
- echo " ERROR: Kernel configuration is invalid."; \
- echo " include/generated/autoconf.h or $@ are missing.";\
- echo " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
- echo; \
+ echo >&2; \
+ echo >&2 " ERROR: Kernel configuration is invalid."; \
+ echo >&2 " include/generated/autoconf.h or $@ are missing.";\
+ echo >&2 " Run 'make oldconfig && make prepare' on kernel src to fix it."; \
+ echo >&2 ; \
/bin/false)
endif # KBUILD_EXTMOD
@@ -796,8 +796,8 @@ prepare3: include/config/kernel.release
ifneq ($(KBUILD_SRC),)
@$(kecho) ' Using $(srctree) as source for kernel'
$(Q)if [ -f $(srctree)/.config -o -d $(srctree)/include/config ]; then \
- echo " $(srctree) is not clean, please run 'make mrproper'"; \
- echo " in the '$(srctree)' directory.";\
+ echo >&2 " $(srctree) is not clean, please run 'make mrproper'"; \
+ echo >&2 " in the '$(srctree)' directory.";\
/bin/false; \
fi;
endif
@@ -971,11 +971,11 @@ else # CONFIG_MODULES
# ---------------------------------------------------------------------------
modules modules_install: FORCE
- @echo
- @echo "The present kernel configuration has modules disabled."
- @echo "Type 'make config' and enable loadable module support."
- @echo "Then build a kernel with module support enabled."
- @echo
+ @echo >&2
+ @echo >&2 "The present kernel configuration has modules disabled."
+ @echo >&2 "Type 'make config' and enable loadable module support."
+ @echo >&2 "Then build a kernel with module support enabled."
+ @echo >&2
@exit 1
endif # CONFIG_MODULES
diff --git a/arch/Kconfig b/arch/Kconfig
index 8c3d957fa8e2..72f2fa189cc5 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -248,7 +248,14 @@ config HAVE_CMPXCHG_LOCAL
config HAVE_CMPXCHG_DOUBLE
bool
+config ARCH_WANT_IPC_PARSE_VERSION
+ bool
+
+config ARCH_WANT_COMPAT_IPC_PARSE_VERSION
+ bool
+
config ARCH_WANT_OLD_COMPAT_IPC
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
bool
config HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 3de74c9f9610..9944dedee5b1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -14,9 +14,12 @@ config ALPHA
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 3bb7ffeae3bc..c2cbe4fc391c 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
*/
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index db00f7885faa..e477bcd5b94a 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,7 +1,9 @@
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
+#ifdef __KERNEL__
#include <asm/special_insns.h>
+#endif
/*
* Alpha floating-point control register defines:
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index fd698a174f26..b87755a19554 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -76,7 +76,10 @@ struct switch_stack {
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
-#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
+#define current_pt_regs() \
+ ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
#endif
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index dcb221a4b5be..7d2f75be932e 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -76,9 +76,11 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#ifdef __KERNEL__
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
#define SOCK_NONBLOCK 0x40000000
+#endif /* __KERNEL__ */
#endif /* _ASM_SOCKET_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b49ec2f8d6e3..766fdfde2b7a 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -433,36 +433,12 @@ clear_user(void __user *to, long len)
#undef __module_address
#undef __module_call
-/* Returns: -EFAULT if exception before terminator, N if the entire
- buffer filled, else strlen. */
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
-extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
-
-extern inline long
-strncpy_from_user(char *to, const char __user *from, long n)
-{
- long ret = -EFAULT;
- if (__access_ok((unsigned long)from, 0, get_fs()))
- ret = __strncpy_from_user(to, from, n);
- return ret;
-}
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern long __strlen_user(const char __user *);
-
-extern inline long strlen_user(const char __user *str)
-{
- return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
-}
-
-/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
- * a value greater than N if the limit would be exceeded, else strlen. */
-extern long __strnlen_user(const char __user *, long);
-
-extern inline long strnlen_user(const char __user *str, long n)
-{
- return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
/*
* About the exception table:
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index d1f23b722df4..a31a78eac9b9 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -465,12 +465,13 @@
#define __NR_setns 501
#define __NR_accept4 502
#define __NR_sendmmsg 503
+#define __NR_process_vm_readv 504
+#define __NR_process_vm_writev 505
#ifdef __KERNEL__
-#define NR_SYSCALLS 504
+#define NR_SYSCALLS 506
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6b340d0f1521
--- /dev/null
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/compiler.h>
+
+/*
+ * word-at-a-time interface for Alpha.
+ */
+
+/*
+ * We do not use the word_at_a_time struct on Alpha, but it needs to be
+ * implemented to humour the generic code.
+ */
+struct word_at_a_time {
+ const unsigned long unused;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { 0 }
+
+/* Return nonzero if val has a zero */
+static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long zero_locations = __kernel_cmpbge(0, val);
+ *bits = zero_locations;
+ return zero_locations;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long find_zero(unsigned long bits)
+{
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
+ /* Simple if have CIX instructions */
+ return __kernel_cttz(bits);
+#else
+ unsigned long t1, t2, t3;
+ /* Retain lowest set bit only */
+ bits &= -bits;
+ /* Binary search for lowest set bit */
+ t1 = bits & 0xf0;
+ t2 = bits & 0xcc;
+ t3 = bits & 0xaa;
+ if (t1) t1 = 4;
+ if (t2) t2 = 2;
+ if (t3) t3 = 1;
+ return t1 + t2 + t3;
+#endif
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index d96e742d4dc2..15fa821d09cd 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* entry.S */
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(kernel_execve);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul);
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
/*
* SMP-specific symbols.
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 6d159cee5f2f..ec0da0567ab5 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -663,58 +663,6 @@ kernel_thread:
br ret_to_kernel
.end kernel_thread
-/*
- * kernel_execve(path, argv, envp)
- */
- .align 4
- .globl kernel_execve
- .ent kernel_execve
-kernel_execve:
- /* We can be called from a module. */
- ldgp $gp, 0($27)
- lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
- .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
- stq $26, 0($sp)
- stq $16, 8($sp)
- stq $17, 16($sp)
- stq $18, 24($sp)
- .prologue 1
-
- lda $16, 32($sp)
- lda $17, 0
- lda $18, SIZEOF_PT_REGS
- bsr $26, memset !samegp
-
- /* Avoid the HAE being gratuitously wrong, which would cause us
- to do the whole turn off interrupts thing and restore it. */
- ldq $2, alpha_mv+HAE_CACHE
- stq $2, 152+32($sp)
-
- ldq $16, 8($sp)
- ldq $17, 16($sp)
- ldq $18, 24($sp)
- lda $19, 32($sp)
- bsr $26, do_execve !samegp
-
- ldq $26, 0($sp)
- bne $0, 1f /* error! */
-
- /* Move the temporary pt_regs struct from its current location
- to the top of the kernel stack frame. See copy_thread for
- details for a normal process. */
- lda $16, 0x4000 - SIZEOF_PT_REGS($8)
- lda $17, 32($sp)
- lda $18, SIZEOF_PT_REGS
- bsr $26, memmove !samegp
-
- /* Take that over as our new stack frame and visit userland! */
- lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
- br $31, ret_from_sys_call
-
-1: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
- ret
-.end kernel_execve
-
/*
* Special system calls. Most of these are special in that they either
@@ -797,115 +745,6 @@ sys_rt_sigreturn:
.end sys_rt_sigreturn
.align 4
- .globl sys_sethae
- .ent sys_sethae
-sys_sethae:
- .prologue 0
- stq $16, 152($sp)
- ret
-.end sys_sethae
-
- .align 4
- .globl osf_getpriority
- .ent osf_getpriority
-osf_getpriority:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- jsr $26, sys_getpriority
-
- ldq $26, 0($sp)
- blt $0, 1f
-
- /* Return value is the unbiased priority, i.e. 20 - prio.
- This does result in negative return values, so signal
- no error by writing into the R0 slot. */
- lda $1, 20
- stq $31, 16($sp)
- subl $1, $0, $0
- unop
-
-1: lda $sp, 16($sp)
- ret
-.end osf_getpriority
-
- .align 4
- .globl sys_getxuid
- .ent sys_getxuid
-sys_getxuid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_UID($3)
- ldl $1, CRED_EUID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxuid
-
- .align 4
- .globl sys_getxgid
- .ent sys_getxgid
-sys_getxgid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_GID($3)
- ldl $1, CRED_EGID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxgid
-
- .align 4
- .globl sys_getxpid
- .ent sys_getxpid
-sys_getxpid:
- .prologue 0
- ldq $2, TI_TASK($8)
-
- /* See linux/kernel/timer.c sys_getppid for discussion
- about this loop. */
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- ldl $0, TASK_TGID($2)
-1: ldl $1, TASK_TGID($4)
-#ifdef CONFIG_SMP
- mov $4, $5
- mb
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- cmpeq $4, $5, $5
- beq $5, 1b
-#endif
- stq $1, 80($sp)
- ret
-.end sys_getxpid
-
- .align 4
- .globl sys_alpha_pipe
- .ent sys_alpha_pipe
-sys_alpha_pipe:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- mov $31, $17
- lda $16, 8($sp)
- jsr $26, do_pipe_flags
-
- ldq $26, 0($sp)
- bne $0, 1f
-
- /* The return values are in $0 and $20. */
- ldl $1, 12($sp)
- ldl $0, 8($sp)
-
- stq $1, 80+16($sp)
-1: lda $sp, 16($sp)
- ret
-.end sys_alpha_pipe
-
- .align 4
.globl sys_execve
.ent sys_execve
sys_execve:
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 98a103621af6..bc1acdda7a5e 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
}
#endif
+
+SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
+{
+ int prio = sys_getpriority(which, who);
+ if (prio >= 0) {
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ This does result in negative return values, so signal
+ no error */
+ force_successful_syscall_return();
+ prio = 20 - prio;
+ }
+ return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+ current_pt_regs()->r20 = sys_geteuid();
+ return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+ current_pt_regs()->r20 = sys_getegid();
+ return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+ current_pt_regs()->r20 = sys_getppid();
+ return sys_getpid();
+}
+
+SYSCALL_DEFINE0(alpha_pipe)
+{
+ int fd[2];
+ int res = do_pipe_flags(fd, 0);
+ if (!res) {
+ /* The return values are in $0 and $20. */
+ current_pt_regs()->r20 = fd[1];
+ res = fd[0];
+ }
+ return res;
+}
+
+SYSCALL_DEFINE1(sethae, unsigned long, val)
+{
+ current_pt_regs()->hae = val;
+ return 0;
+}
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fce3e8e..d6fde98b74b3 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)
}
return pc;
}
+
+int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
+{
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ struct pt_regs regs = {.hae = alpha_mv.hae_cache};
+ int err = do_execve(path, argv, envp, &regs);
+ if (!err) {
+ struct pt_regs *p = current_pt_regs();
+ /* copy regs to normal position and off to userland we go... */
+ *p = regs;
+ __asm__ __volatile__ (
+ "mov %0, $sp;"
+ "br $31, ret_from_sys_call"
+ : : "r"(p));
+ }
+ return err;
+}
+EXPORT_SYMBOL(kernel_execve);
diff --git a/arch/alpha/kernel/smc37c669.c b/arch/alpha/kernel/smc37c669.c
index 0435921d41c6..c803fc76ae4f 100644
--- a/arch/alpha/kernel/smc37c669.c
+++ b/arch/alpha/kernel/smc37c669.c
@@ -933,18 +933,6 @@ void SMC37c669_display_device_info(
*
*--
*/
-#if 0
-/* $INCLUDE_OPTIONS$ */
-#include "cp$inc:platform_io.h"
-/* $INCLUDE_OPTIONS_END$ */
-#include "cp$src:common.h"
-#include "cp$inc:prototypes.h"
-#include "cp$src:kernel_def.h"
-#include "cp$src:msg_def.h"
-#include "cp$src:smcc669_def.h"
-/* Platform-specific includes */
-#include "cp$src:platform.h"
-#endif
#ifndef TRUE
#define TRUE 1
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 87835235f114..2ac6b45c3e00 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@ sys_call_table:
.quad sys_socket
.quad sys_connect
.quad sys_accept
- .quad osf_getpriority /* 100 */
+ .quad sys_osf_getpriority /* 100 */
.quad sys_send
.quad sys_recv
.quad sys_sigreturn
@@ -522,6 +522,8 @@ sys_call_table:
.quad sys_setns
.quad sys_accept4
.quad sys_sendmmsg
+ .quad sys_process_vm_readv
+ .quad sys_process_vm_writev /* 505 */
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index c0a83ab62b78..59660743237c 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
$(ev6-y)memchr.o \
$(ev6-y)copy_user.o \
$(ev6-y)clear_user.o \
- $(ev6-y)strncpy_from_user.o \
- $(ev67-y)strlen_user.o \
$(ev6-y)csum_ipv6_magic.o \
$(ev6-y)clear_page.o \
$(ev6-y)copy_page.o \
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e28178cacc..000000000000
--- a/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * arch/alpha/lib/ev6-strncpy_from_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * A bunch of instructions got moved and temp registers were changed
- * to aid in scheduling. Control flow was also re-arranged to eliminate
- * branches, and to provide longer code sequences to enable better scheduling.
- * A total rewrite (using byte load/stores for start & tail sequences)
- * is desirable, but very difficult to do without a from-scratch rewrite.
- * Save that for the future.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 4
-__strncpy_from_user:
- and a0, 7, t3 # E : find dest misalignment
- beq a2, $zerolength # U :
-
- /* Are source and destination co-aligned? */
- mov a0, v0 # E : save the string start
- xor a0, a1, t4 # E :
- EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword
- ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword
-
- addq a2, t3, a2 # E : bias count by dest misalignment
- subq a2, 1, a3 # E :
- addq zero, 1, t10 # E :
- and t4, 7, t4 # E : misalignment between the two
-
- and a3, 7, t6 # E : number of tail bytes
- sll t10, t6, t10 # E : t10 = bitmask of last count byte
- bne t4, $unaligned # U :
- lda t2, -1 # E : build a mask against false zero
-
- /*
- * We are co-aligned; take care of a partial first word.
- * On entry to this basic block:
- * t0 == the first destination word for masking back in
- * t1 == the first source word.
- */
-
- srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8
- addq a1, 8, a1 # E :
- mskqh t2, a1, t2 # U : detection in the src word
- nop
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- mskqh t1, a1, t3 # U :
- mskql t0, a1, t0 # U : assemble the first output word
- ornot t1, t2, t2 # E :
- nop
-
- cmpbge zero, t2, t8 # E : bits set iff null found
- or t0, t3, t0 # E :
- beq a2, $a_eoc # U :
- bne t8, $a_eos # U : 2nd branch in a quad. Bad.
-
- /* On entry to this basic block:
- * t0 == a source quad not containing a null.
- * a0 - current aligned destination address
- * a1 - current aligned source address
- * a2 - count of quadwords to move.
- * NOTE: Loop improvement - unrolling this is going to be
- * a huge win, since we're going to stall otherwise.
- * Fix this later. For _really_ large copies, look
- * at using wh64 on a look-ahead basis. See the code
- * in clear_user.S and copy_user.S.
- * Presumably, since (a0) and (a1) do not overlap (by C definition)
- * Lots of nops here:
- * - Separate loads from stores
- * - Keep it to 1 branch/quadpack so the branch predictor
- * can train.
- */
-$a_loop:
- stq_u t0, 0(a0) # L :
- addq a0, 8, a0 # E :
- nop
- subq a2, 1, a2 # E :
-
- EX( ldq_u t0, 0(a1) ) # L :
- addq a1, 8, a1 # E :
- cmpbge zero, t0, t8 # E : Stall 2 cycles on t0
- beq a2, $a_eoc # U :
-
- beq t8, $a_loop # U :
- nop
- nop
- nop
-
- /* Take care of the final (partial) word store. At this point
- * the end-of-count bit is set in t8 iff it applies.
- *
- * On entry to this basic block we have:
- * t0 == the source word containing the null
- * t8 == the cmpbge mask that found it.
- */
-$a_eos:
- negq t8, t12 # E : find low bit set
- and t8, t12, t12 # E :
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t12, t6, t8 # E :
- zapnot t0, t8, t0 # U : clear src bytes > null
- zap t1, t8, t1 # U : clear dst bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # L :
- br $finish_up # L0 :
- nop
- nop
-
- /* Add the end-of-count bit to the eos detection bitmask. */
- .align 4
-$a_eoc:
- or t10, t8, t8
- br $a_eos
- nop
- nop
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 4
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # L : load second src word
- addq a1, 8, a1 # E :
- mskql t0, a0, t0 # U : mask trailing garbage in dst
- extqh t2, a1, t4 # U :
-
- or t1, t4, t1 # E : first aligned src word complete
- mskqh t1, a0, t1 # U : mask leading garbage in src
- or t0, t1, t0 # E : first output word complete
- or t0, t6, t6 # E : mask original data for zero test
-
- cmpbge zero, t6, t8 # E :
- beq a2, $u_eocfin # U :
- bne t8, $u_final # U : bad news - 2nd branch in a quad
- lda t6, -1 # E : mask out the bits we have
-
- mskql t6, a1, t6 # U : already seen
- stq_u t0, 0(a0) # L : store first output word
- or t6, t2, t2 # E :
- cmpbge zero, t2, t8 # E : find nulls in second partial
-
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
- bne t8, $u_late_head_exit # U :
- nop
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # U : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # L : read next high-order source word
- addq a1, 8, a1 # E :
- cmpbge zero, t2, t8 # E :
-
- beq a2, $u_eoc # U :
- bne t8, $u_eos # U :
- nop
- nop
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- /*
- * Extra nops here:
- * separate load quads from store quads
- * only one branch/quad to permit predictor training
- */
-
- .align 4
-$u_loop:
- extqh t2, a1, t0 # U : extract high bits for current word
- addq a1, 8, a1 # E :
- extql t2, a1, t3 # U : extract low bits for next time
- addq a0, 8, a0 # E :
-
- or t0, t1, t0 # E : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # L : load high word for next time
- subq a2, 1, a2 # E :
- nop
-
- stq_u t0, -8(a0) # L : save the current word
- mov t3, t1 # E :
- cmpbge zero, t2, t8 # E : test new word for eos
- beq a2, $u_eoc # U :
-
- beq t8, $u_loop # U :
- nop
- nop
- nop
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
- .align 4
-$u_eos:
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E : first (partial) source word complete
- cmpbge zero, t0, t8 # E : is the null in this first bit?
- nop
-
- bne t8, $u_final # U :
- stq_u t0, 0(a0) # L : the null was in the high-order bits
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
-
- .align 4
-$u_late_head_exit:
- extql t2, a1, t0 # U :
- cmpbge zero, t0, t8 # E :
- or t8, t10, t6 # E :
- cmoveq a2, t6, t8 # E :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
- .align 4
-$u_final:
- negq t8, t6 # E : isolate low bit set
- and t6, t8, t12 # E :
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t6, t12, t8 # E :
- zapnot t0, t8, t0 # U : kill source bytes > null
- zap t1, t8, t1 # U : kill dest bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # E :
- br $finish_up # U :
- nop
- nop
-
- .align 4
-$u_eoc: # end-of-count
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E :
- cmpbge zero, t0, t8 # E :
- nop
-
- .align 4
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8 # E :
- br $u_final # U :
- nop
- nop
-
- /* Unaligned copy entry point. */
- .align 4
-$unaligned:
-
- srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8
- and a0, 7, t4 # E : find dest misalignment
- and a1, 7, t5 # E : find src misalignment
- mov zero, t0 # E :
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t6 # E :
- beq t4, 1f # U :
- ldq_u t0, 0(a0) # L :
- lda t6, -1 # E :
-
- mskql t6, a0, t6 # E :
- nop
- nop
- nop
-
- .align 4
-1:
- subq a1, t4, a1 # E : sub dest misalignment from src addr
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
- cmplt t4, t5, t12 # E :
- extql t1, a1, t1 # U : shift src into place
- lda t2, -1 # E : for creating masks later
-
- beq t12, $u_head # U :
- mskqh t2, t5, t2 # U : begin src byte validity mask
- cmpbge zero, t1, t8 # E : is there a zero?
- nop
-
- extql t2, a1, t2 # U :
- or t8, t10, t5 # E : test for end-of-count too
- cmpbge zero, t2, t3 # E :
- cmoveq a2, t5, t8 # E : Latency=2, extra map slot
-
- nop # E : goes with cmov
- andnot t8, t3, t8 # E :
- beq t8, $u_head # U :
- nop
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # L :
- negq t8, t6 # E : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # U :
- and t6, t8, t12 # E :
-
- subq t12, 1, t6 # E :
- or t6, t12, t8 # E :
- zapnot t2, t8, t2 # U : prepare source word; mirror changes
- zapnot t1, t8, t1 # U : to source validity mask
-
- andnot t0, t2, t0 # E : zero place for source to reside
- or t0, t1, t0 # E : and put it there
- stq_u t0, 0(a0) # L :
- nop
-
- .align 4
-$finish_up:
- zapnot t0, t12, t4 # U : was last byte written null?
- and t12, 0xf0, t3 # E : binary search for the address of the
- cmovne t4, 1, t4 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- and t12, 0xcc, t2 # E : last byte written
- and t12, 0xaa, t1 # E :
- cmovne t3, 4, t3 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- bic a0, 7, t0
- cmovne t2, 2, t2 # E : Latency=2, extra map slot
- nop # E : with cmovne
- nop
-
- cmovne t1, 1, t1 # E : Latency=2, extra map slot
- nop # E : with cmovne
- addq t0, t3, t0 # E :
- addq t1, t2, t1 # E :
-
- addq t0, t1, t0 # E :
- addq t0, t4, t0 # add one if we filled the buffer
- subq t0, v0, v0 # find string length
- ret # L0 :
-
- .align 4
-$zerolength:
- nop
- nop
- nop
- clr v0
-
-$exception:
- nop
- nop
- nop
- ret
-
- .end __strncpy_from_user
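The $finish_up block above recovers the byte count from a single-bit cmpbge lane mask: it binary-searches the set bit with the 0xf0/0xcc/0xaa masks, adds that lane offset to the aligned address of the last quadword stored, and adds one more if the buffer filled without a NUL. A minimal C sketch of that computation follows; the function and parameter names are illustrative only, not kernel code.

#include <stddef.h>

/* 'mask' has exactly one bit set, marking the byte lane of the last byte
 * stored in the final 8-byte word at 'last_word' (== dst & ~7); 'start' is
 * the destination the caller passed in; 'last_byte_nonzero' is 1 when the
 * buffer filled before a NUL was copied (the t4 value in the assembly). */
static size_t finish_up(const unsigned char *start,
                        const unsigned char *last_word,
                        unsigned int mask,
                        int last_byte_nonzero)
{
    size_t lane = 0;                 /* binary search for the set bit, as the asm does */
    if (mask & 0xf0) lane += 4;
    if (mask & 0xcc) lane += 2;
    if (mask & 0xaa) lane += 1;
    /* return value = address of last byte written - start (+1 if no NUL fit) */
    return (size_t)(last_word + lane - start) + (last_byte_nonzero ? 1 : 0);
}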
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
deleted file mode 100644
index 57e0d77b81a6..000000000000
--- a/arch/alpha/lib/ev67-strlen_user.S
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * arch/alpha/lib/ev67-strlen_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
- *
- * Return the length of the string including the NULL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * Try not to change the actual algorithm if possible for consistency.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 4
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
- nop
- nop
- nop
-
- .globl __strnlen_user
-
- .align 4
-__strnlen_user:
- .prologue 0
- EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned)
- lda t1, -1(zero) # E :
-
- insqh t1, a0, t1 # U :
- andnot a0, 7, v0 # E :
- or t1, t0, t0 # E :
- subq a0, 1, a0 # E : get our +1 for the return
-
- cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2 # E :
- subq a0, v0, t0 # E :
- bne t1, $found # U :
-
- addq t2, t0, t2 # E :
- addq a1, 1, a1 # E :
- nop # E :
- nop # E :
-
- .align 4
-$loop: ble t2, $limit # U :
- EX( ldq t0, 8(v0) ) # L :
- nop # E :
- nop # E :
-
- cmpbge zero, t0, t1 # E :
- subq t2, 8, t2 # E :
- addq v0, 8, v0 # E : addr += 8
- beq t1, $loop # U :
-
-$found: cttz t1, t2 # U0 :
- addq v0, t2, v0 # E :
- subq v0, a0, v0 # E :
- ret # L0 :
-
-$exception:
- nop
- nop
- nop
- ret
-
- .align 4 # currently redundant
-$limit:
- nop
- nop
- subq a1, t2, v0
- ret
-
- .end __strlen_user
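The deleted routine scans a quadword at a time: cmpbge flags any zero bytes in the loaded word and cttz turns the first flag into a byte index. Below is a rough C equivalent of that word-at-a-time scan; it returns plain strnlen semantics (length not counting the NUL), relies on the GCC/Clang __builtin_ctzll builtin and a little-endian layout, and is an illustration of the technique rather than a drop-in replacement.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t strnlen_wordwise(const char *s, size_t limit)
{
    size_t n = 0;

    while (n + 8 <= limit) {
        uint64_t w;
        memcpy(&w, s + n, 8);                      /* unaligned-safe load */
        /* High bit of each byte is set iff that byte is zero; the least
         * significant set flag marks the first zero byte (little-endian). */
        uint64_t zero = (w - 0x0101010101010101ULL) & ~w & 0x8080808080808080ULL;
        if (zero)
            return n + ((size_t)__builtin_ctzll(zero) >> 3);
        n += 8;
    }
    while (n < limit && s[n])                      /* trailing partial word */
        n++;
    return n;                                      /* == limit if no NUL seen */
}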
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
deleted file mode 100644
index 508a18e96479..000000000000
--- a/arch/alpha/lib/strlen_user.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * arch/alpha/lib/strlen_user.S
- *
- * Return the length of the string including the NUL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 3
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
-
- .globl __strnlen_user
-
- .align 3
-__strnlen_user:
- .prologue 0
-
- EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned)
- lda t1, -1(zero)
- insqh t1, a0, t1
- andnot a0, 7, v0
- or t1, t0, t0
- subq a0, 1, a0 # get our +1 for the return
- cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2
- subq a0, v0, t0
- bne t1, $found
-
- addq t2, t0, t2
- addq a1, 1, a1
-
- .align 3
-$loop: ble t2, $limit
- EX( ldq t0, 8(v0) )
- subq t2, 8, t2
- addq v0, 8, v0 # addr += 8
- cmpbge zero, t0, t1
- beq t1, $loop
-
-$found: negq t1, t2 # clear all but least set bit
- and t1, t2, t1
-
- and t1, 0xf0, t2 # binary search for that set bit
- and t1, 0xcc, t3
- and t1, 0xaa, t4
- cmovne t2, 4, t2
- cmovne t3, 2, t3
- cmovne t4, 1, t4
- addq t2, t3, t2
- addq v0, t4, v0
- addq v0, t2, v0
- nop # dual issue next two on ev4 and ev5
- subq v0, a0, v0
-$exception:
- ret
-
- .align 3 # currently redundant
-$limit:
- subq a1, t2, v0
- ret
-
- .end __strlen_user
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
deleted file mode 100644
index 73ee21160ff7..000000000000
--- a/arch/alpha/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * arch/alpha/lib/strncpy_from_user.S
- * Contributed by Richard Henderson (rth@tamu.edu)
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 3
-$aligned:
- /* On entry to this basic block:
- t0 == the first destination word for masking back in
- t1 == the first source word. */
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- lda t2, -1 # e1 : build a mask against false zero
- mskqh t2, a1, t2 # e0 : detection in the src word
- mskqh t1, a1, t3 # e0 :
- ornot t1, t2, t2 # .. e1 :
- mskql t0, a1, t0 # e0 : assemble the first output word
- cmpbge zero, t2, t8 # .. e1 : bits set iff null found
- or t0, t3, t0 # e0 :
- beq a2, $a_eoc # .. e1 :
- bne t8, $a_eos # .. e1 :
-
- /* On entry to this basic block:
- t0 == a source word not containing a null. */
-
-$a_loop:
- stq_u t0, 0(a0) # e0 :
- addq a0, 8, a0 # .. e1 :
- EX( ldq_u t0, 0(a1) ) # e0 :
- addq a1, 8, a1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t0, t8 # .. e1 (stall)
- beq a2, $a_eoc # e1 :
- beq t8, $a_loop # e1 :
-
- /* Take care of the final (partial) word store. At this point
- the end-of-count bit is set in t8 iff it applies.
-
- On entry to this basic block we have:
- t0 == the source word containing the null
- t8 == the cmpbge mask that found it. */
-
-$a_eos:
- negq t8, t12 # e0 : find low bit set
- and t8, t12, t12 # e1 (stall)
-
- /* For the sake of the cache, don't read a destination word
- if we're not going to need it. */
- and t12, 0x80, t6 # e0 :
- bne t6, 1f # .. e1 (zdb)
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t12, t6, t8 # e0 :
- unop #
- zapnot t0, t8, t0 # e0 : clear src bytes > null
- zap t1, t8, t1 # .. e1 : clear dst bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0)
- br $finish_up
-
- /* Add the end-of-count bit to the eos detection bitmask. */
-$a_eoc:
- or t10, t8, t8
- br $a_eos
-
- /*** The Function Entry Point ***/
- .align 3
-__strncpy_from_user:
- mov a0, v0 # save the string start
- beq a2, $zerolength
-
- /* Are source and destination co-aligned? */
- xor a0, a1, t1 # e0 :
- and a0, 7, t0 # .. e1 : find dest misalignment
- and t1, 7, t1 # e0 :
- addq a2, t0, a2 # .. e1 : bias count by dest misalignment
- subq a2, 1, a2 # e0 :
- and a2, 7, t2 # e1 :
- srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
- addq zero, 1, t10 # .. e1 :
- sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
- bne t1, $unaligned # .. e1 :
-
- /* We are co-aligned; take care of a partial first word. */
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first src word
- addq a1, 8, a1 # .. e1 :
-
- beq t0, $aligned # avoid loading dest word if not needed
- ldq_u t0, 0(a0) # e0 :
- br $aligned # .. e1 :
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 3
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # e0 : load second src word
- addq a1, 8, a1 # .. e1 :
- mskql t0, a0, t0 # e0 : mask trailing garbage in dst
- extqh t2, a1, t4 # e0 :
- or t1, t4, t1 # e1 : first aligned src word complete
- mskqh t1, a0, t1 # e0 : mask leading garbage in src
- or t0, t1, t0 # e0 : first output word complete
- or t0, t6, t6 # e1 : mask original data for zero test
- cmpbge zero, t6, t8 # e0 :
- beq a2, $u_eocfin # .. e1 :
- bne t8, $u_final # e1 :
-
- lda t6, -1 # e1 : mask out the bits we have
- mskql t6, a1, t6 # e0 : already seen
- stq_u t0, 0(a0) # e0 : store first output word
- or t6, t2, t2 # .. e1 :
- cmpbge zero, t2, t8 # e0 : find nulls in second partial
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e0 :
- bne t8, $u_late_head_exit # .. e1 :
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # e0 : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word
- addq a1, 8, a1 # e0 :
- cmpbge zero, t2, t8 # e1 (stall)
- beq a2, $u_eoc # e1 :
- bne t8, $u_eos # e1 :
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- .align 3
-$u_loop:
- extqh t2, a1, t0 # e0 : extract high bits for current word
- addq a1, 8, a1 # .. e1 :
- extql t2, a1, t3 # e0 : extract low bits for next time
- addq a0, 8, a0 # .. e1 :
- or t0, t1, t0 # e0 : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time
- stq_u t0, -8(a0) # e0 : save the current word
- mov t3, t1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t2, t8 # .. e1 : test new word for eos
- beq a2, $u_eoc # e1 :
- beq t8, $u_loop # e1 :
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
-$u_eos:
- extqh t2, a1, t0 # e0 :
- or t0, t1, t0 # e1 : first (partial) source word complete
-
- cmpbge zero, t0, t8 # e0 : is the null in this first bit?
- bne t8, $u_final # .. e1 (zdb)
-
- stq_u t0, 0(a0) # e0 : the null was in the high-order bits
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e1 :
-
-$u_late_head_exit:
- extql t2, a1, t0 # .. e0 :
- cmpbge zero, t0, t8 # e0 :
- or t8, t10, t6 # e1 :
- cmoveq a2, t6, t8 # e0 :
- nop # .. e1 :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
-$u_final:
- negq t8, t6 # e0 : isolate low bit set
- and t6, t8, t12 # e1 :
-
- and t12, 0x80, t6 # e0 : avoid dest word load if we can
- bne t6, 1f # .. e1 (zdb)
-
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t6, t12, t8 # e0 :
- zapnot t0, t8, t0 # .. e1 : kill source bytes > null
- zap t1, t8, t1 # e0 : kill dest bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0) # e0 :
- br $finish_up
-
-$u_eoc: # end-of-count
- extqh t2, a1, t0
- or t0, t1, t0
- cmpbge zero, t0, t8
-
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8
- br $u_final
-
- /* Unaligned copy entry point. */
- .align 3
-$unaligned:
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first source word
-
- and a0, 7, t4 # .. e1 : find dest misalignment
- and a1, 7, t5 # e0 : find src misalignment
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t0 # .. e1 :
- mov zero, t6 # e0 :
- beq t4, 1f # .. e1 :
- ldq_u t0, 0(a0) # e0 :
- lda t6, -1 # .. e1 :
- mskql t6, a0, t6 # e0 :
-1:
- subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
-
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
-
- cmplt t4, t5, t12 # e1 :
- extql t1, a1, t1 # .. e0 : shift src into place
- lda t2, -1 # e0 : for creating masks later
- beq t12, $u_head # e1 :
-
- mskqh t2, t5, t2 # e0 : begin src byte validity mask
- cmpbge zero, t1, t8 # .. e1 : is there a zero?
- extql t2, a1, t2 # e0 :
- or t8, t10, t5 # .. e1 : test for end-of-count too
- cmpbge zero, t2, t3 # e0 :
- cmoveq a2, t5, t8 # .. e1 :
- andnot t8, t3, t8 # e0 :
- beq t8, $u_head # .. e1 (zdb)
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # e0 :
- negq t8, t6 # .. e1 : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # e0 :
- and t6, t8, t12 # .. e1 :
- subq t12, 1, t6 # e0 :
- or t6, t12, t8 # e1 :
-
- zapnot t2, t8, t2 # e0 : prepare source word; mirror changes
- zapnot t1, t8, t1 # .. e1 : to source validity mask
-
- andnot t0, t2, t0 # e0 : zero place for source to reside
- or t0, t1, t0 # e1 : and put it there
- stq_u t0, 0(a0) # e0 :
-
-$finish_up:
- zapnot t0, t12, t4 # was last byte written null?
- cmovne t4, 1, t4
-
- and t12, 0xf0, t3 # binary search for the address of the
- and t12, 0xcc, t2 # last byte written
- and t12, 0xaa, t1
- bic a0, 7, t0
- cmovne t3, 4, t3
- cmovne t2, 2, t2
- cmovne t1, 1, t1
- addq t0, t3, t0
- addq t1, t2, t1
- addq t0, t1, t0
- addq t0, t4, t0 # add one if we filled the buffer
-
- subq t0, v0, v0 # find string length
- ret
-
-$zerolength:
- clr v0
-$exception:
- ret
-
- .end __strncpy_from_user
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1a84ef..0c4132dd3507 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
const struct exception_table_entry *fixup;
int fault, si_code = SEGV_MAPERR;
siginfo_t info;
+ unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (cause > 0 ? FAULT_FLAG_WRITE : 0));
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
(or is suppressed by the PALcode). Support that for older CPUs
@@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto vmalloc_fault;
#endif
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
- up_read(&mm->mmap_sem);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
return;
/* Something tried to access memory that isn't in our memory map.
@@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
+ up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
+ up_read(&mm->mmap_sem);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
info.si_signo = SIGBUS;
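The retry handling added to do_page_fault() is spread over several hunks; its shape is: start with FAULT_FLAG_ALLOW_RETRY set, and if the core returns VM_FAULT_RETRY, clear the flag and try once more (mmap_sem has already been dropped for us on that path). A self-contained toy model of that one-retry discipline is sketched below, with stand-in names and flag values; it assumes nothing about the real kernel APIs.

#include <stdio.h>

#define ALLOW_RETRY 0x1   /* stand-in for FAULT_FLAG_ALLOW_RETRY */
#define WANT_WRITE  0x2   /* stand-in for FAULT_FLAG_WRITE */
#define FAULT_RETRY 0x4   /* "please retry" result bit, like VM_FAULT_RETRY */

static unsigned int toy_handle_fault(unsigned int flags, int *attempts)
{
    ++*attempts;
    /* Pretend the first attempt had to drop the lock and wait for I/O. */
    return (*attempts == 1 && (flags & ALLOW_RETRY)) ? FAULT_RETRY : 0;
}

int main(void)
{
    unsigned int flags = ALLOW_RETRY | WANT_WRITE;
    int attempts = 0;

retry:
    if (toy_handle_fault(flags, &attempts) & FAULT_RETRY) {
        flags &= ~ALLOW_RETRY;    /* retry at most once, as the patch does */
        goto retry;
    }
    printf("resolved after %d attempt(s)\n", attempts);   /* prints 2 */
    return 0;
}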
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index a0a5d27aa215..b8ce18f485d3 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,6 +12,7 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
+#include <asm/special_insns.h>
#include "op_impl.h"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b25c9d3c379a..c5f9ae5dbd1a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -11,6 +11,7 @@ config ARM
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
@@ -37,7 +38,7 @@ config ARM
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_IRQ_PROBE
+ select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
select CPU_PM if (SUSPEND || CPU_IDLE)
select GENERIC_PCI_IOMAP
@@ -45,6 +46,9 @@ config ARM
select GENERIC_SMP_IDLE_THREAD
select KTIME_SCALAR
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@@ -121,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
-config GENERIC_LOCKBREAK
- bool
- default y
- depends on SMP && PREEMPT
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
@@ -279,7 +278,6 @@ config ARCH_INTEGRATOR
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
- select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_FPGA_IRQ
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
@@ -336,7 +334,6 @@ config ARCH_VEXPRESS
select ICST
select NO_IOPORT
select PLAT_VERSATILE
- select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
@@ -1008,7 +1005,6 @@ config ARCH_VT8500
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
- select HAVE_PWM
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
@@ -1149,6 +1145,7 @@ config PLAT_ORION
bool
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
select COMMON_CLK
config PLAT_PXA
@@ -2006,6 +2003,25 @@ config ARM_ATAG_DTB_COMPAT
bootloaders, this option allows zImage to extract the information
from the ATAG list and store it at run time into the appended DTB.
+choice
+ prompt "Kernel command line type" if ARM_ATAG_DTB_COMPAT
+ default ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
+
+config ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
+ bool "Use bootloader kernel arguments if available"
+ help
+ Uses the command-line options passed by the boot loader instead of
+ the device tree bootargs property. If the boot loader doesn't provide
+ any, the device tree bootargs property will be used.
+
+config ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND
+ bool "Extend with bootloader kernel arguments"
+ help
+ The command-line arguments provided by the boot loader will be
+ appended to the device tree bootargs property.
+
+endchoice
+
config CMDLINE
string "Default kernel command string"
default ""
@@ -2128,6 +2144,7 @@ source "drivers/cpufreq/Kconfig"
config CPU_FREQ_IMX
tristate "CPUfreq driver for i.MX CPUs"
depends on ARCH_MXC && CPU_FREQ
+ select CPU_FREQ_TABLE
help
This enables the CPUfreq driver for i.MX CPUs.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a03b5a7059e2..f15f82bf3a50 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -395,4 +395,13 @@ config ARM_KPROBES_TEST
help
Perform tests of kprobes API and instruction set simulation.
+config PID_IN_CONTEXTIDR
+ bool "Write the current PID to the CONTEXTIDR register"
+ depends on CPU_COPY_V6
+ help
+ Enabling this option causes the kernel to write the current PID to
+ the PROCID field of the CONTEXTIDR register, at the expense of some
+ additional instructions during context switch. Say Y here only if you
+ are planning to use hardware trace tools with this kernel.
+
endmenu
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4d6d31115cf2..30eae87ead6d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,6 +10,9 @@
#
# Copyright (C) 1995-2001 by Russell King
+# Ensure linker flags are correct
+LDFLAGS :=
+
LDFLAGS_vmlinux :=-p --no-undefined -X
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 797f04bedb47..aabc02a68482 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -1,6 +1,12 @@
#include <asm/setup.h>
#include <libfdt.h>
+#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND)
+#define do_extend_cmdline 1
+#else
+#define do_extend_cmdline 0
+#endif
+
static int node_offset(void *fdt, const char *node_path)
{
int offset = fdt_path_offset(fdt, node_path);
@@ -36,6 +42,48 @@ static int setprop_cell(void *fdt, const char *node_path,
return fdt_setprop_cell(fdt, offset, property, val);
}
+static const void *getprop(const void *fdt, const char *node_path,
+ const char *property, int *len)
+{
+ int offset = fdt_path_offset(fdt, node_path);
+
+ if (offset == -FDT_ERR_NOTFOUND)
+ return NULL;
+
+ return fdt_getprop(fdt, offset, property, len);
+}
+
+static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
+{
+ char cmdline[COMMAND_LINE_SIZE];
+ const char *fdt_bootargs;
+ char *ptr = cmdline;
+ int len = 0;
+
+ /* copy the fdt command line into the buffer */
+ fdt_bootargs = getprop(fdt, "/chosen", "bootargs", &len);
+ if (fdt_bootargs)
+ if (len < COMMAND_LINE_SIZE) {
+ memcpy(ptr, fdt_bootargs, len);
+ /* len is the length of the string
+ * including the NULL terminator */
+ ptr += len - 1;
+ }
+
+ /* and append the ATAG_CMDLINE */
+ if (fdt_cmdline) {
+ len = strlen(fdt_cmdline);
+ if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) {
+ *ptr++ = ' ';
+ memcpy(ptr, fdt_cmdline, len);
+ ptr += len;
+ }
+ }
+ *ptr = '\0';
+
+ setprop_string(fdt, "/chosen", "bootargs", cmdline);
+}
+
/*
* Convert and fold provided ATAGs into the provided FDT.
*
@@ -72,8 +120,18 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
for_each_tag(atag, atag_list) {
if (atag->hdr.tag == ATAG_CMDLINE) {
- setprop_string(fdt, "/chosen", "bootargs",
- atag->u.cmdline.cmdline);
+ /* Append the ATAGS command line to the device tree
+ * command line.
+ * NB: This means that if the same parameter is set in
+ * the device tree and in the tags, the one from the
+ * tags will be chosen.
+ */
+ if (do_extend_cmdline)
+ merge_fdt_bootargs(fdt,
+ atag->u.cmdline.cmdline);
+ else
+ setprop_string(fdt, "/chosen", "bootargs",
+ atag->u.cmdline.cmdline);
} else if (atag->hdr.tag == ATAG_MEM) {
if (memcount >= sizeof(mem_reg_property)/4)
continue;
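merge_fdt_bootargs() above implements the ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND policy: the device-tree bootargs come first, the ATAG command line is appended after a single space, and the result is bounded by COMMAND_LINE_SIZE. The sketch below models only that string-merging policy in plain C; CMDLINE_BUF, merge_cmdline and the sample arguments are made up for illustration, and the real code reads and writes the property through libfdt.

#include <stdio.h>
#include <string.h>

#define CMDLINE_BUF 64   /* stand-in for COMMAND_LINE_SIZE */

static void merge_cmdline(char *out, const char *fdt_bootargs, const char *atag_cmdline)
{
    char *p = out;

    if (fdt_bootargs && strlen(fdt_bootargs) < CMDLINE_BUF) {
        memcpy(p, fdt_bootargs, strlen(fdt_bootargs));   /* device tree part first */
        p += strlen(fdt_bootargs);
    }
    if (atag_cmdline && (size_t)(p - out) + strlen(atag_cmdline) + 2 < CMDLINE_BUF) {
        *p++ = ' ';                                      /* then the ATAG command line */
        memcpy(p, atag_cmdline, strlen(atag_cmdline));
        p += strlen(atag_cmdline);
    }
    *p = '\0';
}

int main(void)
{
    char buf[CMDLINE_BUF];

    merge_cmdline(buf, "console=ttyS0,115200", "root=/dev/mmcblk0p2 rw");
    printf("%s\n", buf);   /* console=ttyS0,115200 root=/dev/mmcblk0p2 rw */
    return 0;
}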
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 59509c48d7e5..bd0cff3f808c 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -154,5 +154,10 @@
#size-cells = <0>;
ti,hwmods = "i2c3";
};
+
+ wdt2: wdt@44e35000 {
+ compatible = "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+ };
};
};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index e1fa7e6edfe8..71d6b5d0daf1 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -12,7 +12,7 @@
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
- * Contains definitions specific to the Armada 370 SoC that are not
+ * Contains definitions specific to the Armada XP SoC that are not
* common to all Armada SoCs.
*/
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb22..96514c134e54 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
chosen {
- bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
};
ahb {
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 2e1cfa00c25b..9fecf1ae777b 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -130,6 +130,12 @@
clocks = <&eclk>;
};
+ memory-controller@fff00000 {
+ compatible = "calxeda,hb-ddr-ctrl";
+ reg = <0xfff00000 0x1000>;
+ interrupts = <0 91 4>;
+ };
+
ipc@fff20000 {
compatible = "arm,pl320", "arm,primecell";
reg = <0xfff20000 0x1000>;
@@ -275,6 +281,12 @@
};
};
+ sregs@fff3c200 {
+ compatible = "calxeda,hb-sregs-l2-ecc";
+ reg = <0xfff3c200 0x100>;
+ interrupts = <0 71 4 0 72 4>;
+ };
+
dma@fff3d000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xfff3d000 0x1000>;
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index a874dbfb5ae6..e6138310e5ce 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -51,11 +51,11 @@
dma-apbh@80004000 {
compatible = "fsl,imx23-dma-apbh";
- reg = <0x80004000 2000>;
+ reg = <0x80004000 0x2000>;
};
ecc@80008000 {
- reg = <0x80008000 2000>;
+ reg = <0x80008000 0x2000>;
status = "disabled";
};
@@ -63,7 +63,7 @@
compatible = "fsl,imx23-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
reg-names = "gpmi-nand", "bch";
interrupts = <13>, <56>;
interrupt-names = "gpmi-dma", "bch";
@@ -72,14 +72,14 @@
};
ssp0: ssp@80010000 {
- reg = <0x80010000 2000>;
+ reg = <0x80010000 0x2000>;
interrupts = <15 14>;
fsl,ssp-dma-channel = <1>;
status = "disabled";
};
etm@80014000 {
- reg = <0x80014000 2000>;
+ reg = <0x80014000 0x2000>;
status = "disabled";
};
@@ -87,7 +87,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx23-pinctrl", "simple-bus";
- reg = <0x80018000 2000>;
+ reg = <0x80018000 0x2000>;
gpio0: gpio@0 {
compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
@@ -273,32 +273,32 @@
};
emi@80020000 {
- reg = <0x80020000 2000>;
+ reg = <0x80020000 0x2000>;
status = "disabled";
};
dma-apbx@80024000 {
compatible = "fsl,imx23-dma-apbx";
- reg = <0x80024000 2000>;
+ reg = <0x80024000 0x2000>;
};
dcp@80028000 {
- reg = <0x80028000 2000>;
+ reg = <0x80028000 0x2000>;
status = "disabled";
};
pxp@8002a000 {
- reg = <0x8002a000 2000>;
+ reg = <0x8002a000 0x2000>;
status = "disabled";
};
ocotp@8002c000 {
- reg = <0x8002c000 2000>;
+ reg = <0x8002c000 0x2000>;
status = "disabled";
};
axi-ahb@8002e000 {
- reg = <0x8002e000 2000>;
+ reg = <0x8002e000 0x2000>;
status = "disabled";
};
@@ -310,14 +310,14 @@
};
ssp1: ssp@80034000 {
- reg = <0x80034000 2000>;
+ reg = <0x80034000 0x2000>;
interrupts = <2 20>;
fsl,ssp-dma-channel = <2>;
status = "disabled";
};
tvenc@80038000 {
- reg = <0x80038000 2000>;
+ reg = <0x80038000 0x2000>;
status = "disabled";
};
};
@@ -330,37 +330,37 @@
ranges;
clkctl@80040000 {
- reg = <0x80040000 2000>;
+ reg = <0x80040000 0x2000>;
status = "disabled";
};
saif0: saif@80042000 {
- reg = <0x80042000 2000>;
+ reg = <0x80042000 0x2000>;
status = "disabled";
};
power@80044000 {
- reg = <0x80044000 2000>;
+ reg = <0x80044000 0x2000>;
status = "disabled";
};
saif1: saif@80046000 {
- reg = <0x80046000 2000>;
+ reg = <0x80046000 0x2000>;
status = "disabled";
};
audio-out@80048000 {
- reg = <0x80048000 2000>;
+ reg = <0x80048000 0x2000>;
status = "disabled";
};
audio-in@8004c000 {
- reg = <0x8004c000 2000>;
+ reg = <0x8004c000 0x2000>;
status = "disabled";
};
lradc@80050000 {
- reg = <0x80050000 2000>;
+ reg = <0x80050000 0x2000>;
status = "disabled";
};
@@ -370,26 +370,26 @@
};
i2c@80058000 {
- reg = <0x80058000 2000>;
+ reg = <0x80058000 0x2000>;
status = "disabled";
};
rtc@8005c000 {
compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc";
- reg = <0x8005c000 2000>;
+ reg = <0x8005c000 0x2000>;
interrupts = <22>;
};
pwm: pwm@80064000 {
compatible = "fsl,imx23-pwm";
- reg = <0x80064000 2000>;
+ reg = <0x80064000 0x2000>;
#pwm-cells = <2>;
fsl,pwm-number = <5>;
status = "disabled";
};
timrot@80068000 {
- reg = <0x80068000 2000>;
+ reg = <0x80068000 0x2000>;
status = "disabled";
};
@@ -429,7 +429,7 @@
ranges;
usbctrl@80080000 {
- reg = <0x80080000 0x10000>;
+ reg = <0x80080000 0x40000>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
index d3f8296e19e0..0a8978a40ece 100644
--- a/arch/arm/boot/dts/imx27-3ds.dts
+++ b/arch/arm/boot/dts/imx27-3ds.dts
@@ -27,7 +27,7 @@
status = "okay";
};
- uart@1000a000 {
+ uart1: serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 00bae3aad5ab..5303ab680a34 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -19,6 +19,12 @@
serial3 = &uart4;
serial4 = &uart5;
serial5 = &uart6;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
};
avic: avic-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 915db89e3644..3fa6d190fab4 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -57,18 +57,18 @@
};
hsadc@80002000 {
- reg = <0x80002000 2000>;
+ reg = <0x80002000 0x2000>;
interrupts = <13 87>;
status = "disabled";
};
dma-apbh@80004000 {
compatible = "fsl,imx28-dma-apbh";
- reg = <0x80004000 2000>;
+ reg = <0x80004000 0x2000>;
};
perfmon@80006000 {
- reg = <0x80006000 800>;
+ reg = <0x80006000 0x800>;
interrupts = <27>;
status = "disabled";
};
@@ -77,7 +77,7 @@
compatible = "fsl,imx28-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
reg-names = "gpmi-nand", "bch";
interrupts = <88>, <41>;
interrupt-names = "gpmi-dma", "bch";
@@ -86,28 +86,28 @@
};
ssp0: ssp@80010000 {
- reg = <0x80010000 2000>;
+ reg = <0x80010000 0x2000>;
interrupts = <96 82>;
fsl,ssp-dma-channel = <0>;
status = "disabled";
};
ssp1: ssp@80012000 {
- reg = <0x80012000 2000>;
+ reg = <0x80012000 0x2000>;
interrupts = <97 83>;
fsl,ssp-dma-channel = <1>;
status = "disabled";
};
ssp2: ssp@80014000 {
- reg = <0x80014000 2000>;
+ reg = <0x80014000 0x2000>;
interrupts = <98 84>;
fsl,ssp-dma-channel = <2>;
status = "disabled";
};
ssp3: ssp@80016000 {
- reg = <0x80016000 2000>;
+ reg = <0x80016000 0x2000>;
interrupts = <99 85>;
fsl,ssp-dma-channel = <3>;
status = "disabled";
@@ -117,7 +117,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-pinctrl", "simple-bus";
- reg = <0x80018000 2000>;
+ reg = <0x80018000 0x2000>;
gpio0: gpio@0 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
@@ -510,96 +510,96 @@
};
digctl@8001c000 {
- reg = <0x8001c000 2000>;
+ reg = <0x8001c000 0x2000>;
interrupts = <89>;
status = "disabled";
};
etm@80022000 {
- reg = <0x80022000 2000>;
+ reg = <0x80022000 0x2000>;
status = "disabled";
};
dma-apbx@80024000 {
compatible = "fsl,imx28-dma-apbx";
- reg = <0x80024000 2000>;
+ reg = <0x80024000 0x2000>;
};
dcp@80028000 {
- reg = <0x80028000 2000>;
+ reg = <0x80028000 0x2000>;
interrupts = <52 53 54>;
status = "disabled";
};
pxp@8002a000 {
- reg = <0x8002a000 2000>;
+ reg = <0x8002a000 0x2000>;
interrupts = <39>;
status = "disabled";
};
ocotp@8002c000 {
- reg = <0x8002c000 2000>;
+ reg = <0x8002c000 0x2000>;
status = "disabled";
};
axi-ahb@8002e000 {
- reg = <0x8002e000 2000>;
+ reg = <0x8002e000 0x2000>;
status = "disabled";
};
lcdif@80030000 {
compatible = "fsl,imx28-lcdif";
- reg = <0x80030000 2000>;
+ reg = <0x80030000 0x2000>;
interrupts = <38 86>;
status = "disabled";
};
can0: can@80032000 {
compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
- reg = <0x80032000 2000>;
+ reg = <0x80032000 0x2000>;
interrupts = <8>;
status = "disabled";
};
can1: can@80034000 {
compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
- reg = <0x80034000 2000>;
+ reg = <0x80034000 0x2000>;
interrupts = <9>;
status = "disabled";
};
simdbg@8003c000 {
- reg = <0x8003c000 200>;
+ reg = <0x8003c000 0x200>;
status = "disabled";
};
simgpmisel@8003c200 {
- reg = <0x8003c200 100>;
+ reg = <0x8003c200 0x100>;
status = "disabled";
};
simsspsel@8003c300 {
- reg = <0x8003c300 100>;
+ reg = <0x8003c300 0x100>;
status = "disabled";
};
simmemsel@8003c400 {
- reg = <0x8003c400 100>;
+ reg = <0x8003c400 0x100>;
status = "disabled";
};
gpiomon@8003c500 {
- reg = <0x8003c500 100>;
+ reg = <0x8003c500 0x100>;
status = "disabled";
};
simenet@8003c700 {
- reg = <0x8003c700 100>;
+ reg = <0x8003c700 0x100>;
status = "disabled";
};
armjtag@8003c800 {
- reg = <0x8003c800 100>;
+ reg = <0x8003c800 0x100>;
status = "disabled";
};
};
@@ -612,45 +612,45 @@
ranges;
clkctl@80040000 {
- reg = <0x80040000 2000>;
+ reg = <0x80040000 0x2000>;
status = "disabled";
};
saif0: saif@80042000 {
compatible = "fsl,imx28-saif";
- reg = <0x80042000 2000>;
+ reg = <0x80042000 0x2000>;
interrupts = <59 80>;
fsl,saif-dma-channel = <4>;
status = "disabled";
};
power@80044000 {
- reg = <0x80044000 2000>;
+ reg = <0x80044000 0x2000>;
status = "disabled";
};
saif1: saif@80046000 {
compatible = "fsl,imx28-saif";
- reg = <0x80046000 2000>;
+ reg = <0x80046000 0x2000>;
interrupts = <58 81>;
fsl,saif-dma-channel = <5>;
status = "disabled";
};
lradc@80050000 {
- reg = <0x80050000 2000>;
+ reg = <0x80050000 0x2000>;
status = "disabled";
};
spdif@80054000 {
- reg = <0x80054000 2000>;
+ reg = <0x80054000 0x2000>;
interrupts = <45 66>;
status = "disabled";
};
rtc@80056000 {
compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc";
- reg = <0x80056000 2000>;
+ reg = <0x80056000 0x2000>;
interrupts = <29>;
};
@@ -658,8 +658,9 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-i2c";
- reg = <0x80058000 2000>;
+ reg = <0x80058000 0x2000>;
interrupts = <111 68>;
+ clock-frequency = <100000>;
status = "disabled";
};
@@ -667,21 +668,22 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-i2c";
- reg = <0x8005a000 2000>;
+ reg = <0x8005a000 0x2000>;
interrupts = <110 69>;
+ clock-frequency = <100000>;
status = "disabled";
};
pwm: pwm@80064000 {
compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
- reg = <0x80064000 2000>;
+ reg = <0x80064000 0x2000>;
#pwm-cells = <2>;
fsl,pwm-number = <8>;
status = "disabled";
};
timrot@80068000 {
- reg = <0x80068000 2000>;
+ reg = <0x80068000 0x2000>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index de065b5976e6..59d9789e5508 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
aips@70000000 { /* aips-1 */
spba@70000000 {
esdhc@70004000 { /* ESDHC1 */
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
status = "okay";
};
@@ -53,7 +53,7 @@
spi-max-frequency = <6000000>;
reg = <0>;
interrupt-parent = <&gpio1>;
- interrupts = <8>;
+ interrupts = <8 0x4>;
regulators {
sw1_reg: sw1 {
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 922adefdd291..aba28dc87fc8 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -17,6 +17,10 @@
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
};
tzic: tz-interrupt-controller@e0000000 {
@@ -127,7 +131,7 @@
};
gpio1: gpio@73f84000 {
- compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f84000 0x4000>;
interrupts = <50 51>;
gpio-controller;
@@ -137,7 +141,7 @@
};
gpio2: gpio@73f88000 {
- compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f88000 0x4000>;
interrupts = <52 53>;
gpio-controller;
@@ -147,7 +151,7 @@
};
gpio3: gpio@73f8c000 {
- compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f8c000 0x4000>;
interrupts = <54 55>;
gpio-controller;
@@ -157,7 +161,7 @@
};
gpio4: gpio@73f90000 {
- compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx51-gpio", "fsl,imx35-gpio";
reg = <0x73f90000 0x4000>;
interrupts = <56 57>;
gpio-controller;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 5b8eafcdbeec..da895e93a999 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -64,12 +64,32 @@
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio2>;
- interrupts = <31>;
+ interrupts = <31 0x8>;
reg-io-width = <4>;
+ /*
+ * VDD33A and VDDVARIO of LAN9220 are supplied by
+ * SW4_3V3 of LTC3589. Until the regulator driver
+ * for this PMIC is available, we use a fixed dummy
+ * 3V3 regulator so that the LAN9220 driver can probe.
+ */
+ vdd33a-supply = <&reg_3p3v>;
+ vddvario-supply = <&reg_3p3v>;
smsc,irq-push-pull;
};
};
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 4e735edc78ed..cd37165edce5 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
tzic: tz-interrupt-controller@0fffc000 {
@@ -129,7 +136,7 @@
};
gpio1: gpio@53f84000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53f84000 0x4000>;
interrupts = <50 51>;
gpio-controller;
@@ -139,7 +146,7 @@
};
gpio2: gpio@53f88000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53f88000 0x4000>;
interrupts = <52 53>;
gpio-controller;
@@ -149,7 +156,7 @@
};
gpio3: gpio@53f8c000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53f8c000 0x4000>;
interrupts = <54 55>;
gpio-controller;
@@ -159,7 +166,7 @@
};
gpio4: gpio@53f90000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53f90000 0x4000>;
interrupts = <56 57>;
gpio-controller;
@@ -197,7 +204,7 @@
};
gpio5: gpio@53fdc000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53fdc000 0x4000>;
interrupts = <103 104>;
gpio-controller;
@@ -207,7 +214,7 @@
};
gpio6: gpio@53fe0000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53fe0000 0x4000>;
interrupts = <105 106>;
gpio-controller;
@@ -217,7 +224,7 @@
};
gpio7: gpio@53fe4000 {
- compatible = "fsl,imx53-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx53-gpio", "fsl,imx35-gpio";
reg = <0x53fe4000 0x4000>;
interrupts = <107 108>;
gpio-controller;
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index d42e851ceb97..72f30f3e6171 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -53,6 +53,7 @@
fsl,pins = <
144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+ 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
>;
};
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index c25d49584814..fd57079f71a9 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
cpus {
@@ -277,7 +284,7 @@
};
gpio1: gpio@0209c000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x0209c000 0x4000>;
interrupts = <0 66 0x04 0 67 0x04>;
gpio-controller;
@@ -287,7 +294,7 @@
};
gpio2: gpio@020a0000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020a0000 0x4000>;
interrupts = <0 68 0x04 0 69 0x04>;
gpio-controller;
@@ -297,7 +304,7 @@
};
gpio3: gpio@020a4000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020a4000 0x4000>;
interrupts = <0 70 0x04 0 71 0x04>;
gpio-controller;
@@ -307,7 +314,7 @@
};
gpio4: gpio@020a8000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020a8000 0x4000>;
interrupts = <0 72 0x04 0 73 0x04>;
gpio-controller;
@@ -317,7 +324,7 @@
};
gpio5: gpio@020ac000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020ac000 0x4000>;
interrupts = <0 74 0x04 0 75 0x04>;
gpio-controller;
@@ -327,7 +334,7 @@
};
gpio6: gpio@020b0000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020b0000 0x4000>;
interrupts = <0 76 0x04 0 77 0x04>;
gpio-controller;
@@ -337,7 +344,7 @@
};
gpio7: gpio@020b4000 {
- compatible = "fsl,imx6q-gpio", "fsl,imx31-gpio";
+ compatible = "fsl,imx6q-gpio", "fsl,imx35-gpio";
reg = <0x020b4000 0x4000>;
interrupts = <0 78 0x04 0 79 0x04>;
gpio-controller;
diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts
index dc09a735b04a..5bb0bf39d3b8 100644
--- a/arch/arm/boot/dts/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/kirkwood-dns320.dts
@@ -1,10 +1,10 @@
/dts-v1/;
-/include/ "kirkwood.dtsi"
+/include/ "kirkwood-dnskw.dtsi"
/ {
model = "D-Link DNS-320 NAS (Rev A1)";
- compatible = "dlink,dns-320-a1", "dlink,dns-320", "dlink,dns-kirkwood", "mrvl,kirkwood-88f6281", "mrvl,kirkwood";
+ compatible = "dlink,dns-320-a1", "dlink,dns-320", "dlink,dns-kirkwood", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -15,6 +15,31 @@
bootargs = "console=ttyS0,115200n8 earlyprintk";
};
+ gpio-leds {
+ compatible = "gpio-leds";
+ blue-power {
+ label = "dns320:blue:power";
+ gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */
+ linux,default-trigger = "default-on";
+ };
+ blue-usb {
+ label = "dns320:blue:usb";
+ gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */
+ };
+ orange-l_hdd {
+ label = "dns320:orange:l_hdd";
+ gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */
+ };
+ orange-r_hdd {
+ label = "dns320:orange:r_hdd";
+ gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */
+ };
+ orange-usb {
+ label = "dns320:orange:usb";
+ gpios = <&gpio1 3 1>; /* GPIO 35 Active Low */
+ };
+ };
+
ocp@f1000000 {
serial@12000 {
clock-frequency = <166666667>;
@@ -25,40 +50,5 @@
clock-frequency = <166666667>;
status = "okay";
};
-
- nand@3000000 {
- status = "okay";
-
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x500000>;
- };
-
- partition@600000 {
- label = "ramdisk";
- reg = <0x0600000 0x500000>;
- };
-
- partition@b00000 {
- label = "image";
- reg = <0x0b00000 0x6600000>;
- };
-
- partition@7100000 {
- label = "mini firmware";
- reg = <0x7100000 0xa00000>;
- };
-
- partition@7b00000 {
- label = "config";
- reg = <0x7b00000 0x500000>;
- };
- };
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts
index c2a5562525d2..d430713ea9b9 100644
--- a/arch/arm/boot/dts/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/kirkwood-dns325.dts
@@ -1,10 +1,10 @@
/dts-v1/;
-/include/ "kirkwood.dtsi"
+/include/ "kirkwood-dnskw.dtsi"
/ {
model = "D-Link DNS-325 NAS (Rev A1)";
- compatible = "dlink,dns-325-a1", "dlink,dns-325", "dlink,dns-kirkwood", "mrvl,kirkwood-88f6281", "mrvl,kirkwood";
+ compatible = "dlink,dns-325-a1", "dlink,dns-325", "dlink,dns-kirkwood", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -15,45 +15,43 @@
bootargs = "console=ttyS0,115200n8 earlyprintk";
};
- ocp@f1000000 {
- serial@12000 {
- clock-frequency = <200000000>;
- status = "okay";
+ gpio-leds {
+ compatible = "gpio-leds";
+ white-power {
+ label = "dns325:white:power";
+ gpios = <&gpio0 26 1>; /* GPIO 26 Active Low */
+ linux,default-trigger = "default-on";
+ };
+ white-usb {
+ label = "dns325:white:usb";
+ gpios = <&gpio1 11 1>; /* GPIO 43 Active Low */
+ };
+ red-l_hdd {
+ label = "dns325:red:l_hdd";
+ gpios = <&gpio0 28 1>; /* GPIO 28 Active Low */
};
+ red-r_hdd {
+ label = "dns325:red:r_hdd";
+ gpios = <&gpio0 27 1>; /* GPIO 27 Active Low */
+ };
+ red-usb {
+ label = "dns325:red:usb";
+ gpios = <&gpio0 29 1>; /* GPIO 29 Active Low */
+ };
+ };
- nand@3000000 {
+ ocp@f1000000 {
+ i2c@11000 {
status = "okay";
- partition@0 {
- label = "u-boot";
- reg = <0x0000000 0x100000>;
- read-only;
- };
-
- partition@100000 {
- label = "uImage";
- reg = <0x0100000 0x500000>;
- };
-
- partition@600000 {
- label = "ramdisk";
- reg = <0x0600000 0x500000>;
- };
-
- partition@b00000 {
- label = "image";
- reg = <0x0b00000 0x6600000>;
- };
-
- partition@7100000 {
- label = "mini firmware";
- reg = <0x7100000 0xa00000>;
- };
-
- partition@7b00000 {
- label = "config";
- reg = <0x7b00000 0x500000>;
+ lm75: lm75@48 {
+ compatible = "national,lm75";
+ reg = <0x48>;
};
};
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
};
};
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
new file mode 100644
index 000000000000..7408655f91b5
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -0,0 +1,69 @@
+/include/ "kirkwood.dtsi"
+
+/ {
+ model = "D-Link DNS NASes (kirkwood-based)";
+ compatible = "dlink,dns-kirkwood", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ button@1 {
+ label = "Power button";
+ linux,code = <116>;
+ gpios = <&gpio1 2 1>;
+ };
+ button@2 {
+ label = "USB unmount button";
+ linux,code = <161>;
+ gpios = <&gpio1 15 1>;
+ };
+ button@3 {
+ label = "Reset button";
+ linux,code = <0x198>;
+ gpios = <&gpio1 16 1>;
+ };
+ };
+
+ ocp@f1000000 {
+ sata@80000 {
+ status = "okay";
+ nr-ports = <2>;
+ };
+
+ nand@3000000 {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x500000>;
+ };
+
+ partition@600000 {
+ label = "ramdisk";
+ reg = <0x0600000 0x500000>;
+ };
+
+ partition@b00000 {
+ label = "image";
+ reg = <0x0b00000 0x6600000>;
+ };
+
+ partition@7100000 {
+ label = "mini firmware";
+ reg = <0x7100000 0xa00000>;
+ };
+
+ partition@7b00000 {
+ label = "config";
+ reg = <0x7b00000 0x500000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index a5376b84227f..26e281fbf6bc 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -4,7 +4,7 @@
/ {
model = "Globalscale Technologies Dreamplug";
- compatible = "globalscale,dreamplug-003-ds2001", "globalscale,dreamplug", "mrvl,kirkwood-88f6281", "mrvl,kirkwood";
+ compatible = "globalscale,dreamplug-003-ds2001", "globalscale,dreamplug", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -20,5 +20,55 @@
clock-frequency = <200000000>;
status = "ok";
};
+
+ spi@10600 {
+ status = "okay";
+
+ m25p40@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mx25l1606e";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ mode = <0>;
+
+ partition@0 {
+ reg = <0x0 0x80000>;
+ label = "u-boot";
+ };
+
+ partition@100000 {
+ reg = <0x100000 0x10000>;
+ label = "u-boot env";
+ };
+
+ partition@180000 {
+ reg = <0x180000 0x10000>;
+ label = "dtb";
+ };
+ };
+ };
+
+ sata@80000 {
+ status = "okay";
+ nr-ports = <1>;
+ };
+ };
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ bluetooth {
+ label = "dreamplug:blue:bluetooth";
+ gpios = <&gpio1 15 1>;
+ };
+ wifi {
+ label = "dreamplug:green:wifi";
+ gpios = <&gpio1 16 1>;
+ };
+ wifi-ap {
+ label = "dreamplug:green:wifi_ap";
+ gpios = <&gpio1 17 1>;
+ };
};
};
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
new file mode 100644
index 000000000000..7c8238fbb6f9
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -0,0 +1,99 @@
+/dts-v1/;
+
+/include/ "kirkwood.dtsi"
+
+/ {
+ model = "Seagate GoFlex Net";
+ compatible = "seagate,goflexnet", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x8000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlyprintk root=/dev/sda1 rootdelay=10";
+ };
+
+ ocp@f1000000 {
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "ok";
+ };
+
+ nand@3000000 {
+ status = "okay";
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x0000000 0x100000>;
+ read-only;
+ };
+
+ partition@100000 {
+ label = "uImage";
+ reg = <0x0100000 0x400000>;
+ };
+
+ partition@500000 {
+ label = "pogoplug";
+ reg = <0x0500000 0x2000000>;
+ };
+
+ partition@2500000 {
+ label = "root";
+ reg = <0x02500000 0xd800000>;
+ };
+ };
+ sata@80000 {
+ status = "okay";
+ nr-ports = <2>;
+ };
+
+ };
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ health {
+ label = "status:green:health";
+ gpios = <&gpio1 14 1>;
+ linux,default-trigger = "default-on";
+ };
+ fault {
+ label = "status:orange:fault";
+ gpios = <&gpio1 15 1>;
+ };
+ left0 {
+ label = "status:white:left0";
+ gpios = <&gpio1 10 0>;
+ };
+ left1 {
+ label = "status:white:left1";
+ gpios = <&gpio1 11 0>;
+ };
+ left2 {
+ label = "status:white:left2";
+ gpios = <&gpio1 12 0>;
+ };
+ left3 {
+ label = "status:white:left3";
+ gpios = <&gpio1 13 0>;
+ };
+ right0 {
+ label = "status:white:right0";
+ gpios = <&gpio1 6 0>;
+ };
+ right1 {
+ label = "status:white:right1";
+ gpios = <&gpio1 7 0>;
+ };
+ right2 {
+ label = "status:white:right2";
+ gpios = <&gpio1 8 0>;
+ };
+ right3 {
+ label = "status:white:right3";
+ gpios = <&gpio1 9 0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index ada0f0c23085..66794ed75ff1 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -4,7 +4,7 @@
/ {
model = "RaidSonic ICY BOX IB-NAS62x0 (Rev B)";
- compatible = "raidsonic,ib-nas6210-b", "raidsonic,ib-nas6220-b", "raidsonic,ib-nas6210", "raidsonic,ib-nas6220", "raidsonic,ib-nas62x0", "mrvl,kirkwood-88f6281", "mrvl,kirkwood";
+ compatible = "raidsonic,ib-nas6210-b", "raidsonic,ib-nas6220-b", "raidsonic,ib-nas6210", "raidsonic,ib-nas6220", "raidsonic,ib-nas62x0", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -21,6 +21,11 @@
status = "okay";
};
+ sata@80000 {
+ status = "okay";
+ nr-ports = <2>;
+ };
+
nand@3000000 {
status = "okay";
@@ -41,4 +46,37 @@
};
};
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ button@1 {
+ label = "USB Copy";
+ linux,code = <133>;
+ gpios = <&gpio0 29 1>;
+ };
+ button@2 {
+ label = "Reset";
+ linux,code = <0x198>;
+ gpios = <&gpio0 28 1>;
+ };
+ };
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ green-os {
+ label = "ib62x0:green:os";
+ gpios = <&gpio0 25 0>;
+ linux,default-trigger = "default-on";
+ };
+ red-os {
+ label = "ib62x0:red:os";
+ gpios = <&gpio0 22 0>;
+ };
+ usb-copy {
+ label = "ib62x0:red:usb_copy";
+ gpios = <&gpio0 27 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 1ba75d4adecc..f8ca6fa88192 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -4,7 +4,7 @@
/ {
model = "Iomega Iconnect";
- compatible = "iom,iconnect-1.1", "iom,iconnect", "mrvl,kirkwood-88f6281", "mrvl,kirkwood";
+ compatible = "iom,iconnect-1.1", "iom,iconnect", "marvell,kirkwood-88f6281", "marvell,kirkwood";
memory {
device_type = "memory";
@@ -18,9 +18,55 @@
};
ocp@f1000000 {
+ i2c@11000 {
+ status = "okay";
+
+ lm63: lm63@4c {
+ compatible = "national,lm63";
+ reg = <0x4c>;
+ };
+ };
serial@12000 {
clock-frequency = <200000000>;
status = "ok";
};
};
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ led-level {
+ label = "led_level";
+ gpios = <&gpio1 9 0>;
+ linux,default-trigger = "default-on";
+ };
+ power-blue {
+ label = "power:blue";
+ gpios = <&gpio1 10 0>;
+ linux,default-trigger = "timer";
+ };
+ power-red {
+ label = "power:red";
+ gpios = <&gpio1 11 0>;
+ };
+ usb1 {
+ label = "usb1:blue";
+ gpios = <&gpio1 12 0>;
+ };
+ usb2 {
+ label = "usb2:blue";
+ gpios = <&gpio1 13 0>;
+ };
+ usb3 {
+ label = "usb3:blue";
+ gpios = <&gpio1 14 0>;
+ };
+ usb4 {
+ label = "usb4:blue";
+ gpios = <&gpio1 15 0>;
+ };
+ otb {
+ label = "otb:blue";
+ gpios = <&gpio1 16 0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts
new file mode 100644
index 000000000000..9510c9ea666c
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/include/ "kirkwood-lsxl.dtsi"
+
+/ {
+ model = "Buffalo Linkstation LS-CHLv2";
+ compatible = "buffalo,lschlv2", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x4000000>;
+ };
+
+ ocp@f1000000 {
+ serial@12000 {
+ clock-frequency = <166666667>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts
new file mode 100644
index 000000000000..739019c4cba9
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts
@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/include/ "kirkwood-lsxl.dtsi"
+
+/ {
+ model = "Buffalo Linkstation LS-XHL";
+ compatible = "buffalo,lsxhl", "buffalo,lsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>;
+ };
+
+ ocp@f1000000 {
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
new file mode 100644
index 000000000000..8ac51c08269d
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
@@ -0,0 +1,95 @@
+/include/ "kirkwood.dtsi"
+
+/ {
+ chosen {
+ bootargs = "console=ttyS0,115200n8 earlyprintk";
+ };
+
+ ocp@f1000000 {
+ sata@80000 {
+ status = "okay";
+ nr-ports = <1>;
+ };
+
+ spi@10600 {
+ status = "okay";
+
+ m25p40@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p40";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ mode = <0>;
+
+ partition@0 {
+ reg = <0x0 0x60000>;
+ label = "uboot";
+ read-only;
+ };
+
+ partition@60000 {
+ reg = <0x60000 0x10000>;
+ label = "dtb";
+ read-only;
+ };
+
+ partition@70000 {
+ reg = <0x70000 0x10000>;
+ label = "uboot_env";
+ };
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ button@1 {
+ label = "Function Button";
+ linux,code = <132>;
+ gpios = <&gpio1 9 1>;
+ };
+ button@2 {
+ label = "Power-on Switch";
+ linux,code = <116>;
+ gpios = <&gpio1 10 1>;
+ };
+ button@3 {
+ label = "Power-auto Switch";
+ linux,code = <142>;
+ gpios = <&gpio1 11 1>;
+ };
+ };
+
+ gpio_leds {
+ compatible = "gpio-leds";
+
+ led@1 {
+ label = "lschlv2:blue:func";
+ gpios = <&gpio1 4 1>;
+ };
+
+ led@2 {
+ label = "lschlv2:red:alarm";
+ gpios = <&gpio1 5 1>;
+ };
+
+ led@3 {
+ label = "lschlv2:amber:info";
+ gpios = <&gpio1 6 1>;
+ };
+
+ led@4 {
+ label = "lschlv2:blue:power";
+ gpios = <&gpio1 7 1>;
+ linux,default-trigger = "default-on";
+ };
+
+ led@5 {
+ label = "lschlv2:red:func";
+ gpios = <&gpio1 16 1>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
new file mode 100644
index 000000000000..ccbf32757800
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/include/ "kirkwood-ts219.dtsi"
+
+/ {
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ button@1 {
+ label = "USB Copy";
+ linux,code = <133>;
+ gpios = <&gpio0 15 1>;
+ };
+ button@2 {
+ label = "Reset";
+ linux,code = <0x198>;
+ gpios = <&gpio0 16 1>;
+ };
+ };
+};
+};
\ No newline at end of file
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
new file mode 100644
index 000000000000..fbe9932161a1
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/include/ "kirkwood-ts219.dtsi"
+
+/ {
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ button@1 {
+ label = "USB Copy";
+ linux,code = <133>;
+ gpios = <&gpio1 11 1>;
+ };
+ button@2 {
+ label = "Reset";
+ linux,code = <0x198>;
+ gpios = <&gpio1 5 1>;
+ };
+ };
+};
+};
\ No newline at end of file
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
new file mode 100644
index 000000000000..64ea27cb3298
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -0,0 +1,78 @@
+/include/ "kirkwood.dtsi"
+
+/ {
+ model = "QNAP TS219 family";
+ compatible = "qnap,ts219", "marvell,kirkwood";
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200n8";
+ };
+
+ ocp@f1000000 {
+ i2c@11000 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ s35390a: s35390a@30 {
+ compatible = "s35390a";
+ reg = <0x30>;
+ };
+ };
+ serial@12000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ serial@12100 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ spi@10600 {
+ status = "okay";
+
+ m25p128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "m25p128";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ mode = <0>;
+
+ partition@0000000 {
+ reg = <0x00000000 0x00080000>;
+ label = "U-Boot";
+ };
+
+ partition@00200000 {
+ reg = <0x00200000 0x00200000>;
+ label = "Kernel";
+ };
+
+ partition@00400000 {
+ reg = <0x00400000 0x00900000>;
+ label = "RootFS1";
+ };
+ partition@00d00000 {
+ reg = <0x00d00000 0x00300000>;
+ label = "RootFS2";
+ };
+ partition@00040000 {
+ reg = <0x00080000 0x00040000>;
+ label = "U-Boot Config";
+ };
+ partition@000c0000 {
+ reg = <0x000c0000 0x00140000>;
+ label = "NAS Config";
+ };
+ };
+ };
+ sata@80000 {
+ status = "okay";
+ nr-ports = <2>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index 926528b81baa..cef9616f330a 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -1,7 +1,16 @@
/include/ "skeleton.dtsi"
/ {
- compatible = "mrvl,kirkwood";
+ compatible = "marvell,kirkwood";
+ interrupt-parent = <&intc>;
+
+ intc: interrupt-controller {
+ compatible = "marvell,orion-intc", "marvell,intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0xf1020204 0x04>,
+ <0xf1020214 0x04>;
+ };
ocp@f1000000 {
compatible = "simple-bus";
@@ -9,6 +18,24 @@
#address-cells = <1>;
#size-cells = <1>;
+ gpio0: gpio@10100 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0x10100 0x40>;
+ ngpio = <32>;
+ interrupts = <35>, <36>, <37>, <38>;
+ };
+
+ gpio1: gpio@10140 {
+ compatible = "marvell,orion-gpio";
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0x10140 0x40>;
+ ngpio = <18>;
+ interrupts = <39>, <40>, <41>;
+ };
+
serial@12000 {
compatible = "ns16550a";
reg = <0x12000 0x100>;
@@ -28,22 +55,55 @@
};
rtc@10300 {
- compatible = "mrvl,kirkwood-rtc", "mrvl,orion-rtc";
+ compatible = "marvell,kirkwood-rtc", "marvell,orion-rtc";
reg = <0x10300 0x20>;
interrupts = <53>;
};
+ spi@10600 {
+ compatible = "marvell,orion-spi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ interrupts = <23>;
+ reg = <0x10600 0x28>;
+ status = "disabled";
+ };
+
+ wdt@20300 {
+ compatible = "marvell,orion-wdt";
+ reg = <0x20300 0x28>;
+ status = "okay";
+ };
+
+ sata@80000 {
+ compatible = "marvell,orion-sata";
+ reg = <0x80000 0x5000>;
+ interrupts = <21>;
+ status = "disabled";
+ };
+
nand@3000000 {
#address-cells = <1>;
#size-cells = <1>;
cle = <0>;
ale = <1>;
bank-width = <1>;
- compatible = "mrvl,orion-nand";
+ compatible = "marvell,orion-nand";
reg = <0x3000000 0x400>;
chip-delay = <25>;
/* set partition map and/or chip-delay in board dts */
status = "disabled";
};
+
+ i2c@11000 {
+ compatible = "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <29>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
};
};
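The SoC-level kirkwood.dtsi now carries the interrupt controller, GPIO banks, SPI, watchdog, SATA and I2C nodes, most of them with status = "disabled" so that each board file above only flips the controllers it actually wires up to "okay". Drivers bind to these nodes purely by compatible string; as a hedged sketch (the driver and table names here are illustrative, not the in-tree ones), an OF match table for one of the new "marvell,*" strings looks like:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_wdt_of_match[] = {
	{ .compatible = "marvell,orion-wdt" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_wdt_of_match);

static struct platform_driver example_wdt_driver = {
	.driver = {
		.name		= "example-wdt",
		.of_match_table	= example_wdt_of_match,
	},
	/* .probe/.remove omitted in this sketch */
};
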
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
new file mode 100644
index 000000000000..798fa35c0005
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Device Tree Source for the r8a7740 SoC
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "renesas,r8a7740";
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/sh7377.dtsi b/arch/arm/boot/dts/sh7377.dtsi
new file mode 100644
index 000000000000..767ee0796daa
--- /dev/null
+++ b/arch/arm/boot/dts/sh7377.dtsi
@@ -0,0 +1,21 @@
+/*
+ * Device Tree Source for the sh7377 SoC
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "renesas,sh7377";
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a8";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 9de5636023f6..27fb8a67ea42 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -276,9 +276,11 @@
usb@c5000000 {
status = "okay";
+ nvidia,vbus-gpio = <&gpio 170 0>; /* gpio PV2 */
};
usb@c5004000 {
+ status = "okay";
nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9f1921634eb7..405d1673904e 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -123,6 +123,12 @@
status = "disabled";
};
+ pwm {
+ compatible = "nvidia,tegra20-pwm";
+ reg = <0x7000a000 0x100>;
+ #pwm-cells = <2>;
+ };
+
i2c@7000c000 {
compatible = "nvidia,tegra20-i2c";
reg = <0x7000c000 0x100>;
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index da740191771f..3e4334d14efb 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -117,6 +117,12 @@
status = "disabled";
};
+ pwm {
+ compatible = "nvidia,tegra30-pwm", "nvidia,tegra20-pwm";
+ reg = <0x7000a000 0x100>;
+ #pwm-cells = <2>;
+ };
+
i2c@7000c000 {
compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
reg = <0x7000c000 0x100>;
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 3b2f3510d7eb..d351b27d7213 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -66,6 +66,7 @@
vcxio: regulator@8 {
compatible = "ti,twl6030-vcxio";
+ regulator-always-on;
};
vusb: regulator@9 {
@@ -74,10 +75,12 @@
v1v8: regulator@10 {
compatible = "ti,twl6030-v1v8";
+ regulator-always-on;
};
v2v1: regulator@11 {
compatible = "ti,twl6030-v2v1";
+ regulator-always-on;
};
clk32kg: regulator@12 {
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index aa07f5938f05..1143c4d5c567 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -452,6 +452,7 @@ static struct dma_map_ops dmabounce_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
.map_page = dmabounce_map_page,
.unmap_page = dmabounce_unmap_page,
.sync_single_for_cpu = dmabounce_sync_for_cpu,
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index ddc9fe6a78ac..90610c7030f7 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -5,10 +5,7 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SLAB=y
CONFIG_MODULES=y
@@ -21,7 +18,7 @@ CONFIG_ARCH_SHMOBILE=y
CONFIG_ARCH_R8A7740=y
CONFIG_MACH_ARMADILLO800EVA=y
# CONFIG_SH_TIMER_TMU is not set
-# CONFIG_ARM_THUMB is not set
+CONFIG_ARM_THUMB=y
CONFIG_CPU_BPREDICT_DISABLE=y
# CONFIG_CACHE_L2X0 is not set
CONFIG_ARM_ERRATA_430973=y
@@ -36,9 +33,10 @@ CONFIG_AEABI=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
CONFIG_CMDLINE_FORCE=y
CONFIG_KEXEC=y
+CONFIG_VFP=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SUSPEND is not set
CONFIG_NET=y
@@ -89,26 +87,32 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_SH_MOBILE=y
# CONFIG_HWMON is not set
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_VIDEO_DEV=y
+# CONFIG_RC_CORE is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+# CONFIG_V4L_USB_DRIVERS is not set
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_SOC_CAMERA_MT9T112=y
+CONFIG_VIDEO_SH_MOBILE_CEU=y
+# CONFIG_RADIO_ADAPTERS is not set
CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_SH_MOBILE_LCDC=y
+CONFIG_FB_SH_MOBILE_HDMI=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
# CONFIG_SND_ARM is not set
-CONFIG_SND_SOC=y
CONFIG_SND_SOC_SH4_FSI=y
# CONFIG_HID_SUPPORT is not set
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_RENESAS_USBHS=y
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=y
@@ -116,6 +120,8 @@ CONFIG_USB_ETH=m
CONFIG_MMC=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_SH_MMCIF=y
+CONFIG_DMADEVICES=y
+CONFIG_SH_DMAE=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=y
# CONFIG_DNOTIFY is not set
@@ -124,7 +130,6 @@ CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f725b9637b33..3c9f32f9b6b4 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y
CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
+CONFIG_MXS_DMA=y
CONFIG_COMMON_CLK_DEBUG=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/kzm9d_defconfig b/arch/arm/configs/kzm9d_defconfig
new file mode 100644
index 000000000000..26146ffea1a5
--- /dev/null
+++ b/arch/arm/configs/kzm9d_defconfig
@@ -0,0 +1,89 @@
+# CONFIG_ARM_PATCH_PHYS_VIRT is not set
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SHMOBILE=y
+CONFIG_ARCH_EMEV2=y
+CONFIG_MACH_KZM9D=y
+CONFIG_MEMORY_START=0x40000000
+CONFIG_MEMORY_SIZE=0x10000000
+# CONFIG_SH_TIMER_TMU is not set
+# CONFIG_SWP_EMULATE is not set
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_LOCAL_TIMERS is not set
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_CMDLINE="console=tty0 console=ttyS1,115200n81 earlyprintk=serial8250-em.1,115200n81 mem=128M@0x40000000 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_VFP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_BLK_DEV is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_EM=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+# CONFIG_FTRACE is not set
diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig
index e3ebc20ed0a7..2388c8610627 100644
--- a/arch/arm/configs/kzm9g_defconfig
+++ b/arch/arm/configs/kzm9g_defconfig
@@ -100,7 +100,12 @@ CONFIG_SND_SOC_SH4_FSI=y
CONFIG_USB=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_R8A66597_HCD=y
+CONFIG_USB_RENESAS_USBHS=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=y
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_SDHI=y
@@ -108,12 +113,13 @@ CONFIG_MMC_SH_MMCIF=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_RS5C372=y
CONFIG_DMADEVICES=y
CONFIG_SH_DMAE=y
CONFIG_ASYNC_TX_DMA=y
CONFIG_STAGING=y
# CONFIG_DNOTIFY is not set
-# CONFIG_INOTIFY_USER is not set
+CONFIG_INOTIFY_USER=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index ccdb6357fb74..4edcfb4e4dee 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -34,7 +34,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_AUTO_ZRELADDR=y
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index b152de79fd95..e58edc36b406 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_TWL92330=y
CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 1d24f8458bef..71277a1591ba 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_BUG is not set
+# CONFIG_BUGVERBOSE is not set
# CONFIG_ELF_CORE is not set
# CONFIG_SHMEM is not set
CONFIG_SLOB=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 4be9c1e80ee6..db2245353f0f 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -106,6 +106,7 @@ CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
CONFIG_SPI_TEGRA=y
CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TPS6586X=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_SBS=y
CONFIG_SENSORS_LM90=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d4f661d1cf6..da6845493caa 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LM3530=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AB8500=y
CONFIG_RTC_DRV_PL031=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index ed2e95d46e29..62e75475e57e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,7 +1,10 @@
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H
+#include <asm/errno.h>
+
#ifdef CONFIG_ARM_ARCH_TIMER
+#define ARCH_HAS_READ_CURRENT_TIMER
int arch_timer_of_register(void);
int arch_timer_sched_clock_init(void);
#else
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 004c1bc95d2b..e4448e16046d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda181549..dc6145120de3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -6,9 +6,22 @@
#ifndef __ASM_ARM_DELAY_H
#define __ASM_ARM_DELAY_H
+#include <asm/memory.h>
#include <asm/param.h> /* HZ */
-extern void __delay(int loops);
+#define MAX_UDELAY_MS 2
+#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
+#define UDELAY_SHIFT 30
+
+#ifndef __ASSEMBLY__
+
+extern struct arm_delay_ops {
+ void (*delay)(unsigned long);
+ void (*const_udelay)(unsigned long);
+ void (*udelay)(unsigned long);
+} arm_delay_ops;
+
+#define __delay(n) arm_delay_ops.delay(n)
/*
* This function intentionally does not exist; if you see references to
@@ -23,22 +36,27 @@ extern void __bad_udelay(void);
* division by multiplication: you don't have to worry about
* loss of precision.
*
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 2 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
* a constant)
*/
-extern void __udelay(unsigned long usecs);
-extern void __const_udelay(unsigned long);
-
-#define MAX_UDELAY_MS 2
+#define __udelay(n) arm_delay_ops.udelay(n)
+#define __const_udelay(n) arm_delay_ops.const_udelay(n)
#define udelay(n) \
(__builtin_constant_p(n) ? \
((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
- __const_udelay((n) * ((2199023U*HZ)>>11))) : \
+ __const_udelay((n) * UDELAY_MULT)) : \
__udelay(n))
+/* Loop-based definitions for assembly code. */
+extern void __loop_delay(unsigned long loops);
+extern void __loop_udelay(unsigned long usecs);
+extern void __loop_const_udelay(unsigned long);
+
+#endif /* __ASSEMBLY__ */
+
#endif /* defined(_ARM_DELAY_H) */
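The reworked asm/delay.h routes __delay(), __udelay() and __const_udelay() through the arm_delay_ops function pointers, so a platform with a fixed-rate counter can replace the calibrated busy loop. A hedged sketch of how such a backend might be plugged in (timer_freq, the function names and the cycle conversion are illustrative; the const_udelay hook is left out for brevity):

#include <linux/delay.h>
#include <linux/timex.h>
#include <asm/processor.h>

static unsigned long timer_freq;	/* counter frequency in Hz */

static void example_timer_delay(unsigned long cycles)
{
	cycles_t start = get_cycles();

	while ((get_cycles() - start) < cycles)
		cpu_relax();
}

static void example_timer_udelay(unsigned long usecs)
{
	example_timer_delay((unsigned long long)usecs * timer_freq / 1000000);
}

void example_register_timer_delay(unsigned long freq)
{
	timer_freq = freq;
	arm_delay_ops.delay  = example_timer_delay;
	arm_delay_ops.udelay = example_timer_udelay;
}
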
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index bbef15d04890..2ae842df4551 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
-}
-
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
@@ -213,20 +202,12 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
/*
* This can be called during boot to increase the size of the consistent
* DMA region above it's default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
*/
-extern void __init init_consistent_dma_size(unsigned long size);
+static inline void init_consistent_dma_size(unsigned long size) { }
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
@@ -280,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
+extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index e51b1e81df05..83eb2f772911 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,30 +4,6 @@
/*
* This is the "bare minimum". AIO seems to require this.
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_L1_CACHE,
- KM_L2_CACHE,
- KM_KDB,
- KM_TYPE_NR
-};
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define KM_NMI (-1)
-#define KM_NMI_PTE (-1)
-#define KM_IRQ_PTE (-1)
-#endif
+#define KM_TYPE_NR 16
#endif
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
deleted file mode 100644
index ef4c897772d1..000000000000
--- a/arch/arm/include/asm/locks.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * arch/arm/include/asm/locks.h
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Interrupt safe locking assembler.
- */
-#ifndef __ASM_PROC_LOCKS_H
-#define __ASM_PROC_LOCKS_H
-
-#if __LINUX_ARM_ARCH__ >= 6
-
-#define __down_op(ptr,fail) \
- ({ \
- __asm__ __volatile__( \
- "@ down_op\n" \
-"1: ldrex lr, [%0]\n" \
-" sub lr, lr, %1\n" \
-" strex ip, lr, [%0]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" teq lr, #0\n" \
-" movmi ip, %0\n" \
-" blmi " #fail \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- })
-
-#define __down_op_ret(ptr,fail) \
- ({ \
- unsigned int ret; \
- __asm__ __volatile__( \
- "@ down_op_ret\n" \
-"1: ldrex lr, [%1]\n" \
-" sub lr, lr, %2\n" \
-" strex ip, lr, [%1]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" teq lr, #0\n" \
-" movmi ip, %1\n" \
-" movpl ip, #0\n" \
-" blmi " #fail "\n" \
-" mov %0, ip" \
- : "=&r" (ret) \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- ret; \
- })
-
-#define __up_op(ptr,wake) \
- ({ \
- smp_mb(); \
- __asm__ __volatile__( \
- "@ up_op\n" \
-"1: ldrex lr, [%0]\n" \
-" add lr, lr, %1\n" \
-" strex ip, lr, [%0]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" cmp lr, #0\n" \
-" movle ip, %0\n" \
-" blle " #wake \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- })
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes. BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS 0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail) \
- ({ \
- __asm__ __volatile__( \
- "@ down_op_write\n" \
-"1: ldrex lr, [%0]\n" \
-" sub lr, lr, %1\n" \
-" strex ip, lr, [%0]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" teq lr, #0\n" \
-" movne ip, %0\n" \
-" blne " #fail \
- : \
- : "r" (ptr), "I" (RW_LOCK_BIAS) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- })
-
-#define __up_op_write(ptr,wake) \
- ({ \
- smp_mb(); \
- __asm__ __volatile__( \
- "@ up_op_write\n" \
-"1: ldrex lr, [%0]\n" \
-" adds lr, lr, %1\n" \
-" strex ip, lr, [%0]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" movcs ip, %0\n" \
-" blcs " #wake \
- : \
- : "r" (ptr), "I" (RW_LOCK_BIAS) \
- : "ip", "lr", "cc"); \
- })
-
-#define __down_op_read(ptr,fail) \
- __down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake) \
- ({ \
- smp_mb(); \
- __asm__ __volatile__( \
- "@ up_op_read\n" \
-"1: ldrex lr, [%0]\n" \
-" add lr, lr, %1\n" \
-" strex ip, lr, [%0]\n" \
-" teq ip, #0\n" \
-" bne 1b\n" \
-" teq lr, #0\n" \
-" moveq ip, %0\n" \
-" bleq " #wake \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- })
-
-#else
-
-#define __down_op(ptr,fail) \
- ({ \
- __asm__ __volatile__( \
- "@ down_op\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%0]\n" \
-" subs lr, lr, %1\n" \
-" str lr, [%0]\n" \
-" msr cpsr_c, ip\n" \
-" movmi ip, %0\n" \
-" blmi " #fail \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- })
-
-#define __down_op_ret(ptr,fail) \
- ({ \
- unsigned int ret; \
- __asm__ __volatile__( \
- "@ down_op_ret\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%1]\n" \
-" subs lr, lr, %2\n" \
-" str lr, [%1]\n" \
-" msr cpsr_c, ip\n" \
-" movmi ip, %1\n" \
-" movpl ip, #0\n" \
-" blmi " #fail "\n" \
-" mov %0, ip" \
- : "=&r" (ret) \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- ret; \
- })
-
-#define __up_op(ptr,wake) \
- ({ \
- smp_mb(); \
- __asm__ __volatile__( \
- "@ up_op\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%0]\n" \
-" adds lr, lr, %1\n" \
-" str lr, [%0]\n" \
-" msr cpsr_c, ip\n" \
-" movle ip, %0\n" \
-" blle " #wake \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- })
-
-/*
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes. BIAS must be chosen such that sub'ing
- * BIAS once per CPU will result in the long remaining
- * negative.
- */
-#define RW_LOCK_BIAS 0x01000000
-#define RW_LOCK_BIAS_STR "0x01000000"
-
-#define __down_op_write(ptr,fail) \
- ({ \
- __asm__ __volatile__( \
- "@ down_op_write\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%0]\n" \
-" subs lr, lr, %1\n" \
-" str lr, [%0]\n" \
-" msr cpsr_c, ip\n" \
-" movne ip, %0\n" \
-" blne " #fail \
- : \
- : "r" (ptr), "I" (RW_LOCK_BIAS) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- })
-
-#define __up_op_write(ptr,wake) \
- ({ \
- __asm__ __volatile__( \
- "@ up_op_write\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%0]\n" \
-" adds lr, lr, %1\n" \
-" str lr, [%0]\n" \
-" msr cpsr_c, ip\n" \
-" movcs ip, %0\n" \
-" blcs " #wake \
- : \
- : "r" (ptr), "I" (RW_LOCK_BIAS) \
- : "ip", "lr", "cc"); \
- smp_mb(); \
- })
-
-#define __down_op_read(ptr,fail) \
- __down_op(ptr, fail)
-
-#define __up_op_read(ptr,wake) \
- ({ \
- smp_mb(); \
- __asm__ __volatile__( \
- "@ up_op_read\n" \
-" mrs ip, cpsr\n" \
-" orr lr, ip, #128\n" \
-" msr cpsr_c, lr\n" \
-" ldr lr, [%0]\n" \
-" adds lr, lr, %1\n" \
-" str lr, [%0]\n" \
-" msr cpsr_c, ip\n" \
-" moveq ip, %0\n" \
-" bleq " #wake \
- : \
- : "r" (ptr), "I" (1) \
- : "ip", "lr", "cc"); \
- })
-
-#endif
-
-#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb575747e5e..e965f1b560f1 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -16,7 +16,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
-#include <asm/sizes.h>
+#include <linux/sizes.h>
#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
index 93226cf23ae0..b1479fd04a95 100644
--- a/arch/arm/include/asm/mutex.h
+++ b/arch/arm/include/asm/mutex.h
@@ -7,121 +7,10 @@
*/
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
-
-#if __LINUX_ARM_ARCH__ < 6
-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
-# include <asm-generic/mutex-xchg.h>
-#else
-
/*
- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
- * atomic decrement (it is not a reliable atomic decrement but it satisfies
- * the defined semantics for our purpose, while being smaller and faster
- * than a real atomic decrement or atomic swap. The idea is to attempt
- * decrementing the lock value only once. If once decremented it isn't zero,
- * or if its store-back fails due to a dispute on the exclusive store, we
- * simply bail out immediately through the slow path where the lock will be
- * reattempted until it succeeds.
+ * On pre-ARMv6 hardware this results in a swp-based implementation,
+ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
+ * accesses instead.
*/
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res;
-
- __asm__ (
-
- "ldrex %0, [%2] \n\t"
- "sub %0, %0, #1 \n\t"
- "strex %1, %0, [%2] "
-
- : "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __res |= __ex_flag;
- if (unlikely(__res != 0))
- __res = fail_fn(count);
- return __res;
-}
-
-/*
- * Same trick is used for the unlock fast path. However the original value,
- * rather than the result, is used to test for success in order to have
- * better generated assembly.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "ldrex %0, [%3] \n\t"
- "add %1, %0, #1 \n\t"
- "strex %2, %1, [%3] "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&(count)->counter)
- : "cc","memory" );
-
- __orig |= __ex_flag;
- if (unlikely(__orig != 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we use another construct which could be
- * described as a "single value cmpxchg".
- *
- * This provides the needed trylock semantics like cmpxchg would, but it is
- * lighter and less generic than a true cmpxchg implementation.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __ex_flag, __res, __orig;
-
- __asm__ (
-
- "1: ldrex %0, [%3] \n\t"
- "subs %1, %0, #1 \n\t"
- "strexeq %2, %1, [%3] \n\t"
- "movlt %0, #0 \n\t"
- "cmpeq %2, #0 \n\t"
- "bgt 1b "
-
- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
- : "r" (&count->counter)
- : "cc", "memory" );
-
- return __orig;
-}
-
-#endif
+#include <asm-generic/mutex-xchg.h>
#endif
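With the ARMv6+ fastpath removed, every ARM build now goes through asm-generic/mutex-xchg.h, whose counter convention is 1 = unlocked, 0 = locked, negative = locked with waiters. Roughly, the xchg-based fastpath it provides behaves like the hedged sketch below (function names are illustrative):

#include <linux/atomic.h>
#include <linux/compiler.h>

static inline void example_mutex_lock(atomic_t *count,
				      void (*fail_fn)(atomic_t *))
{
	/* grab the lock by swapping in 0; anything but 1 means contention */
	if (unlikely(atomic_xchg(count, 0) != 1))
		fail_fn(count);
}

static inline void example_mutex_unlock(atomic_t *count,
					void (*fail_fn)(atomic_t *))
{
	/* release by swapping in 1; a non-zero old value means waiters */
	if (unlikely(atomic_xchg(count, 1) != 0))
		fail_fn(count);
}
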
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10a50e3..e074948d8143 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,21 +12,6 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/* ARM perf PMU IDs for use by internal perf clients. */
-enum arm_perf_pmu_ids {
- ARM_PERF_PMU_ID_XSCALE1 = 0,
- ARM_PERF_PMU_ID_XSCALE2,
- ARM_PERF_PMU_ID_V6,
- ARM_PERF_PMU_ID_V6MP,
- ARM_PERF_PMU_ID_CA8,
- ARM_PERF_PMU_ID_CA9,
- ARM_PERF_PMU_ID_CA5,
- ARM_PERF_PMU_ID_CA15,
- ARM_PERF_PMU_ID_CA7,
- ARM_NUM_PMU_IDS,
-};
-
-extern enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void);
+/* Nothing to see here... */
#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7d..41dc31f834c3 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte) (0)
+
+#define pte_present_user(pte) \
+ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+ (L_PTE_PRESENT | L_PTE_USER))
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- if (addr >= TASK_SIZE)
- set_pte_ext(ptep, pteval, 0);
- else {
+ unsigned long ext = 0;
+
+ if (addr < TASK_SIZE && pte_present_user(pteval)) {
__sync_icache_dcache(pteval);
- set_pte_ext(ptep, pteval, PTE_EXT_NG);
+ ext |= PTE_EXT_NG;
}
-}
-#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte) (0)
-
-#define pte_present_user(pte) \
- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
- (L_PTE_PRESENT | L_PTE_USER))
+ set_pte_ext(ptep, pteval, ext);
+}
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset --------------------> <- type --> 0 0 0
+ * <--------------- offset ----------------------> < type -> 0 0 0
*
- * This gives us up to 63 swap files and 32GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 3
-#define __SWP_TYPE_BITS 6
+#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114faa9f3c..4432305f4a2a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -103,10 +103,9 @@ struct pmu_hw_events {
struct arm_pmu {
struct pmu pmu;
- enum arm_perf_pmu_ids id;
enum arm_pmu_type type;
cpumask_t active_irqs;
- const char *name;
+ char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f757263438..05b8e82ec9f5 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate);
#endif
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 23ebc0c82a39..24d284a1bfc7 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -196,7 +196,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn }
struct membank {
phys_addr_t start;
- unsigned long size;
+ phys_addr_t size;
unsigned int highmem;
};
@@ -217,7 +217,7 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-extern int arm_add_memory(phys_addr_t start, unsigned long size);
+extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..b4ca707d0a69 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void)
}
/*
- * ARMv6 Spin-locking.
+ * ARMv6 ticket-based spin-locking.
*
- * We exclusively read the old value. If it is zero, we may have
- * won the lock, so we try exclusively storing it. A memory barrier
- * is required after we get a lock, and before we release it, because
- * V6 CPUs are assumed to have weakly ordered memory.
- *
- * Unlocked value: 0
- * Locked value: 1
+ * A memory barrier is required after we get a lock, and before we
+ * release it, because V6 CPUs are assumed to have weakly ordered
+ * memory.
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -79,31 +74,39 @@ static inline void dsb_sev(void)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
+ u32 newval;
+ arch_spinlock_t lockval;
__asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
-" teq %0, #0\n"
- WFE("ne")
-" strexeq %0, %2, [%1]\n"
-" teqeq %0, #0\n"
+"1: ldrex %0, [%3]\n"
+" add %1, %0, %4\n"
+" strex %2, %1, [%3]\n"
+" teq %2, #0\n"
" bne 1b"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
+ while (lockval.tickets.next != lockval.tickets.owner) {
+ wfe();
+ lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
+ }
+
smp_mb();
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long tmp;
+ u32 slock;
__asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&lock->lock), "r" (1)
+" ldrex %0, [%2]\n"
+" subs %1, %0, %0, ror #16\n"
+" addeq %0, %0, %3\n"
+" strexeq %1, %0, [%2]"
+ : "=&r" (slock), "=&r" (tmp)
+ : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
if (tmp == 0) {
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
+ unsigned long tmp;
+ u32 slock;
+
smp_mb();
__asm__ __volatile__(
-" str %1, [%0]\n"
- :
- : "r" (&lock->lock), "r" (0)
+" mov %1, #1\n"
+"1: ldrex %0, [%2]\n"
+" uadd16 %0, %0, %1\n"
+" strex %1, %0, [%2]\n"
+" teq %1, #0\n"
+" bne 1b"
+ : "=&r" (slock), "=&r" (tmp)
+ : "r" (&lock->slock)
: "cc");
dsb_sev();
}
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+ return tickets.owner != tickets.next;
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+ return (tickets.next - tickets.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
/*
* RWLOCKS
*
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
unsigned long tmp;
__asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
+" ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
unsigned long tmp, tmp2 = 1;
__asm__ __volatile__(
-"1: ldrex %0, [%2]\n"
+" ldrex %0, [%2]\n"
" adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n"
: "=&r" (tmp), "+r" (tmp2)
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@
# error "please don't include this file directly"
#endif
+#define TICKET_SHIFT 16
+
typedef struct {
- volatile unsigned int lock;
+ union {
+ u32 slock;
+ struct __raw_tickets {
+#ifdef __ARMEB__
+ u16 next;
+ u16 owner;
+#else
+ u16 owner;
+ u16 next;
+#endif
+ } tickets;
+ };
} arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
typedef struct {
volatile unsigned int lock;
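Taken together, the spinlock changes replace the simple lock word with a ticket lock: arch_spinlock_t is now a 32-bit word whose halves are the next ticket to hand out and the ticket currently being served. A locker atomically bumps next, then spins (using wfe in the real code) until owner reaches its ticket; unlock simply increments owner. The hedged, plain-C illustration below uses GCC __atomic builtins instead of ldrex/strex and is only meant to show that handshake:

struct example_ticket_lock {
	unsigned short owner;	/* ticket currently being served */
	unsigned short next;	/* next ticket to hand out */
};

static inline void example_ticket_lock(struct example_ticket_lock *l)
{
	unsigned short mine =
		__atomic_fetch_add(&l->next, 1, __ATOMIC_RELAXED);

	while (__atomic_load_n(&l->owner, __ATOMIC_ACQUIRE) != mine)
		;	/* the kernel version waits with wfe() here */
}

static inline void example_ticket_unlock(struct example_ticket_lock *l)
{
	__atomic_store_n(&l->owner, l->owner + 1, __ATOMIC_RELEASE);
}
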
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adaba..ce119442277c 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,13 +12,15 @@
#ifndef _ASMARM_TIMEX_H
#define _ASMARM_TIMEX_H
+#include <asm/arch_timer.h>
#include <mach/timex.h>
typedef unsigned long cycles_t;
-static inline cycles_t get_cycles (void)
-{
- return 0;
-}
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+#define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+#else
+#define get_cycles() (0)
+#endif
#endif
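Because the architected timer now defines ARCH_HAS_READ_CURRENT_TIMER, get_cycles() returns the free-running counter instead of the old hard-coded 0 (and still falls back to 0 where no such timer exists). A hedged usage sketch, where example_workload() is just a placeholder:

#include <linux/printk.h>
#include <linux/timex.h>

static void example_workload(void)
{
	/* placeholder for the code being measured */
}

static void example_time_section(void)
{
	cycles_t t0 = get_cycles();

	example_workload();

	pr_info("section took %lu cycles\n",
		(unsigned long)(get_cycles() - t0));
}
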
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 71f6536d17ac..479a6352e0b5 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs)
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
/*
* The "__xxx" versions of the user access functions do not verify the
* address space - it must have been done previously with a separate
@@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
#endif
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
-
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASMARM_UACCESS_H */
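The ARM-private __strncpy_from_user()/__strnlen_user() helpers are gone; strncpy_from_user(), strlen_user() and strnlen_user() are now the generic word-at-a-time implementations declared here. The usual calling pattern is unchanged; a hedged sketch (example_copy_name() and its policy are illustrative):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_copy_name(char *buf, const char __user *uname, long len)
{
	long n = strncpy_from_user(buf, uname, len);

	if (n < 0)
		return n;		/* -EFAULT: bad user pointer */
	if (n == len)
		return -ENAMETOOLONG;	/* no NUL within len bytes */
	return 0;
}
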
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 512cd1473454..0cab47d4a83f 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -446,7 +446,6 @@
#ifdef __KERNEL__
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..4d52f92967a6
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,96 @@
+#ifndef __ASM_ARM_WORD_AT_A_TIME_H
+#define __ASM_ARM_WORD_AT_A_TIME_H
+
+#ifndef __ARMEB__
+
+/*
+ * Little-endian word-at-a-time zero byte handling.
+ * Heavily based on the x86 algorithm.
+ */
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 5
+ /* We have clz available. */
+ ret = fls(mask) >> 3;
+#else
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ ret = (0x0ff0001 + mask) >> 23;
+ /* Fix the 1 for 00 case */
+ ret &= mask;
+#endif
+
+ return ret;
+}
+
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#define zero_bytemask(mask) (mask)
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, [%2]\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x3\n"
+ " bic %2, %2, #0x3\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+ " lsr %0, %0, %1\n"
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .long 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Qo" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+
+#endif /* DCACHE_WORD_ACCESS */
+
+#else /* __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#endif /* __ASM_ARM_WORD_AT_A_TIME_H */
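For little-endian, has_zero() relies on the classic bit trick: (x - 0x01010101) & ~x & 0x80808080 is non-zero exactly when x contains a zero byte, and its lowest set bit marks the first such byte; find_zero() then turns that mask into a byte index. A hedged, userspace demonstration of the same mask on one 32-bit word (zero_byte_mask() and the sample word are purely illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t zero_byte_mask(uint32_t a)
{
	return (a - 0x01010101u) & ~a & 0x80808080u;
}

int main(void)
{
	/* "ab\0d" packed little-endian: the zero byte is byte index 2 */
	uint32_t w = 'a' | ('b' << 8) | (0u << 16) | ((uint32_t)'d' << 24);
	uint32_t mask = zero_byte_mask(w);

	printf("mask=%08x, first zero byte at index %d\n",
	       mask, __builtin_ctz(mask) / 8);
	return 0;
}
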
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index dd58035621f7..cf258807160d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -32,6 +32,8 @@ static int arch_timer_ppi2;
static struct clock_event_device __percpu **arch_timer_evt;
+extern void init_current_timer_delay(unsigned long freq);
+
/*
* Architected system timer support.
*/
@@ -137,7 +139,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
/* Be safe... */
arch_timer_disable();
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
@@ -223,6 +225,14 @@ static cycle_t arch_counter_read(struct clocksource *cs)
return arch_counter_get_cntpct();
}
+int read_current_timer(unsigned long *timer_val)
+{
+ if (!arch_timer_rate)
+ return -ENXIO;
+ *timer_val = arch_counter_get_cntpct();
+ return 0;
+}
+
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.rating = 400,
@@ -296,6 +306,7 @@ static int __init arch_timer_register(void)
if (err)
goto out_free_irq;
+ init_current_timer_delay(arch_timer_rate);
return 0;
out_free_irq:
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index b57c75e0b01f..60d3b738d420 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,8 +49,7 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
/* platform dependent support */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__const_udelay);
+EXPORT_SYMBOL(arm_delay_ops);
/* networking */
EXPORT_SYMBOL(csum_partial);
@@ -87,10 +86,6 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0d1851ca6eb9..0f82098c9bfe 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -244,6 +244,19 @@ svc_preempt:
b 1b
#endif
+__und_fault:
+ @ Correct the PC such that it is pointing at the instruction
+ @ which caused the fault. If the faulting instruction was ARM
+ @ the PC will be pointing at the next instruction, and have to
+ @ subtract 4. Otherwise, it is Thumb, and the PC will be
+ @ pointing at the second half of the Thumb instruction. We
+ @ have to subtract 2.
+ ldr r2, [r0, #S_PC]
+ sub r2, r2, r1
+ str r2, [r0, #S_PC]
+ b do_undefinstr
+ENDPROC(__und_fault)
+
.align 5
__und_svc:
#ifdef CONFIG_KPROBES
@@ -261,25 +274,32 @@ __und_svc:
@
@ r0 - instruction
@
-#ifndef CONFIG_THUMB2_KERNEL
+#ifndef CONFIG_THUMB2_KERNEL
ldr r0, [r4, #-4]
#else
+ mov r1, #2
ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
- ldrhhs r9, [r4] @ bottom 16 bits
- orrhs r0, r9, r0, lsl #16
+ blo __und_svc_fault
+ ldrh r9, [r4] @ bottom 16 bits
+ add r4, r4, #2
+ str r4, [sp, #S_PC]
+ orr r0, r9, r0, lsl #16
#endif
- adr r9, BSYM(1f)
+ adr r9, BSYM(__und_svc_finish)
mov r2, r4
bl call_fpe
+ mov r1, #4 @ PC correction to apply
+__und_svc_fault:
mov r0, sp @ struct pt_regs *regs
- bl do_undefinstr
+ bl __und_fault
@
@ IRQs off again before pulling preserved data off the stack
@
-1: disable_irq_notrace
+__und_svc_finish:
+ disable_irq_notrace
@
@ restore SPSR and restart the instruction
@@ -423,25 +443,33 @@ __und_usr:
mov r2, r4
mov r3, r5
+ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+ @ faulting instruction depending on Thumb mode.
+ @ r3 = regs->ARM_cpsr
@
- @ fall through to the emulation code, which returns using r9 if
- @ it has emulated the instruction, or the more conventional lr
- @ if we are to treat this as a real undefined instruction
- @
- @ r0 - instruction
+ @ The emulation code returns using r9 if it has emulated the
+ @ instruction, or the more conventional lr if we are to treat
+ @ this as a real undefined instruction
@
adr r9, BSYM(ret_from_exception)
- adr lr, BSYM(__und_usr_unknown)
+
tst r3, #PSR_T_BIT @ Thumb mode?
- itet eq @ explicit IT needed for the 1f label
- subeq r4, r2, #4 @ ARM instr at LR - 4
- subne r4, r2, #2 @ Thumb instr at LR - 2
-1: ldreqt r0, [r4]
+ bne __und_usr_thumb
+ sub r4, r2, #4 @ ARM instr at LR - 4
+1: ldrt r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
- reveq r0, r0 @ little endian instruction
+ rev r0, r0 @ little endian instruction
#endif
- beq call_fpe
+ @ r0 = 32-bit ARM instruction which caused the exception
+ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
+ @ r4 = PC value for the faulting instruction
+ @ lr = 32-bit undefined instruction function
+ adr lr, BSYM(__und_usr_fault_32)
+ b call_fpe
+
+__und_usr_thumb:
@ Thumb instruction
+ sub r4, r2, #2 @ First half of thumb instr at LR - 2
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
* Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
@@ -455,7 +483,7 @@ __und_usr:
ldr r5, .LCcpu_architecture
ldr r5, [r5]
cmp r5, #CPU_ARCH_ARMv7
- blo __und_usr_unknown
+ blo __und_usr_fault_16 @ 16bit undefined instruction
/*
* The following code won't get run unless the running CPU really is v7, so
* coding round the lack of ldrht on older arches is pointless. Temporarily
@@ -463,15 +491,18 @@ __und_usr:
*/
.arch armv6t2
#endif
-2:
- ARM( ldrht r5, [r4], #2 )
- THUMB( ldrht r5, [r4] )
- THUMB( add r4, r4, #2 )
+2: ldrht r5, [r4]
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_unknown
-3: ldrht r0, [r4]
+ blo __und_usr_fault_16 @ 16bit undefined instruction
+3: ldrht r0, [r2]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
+ adr lr, BSYM(__und_usr_fault_32)
+ @ r0 = the two 16-bit Thumb instructions which caused the exception
+ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+ @ r4 = PC value for the first 16-bit Thumb instruction
+ @ lr = 32bit undefined instruction function
#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
@@ -482,17 +513,13 @@ __und_usr:
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
- b __und_usr_unknown
+ b __und_usr_fault_16
#endif
- UNWIND(.fnend )
+ UNWIND(.fnend)
ENDPROC(__und_usr)
- @
- @ fallthrough to call_fpe
- @
-
/*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
*/
.pushsection .fixup, "ax"
.align 2
@@ -524,11 +551,12 @@ ENDPROC(__und_usr)
* NEON handler code.
*
* Emulators may wish to make use of the following registers:
- * r0 = instruction opcode.
- * r2 = PC+4
+ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ * r2 = PC value to resume execution after successful emulation
* r9 = normal "successful" return address
- * r10 = this threads thread_info structure.
+ * r10 = this threads thread_info structure
* lr = unrecognised instruction return address
+ * IRQs disabled, FIQs enabled.
*/
@
@ Fall-through from Thumb-2 __und_usr
@@ -659,12 +687,17 @@ ENTRY(no_fp)
mov pc, lr
ENDPROC(no_fp)
-__und_usr_unknown:
- enable_irq
+__und_usr_fault_32:
+ mov r1, #4
+ b 1f
+__und_usr_fault_16:
+ mov r1, #2
+1: enable_irq
mov r0, sp
adr lr, BSYM(ret_from_exception)
- b do_undefinstr
-ENDPROC(__und_usr_unknown)
+ b __und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
.align 5
__pabt_usr:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4afed88d250a..978eac57e04a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -51,23 +51,15 @@ ret_fast_syscall:
fast_work_pending:
str r0, [sp, #S_R0+S_OFF]! @ returned r0
work_pending:
- tst r1, #_TIF_NEED_RESCHED
- bne work_resched
- /*
- * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
- */
- ldr r2, [sp, #S_PSR]
mov r0, sp @ 'regs'
- tst r2, #15 @ are we returning to user mode?
- bne no_work_pending @ no? just leave, then...
mov r2, why @ 'syscall'
- tst r1, #_TIF_SIGPENDING @ delivering a signal?
- movne why, #0 @ prevent further restarts
- bl do_notify_resume
- b ret_slow_syscall @ Check work again
+ bl do_work_pending
+ cmp r0, #0
+ beq no_work_pending
+ movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
+ ldmia sp, {r0 - r6} @ have to reload r0 - r6
+ b local_restart @ ... and off we go
-work_resched:
- bl schedule
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
*/
@@ -95,13 +87,7 @@ ENDPROC(ret_to_user)
ENTRY(ret_from_fork)
bl schedule_tail
get_thread_info tsk
- ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
- tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
- beq ret_slow_syscall
- mov r1, sp
- mov r0, #1 @ trace exit [IP = 1]
- bl syscall_trace
b ret_slow_syscall
ENDPROC(ret_from_fork)
@@ -415,6 +401,7 @@ ENTRY(vector_swi)
eor scno, scno, #__NR_SYSCALL_BASE @ check OS number
#endif
+local_restart:
ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing
stmdb sp!, {r4, r5} @ push fifth and sixth args
@@ -448,25 +435,24 @@ ENDPROC(vector_swi)
* context switches, and waiting for our parent to respond.
*/
__sys_trace:
- mov r2, scno
- add r1, sp, #S_OFF
- mov r0, #0 @ trace entry [IP = 0]
- bl syscall_trace
+ mov r1, scno
+ add r0, sp, #S_OFF
+ bl syscall_trace_enter
adr lr, BSYM(__sys_trace_return) @ return address
mov scno, r0 @ syscall number (possibly new)
add r1, sp, #S_R0 + S_OFF @ pointer to regs
cmp scno, #NR_syscalls @ check upper syscall limit
- ldmccia r1, {r0 - r3} @ have to reload r0 - r3
+ ldmccia r1, {r0 - r6} @ have to reload r0 - r6
+ stmccia sp, {r4, r5} @ and update the stack args
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
b 2b
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
- mov r2, scno
- mov r1, sp
- mov r0, #1 @ trace exit [IP = 1]
- bl syscall_trace
+ mov r1, scno
+ mov r0, sp
+ bl syscall_trace_exit
b ret_slow_syscall
.align 5
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index df0bf0c8cb79..34e56647dcee 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -179,19 +179,20 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
old = *parent;
*parent = return_hooker;
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer);
- if (err == -EBUSY) {
- *parent = old;
- return;
- }
-
trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
*parent = old;
+ return;
+ }
+
+ err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+ frame_pointer);
+ if (err == -EBUSY) {
+ *parent = old;
+ return;
}
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 835898e7d704..3db960e20cb8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -55,14 +55,6 @@
add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
.endm
-#ifdef CONFIG_XIP_KERNEL
-#define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
-#define KERNEL_END _edata_loc
-#else
-#define KERNEL_START KERNEL_RAM_VADDR
-#define KERNEL_END _end
-#endif
-
/*
* Kernel startup entry point.
* ---------------------------
@@ -218,51 +210,46 @@ __create_page_tables:
blo 1b
/*
- * Now setup the pagetables for our kernel direct
- * mapped region.
+ * Map our RAM from the start to the end of the kernel .bss section.
*/
- mov r3, pc
- mov r3, r3, lsr #SECTION_SHIFT
- orr r3, r7, r3, lsl #SECTION_SHIFT
- add r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
- str r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
- ldr r6, =(KERNEL_END - 1)
- add r0, r0, #1 << PMD_ORDER
+ add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
+ ldr r6, =(_end - 1)
+ orr r3, r8, r7
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
-1: cmp r0, r6
+1: str r3, [r0], #1 << PMD_ORDER
add r3, r3, #1 << SECTION_SHIFT
- strls r3, [r0], #1 << PMD_ORDER
+ cmp r0, r6
bls 1b
#ifdef CONFIG_XIP_KERNEL
/*
- * Map some ram to cover our .data and .bss areas.
+ * Map the kernel image separately as it is not located in RAM.
*/
- add r3, r8, #TEXT_OFFSET
- orr r3, r3, r7
- add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
- str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]!
- ldr r6, =(_end - 1)
- add r0, r0, #4
+#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
+ mov r3, pc
+ mov r3, r3, lsr #SECTION_SHIFT
+ orr r3, r7, r3, lsl #SECTION_SHIFT
+ add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
+ str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
+ ldr r6, =(_edata_loc - 1)
+ add r0, r0, #1 << PMD_ORDER
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: cmp r0, r6
- add r3, r3, #1 << 20
- strls r3, [r0], #4
+ add r3, r3, #1 << SECTION_SHIFT
+ strls r3, [r0], #1 << PMD_ORDER
bls 1b
#endif
/*
- * Then map boot params address in r2 or the first 1MB (2MB with LPAE)
- * of ram if boot params address is not specified.
+ * Then map the boot params address passed in r2, if one was specified.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
- moveq r0, r8
- sub r3, r0, r8
- add r3, r3, #PAGE_OFFSET
- add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
- orr r6, r7, r0
- str r6, [r3]
+ subne r3, r0, r8
+ addne r3, r3, #PAGE_OFFSET
+ addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
+ orrne r6, r7, r0
+ strne r6, [r3]
#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
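The rewritten loop above indexes the initial page table directly from the virtual address being mapped. As a rough standalone sketch (classic 2-level ARM translation assumed, so SECTION_SHIFT = 20, PMD_ORDER = 2, PAGE_OFFSET = 0xC0000000; the page-directory base value is invented), this is how the first kernel section entry is located:

#include <stdio.h>

#define SECTION_SHIFT	20
#define PMD_ORDER	2
#define PAGE_OFFSET	0xC0000000UL

int main(void)
{
	unsigned long pgdir = 0x80004000UL;	/* invented r4: page directory base */

	/* mirrors: add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) */
	unsigned long first = pgdir + (PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER));

	printf("first section entry at %#lx\n", first);	/* 0x80007000 */
	return 0;
}

With these values the offset is 0xC0000000 >> 18 = 0x3000 bytes, i.e. entry 0xC00 of the first-level table, which is where the mapping for the first megabyte at PAGE_OFFSET belongs.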
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a02eada3aa5d..ab243b87118d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -47,17 +47,14 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
-enum arm_perf_pmu_ids
-armpmu_get_pmu_id(void)
+const char *perf_pmu_name(void)
{
- int id = -ENODEV;
-
- if (cpu_pmu != NULL)
- id = cpu_pmu->id;
+ if (!cpu_pmu)
+ return NULL;
- return id;
+ return cpu_pmu->pmu.name;
}
-EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
@@ -760,7 +757,7 @@ init_hw_perf_events(void)
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
register_cpu_notifier(&pmu_cpu_notifier);
- armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
+ armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
}
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index ab627a740fa3..c90fcb2b6967 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -650,7 +650,6 @@ static int armv6_map_event(struct perf_event *event)
}
static struct arm_pmu armv6pmu = {
- .id = ARM_PERF_PMU_ID_V6,
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
@@ -685,7 +684,6 @@ static int armv6mpcore_map_event(struct perf_event *event)
}
static struct arm_pmu armv6mpcore_pmu = {
- .id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d3c536068162..f04070bd2183 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1258,7 +1258,6 @@ static u32 __init armv7_read_num_pmnc_events(void)
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
- armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.map_event = armv7_a8_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
@@ -1267,7 +1266,6 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
static struct arm_pmu *__init armv7_a9_pmu_init(void)
{
- armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.map_event = armv7_a9_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
@@ -1276,7 +1274,6 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
static struct arm_pmu *__init armv7_a5_pmu_init(void)
{
- armv7pmu.id = ARM_PERF_PMU_ID_CA5;
armv7pmu.name = "ARMv7 Cortex-A5";
armv7pmu.map_event = armv7_a5_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
@@ -1285,7 +1282,6 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
- armv7pmu.id = ARM_PERF_PMU_ID_CA15;
armv7pmu.name = "ARMv7 Cortex-A15";
armv7pmu.map_event = armv7_a15_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
@@ -1295,7 +1291,6 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
static struct arm_pmu *__init armv7_a7_pmu_init(void)
{
- armv7pmu.id = ARM_PERF_PMU_ID_CA7;
armv7pmu.name = "ARMv7 Cortex-A7";
armv7pmu.map_event = armv7_a7_map_event;
armv7pmu.num_events = armv7_read_num_pmnc_events();
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e34e7254e652..f759fe0bab63 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -435,7 +435,6 @@ static int xscale_map_event(struct perf_event *event)
}
static struct arm_pmu xscale1pmu = {
- .id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1",
.handle_irq = xscale1pmu_handle_irq,
.enable = xscale1pmu_enable_event,
@@ -803,7 +802,6 @@ xscale2pmu_write_counter(int counter, u32 val)
}
static struct arm_pmu xscale2pmu = {
- .id = ARM_PERF_PMU_ID_XSCALE2,
.name = "xscale2",
.handle_irq = xscale2pmu_handle_irq,
.enable = xscale2pmu_enable_event,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 19c95ea65b2f..693b744fd572 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -247,6 +247,7 @@ void machine_shutdown(void)
void machine_halt(void)
{
machine_shutdown();
+ local_irq_disable();
while (1);
}
@@ -268,6 +269,7 @@ void machine_restart(char *cmd)
/* Whoops - the platform was unable to reboot. Tell the user! */
printk("Reboot failed -- System halted\n");
+ local_irq_disable();
while (1);
}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd31..3e0fc5f7ed4b 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
+#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -907,16 +908,16 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+enum ptrace_syscall_dir {
+ PTRACE_SYSCALL_ENTER = 0,
+ PTRACE_SYSCALL_EXIT,
+};
+
+static int ptrace_syscall_trace(struct pt_regs *regs, int scno,
+ enum ptrace_syscall_dir dir)
{
unsigned long ip;
- if (why)
- audit_syscall_exit(regs);
- else
- audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
- regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
-
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
@@ -927,14 +928,28 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
* IP = 0 -> entry, =1 -> exit
*/
ip = regs->ARM_ip;
- regs->ARM_ip = why;
+ regs->ARM_ip = dir;
- if (why)
+ if (dir == PTRACE_SYSCALL_EXIT)
tracehook_report_syscall_exit(regs, 0);
else if (tracehook_report_syscall_entry(regs))
current_thread_info()->syscall = -1;
regs->ARM_ip = ip;
-
return current_thread_info()->syscall;
}
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+{
+ int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER);
+ audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1,
+ regs->ARM_r2, regs->ARM_r3);
+ return ret;
+}
+
+asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno)
+{
+ int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT);
+ audit_syscall_exit(regs);
+ return ret;
+}
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186abbc06..f4515393248d 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
u32 epoch_cyc_copy;
u32 mult;
u32 shift;
+ bool suspended;
+ bool needs_suspend;
};
static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
u64 epoch_ns;
u32 epoch_cyc;
+ if (cd.suspended)
+ return cd.epoch_ns;
+
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate)
+{
+ setup_sched_clock(read, bits, rate);
+ cd.needs_suspend = true;
+}
+
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
+ if (cd.needs_suspend)
+ cd.suspended = true;
return 0;
}
+static void sched_clock_resume(void)
+{
+ if (cd.needs_suspend) {
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
+ }
+}
+
static struct syscore_ops sched_clock_ops = {
.suspend = sched_clock_suspend,
+ .resume = sched_clock_resume,
};
static int __init sched_clock_syscore_init(void)
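For counters that do not keep running across suspend, the new setup_sched_clock_needs_suspend() variant freezes sched_clock() at the last saved epoch while suspended and re-reads the hardware counter in sched_clock_resume(). A minimal, hypothetical registration sketch (the timer base, register offset and rate are invented, and the declaration is assumed to be exported via asm/sched_clock.h as in this series):

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static void __iomem *my_timer_base;	/* assumed ioremap()ed by earlier init code */

static u32 my_read_sched_clock(void)
{
	return readl_relaxed(my_timer_base + 0x04);	/* made-up counter register */
}

static void __init my_timer_init(void)
{
	/* 32 valid bits, 32.768 kHz, state lost across suspend */
	setup_sched_clock_needs_suspend(my_read_sched_clock, 32, 32768);
}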
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index e15d83bb4ea3..a81dcecc7343 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -508,7 +508,7 @@ void __init dump_machine_table(void)
/* can't use cpu_relax() here as it may require MMU setup */;
}
-int __init arm_add_memory(phys_addr_t start, unsigned long size)
+int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
@@ -538,7 +538,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
}
#endif
- bank->size = size & PAGE_MASK;
+ bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
* Check whether this memory region has non-zero size or
@@ -558,7 +558,7 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
- unsigned long size;
+ phys_addr_t size;
phys_addr_t start;
char *endp;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 536c5d6b340b..f27789e4e38a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -27,7 +27,6 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -48,18 +47,6 @@ const unsigned long sigreturn_codes[7] = {
};
/*
- * Either we support OABI only, or we have EABI with the OABI
- * compat layer enabled. In the later case we don't know if
- * user space is EABI or not, and if not we must not clobber r7.
- * Always using the OABI syscall solves that issue and works for
- * all those cases.
- */
-const unsigned long syscall_restart_code[2] = {
- SWI_SYS_RESTART, /* swi __NR_restart_syscall */
- 0xe49df004, /* ldr pc, [sp], #4 */
-};
-
-/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -582,12 +569,13 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
-static void do_signal(struct pt_regs *regs, int syscall)
+static int do_signal(struct pt_regs *regs, int syscall)
{
unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
struct k_sigaction ka;
siginfo_t info;
int signr;
+ int restart = 0;
/*
* If we were from a system call, check for system call restarting...
@@ -602,15 +590,15 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger will see the already changed PSW.
*/
switch (retval) {
+ case -ERESTART_RESTARTBLOCK:
+ restart -= 2;
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
+ restart++;
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
- case -ERESTART_RESTARTBLOCK:
- regs->ARM_r0 = -EINTR;
- break;
}
}
@@ -619,14 +607,17 @@ static void do_signal(struct pt_regs *regs, int syscall)
* point the debugger may change all our registers ...
*/
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ /*
+ * Depending on the signal settings we may need to revert the
+ * decision to restart the system call. But skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (regs->ARM_pc != restart_addr)
+ restart = 0;
if (signr > 0) {
- /*
- * Depending on the signal settings we may need to revert the
- * decision to restart the system call. But skip this if a
- * debugger has chosen to restart at a different PC.
- */
- if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND
+ if (unlikely(restart)) {
+ if (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
@@ -635,52 +626,43 @@ static void do_signal(struct pt_regs *regs, int syscall)
}
handle_signal(signr, &ka, &info, regs);
- return;
- }
-
- if (syscall) {
- /*
- * Handle restarting a different system call. As above,
- * if a debugger has chosen to restart at a different PC,
- * ignore the restart.
- */
- if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == continue_addr) {
- if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
- regs->ARM_pc -= 2;
- } else {
-#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
- regs->ARM_r7 = __NR_restart_syscall;
- regs->ARM_pc -= 4;
-#else
- u32 __user *usp;
-
- regs->ARM_sp -= 4;
- usp = (u32 __user *)regs->ARM_sp;
-
- if (put_user(regs->ARM_pc, usp) == 0) {
- regs->ARM_pc = KERN_RESTART_CODE;
- } else {
- regs->ARM_sp += 4;
- force_sigsegv(0, current);
- }
-#endif
- }
- }
+ return 0;
}
restore_saved_sigmask();
+ if (unlikely(restart))
+ regs->ARM_pc = continue_addr;
+ return restart;
}
-asmlinkage void
-do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+asmlinkage int
+do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
- if (thread_flags & _TIF_SIGPENDING)
- do_signal(regs, syscall);
-
- if (thread_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- }
+ do {
+ if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+ return 0;
+ local_irq_enable();
+ if (thread_flags & _TIF_SIGPENDING) {
+ int restart = do_signal(regs, syscall);
+ if (unlikely(restart)) {
+ /*
+ * Restart without handlers.
+ * Deal with it without leaving
+ * kernel space.
+ */
+ return restart;
+ }
+ syscall = 0;
+ } else {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ }
+ }
+ local_irq_disable();
+ thread_flags = current_thread_info()->flags;
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
}
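The restart bookkeeping above encodes three outcomes in the value do_work_pending() hands back to the work_pending stub: 0 (nothing to restart), positive (re-issue the same syscall via local_restart) and negative (switch scno to __NR_restart_syscall first, which is what the movlt in entry-common.S does). A standalone sketch of the no-handler path of that convention (the ERESTART* values mirror include/linux/errno.h; when a handler is delivered, SA_RESTART decides instead):

#include <stdio.h>

#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

/* 0: no restart, 1: restart same syscall, -1: restart via restart_syscall */
static int restart_code(int retval)
{
	int restart = 0;

	switch (retval) {
	case -ERESTART_RESTARTBLOCK:
		restart -= 2;
		/* fall through, exactly as do_signal() above does */
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		restart++;
	}
	return restart;
}

int main(void)
{
	printf("%d %d %d\n", restart_code(-ERESTARTSYS),
	       restart_code(-ERESTART_RESTARTBLOCK), restart_code(-5));
	/* prints: 1 -1 0 */
	return 0;
}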
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 6fcfe8398aa4..5ff067b7c752 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,7 +8,5 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
-extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2c7217d971db..ebd8ad274d76 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -179,7 +179,7 @@ void __ref cpu_die(void)
mb();
/* Tell __cpu_die() that this CPU is now safe to dispose of */
- complete(&cpu_died);
+ RCU_NONIDLE(complete(&cpu_died));
/*
* actual CPU shutdown procedure is at least platform (if not
@@ -563,7 +563,8 @@ void smp_send_stop(void)
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
- smp_cross_call(&mask, IPI_CPU_STOP);
+ if (!cpumask_empty(&mask))
+ smp_cross_call(&mask, IPI_CPU_STOP);
/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200deaa14f6..26c12c6440fc 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -17,11 +17,190 @@
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
+#include <linux/of.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/cputype.h>
#include <asm/topology.h>
+/*
+ * cpu power scale management
+ */
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
+#ifdef CONFIG_OF
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of the relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not listed in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+struct cpu_efficiency table_efficiency[] = {
+ {"arm,cortex-a15", 3891},
+ {"arm,cortex-a7", 2048},
+ {NULL, },
+};
+
+struct cpu_capacity {
+ unsigned long hwid;
+ unsigned long capacity;
+};
+
+struct cpu_capacity *cpu_capacity;
+
+unsigned long middle_capacity = 1;
+
+/*
+ * Iterate all CPUs' descriptor in DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} - min{eff_i}) / 2
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static void __init parse_dt_topology(void)
+{
+ struct cpu_efficiency *cpu_eff;
+ struct device_node *cn = NULL;
+ unsigned long min_capacity = (unsigned long)(-1);
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int alloc_size, cpu = 0;
+
+ alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
+ cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT);
+
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+ const u32 *rate, *reg;
+ int len;
+
+ if (cpu >= num_possible_cpus())
+ break;
+
+ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn, cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL)
+ continue;
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ reg = of_get_property(cn, "reg", &len);
+ if (!reg || len != 4) {
+ pr_err("%s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity[cpu].capacity = capacity;
+ cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+ }
+
+ if (cpu < num_possible_cpus())
+ cpu_capacity[cpu].hwid = (unsigned long)(-1);
+
+ /* If min and max capacities are equal, we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_POWER_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ cpu_capacity[0].hwid = (unsigned long)(-1);
+ else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_POWER_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_POWER_SHIFT-1)) + 1;
+
+}
+
+/*
+ * Look for a custom capacity for a CPU in the cpu_capacity table during
+ * boot. The update of all CPUs is O(n^2) for a heterogeneous system but the
+ * function returns directly for an SMP (homogeneous) system.
+ */
+void update_cpu_power(unsigned int cpu, unsigned long hwid)
+{
+ unsigned int idx = 0;
+
+ /* look for the cpu's hwid in the cpu capacity table */
+ for (idx = 0; idx < num_possible_cpus(); idx++) {
+ if (cpu_capacity[idx].hwid == hwid)
+ break;
+
+ if (cpu_capacity[idx].hwid == -1)
+ return;
+ }
+
+ if (idx == num_possible_cpus())
+ return;
+
+ set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+
+ printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+#else
+static inline void parse_dt_topology(void) {}
+static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+#endif
+
+
+/*
+ * cpu topology management
+ */
+
#define MPIDR_SMP_BITMASK (0x3 << 30)
#define MPIDR_SMP_VALUE (0x2 << 30)
@@ -31,6 +210,7 @@
* These masks reflect the current use of the affinity levels.
* The affinity level can be up to 16 bits according to ARM ARM
*/
+#define MPIDR_HWID_BITMASK 0xFFFFFF
#define MPIDR_LEVEL0_MASK 0x3
#define MPIDR_LEVEL0_SHIFT 0
@@ -41,6 +221,9 @@
#define MPIDR_LEVEL2_MASK 0xFF
#define MPIDR_LEVEL2_SHIFT 16
+/*
+ * cpu topology table
+ */
struct cputopo_arm cpu_topology[NR_CPUS];
const struct cpumask *cpu_coregroup_mask(int cpu)
@@ -48,6 +231,32 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return &cpu_topology[cpu].core_sibling;
}
+void update_siblings_masks(unsigned int cpuid)
+{
+ struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->socket_id != cpu_topo->socket_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+ smp_wmb();
+}
+
/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -57,7 +266,6 @@ void store_cpu_topology(unsigned int cpuid)
{
struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
unsigned int mpidr;
- unsigned int cpu;
/* If the cpu topology has been already set, just return */
if (cpuid_topo->core_id != -1)
@@ -99,26 +307,9 @@ void store_cpu_topology(unsigned int cpuid)
cpuid_topo->socket_id = -1;
}
- /* update core and thread sibling masks */
- for_each_possible_cpu(cpu) {
- struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->socket_id == cpu_topo->socket_id) {
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu,
- &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id == cpu_topo->core_id) {
- cpumask_set_cpu(cpuid,
- &cpu_topo->thread_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu,
- &cpuid_topo->thread_sibling);
- }
- }
- }
- smp_wmb();
+ update_siblings_masks(cpuid);
+
+ update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
@@ -130,11 +321,11 @@ void store_cpu_topology(unsigned int cpuid)
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
*/
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
{
unsigned int cpu;
- /* init core mask */
+ /* init core mask and power */
for_each_possible_cpu(cpu) {
struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
@@ -143,6 +334,10 @@ void init_cpu_topology(void)
cpu_topo->socket_id = -1;
cpumask_clear(&cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
+
+ set_power_scale(cpu, SCHED_POWER_SCALE);
}
smp_wmb();
+
+ parse_dt_topology();
}
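To make the scaling above concrete, here is a standalone arithmetic sketch with invented numbers: a Cortex-A15 cluster and a Cortex-A7 cluster, both listed with clock-frequency = 1 GHz in the DT, and SCHED_POWER_SHIFT assumed to be 10 (SCHED_POWER_SCALE == 1024):

#include <stdio.h>

#define SCHED_POWER_SHIFT	10

int main(void)
{
	/* clock-frequency >> 20, as in parse_dt_topology() */
	unsigned long rate = 1000000000UL >> 20;	/* 953 */
	unsigned long cap_a15 = rate * 3891;		/* 3708123 */
	unsigned long cap_a7  = rate * 2048;		/* 1951744 */
	unsigned long min = cap_a7, max = cap_a15, middle;

	if (4 * max < 3 * (max + min))
		middle = (min + max) >> (SCHED_POWER_SHIFT + 1);
	else
		middle = ((max / 3) >> (SCHED_POWER_SHIFT - 1)) + 1;

	printf("middle_capacity = %lu\n", middle);	/* 2763 */
	printf("cpu_power: A15 = %lu, A7 = %lu\n",
	       cap_a15 / middle, cap_a7 / middle);	/* 1342 and 706 */
	return 0;
}

With these inputs the A15s end up with cpu_power around 1342 and the A7s around 706, so the average lands close to SCHED_POWER_SCALE, which is what the middle_capacity computation is aiming for.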
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a16..f7945218b8c6 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -233,9 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#define S_ISA " ARM"
#endif
-static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -245,12 +245,12 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
if (ret == NOTIFY_STOP)
- return ret;
+ return 1;
print_modules();
__show_regs(regs);
printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
@@ -259,45 +259,77 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
dump_instr(KERN_EMERG, regs);
}
- return ret;
+ return 0;
}
-static DEFINE_RAW_SPINLOCK(die_lock);
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
-/*
- * This function is protected against re-entrancy.
- */
-void die(const char *str, struct pt_regs *regs, int err)
+static unsigned long oops_begin(void)
{
- struct thread_info *thread = current_thread_info();
- int ret;
- enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+ int cpu;
+ unsigned long flags;
oops_enter();
- raw_spin_lock_irq(&die_lock);
+ /* racy, but better than risking deadlock. */
+ raw_local_irq_save(flags);
+ cpu = smp_processor_id();
+ if (!arch_spin_trylock(&die_lock)) {
+ if (cpu == die_owner)
+ /* nested oops. should stop eventually */;
+ else
+ arch_spin_lock(&die_lock);
+ }
+ die_nest_count++;
+ die_owner = cpu;
console_verbose();
bust_spinlocks(1);
- if (!user_mode(regs))
- bug_type = report_bug(regs->ARM_pc, regs);
- if (bug_type != BUG_TRAP_TYPE_NONE)
- str = "Oops - BUG";
- ret = __die(str, err, thread, regs);
+ return flags;
+}
- if (regs && kexec_should_crash(thread->task))
+static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+{
+ if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
+ die_owner = -1;
add_taint(TAINT_DIE);
- raw_spin_unlock_irq(&die_lock);
+ die_nest_count--;
+ if (!die_nest_count)
+ /* Nest count reaches zero, release the lock. */
+ arch_spin_unlock(&die_lock);
+ raw_local_irq_restore(flags);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
- if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ if (signr)
+ do_exit(signr);
+}
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+ if (!user_mode(regs))
+ bug_type = report_bug(regs->ARM_pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
+
+ if (__die(str, err, regs))
+ sig = 0;
+
+ oops_end(flags, regs, sig);
}
void arm_notify_die(const char *str, struct pt_regs *regs,
@@ -370,18 +402,10 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
- unsigned int correction = thumb_mode(regs) ? 2 : 4;
unsigned int instr;
siginfo_t info;
void __user *pc;
- /*
- * According to the ARM ARM, PC is 2 or 4 bytes ahead,
- * depending whether we're in Thumb mode or not.
- * Correct this offset.
- */
- regs->ARM_pc -= correction;
-
pc = (void __user *)instruction_pointer(regs);
if (processor_mode(regs) == SVC_MODE) {
@@ -820,8 +844,6 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
- memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
- syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
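The die() rework above replaces the plain spinlock with an owner-tracking, nest-counted lock so that a second oops raised on the CPU that is already oopsing cannot deadlock against itself. A simplified sketch of that pattern (not the kernel code; IRQ handling, kexec and tainting omitted):

#include <linux/spinlock.h>
#include <linux/smp.h>

static arch_spinlock_t oops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int oops_owner = -1;
static unsigned int oops_nest;

static void oops_lock_enter(void)
{
	int cpu = smp_processor_id();

	if (!arch_spin_trylock(&oops_lock) && cpu != oops_owner)
		arch_spin_lock(&oops_lock);	/* held by another CPU: wait */
	/* else: nested oops on this CPU, we already hold the lock */
	oops_nest++;
	oops_owner = cpu;
}

static void oops_lock_exit(void)
{
	oops_owner = -1;
	if (!--oops_nest)
		arch_spin_unlock(&oops_lock);	/* outermost exit releases it */
}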
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 992769ae2599..af72969820b4 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,9 +6,8 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- delay.o findbit.o memchr.o memcpy.o \
+ delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
- strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
@@ -17,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
-mmu-y += copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ mmu-y += uaccess.o
+endif
+endif
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
-lib-$(CONFIG_MMU) += $(mmu-y)
-lib-y += io-readsw-armv4.o io-writesw-armv4.o
+lib-$(CONFIG_MMU) += $(mmu-y)
+
+ifeq ($(CONFIG_CPU_32v3),y)
+ lib-y += io-readsw-armv3.o io-writesw-armv3.o
+else
+ lib-y += io-readsw-armv4.o io-writesw-armv4.o
+endif
+
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
lib-$(CONFIG_ARCH_SHARK) += io-shark.o
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay-loop.S
index 3c9a05c8d20b..36b668d8e121 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay-loop.S
@@ -9,11 +9,11 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/param.h>
+#include <asm/delay.h>
.text
.LC0: .word loops_per_jiffy
-.LC1: .word (2199023*HZ)>>11
+.LC1: .word UDELAY_MULT
/*
* r0 <= 2000
@@ -21,10 +21,10 @@
* HZ <= 1000
*/
-ENTRY(__udelay)
+ENTRY(__loop_udelay)
ldr r2, .LC1
mul r0, r2, r0
-ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
+ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
mov r1, #-1
ldr r2, .LC0
ldr r2, [r2] @ max = 0x01ffffff
@@ -39,12 +39,10 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
- *
- * Oh, if only we had a cycle counter...
*/
@ Delay routine
-ENTRY(__delay)
+ENTRY(__loop_delay)
subs r0, r0, #1
#if 0
movls pc, lr
@@ -62,8 +60,8 @@ ENTRY(__delay)
movls pc, lr
subs r0, r0, #1
#endif
- bhi __delay
+ bhi __loop_delay
mov pc, lr
-ENDPROC(__udelay)
-ENDPROC(__const_udelay)
-ENDPROC(__delay)
+ENDPROC(__loop_udelay)
+ENDPROC(__loop_const_udelay)
+ENDPROC(__loop_delay)
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
new file mode 100644
index 000000000000..d6dacc69254e
--- /dev/null
+++ b/arch/arm/lib/delay.c
@@ -0,0 +1,71 @@
+/*
+ * Delay loops based on the OpenRISC implementation.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/timex.h>
+
+/*
+ * Default to the loop-based delay implementation.
+ */
+struct arm_delay_ops arm_delay_ops = {
+ .delay = __loop_delay,
+ .const_udelay = __loop_const_udelay,
+ .udelay = __loop_udelay,
+};
+
+#ifdef ARCH_HAS_READ_CURRENT_TIMER
+static void __timer_delay(unsigned long cycles)
+{
+ cycles_t start = get_cycles();
+
+ while ((get_cycles() - start) < cycles)
+ cpu_relax();
+}
+
+static void __timer_const_udelay(unsigned long xloops)
+{
+ unsigned long long loops = xloops;
+ loops *= loops_per_jiffy;
+ __timer_delay(loops >> UDELAY_SHIFT);
+}
+
+static void __timer_udelay(unsigned long usecs)
+{
+ __timer_const_udelay(usecs * UDELAY_MULT);
+}
+
+void __init init_current_timer_delay(unsigned long freq)
+{
+ pr_info("Switching to timer-based delay loop\n");
+ lpj_fine = freq / HZ;
+ arm_delay_ops.delay = __timer_delay;
+ arm_delay_ops.const_udelay = __timer_const_udelay;
+ arm_delay_ops.udelay = __timer_udelay;
+}
+
+unsigned long __cpuinit calibrate_delay_is_known(void)
+{
+ return lpj_fine;
+}
+#endif
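A platform with a fixed-rate, always-running counter is expected to call init_current_timer_delay() once that counter is usable, after which arm_delay_ops points at the __timer_* helpers and udelay() no longer depends on loops_per_jiffy calibration. A hypothetical hook (names and rate invented; the declaration is assumed to reach the caller via linux/delay.h -> asm/delay.h):

#include <linux/init.h>
#include <linux/delay.h>

#define MY_COUNTER_RATE_HZ	24000000UL	/* made-up 24 MHz counter */

static void __init my_platform_timer_init(void)
{
	/* ... clocksource/clockevent registration would go here ... */

	init_current_timer_delay(MY_COUNTER_RATE_HZ);
}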
diff --git a/arch/arm/lib/io-acorn.S b/arch/arm/lib/io-acorn.S
index 1b197ea7aab3..69719bad674d 100644
--- a/arch/arm/lib/io-acorn.S
+++ b/arch/arm/lib/io-acorn.S
@@ -11,13 +11,14 @@
*
*/
#include <linux/linkage.h>
+#include <linux/kern_levels.h>
#include <asm/assembler.h>
.text
.align
.Liosl_warning:
- .ascii "<4>insl/outsl not implemented, called from %08lX\0"
+ .ascii KERN_WARNING "insl/outsl not implemented, called from %08lX\0"
.align
/*
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
new file mode 100644
index 000000000000..88487c8c4f23
--- /dev/null
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/lib/io-readsw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Linsw_bad_alignment:
+ adr r0, .Linsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Linsw_bad_align_msg:
+ .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Linsw_align: tst r1, #1
+ bne .Linsw_bad_alignment
+
+ ldr r3, [r0]
+ strb r3, [r1], #1
+ mov r3, r3, lsr #8
+ strb r3, [r1], #1
+
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_readsw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Linsw_align
+
+.Linsw_aligned: mov ip, #0xff
+ orr ip, ip, ip, lsl #8
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_insw_8
+
+.Linsw_8_lp: ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ ldr r5, [r0]
+ and r5, r5, ip
+ ldr r6, [r0]
+ orr r5, r5, r6, lsl #16
+
+ ldr r6, [r0]
+ and r6, r6, ip
+ ldr lr, [r0]
+ orr r6, r6, lr, lsl #16
+
+ stmia r1!, {r3 - r6}
+
+ subs r2, r2, #8
+ bpl .Linsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_insw_8: tst r2, #4
+ beq .Lno_insw_4
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ stmia r1!, {r3, r4}
+
+.Lno_insw_4: tst r2, #2
+ beq .Lno_insw_2
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ str r3, [r1], #4
+
+.Lno_insw_2: tst r2, #1
+ ldrne r3, [r0]
+ strneb r3, [r1], #1
+ movne r3, r3, lsr #8
+ strneb r3, [r1]
+
+ ldmfd sp!, {r4, r5, r6, pc}
+
+
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
new file mode 100644
index 000000000000..49b800419e32
--- /dev/null
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/arm/lib/io-writesw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Loutsw_bad_alignment:
+ adr r0, .Loutsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Loutsw_bad_align_msg:
+ .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Loutsw_align: tst r1, #1
+ bne .Loutsw_bad_alignment
+
+ add r1, r1, #2
+
+ ldr r3, [r1, #-4]
+ mov r3, r3, lsr #16
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_writesw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Loutsw_align
+
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_outsw_8
+
+.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r5, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r5, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r6, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r6, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ subs r2, r2, #8
+ bpl .Loutsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_outsw_8: tst r2, #4
+ beq .Lno_outsw_4
+
+ ldmia r1!, {r3, r4}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_4: tst r2, #2
+ beq .Lno_outsw_2
+
+ ldr r3, [r1], #4
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_2: tst r2, #1
+
+ ldrne r3, [r1]
+
+ movne ip, r3, lsl #16
+ orrne ip, ip, ip, lsr #16
+ strne ip, [r0]
+
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
deleted file mode 100644
index f202d7bd1647..000000000000
--- a/arch/arm/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * linux/arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * r0 = dst, r1 = src, r2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov ip, r1
-1: subs r2, r2, #1
- ldrusr r3, r1, 1, pl
- bmi 2f
- strb r3, [r0], #1
- teq r3, #0
- bne 1b
- sub r1, r1, #1 @ take NUL character out of count
-2: sub r0, r1, ip
- mov pc, lr
-ENDPROC(__strncpy_from_user)
-
- .pushsection .fixup,"ax"
- .align 0
-9001: mov r3, #0
- strb r3, [r0, #0] @ null terminate
- mov r0, #-EFAULT
- mov pc, lr
- .popsection
-
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
deleted file mode 100644
index 0ecbb459c4f1..000000000000
--- a/arch/arm/lib/strnlen_user.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * linux/arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n + 1 if too long
- */
-ENTRY(__strnlen_user)
- mov r2, r0
-1:
- ldrusr r3, r0, 1
- teq r3, #0
- beq 2f
- subs r1, r1, #1
- bne 1b
- add r0, r0, #1
-2: sub r0, r0, r2
- mov pc, lr
-ENDPROC(__strnlen_user)
-
- .pushsection .fixup,"ax"
- .align 0
-9001: mov r0, #0
- mov pc, lr
- .popsection
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 000000000000..5c908b1cb8ed
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,564 @@
+/*
+ * linux/arch/arm/lib/uaccess.S
+ *
+ * Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Routines to block copy data to/from user memory
+ * These are highly optimised both for the 4k page size
+ * and for various alignments.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/domain.h>
+
+ .text
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.Lc2u_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+ENTRY(__copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+ ands ip, r0, #3
+ bne .Lc2u_dest_not_aligned
+.Lc2u_dest_aligned:
+
+ ands ip, r1, #3
+ bne .Lc2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lc2u_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_0nowords
+ ldr r3, [r1], #4
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lc2u_0rem8lp
+
+.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6}
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ ldmia r1!, {r3 - r6}
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_0cpy8lp
+
+.Lc2u_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6} @ Shouldnt fault
+ tst ip, #8
+ ldmneia r1!, {r3 - r4}
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_0fupi
+.Lc2u_0nowords: teq ip, #0
+ beq .Lc2u_finished
+.Lc2u_nowords: cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_not_enough:
+ movs ip, r2
+ bne .Lc2u_nowords
+.Lc2u_finished: mov r0, #0
+ ldmfd sp!, {r2, r4 - r7, pc}
+
+.Lc2u_src_not_aligned:
+ bic r1, r1, #3
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt .Lc2u_3fupi
+ beq .Lc2u_2fupi
+.Lc2u_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_1nowords
+ mov r3, r7, pull #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #24
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_1rem8lp
+
+.Lc2u_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_1cpy8lp
+
+.Lc2u_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #24
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_1fupi
+.Lc2u_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_2
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ movgt r3, r7, get_byte_3
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_2nowords
+ mov r3, r7, pull #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #16
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_2rem8lp
+
+.Lc2u_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_2cpy8lp
+
+.Lc2u_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #16
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_2fupi
+.Lc2u_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_3
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_3nowords
+ mov r3, r7, pull #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #8
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_3rem8lp
+
+.Lc2u_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_3cpy8lp
+
+.Lc2u_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #8
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_3fupi
+.Lc2u_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+ENDPROC(__copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+.Lcfu_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+ENTRY(__copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+ ands ip, r0, #3
+ bne .Lcfu_dest_not_aligned
+.Lcfu_dest_aligned:
+ ands ip, r1, #3
+ bne .Lcfu_src_not_aligned
+
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lcfu_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_0nowords
+USER( TUSER( ldr) r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lcfu_0rem8lp
+
+.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault
+ stmia r0!, {r3 - r6}
+ ldmia r1!, {r3 - r6} @ Shouldnt fault
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_0cpy8lp
+
+.Lcfu_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6} @ Shouldnt fault
+ stmgeia r0!, {r3 - r6}
+ tst ip, #8
+ ldmneia r1!, {r3 - r4} @ Shouldnt fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_0fupi
+.Lcfu_0nowords: teq ip, #0
+ beq .Lcfu_finished
+.Lcfu_nowords: cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_not_enough:
+ movs ip, r2
+ bne .Lcfu_nowords
+.Lcfu_finished: mov r0, #0
+ add sp, sp, #8
+ ldmfd sp!, {r4 - r7, pc}
+
+.Lcfu_src_not_aligned:
+ bic r1, r1, #3
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ cmp ip, #2
+ bgt .Lcfu_3fupi
+ beq .Lcfu_2fupi
+.Lcfu_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_1nowords
+ mov r3, r7, pull #8
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_1rem8lp
+
+.Lcfu_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_1cpy8lp
+
+.Lcfu_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #8
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_1fupi
+.Lcfu_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_2
+ strgeb r3, [r0], #1
+ movgt r3, r7, get_byte_3
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_2nowords
+ mov r3, r7, pull #16
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_2rem8lp
+
+
+.Lcfu_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7} @ Shouldn't fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_2cpy8lp
+
+.Lcfu_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7} @ Shouldn't fault
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #16
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_2fupi
+.Lcfu_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_3
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_3nowords
+ mov r3, r7, pull #24
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_3rem8lp
+
+.Lcfu_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7} @ Shouldn't fault
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .Lcfu_3cpy8lp
+
+.Lcfu_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7} @ Shouldn't fault
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #24
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_3fupi
+.Lcfu_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+ENDPROC(__copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ /*
+ * We took an exception. r0 contains a pointer to
+ * the byte not copied.
+ */
+9001: ldr r2, [sp], #4 @ void *to
+ sub r2, r0, r2 @ bytes copied
+ ldr r1, [sp], #4 @ unsigned long count
+ subs r4, r1, r2 @ bytes left to copy
+ movne r1, r4
+ blne __memzero
+ mov r0, r4
+ ldmfd sp!, {r4 - r7, pc}
+ .popsection
+
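The block comment above __copy_from_user and the fixup handler together define the contract callers rely on: the return value is the number of bytes that could not be copied, and on a fault the uncopied tail of the destination buffer is cleared (via __memzero) before returning. For reference, here is a minimal hypothetical caller written against the generic copy_from_user() wrapper; the struct and function names are invented for illustration and are not part of this patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_args {
	unsigned int flags;
	unsigned int len;
};

/* Hypothetical helper: fetch a small argument block from user space. */
static int demo_fetch_args(struct demo_args *dst, const void __user *src)
{
	/* copy_from_user() returns the number of bytes NOT copied. */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;	/* partial copy: the uncopied tail of *dst was zeroed */

	return 0;
}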
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..aaa443b48c91 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
at91_st_read(AT91_ST_SR);
/* Make IRQs happen for the system timer */
- setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396ed..bce572a530ef 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
},
};
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9260_rtt_device.num_resources = 2;
+ at91sam9260_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff92..bc2590d712d0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9261_rtt_device.num_resources = 2;
+ at91sam9261_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9f..9b6ca734f1a9 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed only for the chosen RTT:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9263_rtt0_device.num_resources = 2;
+ at91sam9263_rtt0_device.num_resources = 3;
at91sam9263_rtt1_device.num_resources = 1;
pdev = &at91sam9263_rtt0_device;
r = rtt0_resources;
break;
case 1:
at91sam9263_rtt0_device.num_resources = 1;
- at91sam9263_rtt1_device.num_resources = 2;
+ at91sam9263_rtt1_device.num_resources = 3;
pdev = &at91sam9263_rtt1_device;
r = rtt1_resources;
break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
pdev->name = "rtc-at91sam9";
r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
r[1].end = r[1].start + 3;
+ r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a382..1b47319ca00b 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9g45_rtt_device.num_resources = 2;
+ at91sam9g45_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff932172..b3d365dadef5 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9rl_rtt_device.num_resources = 2;
+ at91sam9rl_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
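Each of these rtt hunks makes the same change: a third, IORESOURCE_IRQ entry is appended and filled with NR_IRQS_LEGACY + AT91_ID_SYS, so the interrupt travels with the platform device instead of being hard-coded in the driver. Purely for illustration, a driver probe could pick the three resources up roughly as follows (the function below is hypothetical and not taken from the rtc-at91sam9 driver):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>

static int demo_rtt_probe(struct platform_device *pdev)
{
	struct resource *regs, *gpbr;
	int irq;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);	/* RTT registers */
	gpbr = platform_get_resource(pdev, IORESOURCE_MEM, 1);	/* GPBR word holding the RTC time offset */
	irq  = platform_get_irq(pdev, 0);	/* NR_IRQS_LEGACY + AT91_ID_SYS, per the tables above */
	if (!regs || !gpbr || irq < 0)
		return -ENODEV;

	/* ... ioremap the two regions and request_irq(irq, ...) as usual ... */
	return 0;
}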
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea7..188c82971ebd 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
#define cpu_has_300M_plla() (cpu_is_at91sam9g10())
+#define cpu_has_240M_plla() (cpu_is_at91sam9261() \
+ || cpu_is_at91sam9263() \
+ || cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla() (cpu_is_at91sam9260())
+
#define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
|| cpu_is_at91sam9g45() \
|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
} else if (cpu_has_800M_plla()) {
if (plla.rate_hz > 800000000)
pll_overclock = true;
+ } else if (cpu_has_240M_plla()) {
+ if (plla.rate_hz > 240000000)
+ pll_overclock = true;
+ } else if (cpu_has_210M_plla()) {
+ if (plla.rate_hz > 210000000)
+ pll_overclock = true;
} else {
if (plla.rate_hz > 209000000)
pll_overclock = true;
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 5de69f2fcca9..f6b9fc70161b 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void)
dm644x_init();
}
-/*
- I2C initialization
-*/
-static struct davinci_i2c_platform_data ntosd2_i2c_pdata = {
- .bus_freq = 20 /* kHz */,
- .bus_delay = 100 /* usec */,
-};
-
-static struct i2c_board_info __initdata ntosd2_i2c_info[] = {
-};
-
-static int ntosd2_init_i2c(void)
-{
- int status;
-
- davinci_init_i2c(&ntosd2_i2c_pdata);
- status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type);
- if (status == 0) {
- status = gpio_direction_input(NTOSD2_MSP430_IRQ);
- if (status == 0) {
- status = gpio_to_irq(NTOSD2_MSP430_IRQ);
- if (status > 0) {
- ntosd2_i2c_info[0].irq = status;
- i2c_register_board_info(1,
- ntosd2_i2c_info,
- ARRAY_SIZE(ntosd2_i2c_info));
- }
- }
- }
- return status;
-}
-
static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
.wires = 4,
.version = MMC_CTLR_VERSION_1
@@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void)
{
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- int status;
aemif_clk = clk_get(NULL, "aemif");
clk_enable(aemif_clk);
@@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void)
platform_add_devices(davinci_ntosd2_devices,
ARRAY_SIZE(davinci_ntosd2_devices));
- /* Initialize I2C interface specific for this board */
- status = ntosd2_init_i2c();
- if (status < 0)
- pr_warning("davinci_ntosd2_init: msp430 irq setup failed:"
- " %d\n", status);
-
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_ntosd2_snd_data);
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index d1624a315c9a..783eab6845c4 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -546,6 +546,7 @@ static struct lcd_ctrl_config lcd_cfg = {
.sync_edge = 0,
.sync_ctrl = 1,
.raster_order = 0,
+ .fifo_th = 6,
};
struct da8xx_lcdc_platform_data sharp_lcd035q3dg01_pdata = {
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 9493076fc594..6321567d8eaa 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -101,8 +101,9 @@ void __init dove_ehci1_init(void)
****************************************************************************/
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
- orion_ge00_init(eth_data,
- DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
+ orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
+ IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
+ 1600);
}
/*****************************************************************************
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index f07fd16e0c9b..9bc97a5baaa8 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -20,22 +20,6 @@
#include <mach/bridge-regs.h>
#include "common.h"
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- int irqoff;
- BUG_ON(irq < IRQ_DOVE_GPIO_0_7 || irq > IRQ_DOVE_HIGH_GPIO);
-
- irqoff = irq <= IRQ_DOVE_GPIO_16_23 ? irq - IRQ_DOVE_GPIO_0_7 :
- 3 + irq - IRQ_DOVE_GPIO_24_31;
-
- orion_gpio_irq_handler(irqoff << 3);
- if (irq == IRQ_DOVE_HIGH_GPIO) {
- orion_gpio_irq_handler(40);
- orion_gpio_irq_handler(48);
- orion_gpio_irq_handler(56);
- }
-}
-
static void pmu_irq_mask(struct irq_data *d)
{
int pin = irq_to_pmu(d->irq);
@@ -90,6 +74,27 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
+static int __initdata gpio0_irqs[4] = {
+ IRQ_DOVE_GPIO_0_7,
+ IRQ_DOVE_GPIO_8_15,
+ IRQ_DOVE_GPIO_16_23,
+ IRQ_DOVE_GPIO_24_31,
+};
+
+static int __initdata gpio1_irqs[4] = {
+ IRQ_DOVE_HIGH_GPIO,
+ 0,
+ 0,
+ 0,
+};
+
+static int __initdata gpio2_irqs[4] = {
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
void __init dove_init_irq(void)
{
int i;
@@ -100,19 +105,14 @@ void __init dove_init_irq(void)
/*
* Initialize gpiolib for GPIOs 0-71.
*/
- orion_gpio_init(0, 32, DOVE_GPIO_LO_VIRT_BASE, 0,
- IRQ_DOVE_GPIO_START);
- irq_set_chained_handler(IRQ_DOVE_GPIO_0_7, gpio_irq_handler);
- irq_set_chained_handler(IRQ_DOVE_GPIO_8_15, gpio_irq_handler);
- irq_set_chained_handler(IRQ_DOVE_GPIO_16_23, gpio_irq_handler);
- irq_set_chained_handler(IRQ_DOVE_GPIO_24_31, gpio_irq_handler);
-
- orion_gpio_init(32, 32, DOVE_GPIO_HI_VIRT_BASE, 0,
- IRQ_DOVE_GPIO_START + 32);
- irq_set_chained_handler(IRQ_DOVE_HIGH_GPIO, gpio_irq_handler);
-
- orion_gpio_init(64, 8, DOVE_GPIO2_VIRT_BASE, 0,
- IRQ_DOVE_GPIO_START + 64);
+ orion_gpio_init(NULL, 0, 32, (void __iomem *)DOVE_GPIO_LO_VIRT_BASE, 0,
+ IRQ_DOVE_GPIO_START, gpio0_irqs);
+
+ orion_gpio_init(NULL, 32, 32, (void __iomem *)DOVE_GPIO_HI_VIRT_BASE, 0,
+ IRQ_DOVE_GPIO_START + 32, gpio1_irqs);
+
+ orion_gpio_init(NULL, 64, 8, (void __iomem *)DOVE_GPIO2_VIRT_BASE, 0,
+ IRQ_DOVE_GPIO_START + 64, gpio2_irqs);
/*
* Mask and clear PMU interrupts
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index 26fe9de35ecb..2f51293c1875 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -620,10 +620,6 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 27),
}, {
- .name = "fimg2d",
- .enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 0),
- }, {
.name = "mfc",
.devname = "s5p-mfc",
.enable = exynos4_clk_ip_mfc_ctrl,
@@ -819,47 +815,21 @@ static struct clk *exynos4_clkset_mout_g2d0_list[] = {
[1] = &exynos4_clk_sclk_apll.clk,
};
-static struct clksrc_sources exynos4_clkset_mout_g2d0 = {
+struct clksrc_sources exynos4_clkset_mout_g2d0 = {
.sources = exynos4_clkset_mout_g2d0_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d0_list),
};
-static struct clksrc_clk exynos4_clk_mout_g2d0 = {
- .clk = {
- .name = "mout_g2d0",
- },
- .sources = &exynos4_clkset_mout_g2d0,
- .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 0, .size = 1 },
-};
-
static struct clk *exynos4_clkset_mout_g2d1_list[] = {
[0] = &exynos4_clk_mout_epll.clk,
[1] = &exynos4_clk_sclk_vpll.clk,
};
-static struct clksrc_sources exynos4_clkset_mout_g2d1 = {
+struct clksrc_sources exynos4_clkset_mout_g2d1 = {
.sources = exynos4_clkset_mout_g2d1_list,
.nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d1_list),
};
-static struct clksrc_clk exynos4_clk_mout_g2d1 = {
- .clk = {
- .name = "mout_g2d1",
- },
- .sources = &exynos4_clkset_mout_g2d1,
- .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 4, .size = 1 },
-};
-
-static struct clk *exynos4_clkset_mout_g2d_list[] = {
- [0] = &exynos4_clk_mout_g2d0.clk,
- [1] = &exynos4_clk_mout_g2d1.clk,
-};
-
-static struct clksrc_sources exynos4_clkset_mout_g2d = {
- .sources = exynos4_clkset_mout_g2d_list,
- .nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d_list),
-};
-
static struct clk *exynos4_clkset_mout_mfc0_list[] = {
[0] = &exynos4_clk_mout_mpll.clk,
[1] = &exynos4_clk_sclk_apll.clk,
@@ -1126,13 +1096,6 @@ static struct clksrc_clk exynos4_clksrcs[] = {
.reg_div = { .reg = EXYNOS4_CLKDIV_LCD0, .shift = 0, .size = 4 },
}, {
.clk = {
- .name = "sclk_fimg2d",
- },
- .sources = &exynos4_clkset_mout_g2d,
- .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 8, .size = 1 },
- .reg_div = { .reg = EXYNOS4_CLKDIV_IMAGE, .shift = 0, .size = 4 },
- }, {
- .clk = {
.name = "sclk_mfc",
.devname = "s5p-mfc",
},
diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h
index 28a119701182..bd12d5f8b63d 100644
--- a/arch/arm/mach-exynos/clock-exynos4.h
+++ b/arch/arm/mach-exynos/clock-exynos4.h
@@ -23,6 +23,9 @@ extern struct clksrc_sources exynos4_clkset_group;
extern struct clk *exynos4_clkset_aclk_top_list[];
extern struct clk *exynos4_clkset_group_list[];
+extern struct clksrc_sources exynos4_clkset_mout_g2d0;
+extern struct clksrc_sources exynos4_clkset_mout_g2d1;
+
extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index b8689ff60baf..fed4c26e9dad 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -48,6 +48,32 @@ static struct clksrc_clk *sysclks[] = {
/* nothing here yet */
};
+static struct clksrc_clk exynos4210_clk_mout_g2d0 = {
+ .clk = {
+ .name = "mout_g2d0",
+ },
+ .sources = &exynos4_clkset_mout_g2d0,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 0, .size = 1 },
+};
+
+static struct clksrc_clk exynos4210_clk_mout_g2d1 = {
+ .clk = {
+ .name = "mout_g2d1",
+ },
+ .sources = &exynos4_clkset_mout_g2d1,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 4, .size = 1 },
+};
+
+static struct clk *exynos4210_clkset_mout_g2d_list[] = {
+ [0] = &exynos4210_clk_mout_g2d0.clk,
+ [1] = &exynos4210_clk_mout_g2d1.clk,
+};
+
+static struct clksrc_sources exynos4210_clkset_mout_g2d = {
+ .sources = exynos4210_clkset_mout_g2d_list,
+ .nr_sources = ARRAY_SIZE(exynos4210_clkset_mout_g2d_list),
+};
+
static int exynos4_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4210_CLKSRC_MASK_LCD1, clk, enable);
@@ -74,6 +100,13 @@ static struct clksrc_clk clksrcs[] = {
.sources = &exynos4_clkset_group,
.reg_src = { .reg = EXYNOS4210_CLKSRC_LCD1, .shift = 0, .size = 4 },
.reg_div = { .reg = EXYNOS4210_CLKDIV_LCD1, .shift = 0, .size = 4 },
+ }, {
+ .clk = {
+ .name = "sclk_fimg2d",
+ },
+ .sources = &exynos4210_clkset_mout_g2d,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 8, .size = 1 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_IMAGE, .shift = 0, .size = 4 },
},
};
@@ -105,6 +138,10 @@ static struct clk init_clocks_off[] = {
.devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 4),
+ }, {
+ .name = "fimg2d",
+ .enable = exynos4_clk_ip_image_ctrl,
+ .ctrlbit = (1 << 0),
},
};
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index da397d21bbcf..8fba0b5fb8ab 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -68,12 +68,45 @@ static struct clksrc_clk clk_mout_mpll_user = {
.reg_src = { .reg = EXYNOS4_CLKSRC_CPU, .shift = 24, .size = 1 },
};
+static struct clksrc_clk exynos4x12_clk_mout_g2d0 = {
+ .clk = {
+ .name = "mout_g2d0",
+ },
+ .sources = &exynos4_clkset_mout_g2d0,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 20, .size = 1 },
+};
+
+static struct clksrc_clk exynos4x12_clk_mout_g2d1 = {
+ .clk = {
+ .name = "mout_g2d1",
+ },
+ .sources = &exynos4_clkset_mout_g2d1,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 24, .size = 1 },
+};
+
+static struct clk *exynos4x12_clkset_mout_g2d_list[] = {
+ [0] = &exynos4x12_clk_mout_g2d0.clk,
+ [1] = &exynos4x12_clk_mout_g2d1.clk,
+};
+
+static struct clksrc_sources exynos4x12_clkset_mout_g2d = {
+ .sources = exynos4x12_clkset_mout_g2d_list,
+ .nr_sources = ARRAY_SIZE(exynos4x12_clkset_mout_g2d_list),
+};
+
static struct clksrc_clk *sysclks[] = {
&clk_mout_mpll_user,
};
static struct clksrc_clk clksrcs[] = {
- /* nothing here yet */
+ {
+ .clk = {
+ .name = "sclk_fimg2d",
+ },
+ .sources = &exynos4x12_clkset_mout_g2d,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 28, .size = 1 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_DMC1, .shift = 0, .size = 4 },
+ },
};
static struct clk init_clocks_off[] = {
@@ -102,7 +135,11 @@ static struct clk init_clocks_off[] = {
.devname = "exynos-fimc-lite.1",
.enable = exynos4212_clk_ip_isp0_ctrl,
.ctrlbit = (1 << 3),
- }
+ }, {
+ .name = "fimg2d",
+ .enable = exynos4_clk_ip_dmc_ctrl,
+ .ctrlbit = (1 << 23),
+ },
};
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index f98a83a81ce7..ea785fcaf6c3 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1066,12 +1066,8 @@ static struct platform_device nuri_max8903_device = {
static void __init nuri_power_init(void)
{
int gpio;
- int irq_base = IRQ_GPIO_END + 1;
int ta_en = 0;
- nuri_max8997_pdata.irq_base = irq_base;
- irq_base += MAX8997_IRQ_NR;
-
gpio = EXYNOS4_GPX0(7);
gpio_request(gpio, "AP_PMIC_IRQ");
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(0xf));
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 5a12dc26f496..4e574c24581c 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -42,6 +42,7 @@
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/mfc.h>
+#include <plat/hdmi.h>
#include <mach/ohci.h>
#include <mach/map.h>
@@ -426,7 +427,6 @@ static struct max8997_platform_data __initdata origen_max8997_pdata = {
.buck1_gpiodvs = false,
.buck2_gpiodvs = false,
.buck5_gpiodvs = false,
- .irq_base = IRQ_GPIO_END + 1,
.ignore_gpiodvs_side_effect = true,
.buck125_default_idx = 0x0,
@@ -735,6 +735,11 @@ static void __init origen_bt_setup(void)
s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
}
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* Direct HPD to HDMI chip */
@@ -782,6 +787,7 @@ static void __init origen_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
#ifdef CONFIG_DRM_EXYNOS
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 3cfa688d274a..73f2bce097e1 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -40,6 +40,7 @@
#include <plat/mfc.h>
#include <plat/ehci.h>
#include <plat/clock.h>
+#include <plat/hdmi.h>
#include <mach/map.h>
#include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
.pwm_period_ns = 1000,
};
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
samsung_keypad_set_platdata(&smdkv310_keypad_data);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 373c3c00d24c..c0bc83a7663e 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void)
}
#endif /* CONFIG_OF */
-static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
+static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7ac..020852d3bdd8 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include <mach/hardware.h>
#define IRQ_SOURCE(base_addr) (base_addr + 0x00)
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 07f7c226e4cf..d004d37ad9d8 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+imx5-pm-$(CONFIG_PM) += pm-imx5.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
-obj-$(CONFIG_CPU_V7) += head-v7.o
-AFLAGS_head-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_SMP) += platsmp.o
+AFLAGS_headsmp.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
endif
# i.MX5 based machines
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 7aa6313fb167..f69ca4680049 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
- clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0");
clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
@@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
- clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
- clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+ clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0");
+ clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0");
+ clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0");
+ clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0");
clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index 8e19e70f90f9..1253af2d9971 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
- clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+ clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index f6086693ebd2..4bdcaa97bd98 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_prepare_enable(clk[aips_tz2]); /* fec */
clk_prepare_enable(clk[spba]);
clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+ clk_prepare_enable(clk[emi_slow_gate]); /* eim */
clk_prepare_enable(clk[tmax1]);
clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e22..4233d9e3531d 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
clk_max
};
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
- clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
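The two new ldb_di*_div_3_5 clocks model the fixed divide-by-3.5 stage that sits in front of the existing one-bit podf dividers: a fixed-factor clock with mult = 2 and div = 7 gives parent_rate * 2 / 7, which is exactly parent_rate / 3.5. Below is a generic common-clock sketch of the same construction, assuming the clk_register_fixed_factor() core helper that imx_clk_fixed_factor() wraps; the clock name is made up for illustration:

#include <linux/clk-provider.h>

/*
 * Register a /3.5 stage: the fixed-factor clock computes
 * child_rate = parent_rate * mult / div = parent_rate * 2 / 7.
 */
static struct clk *demo_register_div_3_5(const char *parent)
{
	return clk_register_fixed_factor(NULL, "demo_div_3_5", parent,
					 0, 2, 7);
}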
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S
index 7e49deb128a4..7e49deb128a4 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/headsmp.S
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1af..f8f7437c83b8 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
: "cc");
}
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-
/*
* platform-specific code to shutdown a CPU
*
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
imx_enable_cpu(cpu, false);
- cpu_do_idle();
- cpu_leave_lowpower();
- /* We should never return from idle */
- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+ /* spin here until hardware takes it down */
+ while (1)
+ ;
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 5ec0608f2a76..045b3f6a387d 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -71,7 +71,7 @@ soft:
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
- if (IS_ENABLED(CONFIG_PHYLIB)) {
+ if (IS_BUILTIN(CONFIG_PHYLIB)) {
/* min rx data delay */
phy_write(phydev, 0x0b, 0x8105);
phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
static void __init imx6q_sabrelite_init(void)
{
- if (IS_ENABLED(CONFIG_PHYLIB))
+ if (IS_BUILTIN(CONFIG_PHYLIB))
phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
ksz9021rn_phy_fixup);
imx6q_sabrelite_cko1_setup();
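The functional change in this hunk is the switch from IS_ENABLED() to IS_BUILTIN(): IS_ENABLED(CONFIG_PHYLIB) is true when phylib is built in (=y) or modular (=m), while IS_BUILTIN() is true only for =y, which is what this always-built-in board file needs before it can call phylib symbols directly. A small stand-alone illustration of the two macros (the messages are illustrative only):

#include <linux/kconfig.h>
#include <linux/printk.h>

static void demo_report_phylib(void)
{
	/* True when CONFIG_PHYLIB=y or CONFIG_PHYLIB=m. */
	if (IS_ENABLED(CONFIG_PHYLIB))
		pr_info("phylib is enabled, built-in or as a module\n");

	/* True only when CONFIG_PHYLIB=y, i.e. compiled into the kernel image. */
	if (IS_BUILTIN(CONFIG_PHYLIB))
		pr_info("phylib is built in; board-file fixups can use it\n");
}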
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 388928fdb11a..f3f5c6542ab4 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -89,11 +89,11 @@ static const struct resource imx25_audmux_res[] __initconst = {
void __init imx25_soc_init(void)
{
- /* i.mx25 has the i.mx31 type gpio */
- mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
- mxc_register_gpio("imx31-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
- mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
- mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
+ /* i.mx25 has the i.mx35 type gpio */
+ mxc_register_gpio("imx35-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
+ mxc_register_gpio("imx35-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
+ mxc_register_gpio("imx35-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
+ mxc_register_gpio("imx35-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
pinctrl_provide_dummies();
/* i.mx25 has the i.mx35 type sdma */
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index fe96105109b3..9d2c843bde02 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -272,10 +272,9 @@ void __init imx35_soc_init(void)
imx3_init_l2x0();
- /* i.mx35 has the i.mx31 type gpio */
- mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0);
- mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0);
- mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0);
+ mxc_register_gpio("imx35-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0);
+ mxc_register_gpio("imx35-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0);
+ mxc_register_gpio("imx35-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0);
pinctrl_provide_dummies();
if (to_version == 1) {
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index f19d604e1b2a..52d8f534be10 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -161,13 +161,13 @@ static const struct resource imx53_audmux_res[] __initconst = {
void __init imx50_soc_init(void)
{
- /* i.mx50 has the i.mx31 type gpio */
- mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
- mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
- mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
- mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
- mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
- mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);
+ /* i.mx50 has the i.mx35 type gpio */
+ mxc_register_gpio("imx35-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx35-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx35-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx35-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx35-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx35-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);
/* i.mx50 has the i.mx31 type audmux */
platform_device_register_simple("imx31-audmux", 0, imx50_audmux_res,
@@ -176,11 +176,11 @@ void __init imx50_soc_init(void)
void __init imx51_soc_init(void)
{
- /* i.mx51 has the i.mx31 type gpio */
- mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_INT_GPIO1_LOW, MX51_INT_GPIO1_HIGH);
- mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_INT_GPIO2_LOW, MX51_INT_GPIO2_HIGH);
- mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
- mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
+ /* i.mx51 has the i.mx35 type gpio */
+ mxc_register_gpio("imx35-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_INT_GPIO1_LOW, MX51_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx35-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_INT_GPIO2_LOW, MX51_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx35-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx35-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
pinctrl_provide_dummies();
@@ -198,14 +198,14 @@ void __init imx51_soc_init(void)
void __init imx53_soc_init(void)
{
- /* i.mx53 has the i.mx31 type gpio */
- mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
- mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
- mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
- mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
- mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
- mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
- mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);
+ /* i.mx53 has the i.mx35 type gpio */
+ mxc_register_gpio("imx35-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx35-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx35-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx35-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx35-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx35-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
+ mxc_register_gpio("imx35-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);
pinctrl_provide_dummies();
/* i.mx53 has the i.mx35 type sdma */
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index ebf680bebdf2..3fa6c51390da 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 7b1055c8e0b9..3b2267529f5e 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -456,7 +456,7 @@ static void __init ap_init_timer(void)
clk = clk_get_sys("ap_timer", NULL);
BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ clk_prepare_enable(clk);
rate = clk_get_rate(clk);
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
diff --git a/arch/arm/mach-kirkwood/Kconfig b/arch/arm/mach-kirkwood/Kconfig
index 199764fe0fb0..ca5c15a4e626 100644
--- a/arch/arm/mach-kirkwood/Kconfig
+++ b/arch/arm/mach-kirkwood/Kconfig
@@ -80,6 +80,35 @@ config MACH_IB62X0_DT
RaidSonic IB-NAS6210 & IB-NAS6220 devices, using
Flattened Device Tree.
+config MACH_TS219_DT
+ bool "Device Tree for QNAP TS-11X, TS-21X NAS"
+ select ARCH_KIRKWOOD_DT
+ select ARM_APPENDED_DTB
+ select ARM_ATAG_DTB_COMPAT
+ help
+ Say 'Y' here if you want your kernel to support the QNAP
+ TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and
+ TS-219P+ Turbo NAS devices using Flattened Device Tree.
+ There are two different Device Tree descriptions, depending
+ on whether the board uses the MV6281 or the MV6282. If you
+ have the wrong one, the buttons will not
+ work.
+
+config MACH_GOFLEXNET_DT
+ bool "Seagate GoFlex Net (Flattened Device Tree)"
+ select ARCH_KIRKWOOD_DT
+ help
+ Say 'Y' here if you want your kernel to support the
+ Seagate GoFlex Net (Flattened Device Tree).
+
+config MACH_LSXL_DT
+ bool "Buffalo Linkstation LS-XHL, LS-CHLv2 (Flattened Device Tree)"
+ select ARCH_KIRKWOOD_DT
+ help
+ Say 'Y' here if you want your kernel to support the
+ Buffalo Linkstation LS-XHL & LS-CHLv2 devices, using
+ Flattened Device Tree.
+
config MACH_TS219
bool "QNAP TS-110, TS-119, TS-119P+, TS-210, TS-219, TS-219P and TS-219P+ Turbo NAS"
help
diff --git a/arch/arm/mach-kirkwood/Makefile b/arch/arm/mach-kirkwood/Makefile
index d2b05907b10e..055c85a1cc46 100644
--- a/arch/arm/mach-kirkwood/Makefile
+++ b/arch/arm/mach-kirkwood/Makefile
@@ -25,3 +25,6 @@ obj-$(CONFIG_MACH_DREAMPLUG_DT) += board-dreamplug.o
obj-$(CONFIG_MACH_ICONNECT_DT) += board-iconnect.o
obj-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += board-dnskw.o
obj-$(CONFIG_MACH_IB62X0_DT) += board-ib62x0.o
+obj-$(CONFIG_MACH_TS219_DT) += board-ts219.o tsx1x-common.o
+obj-$(CONFIG_MACH_GOFLEXNET_DT) += board-goflexnet.o
+obj-$(CONFIG_MACH_LSXL_DT) += board-lsxl.o
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index 02edbdf5b065..a13299d758e1 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -7,3 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb
+dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-kirkwood/board-dnskw.c b/arch/arm/mach-kirkwood/board-dnskw.c
index 58c2d68f9443..4ab35065a144 100644
--- a/arch/arm/mach-kirkwood/board-dnskw.c
+++ b/arch/arm/mach-kirkwood/board-dnskw.c
@@ -14,13 +14,11 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/i2c.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/of.h>
#include <linux/gpio.h>
#include <linux/input.h>
-#include <linux/gpio_keys.h>
#include <linux/gpio-fan.h>
#include <linux/leds.h>
#include <asm/mach-types.h>
@@ -35,10 +33,6 @@ static struct mv643xx_eth_platform_data dnskw_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static struct mv_sata_platform_data dnskw_sata_data = {
- .n_ports = 2,
-};
-
static unsigned int dnskw_mpp_config[] __initdata = {
MPP13_UART1_TXD, /* Custom ... */
MPP14_UART1_RXD, /* ... Controller (DNS-320 only) */
@@ -73,132 +67,6 @@ static unsigned int dnskw_mpp_config[] __initdata = {
0
};
-static struct gpio_led dns325_led_pins[] = {
- {
- .name = "dns325:white:power",
- .gpio = 26,
- .active_low = 1,
- .default_trigger = "default-on",
- },
- {
- .name = "dns325:white:usb",
- .gpio = 43,
- .active_low = 1,
- },
- {
- .name = "dns325:red:l_hdd",
- .gpio = 28,
- .active_low = 1,
- },
- {
- .name = "dns325:red:r_hdd",
- .gpio = 27,
- .active_low = 1,
- },
- {
- .name = "dns325:red:usb",
- .gpio = 29,
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data dns325_led_data = {
- .num_leds = ARRAY_SIZE(dns325_led_pins),
- .leds = dns325_led_pins,
-};
-
-static struct platform_device dns325_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &dns325_led_data,
- },
-};
-
-static struct gpio_led dns320_led_pins[] = {
- {
- .name = "dns320:blue:power",
- .gpio = 26,
- .active_low = 1,
- .default_trigger = "default-on",
- },
- {
- .name = "dns320:blue:usb",
- .gpio = 43,
- .active_low = 1,
- },
- {
- .name = "dns320:orange:l_hdd",
- .gpio = 28,
- .active_low = 1,
- },
- {
- .name = "dns320:orange:r_hdd",
- .gpio = 27,
- .active_low = 1,
- },
- {
- .name = "dns320:orange:usb",
- .gpio = 35,
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data dns320_led_data = {
- .num_leds = ARRAY_SIZE(dns320_led_pins),
- .leds = dns320_led_pins,
-};
-
-static struct platform_device dns320_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &dns320_led_data,
- },
-};
-
-static struct i2c_board_info dns325_i2c_board_info[] __initdata = {
- {
- I2C_BOARD_INFO("lm75", 0x48),
- },
- /* Something at 0x0c also */
-};
-
-static struct gpio_keys_button dnskw_button_pins[] = {
- {
- .code = KEY_POWER,
- .gpio = 34,
- .desc = "Power button",
- .active_low = 1,
- },
- {
- .code = KEY_EJECTCD,
- .gpio = 47,
- .desc = "USB unmount button",
- .active_low = 1,
- },
- {
- .code = KEY_RESTART,
- .gpio = 48,
- .desc = "Reset button",
- .active_low = 1,
- },
-};
-
-static struct gpio_keys_platform_data dnskw_button_data = {
- .buttons = dnskw_button_pins,
- .nbuttons = ARRAY_SIZE(dnskw_button_pins),
-};
-
-static struct platform_device dnskw_button_device = {
- .name = "gpio-keys",
- .id = -1,
- .num_resources = 0,
- .dev = {
- .platform_data = &dnskw_button_data,
- }
-};
-
/* Fan: ADDA AD045HB-G73 40mm 6000rpm@5v */
static struct gpio_fan_speed dnskw_fan_speed[] = {
{ 0, 0 },
@@ -245,20 +113,9 @@ void __init dnskw_init(void)
kirkwood_ehci_init();
kirkwood_ge00_init(&dnskw_ge00_data);
- kirkwood_sata_init(&dnskw_sata_data);
- kirkwood_i2c_init();
- platform_device_register(&dnskw_button_device);
platform_device_register(&dnskw_fan_device);
- if (of_machine_is_compatible("dlink,dns-325")) {
- i2c_register_board_info(0, dns325_i2c_board_info,
- ARRAY_SIZE(dns325_i2c_board_info));
- platform_device_register(&dns325_led_device);
-
- } else if (of_machine_is_compatible("dlink,dns-320"))
- platform_device_register(&dns320_led_device);
-
/* Register power-off GPIO. */
if (gpio_request(36, "dnskw:power:off") == 0
&& gpio_direction_output(36, 0) == 0)
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 55e357ab2923..aeb234d0d0e3 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/of.h>
@@ -23,7 +22,6 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/gpio.h>
-#include <linux/leds.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
@@ -36,42 +34,6 @@
#include "common.h"
#include "mpp.h"
-struct mtd_partition dreamplug_partitions[] = {
- {
- .name = "u-boot",
- .size = SZ_512K,
- .offset = 0,
- },
- {
- .name = "u-boot env",
- .size = SZ_64K,
- .offset = SZ_512K + SZ_512K,
- },
- {
- .name = "dtb",
- .size = SZ_64K,
- .offset = SZ_512K + SZ_512K + SZ_512K,
- },
-};
-
-static const struct flash_platform_data dreamplug_spi_slave_data = {
- .type = "mx25l1606e",
- .name = "spi_flash",
- .parts = dreamplug_partitions,
- .nr_parts = ARRAY_SIZE(dreamplug_partitions),
-};
-
-static struct spi_board_info __initdata dreamplug_spi_slave_info[] = {
- {
- .modalias = "m25p80",
- .platform_data = &dreamplug_spi_slave_data,
- .irq = -1,
- .max_speed_hz = 50000000,
- .bus_num = 0,
- .chip_select = 0,
- },
-};
-
static struct mv643xx_eth_platform_data dreamplug_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
@@ -80,45 +42,10 @@ static struct mv643xx_eth_platform_data dreamplug_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(1),
};
-static struct mv_sata_platform_data dreamplug_sata_data = {
- .n_ports = 1,
-};
-
static struct mvsdio_platform_data dreamplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
};
-static struct gpio_led dreamplug_led_pins[] = {
- {
- .name = "dreamplug:blue:bluetooth",
- .gpio = 47,
- .active_low = 1,
- },
- {
- .name = "dreamplug:green:wifi",
- .gpio = 48,
- .active_low = 1,
- },
- {
- .name = "dreamplug:green:wifi_ap",
- .gpio = 49,
- .active_low = 1,
- },
-};
-
-static struct gpio_led_platform_data dreamplug_led_data = {
- .leds = dreamplug_led_pins,
- .num_leds = ARRAY_SIZE(dreamplug_led_pins),
-};
-
-static struct platform_device dreamplug_leds = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &dreamplug_led_data,
- }
-};
-
static unsigned int dreamplug_mpp_config[] __initdata = {
MPP0_SPI_SCn,
MPP1_SPI_MOSI,
@@ -137,15 +64,8 @@ void __init dreamplug_init(void)
*/
kirkwood_mpp_conf(dreamplug_mpp_config);
- spi_register_board_info(dreamplug_spi_slave_info,
- ARRAY_SIZE(dreamplug_spi_slave_info));
- kirkwood_spi_init();
-
kirkwood_ehci_init();
kirkwood_ge00_init(&dreamplug_ge00_data);
kirkwood_ge01_init(&dreamplug_ge01_data);
- kirkwood_sata_init(&dreamplug_sata_data);
kirkwood_sdio_init(&dreamplug_mvsdio_data);
-
- platform_device_register(&dreamplug_leds);
}
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index edc3f8a9d45e..e4eb450de301 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -18,6 +18,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/bridge-regs.h>
+#include <plat/irq.h>
#include "common.h"
static struct of_device_id kirkwood_dt_match_table[] __initdata = {
@@ -25,6 +26,16 @@ static struct of_device_id kirkwood_dt_match_table[] __initdata = {
{ }
};
+struct of_dev_auxdata kirkwood_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("marvell,orion-spi", 0xf1010600, "orion_spi.0", NULL),
+ OF_DEV_AUXDATA("marvell,mv64xxx-i2c", 0xf1011000, "mv64xxx_i2c.0",
+ NULL),
+ OF_DEV_AUXDATA("marvell,orion-wdt", 0xf1020300, "orion_wdt", NULL),
+ OF_DEV_AUXDATA("marvell,orion-sata", 0xf1080000, "sata_mv.0", NULL),
+ OF_DEV_AUXDATA("marvell,orion-nand", 0xf4000000, "orion_nand", NULL),
+ {},
+};
+
static void __init kirkwood_dt_init(void)
{
pr_info("Kirkwood: %s, TCLK=%d.\n", kirkwood_id(), kirkwood_tclk);
@@ -47,7 +58,6 @@ static void __init kirkwood_dt_init(void)
kirkwood_clk_init();
/* internal devices that every board has */
- kirkwood_wdt_init();
kirkwood_xor0_init();
kirkwood_xor1_init();
kirkwood_crypto_init();
@@ -68,7 +78,17 @@ static void __init kirkwood_dt_init(void)
if (of_machine_is_compatible("raidsonic,ib-nas62x0"))
ib62x0_init();
- of_platform_populate(NULL, kirkwood_dt_match_table, NULL, NULL);
+ if (of_machine_is_compatible("qnap,ts219"))
+ qnap_dt_ts219_init();
+
+ if (of_machine_is_compatible("seagate,goflexnet"))
+ goflexnet_init();
+
+ if (of_machine_is_compatible("buffalo,lsxl"))
+ lsxl_init();
+
+ of_platform_populate(NULL, kirkwood_dt_match_table,
+ kirkwood_auxdata_lookup, NULL);
}
static const char *kirkwood_dt_board_compat[] = {
@@ -77,6 +97,9 @@ static const char *kirkwood_dt_board_compat[] = {
"dlink,dns-325",
"iom,iconnect",
"raidsonic,ib-nas62x0",
+ "qnap,ts219",
+ "seagate,goflexnet",
+ "buffalo,lsxl",
NULL
};
@@ -84,7 +107,7 @@ DT_MACHINE_START(KIRKWOOD_DT, "Marvell Kirkwood (Flattened Device Tree)")
/* Maintainer: Jason Cooper <jason@lakedaemon.net> */
.map_io = kirkwood_map_io,
.init_early = kirkwood_init_early,
- .init_irq = kirkwood_init_irq,
+ .init_irq = orion_dt_init_irq,
.timer = &kirkwood_timer,
.init_machine = kirkwood_dt_init,
.restart = kirkwood_restart,
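The auxdata table added above pins DT-probed devices to their legacy names ("orion_spi.0", "sata_mv.0", and so on) so that existing clkdev entries keyed on those device names keep resolving. As a rough illustration of why the name matters, consider the pairing below; the identifiers are made up and not taken from this patch:

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Platform code: tie a gate clock to the legacy device name. */
static void demo_register_spi_clock(struct clk *spi_gate)
{
	clk_register_clkdev(spi_gate, NULL, "orion_spi.0");
}

/*
 * Driver probe: clk_get() matches on dev_name(), so it only finds the
 * clock if the DT device was given that same "orion_spi.0" name.
 */
static int demo_spi_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	return IS_ERR(clk) ? PTR_ERR(clk) : 0;
}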
diff --git a/arch/arm/mach-kirkwood/board-goflexnet.c b/arch/arm/mach-kirkwood/board-goflexnet.c
new file mode 100644
index 000000000000..413e2c8ef5fe
--- /dev/null
+++ b/arch/arm/mach-kirkwood/board-goflexnet.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2012 (C), Jason Cooper <jason@lakedaemon.net>
+ *
+ * arch/arm/mach-kirkwood/board-goflexnet.c
+ *
+ * Seagate GoFlex Net Board Init for drivers not converted to
+ * flattened device tree yet.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Copied and modified for Seagate GoFlex Net support by
+ * Joshua Coombs <josh.coombs@gmail.com> based on ArchLinux ARM's
+ * GoFlex kernel patches.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/kirkwood.h>
+#include <mach/bridge-regs.h>
+#include <plat/mvsdio.h>
+#include "common.h"
+#include "mpp.h"
+
+static struct mv643xx_eth_platform_data goflexnet_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
+};
+
+static unsigned int goflexnet_mpp_config[] __initdata = {
+ MPP29_GPIO, /* USB Power Enable */
+ MPP47_GPIO, /* LED Orange */
+ MPP46_GPIO, /* LED Green */
+ MPP45_GPIO, /* LED Left Capacity 3 */
+ MPP44_GPIO, /* LED Left Capacity 2 */
+ MPP43_GPIO, /* LED Left Capacity 1 */
+ MPP42_GPIO, /* LED Left Capacity 0 */
+ MPP41_GPIO, /* LED Right Capacity 3 */
+ MPP40_GPIO, /* LED Right Capacity 2 */
+ MPP39_GPIO, /* LED Right Capacity 1 */
+ MPP38_GPIO, /* LED Right Capacity 0 */
+ 0
+};
+
+void __init goflexnet_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_mpp_conf(goflexnet_mpp_config);
+
+ if (gpio_request(29, "USB Power Enable") != 0 ||
+ gpio_direction_output(29, 1) != 0)
+ pr_err("can't setup GPIO 29 (USB Power Enable)\n");
+ kirkwood_ehci_init();
+
+ kirkwood_ge00_init(&goflexnet_ge00_data);
+}
diff --git a/arch/arm/mach-kirkwood/board-ib62x0.c b/arch/arm/mach-kirkwood/board-ib62x0.c
index eddf1df8891f..cfc47f80e734 100644
--- a/arch/arm/mach-kirkwood/board-ib62x0.c
+++ b/arch/arm/mach-kirkwood/board-ib62x0.c
@@ -18,9 +18,7 @@
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
#include <linux/input.h>
-#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
@@ -33,10 +31,6 @@ static struct mv643xx_eth_platform_data ib62x0_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
-static struct mv_sata_platform_data ib62x0_sata_data = {
- .n_ports = 2,
-};
-
static unsigned int ib62x0_mpp_config[] __initdata = {
MPP0_NF_IO2,
MPP1_NF_IO3,
@@ -55,69 +49,6 @@ static unsigned int ib62x0_mpp_config[] __initdata = {
0
};
-static struct gpio_led ib62x0_led_pins[] = {
- {
- .name = "ib62x0:green:os",
- .default_trigger = "default-on",
- .gpio = 25,
- .active_low = 0,
- },
- {
- .name = "ib62x0:red:os",
- .default_trigger = "none",
- .gpio = 22,
- .active_low = 0,
- },
- {
- .name = "ib62x0:red:usb_copy",
- .default_trigger = "none",
- .gpio = 27,
- .active_low = 0,
- },
-};
-
-static struct gpio_led_platform_data ib62x0_led_data = {
- .leds = ib62x0_led_pins,
- .num_leds = ARRAY_SIZE(ib62x0_led_pins),
-};
-
-static struct platform_device ib62x0_led_device = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &ib62x0_led_data,
- }
-};
-
-static struct gpio_keys_button ib62x0_button_pins[] = {
- {
- .code = KEY_COPY,
- .gpio = 29,
- .desc = "USB Copy",
- .active_low = 1,
- },
- {
- .code = KEY_RESTART,
- .gpio = 28,
- .desc = "Reset",
- .active_low = 1,
- },
-};
-
-static struct gpio_keys_platform_data ib62x0_button_data = {
- .buttons = ib62x0_button_pins,
- .nbuttons = ARRAY_SIZE(ib62x0_button_pins),
-};
-
-static struct platform_device ib62x0_button_device = {
- .name = "gpio-keys",
- .id = -1,
- .num_resources = 0,
- .dev = {
- .platform_data = &ib62x0_button_data,
- }
-};
-
static void ib62x0_power_off(void)
{
gpio_set_value(IB62X0_GPIO_POWER_OFF, 1);
@@ -132,9 +63,6 @@ void __init ib62x0_init(void)
kirkwood_ehci_init();
kirkwood_ge00_init(&ib62x0_ge00_data);
- kirkwood_sata_init(&ib62x0_sata_data);
- platform_device_register(&ib62x0_led_device);
- platform_device_register(&ib62x0_button_device);
if (gpio_request(IB62X0_GPIO_POWER_OFF, "ib62x0:power:off") == 0 &&
gpio_direction_output(IB62X0_GPIO_POWER_OFF, 0) == 0)
pm_power_off = ib62x0_power_off;
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c
index b0d3cc49269d..d7a9198ed300 100644
--- a/arch/arm/mach-kirkwood/board-iconnect.c
+++ b/arch/arm/mach-kirkwood/board-iconnect.c
@@ -19,8 +19,6 @@
#include <linux/mtd/partitions.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
-#include <linux/leds.h>
-#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <asm/mach/arch.h>
@@ -32,50 +30,6 @@ static struct mv643xx_eth_platform_data iconnect_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(11),
};
-static struct gpio_led iconnect_led_pins[] = {
- {
- .name = "led_level",
- .gpio = 41,
- .default_trigger = "default-on",
- }, {
- .name = "power:blue",
- .gpio = 42,
- .default_trigger = "timer",
- }, {
- .name = "power:red",
- .gpio = 43,
- }, {
- .name = "usb1:blue",
- .gpio = 44,
- }, {
- .name = "usb2:blue",
- .gpio = 45,
- }, {
- .name = "usb3:blue",
- .gpio = 46,
- }, {
- .name = "usb4:blue",
- .gpio = 47,
- }, {
- .name = "otb:blue",
- .gpio = 48,
- },
-};
-
-static struct gpio_led_platform_data iconnect_led_data = {
- .leds = iconnect_led_pins,
- .num_leds = ARRAY_SIZE(iconnect_led_pins),
- .gpio_blink_set = orion_gpio_led_blink_set,
-};
-
-static struct platform_device iconnect_leds = {
- .name = "leds-gpio",
- .id = -1,
- .dev = {
- .platform_data = &iconnect_led_data,
- }
-};
-
static unsigned int iconnect_mpp_config[] __initdata = {
MPP12_GPIO,
MPP35_GPIO,
@@ -90,12 +44,6 @@ static unsigned int iconnect_mpp_config[] __initdata = {
0
};
-static struct i2c_board_info __initdata iconnect_board_info[] = {
- {
- I2C_BOARD_INFO("lm63", 0x4c),
- },
-};
-
static struct mtd_partition iconnect_nand_parts[] = {
{
.name = "flash",
@@ -142,15 +90,11 @@ void __init iconnect_init(void)
{
kirkwood_mpp_conf(iconnect_mpp_config);
kirkwood_nand_init(ARRAY_AND_SIZE(iconnect_nand_parts), 25);
- kirkwood_i2c_init();
- i2c_register_board_info(0, iconnect_board_info,
- ARRAY_SIZE(iconnect_board_info));
kirkwood_ehci_init();
kirkwood_ge00_init(&iconnect_ge00_data);
platform_device_register(&iconnect_button_device);
- platform_device_register(&iconnect_leds);
}
static int __init iconnect_pci_init(void)
diff --git a/arch/arm/mach-kirkwood/board-lsxl.c b/arch/arm/mach-kirkwood/board-lsxl.c
new file mode 100644
index 000000000000..83d8975592f8
--- /dev/null
+++ b/arch/arm/mach-kirkwood/board-lsxl.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2012 (C), Michael Walle <michael@walle.cc>
+ *
+ * arch/arm/mach-kirkwood/board-lsxl.c
+ *
+ * Buffalo Linkstation LS-XHL and LS-CHLv2 init for drivers not
+ * converted to flattened device tree yet.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/ata_platform.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/gpio.h>
+#include <linux/gpio-fan.h>
+#include <linux/input.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/kirkwood.h>
+#include "common.h"
+#include "mpp.h"
+
+static struct mv643xx_eth_platform_data lsxl_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(0),
+};
+
+static struct mv643xx_eth_platform_data lsxl_ge01_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+};
+
+static unsigned int lsxl_mpp_config[] __initdata = {
+ MPP10_GPO, /* HDD Power Enable */
+ MPP11_GPIO, /* USB Vbus Enable */
+ MPP18_GPO, /* FAN High Enable# */
+ MPP19_GPO, /* FAN Low Enable# */
+ MPP36_GPIO, /* Function Blue LED */
+ MPP37_GPIO, /* Alarm LED */
+ MPP38_GPIO, /* Info LED */
+ MPP39_GPIO, /* Power LED */
+ MPP40_GPIO, /* Fan Lock */
+ MPP41_GPIO, /* Function Button */
+ MPP42_GPIO, /* Power Switch */
+ MPP43_GPIO, /* Power Auto Switch */
+ MPP48_GPIO, /* Function Red LED */
+ 0
+};
+
+#define LSXL_GPIO_FAN_HIGH 18
+#define LSXL_GPIO_FAN_LOW 19
+#define LSXL_GPIO_FAN_LOCK 40
+
+static struct gpio_fan_alarm lsxl_alarm = {
+ .gpio = LSXL_GPIO_FAN_LOCK,
+};
+
+static struct gpio_fan_speed lsxl_speeds[] = {
+ {
+ .rpm = 0,
+ .ctrl_val = 3,
+ }, {
+ .rpm = 1500,
+ .ctrl_val = 1,
+ }, {
+ .rpm = 3250,
+ .ctrl_val = 2,
+ }, {
+ .rpm = 5000,
+ .ctrl_val = 0,
+ }
+};
+
+static int lsxl_gpio_list[] = {
+ LSXL_GPIO_FAN_HIGH, LSXL_GPIO_FAN_LOW,
+};
+
+static struct gpio_fan_platform_data lsxl_fan_data = {
+ .num_ctrl = ARRAY_SIZE(lsxl_gpio_list),
+ .ctrl = lsxl_gpio_list,
+ .alarm = &lsxl_alarm,
+ .num_speed = ARRAY_SIZE(lsxl_speeds),
+ .speed = lsxl_speeds,
+};
+
+static struct platform_device lsxl_fan_device = {
+ .name = "gpio-fan",
+ .id = -1,
+ .num_resources = 0,
+ .dev = {
+ .platform_data = &lsxl_fan_data,
+ },
+};
+
+/*
+ * On the LS-XHL/LS-CHLv2, the shutdown process is as follows:
+ * - Userland monitors key events until the power switch goes to the
+ *   off position
+ * - The board reboots
+ * - U-boot starts and goes into an idle mode, waiting for the user
+ *   to move the switch to the ON position
+ */
+static void lsxl_power_off(void)
+{
+ kirkwood_restart('h', NULL);
+}
+
+#define LSXL_GPIO_HDD_POWER 10
+#define LSXL_GPIO_USB_POWER 11
+
+void __init lsxl_init(void)
+{
+ /*
+ * Basic setup. Needs to be called early.
+ */
+ kirkwood_mpp_conf(lsxl_mpp_config);
+
+ /* usb and sata power on */
+ gpio_set_value(LSXL_GPIO_USB_POWER, 1);
+ gpio_set_value(LSXL_GPIO_HDD_POWER, 1);
+
+ kirkwood_ehci_init();
+ kirkwood_ge00_init(&lsxl_ge00_data);
+ kirkwood_ge01_init(&lsxl_ge01_data);
+ platform_device_register(&lsxl_fan_device);
+
+ /* register power-off method */
+ pm_power_off = lsxl_power_off;
+}
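
The ctrl_val numbers in lsxl_speeds[] are bitmasks over the two control GPIOs in lsxl_gpio_list[]: assuming the usual gpio-fan decode, bit 0 drives FAN High Enable# and bit 1 drives FAN Low Enable#, so value 3 parks both active-low lines high (the 0 rpm entry) and value 0 pulls both low (the 5000 rpm entry). A minimal sketch of that decode; lsxl_fan_apply_ctrl() is a hypothetical name used purely for illustration.

/* Sketch of the gpio-fan style decode assumed above: each bit of
 * ctrl_val drives one GPIO from the ctrl[] array, LSB first. */
static void lsxl_fan_apply_ctrl(int *ctrl, int num_ctrl, int ctrl_val)
{
	int i;

	for (i = 0; i < num_ctrl; i++)
		gpio_set_value(ctrl[i], (ctrl_val >> i) & 1);
}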
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c
new file mode 100644
index 000000000000..1750e68506c1
--- /dev/null
+++ b/arch/arm/mach-kirkwood/board-ts219.c
@@ -0,0 +1,82 @@
+/*
+ *
+ * QNAP TS-11x/TS-21x Turbo NAS Board Setup via DT
+ *
+ * Copyright (C) 2012 Andrew Lunn <andrew@lunn.ch>
+ *
+ * Based on the board file ts219-setup.c:
+ *
+ * Copyright (C) 2009 Martin Michlmayr <tbm@cyrius.com>
+ * Copyright (C) 2008 Byron Bradley <byron.bbradley@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/ata_platform.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <mach/kirkwood.h>
+#include "common.h"
+#include "mpp.h"
+#include "tsx1x-common.h"
+
+static struct mv643xx_eth_platform_data qnap_ts219_ge00_data = {
+ .phy_addr = MV643XX_ETH_PHY_ADDR(8),
+};
+
+static unsigned int qnap_ts219_mpp_config[] __initdata = {
+ MPP0_SPI_SCn,
+ MPP1_SPI_MOSI,
+ MPP2_SPI_SCK,
+ MPP3_SPI_MISO,
+ MPP4_SATA1_ACTn,
+ MPP5_SATA0_ACTn,
+ MPP8_TW0_SDA,
+ MPP9_TW0_SCK,
+ MPP10_UART0_TXD,
+ MPP11_UART0_RXD,
+ MPP13_UART1_TXD, /* PIC controller */
+ MPP14_UART1_RXD, /* PIC controller */
+ MPP15_GPIO, /* USB Copy button (on devices with 88F6281) */
+ MPP16_GPIO, /* Reset button (on devices with 88F6281) */
+ MPP36_GPIO, /* RAM: 0: 256 MB, 1: 512 MB */
+ MPP37_GPIO, /* Reset button (on devices with 88F6282) */
+ MPP43_GPIO, /* USB Copy button (on devices with 88F6282) */
+ MPP44_GPIO, /* Board ID: 0: TS-11x, 1: TS-21x */
+ 0
+};
+
+void __init qnap_dt_ts219_init(void)
+{
+ u32 dev, rev;
+
+ kirkwood_mpp_conf(qnap_ts219_mpp_config);
+
+ kirkwood_pcie_id(&dev, &rev);
+ if (dev == MV88F6282_DEV_ID)
+ qnap_ts219_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0);
+
+ kirkwood_ge00_init(&qnap_ts219_ge00_data);
+ kirkwood_ehci_init();
+
+ pm_power_off = qnap_tsx1x_power_off;
+}
+
+/* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */
+static int __init ts219_pci_init(void)
+{
+ if (machine_is_ts219())
+ kirkwood_pcie_init(KW_PCIE0);
+
+ return 0;
+}
+subsys_initcall(ts219_pci_init);
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index f261cd242643..3226077735b1 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk-provider.h>
#include <linux/spinlock.h>
+#include <linux/mv643xx_i2c.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/timex.h>
@@ -67,6 +68,14 @@ void __init kirkwood_map_io(void)
* CLK tree
****************************************************************************/
+static void enable_sata0(void)
+{
+ /* Enable PLL and IVREF */
+ writel(readl(SATA0_PHY_MODE_2) | 0xf, SATA0_PHY_MODE_2);
+ /* Enable PHY */
+ writel(readl(SATA0_IF_CTRL) & ~0x200, SATA0_IF_CTRL);
+}
+
static void disable_sata0(void)
{
/* Disable PLL and IVREF */
@@ -75,6 +84,14 @@ static void disable_sata0(void)
writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
}
+static void enable_sata1(void)
+{
+ /* Enable PLL and IVREF */
+ writel(readl(SATA1_PHY_MODE_2) | 0xf, SATA1_PHY_MODE_2);
+ /* Enable PHY */
+ writel(readl(SATA1_IF_CTRL) & ~0x200, SATA1_IF_CTRL);
+}
+
static void disable_sata1(void)
{
/* Disable PLL and IVREF */
@@ -107,23 +124,38 @@ static void disable_pcie1(void)
}
}
-/* An extended version of the gated clk. This calls fn() before
- * disabling the clock. We use this to turn off PHYs etc. */
+/* An extended version of the gated clk. This calls fn_en()/fn_dis()
+ * before enabling/disabling the clock. We use this to turn on/off
+ * PHYs etc. */
struct clk_gate_fn {
struct clk_gate gate;
- void (*fn)(void);
+ void (*fn_en)(void);
+ void (*fn_dis)(void);
};
#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+static int clk_gate_fn_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
+ int ret;
+
+ ret = clk_gate_ops.enable(hw);
+ if (!ret && gate_fn->fn_en)
+ gate_fn->fn_en();
+
+ return ret;
+}
+
static void clk_gate_fn_disable(struct clk_hw *hw)
{
struct clk_gate *gate = to_clk_gate(hw);
struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
- if (gate_fn->fn)
- gate_fn->fn();
+ if (gate_fn->fn_dis)
+ gate_fn->fn_dis();
clk_gate_ops.disable(hw);
}
@@ -135,7 +167,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock,
- void (*fn)(void))
+ void (*fn_en)(void), void (*fn_dis)(void))
{
struct clk_gate_fn *gate_fn;
struct clk *clk;
@@ -159,11 +191,14 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
gate_fn->gate.flags = clk_gate_flags;
gate_fn->gate.lock = lock;
gate_fn->gate.hw.init = &init;
- gate_fn->fn = fn;
+ gate_fn->fn_en = fn_en;
+ gate_fn->fn_dis = fn_dis;
- /* ops is the gate ops, but with our disable function */
- if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+ /* ops is the gate ops, but with our enable/disable functions */
+ if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
+ clk_gate_fn_ops.disable != clk_gate_fn_disable) {
clk_gate_fn_ops = clk_gate_ops;
+ clk_gate_fn_ops.enable = clk_gate_fn_enable;
clk_gate_fn_ops.disable = clk_gate_fn_disable;
}
@@ -187,11 +222,12 @@ static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
static struct clk __init *kirkwood_register_gate_fn(const char *name,
u8 bit_idx,
- void (*fn)(void))
+ void (*fn_en)(void),
+ void (*fn_dis)(void))
{
return clk_register_gate_fn(NULL, name, "tclk", 0,
(void __iomem *)CLOCK_GATING_CTRL,
- bit_idx, 0, &gating_lock, fn);
+ bit_idx, 0, &gating_lock, fn_en, fn_dis);
}
static struct clk *ge0, *ge1;
@@ -208,18 +244,18 @@ void __init kirkwood_clk_init(void)
ge0 = kirkwood_register_gate("ge0", CGC_BIT_GE0);
ge1 = kirkwood_register_gate("ge1", CGC_BIT_GE1);
sata0 = kirkwood_register_gate_fn("sata0", CGC_BIT_SATA0,
- disable_sata0);
+ enable_sata0, disable_sata0);
sata1 = kirkwood_register_gate_fn("sata1", CGC_BIT_SATA1,
- disable_sata1);
+ enable_sata1, disable_sata1);
usb0 = kirkwood_register_gate("usb0", CGC_BIT_USB0);
sdio = kirkwood_register_gate("sdio", CGC_BIT_SDIO);
crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
xor0 = kirkwood_register_gate("xor0", CGC_BIT_XOR0);
xor1 = kirkwood_register_gate("xor1", CGC_BIT_XOR1);
pex0 = kirkwood_register_gate_fn("pex0", CGC_BIT_PEX0,
- disable_pcie0);
+ NULL, disable_pcie0);
pex1 = kirkwood_register_gate_fn("pex1", CGC_BIT_PEX1,
- disable_pcie1);
+ NULL, disable_pcie1);
audio = kirkwood_register_gate("audio", CGC_BIT_AUDIO);
kirkwood_register_gate("tdm", CGC_BIT_TDM);
kirkwood_register_gate("tsu", CGC_BIT_TSU);
@@ -241,6 +277,12 @@ void __init kirkwood_clk_init(void)
orion_clkdev_add("0", "pcie", pex0);
orion_clkdev_add("1", "pcie", pex1);
orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+ orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit);
+
+ /* Marvell says runit is used by SPI, UART, NAND, TWSI, ...,
+ * so it should never be gated.
+ */
+ clk_prepare_enable(runit);
}
/*****************************************************************************
@@ -259,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
- IRQ_KIRKWOOD_GE00_ERR);
+ IRQ_KIRKWOOD_GE00_ERR, 1600);
/* The interface forgets the MAC address assigned by u-boot if
the clock is turned off, so claim the clk now. */
clk_prepare_enable(ge0);
@@ -273,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
- IRQ_KIRKWOOD_GE01_ERR);
+ IRQ_KIRKWOOD_GE01_ERR, 1600);
clk_prepare_enable(ge1);
}
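
The clk_gate_fn wrapper in this file follows the usual kernel idiom of embedding the base object and recovering the wrapper with container_of(). A stand-alone sketch of that idiom with purely illustrative names (not kernel code and not part of the patch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct wrapper {
	struct base base;	/* embedded base object, like struct clk_gate */
	void (*extra)(void);	/* extension hook, like fn_en/fn_dis */
};

static void hello(void) { puts("extra hook called"); }

int main(void)
{
	struct wrapper w = { .base = { .id = 1 }, .extra = hello };
	struct base *b = &w.base;	/* callers only see the base pointer */

	/* recover the enclosing wrapper from the base pointer, run the hook */
	container_of(b, struct wrapper, base)->extra();
	return 0;
}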
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index 9248fa2c165b..304dd1abfdca 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -58,6 +58,11 @@ void dreamplug_init(void);
#else
static inline void dreamplug_init(void) {};
#endif
+#ifdef CONFIG_MACH_TS219_DT
+void qnap_dt_ts219_init(void);
+#else
+static inline void qnap_dt_ts219_init(void) {};
+#endif
#ifdef CONFIG_MACH_DLINK_KIRKWOOD_DT
void dnskw_init(void);
@@ -77,6 +82,18 @@ void ib62x0_init(void);
static inline void ib62x0_init(void) {};
#endif
+#ifdef CONFIG_MACH_GOFLEXNET_DT
+void goflexnet_init(void);
+#else
+static inline void goflexnet_init(void) {};
+#endif
+
+#ifdef CONFIG_MACH_LSXL_DT
+void lsxl_init(void);
+#else
+static inline void lsxl_init(void) {};
+#endif
+
/* early init functions not converted to fdt yet */
char *kirkwood_id(void);
void kirkwood_l2_init(void);
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d93359379598..be90b7d0e10b 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-kirkwood/irq.c b/arch/arm/mach-kirkwood/irq.c
index c4c68e5b94f1..720063ffa19d 100644
--- a/arch/arm/mach-kirkwood/irq.c
+++ b/arch/arm/mach-kirkwood/irq.c
@@ -9,20 +9,23 @@
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/io.h>
#include <mach/bridge-regs.h>
#include <plat/irq.h>
-#include "common.h"
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- BUG_ON(irq < IRQ_KIRKWOOD_GPIO_LOW_0_7);
- BUG_ON(irq > IRQ_KIRKWOOD_GPIO_HIGH_16_23);
+static int __initdata gpio0_irqs[4] = {
+ IRQ_KIRKWOOD_GPIO_LOW_0_7,
+ IRQ_KIRKWOOD_GPIO_LOW_8_15,
+ IRQ_KIRKWOOD_GPIO_LOW_16_23,
+ IRQ_KIRKWOOD_GPIO_LOW_24_31,
+};
- orion_gpio_irq_handler((irq - IRQ_KIRKWOOD_GPIO_LOW_0_7) << 3);
-}
+static int __initdata gpio1_irqs[4] = {
+ IRQ_KIRKWOOD_GPIO_HIGH_0_7,
+ IRQ_KIRKWOOD_GPIO_HIGH_8_15,
+ IRQ_KIRKWOOD_GPIO_HIGH_16_23,
+ 0,
+};
void __init kirkwood_init_irq(void)
{
@@ -32,17 +35,8 @@ void __init kirkwood_init_irq(void)
/*
* Initialize gpiolib for GPIOs 0-49.
*/
- orion_gpio_init(0, 32, GPIO_LOW_VIRT_BASE, 0,
- IRQ_KIRKWOOD_GPIO_START);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_0_7, gpio_irq_handler);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_8_15, gpio_irq_handler);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_16_23, gpio_irq_handler);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_LOW_24_31, gpio_irq_handler);
-
- orion_gpio_init(32, 18, GPIO_HIGH_VIRT_BASE, 0,
- IRQ_KIRKWOOD_GPIO_START + 32);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_0_7, gpio_irq_handler);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_8_15, gpio_irq_handler);
- irq_set_chained_handler(IRQ_KIRKWOOD_GPIO_HIGH_16_23,
- gpio_irq_handler);
+ orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_LOW_VIRT_BASE, 0,
+ IRQ_KIRKWOOD_GPIO_START, gpio0_irqs);
+ orion_gpio_init(NULL, 32, 18, (void __iomem *)GPIO_HIGH_VIRT_BASE, 0,
+ IRQ_KIRKWOOD_GPIO_START + 32, gpio1_irqs);
}
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index f516e74ce0d5..5c3d61ee729a 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -14,6 +14,7 @@
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
+#include <mach/irqs.h>
#include <mach/pxa168.h>
#include <mach/mfp-pxa168.h>
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index 4304f9519372..7e8a5a2e1ec7 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
- if (!pdata && !pdata->pool_name)
+ if (!pdata || !pdata->pool_name)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
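
The sram.c hunk above is a one-character logic fix: with &&, a NULL pdata still forces evaluation of !pdata->pool_name (a NULL dereference), while a valid pdata short-circuits past the pool_name check entirely; || gives the intended test. A small stand-alone illustration, with a hypothetical check() helper and -1 standing in for -ENODEV:

#include <stdio.h>

struct sram_platdata { const char *pool_name; };

static int check(const struct sram_platdata *pdata)
{
	/* old form "!pdata && !pdata->pool_name" crashes for pdata == NULL
	 * and never looks at pool_name otherwise; this is the fixed form */
	if (!pdata || !pdata->pool_name)
		return -1;
	return 0;
}

int main(void)
{
	struct sram_platdata named = { .pool_name = "sram" };
	struct sram_platdata unnamed = { .pool_name = NULL };

	/* prints: -1 -1 0 */
	printf("%d %d %d\n", check(NULL), check(&unnamed), check(&named));
	return 0;
}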
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index db0117ec55f4..e012dc8391cf 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -127,7 +127,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
- gic_raise_softirq(cpumask_of(cpu), 1);
+ gic_raise_softirq(cpumask_of(cpu), 0);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 62b53d710efd..a9bc84180d21 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -37,7 +37,7 @@
#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
-static void __init __iomem *win_cfg_base(int win)
+static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
{
/*
* Find the control register base address for this window.
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index b4c53b846c9c..3057f7d4329a 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
- IRQ_MV78XX0_GE_ERR);
+ IRQ_MV78XX0_GE_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ);
+ NO_IRQ,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
diff --git a/arch/arm/mach-mv78xx0/irq.c b/arch/arm/mach-mv78xx0/irq.c
index e421b701663b..eff9a750bbe2 100644
--- a/arch/arm/mach-mv78xx0/irq.c
+++ b/arch/arm/mach-mv78xx0/irq.c
@@ -9,19 +9,17 @@
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
#include <linux/irq.h>
#include <mach/bridge-regs.h>
#include <plat/irq.h>
#include "common.h"
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- BUG_ON(irq < IRQ_MV78XX0_GPIO_0_7 || irq > IRQ_MV78XX0_GPIO_24_31);
-
- orion_gpio_irq_handler((irq - IRQ_MV78XX0_GPIO_0_7) << 3);
-}
+static int __initdata gpio0_irqs[4] = {
+ IRQ_MV78XX0_GPIO_0_7,
+ IRQ_MV78XX0_GPIO_8_15,
+ IRQ_MV78XX0_GPIO_16_23,
+ IRQ_MV78XX0_GPIO_24_31,
+};
void __init mv78xx0_init_irq(void)
{
@@ -34,11 +32,7 @@ void __init mv78xx0_init_irq(void)
* registers for core #1 are at an offset of 0x18 from those of
* core #0.)
*/
- orion_gpio_init(0, 32, GPIO_VIRT_BASE,
+ orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE,
mv78xx0_core_index() ? 0x18 : 0,
- IRQ_MV78XX0_GPIO_START);
- irq_set_chained_handler(IRQ_MV78XX0_GPIO_0_7, gpio_irq_handler);
- irq_set_chained_handler(IRQ_MV78XX0_GPIO_8_15, gpio_irq_handler);
- irq_set_chained_handler(IRQ_MV78XX0_GPIO_16_23, gpio_irq_handler);
- irq_set_chained_handler(IRQ_MV78XX0_GPIO_24_31, gpio_irq_handler);
+ IRQ_MV78XX0_GPIO_START, gpio0_irqs);
}
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index ccdf83b17cf1..9a8bbda195b2 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -2,9 +2,6 @@ if ARCH_MXS
source "arch/arm/mach-mxs/devices/Kconfig"
-config MXS_OCOTP
- bool
-
config SOC_IMX23
bool
select ARM_AMBA
@@ -66,7 +63,6 @@ config MACH_MX28EVK
select MXS_HAVE_PLATFORM_MXS_SAIF
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_RTC_STMP3XXX
- select MXS_OCOTP
help
Include support for MX28EVK platform. This includes specific
configurations for the board and its peripherals.
@@ -94,7 +90,6 @@ config MODULE_M28
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXSFB
- select MXS_OCOTP
config MODULE_APX4
bool
@@ -106,7 +101,6 @@ config MODULE_APX4
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXS_SAIF
- select MXS_OCOTP
config MACH_TX28
bool "Ka-Ro TX28 module"
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index e41590ccb437..fed3695a1339 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,7 +1,6 @@
# Common support
-obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o
-obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 648bdd05d38b..8dabfe81d07c 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -162,7 +162,7 @@ enum mac_oui {
static void __init update_fec_mac_prop(enum mac_oui oui)
{
struct device_node *np, *from = NULL;
- struct property *oldmac, *newmac;
+ struct property *newmac;
const u32 *ocotp = mxs_get_ocotp();
u8 *macaddr;
u32 val;
@@ -208,11 +208,7 @@ static void __init update_fec_mac_prop(enum mac_oui oui)
macaddr[4] = (val >> 8) & 0xff;
macaddr[5] = (val >> 0) & 0xff;
- oldmac = of_find_property(np, newmac->name, NULL);
- if (oldmac)
- prom_update_property(np, newmac, oldmac);
- else
- prom_add_property(np, newmac);
+ prom_update_property(np, newmac);
}
}
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
index 2cdf6ef69bee..d122ee6ab991 100644
--- a/arch/arm/mach-netx/fb.c
+++ b/arch/arm/mach-netx/fb.c
@@ -69,29 +69,6 @@ void netx_clcd_remove(struct clcd_fb *fb)
fb->fb.screen_base, fb->fb.fix.smem_start);
}
-void clk_disable(struct clk *clk)
-{
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return 0;
-}
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- return dev && strcmp(dev_name(dev), "fb") == 0 ? NULL : ERR_PTR(-ENOENT);
-}
-
-void clk_put(struct clk *clk)
-{
-}
-
static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL);
int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
diff --git a/arch/arm/mach-omap1/board-h2-mmc.c b/arch/arm/mach-omap1/board-h2-mmc.c
index da0e37d40823..e1362ce48497 100644
--- a/arch/arm/mach-omap1/board-h2-mmc.c
+++ b/arch/arm/mach-omap1/board-h2-mmc.c
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
.init = mmc_late_init,
.cleanup = mmc_cleanup,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-h3-mmc.c b/arch/arm/mach-omap1/board-h3-mmc.c
index f8242aa9b763..c74daace8cd6 100644
--- a/arch/arm/mach-omap1/board-h3-mmc.c
+++ b/arch/arm/mach-omap1/board-h3-mmc.c
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
*/
static struct omap_mmc_platform_data mmc1_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.slots[0] = {
.set_power = mmc_set_power,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 4007a372481b..2c0ca8fc3380 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
static struct omap_mmc_platform_data nokia770_mmc2_data = {
.nr_slots = 1,
- .dma_mask = 0xffffffff,
.max_freq = 12000000,
.slots[0] = {
.set_power = nokia770_mmc_set_power,
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index cc71a26723ef..355980321c2d 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -288,8 +288,7 @@ palmz71_gpio_setup(int early)
}
gpio_direction_input(PALMZ71_USBDETECT_GPIO);
if (request_irq(gpio_to_irq(PALMZ71_USBDETECT_GPIO),
- palmz71_powercable, IRQF_SAMPLE_RANDOM,
- "palmz71-cable", NULL))
+ palmz71_powercable, 0, "palmz71-cable", NULL))
printk(KERN_ERR
"IRQ request for power cable failed!\n");
palmz71_powercable(gpio_to_irq(PALMZ71_USBDETECT_GPIO), NULL);
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd0fbf76ac79..fcd4e85c4ddc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,12 +62,14 @@ config ARCH_OMAP4
select PM_OPP if PM
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select ARM_CPU_SUSPEND if PM
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
config SOC_OMAP5
bool "TI OMAP5"
select CPU_V7
select ARM_GIC
select HAVE_SMP
+ select ARM_CPU_SUSPEND if PM
comment "OMAP Core Type"
depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 74915295482e..28214483aaba 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
+ /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
+ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 2c5d0ed75285..677357ff61ac 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = {
.cleanup = n8x0_mmc_cleanup,
.shutdown = n8x0_mmc_shutdown,
.max_freq = 24000000,
- .dma_mask = 0xffffffff,
.slots[0] = {
.wires = 4,
.set_power = n8x0_mmc_set_power,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5eb..0d362e9f9cb9 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
#include "hsmmc.h"
#include "common-board-devices.h"
+#define OMAP3_EVM_TS_GPIO 175
#define OMAP3_EVM_EHCI_VBUS 22
#define OMAP3_EVM_EHCI_SELECT 61
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 14734746457c..c1875862679f 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
};
-/*
- * ADS7846 driver maybe request a gpio according to the value
- * of pdata->get_pendown_state, but we have done this. So set
- * get_pendown_state to avoid twice gpio requesting.
- */
-static int omap3_get_pendown_state(void)
-{
- return !gpio_get_value(OMAP3_EVM_TS_GPIO);
-}
-
static struct ads7846_platform_data ads7846_config = {
.x_max = 0x0fff,
.y_max = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
.debounce_rep = 1,
.gpio_pendown = -EINVAL,
.keep_vref_on = 1,
- .get_pendown_state = &omap3_get_pendown_state,
};
static struct spi_board_info ads7846_spi_board_info __initdata = {
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 4c4ef6a6166b..a0b4a42836ab 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,7 +4,6 @@
#include "twl-common.h"
#define NAND_BLOCK_SIZE SZ_128K
-#define OMAP3_EVM_TS_GPIO 175
struct mtd_partition;
struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 02d15bbd4e35..288bee6cbb76 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,6 +21,7 @@
#include "common.h"
#include "pm.h"
#include "prm.h"
+#include "clockdomain.h"
/* Machine specific information */
struct omap4_idle_statedata {
@@ -47,10 +48,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
},
};
-static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
+static struct clockdomain *cpu_clkdm[NR_CPUS];
+
+static atomic_t abort_barrier;
+static bool cpu_done[NR_CPUS];
/**
- * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * omap4_enter_idle_[simple/coupled] - OMAP4 cpuidle entry functions
* @dev: cpuidle device
* @drv: cpuidle driver
* @index: the index of state to be entered
@@ -59,60 +64,84 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
* specified low power state selected by the governor.
* Returns the amount of time spent in the low power state.
*/
-static int omap4_enter_idle(struct cpuidle_device *dev,
+static int omap4_enter_idle_simple(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ local_fiq_disable();
+ omap_do_wfi();
+ local_fiq_enable();
+
+ return index;
+}
+
+static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
struct omap4_idle_statedata *cx = &omap4_idle_data[index];
- u32 cpu1_state;
int cpu_id = smp_processor_id();
local_fiq_disable();
/*
- * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
+ * CPU0 has to wait and stay ON until CPU1 is in the OFF state.
* This is necessary to honour the hardware recommendation
* of triggering all the possible low power modes once CPU1 is
* out of coherency and in OFF mode.
- * Update dev->last_state so that governor stats reflects right
- * data.
*/
- cpu1_state = pwrdm_read_pwrst(cpu1_pd);
- if (cpu1_state != PWRDM_POWER_OFF) {
- index = drv->safe_state_index;
- cx = &omap4_idle_data[index];
+ if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+ while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
+ cpu_relax();
+
+ /*
+ * CPU1 could have already entered & exited idle
+ * without hitting off because of a wakeup
+ * or a failed attempt to hit off mode. Check for
+ * that here, otherwise we could spin forever
+ * waiting for CPU1 off.
+ */
+ if (cpu_done[1])
+ goto fail;
+
+ }
}
- if (index > 0)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
/*
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
*/
- if (cx->cpu_state == PWRDM_POWER_OFF)
- cpu_pm_enter();
-
- pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
-
- /*
- * Call idle CPU cluster PM enter notifier chain
- * to save GIC and wakeupgen context.
- */
- if ((cx->mpu_state == PWRDM_POWER_RET) &&
- (cx->mpu_logic_state == PWRDM_POWER_OFF))
- cpu_cluster_pm_enter();
+ cpu_pm_enter();
+
+ if (dev->cpu == 0) {
+ pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+
+ /*
+ * Call idle CPU cluster PM enter notifier chain
+ * to save GIC and wakeupgen context.
+ */
+ if ((cx->mpu_state == PWRDM_POWER_RET) &&
+ (cx->mpu_logic_state == PWRDM_POWER_OFF))
+ cpu_cluster_pm_enter();
+ }
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ cpu_done[dev->cpu] = true;
+
+ /* Wakeup CPU1 only if it is not offlined */
+ if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+ clkdm_wakeup(cpu_clkdm[1]);
+ clkdm_allow_idle(cpu_clkdm[1]);
+ }
/*
* Call idle CPU PM exit notifier chain to restore
- * VFP and per CPU IRQ context. Only CPU0 state is
- * considered since CPU1 is managed by CPU hotplug.
+ * VFP and per CPU IRQ context.
*/
- if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
- cpu_pm_exit();
+ cpu_pm_exit();
/*
* Call idle CPU cluster PM exit notifier chain
@@ -121,8 +150,11 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
if (omap4_mpuss_read_prev_context_state())
cpu_cluster_pm_exit();
- if (index > 0)
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
+fail:
+ cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+ cpu_done[dev->cpu] = false;
local_fiq_enable();
@@ -141,7 +173,7 @@ struct cpuidle_driver omap4_idle_driver = {
.exit_latency = 2 + 2,
.target_residency = 5,
.flags = CPUIDLE_FLAG_TIME_VALID,
- .enter = omap4_enter_idle,
+ .enter = omap4_enter_idle_simple,
.name = "C1",
.desc = "MPUSS ON"
},
@@ -149,8 +181,8 @@ struct cpuidle_driver omap4_idle_driver = {
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440,
.target_residency = 960,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .enter = omap4_enter_idle,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+ .enter = omap4_enter_idle_coupled,
.name = "C2",
.desc = "MPUSS CSWR",
},
@@ -158,8 +190,8 @@ struct cpuidle_driver omap4_idle_driver = {
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
.exit_latency = 460 + 518,
.target_residency = 1100,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .enter = omap4_enter_idle,
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+ .enter = omap4_enter_idle_coupled,
.name = "C3",
.desc = "MPUSS OSWR",
},
@@ -168,6 +200,16 @@ struct cpuidle_driver omap4_idle_driver = {
.safe_state_index = 0,
};
+/*
+ * For each cpu, set up the broadcast timer because the local timers
+ * stop for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+ int cpu = smp_processor_id();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
/**
* omap4_idle_init - Init routine for OMAP4 idle
*
@@ -180,19 +222,31 @@ int __init omap4_idle_init(void)
unsigned int cpu_id = 0;
mpu_pd = pwrdm_lookup("mpu_pwrdm");
- cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
- cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
- if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+ cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
+ cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
+ if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
return -ENODEV;
- dev = &per_cpu(omap4_idle_dev, cpu_id);
- dev->cpu = cpu_id;
-
- cpuidle_register_driver(&omap4_idle_driver);
+ cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
+ cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
+ if (!cpu_clkdm[0] || !cpu_clkdm[1])
+ return -ENODEV;
- if (cpuidle_register_device(dev)) {
- pr_err("%s: CPUidle register device failed\n", __func__);
- return -EIO;
+ /* Configure the broadcast timer on each cpu */
+ on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ dev = &per_cpu(omap4_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ dev->coupled_cpus = *cpu_online_mask;
+#endif
+ cpuidle_register_driver(&omap4_idle_driver);
+
+ if (cpuidle_register_device(dev)) {
+ pr_err("%s: CPUidle register failed\n", __func__);
+ return -EIO;
+ }
}
return 0;
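
For orientation, the coupled-idle conversion above boils down to the following shape: every online CPU runs the same enter hook, saves its own context, enters low power, and meets the other CPUs at a parallel barrier before cpuidle proceeds. This is a hedged sketch with hypothetical example_* names; the real CPU0/CPU1 hand-shake is the code in the hunk itself.

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>

static atomic_t example_barrier;

static int example_enter_coupled(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	cpu_pm_enter();		/* save per-CPU context (VFP, GIC, ...) */

	/* platform low-power entry for this CPU would go here */

	cpu_pm_exit();		/* restore per-CPU context */

	/* all coupled CPUs resynchronise before returning to cpuidle */
	cpuidle_coupled_parallel_barrier(dev, &example_barrier);

	return index;
}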
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 5fb47a14f4ba..af1ed7d24a1f 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -37,6 +37,7 @@
#define DISPC_CONTROL 0x0040
#define DISPC_CONTROL2 0x0238
+#define DISPC_CONTROL3 0x0848
#define DISPC_IRQSTATUS 0x0018
#define DSS_SYSCONFIG 0x10
@@ -52,6 +53,7 @@
#define EVSYNC_EVEN_IRQ_SHIFT 2
#define EVSYNC_ODD_IRQ_SHIFT 3
#define FRAMEDONE2_IRQ_SHIFT 22
+#define FRAMEDONE3_IRQ_SHIFT 30
#define FRAMEDONETV_IRQ_SHIFT 24
/*
@@ -376,7 +378,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
static void dispc_disable_outputs(void)
{
u32 v, irq_mask = 0;
- bool lcd_en, digit_en, lcd2_en = false;
+ bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
int i;
struct omap_dss_dispc_dev_attr *da;
struct omap_hwmod *oh;
@@ -405,7 +407,13 @@ static void dispc_disable_outputs(void)
lcd2_en = v & LCD_EN_MASK;
}
- if (!(lcd_en | digit_en | lcd2_en))
+ /* store value of LCDENABLE for LCD3 */
+ if (da->manager_count > 3) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL3);
+ lcd3_en = v & LCD_EN_MASK;
+ }
+
+ if (!(lcd_en | digit_en | lcd2_en | lcd3_en))
return; /* no managers currently enabled */
/*
@@ -426,10 +434,12 @@ static void dispc_disable_outputs(void)
if (lcd2_en)
irq_mask |= 1 << FRAMEDONE2_IRQ_SHIFT;
+ if (lcd3_en)
+ irq_mask |= 1 << FRAMEDONE3_IRQ_SHIFT;
/*
* clear any previous FRAMEDONE, FRAMEDONETV,
- * EVSYNC_EVEN/ODD or FRAMEDONE2 interrupts
+ * EVSYNC_EVEN/ODD, FRAMEDONE2 or FRAMEDONE3 interrupts
*/
omap_hwmod_write(irq_mask, oh, DISPC_IRQSTATUS);
@@ -445,12 +455,19 @@ static void dispc_disable_outputs(void)
omap_hwmod_write(v, oh, DISPC_CONTROL2);
}
+ /* disable LCD3 manager */
+ if (da->manager_count > 3) {
+ v = omap_hwmod_read(oh, DISPC_CONTROL3);
+ v &= ~LCD_EN_MASK;
+ omap_hwmod_write(v, oh, DISPC_CONTROL3);
+ }
+
i = 0;
while ((omap_hwmod_read(oh, DISPC_IRQSTATUS) & irq_mask) !=
irq_mask) {
i++;
if (i > FRAMEDONE_IRQ_TIMEOUT) {
- pr_err("didn't get FRAMEDONE1/2 or TV interrupt\n");
+ pr_err("didn't get FRAMEDONE1/2/3 or TV interrupt\n");
break;
}
mdelay(1);
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index be697d4e0843..a9675d8d1822 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
mmc->slots[0].caps = c->caps;
mmc->slots[0].pm_caps = c->pm_caps;
mmc->slots[0].internal_clock = !c->ext_clock;
- mmc->dma_mask = 0xffffffff;
mmc->max_freq = c->max_freq;
if (cpu_is_omap44xx())
mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 471e62a74a16..76f9b3c2f586 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -127,7 +127,6 @@ struct omap_mux_partition {
* @gpio: GPIO number
* @muxnames: available signal modes for a ball
* @balls: available balls on the package
- * @partition: mux partition
*/
struct omap_mux {
u16 reg_offset;
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 7d118b9bdd5f..9a35adf91232 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -125,7 +125,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
booted = true;
}
- gic_raise_softirq(cpumask_of(cpu), 1);
+ gic_raise_softirq(cpumask_of(cpu), 0);
/*
* Now the secondary core is starting up let it run its
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba27101b..c95415da23c2 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
{
int r = -ENODEV;
- if (!cpu_is_omap44xx())
+ if (!cpu_is_omap443x())
return r;
r = omap_init_opp_table(omap44xx_opp_def_list,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e4fc88c65dbd..05bd8f02723f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(mpu_pwrdm);
- pwrdm_pre_transition(neon_pwrdm);
- }
+ pwrdm_pre_transition(NULL);
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(per_pwrdm);
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
omap2_gpio_prepare_for_idle(per_going_off);
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(core_pwrdm);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
- pwrdm_post_transition(core_pwrdm);
}
omap3_intc_resume_idle();
+ pwrdm_post_transition(NULL);
+
/* PER */
- if (per_next_state < PWRDM_POWER_ON) {
+ if (per_next_state < PWRDM_POWER_ON)
omap2_gpio_resume_after_idle();
- pwrdm_post_transition(per_pwrdm);
- }
-
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_post_transition(mpu_pwrdm);
- pwrdm_post_transition(neon_pwrdm);
- }
}
static void omap3_pm_idle(void)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9f6b83d1b193..91e71d8f46f0 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -56,9 +56,13 @@ ppa_por_params:
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
* It returns to the caller for CPU INACTIVE and ON power states or in case
* CPU failed to transition to targeted OFF/DORMANT state.
+ *
+ * omap4_finish_suspend() calls v7_flush_dcache_all(), which doesn't save
+ * the stack frame and expects the caller to take care of it. Hence the
+ * entire stack frame is saved here to avoid possible stack corruption.
*/
ENTRY(omap4_finish_suspend)
- stmfd sp!, {lr}
+ stmfd sp!, {r4-r12, lr}
cmp r0, #0x0
beq do_WFI @ No lowpower state, jump to WFI
@@ -226,7 +230,7 @@ scu_gp_clear:
skip_scu_gp_clear:
isb
dsb
- ldmfd sp!, {pc}
+ ldmfd sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 13d20c8a283d..2ff6d41ec6c6 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -130,6 +130,7 @@ static struct clock_event_device clockevent_gpt = {
.name = "gp_timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
+ .rating = 300,
.set_next_event = omap2_gp_timer_set_next_event,
.set_mode = omap2_gp_timer_set_mode,
};
@@ -223,7 +224,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
clockevent_delta2ns(3, &clockevent_gpt);
/* Timer internal resynch latency. */
- clockevent_gpt.cpumask = cpumask_of(0);
+ clockevent_gpt.cpumask = cpu_possible_mask;
+ clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
clockevents_register_device(&clockevent_gpt);
pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index de47f170ba50..db5ff6642375 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data)
{
+ omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(pmic_i2c_board_info.type, pmic_type,
sizeof(pmic_i2c_board_info.type));
pmic_i2c_board_info.irq = pmic_irq;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9148b229d0de..410291c67666 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
- IRQ_ORION5X_ETH_ERR);
+ IRQ_ORION5X_ETH_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index b1b45fff776e..17da7091d310 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -11,19 +11,16 @@
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/irq.h>
-#include <linux/io.h>
#include <mach/bridge-regs.h>
#include <plat/irq.h>
-#include "common.h"
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- BUG_ON(irq < IRQ_ORION5X_GPIO_0_7 || irq > IRQ_ORION5X_GPIO_24_31);
-
- orion_gpio_irq_handler((irq - IRQ_ORION5X_GPIO_0_7) << 3);
-}
+static int __initdata gpio0_irqs[4] = {
+ IRQ_ORION5X_GPIO_0_7,
+ IRQ_ORION5X_GPIO_8_15,
+ IRQ_ORION5X_GPIO_16_23,
+ IRQ_ORION5X_GPIO_24_31,
+};
void __init orion5x_init_irq(void)
{
@@ -32,9 +29,6 @@ void __init orion5x_init_irq(void)
/*
* Initialize gpiolib for GPIOs 0-31.
*/
- orion_gpio_init(0, 32, GPIO_VIRT_BASE, 0, IRQ_ORION5X_GPIO_START);
- irq_set_chained_handler(IRQ_ORION5X_GPIO_0_7, gpio_irq_handler);
- irq_set_chained_handler(IRQ_ORION5X_GPIO_8_15, gpio_irq_handler);
- irq_set_chained_handler(IRQ_ORION5X_GPIO_16_23, gpio_irq_handler);
- irq_set_chained_handler(IRQ_ORION5X_GPIO_24_31, gpio_irq_handler);
+ orion_gpio_init(NULL, 0, 32, (void __iomem *)GPIO_VIRT_BASE, 0,
+ IRQ_ORION5X_GPIO_START, gpio0_irqs);
}
diff --git a/arch/arm/mach-prima2/timer.c b/arch/arm/mach-prima2/timer.c
index 0d024b1e916d..f224107de7bc 100644
--- a/arch/arm/mach-prima2/timer.c
+++ b/arch/arm/mach-prima2/timer.c
@@ -132,11 +132,11 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs)
{
int i;
- for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
+ for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
- writel_relaxed(sirfsoc_timer_reg_val[i - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
- writel_relaxed(sirfsoc_timer_reg_val[i - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO);
+ writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
}
static struct clock_event_device sirfsoc_clockevent = {
diff --git a/arch/arm/mach-pxa/eseries.h b/arch/arm/mach-pxa/eseries.h
deleted file mode 100644
index b96949dd5adb..000000000000
--- a/arch/arm/mach-pxa/eseries.h
+++ /dev/null
@@ -1,14 +0,0 @@
-void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi);
-
-extern struct pxa2xx_udc_mach_info e7xx_udc_mach_info;
-extern struct pxaficp_platform_data e7xx_ficp_platform_data;
-extern int e7xx_irda_init(void);
-
-extern int eseries_tmio_enable(struct platform_device *dev);
-extern int eseries_tmio_disable(struct platform_device *dev);
-extern int eseries_tmio_suspend(struct platform_device *dev);
-extern int eseries_tmio_resume(struct platform_device *dev);
-extern void eseries_get_tmio_gpios(void);
-extern struct resource eseries_tmio_resources[];
-extern struct platform_device e300_tc6387xb_device;
-
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index d3de84b0dcbe..e6311988add2 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -296,27 +296,11 @@ static struct asic3_led asic3_leds[ASIC3_NUM_LEDS] = {
static struct resource asic3_resources[] = {
/* GPIO part */
- [0] = {
- .start = ASIC3_PHYS,
- .end = ASIC3_PHYS + ASIC3_MAP_SIZE_16BIT - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ),
- .end = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ),
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(ASIC3_PHYS, ASIC3_MAP_SIZE_16BIT),
+ [1] = DEFINE_RES_IRQ(PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ)),
/* SD part */
- [2] = {
- .start = ASIC3_SD_PHYS,
- .end = ASIC3_SD_PHYS + ASIC3_MAP_SIZE_16BIT - 1,
- .flags = IORESOURCE_MEM,
- },
- [3] = {
- .start = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
- .end = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ),
- .flags = IORESOURCE_IRQ,
- },
+ [2] = DEFINE_RES_MEM(ASIC3_SD_PHYS, ASIC3_MAP_SIZE_16BIT),
+ [3] = DEFINE_RES_IRQ(PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ)),
};
static struct asic3_platform_data asic3_platform_data = {
@@ -343,11 +327,7 @@ static struct platform_device asic3 = {
*/
static struct resource egpio_resources[] = {
- [0] = {
- .start = PXA_CS5_PHYS,
- .end = PXA_CS5_PHYS + 0x4 - 1,
- .flags = IORESOURCE_MEM,
- },
+ [0] = DEFINE_RES_MEM(PXA_CS5_PHYS, 0x4),
};
static struct htc_egpio_chip egpio_chips[] = {
@@ -537,11 +517,7 @@ static struct w100fb_mach_info w3220_info = {
};
static struct resource w3220_resources[] = {
- [0] = {
- .start = ATI_W3220_PHYS,
- .end = ATI_W3220_PHYS + 0x00ffffff,
- .flags = IORESOURCE_MEM,
- },
+ [0] = DEFINE_RES_MEM(ATI_W3220_PHYS, SZ_16M),
};
static struct platform_device w3220 = {
@@ -683,20 +659,12 @@ static struct pda_power_pdata power_supply_info = {
};
static struct resource power_supply_resources[] = {
- [0] = {
- .name = "ac",
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWEDGE,
- .start = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN),
- .end = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN),
- },
- [1] = {
- .name = "usb",
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWEDGE,
- .start = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT),
- .end = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT),
- },
+ [0] = DEFINE_RES_NAMED(PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN), 1, "ac",
+ IORESOURCE_IRQ |
+ IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE),
+ [1] = DEFINE_RES_NAMED(PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT), 1, "usb",
+ IORESOURCE_IRQ |
+ IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE),
};
static struct platform_device power_supply = {
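
The DEFINE_RES_MEM()/DEFINE_RES_IRQ() helpers used in this conversion come from <linux/ioport.h> and are plain shorthand for the open-coded initialisers they replace, so the hunks above are behaviour-neutral. A sketch of the equivalence, with placeholder EXAMPLE_* values:

#include <linux/ioport.h>

#define EXAMPLE_PHYS	0x0c000000
#define EXAMPLE_SIZE	0x1000
#define EXAMPLE_IRQ	42

/* open-coded form ... */
static struct resource example_open_coded[] = {
	[0] = {
		.start	= EXAMPLE_PHYS,
		.end	= EXAMPLE_PHYS + EXAMPLE_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= EXAMPLE_IRQ,
		.end	= EXAMPLE_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

/* ... and the equivalent helper form */
static struct resource example_helpers[] = {
	[0] = DEFINE_RES_MEM(EXAMPLE_PHYS, EXAMPLE_SIZE),
	[1] = DEFINE_RES_IRQ(EXAMPLE_IRQ),
};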
diff --git a/arch/arm/mach-pxa/include/mach/regs-ost.h b/arch/arm/mach-pxa/include/mach/regs-ost.h
index a3e5f86ef67e..628819995c52 100644
--- a/arch/arm/mach-pxa/include/mach/regs-ost.h
+++ b/arch/arm/mach-pxa/include/mach/regs-ost.h
@@ -7,17 +7,17 @@
* OS Timer & Match Registers
*/
-#define OSMR0 __REG(0x40A00000) /* */
-#define OSMR1 __REG(0x40A00004) /* */
-#define OSMR2 __REG(0x40A00008) /* */
-#define OSMR3 __REG(0x40A0000C) /* */
-#define OSMR4 __REG(0x40A00080) /* */
-#define OSCR __REG(0x40A00010) /* OS Timer Counter Register */
-#define OSCR4 __REG(0x40A00040) /* OS Timer Counter Register */
-#define OMCR4 __REG(0x40A000C0) /* */
-#define OSSR __REG(0x40A00014) /* OS Timer Status Register */
-#define OWER __REG(0x40A00018) /* OS Timer Watchdog Enable Register */
-#define OIER __REG(0x40A0001C) /* OS Timer Interrupt Enable Register */
+#define OSMR0 io_p2v(0x40A00000) /* */
+#define OSMR1 io_p2v(0x40A00004) /* */
+#define OSMR2 io_p2v(0x40A00008) /* */
+#define OSMR3 io_p2v(0x40A0000C) /* */
+#define OSMR4 io_p2v(0x40A00080) /* */
+#define OSCR io_p2v(0x40A00010) /* OS Timer Counter Register */
+#define OSCR4 io_p2v(0x40A00040) /* OS Timer Counter Register */
+#define OMCR4 io_p2v(0x40A000C0) /* */
+#define OSSR io_p2v(0x40A00014) /* OS Timer Status Register */
+#define OWER io_p2v(0x40A00018) /* OS Timer Watchdog Enable Register */
+#define OIER io_p2v(0x40A0001C) /* OS Timer Interrupt Enable Register */
#define OSSR_M3 (1 << 3) /* Match status channel 3 */
#define OSSR_M2 (1 << 2) /* Match status channel 2 */
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 6bb3f47b1f14..0ca0db787903 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -456,7 +456,7 @@ static int lubbock_mci_init(struct device *dev,
init_timer(&mmc_timer);
mmc_timer.data = (unsigned long) data;
return request_irq(LUBBOCK_SD_IRQ, lubbock_detect_int,
- IRQF_SAMPLE_RANDOM, "lubbock-sd-detect", data);
+ 0, "lubbock-sd-detect", data);
}
static int lubbock_mci_get_ro(struct device *dev)
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 2db697cd2b4e..39561dcf65f2 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -633,9 +633,8 @@ static struct platform_device bq24022 = {
static int magician_mci_init(struct device *dev,
irq_handler_t detect_irq, void *data)
{
- return request_irq(IRQ_MAGICIAN_SD, detect_irq,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
- "mmc card detect", data);
+ return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED,
+ "mmc card detect", data);
}
static void magician_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 5905ed130e94..d89d87ae144c 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
static struct eeti_ts_platform_data eeti_ts_pdata = {
.irq_active_high = 1,
+ .irq_gpio = GPIO_TOUCH_IRQ,
};
static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
.type = "eeti_ts",
.addr = 0x0a,
- .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
.platform_data = &eeti_ts_pdata,
};
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index b4528899ef08..3fab583755d4 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -77,9 +77,10 @@ static void do_gpio_reset(void)
static void do_hw_reset(void)
{
/* Initialize the watchdog and let it fire */
- OWER = OWER_WME;
- OSSR = OSSR_M3;
- OSMR3 = OSCR + 368640; /* ... in 100 ms */
+ writel_relaxed(OWER_WME, OWER);
+ writel_relaxed(OSSR_M3, OSSR);
+ /* ... in 100 ms */
+ writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
}
void pxa_restart(char mode, const char *cmd)
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 3d6c9bd90de6..4bc47d63698b 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -35,7 +35,7 @@
static u32 notrace pxa_read_sched_clock(void)
{
- return OSCR;
+ return readl_relaxed(OSCR);
}
@@ -47,8 +47,8 @@ pxa_ost0_interrupt(int irq, void *dev_id)
struct clock_event_device *c = dev_id;
/* Disarm the compare/match, signal the event. */
- OIER &= ~OIER_E0;
- OSSR = OSSR_M0;
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
+ writel_relaxed(OSSR_M0, OSSR);
c->event_handler(c);
return IRQ_HANDLED;
@@ -59,10 +59,10 @@ pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
{
unsigned long next, oscr;
- OIER |= OIER_E0;
- next = OSCR + delta;
- OSMR0 = next;
- oscr = OSCR;
+ writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
+ next = readl_relaxed(OSCR) + delta;
+ writel_relaxed(next, OSMR0);
+ oscr = readl_relaxed(OSCR);
return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
}
@@ -72,15 +72,15 @@ pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
switch (mode) {
case CLOCK_EVT_MODE_ONESHOT:
- OIER &= ~OIER_E0;
- OSSR = OSSR_M0;
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
+ writel_relaxed(OSSR_M0, OSSR);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
/* initializing, released, or preparing for suspend */
- OIER &= ~OIER_E0;
- OSSR = OSSR_M0;
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
+ writel_relaxed(OSSR_M0, OSSR);
break;
case CLOCK_EVT_MODE_RESUME:
@@ -108,8 +108,8 @@ static void __init pxa_timer_init(void)
{
unsigned long clock_tick_rate = get_clock_tick_rate();
- OIER = 0;
- OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
+ writel_relaxed(0, OIER);
+ writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate);
@@ -122,7 +122,7 @@ static void __init pxa_timer_init(void)
setup_irq(IRQ_OST0, &pxa_ost0_irq);
- clocksource_mmio_init(&OSCR, "oscr0", clock_tick_rate, 200, 32,
+ clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
clocksource_mmio_readl_up);
clockevents_register_device(&ckevt_pxa_osmr0);
}
@@ -132,12 +132,12 @@ static unsigned long osmr[4], oier, oscr;
static void pxa_timer_suspend(void)
{
- osmr[0] = OSMR0;
- osmr[1] = OSMR1;
- osmr[2] = OSMR2;
- osmr[3] = OSMR3;
- oier = OIER;
- oscr = OSCR;
+ osmr[0] = readl_relaxed(OSMR0);
+ osmr[1] = readl_relaxed(OSMR1);
+ osmr[2] = readl_relaxed(OSMR2);
+ osmr[3] = readl_relaxed(OSMR3);
+ oier = readl_relaxed(OIER);
+ oscr = readl_relaxed(OSCR);
}
static void pxa_timer_resume(void)
@@ -151,12 +151,12 @@ static void pxa_timer_resume(void)
if (osmr[0] - oscr < MIN_OSCR_DELTA)
osmr[0] += MIN_OSCR_DELTA;
- OSMR0 = osmr[0];
- OSMR1 = osmr[1];
- OSMR2 = osmr[2];
- OSMR3 = osmr[3];
- OIER = oier;
- OSCR = oscr;
+ writel_relaxed(osmr[0], OSMR0);
+ writel_relaxed(osmr[1], OSMR1);
+ writel_relaxed(osmr[2], OSMR2);
+ writel_relaxed(osmr[3], OSMR3);
+ writel_relaxed(oier, OIER);
+ writel_relaxed(oscr, OSCR);
}
#else
#define pxa_timer_suspend NULL
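The conversion in this file (and in the SA-1100 counterpart further below) replaces register macros that expanded to a dereference of a volatile pointer, per the old __REG() definition, with explicit readl_relaxed()/writel_relaxed() calls on __iomem addresses. A minimal sketch of the resulting read-modify-write idiom, with ost_base and the offsets standing in for the statically mapped 0x90000000 OS-timer block used above:

    #include <linux/bitops.h>
    #include <linux/io.h>

    static void __iomem *ost_base;	/* mapped OS-timer block, e.g. via ioremap() */

    #define EX_OIER	(ost_base + 0x1c)	/* interrupt enable */
    #define EX_OSSR	(ost_base + 0x14)	/* status, cleared by writing 1s */

    static void example_disarm_match0(void)
    {
    	/* was: OIER &= ~OIER_E0;  OSSR = OSSR_M0; */
    	writel_relaxed(readl_relaxed(EX_OIER) & ~BIT(0), EX_OIER);
    	writel_relaxed(BIT(0), EX_OSSR);
    }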
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 2b6ac00b2cd9..166dd32cc1d3 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -332,8 +332,8 @@ static int trizeps4_mci_init(struct device *dev, irq_handler_t mci_detect_int,
int err;
err = request_irq(TRIZEPS4_MMC_IRQ, mci_detect_int,
- IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_SAMPLE_RANDOM,
- "MMC card detect", data);
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "MMC card detect", data);
if (err) {
printk(KERN_ERR "trizeps4_mci_init: MMC/SD: can't request"
"MMC card detect IRQ\n");
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index e24961109b70..d56b0f7f2b20 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02
select I2C
select POWER_SUPPLY
select MACH_NEO1973
- select S3C2410_PWM
+ select S3C24XX_PWM
select S3C_DEV_USB_HOST
help
Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
@@ -493,7 +493,7 @@ config MACH_RX1950
select S3C24XX_DCLK
select PM_H1940 if PM
select I2C
- select S3C2410_PWM
+ select S3C24XX_PWM
select S3C_DEV_NAND
select S3C2410_IOTIMING if S3C2440_CPUFREQ
select S3C2440_XTAL_16934400
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index 454831b66037..ee99fd56c043 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -24,7 +24,8 @@
*/
enum dma_ch {
- DMACH_XD0,
+ DMACH_DT_PROP = -1, /* not yet supported, do not use */
+ DMACH_XD0 = 0,
DMACH_XD1,
DMACH_SDI,
DMACH_SPI0,
diff --git a/arch/arm/mach-s3c64xx/include/mach/pm-core.h b/arch/arm/mach-s3c64xx/include/mach/pm-core.h
index fcf3dcabb694..c0537f40a3d8 100644
--- a/arch/arm/mach-s3c64xx/include/mach/pm-core.h
+++ b/arch/arm/mach-s3c64xx/include/mach/pm-core.h
@@ -12,6 +12,9 @@
* published by the Free Software Foundation.
*/
+#ifndef __MACH_S3C64XX_PM_CORE_H
+#define __MACH_S3C64XX_PM_CORE_H __FILE__
+
#include <mach/regs-gpio.h>
static inline void s3c_pm_debug_init_uart(void)
@@ -113,3 +116,4 @@ static inline void samsung_pm_saved_gpios(void)
__raw_writel(S3C64XX_SLPEN_USE_xSLP, S3C64XX_SLPEN);
}
+#endif /* __MACH_S3C64XX_PM_CORE_H */
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index d1dc7f1a239c..d673211f121c 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -362,7 +362,7 @@ static void __init assabet_init(void)
static void __init map_sa1100_gpio_regs( void )
{
unsigned long phys = __PREG(GPLR) & PMD_MASK;
- unsigned long virt = io_p2v(phys);
+ unsigned long virt = (unsigned long)io_p2v(phys);
int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
pmd_t *pmd;
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c
index 19b2053f5af4..e8f4d1e19233 100644
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ b/arch/arm/mach-sa1100/cpu-sa1100.c
@@ -87,6 +87,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/io.h>
#include <asm/cputype.h>
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c
index 675bf8ef97e8..48c45b0c92bb 100644
--- a/arch/arm/mach-sa1100/cpu-sa1110.c
+++ b/arch/arm/mach-sa1100/cpu-sa1110.c
@@ -19,6 +19,7 @@
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
diff --git a/arch/arm/mach-sa1100/include/mach/SA-1100.h b/arch/arm/mach-sa1100/include/mach/SA-1100.h
index 3f2d1b60188c..0ac6cc08a19c 100644
--- a/arch/arm/mach-sa1100/include/mach/SA-1100.h
+++ b/arch/arm/mach-sa1100/include/mach/SA-1100.h
@@ -830,14 +830,14 @@
* (read/write).
*/
-#define OSMR0 __REG(0x90000000) /* OS timer Match Reg. 0 */
-#define OSMR1 __REG(0x90000004) /* OS timer Match Reg. 1 */
-#define OSMR2 __REG(0x90000008) /* OS timer Match Reg. 2 */
-#define OSMR3 __REG(0x9000000c) /* OS timer Match Reg. 3 */
-#define OSCR __REG(0x90000010) /* OS timer Counter Reg. */
-#define OSSR __REG(0x90000014 ) /* OS timer Status Reg. */
-#define OWER __REG(0x90000018 ) /* OS timer Watch-dog Enable Reg. */
-#define OIER __REG(0x9000001C ) /* OS timer Interrupt Enable Reg. */
+#define OSMR0 io_p2v(0x90000000) /* OS timer Match Reg. 0 */
+#define OSMR1 io_p2v(0x90000004) /* OS timer Match Reg. 1 */
+#define OSMR2 io_p2v(0x90000008) /* OS timer Match Reg. 2 */
+#define OSMR3 io_p2v(0x9000000c) /* OS timer Match Reg. 3 */
+#define OSCR io_p2v(0x90000010) /* OS timer Counter Reg. */
+#define OSSR io_p2v(0x90000014) /* OS timer Status Reg. */
+#define OWER io_p2v(0x90000018) /* OS timer Watch-dog Enable Reg. */
+#define OIER io_p2v(0x9000001C) /* OS timer Interrupt Enable Reg. */
#define OSSR_M(Nb) /* Match detected [0..3] */ \
(0x00000001 << (Nb))
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
index a38fc4f54241..6a9eecf3137e 100644
--- a/arch/arm/mach-sa1100/include/mach/gpio.h
+++ b/arch/arm/mach-sa1100/include/mach/gpio.h
@@ -24,6 +24,7 @@
#ifndef __ASM_ARCH_SA1100_GPIO_H
#define __ASM_ARCH_SA1100_GPIO_H
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm-generic/gpio.h>
diff --git a/arch/arm/mach-sa1100/include/mach/hardware.h b/arch/arm/mach-sa1100/include/mach/hardware.h
index 99f5856d8de4..cbedd75a9d65 100644
--- a/arch/arm/mach-sa1100/include/mach/hardware.h
+++ b/arch/arm/mach-sa1100/include/mach/hardware.h
@@ -32,7 +32,7 @@
#define PIO_START 0x80000000 /* physical start of IO space */
#define io_p2v( x ) \
- ( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE )
+ IOMEM( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE )
#define io_v2p( x ) \
( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START )
@@ -47,6 +47,8 @@
#define CPU_SA1110_ID (0x6901b110)
#define CPU_SA1110_MASK (0xfffffff0)
+#define __MREG(x) IOMEM(io_p2v(x))
+
#ifndef __ASSEMBLY__
#include <asm/cputype.h>
@@ -56,7 +58,7 @@
#define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID)
#define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID)
-# define __REG(x) (*((volatile unsigned long *)io_p2v(x)))
+# define __REG(x) (*((volatile unsigned long __iomem *)io_p2v(x)))
# define __PREG(x) (io_v2p((unsigned long)&(x)))
static inline unsigned long get_clock_tick_rate(void)
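Wrapping io_p2v() in IOMEM() makes it return a void __iomem * cookie instead of a plain unsigned long, which is why map_sa1100_gpio_regs() above now needs an explicit cast back to unsigned long and why __REG() carries the __iomem qualifier. The payoff is static checking: sparse tracks the address space, so a direct dereference of an MMIO pointer gets flagged. A tiny, hypothetical illustration:

    #include <linux/io.h>
    #include <linux/types.h>

    static u32 example_read_status(void __iomem *reg)
    {
    	return readl_relaxed(reg);	/* ok: the accessor expects __iomem */
    	/* return *(u32 *)reg; */	/* sparse would warn: the cast drops the
    					 * __iomem address space */
    }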
diff --git a/arch/arm/mach-sa1100/include/mach/uncompress.h b/arch/arm/mach-sa1100/include/mach/uncompress.h
index 6cb39ddde656..5cf71da60e42 100644
--- a/arch/arm/mach-sa1100/include/mach/uncompress.h
+++ b/arch/arm/mach-sa1100/include/mach/uncompress.h
@@ -8,6 +8,8 @@
#include "hardware.h"
+#define IOMEM(x) (x)
+
/*
* The following code assumes the serial port has already been
* initialized by the bootloader. We search for the first enabled
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 516ccc25d7fd..2124f1fc2fbe 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/ioport.h>
#include <linux/syscore_ops.h>
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c
index b412fc09c80c..7f07f08d8968 100644
--- a/arch/arm/mach-sa1100/jornada720_ssp.c
+++ b/arch/arm/mach-sa1100/jornada720_ssp.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/jornada720.h>
diff --git a/arch/arm/mach-sa1100/leds-cerf.c b/arch/arm/mach-sa1100/leds-cerf.c
index 040540fb7d8a..30fc3b2bf555 100644
--- a/arch/arm/mach-sa1100/leds-cerf.c
+++ b/arch/arm/mach-sa1100/leds-cerf.c
@@ -4,6 +4,7 @@
* Author: ???
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/leds.h>
diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c
index 6a2352436e62..f8e47235babe 100644
--- a/arch/arm/mach-sa1100/leds-hackkit.c
+++ b/arch/arm/mach-sa1100/leds-hackkit.c
@@ -10,6 +10,7 @@
* as cpu led, the green one is used as timer led.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/leds.h>
diff --git a/arch/arm/mach-sa1100/leds-lart.c b/arch/arm/mach-sa1100/leds-lart.c
index a51830c60e53..50a5b143b460 100644
--- a/arch/arm/mach-sa1100/leds-lart.c
+++ b/arch/arm/mach-sa1100/leds-lart.c
@@ -10,6 +10,7 @@
* pace of the LED.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/leds.h>
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 690cf0ce5c0c..6645d1e31f14 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -23,6 +23,7 @@
* Storage is local on the stack now.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/time.h>
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 30cc6721665b..85863741ef8b 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -38,9 +38,9 @@ ENTRY(sa1100_finish_suspend)
orr r4, r4, #MDREFR_K1DB2
ldr r5, =PPCR
- @ Pre-load __udelay into the I-cache
+ @ Pre-load __loop_udelay into the I-cache
mov r0, #1
- bl __udelay
+ bl __loop_udelay
mov r0, r0
@ The following must all exist in a single cache line to
@@ -53,11 +53,11 @@ ENTRY(sa1100_finish_suspend)
@ delay 90us and set CPU PLL to lowest speed
@ fixes resume problem on high speed SA1110
mov r0, #90
- bl __udelay
+ bl __loop_udelay
mov r1, #0
str r1, [r5]
mov r0, #90
- bl __udelay
+ bl __loop_udelay
/*
* SA1110 SDRAM controller workaround. register values:
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 6af26e8d55e6..80702c9ecc77 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -22,7 +22,7 @@
static u32 notrace sa1100_read_sched_clock(void)
{
- return OSCR;
+ return readl_relaxed(OSCR);
}
#define MIN_OSCR_DELTA 2
@@ -32,8 +32,8 @@ static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id)
struct clock_event_device *c = dev_id;
/* Disarm the compare/match, signal the event. */
- OIER &= ~OIER_E0;
- OSSR = OSSR_M0;
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
+ writel_relaxed(OSSR_M0, OSSR);
c->event_handler(c);
return IRQ_HANDLED;
@@ -44,10 +44,10 @@ sa1100_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c)
{
unsigned long next, oscr;
- OIER |= OIER_E0;
- next = OSCR + delta;
- OSMR0 = next;
- oscr = OSCR;
+ writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
+ next = readl_relaxed(OSCR) + delta;
+ writel_relaxed(next, OSMR0);
+ oscr = readl_relaxed(OSCR);
return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
}
@@ -59,8 +59,8 @@ sa1100_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c)
case CLOCK_EVT_MODE_ONESHOT:
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- OIER &= ~OIER_E0;
- OSSR = OSSR_M0;
+ writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
+ writel_relaxed(OSSR_M0, OSSR);
break;
case CLOCK_EVT_MODE_RESUME:
@@ -86,8 +86,8 @@ static struct irqaction sa1100_timer_irq = {
static void __init sa1100_timer_init(void)
{
- OIER = 0;
- OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3;
+ writel_relaxed(0, OIER);
+ writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
setup_sched_clock(sa1100_read_sched_clock, 32, 3686400);
@@ -100,7 +100,7 @@ static void __init sa1100_timer_init(void)
setup_irq(IRQ_OST0, &sa1100_timer_irq);
- clocksource_mmio_init(&OSCR, "oscr", CLOCK_TICK_RATE, 200, 32,
+ clocksource_mmio_init(OSCR, "oscr", CLOCK_TICK_RATE, 200, 32,
clocksource_mmio_readl_up);
clockevents_register_device(&ckevt_sa1100_osmr0);
}
@@ -110,26 +110,26 @@ unsigned long osmr[4], oier;
static void sa1100_timer_suspend(void)
{
- osmr[0] = OSMR0;
- osmr[1] = OSMR1;
- osmr[2] = OSMR2;
- osmr[3] = OSMR3;
- oier = OIER;
+ osmr[0] = readl_relaxed(OSMR0);
+ osmr[1] = readl_relaxed(OSMR1);
+ osmr[2] = readl_relaxed(OSMR2);
+ osmr[3] = readl_relaxed(OSMR3);
+ oier = readl_relaxed(OIER);
}
static void sa1100_timer_resume(void)
{
- OSSR = 0x0f;
- OSMR0 = osmr[0];
- OSMR1 = osmr[1];
- OSMR2 = osmr[2];
- OSMR3 = osmr[3];
- OIER = oier;
+ writel_relaxed(0x0f, OSSR);
+ writel_relaxed(osmr[0], OSMR0);
+ writel_relaxed(osmr[1], OSMR1);
+ writel_relaxed(osmr[2], OSMR2);
+ writel_relaxed(osmr[3], OSMR3);
+ writel_relaxed(oier, OIER);
/*
* OSMR0 is the system timer: make sure OSCR is sufficiently behind
*/
- OSCR = OSMR0 - LATCH;
+ writel_relaxed(OSMR0 - LATCH, OSCR);
}
#else
#define sa1100_timer_suspend NULL
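With OSCR now an __iomem address rather than an lvalue, clocksource_mmio_init() receives the cookie directly instead of &OSCR. For reference, registering a 32-bit up-counting MMIO counter reduces to the following sketch (the base pointer and rate are placeholders; rating 200 and clocksource_mmio_readl_up match the calls above):

    #include <linux/clocksource.h>
    #include <linux/io.h>

    static void __iomem *counter_base;	/* placeholder: mapped free-running counter */

    static void __init example_clocksource_register(unsigned long rate)
    {
    	clocksource_mmio_init(counter_base, "oscr", rate, 200, 32,
    			      clocksource_mmio_readl_up);
    }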
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index df33909205e2..4cacc2d22fbe 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -19,6 +19,7 @@ config ARCH_SH7372
select CPU_V7
select SH_CLK_CPG
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_CPU_SUSPEND if PM || CPU_IDLE
config ARCH_SH73A0
bool "SH-Mobile AG5 (R8A73A00)"
@@ -58,6 +59,7 @@ config MACH_G4EVM
bool "G4EVM board"
depends on ARCH_SH7377
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_AP4EVB
bool "AP4EVB board"
@@ -65,6 +67,7 @@ config MACH_AP4EVB
select ARCH_REQUIRE_GPIOLIB
select SH_LCD_MIPI_DSI
select SND_SOC_AK4642 if SND_SIMPLE_CARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
choice
prompt "AP4EVB LCD panel selection"
@@ -83,6 +86,7 @@ config MACH_AG5EVM
bool "AG5EVM board"
select ARCH_REQUIRE_GPIOLIB
select SH_LCD_MIPI_DSI
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on ARCH_SH73A0
config MACH_MACKEREL
@@ -90,15 +94,18 @@ config MACH_MACKEREL
depends on ARCH_SH7372
select ARCH_REQUIRE_GPIOLIB
select SND_SOC_AK4642 if SND_SIMPLE_CARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_KOTA2
bool "KOTA2 board"
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on ARCH_SH73A0
config MACH_BONITO
bool "bonito board"
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on ARCH_R8A7740
config MACH_ARMADILLO800EVA
@@ -106,22 +113,28 @@ config MACH_ARMADILLO800EVA
depends on ARCH_R8A7740
select ARCH_REQUIRE_GPIOLIB
select USE_OF
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
+ select SND_SOC_WM8978 if SND_SIMPLE_CARD
config MACH_MARZEN
bool "MARZEN board"
depends on ARCH_R8A7779
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_KZM9D
bool "KZM9D board"
depends on ARCH_EMEV2
select USE_OF
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config MACH_KZM9G
bool "KZM-A9-GT board"
depends on ARCH_SH73A0
select ARCH_REQUIRE_GPIOLIB
select USE_OF
+ select SND_SOC_AK4642 if SND_SIMPLE_CARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
comment "SH-Mobile System Configuration"
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 8aa1962c22a2..0df5ae6740c6 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -39,7 +39,9 @@ obj-$(CONFIG_ARCH_R8A7740) += entry-intc.o
# PM objects
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_ARCH_SHMOBILE) += pm-rmobile.o
obj-$(CONFIG_ARCH_SH7372) += pm-sh7372.o sleep-sh7372.o
+obj-$(CONFIG_ARCH_R8A7740) += pm-r8a7740.o
obj-$(CONFIG_ARCH_R8A7779) += pm-r8a7779.o
# Board objects
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 5a6f22f05e99..d82c010fdfc6 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/serial_sci.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
@@ -52,6 +54,12 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/traps.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct resource smsc9220_resources[] = {
[0] = {
.start = 0x14000000,
@@ -142,6 +150,13 @@ static struct platform_device fsi_device = {
.resource = fsi_resources,
};
+/* Fixed 1.8V regulator to be used by MMCIF */
+static struct regulator_consumer_supply fixed1v8_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
+
static struct resource sh_mmcif_resources[] = {
[0] = {
.name = "MMCIF",
@@ -364,6 +379,13 @@ static struct platform_device mipidsi0_device = {
},
};
+/* Fixed 2.8V regulators to be used by SDHI0 */
+static struct regulator_consumer_supply fixed2v8_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+};
+
/* SDHI0 */
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
@@ -408,8 +430,57 @@ static struct platform_device sdhi0_device = {
},
};
-void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
+/* Fixed 3.3V regulator to be used by SDHI1 */
+static struct regulator_consumer_supply cn4_power_consumers[] =
{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
+static struct regulator_init_data cn4_power_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(cn4_power_consumers),
+ .consumer_supplies = cn4_power_consumers,
+};
+
+static struct fixed_voltage_config cn4_power_info = {
+ .supply_name = "CN4 SD/MMC Vdd",
+ .microvolts = 3300000,
+ .gpio = GPIO_PORT114,
+ .enable_high = 1,
+ .init_data = &cn4_power_init_data,
+};
+
+static struct platform_device cn4_power = {
+ .name = "reg-fixed-voltage",
+ .id = 2,
+ .dev = {
+ .platform_data = &cn4_power_info,
+ },
+};
+
+static void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
+{
+ static int power_gpio = -EINVAL;
+
+ if (power_gpio < 0) {
+ int ret = gpio_request(GPIO_PORT114, "sdhi1_power");
+ if (!ret) {
+ power_gpio = GPIO_PORT114;
+ gpio_direction_output(power_gpio, 0);
+ }
+ }
+
+ /*
+ * If requesting the GPIO above failed, it means that the regulator got
+ * probed and grabbed the GPIO, but we don't know whether the sdhi
+ * driver already uses the regulator. If it doesn't, we have to toggle
+ * the GPIO ourselves, even though it is now owned by the fixed
+ * regulator driver. We have to live with the race in case the driver
+ * gets unloaded and the GPIO freed between these two steps.
+ */
gpio_set_value(GPIO_PORT114, state);
}
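The cn4_power device above hands GPIO_PORT114 to the fixed-regulator driver, which is why ag5evm_sdhi1_set_pwr() only falls back to driving the GPIO itself when its own gpio_request() fails. On the consumer side the rail is switched through the regulator API; roughly what the MMC/SDHI stack does internally (illustrative only):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int example_power_up_card(struct device *dev)
    {
    	/* "vmmc" matches the REGULATOR_SUPPLY() entries registered above */
    	struct regulator *vmmc = regulator_get(dev, "vmmc");
    	int ret;

    	if (IS_ERR(vmmc))
    		return PTR_ERR(vmmc);

    	ret = regulator_enable(vmmc);	/* fixed regulator drives its GPIO high */
    	if (ret)
    		regulator_put(vmmc);

    	return ret;
    }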
@@ -455,6 +526,7 @@ static struct platform_device sdhi1_device = {
};
static struct platform_device *ag5evm_devices[] __initdata = {
+ &cn4_power,
&eth_device,
&keysc_device,
&fsi_device,
@@ -468,6 +540,12 @@ static struct platform_device *ag5evm_devices[] __initdata = {
static void __init ag5evm_init(void)
{
+ regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
+ ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
+ regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers,
+ ARRAY_SIZE(fixed2v8_power_consumers), 3300000);
+ regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
sh73a0_pinmux_init();
/* enable SCIFA2 */
@@ -562,8 +640,6 @@ static void __init ag5evm_init(void)
gpio_request(GPIO_FN_SDHID1_2_PU, NULL);
gpio_request(GPIO_FN_SDHID1_1_PU, NULL);
gpio_request(GPIO_FN_SDHID1_0_PU, NULL);
- gpio_request(GPIO_PORT114, "sdhi1_power");
- gpio_direction_output(GPIO_PORT114, 0);
#ifdef CONFIG_CACHE_L2X0
/* Shared attribute override enable, 64K*8way */
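Most boards in this series gain the same boilerplate: always-on fixed supplies so that the SDHI/MMCIF drivers, which now look up "vmmc"/"vqmmc", keep probing when CONFIG_REGULATOR is enabled. The helpers come from <linux/regulator/fixed.h>; the pattern reduces to the sketch below (consumer names are the ones used in these board files, the id only has to be unique per board):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/regulator/fixed.h>
    #include <linux/regulator/machine.h>

    static struct regulator_consumer_supply example_consumers[] = {
    	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
    	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
    };

    static void __init example_board_regulator_init(void)
    {
    	/* one always-on 3.3V rail backing both supplies above */
    	regulator_register_always_on(0, "fixed-3.3V", example_consumers,
    				     ARRAY_SIZE(example_consumers), 3300000);
    }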
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index ace60246a5df..f172ca85905c 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -34,6 +34,8 @@
#include <linux/i2c.h>
#include <linux/i2c/tsc2007.h>
#include <linux/io.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/sh_intc.h>
#include <linux/sh_clk.h>
@@ -159,6 +161,27 @@
* CN12: 3.3v
*/
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply fixed1v8_power_consumers[] =
+{
+ /* J22 default position: 1.8V */
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
+
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+};
+
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
/* MTD */
static struct mtd_partition nor_flash_partitions[] = {
{
@@ -1138,21 +1161,6 @@ static void __init fsi_init_pm_clock(void)
clk_put(fsia_ick);
}
-/*
- * FIXME !!
- *
- * gpio_no_direction
- * are quick_hack.
- *
- * current gpio frame work doesn't have
- * the method to control only pull up/down/free.
- * this function should be replaced by correct gpio function
- */
-static void __init gpio_no_direction(u32 addr)
-{
- __raw_writeb(0x00, addr);
-}
-
/* TouchScreen */
#ifdef CONFIG_AP4EVB_QHD
# define GPIO_TSC_IRQ GPIO_FN_IRQ28_123
@@ -1224,6 +1232,12 @@ static void __init ap4evb_init(void)
u32 srcr4;
struct clk *clk;
+ regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
+ ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
+ regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
/* External clock source */
clk_set_rate(&sh7372_dv_clki_clk, 27000000);
@@ -1302,8 +1316,8 @@ static void __init ap4evb_init(void)
gpio_request(GPIO_PORT9, NULL);
gpio_request(GPIO_PORT10, NULL);
- gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */
- gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */
+ gpio_direction_none(GPIO_PORT9CR); /* FSIAOBT needs no direction */
+ gpio_direction_none(GPIO_PORT10CR); /* FSIAOLR needs no direction */
/* card detect pin for MMC slot (CN7) */
gpio_request(GPIO_PORT41, NULL);
@@ -1447,14 +1461,14 @@ static void __init ap4evb_init(void)
platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
- sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
- sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
- sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
hdmi_init_pm_clock();
fsi_init_pm_clock();
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 9bd135531d76..453a6e50db8b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -28,6 +28,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/sh_eth.h>
#include <linux/videodev2.h>
#include <linux/usb/renesas_usbhs.h>
@@ -37,14 +39,20 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <mach/common.h>
#include <mach/irqs.h>
+#include <mach/r8a7740.h>
+#include <media/mt9t112.h>
+#include <media/sh_mobile_ceu.h>
+#include <media/soc_camera.h>
#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/hardware/cache-l2x0.h>
-#include <mach/r8a7740.h>
#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mobile_hdmi.h>
+#include <sound/sh_fsi.h>
+#include <sound/simple_card.h>
/*
* CON1 Camera Module
@@ -108,6 +116,14 @@
*/
/*
+ * FSI-WM8978
+ *
+ * This command is required for playback:
+ *
+ * # amixer set "Headphone" 50
+ */
+
+/*
* USB function
*
* When you use USB Function,
@@ -117,14 +133,8 @@
* These are a little bit complex.
* see
* usbhsf_power_ctrl()
- *
- * CAUTION
- *
- * It uses autonomy mode for USB hotplug at this point
- * (= usbhs_private.platform_callback.get_vbus is NULL),
- * since we don't know what's happen on PM control
- * on this workaround.
*/
+#define IRQ7 evt2irq(0x02e0)
#define USBCR1 0xe605810a
#define USBH 0xC6700000
#define USBH_USBCTR 0x10834
@@ -204,6 +214,20 @@ static void usbhsf_power_ctrl(struct platform_device *pdev,
}
}
+static int usbhsf_get_vbus(struct platform_device *pdev)
+{
+ return gpio_get_value(GPIO_PORT209);
+}
+
+static irqreturn_t usbhsf_interrupt(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ return IRQ_HANDLED;
+}
+
static void usbhsf_hardware_exit(struct platform_device *pdev)
{
struct usbhsf_private *priv = usbhsf_get_priv(pdev);
@@ -227,11 +251,14 @@ static void usbhsf_hardware_exit(struct platform_device *pdev)
priv->host = NULL;
priv->func = NULL;
priv->usbh_base = NULL;
+
+ free_irq(IRQ7, pdev);
}
static int usbhsf_hardware_init(struct platform_device *pdev)
{
struct usbhsf_private *priv = usbhsf_get_priv(pdev);
+ int ret;
priv->phy = clk_get(&pdev->dev, "phy");
priv->usb24 = clk_get(&pdev->dev, "usb24");
@@ -251,6 +278,14 @@ static int usbhsf_hardware_init(struct platform_device *pdev)
return -EIO;
}
+ ret = request_irq(IRQ7, usbhsf_interrupt, IRQF_TRIGGER_NONE,
+ dev_name(&pdev->dev), pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq err\n");
+ return ret;
+ }
+ irq_set_irq_type(IRQ7, IRQ_TYPE_EDGE_BOTH);
+
/* usb24 use 1/1 of parent clock (= usb24s = 24MHz) */
clk_set_rate(priv->usb24,
clk_get_rate(clk_get_parent(priv->usb24)));
@@ -262,6 +297,7 @@ static struct usbhsf_private usbhsf_private = {
.info = {
.platform_callback = {
.get_id = usbhsf_get_id,
+ .get_vbus = usbhsf_get_vbus,
.hardware_init = usbhsf_hardware_init,
.hardware_exit = usbhsf_hardware_exit,
.power_ctrl = usbhsf_power_ctrl,
@@ -269,6 +305,8 @@ static struct usbhsf_private usbhsf_private = {
.driver_param = {
.buswait_bwait = 5,
.detection_delay = 5,
+ .d0_rx_id = SHDMA_SLAVE_USBHS_RX,
+ .d1_tx_id = SHDMA_SLAVE_USBHS_TX,
},
}
};
@@ -384,14 +422,112 @@ static struct platform_device lcdc0_device = {
},
};
+/*
+ * LCDC1/HDMI
+ */
+static struct sh_mobile_hdmi_info hdmi_info = {
+ .flags = HDMI_OUTPUT_PUSH_PULL |
+ HDMI_OUTPUT_POLARITY_HI |
+ HDMI_32BIT_REG |
+ HDMI_HAS_HTOP1 |
+ HDMI_SND_SRC_SPDIF,
+};
+
+static struct resource hdmi_resources[] = {
+ [0] = {
+ .name = "HDMI",
+ .start = 0xe6be0000,
+ .end = 0xe6be03ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x1700),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .name = "HDMI emma3pf",
+ .start = 0xe6be4000,
+ .end = 0xe6be43ff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device hdmi_device = {
+ .name = "sh-mobile-hdmi",
+ .num_resources = ARRAY_SIZE(hdmi_resources),
+ .resource = hdmi_resources,
+ .id = -1,
+ .dev = {
+ .platform_data = &hdmi_info,
+ },
+};
+
+static const struct fb_videomode lcdc1_mode = {
+ .name = "HDMI 720p",
+ .xres = 1280,
+ .yres = 720,
+ .pixclock = 13468,
+ .left_margin = 220,
+ .right_margin = 110,
+ .hsync_len = 40,
+ .upper_margin = 20,
+ .lower_margin = 5,
+ .vsync_len = 5,
+ .refresh = 60,
+ .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+};
+
+static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
+ .clock_source = LCDC_CLK_PERIPHERAL, /* HDMI clock */
+ .ch[0] = {
+ .chan = LCDC_CHAN_MAINLCD,
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .interface_type = RGB24,
+ .clock_divider = 1,
+ .flags = LCDC_FLAGS_DWPOL,
+ .lcd_modes = &lcdc1_mode,
+ .num_modes = 1,
+ .tx_dev = &hdmi_device,
+ .panel_cfg = {
+ .width = 1280,
+ .height = 720,
+ },
+ },
+};
+
+static struct resource hdmi_lcdc_resources[] = {
+ [0] = {
+ .name = "LCDC1",
+ .start = 0xfe944000,
+ .end = 0xfe948000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x1780),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device hdmi_lcdc_device = {
+ .name = "sh_mobile_lcdc_fb",
+ .num_resources = ARRAY_SIZE(hdmi_lcdc_resources),
+ .resource = hdmi_lcdc_resources,
+ .id = 1,
+ .dev = {
+ .platform_data = &hdmi_lcdc_info,
+ .coherent_dma_mask = ~0,
+ },
+};
+
/* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+ { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"),
- GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"),
- GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"),
- GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"),
+ GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1),
+ GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"),
+ GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"),
+ GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"),
};
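The reworked GPIO_KEY() macro forwards any extra designated initializers through __VA_ARGS__, which is how the power key picks up .wakeup = 1 without needing a second macro variant. Expanded by hand, that first entry is equivalent to:

    static struct gpio_keys_button example_power_key = {
    	.code		= KEY_POWER,
    	.gpio		= GPIO_PORT99,
    	.desc		= "SW3",
    	.active_low	= 1,
    	.wakeup		= 1,	/* this button may wake the system from suspend */
    };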
static struct gpio_keys_platform_data gpio_key_info = {
@@ -407,6 +543,17 @@ static struct platform_device gpio_keys_device = {
},
};
+/* Fixed 3.3V regulator to be used by SDHI0, SDHI1, MMCIF */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
+};
+
/* SDHI0 */
/*
* FIXME
@@ -418,6 +565,8 @@ static struct platform_device gpio_keys_device = {
*/
#define IRQ31 evt2irq(0x33E0)
static struct sh_mobile_sdhi_info sdhi0_info = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |\
MMC_CAP_NEEDS_POLL,
.tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -458,6 +607,8 @@ static struct platform_device sdhi0_device = {
/* SDHI1 */
static struct sh_mobile_sdhi_info sdhi1_info = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
@@ -532,12 +683,209 @@ static struct platform_device sh_mmcif_device = {
.resource = sh_mmcif_resources,
};
+/* Camera */
+static int mt9t111_power(struct device *dev, int mode)
+{
+ struct clk *mclk = clk_get(NULL, "video1");
+
+ if (IS_ERR(mclk)) {
+ dev_err(dev, "can't get video1 clock\n");
+ return -EINVAL;
+ }
+
+ if (mode) {
+ /* video1 (= CON1 camera) expect 24MHz */
+ clk_set_rate(mclk, clk_round_rate(mclk, 24000000));
+ clk_enable(mclk);
+ gpio_direction_output(GPIO_PORT158, 1);
+ } else {
+ gpio_direction_output(GPIO_PORT158, 0);
+ clk_disable(mclk);
+ }
+
+ clk_put(mclk);
+
+ return 0;
+}
+
+static struct i2c_board_info i2c_camera_mt9t111 = {
+ I2C_BOARD_INFO("mt9t112", 0x3d),
+};
+
+static struct mt9t112_camera_info mt9t111_info = {
+ .divider = { 16, 0, 0, 7, 0, 10, 14, 7, 7 },
+};
+
+static struct soc_camera_link mt9t111_link = {
+ .i2c_adapter_id = 0,
+ .bus_id = 0,
+ .board_info = &i2c_camera_mt9t111,
+ .power = mt9t111_power,
+ .priv = &mt9t111_info,
+};
+
+static struct platform_device camera_device = {
+ .name = "soc-camera-pdrv",
+ .id = 0,
+ .dev = {
+ .platform_data = &mt9t111_link,
+ },
+};
+
+/* CEU0 */
+static struct sh_mobile_ceu_info sh_mobile_ceu0_info = {
+ .flags = SH_CEU_FLAG_LOWER_8BIT,
+};
+
+static struct resource ceu0_resources[] = {
+ [0] = {
+ .name = "CEU",
+ .start = 0xfe910000,
+ .end = 0xfe91009f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = intcs_evt2irq(0x0500),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ /* placeholder for contiguous memory */
+ },
+};
+
+static struct platform_device ceu0_device = {
+ .name = "sh_mobile_ceu",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ceu0_resources),
+ .resource = ceu0_resources,
+ .dev = {
+ .platform_data = &sh_mobile_ceu0_info,
+ .coherent_dma_mask = 0xffffffff,
+ },
+};
+
+/* FSI */
+static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
+{
+ struct clk *fsib;
+ int ret;
+
+ /* only 48kHz is supported */
+ if (48000 != rate)
+ return -EINVAL;
+
+ fsib = clk_get(dev, "ickb");
+ if (IS_ERR(fsib))
+ return -EINVAL;
+
+ if (enable) {
+ ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+ clk_enable(fsib);
+ } else {
+ ret = 0;
+ clk_disable(fsib);
+ }
+
+ clk_put(fsib);
+
+ return ret;
+}
+
+static struct sh_fsi_platform_info fsi_info = {
+ /* FSI-WM8978 */
+ .port_a = {
+ .tx_id = SHDMA_SLAVE_FSIA_TX,
+ },
+ /* FSI-HDMI */
+ .port_b = {
+ .flags = SH_FSI_FMT_SPDIF |
+ SH_FSI_ENABLE_STREAM_MODE,
+ .set_rate = fsi_hdmi_set_rate,
+ .tx_id = SHDMA_SLAVE_FSIB_TX,
+ }
+};
+
+static struct resource fsi_resources[] = {
+ [0] = {
+ .name = "FSI",
+ .start = 0xfe1f0000,
+ .end = 0xfe1f8400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = evt2irq(0x1840),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device fsi_device = {
+ .name = "sh_fsi2",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(fsi_resources),
+ .resource = fsi_resources,
+ .dev = {
+ .platform_data = &fsi_info,
+ },
+};
+
+/* FSI-WM8978 */
+static struct asoc_simple_dai_init_info fsi_wm8978_init_info = {
+ .fmt = SND_SOC_DAIFMT_I2S,
+ .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF,
+ .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
+ .sysclk = 12288000,
+};
+
+static struct asoc_simple_card_info fsi_wm8978_info = {
+ .name = "wm8978",
+ .card = "FSI2A-WM8978",
+ .cpu_dai = "fsia-dai",
+ .codec = "wm8978.0-001a",
+ .platform = "sh_fsi2",
+ .codec_dai = "wm8978-hifi",
+ .init = &fsi_wm8978_init_info,
+};
+
+static struct platform_device fsi_wm8978_device = {
+ .name = "asoc-simple-card",
+ .id = 0,
+ .dev = {
+ .platform_data = &fsi_wm8978_info,
+ },
+};
+
+/* FSI-HDMI */
+static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = {
+ .cpu_daifmt = SND_SOC_DAIFMT_CBM_CFM,
+};
+
+static struct asoc_simple_card_info fsi2_hdmi_info = {
+ .name = "HDMI",
+ .card = "FSI2B-HDMI",
+ .cpu_dai = "fsib-dai",
+ .codec = "sh-mobile-hdmi",
+ .platform = "sh_fsi2",
+ .codec_dai = "sh_mobile_hdmi-hifi",
+ .init = &fsi2_hdmi_init_info,
+};
+
+static struct platform_device fsi_hdmi_device = {
+ .name = "asoc-simple-card",
+ .id = 1,
+ .dev = {
+ .platform_data = &fsi2_hdmi_info,
+ },
+};
+
/* I2C */
static struct i2c_board_info i2c0_devices[] = {
{
I2C_BOARD_INFO("st1232-ts", 0x55),
.irq = evt2irq(0x0340),
},
+ {
+ I2C_BOARD_INFO("wm8978", 0x1a),
+ },
};
/*
@@ -549,6 +897,13 @@ static struct platform_device *eva_devices[] __initdata = {
&sh_eth_device,
&sdhi0_device,
&sh_mmcif_device,
+ &hdmi_device,
+ &hdmi_lcdc_device,
+ &camera_device,
+ &ceu0_device,
+ &fsi_device,
+ &fsi_wm8978_device,
+ &fsi_hdmi_device,
};
static void __init eva_clock_init(void)
@@ -556,10 +911,14 @@ static void __init eva_clock_init(void)
struct clk *system = clk_get(NULL, "system_clk");
struct clk *xtal1 = clk_get(NULL, "extal1");
struct clk *usb24s = clk_get(NULL, "usb24s");
+ struct clk *fsibck = clk_get(NULL, "fsibck");
+ struct clk *fsib = clk_get(&fsi_device.dev, "ickb");
if (IS_ERR(system) ||
IS_ERR(xtal1) ||
- IS_ERR(usb24s)) {
+ IS_ERR(usb24s) ||
+ IS_ERR(fsibck) ||
+ IS_ERR(fsib)) {
pr_err("armadillo800eva board clock init failed\n");
goto clock_error;
}
@@ -570,6 +929,11 @@ static void __init eva_clock_init(void)
/* usb24s use extal1 (= system) clock (= 24MHz) */
clk_set_parent(usb24s, system);
+ /* FSIBCK is 12.288MHz, and it is the parent of FSI-B */
+ clk_set_parent(fsib, fsibck);
+ clk_set_rate(fsibck, 12288000);
+ clk_set_rate(fsib, 12288000);
+
clock_error:
if (!IS_ERR(system))
clk_put(system);
@@ -577,16 +941,26 @@ clock_error:
clk_put(xtal1);
if (!IS_ERR(usb24s))
clk_put(usb24s);
+ if (!IS_ERR(fsibck))
+ clk_put(fsibck);
+ if (!IS_ERR(fsib))
+ clk_put(fsib);
}
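eva_clock_init() above reparents the FSI-B interface clock to the external FSIBCK source and pins both at 12.288 MHz before audio is used. As a standalone sketch with the generic clk API (same clock names as above; error handling trimmed to the essentials):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/init.h>

    static void __init example_fsib_clock_setup(struct device *fsi_dev)
    {
    	struct clk *fsibck = clk_get(NULL, "fsibck");
    	struct clk *fsib = clk_get(fsi_dev, "ickb");

    	if (IS_ERR(fsibck) || IS_ERR(fsib))
    		goto out;

    	clk_set_parent(fsib, fsibck);	/* feed FSI-B from the 12.288MHz source */
    	clk_set_rate(fsibck, 12288000);
    	clk_set_rate(fsib, 12288000);
    out:
    	if (!IS_ERR(fsibck))
    		clk_put(fsibck);
    	if (!IS_ERR(fsib))
    		clk_put(fsib);
    }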
/*
* board init
*/
+#define GPIO_PORT7CR 0xe6050007
+#define GPIO_PORT8CR 0xe6050008
static void __init eva_init(void)
{
- eva_clock_init();
+ struct platform_device *usb = NULL;
+
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
r8a7740_pinmux_init();
+ r8a7740_meram_workaround();
/* SCIFA1 */
gpio_request(GPIO_FN_SCIFA1_RXD, NULL);
@@ -667,8 +1041,19 @@ static void __init eva_init(void)
/* USB Host */
} else {
/* USB Func */
- gpio_request(GPIO_FN_VBUS, NULL);
+ /*
+ * The A1 chip has two IRQ7 pins, selected by the MSEL register.
+ * OTOH, the usbhs interrupt needs the pin's value (HI/LOW) to decide
+ * USB connection/disconnection (usbhsf_get_vbus()).
+ * This means we need to select GPIO_FN_IRQ7_PORT209 first,
+ * and then select GPIO_PORT209 here.
+ */
+ gpio_request(GPIO_FN_IRQ7_PORT209, NULL);
+ gpio_request(GPIO_PORT209, NULL);
+ gpio_direction_input(GPIO_PORT209);
+
platform_device_register(&usbhsf_device);
+ usb = &usbhsf_device;
}
/* SDHI0 */
@@ -706,6 +1091,48 @@ static void __init eva_init(void)
gpio_request(GPIO_FN_MMC1_D6_PORT143, NULL);
gpio_request(GPIO_FN_MMC1_D7_PORT142, NULL);
+ /* CEU0 */
+ gpio_request(GPIO_FN_VIO0_D7, NULL);
+ gpio_request(GPIO_FN_VIO0_D6, NULL);
+ gpio_request(GPIO_FN_VIO0_D5, NULL);
+ gpio_request(GPIO_FN_VIO0_D4, NULL);
+ gpio_request(GPIO_FN_VIO0_D3, NULL);
+ gpio_request(GPIO_FN_VIO0_D2, NULL);
+ gpio_request(GPIO_FN_VIO0_D1, NULL);
+ gpio_request(GPIO_FN_VIO0_D0, NULL);
+ gpio_request(GPIO_FN_VIO0_CLK, NULL);
+ gpio_request(GPIO_FN_VIO0_HD, NULL);
+ gpio_request(GPIO_FN_VIO0_VD, NULL);
+ gpio_request(GPIO_FN_VIO0_FIELD, NULL);
+ gpio_request(GPIO_FN_VIO_CKO, NULL);
+
+ /* CON1/CON15 Camera */
+ gpio_request(GPIO_PORT173, NULL); /* STANDBY */
+ gpio_request(GPIO_PORT172, NULL); /* RST */
+ gpio_request(GPIO_PORT158, NULL); /* CAM_PON */
+ gpio_direction_output(GPIO_PORT173, 0);
+ gpio_direction_output(GPIO_PORT172, 1);
+ gpio_direction_output(GPIO_PORT158, 0); /* see mt9t111_power() */
+
+ /* FSI-WM8978 */
+ gpio_request(GPIO_FN_FSIAIBT, NULL);
+ gpio_request(GPIO_FN_FSIAILR, NULL);
+ gpio_request(GPIO_FN_FSIAOMC, NULL);
+ gpio_request(GPIO_FN_FSIAOSLD, NULL);
+ gpio_request(GPIO_FN_FSIAISLD_PORT5, NULL);
+
+ gpio_request(GPIO_PORT7, NULL);
+ gpio_request(GPIO_PORT8, NULL);
+ gpio_direction_none(GPIO_PORT7CR); /* FSIAOBT needs no direction */
+ gpio_direction_none(GPIO_PORT8CR); /* FSIAOLR needs no direction */
+
+ /* FSI-HDMI */
+ gpio_request(GPIO_FN_FSIBCK, NULL);
+
+ /* HDMI */
+ gpio_request(GPIO_FN_HDMI_HPD, NULL);
+ gpio_request(GPIO_FN_HDMI_CEC, NULL);
+
/*
* CAUTION
*
@@ -752,6 +1179,13 @@ static void __init eva_init(void)
platform_add_devices(eva_devices,
ARRAY_SIZE(eva_devices));
+
+ eva_clock_init();
+
+ rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device);
+ if (usb)
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb);
}
static void __init eva_earlytimer_init(void)
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index e9b32cfbf741..4129008eae29 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -26,6 +26,8 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/videodev2.h>
#include <mach/common.h>
@@ -75,6 +77,12 @@
* S38.2 = OFF
*/
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
/*
* FPGA
*/
@@ -360,6 +368,8 @@ static void __init bonito_init(void)
{
u16 val;
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
r8a7740_pinmux_init();
bonito_fpga_init();
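regulator_register_fixed() with the two smsc911x supplies is the minimal fix that keeps the Ethernet driver probing once CONFIG_REGULATOR is enabled: at probe time smsc911x asks for "vdd33a" and "vddvario" and fails without them, even though no real switching is needed on these boards. Roughly the driver-side lookup it satisfies (simplified, for illustration):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    static struct regulator_bulk_data example_smsc_supplies[] = {
    	{ .supply = "vdd33a" },		/* 3.3V analogue rail */
    	{ .supply = "vddvario" },	/* variable I/O rail */
    };

    static int example_smsc_get_supplies(struct device *dev)
    {
    	/* succeeds only if the board registered these supplies (dummy or real) */
    	return regulator_bulk_get(dev, ARRAY_SIZE(example_smsc_supplies),
    				  example_smsc_supplies);
    }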
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index f1257321999a..fa5dfc5c8ed6 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -26,6 +26,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/usb/r8a66597.h>
#include <linux/io.h>
#include <linux/input.h>
@@ -196,6 +198,15 @@ static struct platform_device keysc_device = {
},
};
+/* Fixed 3.3V regulator to be used by SDHI0 and SDHI1 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
/* SDHI */
static struct sh_mobile_sdhi_info sdhi0_info = {
.tmio_caps = MMC_CAP_SDIO_IRQ,
@@ -271,26 +282,11 @@ static struct platform_device *g4evm_devices[] __initdata = {
#define GPIO_SDHID1_D3 0xe6052106
#define GPIO_SDHICMD1 0xe6052107
-/*
- * FIXME !!
- *
- * gpio_pull_up is quick_hack.
- *
- * current gpio frame work doesn't have
- * the method to control only pull up/down/free.
- * this function should be replaced by correct gpio function
- */
-static void __init gpio_pull_up(u32 addr)
-{
- u8 data = __raw_readb(addr);
-
- data &= 0x0F;
- data |= 0xC0;
- __raw_writeb(data, addr);
-}
-
static void __init g4evm_init(void)
{
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
sh7377_pinmux_init();
/* Lit DS14 LED */
@@ -351,11 +347,11 @@ static void __init g4evm_init(void)
gpio_request(GPIO_FN_SDHID0_3, NULL);
gpio_request(GPIO_FN_SDHICMD0, NULL);
gpio_request(GPIO_FN_SDHIWP0, NULL);
- gpio_pull_up(GPIO_SDHID0_D0);
- gpio_pull_up(GPIO_SDHID0_D1);
- gpio_pull_up(GPIO_SDHID0_D2);
- gpio_pull_up(GPIO_SDHID0_D3);
- gpio_pull_up(GPIO_SDHICMD0);
+ gpio_request_pullup(GPIO_SDHID0_D0);
+ gpio_request_pullup(GPIO_SDHID0_D1);
+ gpio_request_pullup(GPIO_SDHID0_D2);
+ gpio_request_pullup(GPIO_SDHID0_D3);
+ gpio_request_pullup(GPIO_SDHICMD0);
/* SDHI1 */
gpio_request(GPIO_FN_SDHICLK1, NULL);
@@ -364,11 +360,11 @@ static void __init g4evm_init(void)
gpio_request(GPIO_FN_SDHID1_2, NULL);
gpio_request(GPIO_FN_SDHID1_3, NULL);
gpio_request(GPIO_FN_SDHICMD1, NULL);
- gpio_pull_up(GPIO_SDHID1_D0);
- gpio_pull_up(GPIO_SDHID1_D1);
- gpio_pull_up(GPIO_SDHID1_D2);
- gpio_pull_up(GPIO_SDHID1_D3);
- gpio_pull_up(GPIO_SDHICMD1);
+ gpio_request_pullup(GPIO_SDHID1_D0);
+ gpio_request_pullup(GPIO_SDHID1_D1);
+ gpio_request_pullup(GPIO_SDHID1_D2);
+ gpio_request_pullup(GPIO_SDHID1_D3);
+ gpio_request_pullup(GPIO_SDHICMD1);
sh7377_add_standard_devices();
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index f60f1b281cc4..21dbe54304d5 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -27,6 +27,8 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
#include <linux/input.h>
@@ -49,6 +51,12 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/traps.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
/* SMSC 9220 */
static struct resource smsc9220_resources[] = {
[0] = {
@@ -288,6 +296,13 @@ static struct platform_device leds_tpu30_device = {
.resource = tpu30_resources,
};
+/* Fixed 1.8V regulator to be used by MMCIF */
+static struct regulator_consumer_supply fixed1v8_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
+
/* MMCIF */
static struct resource mmcif_resources[] = {
[0] = {
@@ -321,6 +336,15 @@ static struct platform_device mmcif_device = {
.resource = mmcif_resources,
};
+/* Fixed 3.3V regulator to be used by SDHI0 and SDHI1 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
/* SDHI0 */
static struct sh_mobile_sdhi_info sdhi0_info = {
.tmio_caps = MMC_CAP_SD_HIGHSPEED,
@@ -411,6 +435,12 @@ static struct platform_device *kota2_devices[] __initdata = {
static void __init kota2_init(void)
{
+ regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
+ ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
+ regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
sh73a0_pinmux_init();
/* SCIFA2 (UART2) */
diff --git a/arch/arm/mach-shmobile/board-kzm9d.c b/arch/arm/mach-shmobile/board-kzm9d.c
index 6a33cf393428..2c986eaae7b4 100644
--- a/arch/arm/mach-shmobile/board-kzm9d.c
+++ b/arch/arm/mach-shmobile/board-kzm9d.c
@@ -21,6 +21,8 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <mach/common.h>
#include <mach/emev2.h>
@@ -28,6 +30,12 @@
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
/* Ether */
static struct resource smsc911x_resources[] = {
[0] = {
@@ -63,6 +71,8 @@ static struct platform_device *kzm9d_devices[] __initdata = {
void __init kzm9d_add_standard_devices(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
emev2_add_standard_devices();
platform_add_devices(kzm9d_devices, ARRAY_SIZE(kzm9d_devices));
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index c0ae815e7beb..53b7ea92c32c 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -30,9 +30,14 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/usb/r8a66597.h>
+#include <linux/usb/renesas_usbhs.h>
#include <linux/videodev2.h>
+#include <sound/sh_fsi.h>
+#include <sound/simple_card.h>
#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
@@ -54,6 +59,20 @@
#define GPIO_PCF8575_PORT15 (GPIO_NR + 13)
#define GPIO_PCF8575_PORT16 (GPIO_NR + 14)
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
+/*
+ * FSI-AK4648
+ *
+ * This command is required for playback:
+ *
+ * # amixer set "LINEOUT Mixer DACL" on
+ */
+
/* SMSC 9221 */
static struct resource smsc9221_resources[] = {
[0] = {
@@ -112,6 +131,151 @@ static struct platform_device usb_host_device = {
.resource = usb_resources,
};
+/* USB Func CN17 */
+struct usbhs_private {
+ unsigned int phy;
+ unsigned int cr2;
+ struct renesas_usbhs_platform_info info;
+};
+
+#define IRQ15 intcs_evt2irq(0x03e0)
+#define USB_PHY_MODE (1 << 4)
+#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
+#define USB_PHY_ON (1 << 1)
+#define USB_PHY_OFF (1 << 0)
+#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
+
+#define usbhs_get_priv(pdev) \
+ container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info)
+
+static int usbhs_get_vbus(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ return !((1 << 7) & __raw_readw(priv->cr2));
+}
+
+static void usbhs_phy_reset(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ /* init phy */
+ __raw_writew(0x8a0a, priv->cr2);
+}
+
+static int usbhs_get_id(struct platform_device *pdev)
+{
+ return USBHS_GADGET;
+}
+
+static irqreturn_t usbhs_interrupt(int irq, void *data)
+{
+ struct platform_device *pdev = data;
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ /* clear status */
+ __raw_writew(__raw_readw(priv->phy) | USB_PHY_INT_CLR, priv->phy);
+
+ return IRQ_HANDLED;
+}
+
+static int usbhs_hardware_init(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+ int ret;
+
+ /* clear interrupt status */
+ __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
+
+ ret = request_irq(IRQ15, usbhs_interrupt, IRQF_TRIGGER_HIGH,
+ dev_name(&pdev->dev), pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq err\n");
+ return ret;
+ }
+
+ /* enable USB phy interrupt */
+ __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->phy);
+
+ return 0;
+}
+
+static void usbhs_hardware_exit(struct platform_device *pdev)
+{
+ struct usbhs_private *priv = usbhs_get_priv(pdev);
+
+ /* clear interrupt status */
+ __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
+
+ free_irq(IRQ15, pdev);
+}
+
+static u32 usbhs_pipe_cfg[] = {
+ USB_ENDPOINT_XFER_CONTROL,
+ USB_ENDPOINT_XFER_ISOC,
+ USB_ENDPOINT_XFER_ISOC,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_INT,
+ USB_ENDPOINT_XFER_INT,
+ USB_ENDPOINT_XFER_INT,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+ USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usbhs_private usbhs_private = {
+ .phy = 0xe60781e0, /* USBPHYINT */
+ .cr2 = 0xe605810c, /* USBCR2 */
+ .info = {
+ .platform_callback = {
+ .hardware_init = usbhs_hardware_init,
+ .hardware_exit = usbhs_hardware_exit,
+ .get_id = usbhs_get_id,
+ .phy_reset = usbhs_phy_reset,
+ .get_vbus = usbhs_get_vbus,
+ },
+ .driver_param = {
+ .buswait_bwait = 4,
+ .has_otg = 1,
+ .pipe_type = usbhs_pipe_cfg,
+ .pipe_size = ARRAY_SIZE(usbhs_pipe_cfg),
+ },
+ },
+};
+
+static struct resource usbhs_resources[] = {
+ [0] = {
+ .start = 0xE6890000,
+ .end = 0xE68900e6 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(62),
+ .end = gic_spi(62),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usbhs_device = {
+ .name = "renesas_usbhs",
+ .id = -1,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ .platform_data = &usbhs_private.info,
+ },
+ .num_resources = ARRAY_SIZE(usbhs_resources),
+ .resource = usbhs_resources,
+};
+
/* LCDC */
static struct fb_videomode kzm_lcdc_mode = {
.name = "WVGA Panel",
@@ -166,6 +330,13 @@ static struct platform_device lcdc_device = {
},
};
+/* Fixed 1.8V regulator to be used by MMCIF */
+static struct regulator_consumer_supply fixed1v8_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
+
/* MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
@@ -187,6 +358,8 @@ static struct resource sh_mmcif_resources[] = {
static struct sh_mmcif_plat_data sh_mmcif_platdata = {
.ocr = MMC_VDD_165_195,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .slave_id_tx = SHDMA_SLAVE_MMCIF_TX,
+ .slave_id_rx = SHDMA_SLAVE_MMCIF_RX,
};
static struct platform_device mmc_device = {
@@ -200,6 +373,15 @@ static struct platform_device mmc_device = {
.resource = sh_mmcif_resources,
};
+/* Fixed 2.8V regulators to be used by SDHI0 and SDHI2 */
+static struct regulator_consumer_supply fixed2v8_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.2"),
+};
+
/* SDHI */
static struct sh_mobile_sdhi_info sdhi0_info = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
@@ -240,6 +422,50 @@ static struct platform_device sdhi0_device = {
},
};
+/* Micro SD */
+static struct sh_mobile_sdhi_info sdhi2_info = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT |
+ TMIO_MMC_USE_GPIO_CD |
+ TMIO_MMC_WRPROTECT_DISABLE,
+ .tmio_caps = MMC_CAP_SD_HIGHSPEED,
+ .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
+ .cd_gpio = GPIO_PORT13,
+};
+
+static struct resource sdhi2_resources[] = {
+ [0] = {
+ .name = "SDHI2",
+ .start = 0xee140000,
+ .end = 0xee1400ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT,
+ .start = gic_spi(103),
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .name = SH_MOBILE_SDHI_IRQ_SDCARD,
+ .start = gic_spi(104),
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .name = SH_MOBILE_SDHI_IRQ_SDIO,
+ .start = gic_spi(105),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi2_device = {
+ .name = "sh_mobile_sdhi",
+ .id = 2,
+ .num_resources = ARRAY_SIZE(sdhi2_resources),
+ .resource = sdhi2_resources,
+ .dev = {
+ .platform_data = &sdhi2_info,
+ },
+};
+
/* KEY */
#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
@@ -267,11 +493,74 @@ static struct platform_device gpio_keys_device = {
},
};
+/* FSI-AK4648 */
+static struct sh_fsi_platform_info fsi_info = {
+ .port_a = {
+ .tx_id = SHDMA_SLAVE_FSI2A_TX,
+ },
+};
+
+static struct resource fsi_resources[] = {
+ [0] = {
+ .name = "FSI",
+ .start = 0xEC230000,
+ .end = 0xEC230400 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = gic_spi(146),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device fsi_device = {
+ .name = "sh_fsi2",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(fsi_resources),
+ .resource = fsi_resources,
+ .dev = {
+ .platform_data = &fsi_info,
+ },
+};
+
+static struct asoc_simple_dai_init_info fsi2_ak4648_init_info = {
+ .fmt = SND_SOC_DAIFMT_LEFT_J,
+ .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM,
+ .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS,
+ .sysclk = 11289600,
+};
+
+static struct asoc_simple_card_info fsi2_ak4648_info = {
+ .name = "AK4648",
+ .card = "FSI2A-AK4648",
+ .cpu_dai = "fsia-dai",
+ .codec = "ak4642-codec.0-0012",
+ .platform = "sh_fsi2",
+ .codec_dai = "ak4642-hifi",
+ .init = &fsi2_ak4648_init_info,
+};
+
+static struct platform_device fsi_ak4648_device = {
+ .name = "asoc-simple-card",
+ .dev = {
+ .platform_data = &fsi2_ak4648_info,
+ },
+};
+
/* I2C */
static struct pcf857x_platform_data pcf8575_pdata = {
.gpio_base = GPIO_PCF8575_BASE,
};
+static struct i2c_board_info i2c0_devices[] = {
+ {
+ I2C_BOARD_INFO("ak4648", 0x12),
+ },
+ {
+ I2C_BOARD_INFO("r2025sd", 0x32),
+ }
+};
+
static struct i2c_board_info i2c1_devices[] = {
{
I2C_BOARD_INFO("st1232-ts", 0x55),
@@ -289,10 +578,14 @@ static struct i2c_board_info i2c3_devices[] = {
static struct platform_device *kzm_devices[] __initdata = {
&smsc_device,
&usb_host_device,
+ &usbhs_device,
&lcdc_device,
&mmc_device,
&sdhi0_device,
+ &sdhi2_device,
&gpio_keys_device,
+ &fsi_device,
+ &fsi_ak4648_device,
};
/*
@@ -350,6 +643,12 @@ device_initcall(as3711_enable_lcdc_backlight);
static void __init kzm_init(void)
{
+ regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
+ ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
+ regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers,
+ ARRAY_SIZE(fixed2v8_power_consumers), 2800000);
+ regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
sh73a0_pinmux_init();
/* enable SCIFA4 */
@@ -427,15 +726,36 @@ static void __init kzm_init(void)
gpio_request(GPIO_PORT15, NULL);
gpio_direction_output(GPIO_PORT15, 1); /* power */
+ /* enable Micro SD */
+ gpio_request(GPIO_FN_SDHID2_0, NULL);
+ gpio_request(GPIO_FN_SDHID2_1, NULL);
+ gpio_request(GPIO_FN_SDHID2_2, NULL);
+ gpio_request(GPIO_FN_SDHID2_3, NULL);
+ gpio_request(GPIO_FN_SDHICMD2, NULL);
+ gpio_request(GPIO_FN_SDHICLK2, NULL);
+ gpio_request(GPIO_PORT14, NULL);
+ gpio_direction_output(GPIO_PORT14, 1); /* power */
+
/* I2C 3 */
gpio_request(GPIO_FN_PORT27_I2C_SCL3, NULL);
gpio_request(GPIO_FN_PORT28_I2C_SDA3, NULL);
+ /* enable FSI2 port A (ak4648) */
+ gpio_request(GPIO_FN_FSIACK, NULL);
+ gpio_request(GPIO_FN_FSIAILR, NULL);
+ gpio_request(GPIO_FN_FSIAIBT, NULL);
+ gpio_request(GPIO_FN_FSIAISLD, NULL);
+ gpio_request(GPIO_FN_FSIAOSLD, NULL);
+
+ /* enable USB */
+ gpio_request(GPIO_FN_VBUS_0, NULL);
+
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*8way */
l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
#endif
+ i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices));
i2c_register_board_info(3, i2c3_devices, ARRAY_SIZE(i2c3_devices));
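[Editor's note, not part of the patch] The codec string in fsi2_ak4648_info above ties directly to the new i2c0_devices entry: the I2C core names a client after its adapter number and 7-bit address ("%d-%04x"), so the ak4648 at address 0x12 on bus 0 becomes "0-0012", which is the suffix of "ak4642-codec.0-0012". A minimal user-space illustration of that naming convention; the "ak4642-codec." prefix is simply copied from the board string, not derived here:

/*
 * Illustration only: how the ".0-0012" suffix in the board file follows
 * from the I2C bus number and slave address used in i2c0_devices.
 */
#include <stdio.h>

int main(void)
{
	int bus = 0;              /* i2c0_devices is registered on bus 0 */
	unsigned int addr = 0x12; /* I2C_BOARD_INFO("ak4648", 0x12) */
	char name[32];

	/* I2C clients are named "<adapter>-<addr %04x>", here "0-0012" */
	snprintf(name, sizeof(name), "ak4642-codec.%d-%04x", bus, addr);
	printf("%s\n", name);     /* prints ak4642-codec.0-0012 */
	return 0;
}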
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 150122a44630..c129542f6aed 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -41,6 +41,8 @@
#include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h>
#include <linux/pm_clock.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/sh_intc.h>
#include <linux/tca6416_keypad.h>
@@ -203,31 +205,32 @@
* amixer set "HPOUTR Mixer DACH" on
*/
-/*
- * FIXME !!
- *
- * gpio_no_direction
- * gpio_pull_down
- * are quick_hack.
- *
- * current gpio frame work doesn't have
- * the method to control only pull up/down/free.
- * this function should be replaced by correct gpio function
- */
-static void __init gpio_no_direction(u32 addr)
+/* Fixed 3.3V and 1.8V regulators to be used by multiple devices */
+static struct regulator_consumer_supply fixed1v8_power_consumers[] =
{
- __raw_writeb(0x00, addr);
-}
+ /*
+ * J22 on mackerel switches mmcif.0 and sdhi.1 between 1.8V and 3.3V
+ * Since we cannot support both voltages, we support the default 1.8V
+ */
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
-static void __init gpio_pull_down(u32 addr)
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
{
- u8 data = __raw_readb(addr);
-
- data &= 0x0F;
- data |= 0xA0;
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.2"),
+};
- __raw_writeb(data, addr);
-}
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
/* MTD */
static struct mtd_partition nor_flash_partitions[] = {
@@ -692,6 +695,7 @@ static struct platform_device usbhs0_device = {
* - J30 "open"
* - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
* - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
*/
#define IRQ8 evt2irq(0x0300)
#define USB_PHY_MODE (1 << 4)
@@ -1322,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
- &usbhs1_device,
&usbhs0_device,
+ &usbhs1_device,
&leds_device,
&fsi_device,
&fsi_ak4643_device,
@@ -1409,6 +1413,12 @@ static void __init mackerel_init(void)
u32 srcr4;
struct clk *clk;
+ regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers,
+ ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
+ regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
/* External clock source */
clk_set_rate(&sh7372_dv_clki_clk, 27000000);
@@ -1458,11 +1468,11 @@ static void __init mackerel_init(void)
/* USBHS0 */
gpio_request(GPIO_FN_VBUS0_0, NULL);
- gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
+ gpio_request_pulldown(GPIO_PORT168CR); /* VBUS0_0 pull down */
/* USBHS1 */
gpio_request(GPIO_FN_VBUS0_1, NULL);
- gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
+ gpio_request_pulldown(GPIO_PORT167CR); /* VBUS0_1 pull down */
gpio_request(GPIO_FN_IDIN_1_113, NULL);
/* enable FSI2 port A (ak4643) */
@@ -1475,8 +1485,8 @@ static void __init mackerel_init(void)
gpio_request(GPIO_PORT9, NULL);
gpio_request(GPIO_PORT10, NULL);
- gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */
- gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */
+ gpio_direction_none(GPIO_PORT9CR); /* FSIAOBT needs no direction */
+ gpio_direction_none(GPIO_PORT10CR); /* FSIAOLR needs no direction */
intc_set_priority(IRQ_FSI, 3); /* irq priority FSI(3) > SMSC911X(2) */
@@ -1614,20 +1624,20 @@ static void __init mackerel_init(void)
platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
- sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
- sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
- sh7372_add_device_to_domain(&sh7372_a4lc, &meram_device);
- sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4lc, &hdmi_lcdc_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device);
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
- sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device);
#endif
- sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi2_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device);
hdmi_init_pm_clock();
sh7372_pm_init();
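[Editor's note, not part of the patch] Both the KZM9G and Mackerel hunks register fixed, always-on regulators so that consumers asking for "vmmc"/"vqmmc" (SDHI, MMCIF) and "vddvario"/"vdd33a" (smsc911x) resolve to a supply when the regulator framework is enabled. A hedged sketch of the consumer side; example_probe and its message are mine, not from the patch:

/*
 * Sketch only: regulator_get() matches the device name "sh_mobile_sdhi.0"
 * plus the supply id "vmmc" against the REGULATOR_SUPPLY() table above
 * and returns the always-on fixed 3.3V rail registered in mackerel_init().
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct device *dev)	/* hypothetical consumer */
{
	struct regulator *vmmc = regulator_get(dev, "vmmc");

	if (IS_ERR(vmmc))
		return PTR_ERR(vmmc);

	dev_info(dev, "vmmc = %d uV\n", regulator_get_voltage(vmmc));
	regulator_put(vmmc);
	return 0;
}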
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 14de3787cafc..fcf5a47f4772 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -27,6 +27,8 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/dma-mapping.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <mach/hardware.h>
#include <mach/r8a7779.h>
@@ -37,6 +39,12 @@
#include <asm/hardware/gic.h>
#include <asm/traps.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
/* SMSC LAN89218 */
static struct resource smsc911x_resources[] = {
[0] = {
@@ -59,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
static struct platform_device eth_device = {
.name = "smsc911x",
- .id = 0,
+ .id = -1,
.dev = {
.platform_data = &smsc911x_platdata,
},
@@ -73,6 +81,8 @@ static struct platform_device *marzen_devices[] __initdata = {
static void __init marzen_init(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
r8a7779_pinmux_init();
/* SCIF2 (CN18: DEBUG0) */
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
index 26eea5f21054..ad5fccc7b5e7 100644
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ b/arch/arm/mach-shmobile/clock-r8a7740.c
@@ -43,7 +43,10 @@
/* CPG registers */
#define FRQCRA 0xe6150000
#define FRQCRB 0xe6150004
+#define VCLKCR1 0xE6150008
+#define VCLKCR2 0xE615000c
#define FRQCRC 0xe61500e0
+#define FSIACKCR 0xe6150018
#define PLLC01CR 0xe6150028
#define SUBCKCR 0xe6150080
@@ -54,6 +57,8 @@
#define MSTPSR2 0xe6150040
#define MSTPSR3 0xe6150048
#define MSTPSR4 0xe615004c
+#define FSIBCKCR 0xe6150090
+#define HDMICKCR 0xe6150094
#define SMSTPCR0 0xe6150130
#define SMSTPCR1 0xe6150134
#define SMSTPCR2 0xe6150138
@@ -271,6 +276,13 @@ static struct clk usb24_clk = {
.parent = &usb24s_clk,
};
+/* External FSIACK/FSIBCK clock */
+static struct clk fsiack_clk = {
+};
+
+static struct clk fsibck_clk = {
+};
+
struct clk *main_clks[] = {
&extalr_clk,
&extal1_clk,
@@ -288,6 +300,8 @@ struct clk *main_clks[] = {
&pllc1_div2_clk,
&usb24s_clk,
&usb24_clk,
+ &fsiack_clk,
+ &fsibck_clk,
};
static void div4_kick(struct clk *clk)
@@ -313,6 +327,107 @@ static struct clk_div4_table div4_table = {
.kick = div4_kick,
};
+/* DIV6 reparent */
+enum {
+ DIV6_HDMI,
+ DIV6_VCLK1, DIV6_VCLK2,
+ DIV6_FSIA, DIV6_FSIB,
+ DIV6_REPARENT_NR,
+};
+
+static struct clk *hdmi_parent[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &system_clk,
+ [2] = &dv_clk
+};
+
+static struct clk *vclk_parents[8] = {
+ [0] = &pllc1_div2_clk,
+ [2] = &dv_clk,
+ [3] = &usb24s_clk,
+ [4] = &extal1_div2_clk,
+ [5] = &extalr_clk,
+};
+
+static struct clk *fsia_parents[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &fsiack_clk, /* external clock */
+};
+
+static struct clk *fsib_parents[] = {
+ [0] = &pllc1_div2_clk,
+ [1] = &fsibck_clk, /* external clock */
+};
+
+static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
+ [DIV6_HDMI] = SH_CLK_DIV6_EXT(HDMICKCR, 0,
+ hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2),
+ [DIV6_VCLK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0,
+ vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3),
+ [DIV6_VCLK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0,
+ vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3),
+ [DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0,
+ fsia_parents, ARRAY_SIZE(fsia_parents), 6, 2),
+ [DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0,
+ fsib_parents, ARRAY_SIZE(fsib_parents), 6, 2),
+};
+
+/* HDMI1/2 clock */
+static unsigned long hdmi12_recalc(struct clk *clk)
+{
+ u32 val = __raw_readl(HDMICKCR);
+ int shift = (int)clk->priv;
+
+ val >>= shift;
+ val &= 0x3;
+
+ return clk->parent->rate / (1 << val);
+};
+
+static int hdmi12_set_rate(struct clk *clk, unsigned long rate)
+{
+ u32 val, mask;
+ int i, shift;
+
+ for (i = 0; i < 3; i++)
+ if (rate == clk->parent->rate / (1 << i))
+ goto find;
+ return -ENODEV;
+
+find:
+ shift = (int)clk->priv;
+
+ val = __raw_readl(HDMICKCR);
+ mask = ~(0x3 << shift);
+ val = (val & mask) | i << shift;
+ __raw_writel(val, HDMICKCR);
+
+ return 0;
+};
+
+static struct sh_clk_ops hdmi12_clk_ops = {
+ .recalc = hdmi12_recalc,
+ .set_rate = hdmi12_set_rate,
+};
+
+static struct clk hdmi1_clk = {
+ .ops = &hdmi12_clk_ops,
+ .priv = (void *)9,
+ .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */
+};
+
+static struct clk hdmi2_clk = {
+ .ops = &hdmi12_clk_ops,
+ .priv = (void *)11,
+ .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */
+};
+
+static struct clk *late_main_clks[] = {
+ &hdmi1_clk,
+ &hdmi2_clk,
+};
+
+/* MSTP */
enum {
DIV4_I, DIV4_ZG, DIV4_B, DIV4_M1, DIV4_HP,
DIV4_HPP, DIV4_USBP, DIV4_S, DIV4_ZB, DIV4_M3, DIV4_CP,
@@ -343,11 +458,12 @@ static struct clk div6_clks[DIV6_NR] = {
};
enum {
- MSTP125,
+ MSTP128, MSTP127, MSTP125,
MSTP116, MSTP111, MSTP100, MSTP117,
MSTP230,
MSTP222,
+ MSTP218, MSTP217, MSTP216, MSTP214,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP329, MSTP328, MSTP323, MSTP320,
@@ -360,6 +476,8 @@ enum {
};
static struct clk mstp_clks[MSTP_NR] = {
+ [MSTP128] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 28, 0), /* CEU21 */
+ [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */
[MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
[MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
[MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
@@ -368,6 +486,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
[MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
+ [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
+ [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
+ [MSTP216] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
+ [MSTP214] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */
[MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
[MSTP204] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -408,6 +530,12 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("pllc1_clk", &pllc1_clk),
CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk),
CLKDEV_CON_ID("usb24s", &usb24s_clk),
+ CLKDEV_CON_ID("hdmi1", &hdmi1_clk),
+ CLKDEV_CON_ID("hdmi2", &hdmi2_clk),
+ CLKDEV_CON_ID("video1", &div6_reparent_clks[DIV6_VCLK1]),
+ CLKDEV_CON_ID("video2", &div6_reparent_clks[DIV6_VCLK2]),
+ CLKDEV_CON_ID("fsiack", &fsiack_clk),
+ CLKDEV_CON_ID("fsibck", &fsibck_clk),
/* DIV4 clocks */
CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]),
@@ -430,6 +558,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]),
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]),
CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]),
+ CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP128]),
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]),
CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]),
@@ -438,7 +568,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]),
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]),
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]),
-
+ CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]),
+ CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]),
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]),
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]),
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]),
@@ -459,6 +592,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_ICK_ID("phy", "renesas_usbhs", &mstp_clks[MSTP406]),
CLKDEV_ICK_ID("pci", "renesas_usbhs", &div4_clks[DIV4_USBP]),
CLKDEV_ICK_ID("usb24", "renesas_usbhs", &usb24_clk),
+ CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
+
+ CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
+ CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
};
void __init r8a7740_clock_init(u8 md_ck)
@@ -495,7 +632,14 @@ void __init r8a7740_clock_init(u8 md_ck)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_div6_reparent_register(div6_reparent_clks,
+ DIV6_REPARENT_NR);
+
+ if (!ret)
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
+
+ for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+ ret = clk_register(late_main_clks[k]);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
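[Editor's note, not part of the patch] The new hdmi1_clk/hdmi2_clk share one ops structure and differ only in clk->priv, which holds the shift of their 2-bit divider field in HDMICKCR (9 and 11 respectively); hdmi12_recalc() returns parent_rate / (1 << field), and hdmi12_set_rate() only ever programs dividers 1, 2 or 4. A small stand-alone illustration of that arithmetic; the register value and parent rate below are made up:

/* Illustration only: mirrors the divider math in hdmi12_recalc(). */
#include <stdio.h>

static unsigned long hdmi12_rate(unsigned long parent, unsigned int hdmickcr,
				 int shift)
{
	unsigned int val = (hdmickcr >> shift) & 0x3;

	return parent / (1UL << val);
}

int main(void)
{
	unsigned long parent = 48000000;	/* hypothetical parent rate */
	unsigned int reg = (1 << 9) | (2 << 11); /* hdmi1: /2, hdmi2: /4 */

	printf("hdmi1: %lu Hz\n", hdmi12_rate(parent, reg, 9));	 /* 24000000 */
	printf("hdmi2: %lu Hz\n", hdmi12_rate(parent, reg, 11)); /* 12000000 */
	return 0;
}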
diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c
index 7d6e9fe47b56..339c62c824d5 100644
--- a/arch/arm/mach-shmobile/clock-r8a7779.c
+++ b/arch/arm/mach-shmobile/clock-r8a7779.c
@@ -162,7 +162,7 @@ void __init r8a7779_clock_init(void)
ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
ret = clk_register(late_main_clks[k]);
diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c
index 006e7b5d304c..162b791b8984 100644
--- a/arch/arm/mach-shmobile/clock-sh7367.c
+++ b/arch/arm/mach-shmobile/clock-sh7367.c
@@ -344,7 +344,7 @@ void __init sh7367_clock_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 94d1f88246d3..5a2894b1c965 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -704,7 +704,7 @@ void __init sh7372_clock_init(void)
ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
ret = clk_register(late_main_clks[k]);
diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c
index 0798a15936c3..85f2a3ec2c44 100644
--- a/arch/arm/mach-shmobile/clock-sh7377.c
+++ b/arch/arm/mach-shmobile/clock-sh7377.c
@@ -355,7 +355,7 @@ void __init sh7377_clock_init(void)
ret = sh_clk_div6_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
clkdev_add_table(lookups, ARRAY_SIZE(lookups));
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 3946c4ba2aa8..7f8da18a8580 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
enum { MSTP001,
MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
- MSTP219, MSTP218,
+ MSTP219, MSTP218, MSTP217,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP331, MSTP329, MSTP325, MSTP323,
+ MSTP331, MSTP329, MSTP328, MSTP325, MSTP323, MSTP322,
MSTP314, MSTP313, MSTP312, MSTP311,
MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
@@ -498,6 +498,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
+ [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* MP-DMAC */
[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -507,8 +508,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
[MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+ [MSTP328] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 28, 0), /*FSI*/
[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
+ [MSTP322] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 22, 0), /* USB */
[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -553,6 +556,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
+ CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* MP-DMAC */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -562,8 +566,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+ CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI */
CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
+ CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP322]), /* USB */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
@@ -612,7 +618,7 @@ void __init sh73a0_clock_init(void)
ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
if (!ret)
- ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
+ ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
ret = clk_register(late_main_clks[k]);
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 01e2bc014f15..45e61dada030 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -77,6 +77,7 @@ extern void r8a7779_add_standard_devices(void);
extern void r8a7779_clock_init(void);
extern void r8a7779_pinmux_init(void);
extern void r8a7779_pm_init(void);
+extern void r8a7740_meram_workaround(void);
extern unsigned int r8a7779_get_core_count(void);
extern int r8a7779_platform_cpu_kill(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/include/mach/dma-register.h b/arch/arm/mach-shmobile/include/mach/dma-register.h
new file mode 100644
index 000000000000..97c40bd9b94f
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/dma-register.h
@@ -0,0 +1,84 @@
+/*
+ * SH-ARM CPU-specific DMA definitions, used by both DMA drivers
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp
+ *
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on arch/sh/include/cpu-sh4/cpu/dma-register.h
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef DMA_REGISTER_H
+#define DMA_REGISTER_H
+
+/*
+ * Direct Memory Access Controller
+ */
+
+/* Transmit sizes and respective CHCR register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_64BIT = 7,
+ XMIT_SZ_128BIT = 3,
+ XMIT_SZ_256BIT = 4,
+ XMIT_SZ_512BIT = 5,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+static const unsigned int dma_ts_shift[] = {
+ [XMIT_SZ_8BIT] = 0,
+ [XMIT_SZ_16BIT] = 1,
+ [XMIT_SZ_32BIT] = 2,
+ [XMIT_SZ_64BIT] = 3,
+ [XMIT_SZ_128BIT] = 4,
+ [XMIT_SZ_256BIT] = 5,
+ [XMIT_SZ_512BIT] = 6,
+};
+
+#define TS_LOW_BIT 0x3 /* --xx */
+#define TS_HI_BIT 0xc /* xx-- */
+
+#define TS_LOW_SHIFT (3)
+#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */
+
+#define TS_INDEX2VAL(i) \
+ ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
+ (((i) & TS_HI_BIT) << TS_HI_SHIFT))
+
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+
+
+/*
+ * USB High-Speed DMAC
+ */
+/* Transmit sizes and respective CHCR register values */
+enum {
+ USBTS_XMIT_SZ_8BYTE = 0,
+ USBTS_XMIT_SZ_16BYTE = 1,
+ USBTS_XMIT_SZ_32BYTE = 2,
+};
+
+/* log2(size / 8) - used to calculate number of transfers */
+static const unsigned int dma_usbts_shift[] = {
+ [USBTS_XMIT_SZ_8BYTE] = 3,
+ [USBTS_XMIT_SZ_16BYTE] = 4,
+ [USBTS_XMIT_SZ_32BYTE] = 5,
+};
+
+#define USBTS_LOW_BIT 0x3 /* --xx */
+#define USBTS_HI_BIT 0x0 /* ---- */
+
+#define USBTS_LOW_SHIFT 6
+#define USBTS_HI_SHIFT 0
+
+#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
+
+#endif /* DMA_REGISTER_H */
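[Editor's note, not part of the patch] The CHCR transfer-size field handled by this header is split: two low bits at shift 3 and two high bits at shift 18 (20 minus the 2 bits already consumed by the low half). TS_INDEX2VAL() reassembles the split field before CHCR_TX()/CHCR_RX() OR in the direction bits. A stand-alone check of the macro for two of the sizes defined above:

/* Illustration only: reuses the macros from dma-register.h verbatim. */
#include <stdio.h>

#define TS_LOW_BIT	0x3
#define TS_HI_BIT	0xc
#define TS_LOW_SHIFT	3
#define TS_HI_SHIFT	(20 - 2)

#define TS_INDEX2VAL(i) \
	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) | \
	 (((i) & TS_HI_BIT) << TS_HI_SHIFT))

int main(void)
{
	/* XMIT_SZ_32BIT = 2: only the low field is used */
	printf("32bit: 0x%08x\n", (unsigned int)TS_INDEX2VAL(2)); /* 0x00000010 */
	/* XMIT_SZ_64BIT = 7: low field 3, high field 1 */
	printf("64bit: 0x%08x\n", (unsigned int)TS_INDEX2VAL(7)); /* 0x00100018 */
	return 0;
}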
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index de795b42232a..844507d937cb 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sh_pfc.h>
+#include <linux/io.h>
#ifdef CONFIG_GPIOLIB
@@ -27,4 +28,35 @@ static inline int irq_to_gpio(unsigned int irq)
#endif /* CONFIG_GPIOLIB */
+/*
+ * FIXME !!
+ *
+ * The current GPIO framework has no way to control only the
+ * pull up/down/free state, so these helpers poke the port control
+ * registers directly. They should be replaced by proper GPIO
+ * functions once such an interface exists.
+ */
+static inline void __init gpio_direction_none(u32 addr)
+{
+ __raw_writeb(0x00, addr);
+}
+
+static inline void __init gpio_request_pullup(u32 addr)
+{
+ u8 data = __raw_readb(addr);
+
+ data &= 0x0F;
+ data |= 0xC0;
+ __raw_writeb(data, addr);
+}
+
+static inline void __init gpio_request_pulldown(u32 addr)
+{
+ u8 data = __raw_readb(addr);
+
+ data &= 0x0F;
+ data |= 0xA0;
+
+ __raw_writeb(data, addr);
+}
+
#endif /* __ASM_ARCH_GPIO_H */
diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h
new file mode 100644
index 000000000000..5a402840fe28
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef PM_RMOBILE_H
+#define PM_RMOBILE_H
+
+#include <linux/pm_domain.h>
+
+struct platform_device;
+
+struct rmobile_pm_domain {
+ struct generic_pm_domain genpd;
+ struct dev_power_governor *gov;
+ int (*suspend)(void);
+ void (*resume)(void);
+ unsigned int bit_shift;
+ bool no_debug;
+};
+
+static inline
+struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct rmobile_pm_domain, genpd);
+}
+
+#ifdef CONFIG_PM
+extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd);
+extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
+ struct platform_device *pdev);
+extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
+ struct rmobile_pm_domain *rmobile_sd);
+#else
+#define rmobile_init_pm_domain(pd) do { } while (0)
+#define rmobile_add_device_to_domain(pd, pdev) do { } while (0)
+#define rmobile_pm_add_subdomain(pd, sd) do { } while (0)
+#endif /* CONFIG_PM */
+
+#endif /* PM_RMOBILE_H */
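[Editor's note, not part of the patch] The header only declares the API; the flow used by the SoC and board files later in this series is: the SoC code defines rmobile_pm_domain objects and calls rmobile_init_pm_domain() once per domain, then board code attaches its platform devices with rmobile_add_device_to_domain() so genpd can gate their power and clocks. A minimal hedged sketch with made-up names (example_pd_a4lc, lcdc_device):

/* Sketch only: hypothetical SoC/board glue for the API declared above. */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/pm-rmobile.h>

extern struct platform_device lcdc_device;	/* assumed board device */

static struct rmobile_pm_domain example_pd_a4lc = {
	.genpd.name = "A4LC",
	.bit_shift  = 1,	/* PSTR/SPDCR/SWUCR bit for this domain */
};

static void __init example_pm_init(void)
{
	rmobile_init_pm_domain(&example_pd_a4lc);	/* SoC side */
}

static void __init example_board_init(void)
{
	/* board side: put the LCDC into A4LC and hook up its clocks */
	rmobile_add_device_to_domain(&example_pd_a4lc, &lcdc_device);
}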
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7740.h b/arch/arm/mach-shmobile/include/mach/r8a7740.h
index 9d447abb969c..7143147780df 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7740.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7740.h
@@ -19,6 +19,8 @@
#ifndef __ASM_R8A7740_H__
#define __ASM_R8A7740_H__
+#include <mach/pm-rmobile.h>
+
/*
* MD_CKx pin
*/
@@ -139,7 +141,7 @@ enum {
GPIO_FN_DBGMD10, GPIO_FN_DBGMD11, GPIO_FN_DBGMD20,
GPIO_FN_DBGMD21,
- /* FSI */
+ /* FSI-A */
GPIO_FN_FSIAISLD_PORT0, /* FSIAISLD Port 0/5 */
GPIO_FN_FSIAISLD_PORT5,
GPIO_FN_FSIASPDIF_PORT9, /* FSIASPDIF Port 9/18 */
@@ -150,6 +152,9 @@ enum {
GPIO_FN_FSIACK, GPIO_FN_FSIAILR,
GPIO_FN_FSIAIBT,
+ /* FSI-B */
+ GPIO_FN_FSIBCK,
+
/* FMSI */
GPIO_FN_FMSISLD_PORT1, /* FMSISLD Port 1/6 */
GPIO_FN_FMSISLD_PORT6,
@@ -565,6 +570,10 @@ enum {
GPIO_FN_RESETP_PULLUP,
GPIO_FN_RESETP_PLAIN,
+ /* HDMI */
+ GPIO_FN_HDMI_HPD,
+ GPIO_FN_HDMI_CEC,
+
/* SDENC */
GPIO_FN_SDENC_CPG,
GPIO_FN_SDENC_DV_CLKI,
@@ -581,4 +590,26 @@ enum {
GPIO_FN_TRACEAUD_FROM_MEMC,
};
+/* DMA slave IDs */
+enum {
+ SHDMA_SLAVE_INVALID,
+ SHDMA_SLAVE_SDHI0_RX,
+ SHDMA_SLAVE_SDHI0_TX,
+ SHDMA_SLAVE_SDHI1_RX,
+ SHDMA_SLAVE_SDHI1_TX,
+ SHDMA_SLAVE_SDHI2_RX,
+ SHDMA_SLAVE_SDHI2_TX,
+ SHDMA_SLAVE_FSIA_RX,
+ SHDMA_SLAVE_FSIA_TX,
+ SHDMA_SLAVE_FSIB_TX,
+ SHDMA_SLAVE_USBHS_TX,
+ SHDMA_SLAVE_USBHS_RX,
+};
+
+#ifdef CONFIG_PM
+extern struct rmobile_pm_domain r8a7740_pd_a4s;
+extern struct rmobile_pm_domain r8a7740_pd_a3sp;
+extern struct rmobile_pm_domain r8a7740_pd_a4lc;
+#endif /* CONFIG_PM */
+
#endif /* __ASM_R8A7740_H__ */
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 915d0093da08..b59048e6d8fd 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -13,6 +13,7 @@
#include <linux/sh_clk.h>
#include <linux/pm_domain.h>
+#include <mach/pm-rmobile.h>
/*
* Pin Function Controller:
@@ -477,42 +478,16 @@ extern struct clk sh7372_fsibck_clk;
extern struct clk sh7372_fsidiva_clk;
extern struct clk sh7372_fsidivb_clk;
-struct platform_device;
-
-struct sh7372_pm_domain {
- struct generic_pm_domain genpd;
- struct dev_power_governor *gov;
- int (*suspend)(void);
- void (*resume)(void);
- unsigned int bit_shift;
- bool no_debug;
-};
-
-static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
-{
- return container_of(d, struct sh7372_pm_domain, genpd);
-}
-
#ifdef CONFIG_PM
-extern struct sh7372_pm_domain sh7372_a4lc;
-extern struct sh7372_pm_domain sh7372_a4mp;
-extern struct sh7372_pm_domain sh7372_d4;
-extern struct sh7372_pm_domain sh7372_a4r;
-extern struct sh7372_pm_domain sh7372_a3rv;
-extern struct sh7372_pm_domain sh7372_a3ri;
-extern struct sh7372_pm_domain sh7372_a4s;
-extern struct sh7372_pm_domain sh7372_a3sp;
-extern struct sh7372_pm_domain sh7372_a3sg;
-
-extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
-extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
- struct platform_device *pdev);
-extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
- struct sh7372_pm_domain *sh7372_sd);
-#else
-#define sh7372_init_pm_domain(pd) do { } while(0)
-#define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
-#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
+extern struct rmobile_pm_domain sh7372_pd_a4lc;
+extern struct rmobile_pm_domain sh7372_pd_a4mp;
+extern struct rmobile_pm_domain sh7372_pd_d4;
+extern struct rmobile_pm_domain sh7372_pd_a4r;
+extern struct rmobile_pm_domain sh7372_pd_a3rv;
+extern struct rmobile_pm_domain sh7372_pd_a3ri;
+extern struct rmobile_pm_domain sh7372_pd_a4s;
+extern struct rmobile_pm_domain sh7372_pd_a3sp;
+extern struct rmobile_pm_domain sh7372_pd_a3sg;
#endif /* CONFIG_PM */
extern void sh7372_intcs_suspend(void);
diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h
index 398e2c10913b..fe950f25d793 100644
--- a/arch/arm/mach-shmobile/include/mach/sh73a0.h
+++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h
@@ -516,6 +516,13 @@ enum {
SHDMA_SLAVE_SDHI2_RX,
SHDMA_SLAVE_MMCIF_TX,
SHDMA_SLAVE_MMCIF_RX,
+ SHDMA_SLAVE_FSI2A_TX,
+ SHDMA_SLAVE_FSI2A_RX,
+ SHDMA_SLAVE_FSI2B_TX,
+ SHDMA_SLAVE_FSI2B_RX,
+ SHDMA_SLAVE_FSI2C_TX,
+ SHDMA_SLAVE_FSI2C_RX,
+ SHDMA_SLAVE_FSI2D_RX,
};
/*
diff --git a/arch/arm/mach-shmobile/intc-r8a7740.c b/arch/arm/mach-shmobile/intc-r8a7740.c
index 09c42afcb22d..9a69a31918ba 100644
--- a/arch/arm/mach-shmobile/intc-r8a7740.c
+++ b/arch/arm/mach-shmobile/intc-r8a7740.c
@@ -71,10 +71,12 @@ enum {
DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3,
DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR,
SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM,
+ HDMI,
USBH_INT, USBH_OHCI, USBH_EHCI, USBH_PME, USBH_BIND,
RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF,
SPU2_0, SPU2_1,
FSI, FMSI,
+ HDMI_SSS, HDMI_KEY,
IPMMU,
AP_ARM_CTIIRQ, AP_ARM_PMURQ,
MFIS2,
@@ -182,6 +184,7 @@ static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(USBH_EHCI, 0x1580),
INTC_VECT(USBH_PME, 0x15A0),
INTC_VECT(USBH_BIND, 0x15C0),
+ INTC_VECT(HDMI, 0x1700),
INTC_VECT(RSPI_OVRF, 0x1780),
INTC_VECT(RSPI_SPTEF, 0x17A0),
INTC_VECT(RSPI_SPRF, 0x17C0),
@@ -189,6 +192,8 @@ static struct intc_vect intca_vectors[] __initdata = {
INTC_VECT(SPU2_1, 0x1820),
INTC_VECT(FSI, 0x1840),
INTC_VECT(FMSI, 0x1860),
+ INTC_VECT(HDMI_SSS, 0x18A0),
+ INTC_VECT(HDMI_KEY, 0x18C0),
INTC_VECT(IPMMU, 0x1920),
INTC_VECT(AP_ARM_CTIIRQ, 0x1980),
INTC_VECT(AP_ARM_PMURQ, 0x19A0),
@@ -304,11 +309,11 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = {
USBH_EHCI, USBH_PME, USBH_BIND, 0 } },
/* IMR3A3 / IMCR3A3 */
{ /* IMR4A3 / IMCR4A3 */ 0xe6950090, 0xe69500d0, 8,
- { 0, 0, 0, 0,
+ { HDMI, 0, 0, 0,
RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF, 0 } },
{ /* IMR5A3 / IMCR5A3 */ 0xe6950094, 0xe69500d4, 8,
{ SPU2_0, SPU2_1, FSI, FMSI,
- 0, 0, 0, 0 } },
+ 0, HDMI_SSS, HDMI_KEY, 0 } },
{ /* IMR6A3 / IMCR6A3 */ 0xe6950098, 0xe69500d8, 8,
{ 0, IPMMU, 0, 0,
AP_ARM_CTIIRQ, AP_ARM_PMURQ, 0, 0 } },
@@ -353,10 +358,10 @@ static struct intc_prio_reg intca_prio_registers[] __initdata = {
{ 0xe6950014, 0, 16, 4, /* IPRFA3 */ { USBH2, 0, 0, 0 } },
/* IPRGA3 */
/* IPRHA3 */
- /* IPRIA3 */
+ { 0xe6950020, 0, 16, 4, /* IPRIA3 */ { HDMI, 0, 0, 0 } },
{ 0xe6950024, 0, 16, 4, /* IPRJA3 */ { RSPI, 0, 0, 0 } },
{ 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } },
- /* IPRLA3 */
+ { 0xe695002c, 0, 16, 4, /* IPRLA3 */ { 0, HDMI_SSS, HDMI_KEY, 0 } },
{ 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU, 0, 0, 0 } },
{ 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } },
{ 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S,
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c857..588555a67d9c 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
return 0; /* always allow wakeup */
}
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
#define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/arm/mach-shmobile/pfc-r8a7740.c b/arch/arm/mach-shmobile/pfc-r8a7740.c
index 670fe1869dbc..ce9e7fa5cc8a 100644
--- a/arch/arm/mach-shmobile/pfc-r8a7740.c
+++ b/arch/arm/mach-shmobile/pfc-r8a7740.c
@@ -169,7 +169,7 @@ enum {
DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK,
DBGMD21_MARK,
- /* FSI */
+ /* FSI-A */
FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */
FSIAISLD_PORT5_MARK,
FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */
@@ -178,6 +178,9 @@ enum {
FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK,
FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK,
+ /* FSI-B */
+ FSIBCK_MARK,
+
/* FMSI */
FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */
FMSISLD_PORT6_MARK,
@@ -560,6 +563,9 @@ enum {
/* SDENC */
SDENC_CPG_MARK, SDENC_DV_CLKI_MARK,
+ /* HDMI */
+ HDMI_HPD_MARK, HDMI_CEC_MARK,
+
/* DEBUG */
EDEBGREQ_PULLUP_MARK, /* for JTAG */
EDEBGREQ_PULLDOWN_MARK,
@@ -771,6 +777,7 @@ static pinmux_enum_t pinmux_data[] = {
/* Port11 */
PINMUX_DATA(FSIACK_MARK, PORT11_FN1),
+ PINMUX_DATA(FSIBCK_MARK, PORT11_FN2),
PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0),
/* Port12 */
@@ -1254,7 +1261,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(A21_MARK, PORT120_FN1),
PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2),
PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0),
- PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_0),
+ PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_1),
/* Port121 */
PINMUX_DATA(A20_MARK, PORT121_FN1),
@@ -1616,13 +1623,15 @@ static pinmux_enum_t pinmux_data[] = {
/* Port209 */
PINMUX_DATA(VBUS_MARK, PORT209_FN1),
- PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_1),
+ PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_0),
/* Port210 */
PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1),
+ PINMUX_DATA(HDMI_HPD_MARK, PORT210_FN1),
/* Port211 */
PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1),
+ PINMUX_DATA(HDMI_CEC_MARK, PORT211_FN1),
/* LCDC select */
PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0),
@@ -1691,7 +1700,7 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20),
GPIO_FN(DBGMD21),
- /* FSI */
+ /* FSI-A */
GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */
GPIO_FN(FSIAISLD_PORT5),
GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */
@@ -1700,6 +1709,9 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC),
GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT),
+ /* FSI-B */
+ GPIO_FN(FSIBCK),
+
/* FMSI */
GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */
GPIO_FN(FMSISLD_PORT6),
@@ -2097,6 +2109,10 @@ static struct pinmux_gpio pinmux_gpios[] = {
GPIO_FN(SDENC_CPG),
GPIO_FN(SDENC_DV_CLKI),
+ /* HDMI */
+ GPIO_FN(HDMI_HPD),
+ GPIO_FN(HDMI_CEC),
+
/* SYSC */
GPIO_FN(RESETP_PULLUP),
GPIO_FN(RESETP_PLAIN),
diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
new file mode 100644
index 000000000000..893504d012a6
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm-r8a7740.c
@@ -0,0 +1,54 @@
+/*
+ * r8a7740 power management support
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/console.h>
+#include <mach/pm-rmobile.h>
+
+#ifdef CONFIG_PM
+static int r8a7740_pd_a4s_suspend(void)
+{
+ /*
+ * The A4S domain contains the CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ return -EBUSY;
+}
+
+struct rmobile_pm_domain r8a7740_pd_a4s = {
+ .genpd.name = "A4S",
+ .bit_shift = 10,
+ .gov = &pm_domain_always_on_gov,
+ .no_debug = true,
+ .suspend = r8a7740_pd_a4s_suspend,
+};
+
+static int r8a7740_pd_a3sp_suspend(void)
+{
+ /*
+ * Serial consoles make use of SCIF hardware located in A3SP,
+ * keep such power domain on if "no_console_suspend" is set.
+ */
+ return console_suspend_enabled ? 0 : -EBUSY;
+}
+
+struct rmobile_pm_domain r8a7740_pd_a3sp = {
+ .genpd.name = "A3SP",
+ .bit_shift = 11,
+ .gov = &pm_domain_always_on_gov,
+ .no_debug = true,
+ .suspend = r8a7740_pd_a3sp_suspend,
+};
+
+struct rmobile_pm_domain r8a7740_pd_a4lc = {
+ .genpd.name = "A4LC",
+ .bit_shift = 1,
+};
+
+#endif /* CONFIG_PM */
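[Editor's note, not part of the patch] The .suspend hook above is a veto, not a suspend routine: rmobile_pd_power_down() calls it first and aborts the power-down when it returns an error, which is why A4S (containing the CPU core) returns -EBUSY unconditionally and A3SP only does so while the serial console must stay alive. A hedged sketch of the same pattern with a made-up condition:

/* Sketch only: a hypothetical domain using the same veto convention. */
#include <linux/errno.h>
#include <linux/types.h>
#include <mach/pm-rmobile.h>

static bool example_board_busy;		/* assumed to be set by board code */

static int example_pd_suspend(void)
{
	/* a non-zero return keeps the domain powered */
	return example_board_busy ? -EBUSY : 0;
}

struct rmobile_pm_domain example_pd = {
	.genpd.name = "EXAMPLE",
	.bit_shift  = 4,		/* hypothetical PSTR bit */
	.suspend    = example_pd_suspend,
};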
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
new file mode 100644
index 000000000000..a8562540f1d6
--- /dev/null
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -0,0 +1,167 @@
+/*
+ * rmobile power management support
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on pm-sh7372.c
+ * Copyright (C) 2011 Magnus Damm
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_clock.h>
+#include <asm/io.h>
+#include <mach/pm-rmobile.h>
+
+/* SYSC */
+#define SPDCR 0xe6180008
+#define SWUCR 0xe6180014
+#define PSTR 0xe6180080
+
+#define PSTR_RETRIES 100
+#define PSTR_DELAY_US 10
+
+#ifdef CONFIG_PM
+static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
+{
+ struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
+ unsigned int mask = 1 << rmobile_pd->bit_shift;
+
+ if (rmobile_pd->suspend) {
+ int ret = rmobile_pd->suspend();
+
+ if (ret)
+ return ret;
+ }
+
+ if (__raw_readl(PSTR) & mask) {
+ unsigned int retry_count;
+ __raw_writel(mask, SPDCR);
+
+ for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SPDCR) & mask))
+ break;
+ cpu_relax();
+ }
+ }
+
+ if (!rmobile_pd->no_debug)
+ pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
+ genpd->name, mask, __raw_readl(PSTR));
+
+ return 0;
+}
+
+static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd,
+ bool do_resume)
+{
+ unsigned int mask = 1 << rmobile_pd->bit_shift;
+ unsigned int retry_count;
+ int ret = 0;
+
+ if (__raw_readl(PSTR) & mask)
+ goto out;
+
+ __raw_writel(mask, SWUCR);
+
+ for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SWUCR) & mask))
+ break;
+ if (retry_count > PSTR_RETRIES)
+ udelay(PSTR_DELAY_US);
+ else
+ cpu_relax();
+ }
+ if (!retry_count)
+ ret = -EIO;
+
+ if (!rmobile_pd->no_debug)
+ pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
+ rmobile_pd->genpd.name, mask, __raw_readl(PSTR));
+
+out:
+ if (ret == 0 && rmobile_pd->resume && do_resume)
+ rmobile_pd->resume();
+
+ return ret;
+}
+
+static int rmobile_pd_power_up(struct generic_pm_domain *genpd)
+{
+ return __rmobile_pd_power_up(to_rmobile_pd(genpd), true);
+}
+
+static bool rmobile_pd_active_wakeup(struct device *dev)
+{
+ bool (*active_wakeup)(struct device *dev);
+
+ active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
+ return active_wakeup ? active_wakeup(dev) : true;
+}
+
+static int rmobile_pd_stop_dev(struct device *dev)
+{
+ int (*stop)(struct device *dev);
+
+ stop = dev_gpd_data(dev)->ops.stop;
+ if (stop) {
+ int ret = stop(dev);
+ if (ret)
+ return ret;
+ }
+ return pm_clk_suspend(dev);
+}
+
+static int rmobile_pd_start_dev(struct device *dev)
+{
+ int (*start)(struct device *dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ start = dev_gpd_data(dev)->ops.start;
+ if (start)
+ ret = start(dev);
+
+ return ret;
+}
+
+void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
+{
+ struct generic_pm_domain *genpd = &rmobile_pd->genpd;
+ struct dev_power_governor *gov = rmobile_pd->gov;
+
+ pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
+ genpd->dev_ops.stop = rmobile_pd_stop_dev;
+ genpd->dev_ops.start = rmobile_pd_start_dev;
+ genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup;
+ genpd->dev_irq_safe = true;
+ genpd->power_off = rmobile_pd_power_down;
+ genpd->power_on = rmobile_pd_power_up;
+ __rmobile_pd_power_up(rmobile_pd, false);
+}
+
+void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ pm_genpd_add_device(&rmobile_pd->genpd, dev);
+ if (pm_clk_no_clocks(dev))
+ pm_clk_add(dev, NULL);
+}
+
+void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd,
+ struct rmobile_pm_domain *rmobile_sd)
+{
+ pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd);
+}
+#endif /* CONFIG_PM */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index a3bdb12acde9..792037069226 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -26,6 +26,7 @@
#include <asm/suspend.h>
#include <mach/common.h>
#include <mach/sh7372.h>
+#include <mach/pm-rmobile.h>
/* DBG */
#define DBGREG1 0xe6100020
@@ -41,13 +42,10 @@
#define PLLC01STPCR 0xe61500c8
/* SYSC */
-#define SPDCR 0xe6180008
-#define SWUCR 0xe6180014
#define SBAR 0xe6180020
#define WUPRMSK 0xe6180028
#define WUPSMSK 0xe618002c
#define WUPSMSK2 0xe6180048
-#define PSTR 0xe6180080
#define WUPSFAC 0xe6180098
#define IRQCR 0xe618022c
#define IRQCR2 0xe6180238
@@ -71,188 +69,48 @@
/* AP-System Core */
#define APARMBAREA 0xe6f10020
-#define PSTR_RETRIES 100
-#define PSTR_DELAY_US 10
-
#ifdef CONFIG_PM
-static int pd_power_down(struct generic_pm_domain *genpd)
-{
- struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
- unsigned int mask = 1 << sh7372_pd->bit_shift;
-
- if (sh7372_pd->suspend) {
- int ret = sh7372_pd->suspend();
-
- if (ret)
- return ret;
- }
-
- if (__raw_readl(PSTR) & mask) {
- unsigned int retry_count;
-
- __raw_writel(mask, SPDCR);
-
- for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(SPDCR) & mask))
- break;
- cpu_relax();
- }
- }
-
- if (!sh7372_pd->no_debug)
- pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n",
- genpd->name, mask, __raw_readl(PSTR));
-
- return 0;
-}
-
-static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume)
-{
- unsigned int mask = 1 << sh7372_pd->bit_shift;
- unsigned int retry_count;
- int ret = 0;
-
- if (__raw_readl(PSTR) & mask)
- goto out;
-
- __raw_writel(mask, SWUCR);
-
- for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(SWUCR) & mask))
- break;
- if (retry_count > PSTR_RETRIES)
- udelay(PSTR_DELAY_US);
- else
- cpu_relax();
- }
- if (!retry_count)
- ret = -EIO;
-
- if (!sh7372_pd->no_debug)
- pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
- sh7372_pd->genpd.name, mask, __raw_readl(PSTR));
-
- out:
- if (ret == 0 && sh7372_pd->resume && do_resume)
- sh7372_pd->resume();
-
- return ret;
-}
-
-static int pd_power_up(struct generic_pm_domain *genpd)
-{
- return __pd_power_up(to_sh7372_pd(genpd), true);
-}
-
-static int sh7372_a4r_suspend(void)
-{
- sh7372_intcs_suspend();
- __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
- return 0;
-}
-
-static bool pd_active_wakeup(struct device *dev)
-{
- bool (*active_wakeup)(struct device *dev);
-
- active_wakeup = dev_gpd_data(dev)->ops.active_wakeup;
- return active_wakeup ? active_wakeup(dev) : true;
-}
-
-static int sh7372_stop_dev(struct device *dev)
-{
- int (*stop)(struct device *dev);
-
- stop = dev_gpd_data(dev)->ops.stop;
- if (stop) {
- int ret = stop(dev);
- if (ret)
- return ret;
- }
- return pm_clk_suspend(dev);
-}
-
-static int sh7372_start_dev(struct device *dev)
-{
- int (*start)(struct device *dev);
- int ret;
-
- ret = pm_clk_resume(dev);
- if (ret)
- return ret;
-
- start = dev_gpd_data(dev)->ops.start;
- if (start)
- ret = start(dev);
-
- return ret;
-}
-
-void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
-{
- struct generic_pm_domain *genpd = &sh7372_pd->genpd;
- struct dev_power_governor *gov = sh7372_pd->gov;
-
- pm_genpd_init(genpd, gov ? : &simple_qos_governor, false);
- genpd->dev_ops.stop = sh7372_stop_dev;
- genpd->dev_ops.start = sh7372_start_dev;
- genpd->dev_ops.active_wakeup = pd_active_wakeup;
- genpd->dev_irq_safe = true;
- genpd->power_off = pd_power_down;
- genpd->power_on = pd_power_up;
- __pd_power_up(sh7372_pd, false);
-}
-
-void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
-
- pm_genpd_add_device(&sh7372_pd->genpd, dev);
- if (pm_clk_no_clocks(dev))
- pm_clk_add(dev, NULL);
-}
-
-void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
- struct sh7372_pm_domain *sh7372_sd)
-{
- pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
-}
-
-struct sh7372_pm_domain sh7372_a4lc = {
+struct rmobile_pm_domain sh7372_pd_a4lc = {
.genpd.name = "A4LC",
.bit_shift = 1,
};
-struct sh7372_pm_domain sh7372_a4mp = {
+struct rmobile_pm_domain sh7372_pd_a4mp = {
.genpd.name = "A4MP",
.bit_shift = 2,
};
-struct sh7372_pm_domain sh7372_d4 = {
+struct rmobile_pm_domain sh7372_pd_d4 = {
.genpd.name = "D4",
.bit_shift = 3,
};
-struct sh7372_pm_domain sh7372_a4r = {
+static int sh7372_a4r_pd_suspend(void)
+{
+ sh7372_intcs_suspend();
+ __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
+ return 0;
+}
+
+struct rmobile_pm_domain sh7372_pd_a4r = {
.genpd.name = "A4R",
.bit_shift = 5,
- .suspend = sh7372_a4r_suspend,
+ .suspend = sh7372_a4r_pd_suspend,
.resume = sh7372_intcs_resume,
};
-struct sh7372_pm_domain sh7372_a3rv = {
+struct rmobile_pm_domain sh7372_pd_a3rv = {
.genpd.name = "A3RV",
.bit_shift = 6,
};
-struct sh7372_pm_domain sh7372_a3ri = {
+struct rmobile_pm_domain sh7372_pd_a3ri = {
.genpd.name = "A3RI",
.bit_shift = 8,
};
-static int sh7372_a4s_suspend(void)
+static int sh7372_pd_a4s_suspend(void)
{
/*
* The A4S domain contains the CPU core and therefore it should
@@ -261,15 +119,15 @@ static int sh7372_a4s_suspend(void)
return -EBUSY;
}
-struct sh7372_pm_domain sh7372_a4s = {
+struct rmobile_pm_domain sh7372_pd_a4s = {
.genpd.name = "A4S",
.bit_shift = 10,
.gov = &pm_domain_always_on_gov,
.no_debug = true,
- .suspend = sh7372_a4s_suspend,
+ .suspend = sh7372_pd_a4s_suspend,
};
-static int sh7372_a3sp_suspend(void)
+static int sh7372_a3sp_pd_suspend(void)
{
/*
* Serial consoles make use of SCIF hardware located in A3SP,
@@ -278,32 +136,22 @@ static int sh7372_a3sp_suspend(void)
return console_suspend_enabled ? 0 : -EBUSY;
}
-struct sh7372_pm_domain sh7372_a3sp = {
+struct rmobile_pm_domain sh7372_pd_a3sp = {
.genpd.name = "A3SP",
.bit_shift = 11,
.gov = &pm_domain_always_on_gov,
.no_debug = true,
- .suspend = sh7372_a3sp_suspend,
+ .suspend = sh7372_a3sp_pd_suspend,
};
-struct sh7372_pm_domain sh7372_a3sg = {
+struct rmobile_pm_domain sh7372_pd_a3sg = {
.genpd.name = "A3SG",
.bit_shift = 13,
};
-#else /* !CONFIG_PM */
-
-static inline void sh7372_a3sp_init(void) {}
-
-#endif /* !CONFIG_PM */
+#endif /* CONFIG_PM */
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
-static int sh7372_do_idle_core_standby(unsigned long unused)
-{
- cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
- return 0;
-}
-
static void sh7372_set_reset_vector(unsigned long address)
{
/* set reset vector, translate 4k */
@@ -311,21 +159,6 @@ static void sh7372_set_reset_vector(unsigned long address)
__raw_writel(0, APARMBAREA);
}
-static void sh7372_enter_core_standby(void)
-{
- sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
-
- /* enter sleep mode with SYSTBCR to 0x10 */
- __raw_writel(0x10, SYSTBCR);
- cpu_suspend(0, sh7372_do_idle_core_standby);
- __raw_writel(0, SYSTBCR);
-
- /* disable reset vector translation */
- __raw_writel(0, SBAR);
-}
-#endif
-
-#ifdef CONFIG_SUSPEND
static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode)
{
if (pllc0_on)
@@ -465,22 +298,42 @@ static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2)
static void sh7372_enter_a3sm_common(int pllc0_on)
{
+ /* use INTCA together with SYSC for wakeup */
+ sh7372_setup_sysc(1 << 0, 0);
sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
sh7372_enter_sysc(pllc0_on, 1 << 12);
}
+#endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */
-static void sh7372_enter_a4s_common(int pllc0_on)
+#ifdef CONFIG_CPU_IDLE
+static int sh7372_do_idle_core_standby(unsigned long unused)
{
- sh7372_intca_suspend();
- memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
- sh7372_set_reset_vector(SMFRAM);
- sh7372_enter_sysc(pllc0_on, 1 << 10);
- sh7372_intca_resume();
+ cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+ return 0;
}
-#endif
+static void sh7372_enter_core_standby(void)
+{
+ sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc));
-#ifdef CONFIG_CPU_IDLE
+ /* enter sleep mode with SYSTBCR to 0x10 */
+ __raw_writel(0x10, SYSTBCR);
+ cpu_suspend(0, sh7372_do_idle_core_standby);
+ __raw_writel(0, SYSTBCR);
+
+ /* disable reset vector translation */
+ __raw_writel(0, SBAR);
+}
+
+static void sh7372_enter_a3sm_pll_on(void)
+{
+ sh7372_enter_a3sm_common(1);
+}
+
+static void sh7372_enter_a3sm_pll_off(void)
+{
+ sh7372_enter_a3sm_common(0);
+}
static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
{
@@ -492,7 +345,24 @@ static void sh7372_cpuidle_setup(struct cpuidle_driver *drv)
state->target_residency = 20 + 10;
state->flags = CPUIDLE_FLAG_TIME_VALID;
shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby;
+ drv->state_count++;
+
+ state = &drv->states[drv->state_count];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
+ strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN);
+ state->exit_latency = 20;
+ state->target_residency = 30 + 20;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on;
+ drv->state_count++;
+ state = &drv->states[drv->state_count];
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C4");
+ strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN);
+ state->exit_latency = 120;
+ state->target_residency = 30 + 120;
+ state->flags = CPUIDLE_FLAG_TIME_VALID;
+ shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off;
drv->state_count++;
}
@@ -505,6 +375,14 @@ static void sh7372_cpuidle_init(void) {}
#endif
#ifdef CONFIG_SUSPEND
+static void sh7372_enter_a4s_common(int pllc0_on)
+{
+ sh7372_intca_suspend();
+ memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100);
+ sh7372_set_reset_vector(SMFRAM);
+ sh7372_enter_sysc(pllc0_on, 1 << 10);
+ sh7372_intca_resume();
+}
static int sh7372_enter_suspend(suspend_state_t suspend_state)
{
@@ -512,24 +390,21 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state)
/* check active clocks to determine potential wakeup sources */
if (sh7372_sysc_valid(&msk, &msk2)) {
- /* convert INTC mask and sense to SYSC mask and sense */
- sh7372_setup_sysc(msk, msk2);
-
if (!console_suspend_enabled &&
- sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) {
+ sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) {
+ /* convert INTC mask/sense to SYSC mask/sense */
+ sh7372_setup_sysc(msk, msk2);
+
/* enter A4S sleep with PLLC0 off */
pr_debug("entering A4S\n");
sh7372_enter_a4s_common(0);
- } else {
- /* enter A3SM sleep with PLLC0 off */
- pr_debug("entering A3SM\n");
- sh7372_enter_a3sm_common(0);
+ return 0;
}
- } else {
- /* default to Core Standby that supports all wakeup sources */
- pr_debug("entering Core Standby\n");
- sh7372_enter_core_standby();
}
+
+ /* default to enter A3SM sleep with PLLC0 off */
+ pr_debug("entering A3SM\n");
+ sh7372_enter_a3sm_common(0);
return 0;
}
@@ -550,7 +425,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier,
* executed during system suspend and resume, respectively, so
* that those functions don't crash while accessing the INTCS.
*/
- pm_genpd_poweron(&sh7372_a4r.genpd);
+ pm_genpd_poweron(&sh7372_pd_a4r.genpd);
break;
case PM_POST_SUSPEND:
pm_genpd_poweroff_unused();
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index ec4eb49c1693..78948a9dba0e 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -23,9 +23,14 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
+#include <linux/dma-mapping.h>
+#include <mach/dma-register.h>
#include <mach/r8a7740.h>
+#include <mach/pm-rmobile.h>
#include <mach/common.h>
#include <mach/irqs.h>
#include <asm/mach-types.h>
@@ -276,6 +281,272 @@ static struct platform_device *r8a7740_early_devices[] __initdata = {
&cmt10_device,
};
+/* DMA */
+static const struct sh_dmae_slave_config r8a7740_dmae_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SDHI0_TX,
+ .addr = 0xe6850030,
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI0_RX,
+ .addr = 0xe6850030,
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_TX,
+ .addr = 0xe6860030,
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
+ .mid_rid = 0xc9,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI1_RX,
+ .addr = 0xe6860030,
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
+ .mid_rid = 0xca,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI2_TX,
+ .addr = 0xe6870030,
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
+ .mid_rid = 0xcd,
+ }, {
+ .slave_id = SHDMA_SLAVE_SDHI2_RX,
+ .addr = 0xe6870030,
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
+ .mid_rid = 0xce,
+ }, {
+ .slave_id = SHDMA_SLAVE_FSIA_TX,
+ .addr = 0xfe1f0024,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xb1,
+ }, {
+ .slave_id = SHDMA_SLAVE_FSIA_RX,
+ .addr = 0xfe1f0020,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xb2,
+ }, {
+ .slave_id = SHDMA_SLAVE_FSIB_TX,
+ .addr = 0xfe1f0064,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xb5,
+ },
+};
+
+#define DMA_CHANNEL(a, b, c) \
+{ \
+ .offset = a, \
+ .dmars = b, \
+ .dmars_bit = c, \
+ .chclr_offset = (0x220 - 0x20) + a \
+}
+
+static const struct sh_dmae_channel r8a7740_dmae_channels[] = {
+ DMA_CHANNEL(0x00, 0, 0),
+ DMA_CHANNEL(0x10, 0, 8),
+ DMA_CHANNEL(0x20, 4, 0),
+ DMA_CHANNEL(0x30, 4, 8),
+ DMA_CHANNEL(0x50, 8, 0),
+ DMA_CHANNEL(0x60, 8, 8),
+};
+
+static struct sh_dmae_pdata dma_platform_data = {
+ .slave = r8a7740_dmae_slaves,
+ .slave_num = ARRAY_SIZE(r8a7740_dmae_slaves),
+ .channel = r8a7740_dmae_channels,
+ .channel_num = ARRAY_SIZE(r8a7740_dmae_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+};
+
+/* Resource order important! */
+static struct resource r8a7740_dmae0_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe008020,
+ .end = 0xfe00828f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfe009000,
+ .end = 0xfe00900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x20c0),
+ .end = evt2irq(0x20c0),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x2000),
+ .end = evt2irq(0x20a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* Resource order important! */
+static struct resource r8a7740_dmae1_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe018020,
+ .end = 0xfe01828f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfe019000,
+ .end = 0xfe01900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x21c0),
+ .end = evt2irq(0x21c0),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x2100),
+ .end = evt2irq(0x21a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* Resource order important! */
+static struct resource r8a7740_dmae2_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xfe028020,
+ .end = 0xfe02828f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xfe029000,
+ .end = 0xfe02900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = evt2irq(0x22c0),
+ .end = evt2irq(0x22c0),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-5 */
+ .start = evt2irq(0x2200),
+ .end = evt2irq(0x22a0),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = r8a7740_dmae0_resources,
+ .num_resources = ARRAY_SIZE(r8a7740_dmae0_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = r8a7740_dmae1_resources,
+ .num_resources = ARRAY_SIZE(r8a7740_dmae1_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+static struct platform_device dma2_device = {
+ .name = "sh-dma-engine",
+ .id = 2,
+ .resource = r8a7740_dmae2_resources,
+ .num_resources = ARRAY_SIZE(r8a7740_dmae2_resources),
+ .dev = {
+ .platform_data = &dma_platform_data,
+ },
+};
+
+/* USB-DMAC */
+static const struct sh_dmae_channel r8a7740_usb_dma_channels[] = {
+ {
+ .offset = 0,
+ }, {
+ .offset = 0x20,
+ },
+};
+
+static const struct sh_dmae_slave_config r8a7740_usb_dma_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_USBHS_TX,
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
+ }, {
+ .slave_id = SHDMA_SLAVE_USBHS_RX,
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
+ },
+};
+
+static struct sh_dmae_pdata usb_dma_platform_data = {
+ .slave = r8a7740_usb_dma_slaves,
+ .slave_num = ARRAY_SIZE(r8a7740_usb_dma_slaves),
+ .channel = r8a7740_usb_dma_channels,
+ .channel_num = ARRAY_SIZE(r8a7740_usb_dma_channels),
+ .ts_low_shift = USBTS_LOW_SHIFT,
+ .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT,
+ .ts_high_shift = USBTS_HI_SHIFT,
+ .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT,
+ .ts_shift = dma_usbts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_usbts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chcr_offset = 0x14,
+ .chcr_ie_bit = 1 << 5,
+ .dmaor_is_32bit = 1,
+ .needs_tend_set = 1,
+ .no_dmars = 1,
+ .slave_only = 1,
+};
+
+static struct resource r8a7740_usb_dma_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xe68a0020,
+ .end = 0xe68a0064 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* VCR/SWR/DMICR */
+ .start = 0xe68a0000,
+ .end = 0xe68a0014 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* IRQ for channels */
+ .start = evt2irq(0x0a00),
+ .end = evt2irq(0x0a00),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device usb_dma_device = {
+ .name = "sh-dma-engine",
+ .id = 3,
+ .resource = r8a7740_usb_dma_resources,
+ .num_resources = ARRAY_SIZE(r8a7740_usb_dma_resources),
+ .dev = {
+ .platform_data = &usb_dma_platform_data,
+ },
+};
+
/* I2C */
static struct resource i2c0_resources[] = {
[0] = {
@@ -322,8 +593,30 @@ static struct platform_device i2c1_device = {
static struct platform_device *r8a7740_late_devices[] __initdata = {
&i2c0_device,
&i2c1_device,
+ &dma0_device,
+ &dma1_device,
+ &dma2_device,
+ &usb_dma_device,
};
+/*
+ * The r8a7740 chip has a long-standing erratum affecting the MERAM buffer.
+ * This is a workaround for it; see "Media RAM (MERAM)" in the r8a7740
+ * documentation.
+ */
+#define MEBUFCNTR 0xFE950098
+void r8a7740_meram_workaround(void)
+{
+ void __iomem *reg;
+
+ reg = ioremap_nocache(MEBUFCNTR, 4);
+ if (reg) {
+ iowrite32(0x01600164, reg);
+ iounmap(reg);
+ }
+}
+
#define ICCR 0x0004
#define ICSTART 0x0070
@@ -380,10 +673,31 @@ void __init r8a7740_add_standard_devices(void)
r8a7740_i2c_workaround(&i2c0_device);
r8a7740_i2c_workaround(&i2c1_device);
+ /* PM domain */
+ rmobile_init_pm_domain(&r8a7740_pd_a4s);
+ rmobile_init_pm_domain(&r8a7740_pd_a3sp);
+ rmobile_init_pm_domain(&r8a7740_pd_a4lc);
+
+ rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp);
+
+ /* add devices */
platform_add_devices(r8a7740_early_devices,
ARRAY_SIZE(r8a7740_early_devices));
platform_add_devices(r8a7740_late_devices,
ARRAY_SIZE(r8a7740_late_devices));
+
+ /* add devices to PM domain */
+
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif0_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif1_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif2_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif3_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif4_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif5_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif6_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif7_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scifb_device);
+ rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &i2c1_device);
}
static void __init r8a7740_earlytimer_init(void)
@@ -403,3 +717,49 @@ void __init r8a7740_add_early_devices(void)
/* override timer setup with soc-specific code */
shmobile_timer.init = r8a7740_earlytimer_init;
}
+
+#ifdef CONFIG_USE_OF
+
+void __init r8a7740_add_early_devices_dt(void)
+{
+ shmobile_setup_delay(800, 1, 3); /* Cortex-A9 @ 800MHz */
+
+ early_platform_add_devices(r8a7740_early_devices,
+ ARRAY_SIZE(r8a7740_early_devices));
+
+ /* set up the early console here as well */
+ shmobile_setup_console();
+}
+
+static const struct of_dev_auxdata r8a7740_auxdata_lookup[] __initconst = {
+ { }
+};
+
+void __init r8a7740_add_standard_devices_dt(void)
+{
+ /* clocks are set up late during boot in the case of DT */
+ r8a7740_clock_init(0);
+
+ platform_add_devices(r8a7740_early_devices,
+ ARRAY_SIZE(r8a7740_early_devices));
+
+ of_platform_populate(NULL, of_default_bus_match_table,
+ r8a7740_auxdata_lookup, NULL);
+}
+
+static const char *r8a7740_boards_compat_dt[] __initdata = {
+ "renesas,r8a7740",
+ NULL,
+};
+
+DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)")
+ .map_io = r8a7740_map_io,
+ .init_early = r8a7740_add_early_devices_dt,
+ .init_irq = r8a7740_init_irq,
+ .handle_irq = shmobile_handle_irq_intc,
+ .init_machine = r8a7740_add_standard_devices_dt,
+ .timer = &shmobile_timer,
+ .dt_compat = r8a7740_boards_compat_dt,
+MACHINE_END
+
+#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index fafce9ce8218..838a87be1d5c 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -33,6 +33,7 @@
#include <linux/sh_timer.h>
#include <linux/pm_domain.h>
#include <linux/dma-mapping.h>
+#include <mach/dma-register.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/sh7372.h>
@@ -335,151 +336,126 @@ static struct platform_device iic1_device = {
};
/* DMA */
-/* Transmit sizes and respective CHCR register values */
-enum {
- XMIT_SZ_8BIT = 0,
- XMIT_SZ_16BIT = 1,
- XMIT_SZ_32BIT = 2,
- XMIT_SZ_64BIT = 7,
- XMIT_SZ_128BIT = 3,
- XMIT_SZ_256BIT = 4,
- XMIT_SZ_512BIT = 5,
-};
-
-/* log2(size / 8) - used to calculate number of transfers */
-#define TS_SHIFT { \
- [XMIT_SZ_8BIT] = 0, \
- [XMIT_SZ_16BIT] = 1, \
- [XMIT_SZ_32BIT] = 2, \
- [XMIT_SZ_64BIT] = 3, \
- [XMIT_SZ_128BIT] = 4, \
- [XMIT_SZ_256BIT] = 5, \
- [XMIT_SZ_512BIT] = 6, \
-}
-
-#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | \
- (((i) & 0xc) << (20 - 2)))
-
static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
.addr = 0xe6c40020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x21,
}, {
.slave_id = SHDMA_SLAVE_SCIF0_RX,
.addr = 0xe6c40024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x22,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_TX,
.addr = 0xe6c50020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x25,
}, {
.slave_id = SHDMA_SLAVE_SCIF1_RX,
.addr = 0xe6c50024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x26,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_TX,
.addr = 0xe6c60020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x29,
}, {
.slave_id = SHDMA_SLAVE_SCIF2_RX,
.addr = 0xe6c60024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x2a,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_TX,
.addr = 0xe6c70020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x2d,
}, {
.slave_id = SHDMA_SLAVE_SCIF3_RX,
.addr = 0xe6c70024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x2e,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_TX,
.addr = 0xe6c80020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x39,
}, {
.slave_id = SHDMA_SLAVE_SCIF4_RX,
.addr = 0xe6c80024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x3a,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_TX,
.addr = 0xe6cb0020,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x35,
}, {
.slave_id = SHDMA_SLAVE_SCIF5_RX,
.addr = 0xe6cb0024,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x36,
}, {
.slave_id = SHDMA_SLAVE_SCIF6_TX,
.addr = 0xe6c30040,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_TX(XMIT_SZ_8BIT),
.mid_rid = 0x3d,
}, {
.slave_id = SHDMA_SLAVE_SCIF6_RX,
.addr = 0xe6c30060,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .chcr = CHCR_RX(XMIT_SZ_8BIT),
.mid_rid = 0x3e,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_TX,
.addr = 0xe6850030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
.mid_rid = 0xc1,
}, {
.slave_id = SHDMA_SLAVE_SDHI0_RX,
.addr = 0xe6850030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
.mid_rid = 0xc2,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_TX,
.addr = 0xe6860030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
.mid_rid = 0xc9,
}, {
.slave_id = SHDMA_SLAVE_SDHI1_RX,
.addr = 0xe6860030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
.mid_rid = 0xca,
}, {
.slave_id = SHDMA_SLAVE_SDHI2_TX,
.addr = 0xe6870030,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_TX(XMIT_SZ_16BIT),
.mid_rid = 0xcd,
}, {
.slave_id = SHDMA_SLAVE_SDHI2_RX,
.addr = 0xe6870030,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .chcr = CHCR_RX(XMIT_SZ_16BIT),
.mid_rid = 0xce,
}, {
.slave_id = SHDMA_SLAVE_FSIA_TX,
.addr = 0xfe1f0024,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
.mid_rid = 0xb1,
}, {
.slave_id = SHDMA_SLAVE_FSIA_RX,
.addr = 0xfe1f0020,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
.mid_rid = 0xb2,
}, {
.slave_id = SHDMA_SLAVE_MMCIF_TX,
.addr = 0xe6bd0034,
- .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
.mid_rid = 0xd1,
}, {
.slave_id = SHDMA_SLAVE_MMCIF_RX,
.addr = 0xe6bd0034,
- .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
.mid_rid = 0xd2,
},
};
@@ -520,19 +496,17 @@ static const struct sh_dmae_channel sh7372_dmae_channels[] = {
}
};
-static const unsigned int ts_shift[] = TS_SHIFT;
-
static struct sh_dmae_pdata dma_platform_data = {
.slave = sh7372_dmae_slaves,
.slave_num = ARRAY_SIZE(sh7372_dmae_slaves),
.channel = sh7372_dmae_channels,
.channel_num = ARRAY_SIZE(sh7372_dmae_channels),
- .ts_low_shift = 3,
- .ts_low_mask = 0x18,
- .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */
- .ts_high_mask = 0x00300000,
- .ts_shift = ts_shift,
- .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
.dmaor_init = DMAOR_DME,
.chclr_present = 1,
};
@@ -654,17 +628,6 @@ static struct platform_device dma2_device = {
/*
* USB-DMAC
*/
-
-unsigned int usbts_shift[] = {3, 4, 5};
-
-enum {
- XMIT_SZ_8BYTE = 0,
- XMIT_SZ_16BYTE = 1,
- XMIT_SZ_32BYTE = 2,
-};
-
-#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
-
static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
{
.offset = 0,
@@ -677,10 +640,10 @@ static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = {
static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = {
{
.slave_id = SHDMA_SLAVE_USB0_TX,
- .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
}, {
.slave_id = SHDMA_SLAVE_USB0_RX,
- .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
},
};
@@ -689,12 +652,12 @@ static struct sh_dmae_pdata usb_dma0_platform_data = {
.slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves),
.channel = sh7372_usb_dmae_channels,
.channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
- .ts_low_shift = 6,
- .ts_low_mask = 0xc0,
- .ts_high_shift = 0,
- .ts_high_mask = 0,
- .ts_shift = usbts_shift,
- .ts_shift_num = ARRAY_SIZE(usbts_shift),
+ .ts_low_shift = USBTS_LOW_SHIFT,
+ .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT,
+ .ts_high_shift = USBTS_HI_SHIFT,
+ .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT,
+ .ts_shift = dma_usbts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_usbts_shift),
.dmaor_init = DMAOR_DME,
.chcr_offset = 0x14,
.chcr_ie_bit = 1 << 5,
@@ -739,10 +702,10 @@ static struct platform_device usb_dma0_device = {
static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = {
{
.slave_id = SHDMA_SLAVE_USB1_TX,
- .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
}, {
.slave_id = SHDMA_SLAVE_USB1_RX,
- .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE),
+ .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
},
};
@@ -751,12 +714,12 @@ static struct sh_dmae_pdata usb_dma1_platform_data = {
.slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves),
.channel = sh7372_usb_dmae_channels,
.channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels),
- .ts_low_shift = 6,
- .ts_low_mask = 0xc0,
- .ts_high_shift = 0,
- .ts_high_mask = 0,
- .ts_shift = usbts_shift,
- .ts_shift_num = ARRAY_SIZE(usbts_shift),
+ .ts_low_shift = USBTS_LOW_SHIFT,
+ .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT,
+ .ts_high_shift = USBTS_HI_SHIFT,
+ .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT,
+ .ts_shift = dma_usbts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_usbts_shift),
.dmaor_init = DMAOR_DME,
.chcr_offset = 0x14,
.chcr_ie_bit = 1 << 5,
@@ -1038,21 +1001,21 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
void __init sh7372_add_standard_devices(void)
{
- sh7372_init_pm_domain(&sh7372_a4lc);
- sh7372_init_pm_domain(&sh7372_a4mp);
- sh7372_init_pm_domain(&sh7372_d4);
- sh7372_init_pm_domain(&sh7372_a4r);
- sh7372_init_pm_domain(&sh7372_a3rv);
- sh7372_init_pm_domain(&sh7372_a3ri);
- sh7372_init_pm_domain(&sh7372_a4s);
- sh7372_init_pm_domain(&sh7372_a3sp);
- sh7372_init_pm_domain(&sh7372_a3sg);
-
- sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
- sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);
-
- sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg);
- sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp);
+ rmobile_init_pm_domain(&sh7372_pd_a4lc);
+ rmobile_init_pm_domain(&sh7372_pd_a4mp);
+ rmobile_init_pm_domain(&sh7372_pd_d4);
+ rmobile_init_pm_domain(&sh7372_pd_a4r);
+ rmobile_init_pm_domain(&sh7372_pd_a3rv);
+ rmobile_init_pm_domain(&sh7372_pd_a3ri);
+ rmobile_init_pm_domain(&sh7372_pd_a4s);
+ rmobile_init_pm_domain(&sh7372_pd_a3sp);
+ rmobile_init_pm_domain(&sh7372_pd_a3sg);
+
+ rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv);
+ rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc);
+
+ rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg);
+ rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp);
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
@@ -1060,30 +1023,30 @@ void __init sh7372_add_standard_devices(void)
platform_add_devices(sh7372_late_devices,
ARRAY_SIZE(sh7372_late_devices));
- sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
- sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
- sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif0_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif1_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif2_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif3_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif4_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif5_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &scif6_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &iic1_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &dma0_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &dma1_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &dma2_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma0_device);
- sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma1_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &iic0_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &veu0_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &veu1_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &tmu00_device);
- sh7372_add_device_to_domain(&sh7372_a4r, &tmu01_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device);
+ rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device);
}
static void __init sh7372_earlytimer_init(void)
diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c
index d576a6abbade..855b1506caf8 100644
--- a/arch/arm/mach-shmobile/setup-sh7377.c
+++ b/arch/arm/mach-shmobile/setup-sh7377.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/uio_driver.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -500,3 +501,49 @@ void __init sh7377_add_early_devices(void)
/* override timer setup with soc-specific code */
shmobile_timer.init = sh7377_earlytimer_init;
}
+
+#ifdef CONFIG_USE_OF
+
+void __init sh7377_add_early_devices_dt(void)
+{
+ shmobile_setup_delay(600, 1, 3); /* Cortex-A8 @ 600MHz */
+
+ early_platform_add_devices(sh7377_early_devices,
+ ARRAY_SIZE(sh7377_early_devices));
+
+ /* set up the early console here as well */
+ shmobile_setup_console();
+}
+
+static const struct of_dev_auxdata sh7377_auxdata_lookup[] __initconst = {
+ { }
+};
+
+void __init sh7377_add_standard_devices_dt(void)
+{
+ /* clocks are set up late during boot in the case of DT */
+ sh7377_clock_init();
+
+ platform_add_devices(sh7377_early_devices,
+ ARRAY_SIZE(sh7377_early_devices));
+
+ of_platform_populate(NULL, of_default_bus_match_table,
+ sh7377_auxdata_lookup, NULL);
+}
+
+static const char *sh7377_boards_compat_dt[] __initdata = {
+ "renesas,sh7377",
+ NULL,
+};
+
+DT_MACHINE_START(SH7377_DT, "Generic SH7377 (Flattened Device Tree)")
+ .map_io = sh7377_map_io,
+ .init_early = sh7377_add_early_devices_dt,
+ .init_irq = sh7377_init_irq,
+ .handle_irq = shmobile_handle_irq_intc,
+ .init_machine = sh7377_add_standard_devices_dt,
+ .timer = &shmobile_timer,
+ .dt_compat = sh7377_boards_compat_dt,
+MACHINE_END
+
+#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 04a0dfe75493..d230af656fc9 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -30,6 +30,7 @@
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
+#include <mach/dma-register.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/sh73a0.h>
@@ -415,32 +416,6 @@ static struct platform_device i2c4_device = {
.num_resources = ARRAY_SIZE(i2c4_resources),
};
-/* Transmit sizes and respective CHCR register values */
-enum {
- XMIT_SZ_8BIT = 0,
- XMIT_SZ_16BIT = 1,
- XMIT_SZ_32BIT = 2,
- XMIT_SZ_64BIT = 7,
- XMIT_SZ_128BIT = 3,
- XMIT_SZ_256BIT = 4,
- XMIT_SZ_512BIT = 5,
-};
-
-/* log2(size / 8) - used to calculate number of transfers */
-#define TS_SHIFT { \
- [XMIT_SZ_8BIT] = 0, \
- [XMIT_SZ_16BIT] = 1, \
- [XMIT_SZ_32BIT] = 2, \
- [XMIT_SZ_64BIT] = 3, \
- [XMIT_SZ_128BIT] = 4, \
- [XMIT_SZ_256BIT] = 5, \
- [XMIT_SZ_512BIT] = 6, \
-}
-
-#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | (((i) & 0xc) << (20 - 2)))
-#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
-#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
-
static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
{
.slave_id = SHDMA_SLAVE_SCIF0_TX,
@@ -604,19 +579,17 @@ static const struct sh_dmae_channel sh73a0_dmae_channels[] = {
DMAE_CHANNEL(0x8980),
};
-static const unsigned int ts_shift[] = TS_SHIFT;
-
static struct sh_dmae_pdata sh73a0_dmae_platform_data = {
.slave = sh73a0_dmae_slaves,
.slave_num = ARRAY_SIZE(sh73a0_dmae_slaves),
.channel = sh73a0_dmae_channels,
.channel_num = ARRAY_SIZE(sh73a0_dmae_channels),
- .ts_low_shift = 3,
- .ts_low_mask = 0x18,
- .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */
- .ts_high_mask = 0x00300000,
- .ts_shift = ts_shift,
- .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
.dmaor_init = DMAOR_DME,
};
@@ -651,6 +624,116 @@ static struct platform_device dma0_device = {
},
};
+/* MPDMAC */
+static const struct sh_dmae_slave_config sh73a0_mpdma_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_FSI2A_RX,
+ .addr = 0xec230020,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd6, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2A_TX,
+ .addr = 0xec230024,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd5, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2C_RX,
+ .addr = 0xec230060,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0xda, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2C_TX,
+ .addr = 0xec230064,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0xd9, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2B_RX,
+ .addr = 0xec240020,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0x8e, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2B_TX,
+ .addr = 0xec240024,
+ .chcr = CHCR_TX(XMIT_SZ_32BIT),
+ .mid_rid = 0x8d, /* CHECK ME */
+ }, {
+ .slave_id = SHDMA_SLAVE_FSI2D_RX,
+ .addr = 0xec240060,
+ .chcr = CHCR_RX(XMIT_SZ_32BIT),
+ .mid_rid = 0x9a, /* CHECK ME */
+ },
+};
+
+#define MPDMA_CHANNEL(a, b, c) \
+{ \
+ .offset = a, \
+ .dmars = b, \
+ .dmars_bit = c, \
+ .chclr_offset = (0x220 - 0x20) + a \
+}
+
+static const struct sh_dmae_channel sh73a0_mpdma_channels[] = {
+ MPDMA_CHANNEL(0x00, 0, 0),
+ MPDMA_CHANNEL(0x10, 0, 8),
+ MPDMA_CHANNEL(0x20, 4, 0),
+ MPDMA_CHANNEL(0x30, 4, 8),
+ MPDMA_CHANNEL(0x50, 8, 0),
+ MPDMA_CHANNEL(0x70, 8, 8),
+};
+
+static struct sh_dmae_pdata sh73a0_mpdma_platform_data = {
+ .slave = sh73a0_mpdma_slaves,
+ .slave_num = ARRAY_SIZE(sh73a0_mpdma_slaves),
+ .channel = sh73a0_mpdma_channels,
+ .channel_num = ARRAY_SIZE(sh73a0_mpdma_channels),
+ .ts_low_shift = TS_LOW_SHIFT,
+ .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT,
+ .ts_high_shift = TS_HI_SHIFT,
+ .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT,
+ .ts_shift = dma_ts_shift,
+ .ts_shift_num = ARRAY_SIZE(dma_ts_shift),
+ .dmaor_init = DMAOR_DME,
+ .chclr_present = 1,
+};
+
+/* Resource order important! */
+static struct resource sh73a0_mpdma_resources[] = {
+ {
+ /* Channel registers and DMAOR */
+ .start = 0xec618020,
+ .end = 0xec61828f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMARSx */
+ .start = 0xec619000,
+ .end = 0xec61900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "error_irq",
+ .start = gic_spi(181),
+ .end = gic_spi(181),
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 0-5 */
+ .start = gic_spi(175),
+ .end = gic_spi(180),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mpdma0_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh73a0_mpdma_resources,
+ .num_resources = ARRAY_SIZE(sh73a0_mpdma_resources),
+ .dev = {
+ .platform_data = &sh73a0_mpdma_platform_data,
+ },
+};
+
static struct platform_device *sh73a0_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
@@ -673,6 +756,7 @@ static struct platform_device *sh73a0_late_devices[] __initdata = {
&i2c3_device,
&i2c4_device,
&dma0_device,
+ &mpdma0_device,
};
#define SRCR2 0xe61580b0
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 0f882ecb7d81..6ec300549960 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
},
};
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index bbcf4571d361..1d0e435b9045 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart2_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart2_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart3_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart3_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart4_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart4_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart5_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart5_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
},
};
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 88d483bcd66a..fd823c624575 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = {
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c0_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c0_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp1_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart1_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "uart2_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c1_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c1_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c2_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2c2_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "i2s_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "rs485_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "rs485_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
},
};
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 66db5f13af84..98144baf8883 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = {
struct pl08x_platform_data pl080_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
- .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+ .cctl_memcpy =
+ (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index 9af67d003c62..5a5a52db252b 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = {
.min_signal = 0,
.max_signal = 0,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp1_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart0_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "uart1_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp2_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp2_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ssp0_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ssp0_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "i2c_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "irda",
.min_signal = 12,
.max_signal = 12,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "adc",
.min_signal = 13,
.max_signal = 13,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "to_jpeg",
.min_signal = 14,
.max_signal = 14,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "from_jpeg",
.min_signal = 15,
.max_signal = 15,
.muxval = 0,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ras7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 1,
- .cctl = 0,
.periph_buses = PL08X_AHB1,
}, {
.bus_id = "ext0_rx",
.min_signal = 0,
.max_signal = 0,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext0_tx",
.min_signal = 1,
.max_signal = 1,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext1_rx",
.min_signal = 2,
.max_signal = 2,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext1_tx",
.min_signal = 3,
.max_signal = 3,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext2_rx",
.min_signal = 4,
.max_signal = 4,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext2_tx",
.min_signal = 5,
.max_signal = 5,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext3_rx",
.min_signal = 6,
.max_signal = 6,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext3_tx",
.min_signal = 7,
.max_signal = 7,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext4_rx",
.min_signal = 8,
.max_signal = 8,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext4_tx",
.min_signal = 9,
.max_signal = 9,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext5_rx",
.min_signal = 10,
.max_signal = 10,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext5_tx",
.min_signal = 11,
.max_signal = 11,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext6_rx",
.min_signal = 12,
.max_signal = 12,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext6_tx",
.min_signal = 13,
.max_signal = 13,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext7_rx",
.min_signal = 14,
.max_signal = 14,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
}, {
.bus_id = "ext7_tx",
.min_signal = 15,
.max_signal = 15,
.muxval = 2,
- .cctl = 0,
.periph_buses = PL08X_AHB2,
},
};
@@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = {
struct pl08x_platform_data pl080_plat_data = {
.memcpy_channel = {
.bus_id = "memcpy",
- .cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+ .cctl_memcpy =
+ (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index d0de9c1192f7..c0999633a9ab 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -64,7 +64,8 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
&tegra_ehci2_pdata),
OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2",
&tegra_ehci3_pdata),
- OF_DEV_AUXDATA("nvidia,tegra20-apbdma", 0x6000a000, "tegra-apbdma", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-apbdma", TEGRA_APB_DMA_BASE, "tegra-apbdma", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra20-pwm", TEGRA_PWFM_BASE, "tegra-pwm", NULL),
{}
};
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index ee48214bfd89..53bf60f11580 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -33,6 +33,8 @@
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
+#include <mach/iomap.h>
+
#include "board.h"
#include "clock.h"
@@ -48,6 +50,7 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
OF_DEV_AUXDATA("nvidia,tegra30-apbdma", 0x6000a000, "tegra-apbdma", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra30-pwm", TEGRA_PWFM_BASE, "tegra-pwm", NULL),
{}
};
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index 44dcb2e869b5..b7344beec102 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
#include <linux/mfd/tps6586x.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
@@ -34,7 +35,9 @@ static struct regulator_consumer_supply tps658621_ldo0_supply[] = {
};
static struct regulator_init_data ldo0_data = {
+ .supply_regulator = "vdd_sm2",
.constraints = {
+ .name = "vdd_ldo0",
.min_uV = 3300 * 1000,
.max_uV = 3300 * 1000,
.valid_modes_mask = (REGULATOR_MODE_NORMAL |
@@ -48,9 +51,11 @@ static struct regulator_init_data ldo0_data = {
.consumer_supplies = tps658621_ldo0_supply,
};
-#define HARMONY_REGULATOR_INIT(_id, _minmv, _maxmv) \
+#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\
static struct regulator_init_data _id##_data = { \
+ .supply_regulator = _supply, \
.constraints = { \
+ .name = _name, \
.min_uV = (_minmv)*1000, \
.max_uV = (_maxmv)*1000, \
.valid_modes_mask = (REGULATOR_MODE_NORMAL | \
@@ -58,21 +63,22 @@ static struct regulator_init_data ldo0_data = {
.valid_ops_mask = (REGULATOR_CHANGE_MODE | \
REGULATOR_CHANGE_STATUS | \
REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _on, \
}, \
}
-HARMONY_REGULATOR_INIT(sm0, 725, 1500);
-HARMONY_REGULATOR_INIT(sm1, 725, 1500);
-HARMONY_REGULATOR_INIT(sm2, 3000, 4550);
-HARMONY_REGULATOR_INIT(ldo1, 725, 1500);
-HARMONY_REGULATOR_INIT(ldo2, 725, 1500);
-HARMONY_REGULATOR_INIT(ldo3, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo4, 1700, 2475);
-HARMONY_REGULATOR_INIT(ldo5, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo6, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo7, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo8, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo9, 1250, 3300);
+HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1);
+HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0);
+HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1);
+HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1);
#define TPS_REG(_id, _data) \
{ \
@@ -114,6 +120,9 @@ static struct i2c_board_info __initdata harmony_regulators[] = {
int __init harmony_regulator_init(void)
{
+ regulator_register_always_on(0, "vdd_sys",
+ NULL, 0, 5000000);
+
if (machine_is_harmony()) {
i2c_register_board_info(3, harmony_regulators, 1);
} else { /* Harmony, booted using device tree */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c013bbf79cac..53d3d46dec12 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,7 +41,6 @@ config MACH_HREFV60
config MACH_SNOWBALL
bool "U8500 Snowball platform"
select MACH_MOP500
- select LEDS_GPIO
help
Include support for the snowball development platform.
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
index 996048038743..df15646036aa 100644
--- a/arch/arm/mach-ux500/board-mop500-msp.c
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
return pdev;
}
-/* Platform device for ASoC U8500 machine */
-static struct platform_device snd_soc_u8500 = {
- .name = "snd-soc-u8500",
+/* Platform device for ASoC MOP500 machine */
+static struct platform_device snd_soc_mop500 = {
+ .name = "snd-soc-mop500",
.id = 0,
.dev = {
.platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
{
struct platform_device *msp1;
- pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__);
- platform_device_register(&snd_soc_u8500);
+ pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
+ platform_device_register(&snd_soc_mop500);
pr_info("Initialize MSP I2S-devices.\n");
db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a310222951da..a534d8880de1 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/platform_data/i2c-nomadik.h>
#include <linux/gpio.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl022.h>
@@ -40,7 +41,6 @@
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
-#include <plat/i2c.h>
#include <plat/ste_dma40.h>
#include <plat/gpio-nomadik.h>
@@ -211,24 +211,6 @@ static struct ab8500_platform_data ab8500_platdata = {
.codec = &ab8500_codec_pdata,
};
-static struct resource ab8500_resources[] = {
- [0] = {
- .start = IRQ_DB8500_AB8500,
- .end = IRQ_DB8500_AB8500,
- .flags = IORESOURCE_IRQ
- }
-};
-
-struct platform_device ab8500_device = {
- .name = "ab8500-core",
- .id = 0,
- .dev = {
- .platform_data = &ab8500_platdata,
- },
- .num_resources = 1,
- .resource = ab8500_resources,
-};
-
/*
* TPS61052
*/
@@ -443,7 +425,6 @@ static struct hash_platform_data u8500_hash1_platform_data = {
/* add any platform devices here - TODO */
static struct platform_device *mop500_platform_devs[] __initdata = {
&mop500_gpio_keys_device,
- &ab8500_device,
};
#ifdef CONFIG_STE_DMA40
@@ -605,7 +586,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
&snowball_led_dev,
&snowball_key_dev,
&snowball_sbnet_dev,
- &ab8500_device,
};
static void __init mop500_init_machine(void)
@@ -617,9 +597,8 @@ static void __init mop500_init_machine(void)
mop500_gpio_keys[0].gpio = GPIO_PROX_SENSOR;
mop500_pinmaps_init();
- parent = u8500_init_devices();
+ parent = u8500_init_devices(&ab8500_platdata);
- /* FIXME: parent of ab8500 should be prcmu */
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
@@ -652,7 +631,7 @@ static void __init snowball_init_machine(void)
int i;
snowball_pinmaps_init();
- parent = u8500_init_devices();
+ parent = u8500_init_devices(&ab8500_platdata);
for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
snowball_platform_devs[i]->dev.parent = parent;
@@ -684,7 +663,7 @@ static void __init hrefv60_init_machine(void)
mop500_gpio_keys[0].gpio = HREFV60_PROX_SENSE_GPIO;
hrefv60_pinmaps_init();
- parent = u8500_init_devices();
+ parent = u8500_init_devices(&ab8500_platdata);
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
@@ -785,9 +764,6 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
/* only create devices below soc node */
{ .compatible = "stericsson,db8500", },
{ .compatible = "stericsson,db8500-prcmu", },
- { .compatible = "stericsson,db8500-prcmu-regulator", },
- { .compatible = "stericsson,ab8500", },
- { .compatible = "stericsson,ab8500-regulator", },
{ .compatible = "simple-bus"},
{ },
};
@@ -821,6 +797,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
mop500_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
i2c_register_board_info(2, mop500_i2c2_devices,
@@ -828,6 +805,8 @@ static void __init u8500_init_machine(void)
mop500_uib_init();
+ } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
+ mop500_msp_init(parent);
} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
/*
* The HREFv60 board removed a GPIO expander and routed
@@ -839,6 +818,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
hrefv60_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index c8dd94f606dc..db3c52d56ca4 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -16,6 +16,7 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <asm/mach/map.h>
#include <asm/pmu.h>
@@ -115,7 +116,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
return ret;
}
-static struct arm_pmu_platdata db8500_pmu_platdata = {
+struct arm_pmu_platdata db8500_pmu_platdata = {
.handle_irq = db8500_pmu_handler,
};
@@ -206,7 +207,7 @@ static struct device * __init db8500_soc_device_init(void)
/*
* This function is called from the board init
*/
-struct device * __init u8500_init_devices(void)
+struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
{
struct device *parent;
int i;
@@ -223,6 +224,8 @@ struct device * __init u8500_init_devices(void)
for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
platform_devs[i]->dev.parent = parent;
+ db8500_prcmu_device.dev.platform_data = ab8500;
+
platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
return parent;
diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h
index 6e4706560266..ecdd8386cffb 100644
--- a/arch/arm/mach-ux500/devices-common.h
+++ b/arch/arm/mach-ux500/devices-common.h
@@ -12,7 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/sys_soc.h>
#include <linux/amba/bus.h>
-#include <plat/i2c.h>
+#include <linux/platform_data/i2c-nomadik.h>
#include <mach/crypto-ux500.h>
struct spi_master_cntlr;
@@ -56,27 +56,15 @@ dbx500_add_uart(struct device *parent, const char *name, resource_size_t base,
struct nmk_i2c_controller;
-static inline struct platform_device *
+static inline struct amba_device *
dbx500_add_i2c(struct device *parent, int id, resource_size_t base, int irq,
struct nmk_i2c_controller *data)
{
- struct resource res[] = {
- DEFINE_RES_MEM(base, SZ_4K),
- DEFINE_RES_IRQ(irq),
- };
+ /* Conjure a name similar to what the platform device used to have */
+ char name[16];
- struct platform_device_info pdevinfo = {
- .parent = parent,
- .name = "nmk-i2c",
- .id = id,
- .res = res,
- .num_res = ARRAY_SIZE(res),
- .data = data,
- .size_data = sizeof(*data),
- .dma_mask = DMA_BIT_MASK(32),
- };
-
- return platform_device_register_full(&pdevinfo);
+ snprintf(name, sizeof(name), "nmk-i2c.%d", id);
+ return amba_apb_device_add(parent, name, base, SZ_4K, irq, 0, data, 0);
}
static inline struct amba_device *
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 8b7ed82a2866..7914e5eaa9c7 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -13,11 +13,12 @@
#include <asm/mach/time.h>
#include <linux/init.h>
+#include <linux/mfd/abx500/ab8500.h>
void __init ux500_map_io(void);
extern void __init u8500_map_io(void);
-extern struct device * __init u8500_init_devices(void);
+extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500);
extern void __init ux500_init_irq(void);
extern void __init ux500_init_late(void);
diff --git a/arch/arm/mach-vt8500/Makefile b/arch/arm/mach-vt8500/Makefile
index 54e69973f39b..7ce51767c99c 100644
--- a/arch/arm/mach-vt8500/Makefile
+++ b/arch/arm/mach-vt8500/Makefile
@@ -5,5 +5,3 @@ obj-$(CONFIG_VTWM_VERSION_WM8505) += devices-wm8505.o
obj-$(CONFIG_MACH_BV07) += bv07.o
obj-$(CONFIG_MACH_WM8505_7IN_NETBOOK) += wm8505_7in.o
-
-obj-$(CONFIG_HAVE_PWM) += pwm.o
diff --git a/arch/arm/mach-vt8500/pwm.c b/arch/arm/mach-vt8500/pwm.c
deleted file mode 100644
index 8ad825e93592..000000000000
--- a/arch/arm/mach-vt8500/pwm.c
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * arch/arm/mach-vt8500/pwm.c
- *
- * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/pwm.h>
-#include <linux/delay.h>
-
-#include <asm/div64.h>
-
-#define VT8500_NR_PWMS 4
-
-static DEFINE_MUTEX(pwm_lock);
-static LIST_HEAD(pwm_list);
-
-struct pwm_device {
- struct list_head node;
- struct platform_device *pdev;
-
- const char *label;
-
- void __iomem *regbase;
-
- unsigned int use_count;
- unsigned int pwm_id;
-};
-
-#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
-static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
-{
- int loops = msecs_to_loops(10);
- while ((readb(reg) & bitmask) && --loops)
- cpu_relax();
-
- if (unlikely(!loops))
- pr_warning("Waiting for status bits 0x%x to clear timed out\n",
- bitmask);
-}
-
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- unsigned long long c;
- unsigned long period_cycles, prescale, pv, dc;
-
- if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
- return -EINVAL;
-
- c = 25000000/2; /* wild guess --- need to implement clocks */
- c = c * period_ns;
- do_div(c, 1000000000);
- period_cycles = c;
-
- if (period_cycles < 1)
- period_cycles = 1;
- prescale = (period_cycles - 1) / 4096;
- pv = period_cycles / (prescale + 1) - 1;
- if (pv > 4095)
- pv = 4095;
-
- if (prescale > 1023)
- return -EINVAL;
-
- c = (unsigned long long)pv * duty_ns;
- do_div(c, period_ns);
- dc = c;
-
- pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 1));
- writel(prescale, pwm->regbase + 0x4 + (pwm->pwm_id << 4));
-
- pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 2));
- writel(pv, pwm->regbase + 0x8 + (pwm->pwm_id << 4));
-
- pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 3));
- writel(dc, pwm->regbase + 0xc + (pwm->pwm_id << 4));
-
- return 0;
-}
-EXPORT_SYMBOL(pwm_config);
-
-int pwm_enable(struct pwm_device *pwm)
-{
- pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0));
- writel(5, pwm->regbase + (pwm->pwm_id << 4));
- return 0;
-}
-EXPORT_SYMBOL(pwm_enable);
-
-void pwm_disable(struct pwm_device *pwm)
-{
- pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0));
- writel(0, pwm->regbase + (pwm->pwm_id << 4));
-}
-EXPORT_SYMBOL(pwm_disable);
-
-struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- struct pwm_device *pwm;
- int found = 0;
-
- mutex_lock(&pwm_lock);
-
- list_for_each_entry(pwm, &pwm_list, node) {
- if (pwm->pwm_id == pwm_id) {
- found = 1;
- break;
- }
- }
-
- if (found) {
- if (pwm->use_count == 0) {
- pwm->use_count++;
- pwm->label = label;
- } else {
- pwm = ERR_PTR(-EBUSY);
- }
- } else {
- pwm = ERR_PTR(-ENOENT);
- }
-
- mutex_unlock(&pwm_lock);
- return pwm;
-}
-EXPORT_SYMBOL(pwm_request);
-
-void pwm_free(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
-
- if (pwm->use_count) {
- pwm->use_count--;
- pwm->label = NULL;
- } else {
- pr_warning("PWM device already freed\n");
- }
-
- mutex_unlock(&pwm_lock);
-}
-EXPORT_SYMBOL(pwm_free);
-
-static inline void __add_pwm(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
- list_add_tail(&pwm->node, &pwm_list);
- mutex_unlock(&pwm_lock);
-}
-
-static int __devinit pwm_probe(struct platform_device *pdev)
-{
- struct pwm_device *pwms;
- struct resource *r;
- int ret = 0;
- int i;
-
- pwms = kzalloc(sizeof(struct pwm_device) * VT8500_NR_PWMS, GFP_KERNEL);
- if (pwms == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < VT8500_NR_PWMS; i++) {
- pwms[i].use_count = 0;
- pwms[i].pwm_id = i;
- pwms[i].pdev = pdev;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- ret = -ENODEV;
- goto err_free;
- }
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (r == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto err_free;
- }
-
- pwms[0].regbase = ioremap(r->start, resource_size(r));
- if (pwms[0].regbase == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -ENODEV;
- goto err_free_mem;
- }
-
- for (i = 1; i < VT8500_NR_PWMS; i++)
- pwms[i].regbase = pwms[0].regbase;
-
- for (i = 0; i < VT8500_NR_PWMS; i++)
- __add_pwm(&pwms[i]);
-
- platform_set_drvdata(pdev, pwms);
- return 0;
-
-err_free_mem:
- release_mem_region(r->start, resource_size(r));
-err_free:
- kfree(pwms);
- return ret;
-}
-
-static int __devexit pwm_remove(struct platform_device *pdev)
-{
- struct pwm_device *pwms;
- struct resource *r;
- int i;
-
- pwms = platform_get_drvdata(pdev);
- if (pwms == NULL)
- return -ENODEV;
-
- mutex_lock(&pwm_lock);
-
- for (i = 0; i < VT8500_NR_PWMS; i++)
- list_del(&pwms[i].node);
- mutex_unlock(&pwm_lock);
-
- iounmap(pwms[0].regbase);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
- kfree(pwms);
- return 0;
-}
-
-static struct platform_driver pwm_driver = {
- .driver = {
- .name = "vt8500-pwm",
- .owner = THIS_MODULE,
- },
- .probe = pwm_probe,
- .remove = __devexit_p(pwm_remove),
-};
-
-static int __init pwm_init(void)
-{
- return platform_driver_register(&pwm_driver);
-}
-arch_initcall(pwm_init);
-
-static void __exit pwm_exit(void)
-{
- platform_driver_unregister(&pwm_driver);
-}
-module_exit(pwm_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 806cc4f63516..119bc52ab93e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <asm/mmu_context.h>
+#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
@@ -48,6 +49,40 @@ void cpu_set_reserved_ttbr0(void)
}
#endif
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
+ void *t)
+{
+ u32 contextidr;
+ pid_t pid;
+ struct thread_info *thread = t;
+
+ if (cmd != THREAD_NOTIFY_SWITCH)
+ return NOTIFY_DONE;
+
+ pid = task_pid_nr(thread->task) << ASID_BITS;
+ asm volatile(
+ " mrc p15, 0, %0, c13, c0, 1\n"
+ " bfi %1, %0, #0, %2\n"
+ " mcr p15, 0, %1, c13, c0, 1\n"
+ : "=r" (contextidr), "+r" (pid)
+ : "I" (ASID_BITS));
+ isb();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block contextidr_notifier_block = {
+ .notifier_call = contextidr_notifier,
+};
+
+static int __init contextidr_notifier_init(void)
+{
+ return thread_register_notifier(&contextidr_notifier_block);
+}
+arch_initcall(contextidr_notifier_init);
+#endif
+
/*
* We fork()ed a process, and we need a new context for the child
* to run in.
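
(Aside: a minimal standalone sketch of the bit manipulation the CONFIG_PID_IN_CONTEXTIDR notifier above performs with the mrc/bfi/mcr sequence. The task PID is placed in the upper PROCID field of CONTEXTIDR while the low ASID_BITS bits, the hardware ASID, are preserved, so external debug and trace tools can identify the running task. Illustrative only, not part of the patch; ASID_BITS = 8 is an assumption matching the ARM mmu_context definitions of this era.)

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS 8                     /* assumed, as in asm/mmu_context.h */

static uint32_t contextidr_with_pid(uint32_t old_contextidr, uint32_t pid)
{
        uint32_t asid_mask = (1u << ASID_BITS) - 1;

        /* Keep the hardware ASID in the low bits, put the PID above it. */
        return (pid << ASID_BITS) | (old_contextidr & asid_mask);
}

int main(void)
{
        /* old CONTEXTIDR carrying ASID 0x2a, switching to PID 1234 */
        printf("0x%08x\n", contextidr_with_pid(0x0000002a, 1234));
        return 0;
}
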
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 655878bcc96d..4e7d1182e8a3 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -22,13 +22,14 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
+#include <linux/io.h>
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
@@ -72,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
@@ -95,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
@@ -124,6 +125,7 @@ struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
.mmap = arm_dma_mmap,
+ .get_sgtable = arm_dma_get_sgtable,
.map_page = arm_dma_map_page,
.unmap_page = arm_dma_unmap_page,
.map_sg = arm_dma_map_sg,
@@ -217,115 +219,70 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#endif
-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
-
-/*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
- */
-static pte_t **consistent_pte;
-
-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller);
-void __init init_consistent_dma_size(unsigned long size)
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
+ struct vm_struct *area;
+ unsigned long addr;
- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
- BUG_ON(base < VMALLOC_END);
+ /*
+ * DMA allocation can be mapped to user space, so let's
+ * set VM_USERMAP flags too.
+ */
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
- /* Grow region to accommodate specified size */
- if (base < consistent_base)
- consistent_base = base;
+ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+ return (void *)addr;
}
-#include "vmregion.h"
-
-static struct arm_vmregion_head consistent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
- .vm_end = CONSISTENT_END,
-};
-
-#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
-
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
+static void __dma_free_remap(void *cpu_addr, size_t size)
{
- int ret = 0;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i = 0;
- unsigned long base = consistent_base;
- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
-
- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
- return 0;
-
- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
- if (!consistent_pte) {
- pr_err("%s: no memory\n", __func__);
- return -ENOMEM;
+ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
+ struct vm_struct *area = find_vm_area(cpu_addr);
+ if (!area || (area->flags & flags) != flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
}
-
- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
- consistent_head.vm_start = base;
-
- do {
- pgd = pgd_offset(&init_mm, base);
-
- pud = pud_alloc(&init_mm, pgd, base);
- if (!pud) {
- pr_err("%s: no pud tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- pmd = pmd_alloc(&init_mm, pud, base);
- if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
- WARN_ON(!pmd_none(*pmd));
-
- pte = pte_alloc_kernel(pmd, base);
- if (!pte) {
- pr_err("%s: no pte tables\n", __func__);
- ret = -ENOMEM;
- break;
- }
-
- consistent_pte[i++] = pte;
- base += PMD_SIZE;
- } while (base < CONSISTENT_END);
-
- return ret;
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
}
-core_initcall(consistent_init);
-static void *__alloc_from_contiguous(struct device *dev, size_t size,
- pgprot_t prot, struct page **ret_page);
-
-static struct arm_vmregion_head coherent_head = {
- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+struct dma_pool {
+ size_t size;
+ spinlock_t lock;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ void *vaddr;
+ struct page *page;
};
-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static struct dma_pool atomic_pool = {
+ .size = SZ_256K,
+};
static int __init early_coherent_pool(char *p)
{
- coherent_pool_size = memparse(p, &p);
+ atomic_pool.size = memparse(p, &p);
return 0;
}
early_param("coherent_pool", early_coherent_pool);
@@ -333,32 +290,45 @@ early_param("coherent_pool", early_coherent_pool);
/*
* Initialise the coherent pool for atomic allocations.
*/
-static int __init coherent_init(void)
+static int __init atomic_pool_init(void)
{
+ struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
- size_t size = coherent_pool_size;
+ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
+ unsigned long *bitmap;
struct page *page;
void *ptr;
+ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
- if (!IS_ENABLED(CONFIG_CMA))
- return 0;
+ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!bitmap)
+ goto no_bitmap;
- ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (IS_ENABLED(CONFIG_CMA))
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ else
+ ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
+ &page, NULL);
if (ptr) {
- coherent_head.vm_start = (unsigned long) ptr;
- coherent_head.vm_end = (unsigned long) ptr + size;
- printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
- (unsigned)size / 1024);
+ spin_lock_init(&pool->lock);
+ pool->vaddr = ptr;
+ pool->page = page;
+ pool->bitmap = bitmap;
+ pool->nr_pages = nr_pages;
+ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)pool->size / 1024);
return 0;
}
- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
- (unsigned)size / 1024);
+ kfree(bitmap);
+no_bitmap:
+ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)pool->size / 1024);
return -ENOMEM;
}
/*
* CMA is activated by core_initcall, so we must be called after it.
*/
-postcore_initcall(coherent_init);
+postcore_initcall(atomic_pool_init);
struct dma_contig_early_reserve {
phys_addr_t base;
@@ -388,7 +358,7 @@ void __init dma_contiguous_remap(void)
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
- return;
+ continue;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
@@ -406,112 +376,6 @@ void __init dma_contiguous_remap(void)
}
}
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- struct arm_vmregion *c;
- size_t align;
- int bit;
-
- if (!consistent_pte) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
- return NULL;
- }
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = page;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(page, prot), 0);
- page++;
- pte++;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (size -= PAGE_SIZE);
-
- dsb();
-
- return (void *)c->vm_start;
- }
- return NULL;
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size)
-{
- struct arm_vmregion *c;
- unsigned long addr;
- pte_t *ptep;
- int idx;
- u32 off;
-
- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
- if (!c) {
- pr_err("%s: trying to free invalid coherent area: %p\n",
- __func__, cpu_addr);
- dump_stack();
- return;
- }
-
- if ((c->vm_end - c->vm_start) != size) {
- pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
- }
-
- idx = CONSISTENT_PTE_INDEX(c->vm_start);
- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
- ptep = consistent_pte[idx] + off;
- addr = c->vm_start;
- do {
- pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-
- ptep++;
- addr += PAGE_SIZE;
- off++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- ptep = consistent_pte[++idx];
- }
-
- if (pte_none(pte) || !pte_present(pte))
- pr_crit("%s: bad page in kernel page table\n",
- __func__);
- } while (size -= PAGE_SIZE);
-
- flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
- arm_vmregion_free(&consistent_head, c);
-}
-
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
void *data)
{
@@ -552,16 +416,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
return ptr;
}
-static void *__alloc_from_pool(struct device *dev, size_t size,
- struct page **ret_page, const void *caller)
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
- struct arm_vmregion *c;
- size_t align;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int pageno;
+ unsigned long flags;
+ void *ptr = NULL;
+ unsigned long align_mask;
- if (!coherent_head.vm_start) {
- printk(KERN_ERR "%s: coherent pool not initialised!\n",
- __func__);
- dump_stack();
+ if (!pool->vaddr) {
+ WARN(1, "coherent pool not initialised!\n");
return NULL;
}
@@ -570,36 +435,42 @@ static void *__alloc_from_pool(struct device *dev, size_t size,
* small, so align them to their order in pages, minimum is a page
* size. This helps reduce fragmentation of the DMA space.
*/
- align = PAGE_SIZE << get_order(size);
- c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
- if (c) {
- void *ptr = (void *)c->vm_start;
- struct page *page = virt_to_page(ptr);
- *ret_page = page;
- return ptr;
+ align_mask = (1 << get_order(size)) - 1;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+ 0, count, align_mask);
+ if (pageno < pool->nr_pages) {
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+ *ret_page = pool->page + pageno;
}
- return NULL;
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return ptr;
}
-static int __free_from_pool(void *cpu_addr, size_t size)
+static int __free_from_pool(void *start, size_t size)
{
- unsigned long start = (unsigned long)cpu_addr;
- unsigned long end = start + size;
- struct arm_vmregion *c;
+ struct dma_pool *pool = &atomic_pool;
+ unsigned long pageno, count;
+ unsigned long flags;
- if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ if (start < pool->vaddr || start > pool->vaddr + pool->size)
return 0;
- c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
-
- if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
- __func__, c->vm_end - c->vm_start, size);
- dump_stack();
- size = c->vm_end - c->vm_start;
+ if (start + size > pool->vaddr + pool->size) {
+ WARN(1, "freeing wrong coherent size from pool\n");
+ return 0;
}
- arm_vmregion_free(&coherent_head, c);
+ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+ count = size >> PAGE_SHIFT;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, pageno, count);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
return 1;
}
@@ -644,7 +515,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
-#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_pool(size, ret_page) NULL
#define __alloc_from_contiguous(dev, size, prot, ret) NULL
#define __free_from_pool(cpu_addr, size) 0
#define __free_from_contiguous(dev, page, size) do { } while (0)
@@ -702,10 +573,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
if (arch_is_coherent() || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(size, &page);
else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
- else if (gfp & GFP_ATOMIC)
- addr = __alloc_from_pool(dev, size, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page);
@@ -741,16 +612,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
+ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ unsigned long off = vma->vm_pgoff;
+
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
+ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
#endif /* CONFIG_MMU */
return ret;
@@ -771,12 +648,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
if (arch_is_coherent() || nommu()) {
__dma_free_buffer(page, size);
+ } else if (__free_from_pool(cpu_addr, size)) {
+ return;
} else if (!IS_ENABLED(CONFIG_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
- if (__free_from_pool(cpu_addr, size))
- return;
/*
* Non-atomic allocations cannot be freed with IRQs disabled
*/
@@ -785,6 +662,21 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
}
}
+int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
void (*op)(const void *, size_t, int))
@@ -998,9 +890,6 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
static int __init dma_debug_do_init(void)
{
-#ifdef CONFIG_MMU
- arm_vmregion_create_proc("dma-mappings", &consistent_head);
-#endif
dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
return 0;
}
@@ -1088,7 +977,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
return pages;
error:
- while (--i)
+ while (i--)
if (pages[i])
__free_pages(pages[i], 0);
if (array_size <= PAGE_SIZE)
@@ -1117,61 +1006,32 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
* Create a CPU mapping for the specified pages
*/
static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
{
- struct arm_vmregion *c;
- size_t align;
- size_t count = size >> PAGE_SHIFT;
- int bit;
+ unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct vm_struct *area;
+ unsigned long p;
- if (!consistent_pte[0]) {
- pr_err("%s: not initialised\n", __func__);
- dump_stack();
+ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ caller);
+ if (!area)
return NULL;
- }
-
- /*
- * Align the virtual region allocation - maximum alignment is
- * a section size, minimum is a page size. This helps reduce
- * fragmentation of the DMA space, and also prevents allocations
- * smaller than a section from crossing a section boundary.
- */
- bit = fls(size - 1);
- if (bit > SECTION_SHIFT)
- bit = SECTION_SHIFT;
- align = 1 << bit;
-
- /*
- * Allocate a virtual address in the consistent mapping region.
- */
- c = arm_vmregion_alloc(&consistent_head, align, size,
- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
- if (c) {
- pte_t *pte;
- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
- int i = 0;
- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-
- pte = consistent_pte[idx] + off;
- c->priv = pages;
-
- do {
- BUG_ON(!pte_none(*pte));
-
- set_pte_ext(pte, mk_pte(pages[i], prot), 0);
- pte++;
- off++;
- i++;
- if (off >= PTRS_PER_PTE) {
- off = 0;
- pte = consistent_pte[++idx];
- }
- } while (i < count);
- dsb();
+ area->pages = pages;
+ area->nr_pages = nr_pages;
+ p = (unsigned long)area->addr;
- return (void *)c->vm_start;
+ for (i = 0; i < nr_pages; i++) {
+ phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
+ if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
+ goto err;
+ p += PAGE_SIZE;
}
+ return area->addr;
+err:
+ unmap_kernel_range((unsigned long)area->addr, size);
+ vunmap(area->addr);
return NULL;
}
@@ -1230,6 +1090,19 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
+ area = find_vm_area(cpu_addr);
+ if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+ return area->pages;
+ return NULL;
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1248,7 +1121,11 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
- addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
if (!addr)
goto err_mapping;
@@ -1265,31 +1142,25 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-
- if (c) {
- struct page **pages = c->priv;
-
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
- int i = 0;
- do {
- int ret;
+ if (!pages)
+ return -ENXIO;
- ret = vm_insert_page(vma, uaddr, pages[i++]);
- if (ret) {
- pr_err("Remapping memory, error: %d\n", ret);
- return ret;
- }
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- }
return 0;
}
@@ -1300,16 +1171,35 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
- struct arm_vmregion *c;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
size = PAGE_ALIGN(size);
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- struct page **pages = c->priv;
- __dma_free_remap(cpu_addr, size);
- __iommu_remove_mapping(dev, handle, size);
- __iommu_free_buffer(dev, pages, size);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
}
+
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+ }
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+}
+
+static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ if (!pages)
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+ GFP_KERNEL);
}
/*
@@ -1317,7 +1207,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
dma_addr_t iova, iova_base;
@@ -1336,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1383,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
if (__map_sg_chunk(dev, start, size, &dma->dma_address,
- dir) < 0)
+ dir, attrs) < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1396,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
size += s->length;
}
- if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
goto bad_mapping;
dma->dma_address += offset;
@@ -1430,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!arch_is_coherent())
+ if (!arch_is_coherent() &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
@@ -1492,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
dma_addr_t dma_addr;
int ret, len = PAGE_ALIGN(size + offset);
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
dma_addr = __alloc_iova(mapping, len);
@@ -1531,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
if (!iova)
return;
- if (!arch_is_coherent())
+ if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
@@ -1571,6 +1463,7 @@ struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_iommu_map_page,
.unmap_page = arm_iommu_unmap_page,
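
(Aside: the rework above drops the old vmregion bookkeeping and manages the atomic pool with a single bitmap under pool->lock. __alloc_from_pool() rounds the request up to whole pages, looks for a free run aligned to the allocation order and marks it busy; __free_from_pool() clears the same bits. A minimal standalone sketch of that bookkeeping follows, with a linear scan standing in for bitmap_find_next_zero_area()/bitmap_set()/bitmap_clear(); the pool size matches the SZ_256K default above, the rest is illustrative only and not part of the patch.)

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define POOL_PAGES      64              /* 256 KiB pool, as in atomic_pool.size */

static bool pool_bitmap[POOL_PAGES];    /* true = page in use */

/* Round a byte size up to whole pages. */
static size_t size_to_pages(size_t size)
{
        return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Find 'count' free pages starting at a multiple of 'align' pages. */
static long pool_alloc_pages(size_t count, size_t align)
{
        for (size_t start = 0; start + count <= POOL_PAGES; start += align) {
                size_t i;

                for (i = 0; i < count; i++)
                        if (pool_bitmap[start + i])
                                break;
                if (i == count) {
                        for (i = 0; i < count; i++)
                                pool_bitmap[start + i] = true;
                        return (long)start;
                }
        }
        return -1;                      /* pool exhausted */
}

static void pool_free_pages(size_t start, size_t count)
{
        for (size_t i = 0; i < count; i++)
                pool_bitmap[start + i] = false;
}

int main(void)
{
        long a = pool_alloc_pages(size_to_pages(3 * PAGE_SIZE), 4); /* order-2 aligned */
        long b = pool_alloc_pages(size_to_pages(PAGE_SIZE), 1);

        printf("a at page %ld, b at page %ld\n", a, b);
        pool_free_pages((size_t)a, 3);
        pool_free_pages((size_t)b, 1);
        return 0;
}
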
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 77458548e031..40ca11ed6e5f 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)
struct page *page;
struct address_space *mapping;
- if (!pte_present_user(pteval))
- return;
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
/* only flush non-aliasing VIPT caches for exec mappings */
return;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f54d59219764..9aec41fa80ae 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -21,13 +21,13 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
+#include <linux/sizes.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 4f55f5062ab7..566750fa57d4 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/sizes.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -32,7 +33,6 @@
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
-#include <asm/sizes.h>
#include <asm/system_info.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 2e8a1efdf7b8..6776160618ef 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -59,6 +59,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
+/* consistent regions used by dma_alloc_attrs() */
+#define VM_ARM_DMA_CONSISTENT 0x20000000
+
#endif
#ifdef CONFIG_ZONE_DMA
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cf4528d51774..4c2d0451e84a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -16,13 +16,13 @@
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
+#include <linux/sizes.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
-#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
@@ -422,12 +422,6 @@ static void __init build_mem_type_table(void)
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
/*
- * Only use write-through for non-SMP systems
- */
- if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
- vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-
- /*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5900cd520e84..86b8b480634f 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -107,6 +107,12 @@ ENTRY(cpu_v6_switch_mm)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ mrc p15, 0, r2, c13, c0, 1 @ read current context ID
+ bic r2, r2, #0xff @ extract the PID
+ and r1, r1, #0xff
+ orr r1, r1, r2 @ insert into new context ID
+#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 42ac069c8012..fd045e706390 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -46,6 +46,11 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ mrc p15, 0, r2, c13, c0, 1 @ read current context ID
+ lsr r2, r2, #8 @ extract the PID
+ bfi r1, r2, #8, #24 @ insert into new context ID
+#endif
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 845f461f8ec1..ea94765acf9a 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -39,10 +39,18 @@ ENTRY(v7wbi_flush_user_tlb_range)
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(W(mov) r3, #0 )
+ ALT_UP(W(nop) )
+#endif
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 4e0a371630b3..99c63d4b6af8 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -23,26 +23,37 @@
#include <asm/ptrace.h>
#ifdef CONFIG_HW_PERF_EVENTS
+
+/*
+ * OProfile has a curious naming scheme for the ARM PMUs, but they are
+ * part of the user ABI so we need to map from the perf PMU name to the
+ * OProfile name for supported PMUs.
+ */
+static struct op_perf_name {
+ char *perf_name;
+ char *op_name;
+} op_perf_name_map[] = {
+ { "xscale1", "arm/xscale1" },
+ { "xscale1", "arm/xscale2" },
+ { "v6", "arm/armv6" },
+ { "v6mpcore", "arm/mpcore" },
+ { "ARMv7 Cortex-A8", "arm/armv7" },
+ { "ARMv7 Cortex-A9", "arm/armv7-ca9" },
+};
+
char *op_name_from_perf_id(void)
{
- enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
-
- switch (id) {
- case ARM_PERF_PMU_ID_XSCALE1:
- return "arm/xscale1";
- case ARM_PERF_PMU_ID_XSCALE2:
- return "arm/xscale2";
- case ARM_PERF_PMU_ID_V6:
- return "arm/armv6";
- case ARM_PERF_PMU_ID_V6MP:
- return "arm/mpcore";
- case ARM_PERF_PMU_ID_CA8:
- return "arm/armv7";
- case ARM_PERF_PMU_ID_CA9:
- return "arm/armv7-ca9";
- default:
- return NULL;
+ int i;
+ struct op_perf_name names;
+ const char *perf_name = perf_pmu_name();
+
+ for (i = 0; i < ARRAY_SIZE(op_perf_name_map); ++i) {
+ names = op_perf_name_map[i];
+ if (!strcmp(names.perf_name, perf_name))
+ return names.op_name;
}
+
+ return NULL;
}
#endif
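
(Aside: a standalone sketch of the lookup op_name_from_perf_id() now performs. The PMU name reported by perf_pmu_name() is matched against the map and the corresponding OProfile ABI name is returned, or NULL for an unsupported PMU. Illustrative only, not part of the patch; the table simply mirrors the entries above.)

#include <stdio.h>
#include <string.h>

struct op_perf_name {
        const char *perf_name;
        const char *op_name;
};

static const struct op_perf_name op_perf_name_map[] = {
        { "xscale1",            "arm/xscale1"   },
        { "xscale2",            "arm/xscale2"   },
        { "v6",                 "arm/armv6"     },
        { "v6mpcore",           "arm/mpcore"    },
        { "ARMv7 Cortex-A8",    "arm/armv7"     },
        { "ARMv7 Cortex-A9",    "arm/armv7-ca9" },
};

/* Return the OProfile name for a perf PMU name, or NULL if unsupported. */
static const char *op_name_from_perf_name(const char *perf_name)
{
        size_t i;

        for (i = 0; i < sizeof(op_perf_name_map) / sizeof(op_perf_name_map[0]); i++)
                if (!strcmp(op_perf_name_map[i].perf_name, perf_name))
                        return op_perf_name_map[i].op_name;
        return NULL;
}

int main(void)
{
        printf("%s\n", op_name_from_perf_name("ARMv7 Cortex-A9")); /* arm/armv7-ca9 */
        return 0;
}
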
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index c722f9ce6918..baf9064c0844 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -47,12 +47,6 @@ config MXC_TZIC
config MXC_AVIC
bool
-config MXC_PWM
- tristate "Enable PWM driver"
- select HAVE_PWM
- help
- Enable support for the i.MX PWM controller(s).
-
config MXC_DEBUG_BOARD
bool "Enable MXC debug board(for 3-stack)"
help
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 63b064b5c1d5..6ac720031150 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -11,7 +11,6 @@ obj-$(CONFIG_MXC_AVIC) += avic.o
obj-$(CONFIG_IMX_HAVE_IOMUX_V1) += iomux-v1.o
obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o
-obj-$(CONFIG_MXC_PWM) += pwm.o
obj-$(CONFIG_MXC_ULPI) += ulpi.o
obj-$(CONFIG_MXC_USE_EPIT) += epit.o
obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
diff --git a/arch/arm/plat-mxc/include/mach/i2c.h b/arch/arm/plat-mxc/include/mach/i2c.h
index 375cdd0cf876..8289d915e615 100644
--- a/arch/arm/plat-mxc/include/mach/i2c.h
+++ b/arch/arm/plat-mxc/include/mach/i2c.h
@@ -15,7 +15,7 @@
*
**/
struct imxi2c_platform_data {
- int bitrate;
+ u32 bitrate;
};
#endif /* __ASM_ARCH_I2C_H_ */
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index c2193178210b..3ed1adbc09f8 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -23,6 +23,7 @@
#include <mach/hardware.h>
#include <mach/common.h>
+#include <mach/irqs.h>
#include "irq-common.h"
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 626ad8cad7a9..938b50a33439 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer->reserved = 1;
break;
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
break;
}
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
- pm_runtime_put(&timer->pdev->dev);
+ pm_runtime_put_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
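
(Aside: both omap_dm_timer_request() hunks above move spin_unlock_irqrestore() up so dm_timer_lock is dropped as soon as a timer has been reserved, and only then is omap_dm_timer_prepare() called; preparing the timer goes through runtime PM and should not run with the spinlock held and interrupts disabled. A minimal standalone sketch of the resulting pattern, with a pthread mutex standing in for the spinlock; illustrative only, not part of the patch.)

#include <pthread.h>
#include <stdio.h>

struct sketch_timer {
        int id;
        int reserved;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_timer timers[4];

static int prepare(struct sketch_timer *t)
{
        (void)t;                        /* may block in the real driver */
        return 0;
}

static struct sketch_timer *request_timer(void)
{
        struct sketch_timer *t = NULL;

        pthread_mutex_lock(&lock);
        for (int i = 0; i < 4; i++) {
                if (!timers[i].reserved) {
                        timers[i].reserved = 1;
                        t = &timers[i];
                        break;
                }
        }
        pthread_mutex_unlock(&lock);    /* drop the lock first ... */

        if (t && prepare(t)) {          /* ... then do the slow part */
                t->reserved = 0;
                t = NULL;
        }
        return t;
}

int main(void)
{
        printf("%s\n", request_timer() ? "got a timer" : "no timer");
        return 0;
}
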
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 68b180edcfff..bb5d08a70dbc 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
cpu_is_omap16xx())
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
- cpu_is_omap44xx() || soc_is_omap54xx())
+ cpu_is_omap44xx() || soc_is_omap54xx() || \
+ soc_is_am33xx())
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 5493bd95da5e..eb3e4d555343 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
/* Return context loss count due to PM states changing */
int (*get_context_loss_count)(struct device *dev);
- u64 dma_mask;
-
/* Integrating attributes from the omap_hwmod layer */
u8 controller_flags;
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 045e320f1067..324d31b14852 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -108,4 +108,13 @@
# endif
#endif
+#ifdef CONFIG_SOC_AM33XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME am33xx
+# endif
+#endif
+
#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index b8d19a136781..7f7b112acccb 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -110,7 +110,7 @@ static inline void flush(void)
_DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
AM33XXUART##p)
-static inline void __arch_decomp_setup(unsigned long arch_id)
+static inline void arch_decomp_setup(void)
{
int port = 0;
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
} while (0);
}
-#define arch_decomp_setup() __arch_decomp_setup(arch_id)
-
/*
* nothing to do
*/
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index c1793786aea9..b8b747a9d360 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -47,6 +47,7 @@ void __init orion_clkdev_init(struct clk *tclk)
orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
orion_clkdev_add(NULL, "orion_wdt", tclk);
+ orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", tclk);
}
/* Fill in the resources structure and link it into the platform
@@ -290,10 +291,12 @@ static struct platform_device orion_ge00 = {
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
@@ -342,10 +345,12 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index af95af257301..dfda74fae6f2 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -8,15 +8,22 @@
* warranty of any kind, whether express or implied.
*/
+#define DEBUG
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/leds.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <plat/gpio.h>
/*
* GPIO unit register offsets.
@@ -38,6 +45,7 @@ struct orion_gpio_chip {
unsigned long valid_output;
int mask_offset;
int secondary_irq_base;
+ struct irq_domain *domain;
};
static void __iomem *GPIO_OUT(struct orion_gpio_chip *ochip)
@@ -222,10 +230,10 @@ static int orion_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
struct orion_gpio_chip *ochip =
container_of(chip, struct orion_gpio_chip, chip);
- return ochip->secondary_irq_base + pin;
+ return irq_create_mapping(ochip->domain,
+ ochip->secondary_irq_base + pin);
}
-
/*
* Orion-specific GPIO API extensions.
*/
@@ -353,12 +361,10 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type)
int pin;
u32 u;
- pin = d->irq - gc->irq_base;
+ pin = d->hwirq - ochip->secondary_irq_base;
u = readl(GPIO_IO_CONF(ochip)) & (1 << pin);
if (!u) {
- printk(KERN_ERR "orion gpio_irq_set_type failed "
- "(irq %d, pin %d).\n", d->irq, pin);
return -EINVAL;
}
@@ -397,17 +403,53 @@ static int gpio_irq_set_type(struct irq_data *d, u32 type)
u &= ~(1 << pin); /* rising */
writel(u, GPIO_IN_POL(ochip));
}
-
return 0;
}
-void __init orion_gpio_init(int gpio_base, int ngpio,
- u32 base, int mask_offset, int secondary_irq_base)
+static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct orion_gpio_chip *ochip = irq_get_handler_data(irq);
+ u32 cause, type;
+ int i;
+
+ if (ochip == NULL)
+ return;
+
+ cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip));
+ cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip));
+
+ for (i = 0; i < ochip->chip.ngpio; i++) {
+ int irq;
+
+ irq = ochip->secondary_irq_base + i;
+
+ if (!(cause & (1 << i)))
+ continue;
+
+ type = irqd_get_trigger_type(irq_get_irq_data(irq));
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ /* Swap polarity (race with GPIO line) */
+ u32 polarity;
+
+ polarity = readl(GPIO_IN_POL(ochip));
+ polarity ^= 1 << i;
+ writel(polarity, GPIO_IN_POL(ochip));
+ }
+ generic_handle_irq(irq);
+ }
+}
+
+void __init orion_gpio_init(struct device_node *np,
+ int gpio_base, int ngpio,
+ void __iomem *base, int mask_offset,
+ int secondary_irq_base,
+ int irqs[4])
{
struct orion_gpio_chip *ochip;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
char gc_label[16];
+ int i;
if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
return;
@@ -426,6 +468,10 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
ochip->chip.base = gpio_base;
ochip->chip.ngpio = ngpio;
ochip->chip.can_sleep = 0;
+#ifdef CONFIG_OF
+ ochip->chip.of_node = np;
+#endif
+
spin_lock_init(&ochip->lock);
ochip->base = (void __iomem *)base;
ochip->valid_input = 0;
@@ -435,8 +481,6 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
gpiochip_add(&ochip->chip);
- orion_gpio_chip_count++;
-
/*
* Mask and clear GPIO interrupts.
*/
@@ -444,16 +488,28 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
writel(0, GPIO_EDGE_MASK(ochip));
writel(0, GPIO_LEVEL_MASK(ochip));
- gc = irq_alloc_generic_chip("orion_gpio_irq", 2, secondary_irq_base,
+ /* Setup the interrupt handlers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins. */
+
+ for (i = 0; i < 4; i++) {
+ if (irqs[i]) {
+ irq_set_handler_data(irqs[i], ochip);
+ irq_set_chained_handler(irqs[i], gpio_irq_handler);
+ }
+ }
+
+ gc = irq_alloc_generic_chip("orion_gpio_irq", 2,
+ secondary_irq_base,
ochip->base, handle_level_irq);
gc->private = ochip;
-
ct = gc->chip_types;
ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_irq_set_type;
+ ct->chip.name = ochip->chip.label;
ct++;
ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
@@ -464,41 +520,69 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = gpio_irq_set_type;
ct->handler = handle_edge_irq;
+ ct->chip.name = ochip->chip.label;
irq_setup_generic_chip(gc, IRQ_MSK(ngpio), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
-}
-void orion_gpio_irq_handler(int pinoff)
-{
- struct orion_gpio_chip *ochip;
- u32 cause, type;
- int i;
-
- ochip = orion_gpio_chip_find(pinoff);
- if (ochip == NULL)
- return;
-
- cause = readl(GPIO_DATA_IN(ochip)) & readl(GPIO_LEVEL_MASK(ochip));
- cause |= readl(GPIO_EDGE_CAUSE(ochip)) & readl(GPIO_EDGE_MASK(ochip));
-
- for (i = 0; i < ochip->chip.ngpio; i++) {
- int irq;
+ /* Setup irq domain on top of the generic chip. */
+ ochip->domain = irq_domain_add_legacy(np,
+ ochip->chip.ngpio,
+ ochip->secondary_irq_base,
+ ochip->secondary_irq_base,
+ &irq_domain_simple_ops,
+ ochip);
+ if (!ochip->domain)
+ panic("%s: couldn't allocate irq domain (DT).\n",
+ ochip->chip.label);
- irq = ochip->secondary_irq_base + i;
+ orion_gpio_chip_count++;
+}
- if (!(cause & (1 << i)))
- continue;
+#ifdef CONFIG_OF
+static void __init orion_gpio_of_init_one(struct device_node *np,
+ int irq_gpio_base)
+{
+ int ngpio, gpio_base, mask_offset;
+ void __iomem *base;
+ int ret, i;
+ int irqs[4];
+ int secondary_irq_base;
+
+ ret = of_property_read_u32(np, "ngpio", &ngpio);
+ if (ret)
+ goto out;
+ ret = of_property_read_u32(np, "mask-offset", &mask_offset);
+ if (ret == -EINVAL)
+ mask_offset = 0;
+ else
+ goto out;
+ base = of_iomap(np, 0);
+ if (!base)
+ goto out;
+
+ secondary_irq_base = irq_gpio_base + (32 * orion_gpio_chip_count);
+ gpio_base = 32 * orion_gpio_chip_count;
+
+ /* Get the interrupt numbers. Each chip can have up to 4
+ * interrupt handlers, with each handler dealing with 8 GPIO
+ * pins. */
+
+ for (i = 0; i < 4; i++)
+ irqs[i] = irq_of_parse_and_map(np, i);
+
+ orion_gpio_init(np, gpio_base, ngpio, base, mask_offset,
+ secondary_irq_base, irqs);
+ return;
+out:
+ pr_err("%s: %s: missing mandatory property\n", __func__, np->name);
+}
- type = irqd_get_trigger_type(irq_get_irq_data(irq));
- if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
- /* Swap polarity (race with GPIO line) */
- u32 polarity;
+void __init orion_gpio_of_init(int irq_gpio_base)
+{
+ struct device_node *np;
- polarity = readl(GPIO_IN_POL(ochip));
- polarity ^= 1 << i;
- writel(polarity, GPIO_IN_POL(ochip));
- }
- generic_handle_irq(irq);
- }
+ for_each_compatible_node(np, NULL, "marvell,orion-gpio")
+ orion_gpio_of_init_one(np, irq_gpio_base);
}
+#endif
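
(Aside: a minimal standalone sketch of how the chained gpio_irq_handler() above emulates IRQ_TYPE_EDGE_BOTH. The edge detection follows the possibly inverted input, so after servicing one edge the handler flips that pin's bit in GPIO_IN_POL and the opposite edge is caught on the next interrupt; the code itself notes the unavoidable race with the GPIO line. Registers are simulated with plain variables; illustrative only, not part of the patch.)

#include <stdint.h>
#include <stdio.h>

static uint32_t in_pol;                 /* stands in for GPIO_IN_POL */

static void handle_both_edges(unsigned int pin)
{
        /* Swap polarity for this pin so the next edge is seen too. */
        in_pol ^= 1u << pin;
        printf("pin %u serviced, polarity now %s\n", pin,
               (in_pol & (1u << pin)) ? "inverted" : "normal");
}

int main(void)
{
        handle_both_edges(3);           /* rising edge: invert input */
        handle_both_edges(3);           /* falling edge: back to normal */
        return 0;
}
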
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e00fdb213609..ae2377ef63e5 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
diff --git a/arch/arm/plat-orion/include/plat/gpio.h b/arch/arm/plat-orion/include/plat/gpio.h
index bec0c98ce41f..81c6fc8a7b28 100644
--- a/arch/arm/plat-orion/include/plat/gpio.h
+++ b/arch/arm/plat-orion/include/plat/gpio.h
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/types.h>
-
+#include <linux/irqdomain.h>
/*
* Orion-specific GPIO API extensions.
*/
@@ -27,13 +27,11 @@ int orion_gpio_led_blink_set(unsigned gpio, int state,
void orion_gpio_set_valid(unsigned pin, int mode);
/* Initialize gpiolib. */
-void __init orion_gpio_init(int gpio_base, int ngpio,
- u32 base, int mask_offset, int secondary_irq_base);
-
-/*
- * GPIO interrupt handling.
- */
-void orion_gpio_irq_handler(int irqoff);
-
+void __init orion_gpio_init(struct device_node *np,
+ int gpio_base, int ngpio,
+ void __iomem *base, int mask_offset,
+ int secondary_irq_base,
+ int irq[4]);
+void __init orion_gpio_of_init(int irq_gpio_base);
#endif
diff --git a/arch/arm/plat-orion/include/plat/irq.h b/arch/arm/plat-orion/include/plat/irq.h
index f05eeab94968..50547e417936 100644
--- a/arch/arm/plat-orion/include/plat/irq.h
+++ b/arch/arm/plat-orion/include/plat/irq.h
@@ -12,6 +12,5 @@
#define __PLAT_IRQ_H
void orion_irq_init(unsigned int irq_start, void __iomem *maskaddr);
-
-
+void __init orion_dt_init_irq(void);
#endif
diff --git a/arch/arm/plat-orion/irq.c b/arch/arm/plat-orion/irq.c
index 2d5b9c1ef389..d751964def4c 100644
--- a/arch/arm/plat-orion/irq.c
+++ b/arch/arm/plat-orion/irq.c
@@ -11,8 +11,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <plat/irq.h>
+#include <plat/gpio.h>
void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
{
@@ -32,3 +36,39 @@ void __init orion_irq_init(unsigned int irq_start, void __iomem *maskaddr)
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
}
+
+#ifdef CONFIG_OF
+static int __init orion_add_irq_domain(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ int i = 0, irq_gpio;
+ void __iomem *base;
+
+ do {
+ base = of_iomap(np, i);
+ if (base) {
+ orion_irq_init(i * 32, base);
+ i++;
+ }
+ } while (base);
+
+ irq_domain_add_legacy(np, i * 32, 0, 0,
+ &irq_domain_simple_ops, NULL);
+
+ irq_gpio = i * 32;
+ orion_gpio_of_init(irq_gpio);
+
+ return 0;
+}
+
+static const struct of_device_id orion_irq_match[] = {
+ { .compatible = "marvell,orion-intc",
+ .data = orion_add_irq_domain, },
+ {},
+};
+
+void __init orion_dt_init_irq(void)
+{
+ of_irq_init(orion_irq_match);
+}
+#endif
diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile
index f302d048392d..af8e484001e5 100644
--- a/arch/arm/plat-pxa/Makefile
+++ b/arch/arm/plat-pxa/Makefile
@@ -8,5 +8,4 @@ obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_PXA95x) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o
-obj-$(CONFIG_HAVE_PWM) += pwm.o
obj-$(CONFIG_PXA_SSP) += ssp.o
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
deleted file mode 100644
index ef32686feef9..000000000000
--- a/arch/arm/plat-pxa/pwm.c
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * linux/arch/arm/mach-pxa/pwm.c
- *
- * simple driver for PWM (Pulse Width Modulator) controller
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * 2008-02-13 initial version
- * eric miao <eric.miao@marvell.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/pwm.h>
-
-#include <asm/div64.h>
-
-#define HAS_SECONDARY_PWM 0x10
-#define PWM_ID_BASE(d) ((d) & 0xf)
-
-static const struct platform_device_id pwm_id_table[] = {
- /* PWM has_secondary_pwm? */
- { "pxa25x-pwm", 0 },
- { "pxa27x-pwm", 0 | HAS_SECONDARY_PWM },
- { "pxa168-pwm", 1 },
- { "pxa910-pwm", 1 },
- { },
-};
-MODULE_DEVICE_TABLE(platform, pwm_id_table);
-
-/* PWM registers and bits definitions */
-#define PWMCR (0x00)
-#define PWMDCR (0x04)
-#define PWMPCR (0x08)
-
-#define PWMCR_SD (1 << 6)
-#define PWMDCR_FD (1 << 10)
-
-struct pwm_device {
- struct list_head node;
- struct pwm_device *secondary;
- struct platform_device *pdev;
-
- const char *label;
- struct clk *clk;
- int clk_enabled;
- void __iomem *mmio_base;
-
- unsigned int use_count;
- unsigned int pwm_id;
-};
-
-/*
- * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
- * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
- */
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- unsigned long long c;
- unsigned long period_cycles, prescale, pv, dc;
-
- if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
- return -EINVAL;
-
- c = clk_get_rate(pwm->clk);
- c = c * period_ns;
- do_div(c, 1000000000);
- period_cycles = c;
-
- if (period_cycles < 1)
- period_cycles = 1;
- prescale = (period_cycles - 1) / 1024;
- pv = period_cycles / (prescale + 1) - 1;
-
- if (prescale > 63)
- return -EINVAL;
-
- if (duty_ns == period_ns)
- dc = PWMDCR_FD;
- else
- dc = (pv + 1) * duty_ns / period_ns;
-
- /* NOTE: the clock to PWM has to be enabled first
- * before writing to the registers
- */
- clk_enable(pwm->clk);
- __raw_writel(prescale, pwm->mmio_base + PWMCR);
- __raw_writel(dc, pwm->mmio_base + PWMDCR);
- __raw_writel(pv, pwm->mmio_base + PWMPCR);
- clk_disable(pwm->clk);
-
- return 0;
-}
-EXPORT_SYMBOL(pwm_config);
-
-int pwm_enable(struct pwm_device *pwm)
-{
- int rc = 0;
-
- if (!pwm->clk_enabled) {
- rc = clk_enable(pwm->clk);
- if (!rc)
- pwm->clk_enabled = 1;
- }
- return rc;
-}
-EXPORT_SYMBOL(pwm_enable);
-
-void pwm_disable(struct pwm_device *pwm)
-{
- if (pwm->clk_enabled) {
- clk_disable(pwm->clk);
- pwm->clk_enabled = 0;
- }
-}
-EXPORT_SYMBOL(pwm_disable);
-
-static DEFINE_MUTEX(pwm_lock);
-static LIST_HEAD(pwm_list);
-
-struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- struct pwm_device *pwm;
- int found = 0;
-
- mutex_lock(&pwm_lock);
-
- list_for_each_entry(pwm, &pwm_list, node) {
- if (pwm->pwm_id == pwm_id) {
- found = 1;
- break;
- }
- }
-
- if (found) {
- if (pwm->use_count == 0) {
- pwm->use_count++;
- pwm->label = label;
- } else
- pwm = ERR_PTR(-EBUSY);
- } else
- pwm = ERR_PTR(-ENOENT);
-
- mutex_unlock(&pwm_lock);
- return pwm;
-}
-EXPORT_SYMBOL(pwm_request);
-
-void pwm_free(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
-
- if (pwm->use_count) {
- pwm->use_count--;
- pwm->label = NULL;
- } else
- pr_warning("PWM device already freed\n");
-
- mutex_unlock(&pwm_lock);
-}
-EXPORT_SYMBOL(pwm_free);
-
-static inline void __add_pwm(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
- list_add_tail(&pwm->node, &pwm_list);
- mutex_unlock(&pwm_lock);
-}
-
-static int __devinit pwm_probe(struct platform_device *pdev)
-{
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct pwm_device *pwm, *secondary = NULL;
- struct resource *r;
- int ret = 0;
-
- pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
- if (pwm == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- pwm->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(pwm->clk)) {
- ret = PTR_ERR(pwm->clk);
- goto err_free;
- }
- pwm->clk_enabled = 0;
-
- pwm->use_count = 0;
- pwm->pwm_id = PWM_ID_BASE(id->driver_data) + pdev->id;
- pwm->pdev = pdev;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- ret = -ENODEV;
- goto err_free_clk;
- }
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (r == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto err_free_clk;
- }
-
- pwm->mmio_base = ioremap(r->start, resource_size(r));
- if (pwm->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -ENODEV;
- goto err_free_mem;
- }
-
- if (id->driver_data & HAS_SECONDARY_PWM) {
- secondary = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
- if (secondary == NULL) {
- ret = -ENOMEM;
- goto err_free_mem;
- }
-
- *secondary = *pwm;
- pwm->secondary = secondary;
-
- /* registers for the second PWM has offset of 0x10 */
- secondary->mmio_base = pwm->mmio_base + 0x10;
- secondary->pwm_id = pdev->id + 2;
- }
-
- __add_pwm(pwm);
- if (secondary)
- __add_pwm(secondary);
-
- platform_set_drvdata(pdev, pwm);
- return 0;
-
-err_free_mem:
- release_mem_region(r->start, resource_size(r));
-err_free_clk:
- clk_put(pwm->clk);
-err_free:
- kfree(pwm);
- return ret;
-}
-
-static int __devexit pwm_remove(struct platform_device *pdev)
-{
- struct pwm_device *pwm;
- struct resource *r;
-
- pwm = platform_get_drvdata(pdev);
- if (pwm == NULL)
- return -ENODEV;
-
- mutex_lock(&pwm_lock);
-
- if (pwm->secondary) {
- list_del(&pwm->secondary->node);
- kfree(pwm->secondary);
- }
-
- list_del(&pwm->node);
- mutex_unlock(&pwm_lock);
-
- iounmap(pwm->mmio_base);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
- clk_put(pwm->clk);
- kfree(pwm);
- return 0;
-}
-
-static struct platform_driver pwm_driver = {
- .driver = {
- .name = "pxa25x-pwm",
- .owner = THIS_MODULE,
- },
- .probe = pwm_probe,
- .remove = __devexit_p(pwm_remove),
- .id_table = pwm_id_table,
-};
-
-static int __init pwm_init(void)
-{
- return platform_driver_register(&pwm_driver);
-}
-arch_initcall(pwm_init);
-
-static void __exit pwm_exit(void)
-{
- platform_driver_unregister(&pwm_driver);
-}
-module_exit(pwm_exit);
-
-MODULE_LICENSE("GPL v2");
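
The dedicated PXA pwm.c above goes away because the tree now has a generic PWM framework under drivers/pwm, which provides an equivalent PXA driver and keeps the legacy consumer calls in <linux/pwm.h>. A minimal consumer sketch follows; the pwm_id, label and period values are arbitrary examples, and the calls shown are the legacy API the framework still exports.

#include <linux/err.h>
#include <linux/pwm.h>

static struct pwm_device *example_bl_pwm;

static int example_backlight_on(void)
{
	example_bl_pwm = pwm_request(0, "backlight");	/* id 0: placeholder */
	if (IS_ERR(example_bl_pwm))
		return PTR_ERR(example_bl_pwm);

	pwm_config(example_bl_pwm, 500000, 1000000);	/* 50% duty, 1 ms period */
	return pwm_enable(example_bl_pwm);
}

static void example_backlight_off(void)
{
	pwm_disable(example_bl_pwm);
	pwm_free(example_bl_pwm);
}
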
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 28f898f75380..db98e7021f0d 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
* when necessary.
*/
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 7aca31c1df1f..9c3b90c3538e 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI
config S3C24XX_PWM
bool "PWM device support"
- select HAVE_PWM
+ select PWM
+ select PWM_SAMSUNG
help
Support for exporting the PWM timer blocks via the pwm device
system
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index b78717496677..9e40e8d00740 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -59,7 +59,3 @@ obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o
obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o
-
-# PWM support
-
-obj-$(CONFIG_HAVE_PWM) += pwm.o
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 74e31ce35538..fc49f3dabd76 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,6 +32,8 @@
#include <linux/platform_data/s3c-hsudc.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <media/s5p_hdmi.h>
+
#include <asm/irq.h>
#include <asm/pmu.h>
#include <asm/mach/arch.h>
@@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
if (!pd) {
pd = &default_i2c_data;
- if (soc_is_exynos4210())
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
pd->bus_num = 8;
else if (soc_is_s5pv210())
pd->bus_num = 3;
@@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
&s5p_device_i2c_hdmiphy);
}
+
+struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
+
+void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus)
+{
+ struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
+
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
+ pd->hdmiphy_bus = 8;
+ else if (soc_is_s5pv210())
+ pd->hdmiphy_bus = 3;
+ else
+ pd->hdmiphy_bus = 0;
+
+ pd->hdmiphy_info = hdmiphy_info;
+ pd->mhl_info = mhl_info;
+ pd->mhl_bus = mhl_bus;
+
+ s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
+ &s5p_device_hdmi);
+}
+
#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
/* I2S */
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644
index 000000000000..331d046ac2c5
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/hdmi.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PLAT_SAMSUNG_HDMI_H
+#define __PLAT_SAMSUNG_HDMI_H __FILE__
+
+extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus);
+
+#endif /* __PLAT_SAMSUNG_HDMI_H */
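
s5p_hdmi_set_platdata() lets a board hand the HDMI driver its HDMIPHY (and optional MHL) I2C details while the helper picks the right controller bus per SoC. A board-file sketch follows; the device name and address are placeholders, not values taken from a real board.

#include <linux/i2c.h>
#include <linux/init.h>
#include <plat/hdmi.h>

static struct i2c_board_info example_hdmiphy_info = {
	I2C_BOARD_INFO("hdmiphy-example", 0x38),	/* name/addr: placeholders */
};

static void __init example_tv_init(void)
{
	/* No MHL bridge fitted, so pass NULL and let mhl_bus be ignored. */
	s5p_hdmi_set_platdata(&example_hdmiphy_info, NULL, 0);
}
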
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 64ab65f0fdbc..15070284343e 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
#ifdef CONFIG_SAMSUNG_PM_DEBUG
-struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index 2bc6b54460a8..eb6590ded40d 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -14,8 +14,8 @@
#ifndef __PLAT_PL080_H
#define __PLAT_PL080_H
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
#endif /* __PLAT_PL080_H */
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index 12cf27f935f9..cfa1199d0f4a 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -27,9 +27,8 @@ struct {
unsigned char val;
} signals[16] = {{0, 0}, };
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
{
- const struct pl08x_channel_data *cd = ch->cd;
unsigned int signal = cd->min_signal, val;
unsigned long flags;
@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
return signal;
}
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
{
- const struct pl08x_channel_data *cd = ch->cd;
unsigned long flags;
spin_lock_irqsave(&lock, flags);
/* if signal is not used */
- if (!signals[cd->min_signal].busy)
+ if (!signals[signal].busy)
BUG();
- signals[cd->min_signal].busy--;
+ signals[signal].busy--;
spin_unlock_irqrestore(&lock, flags);
}
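
pl080_get_signal()/pl080_put_signal() now operate on the pl08x_channel_data directly and make the caller carry the allocated signal back to the release call, instead of digging both out of a pl08x_dma_chan. A caller pairing the two might look like the sketch below; the surrounding function and its error handling are hypothetical.

static int example_use_mux(const struct pl08x_channel_data *cd)
{
	int signal = pl080_get_signal(cd);

	if (signal < 0)
		return signal;

	/* ... route the DMA request line, run the transfer ... */

	pl080_put_signal(cd, signal);
	return 0;
}
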
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 49c7db48c7f1..d7c5c171f5aa 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -85,7 +85,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
- gic_raise_softirq(cpumask_of(cpu), 1);
+ gic_raise_softirq(cpumask_of(cpu), 0);
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 4fa9903b83cf..cc926c985981 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -7,18 +7,20 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * Basic entry code, called from the kernel's undefined instruction trap.
- * r0 = faulted instruction
- * r5 = faulted PC+4
- * r9 = successful return
- * r10 = thread_info structure
- * lr = failure return
*/
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
#include "../kernel/entry-header.S"
+@ VFP entry point.
+@
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
+@ r10 = this thread's thread_info structure
+@ lr = unrecognised instruction return address
+@ IRQs disabled.
+@
ENTRY(do_vfp)
#ifdef CONFIG_PREEMPT
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 2d30c7f6edd3..ea0349f63586 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -16,6 +16,7 @@
*/
#include <asm/thread_info.h>
#include <asm/vfpmacros.h>
+#include <linux/kern_levels.h>
#include "../kernel/entry-header.S"
.macro DBGSTR, str
@@ -24,7 +25,7 @@
add r0, pc, #4
bl printk
b 1f
- .asciz "<7>VFP: \str\n"
+ .asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
@@ -37,7 +38,7 @@
add r0, pc, #4
bl printk
b 1f
- .asciz "<7>VFP: \str\n"
+ .asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
@@ -52,7 +53,7 @@
add r0, pc, #4
bl printk
b 1f
- .asciz "<7>VFP: \str\n"
+ .asciz KERN_DEBUG "VFP: \str\n"
.balign 4
1: ldmfd sp!, {r0-r3, ip, lr}
#endif
@@ -61,13 +62,13 @@
@ VFP hardware support entry point.
@
-@ r0 = faulted instruction
-@ r2 = faulted PC+4
-@ r9 = successful return
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
@ r10 = vfp_state union
@ r11 = CPU number
-@ lr = failure return
-
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
@@ -161,9 +162,12 @@ vfp_hw_state_valid:
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
- VFPFMXR FPEXC, r1 @ restore FPEXC last
- sub r2, r2, #4
- str r2, [sp, #S_PC] @ retry the instruction
+ VFPFMXR FPEXC, r1 @ Restore FPEXC last
+ sub r2, r2, #4 @ Retry current instruction - if Thumb
+ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
#ifdef CONFIG_PREEMPT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count
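
The string changes earlier in this file replace the hard-coded "<7>" prefix with KERN_DEBUG from <linux/kern_levels.h>: printk level markers are now the two-byte SOH form, so a literal "<7>" would simply be printed instead of selecting a loglevel. The assembly now pastes the macro into its .asciz strings, which is the same idiom as this C sketch (function name is made up).

#include <linux/kern_levels.h>
#include <linux/printk.h>

static void example_vfp_debug(const char *what)
{
	/* KERN_DEBUG expands to "\001" "7" and is concatenated into the
	 * format string at build time, just like the .asciz lines above. */
	printk(KERN_DEBUG "VFP: %s\n", what);
}
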
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 586961929e96..c834b32af275 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -457,10 +457,16 @@ static int vfp_pm_suspend(void)
/* disable, just in case */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ } else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+ fmxr(FPEXC, fpexc | FPEXC_EN);
+ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+ fmxr(FPEXC, fpexc);
+#endif
}
/* clear any information we had about last context state */
- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
+ vfp_current_hw_state[ti->cpu] = NULL;
return 0;
}
@@ -713,8 +719,10 @@ static int __init vfp_init(void)
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
#endif
+#ifdef CONFIG_VFPv3
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
+#endif
}
}
return 0;
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 71d38c76726c..5ade51c8a87f 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -12,6 +12,7 @@ config AVR32
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_SHOW
select ARCH_HAVE_CUSTOM_GPIO_H
+ select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CLOCKEVENTS
help
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index dc5263321480..6c80aba7bf96 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -97,7 +97,7 @@ static struct atmel_nand_data atstk1006_nand_data __initdata = {
.enable_pin = GPIO_PIN_PB(29),
.ecc_mode = NAND_ECC_SOFT,
.parts = nand_partitions,
- .num_parts = ARRAY_SIZE(num_partitions),
+ .num_parts = ARRAY_SIZE(nand_partitions),
};
#endif
diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
index b7f5c6870107..479330b89796 100644
--- a/arch/avr32/include/asm/kmap_types.h
+++ b/arch/avr32/include/asm/kmap_types.h
@@ -2,29 +2,9 @@
#define __ASM_AVR32_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
+# define KM_TYPE_NR 29
#else
-# define D(n)
+# define KM_TYPE_NR 14
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_PTE2,
-D(10) KM_IRQ0,
-D(11) KM_IRQ1,
-D(12) KM_SOFTIRQ0,
-D(13) KM_SOFTIRQ1,
-D(14) KM_TYPE_NR
-};
-
-#undef D
-
#endif /* __ASM_AVR32_KMAP_TYPES_H */
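
With the stacked kmap_atomic() implementation the per-use km_type slots are gone and only the number of slots matters, so the enum collapses to a KM_TYPE_NR define. Callers look like the sketch below; the helper name is made up for illustration.

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* no km_type argument any more */

	memcpy(dst, src, len);
	kunmap_atomic(src);
}
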
diff --git a/arch/avr32/include/asm/unistd.h b/arch/avr32/include/asm/unistd.h
index f714544e5560..1358e366f4be 100644
--- a/arch/avr32/include/asm/unistd.h
+++ b/arch/avr32/include/asm/unistd.h
@@ -318,7 +318,6 @@
/* SMP stuff */
#define __IGNORE_getcpu
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
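
Across several architectures in this series the old __ARCH_WANT_IPC_PARSE_VERSION define in <asm/unistd.h> is replaced by the Kconfig symbol selected above, so common ipc code can test a CONFIG_ option instead of poking at per-arch headers. Roughly, the generic side keys off it like the sketch below (the real declaration lives in ipc/util.h).

#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/* old-style callers encode IPC_OLD/IPC_64 in the high bits of cmd */
int ipc_parse_version(int *cmd);
#else
#define ipc_parse_version(cmd)	IPC_64
#endif
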
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index f7040a1e399f..b92e60958617 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -61,10 +61,10 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
const struct exception_table_entry *fixup;
unsigned long address;
unsigned long page;
- int writeaccess;
long signr;
int code;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (notify_page_fault(regs, ecr))
return;
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
local_irq_enable();
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -104,7 +105,6 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
*/
good_area:
code = SEGV_ACCERR;
- writeaccess = 0;
switch (ecr) {
case ECR_PROTECTION_X:
@@ -121,7 +121,7 @@ good_area:
case ECR_TLB_MISS_W:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
- writeaccess = 1;
+ flags |= FAULT_FLAG_WRITE;
break;
default:
panic("Unhandled case %lu in do_page_fault!", ecr);
@@ -132,7 +132,11 @@ good_area:
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -140,10 +144,23 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would have
+ * already released it in __lock_page_or_retry() in
+ * mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
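
The avr32 fault handler above adopts the now-common retry protocol: start with FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and when handle_mm_fault() returns VM_FAULT_RETRY (having already dropped mmap_sem in __lock_page_or_retry()), clear ALLOW_RETRY and walk the VMA again. Below is a distilled, illustrative-only sketch of that protocol which leaves out the bad-area/OOM/SIGBUS paths.

#include <linux/mm.h>
#include <linux/sched.h>

static void example_handle_fault(struct mm_struct *mm, unsigned long address,
				 int is_write)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address)
		goto out;			/* real code: bad_area handling */

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was already released for us */
		if (fatal_signal_pending(current))
			return;
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;			/* retake mmap_sem, re-find vma */
	}
out:
	up_read(&mm->mmap_sem);
}
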
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 9b765107e15c..f34861920634 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -33,6 +33,7 @@ config BLACKFIN
select HAVE_PERF_EVENTS
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_GENERIC_HARDIRQS
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
@@ -1002,16 +1003,6 @@ config BFIN_GPTIMERS
To compile this driver as a module, choose M here: the module
will be called gptimers.
-config HAVE_PWM
- tristate "Enable PWM API support"
- depends on BFIN_GPTIMERS
- help
- Enable support for the Pulse Width Modulation framework (as
- found in linux/pwm.h).
-
- To compile this driver as a module, choose M here: the module
- will be called pwm.
-
choice
prompt "Uncached DMA region"
default DMA_UNCACHED_1M
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 3287222cba34..5b2a0748d7d3 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -434,7 +434,6 @@
#define __IGNORE_getcpu
#ifdef __KERNEL__
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 08e6625106be..735f24e07425 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -21,7 +21,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
-obj-$(CONFIG_HAVE_PWM) += pwm.o
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c
deleted file mode 100644
index 33f5942733bd..000000000000
--- a/arch/blackfin/kernel/pwm.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Blackfin Pulse Width Modulation (PWM) core
- *
- * Copyright (c) 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/module.h>
-#include <linux/pwm.h>
-#include <linux/slab.h>
-
-#include <asm/gptimers.h>
-#include <asm/portmux.h>
-
-struct pwm_device {
- unsigned id;
- unsigned short pin;
-};
-
-static const unsigned short pwm_to_gptimer_per[] = {
- P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
- P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
-};
-
-struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- struct pwm_device *pwm;
- int ret;
-
- /* XXX: pwm_id really should be unsigned */
- if (pwm_id < 0)
- return NULL;
-
- pwm = kzalloc(sizeof(*pwm), GFP_KERNEL);
- if (!pwm)
- return pwm;
-
- pwm->id = pwm_id;
- if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per))
- goto err;
-
- pwm->pin = pwm_to_gptimer_per[pwm->id];
- ret = peripheral_request(pwm->pin, label);
- if (ret)
- goto err;
-
- return pwm;
- err:
- kfree(pwm);
- return NULL;
-}
-EXPORT_SYMBOL(pwm_request);
-
-void pwm_free(struct pwm_device *pwm)
-{
- peripheral_free(pwm->pin);
- kfree(pwm);
-}
-EXPORT_SYMBOL(pwm_free);
-
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
-{
- unsigned long period, duty;
- unsigned long long val;
-
- if (duty_ns < 0 || duty_ns > period_ns)
- return -EINVAL;
-
- val = (unsigned long long)get_sclk() * period_ns;
- do_div(val, NSEC_PER_SEC);
- period = val;
-
- val = (unsigned long long)period * duty_ns;
- do_div(val, period_ns);
- duty = period - val;
-
- if (duty >= period)
- duty = period - 1;
-
- set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
- set_gptimer_pwidth(pwm->id, duty);
- set_gptimer_period(pwm->id, period);
-
- return 0;
-}
-EXPORT_SYMBOL(pwm_config);
-
-int pwm_enable(struct pwm_device *pwm)
-{
- enable_gptimer(pwm->id);
- return 0;
-}
-EXPORT_SYMBOL(pwm_enable);
-
-void pwm_disable(struct pwm_device *pwm)
-{
- disable_gptimer(pwm->id);
-}
-EXPORT_SYMBOL(pwm_disable);
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ada8f0fc71e4..fb96e607adcf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
-unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 052f81a76239..983c859e40b7 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,6 +6,7 @@
config C6X
def_bool y
select CLKDEV_LOOKUP
+ select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 6d521d96d941..09c5a0f5f4d1 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -1,7 +1,7 @@
/*
* Port on Texas Instruments TMS320C6x architecture
*
- * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated
* Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -16,9 +16,14 @@
/*
* Cache line size
*/
-#define L1D_CACHE_BYTES 64
-#define L1P_CACHE_BYTES 32
-#define L2_CACHE_BYTES 128
+#define L1D_CACHE_SHIFT 6
+#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT)
+
+#define L1P_CACHE_SHIFT 5
+#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT)
+
+#define L2_CACHE_SHIFT 7
+#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
/*
* L2 used as cache
@@ -29,7 +34,8 @@
* For practical reasons the L1_CACHE_BYTES defines should not be smaller than
* the L2 line size
*/
-#define L1_CACHE_BYTES L2_CACHE_BYTES
+#define L1_CACHE_SHIFT L2_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L2_CACHE_ALIGN_LOW(x) \
(((x) & ~(L2_CACHE_BYTES - 1)))
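
Defining the cache sizes via *_SHIFT values keeps them consistent with generic code, which expects the shift form as well as the byte count (for instance, <linux/cache.h> derives INTERNODE_CACHE_SHIFT from L1_CACHE_SHIFT). A small illustration of the dependency; the struct and helper are hypothetical.

#include <linux/cache.h>

struct example_stats {
	unsigned long rx_packets;
} ____cacheline_aligned;		/* padded/aligned to L1_CACHE_BYTES */

static unsigned long example_round_up(unsigned long len)
{
	return L1_CACHE_ALIGN(len);	/* rounds up to 128 bytes on C6x */
}
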
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index bb344650a14f..e92215428a37 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -42,6 +42,7 @@ config CRIS
select HAVE_IDE
select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
+ select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
select GENERIC_SMP_IDLE_THREAD if ETRAX_ARCH_V32
diff --git a/arch/cris/include/asm/unistd.h b/arch/cris/include/asm/unistd.h
index f921b8b0f97e..51873a446f87 100644
--- a/arch/cris/include/asm/unistd.h
+++ b/arch/cris/include/asm/unistd.h
@@ -347,7 +347,6 @@
#include <arch/unistd.h>
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index a685910d2d5c..971c0a19facb 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -9,6 +9,7 @@ config FRV
select GENERIC_IRQ_SHOW
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_CPU_DEVICES
+ select ARCH_WANT_IPC_PARSE_VERSION
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/cpumask.h b/arch/frv/include/asm/cpumask.h
deleted file mode 100644
index d999c20c84d2..000000000000
--- a/arch/frv/include/asm/cpumask.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_CPUMASK_H
-#define _ASM_CPUMASK_H
-
-#include <asm-generic/cpumask.h>
-
-#endif /* _ASM_CPUMASK_H */
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index 716956a5317b..b3adc93611f3 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -76,15 +76,16 @@ extern struct page *kmap_atomic_to_page(void *ptr);
#ifndef __ASSEMBLY__
-#define __kmap_atomic_primary(type, paddr, ampr) \
+#define __kmap_atomic_primary(cached, paddr, ampr) \
({ \
unsigned long damlr, dampr; \
\
dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
\
- if (type != __KM_CACHE) \
+ if (!cached) \
asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
else \
+ /* cache flush page attachment point */ \
asm volatile("movgs %0,iampr"#ampr"\n" \
"movgs %0,dampr"#ampr"\n" \
:: "r"(dampr) : "memory" \
@@ -112,29 +113,20 @@ extern struct page *kmap_atomic_to_page(void *ptr);
(void *) damlr; \
})
-static inline void *kmap_atomic_primary(struct page *page, enum km_type type)
+static inline void *kmap_atomic_primary(struct page *page)
{
unsigned long paddr;
pagefault_disable();
paddr = page_to_phys(page);
- switch (type) {
- case 0: return __kmap_atomic_primary(0, paddr, 2);
- case 1: return __kmap_atomic_primary(1, paddr, 3);
- case 2: return __kmap_atomic_primary(2, paddr, 4);
- case 3: return __kmap_atomic_primary(3, paddr, 5);
-
- default:
- BUG();
- return NULL;
- }
+ return __kmap_atomic_primary(1, paddr, 2);
}
-#define __kunmap_atomic_primary(type, ampr) \
+#define __kunmap_atomic_primary(cached, ampr) \
do { \
asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
- if (type == __KM_CACHE) \
+ if (cached) \
asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
} while(0)
@@ -143,17 +135,9 @@ do { \
asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
} while(0)
-static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
+static inline void kunmap_atomic_primary(void *kvaddr)
{
- switch (type) {
- case 0: __kunmap_atomic_primary(0, 2); break;
- case 1: __kunmap_atomic_primary(1, 3); break;
- case 2: __kunmap_atomic_primary(2, 4); break;
- case 3: __kunmap_atomic_primary(3, 5); break;
-
- default:
- BUG();
- }
+ __kunmap_atomic_primary(1, 2);
pagefault_enable();
}
diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
index f8e16b2a5804..43901f220963 100644
--- a/arch/frv/include/asm/kmap_types.h
+++ b/arch/frv/include/asm/kmap_types.h
@@ -2,28 +2,6 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-enum km_type {
- /* arch specific kmaps - change the numbers attached to these at your peril */
- __KM_CACHE, /* cache flush page attachment point */
- __KM_PGD, /* current page directory */
- __KM_ITLB_PTD, /* current instruction TLB miss page table lookup */
- __KM_DTLB_PTD, /* current data TLB miss page table lookup */
-
- /* general kmaps */
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#define KM_TYPE_NR 17
#endif
diff --git a/arch/frv/include/asm/unistd.h b/arch/frv/include/asm/unistd.h
index a569dff7cd59..67f23a311db6 100644
--- a/arch/frv/include/asm/unistd.h
+++ b/arch/frv/include/asm/unistd.h
@@ -349,7 +349,6 @@
#define NR_syscalls 338
-#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/frv/kernel/kernel_thread.S b/arch/frv/kernel/kernel_thread.S
index 4531c830d20b..f0e52943f923 100644
--- a/arch/frv/kernel/kernel_thread.S
+++ b/arch/frv/kernel/kernel_thread.S
@@ -10,10 +10,10 @@
*/
#include <linux/linkage.h>
+#include <linux/kern_levels.h>
#include <asm/unistd.h>
#define CLONE_VM 0x00000100 /* set if VM shared between processes */
-#define KERN_ERR "<3>"
.section .rodata
kernel_thread_emsg:
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 4f8d8bcdc7de..82478979ac9a 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -62,14 +62,14 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
dampr2 = __get_DAMPR(2);
for (i = 0; i < nents; i++) {
- vaddr = kmap_atomic_primary(sg_page(&sg[i]), __KM_CACHE);
+ vaddr = kmap_atomic_primary(sg_page(&sg[i]));
frv_dcache_writeback((unsigned long) vaddr,
(unsigned long) vaddr + PAGE_SIZE);
}
- kunmap_atomic_primary(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr);
if (dampr2) {
__set_DAMPR(2, dampr2);
__set_IAMPR(2, dampr2);
diff --git a/arch/frv/mm/cache-page.c b/arch/frv/mm/cache-page.c
index b24ade27a0f0..8e09dae0ec3f 100644
--- a/arch/frv/mm/cache-page.c
+++ b/arch/frv/mm/cache-page.c
@@ -26,11 +26,11 @@ void flush_dcache_page(struct page *page)
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic_primary(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page);
frv_dcache_writeback((unsigned long) vaddr, (unsigned long) vaddr + PAGE_SIZE);
- kunmap_atomic_primary(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr);
if (dampr2) {
__set_DAMPR(2, dampr2);
@@ -54,12 +54,12 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
dampr2 = __get_DAMPR(2);
- vaddr = kmap_atomic_primary(page, __KM_CACHE);
+ vaddr = kmap_atomic_primary(page);
start = (start & ~PAGE_MASK) | (unsigned long) vaddr;
frv_cache_wback_inv(start, start + len);
- kunmap_atomic_primary(vaddr, __KM_CACHE);
+ kunmap_atomic_primary(vaddr);
if (dampr2) {
__set_DAMPR(2, dampr2);
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index 31902c9d5be5..bed9a9bd3c10 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -50,11 +50,11 @@ void *kmap_atomic(struct page *page)
/*
* The first 4 primary maps are reserved for architecture code
*/
- case 0: return __kmap_atomic_primary(4, paddr, 6);
- case 1: return __kmap_atomic_primary(5, paddr, 7);
- case 2: return __kmap_atomic_primary(6, paddr, 8);
- case 3: return __kmap_atomic_primary(7, paddr, 9);
- case 4: return __kmap_atomic_primary(8, paddr, 10);
+ case 0: return __kmap_atomic_primary(0, paddr, 6);
+ case 1: return __kmap_atomic_primary(0, paddr, 7);
+ case 2: return __kmap_atomic_primary(0, paddr, 8);
+ case 3: return __kmap_atomic_primary(0, paddr, 9);
+ case 4: return __kmap_atomic_primary(0, paddr, 10);
case 5 ... 5 + NR_TLB_LINES - 1:
return __kmap_atomic_secondary(type - 5, paddr);
@@ -70,11 +70,11 @@ void __kunmap_atomic(void *kvaddr)
{
int type = kmap_atomic_idx();
switch (type) {
- case 0: __kunmap_atomic_primary(4, 6); break;
- case 1: __kunmap_atomic_primary(5, 7); break;
- case 2: __kunmap_atomic_primary(6, 8); break;
- case 3: __kunmap_atomic_primary(7, 9); break;
- case 4: __kunmap_atomic_primary(8, 10); break;
+ case 0: __kunmap_atomic_primary(0, 6); break;
+ case 1: __kunmap_atomic_primary(0, 7); break;
+ case 2: __kunmap_atomic_primary(0, 8); break;
+ case 3: __kunmap_atomic_primary(0, 9); break;
+ case 4: __kunmap_atomic_primary(0, 10); break;
case 5 ... 5 + NR_TLB_LINES - 1:
__kunmap_atomic_secondary(type - 5, kvaddr);
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 56e890df5053..5e8a0d9a09ce 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -3,6 +3,7 @@ config H8300
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
+ select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
index 718511303b4e..5cd882801d79 100644
--- a/arch/h8300/include/asm/unistd.h
+++ b/arch/h8300/include/asm/unistd.h
@@ -331,7 +331,6 @@
#define NR_syscalls 321
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index 9aa17f1917ea..06906427c0ac 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -7,7 +7,6 @@ header-y += user.h
generic-y += auxvec.h
generic-y += bug.h
generic-y += bugs.h
-generic-y += cpumask.h
generic-y += cputime.h
generic-y += current.h
generic-y += device.h
@@ -23,7 +22,6 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += iomap.h
generic-y += ipcbuf.h
-generic-y += ipc.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += kmap_types.h
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 8186ec5ea151..310cf5781fad 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -126,6 +126,7 @@ config AUDIT_ARCH
menuconfig PARAVIRT_GUEST
bool "Paravirtualized guest support"
+ depends on BROKEN
help
Say Y here to get to see options related to running Linux under
various hypervisors. This option alone does not add any kernel code.
@@ -138,8 +139,6 @@ config PARAVIRT
bool "Enable paravirtualization code"
depends on PARAVIRT_GUEST
default y
- bool
- default y
help
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 954d81e2e837..7913695b2fcb 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y
-CONFIG_MISC_DEVICES=y
CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 91c41ecfa6d9..f8e913365423 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y
-CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 7d9116600a36..6e6fe1839f5d 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -17,8 +17,8 @@
#include <asm/intrinsics.h>
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
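
Dropping the compound-literal cast matters because a compound literal is not a constant expression: ATOMIC_INIT() in the old form could not appear inside a larger static initializer. The plain braced form works in both positions, as in the sketch below (struct and variable names are made up).

#include <linux/atomic.h>

static atomic_t example_standalone = ATOMIC_INIT(0);	/* fine either way */

struct example_counter {
	atomic_t refs;
};

static struct example_counter example_ec = {
	.refs = ATOMIC_INIT(1),		/* only builds with the { (i) } form */
};
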
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 367d299d9938..2d1ad4b11a85 100644
--- a/arch/ia64/include/asm/machvec.h
+++ b/arch/ia64/include/asm/machvec.h
@@ -120,7 +120,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# ifdef MACHVEC_PLATFORM_HEADER
# include MACHVEC_PLATFORM_HEADER
# else
-# define platform_name ia64_mv.name
+# define ia64_platform_name ia64_mv.name
# define platform_setup ia64_mv.setup
# define platform_cpu_init ia64_mv.cpu_init
# define platform_irq_init ia64_mv.irq_init
diff --git a/arch/ia64/include/asm/machvec_dig.h b/arch/ia64/include/asm/machvec_dig.h
index 8a0752f40987..1f7403a2fbee 100644
--- a/arch/ia64/include/asm/machvec_dig.h
+++ b/arch/ia64/include/asm/machvec_dig.h
@@ -10,7 +10,7 @@ extern ia64_mv_setup_t dig_setup;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "dig"
+#define ia64_platform_name "dig"
#define platform_setup dig_setup
#endif /* _ASM_IA64_MACHVEC_DIG_h */
diff --git a/arch/ia64/include/asm/machvec_dig_vtd.h b/arch/ia64/include/asm/machvec_dig_vtd.h
index 6ab1de5c45ef..44308b4c3f6e 100644
--- a/arch/ia64/include/asm/machvec_dig_vtd.h
+++ b/arch/ia64/include/asm/machvec_dig_vtd.h
@@ -11,7 +11,7 @@ extern ia64_mv_dma_init pci_iommu_alloc;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "dig_vtd"
+#define ia64_platform_name "dig_vtd"
#define platform_setup dig_setup
#define platform_dma_init pci_iommu_alloc
diff --git a/arch/ia64/include/asm/machvec_hpsim.h b/arch/ia64/include/asm/machvec_hpsim.h
index cf72fc87fdfe..e75711279366 100644
--- a/arch/ia64/include/asm/machvec_hpsim.h
+++ b/arch/ia64/include/asm/machvec_hpsim.h
@@ -11,7 +11,7 @@ extern ia64_mv_irq_init_t hpsim_irq_init;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "hpsim"
+#define ia64_platform_name "hpsim"
#define platform_setup hpsim_setup
#define platform_irq_init hpsim_irq_init
diff --git a/arch/ia64/include/asm/machvec_hpzx1.h b/arch/ia64/include/asm/machvec_hpzx1.h
index 3bd83d78a412..c74d3159e9eb 100644
--- a/arch/ia64/include/asm/machvec_hpzx1.h
+++ b/arch/ia64/include/asm/machvec_hpzx1.h
@@ -11,7 +11,7 @@ extern ia64_mv_dma_init sba_dma_init;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "hpzx1"
+#define ia64_platform_name "hpzx1"
#define platform_setup dig_setup
#define platform_dma_init sba_dma_init
diff --git a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
index 1091ac39740c..906ef6210774 100644
--- a/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
+++ b/arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
@@ -11,7 +11,7 @@ extern ia64_mv_dma_get_ops hwsw_dma_get_ops;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "hpzx1_swiotlb"
+#define ia64_platform_name "hpzx1_swiotlb"
#define platform_setup dig_setup
#define platform_dma_init machvec_noop
#define platform_dma_get_ops hwsw_dma_get_ops
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index f061a30aac42..ece9fa85be88 100644
--- a/arch/ia64/include/asm/machvec_sn2.h
+++ b/arch/ia64/include/asm/machvec_sn2.h
@@ -71,7 +71,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "sn2"
+#define ia64_platform_name "sn2"
#define platform_setup sn_setup
#define platform_cpu_init sn_cpu_init
#define platform_irq_init sn_irq_init
diff --git a/arch/ia64/include/asm/machvec_uv.h b/arch/ia64/include/asm/machvec_uv.h
index 2931447f3813..2c50853f35ac 100644
--- a/arch/ia64/include/asm/machvec_uv.h
+++ b/arch/ia64/include/asm/machvec_uv.h
@@ -20,7 +20,7 @@ extern ia64_mv_setup_t uv_setup;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "uv"
+#define ia64_platform_name "uv"
#define platform_setup uv_setup
#endif /* _ASM_IA64_MACHVEC_UV_H */
diff --git a/arch/ia64/include/asm/machvec_xen.h b/arch/ia64/include/asm/machvec_xen.h
index 55f9228056cd..8b8bd0eb3923 100644
--- a/arch/ia64/include/asm/machvec_xen.h
+++ b/arch/ia64/include/asm/machvec_xen.h
@@ -13,7 +13,7 @@ extern ia64_mv_send_ipi_t xen_platform_send_ipi;
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
-#define platform_name "xen"
+#define ia64_platform_name "xen"
#define platform_setup dig_setup
#define platform_cpu_init xen_cpu_init
#define platform_irq_init xen_irq_init
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 832dd3789e9d..944152a50912 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -719,7 +719,7 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_FORCE_MWAIT,
void default_idle(void);
-#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
+#define ia64_platform_is(x) (strcmp(x, ia64_platform_name) == 0)
#endif /* !__ASSEMBLY__ */
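
The machvec rename only touches the symbol; callers keep going through ia64_platform_is(), which now compares against ia64_platform_name, presumably to keep such a generic identifier out of the global namespace. A trivial caller sketch (function name is hypothetical):

#include <linux/types.h>
#include <asm/processor.h>

static bool example_running_on_sn2(void)
{
	return ia64_platform_is("sn2");
}
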
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6f38b6120d96..440578850ae5 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
srat_num_cpus++;
}
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
@@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
/* Ignore disabled entries */
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return;
+ return -1;
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
@@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
p->size = size;
p->nid = pxm;
num_node_memblks++;
+ return 0;
}
void __init acpi_numa_arch_fixup(void)
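
acpi_numa_memory_affinity_init() now reports whether the SRAT entry was actually recorded (0) or ignored (-1), bringing the ia64 prototype in line with the x86 one so shared ACPI code can check the result. A hypothetical caller might propagate it like this:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init example_parse_mem_affinity(struct acpi_srat_mem_affinity *ma)
{
	if (acpi_numa_memory_affinity_init(ma) < 0)
		return -EINVAL;		/* entry disabled or inconsistent */
	return 0;
}
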
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 5c3e0888265a..1034884b77da 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
-#include <linux/random.h> /* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d7f558c1e711..3fa4bc536953 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2353,7 +2353,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
*/
insert_vm_struct(mm, vma);
- mm->total_vm += size >> PAGE_SHIFT;
vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
vma_pages(vma));
up_write(&task->mm->mmap_sem);
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index df5351e3eed7..e7947528aee6 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
depends on HAVE_KVM && MODULES && EXPERIMENTAL
# for device assignment:
depends on PCI
+ depends on BROKEN
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_IRQCHIP
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
index f5959c0c1810..eab28e314022 100644
--- a/arch/ia64/pci/fixup.c
+++ b/arch/ia64/pci/fixup.c
@@ -30,8 +30,8 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
struct pci_bus *bus;
u16 config;
- if ((strcmp(platform_name, "dig") != 0)
- && (strcmp(platform_name, "hpzx1") != 0))
+ if ((strcmp(ia64_platform_name, "dig") != 0)
+ && (strcmp(ia64_platform_name, "hpzx1") != 0))
return;
/* Maybe, this machine supports legacy memory map. */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index b638d5bfa14d..49498bbb9616 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -7,6 +7,7 @@ config M32R
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 3e1db561aacc..d5e66a480782 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -336,7 +336,6 @@
#define NR_syscalls 326
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 147120128260..b22df9410dce 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,11 +5,13 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select GENERIC_ATOMIC64
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
select GENERIC_STRNLEN_USER if MMU
select FPU if MMU
+ select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
config RWSEM_GENERIC_SPINLOCK
@@ -53,18 +55,6 @@ config ZONE_DMA
bool
default y
-config CPU_HAS_NO_BITFIELDS
- bool
-
-config CPU_HAS_NO_MULDIV64
- bool
-
-config CPU_HAS_ADDRESS_SPACES
- bool
-
-config FPU
- bool
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 43a9f8f1b8eb..c4eb79edecec 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,6 +28,7 @@ config COLDFIRE
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
+ select HAVE_CLK
endchoice
@@ -37,6 +38,7 @@ config M68000
bool
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
+ select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
help
The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -48,6 +50,7 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
+ select CPU_HAS_NO_UNALIGNED
help
The Freescale (was then Motorola) CPU32 is a CPU core that is
based on the 68020 processor. For the most part it is used in
@@ -56,7 +59,6 @@ config MCPU32
config M68020
bool "68020 support"
depends on MMU
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68020
@@ -67,7 +69,6 @@ config M68020
config M68030
bool "68030 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68030
@@ -77,7 +78,6 @@ config M68030
config M68040
bool "68040 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68LC040
@@ -88,7 +88,6 @@ config M68040
config M68060
bool "68060 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68060
@@ -376,6 +375,18 @@ config NODES_SHIFT
default "3"
depends on !SINGLE_MEMORY_CHUNK
+config CPU_HAS_NO_BITFIELDS
+ bool
+
+config CPU_HAS_NO_MULDIV64
+ bool
+
+config CPU_HAS_NO_UNALIGNED
+ bool
+
+config CPU_HAS_ADDRESS_SPACES
+ bool
+
config FPU
bool
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 0a30406b9442..f5565d6eeb8e 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
timer_handler(irq, dev_id);
- x=*(volatile unsigned char *)(timer+3);
- x=*(volatile unsigned char *)(timer+5);
+ x = *(volatile unsigned char *)(apollo_timer + 3);
+ x = *(volatile unsigned char *)(apollo_timer + 5);
return IRQ_HANDLED;
}
@@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
void dn_sched_init(irq_handler_t timer_routine)
{
/* program timer 1 */
- *(volatile unsigned char *)(timer+3)=0x01;
- *(volatile unsigned char *)(timer+1)=0x40;
- *(volatile unsigned char *)(timer+5)=0x09;
- *(volatile unsigned char *)(timer+7)=0xc4;
+ *(volatile unsigned char *)(apollo_timer + 3) = 0x01;
+ *(volatile unsigned char *)(apollo_timer + 1) = 0x40;
+ *(volatile unsigned char *)(apollo_timer + 5) = 0x09;
+ *(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
/* enable IRQ of PIC B */
*(volatile unsigned char *)(pica+1)&=(~8);
#if 0
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
#endif
if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eafa2539a8ee..a74e5d95c384 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,29 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += futex.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mutex.h
+generic-y += percpu.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += topology.h
+generic-y += types.h
generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h
deleted file mode 100644
index 6bb8f02685a2..000000000000
--- a/arch/m68k/include/asm/MC68332.h
+++ /dev/null
@@ -1,152 +0,0 @@
-
-/* include/asm-m68knommu/MC68332.h: '332 control registers
- *
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
- *
- */
-
-#ifndef _MC68332_H_
-#define _MC68332_H_
-
-#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
-#define WORD_REF(addr) (*((volatile unsigned short*)addr))
-
-#define PORTE_ADDR 0xfffa11
-#define PORTE BYTE_REF(PORTE_ADDR)
-#define DDRE_ADDR 0xfffa15
-#define DDRE BYTE_REF(DDRE_ADDR)
-#define PEPAR_ADDR 0xfffa17
-#define PEPAR BYTE_REF(PEPAR_ADDR)
-
-#define PORTF_ADDR 0xfffa19
-#define PORTF BYTE_REF(PORTF_ADDR)
-#define DDRF_ADDR 0xfffa1d
-#define DDRF BYTE_REF(DDRF_ADDR)
-#define PFPAR_ADDR 0xfffa1f
-#define PFPAR BYTE_REF(PFPAR_ADDR)
-
-#define PORTQS_ADDR 0xfffc15
-#define PORTQS BYTE_REF(PORTQS_ADDR)
-#define DDRQS_ADDR 0xfffc17
-#define DDRQS BYTE_REF(DDRQS_ADDR)
-#define PQSPAR_ADDR 0xfffc16
-#define PQSPAR BYTE_REF(PQSPAR_ADDR)
-
-#define CSPAR0_ADDR 0xFFFA44
-#define CSPAR0 WORD_REF(CSPAR0_ADDR)
-#define CSPAR1_ADDR 0xFFFA46
-#define CSPAR1 WORD_REF(CSPAR1_ADDR)
-#define CSARBT_ADDR 0xFFFA48
-#define CSARBT WORD_REF(CSARBT_ADDR)
-#define CSOPBT_ADDR 0xFFFA4A
-#define CSOPBT WORD_REF(CSOPBT_ADDR)
-#define CSBAR0_ADDR 0xFFFA4C
-#define CSBAR0 WORD_REF(CSBAR0_ADDR)
-#define CSOR0_ADDR 0xFFFA4E
-#define CSOR0 WORD_REF(CSOR0_ADDR)
-#define CSBAR1_ADDR 0xFFFA50
-#define CSBAR1 WORD_REF(CSBAR1_ADDR)
-#define CSOR1_ADDR 0xFFFA52
-#define CSOR1 WORD_REF(CSOR1_ADDR)
-#define CSBAR2_ADDR 0xFFFA54
-#define CSBAR2 WORD_REF(CSBAR2_ADDR)
-#define CSOR2_ADDR 0xFFFA56
-#define CSOR2 WORD_REF(CSOR2_ADDR)
-#define CSBAR3_ADDR 0xFFFA58
-#define CSBAR3 WORD_REF(CSBAR3_ADDR)
-#define CSOR3_ADDR 0xFFFA5A
-#define CSOR3 WORD_REF(CSOR3_ADDR)
-#define CSBAR4_ADDR 0xFFFA5C
-#define CSBAR4 WORD_REF(CSBAR4_ADDR)
-#define CSOR4_ADDR 0xFFFA5E
-#define CSOR4 WORD_REF(CSOR4_ADDR)
-#define CSBAR5_ADDR 0xFFFA60
-#define CSBAR5 WORD_REF(CSBAR5_ADDR)
-#define CSOR5_ADDR 0xFFFA62
-#define CSOR5 WORD_REF(CSOR5_ADDR)
-#define CSBAR6_ADDR 0xFFFA64
-#define CSBAR6 WORD_REF(CSBAR6_ADDR)
-#define CSOR6_ADDR 0xFFFA66
-#define CSOR6 WORD_REF(CSOR6_ADDR)
-#define CSBAR7_ADDR 0xFFFA68
-#define CSBAR7 WORD_REF(CSBAR7_ADDR)
-#define CSOR7_ADDR 0xFFFA6A
-#define CSOR7 WORD_REF(CSOR7_ADDR)
-#define CSBAR8_ADDR 0xFFFA6C
-#define CSBAR8 WORD_REF(CSBAR8_ADDR)
-#define CSOR8_ADDR 0xFFFA6E
-#define CSOR8 WORD_REF(CSOR8_ADDR)
-#define CSBAR9_ADDR 0xFFFA70
-#define CSBAR9 WORD_REF(CSBAR9_ADDR)
-#define CSOR9_ADDR 0xFFFA72
-#define CSOR9 WORD_REF(CSOR9_ADDR)
-#define CSBAR10_ADDR 0xFFFA74
-#define CSBAR10 WORD_REF(CSBAR10_ADDR)
-#define CSOR10_ADDR 0xFFFA76
-#define CSOR10 WORD_REF(CSOR10_ADDR)
-
-#define CSOR_MODE_ASYNC 0x0000
-#define CSOR_MODE_SYNC 0x8000
-#define CSOR_MODE_MASK 0x8000
-#define CSOR_BYTE_DISABLE 0x0000
-#define CSOR_BYTE_UPPER 0x4000
-#define CSOR_BYTE_LOWER 0x2000
-#define CSOR_BYTE_BOTH 0x6000
-#define CSOR_BYTE_MASK 0x6000
-#define CSOR_RW_RSVD 0x0000
-#define CSOR_RW_READ 0x0800
-#define CSOR_RW_WRITE 0x1000
-#define CSOR_RW_BOTH 0x1800
-#define CSOR_RW_MASK 0x1800
-#define CSOR_STROBE_DS 0x0400
-#define CSOR_STROBE_AS 0x0000
-#define CSOR_STROBE_MASK 0x0400
-#define CSOR_DSACK_WAIT(x) (wait << 6)
-#define CSOR_DSACK_FTERM (14 << 6)
-#define CSOR_DSACK_EXTERNAL (15 << 6)
-#define CSOR_DSACK_MASK 0x03c0
-#define CSOR_SPACE_CPU 0x0000
-#define CSOR_SPACE_USER 0x0010
-#define CSOR_SPACE_SU 0x0020
-#define CSOR_SPACE_BOTH 0x0030
-#define CSOR_SPACE_MASK 0x0030
-#define CSOR_IPL_ALL 0x0000
-#define CSOR_IPL_PRIORITY(x) (x << 1)
-#define CSOR_IPL_MASK 0x000e
-#define CSOR_AVEC_ON 0x0001
-#define CSOR_AVEC_OFF 0x0000
-#define CSOR_AVEC_MASK 0x0001
-
-#define CSBAR_ADDR(x) ((addr >> 11) << 3)
-#define CSBAR_ADDR_MASK 0xfff8
-#define CSBAR_BLKSIZE_2K 0x0000
-#define CSBAR_BLKSIZE_8K 0x0001
-#define CSBAR_BLKSIZE_16K 0x0002
-#define CSBAR_BLKSIZE_64K 0x0003
-#define CSBAR_BLKSIZE_128K 0x0004
-#define CSBAR_BLKSIZE_256K 0x0005
-#define CSBAR_BLKSIZE_512K 0x0006
-#define CSBAR_BLKSIZE_1M 0x0007
-#define CSBAR_BLKSIZE_MASK 0x0007
-
-#define CSPAR_DISC 0
-#define CSPAR_ALT 1
-#define CSPAR_CS8 2
-#define CSPAR_CS16 3
-#define CSPAR_MASK 3
-
-#define CSPAR0_CSBOOT(x) (x << 0)
-#define CSPAR0_CS0(x) (x << 2)
-#define CSPAR0_CS1(x) (x << 4)
-#define CSPAR0_CS2(x) (x << 6)
-#define CSPAR0_CS3(x) (x << 8)
-#define CSPAR0_CS4(x) (x << 10)
-#define CSPAR0_CS5(x) (x << 12)
-
-#define CSPAR1_CS6(x) (x << 0)
-#define CSPAR1_CS7(x) (x << 2)
-#define CSPAR1_CS8(x) (x << 4)
-#define CSPAR1_CS9(x) (x << 6)
-#define CSPAR1_CS10(x) (x << 8)
-
-#endif
diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h
deleted file mode 100644
index 954adc851adb..000000000000
--- a/arch/m68k/include/asm/apollodma.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_APOLLO_DMA_H
-#define _ASM_APOLLO_DMA_H
-
-#include <asm/apollohw.h> /* need byte IO */
-#include <linux/spinlock.h> /* And spinlocks */
-#include <linux/delay.h>
-
-
-#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
-#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE)))
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */
-#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */
-#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */
-#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
-#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */
-#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
-#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */
-#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
-
-#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */
-#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */
-#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */
-#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */
-#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */
-#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
-#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */
-#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
-
-#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */
-#define DMA_ADDR_1 (IO_DMA1_BASE+0x02)
-#define DMA_ADDR_2 (IO_DMA1_BASE+0x04)
-#define DMA_ADDR_3 (IO_DMA1_BASE+0x06)
-#define DMA_ADDR_4 (IO_DMA2_BASE+0x00)
-#define DMA_ADDR_5 (IO_DMA2_BASE+0x04)
-#define DMA_ADDR_6 (IO_DMA2_BASE+0x08)
-#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C)
-
-#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */
-#define DMA_CNT_1 (IO_DMA1_BASE+0x03)
-#define DMA_CNT_2 (IO_DMA1_BASE+0x05)
-#define DMA_CNT_3 (IO_DMA1_BASE+0x07)
-#define DMA_CNT_4 (IO_DMA2_BASE+0x02)
-#define DMA_CNT_5 (IO_DMA2_BASE+0x06)
-#define DMA_CNT_6 (IO_DMA2_BASE+0x0A)
-#define DMA_CNT_7 (IO_DMA2_BASE+0x0E)
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-#define DMA_8BIT 0
-#define DMA_16BIT 1
-#define DMA_BUSMASTER 2
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* These are in arch/m68k/apollo/dma.c: */
-extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
-extern void dma_unmap_page(unsigned short dma_addr);
-
-#endif /* _ASM_APOLLO_DMA_H */
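The notes in the header removed above spell out the classic 8237 programming model: take the DMA spinlock, clear the pointer flip-flop, then program mode, address and count before unmasking the channel. A minimal sketch of a caller that follows those rules, assuming a hypothetical driver on word channel 5 with a made-up buffer and length (the example_start_dma name is not from the tree):

#include <asm/apollodma.h>	/* the header being deleted above */

/* Hypothetical caller following the sequence described in the removed notes. */
static void example_start_dma(unsigned int buf_phys, unsigned int len)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(5);				/* mask the channel while programming */
	clear_dma_ff(5);			/* start from a known flip-flop state */
	set_dma_mode(5, DMA_MODE_READ);		/* I/O to memory, single mode */
	set_dma_addr(5, buf_phys);		/* physical address; word channel */
	set_dma_count(5, len);			/* count in bytes, must be even on 5-7 */
	enable_dma(5);
	release_dma_lock(flags);
}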
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index a1373b9aa281..635ef4f89010 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@ extern u_long timer_physaddr;
#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
#define pica (IO_BASE + pica_physaddr)
#define picb (IO_BASE + picb_physaddr)
-#define timer (IO_BASE + timer_physaddr)
+#define apollo_timer (IO_BASE + timer_physaddr)
#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/m68k/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h
deleted file mode 100644
index c79c5e892305..000000000000
--- a/arch/m68k/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M68K_CPUTIME_H
-#define __M68K_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M68K_CPUTIME_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 9c09becfd4c9..12d8fe4f1d30 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops)
extern void __bad_udelay(void);
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
* The simpler m68k and ColdFire processors do not have a 32*32->64
* multiply instruction. So we need to handle them a little differently.
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2d..000000000000
--- a/arch/m68k/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/m68k/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h
deleted file mode 100644
index 0d4e188d6ef6..000000000000
--- a/arch/m68k/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_ERRNO_H
-#define _M68K_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _M68K_ERRNO_H */
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099c..000000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/m68k/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/m68k/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/m68k/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b037665..000000000000
--- a/arch/m68k/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
deleted file mode 100644
index 3413cc1390ec..000000000000
--- a/arch/m68k/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_KMAP_TYPES_H
-#define __ASM_M68K_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/m68k/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644
index 6c259263e1f0..000000000000
--- a/arch/m68k/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_LOCAL_H
-#define _ASM_M68K_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* _ASM_M68K_LOCAL_H */
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/m68k/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h
deleted file mode 100644
index 39a5c292eaee..000000000000
--- a/arch/m68k/include/asm/mac_mouse.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_MAC_MOUSE_H
-#define _ASM_MAC_MOUSE_H
-
-/*
- * linux/include/asm-m68k/mac_mouse.h
- * header file for Macintosh ADB mouse driver
- * 27-10-97 Michael Schmitz
- * copied from:
- * header file for Atari Mouse driver
- * by Robert de Vries (robert@and.nl) on 19Jul93
- */
-
-struct mouse_status {
- char buttons;
- short dx;
- short dy;
- int ready;
- int active;
- wait_queue_head_t wait;
- struct fasync_struct *fasyncptr;
-};
-
-#endif
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c47a2c..000000000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************/
-
-/*
- * mcfmbus.h -- Coldfire MBUS support defines.
- *
- * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
- */
-
-/****************************************************************************/
-
-
-#ifndef mcfmbus_h
-#define mcfmbus_h
-
-
-#define MCFMBUS_BASE 0x280
-#define MCFMBUS_IRQ_VECTOR 0x19
-#define MCFMBUS_IRQ 0x1
-#define MCFMBUS_CLK 0x3f
-#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
-#define MCFMBUS_ADDRESS 0x01
-
-
-/*
-* Define the 5307 MBUS register set addresses
-*/
-
-#define MCFMBUS_MADR 0x00
-#define MCFMBUS_MFDR 0x04
-#define MCFMBUS_MBCR 0x08
-#define MCFMBUS_MBSR 0x0C
-#define MCFMBUS_MBDR 0x10
-
-
-#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
-
-#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
-
-/*
-* Define bit flags in Control Register
-*/
-
-#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
-#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
-#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
-#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
-#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
-#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
-
-/*
-* Define bit flags in Status Register
-*/
-
-#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
-#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
-#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
-#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
-#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
-#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
-#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
-
-/*
-* Define bit flags in DATA I/O Register
-*/
-
-#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
-
-#define MBUSIOCSCLOCK 1
-#define MBUSIOCGCLOCK 2
-#define MBUSIOCSADDR 3
-#define MBUSIOCGADDR 4
-#define MBUSIOCSSLADDR 5
-#define MBUSIOCGSLADDR 6
-#define MBUSIOCSSUBADDR 7
-#define MBUSIOCGSUBADDR 8
-
-#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/m68k/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m68k/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h
deleted file mode 100644
index 0859d048faf5..000000000000
--- a/arch/m68k/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_PERCPU_H
-#define __ASM_M68K_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_M68K_PERCPU_H */
diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h
deleted file mode 100644
index e7d35019f337..000000000000
--- a/arch/m68k/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_RESOURCE_H
-#define _M68K_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _M68K_RESOURCE_H */
diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h
deleted file mode 100644
index bfe3ba147f2e..000000000000
--- a/arch/m68k/include/asm/sbus.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * some sbus structures and macros to make usage of sbus drivers possible
- */
-
-#ifndef __M68K_SBUS_H
-#define __M68K_SBUS_H
-
-struct sbus_dev {
- struct {
- unsigned int which_io;
- unsigned int phys_addr;
- } reg_addrs[1];
-};
-
-/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
-/* No SBUS on the Sun3, kludge -- sam */
-
-static inline void _sbus_writeb(unsigned char val, unsigned long addr)
-{
- *(volatile unsigned char *)addr = val;
-}
-
-static inline unsigned char _sbus_readb(unsigned long addr)
-{
- return *(volatile unsigned char *)addr;
-}
-
-static inline void _sbus_writel(unsigned long val, unsigned long addr)
-{
- *(volatile unsigned long *)addr = val;
-
-}
-
-extern inline unsigned long _sbus_readl(unsigned long addr)
-{
- return *(volatile unsigned long *)addr;
-}
-
-
-#define sbus_readb(a) _sbus_readb((unsigned long)a)
-#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
-#define sbus_readl(a) _sbus_readl((unsigned long)a)
-#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
-
-#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
deleted file mode 100644
index 312505452a1e..000000000000
--- a/arch/m68k/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SCATTERLIST_H
-#define _M68K_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
deleted file mode 100644
index 5277e52715ec..000000000000
--- a/arch/m68k/include/asm/sections.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_M68K_SECTIONS_H
-#define _ASM_M68K_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-extern char _sbss[], _ebss[];
-
-#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h
deleted file mode 100644
index fa56ec84a126..000000000000
--- a/arch/m68k/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _M68K_SHM_H
-#define _M68K_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
- currently out in swap space (see also mm/swap.c):
- bits 0-1 (PAGE_PRESENT) is = 0
- bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
- bits 31..9 are used like this:
- bits 15..9 (SHM_ID) the id of the shared memory segment
- bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
- (actually only bits 25..16 get used since SHMMAX is so low)
- bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
- others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT 9
-#else
-#define SHM_ID_SHIFT 7
-#endif
-#define _SHM_ID_BITS 7
-#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS 15
-#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _M68K_SHM_H */
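The layout comment in the removed header translates directly into shift-and-mask arithmetic: with the non-Sun3 values, SHM_ID_SHIFT is 9 and _SHM_ID_BITS is 7, so SHM_IDX_SHIFT works out to 16. A small illustrative decode, not taken from the tree (the example_* helpers are hypothetical):

/* Hypothetical decode of the swap-entry layout described above (non-Sun3 case). */
static inline unsigned int example_shm_id(unsigned long entry)
{
	return (entry >> 9) & ((1 << 7) - 1);	/* bits 15..9 */
}

static inline unsigned int example_shm_idx(unsigned long entry)
{
	return (entry >> 16) & ((1 << 15) - 1);	/* bits 30..16 */
}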
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
deleted file mode 100644
index 851d3d784b53..000000000000
--- a/arch/m68k/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SIGINFO_H
-#define _M68K_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h
deleted file mode 100644
index 08d93f14e061..000000000000
--- a/arch/m68k/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_STATFS_H
-#define _M68K_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _M68K_STATFS_H */
diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h
deleted file mode 100644
index ca173e9f26ff..000000000000
--- a/arch/m68k/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_TOPOLOGY_H
-#define _ASM_M68K_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
deleted file mode 100644
index 89705adcbd52..000000000000
--- a/arch/m68k/include/asm/types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _M68K_TYPES_H
-#define _M68K_TYPES_H
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-#include <asm-generic/int-ll64.h>
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-
-#define BITS_PER_LONG 32
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_TYPES_H */
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index f4043ae63db1..2b3ca0bf7a0d 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
#define _ASM_M68K_UNALIGNED_H
-#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
#else
/*
- * The m68k can do unaligned accesses itself.
+ * The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index ea0b502f845e..045cfd6a9e31 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -357,7 +357,6 @@
#define NR_syscalls 347
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/m68k/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 7dc186b7a85f..71fb29938dba 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
- pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
- "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
- (int) &_sdata, (int) &_edata,
- (int) &_sbss, (int) &_ebss);
- pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
- (int) &_ebss, (int) memory_start,
- (int) memory_start, (int) memory_end);
+ pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+ _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
+ pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+ __bss_stop, memory_start, memory_start, memory_end);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8dc16f8..9a5932ec3689 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
goto bad_access;
}
- mem_value = *mem;
+ /*
+ * No need to check for EFAULT; we know that the page is
+ * present and writable.
+ */
+ __get_user(mem_value, mem);
if (mem_value == oldval)
- *mem = newval;
+ __put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 40e02d9c38b4..06a763f49fd3 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,9 +78,7 @@ SECTIONS {
__init_end = .;
}
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = .;
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 63407c836826..d0993594f558 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,7 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_edata = .; /* End of data section */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index ad0f46d64c0b..8080469ee6c1 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,9 +44,7 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = . ;
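The linker-script hunks above (together with the head-*.S and setup hunks later in this series) drop the m68k-private _sbss/_ebss markers in favour of the __bss_start/__bss_stop symbols that the generic BSS_SECTION() macro already provides, so C code only needs the generic declarations. A sketch, assuming the declarations from asm-generic/sections.h:

#include <asm/sections.h>	/* pulls in asm-generic/sections.h */

/* Sketch: BSS size computed from the generic linker symbols. */
static unsigned long example_bss_bytes(void)
{
	return (unsigned long)(__bss_stop - __bss_start);
}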
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 79e928a525d0..ee5f0b1b5c5d 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index f77f258dce3a..282f9de68966 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@ void __init print_memmap(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_stext, _etext),
MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(_sbss, _ebss));
+ MLK_ROUNDUP(__bss_start, __bss_stop));
}
void __init mem_init(void)
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 345ec0d83e3d..688e3664aea0 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
codek = (_etext - _stext) >> 10;
- datak = (_ebss - _sdata) >> 10;
+ datak = (__bss_stop - _sdata) >> 10;
initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
index f632fdcb93e9..537d3245b539 100644
--- a/arch/m68k/platform/68328/head-de2.S
+++ b/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@ _start:
* Move ROM filesystem above bss :-)
*/
- moveal #_sbss, %a0 /* romfs at the start of bss */
- moveal #_ebss, %a1 /* Set up destination */
+ moveal #__bss_start, %a0 /* romfs at the start of bss */
+ moveal #__bss_stop, %a1 /* Set up destination */
movel %a0, %a2 /* Copy of bss start */
movel 8(%a0), %d1 /* Get size of ROMFS */
@@ -84,8 +84,8 @@ _start:
* Initialize BSS segment to 0
*/
- lea _sbss, %a0
- lea _ebss, %a1
+ lea __bss_start, %a0
+ lea __bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
2: cmpal %a0, %a1
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index 2ebfd6420818..45a9dad29e3d 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@ L0:
movel #CONFIG_VECTORBASE, %d7
addl #16, %d7
moveal %d7, %a0
- moveal #_ebss, %a1
+ moveal #__bss_stop, %a1
lea %a1@(512), %a2
DBG_PUTC('C')
@@ -138,8 +138,8 @@ LD1:
DBG_PUTC('E')
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -150,7 +150,7 @@ L1:
DBG_PUTC('F')
/* Copy command line from end of bss to command line */
- moveal #_ebss, %a0
+ moveal #__bss_stop, %a0
moveal #command_line, %a1
lea %a1@(512), %a2
@@ -165,7 +165,7 @@ L3:
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel %a4, %d0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
index 7f1aeeacb219..5189ef926098 100644
--- a/arch/m68k/platform/68328/head-ram.S
+++ b/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@ pclp3:
beq pclp3
#endif /* DEBUG */
moveal #0x007ffff0, %ssp
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 >= %a1 */
L1:
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index a5ff96d0295f..3dff98ba2e97 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr
cmpal %a1, %a2
bhi 1b
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
1:
@@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
movel %d0, _ramend
diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S
index 8eb94fb6b971..acd213170d80 100644
--- a/arch/m68k/platform/68360/head-ram.S
+++ b/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -234,7 +234,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S
index 97510e55b802..dfc756d99886 100644
--- a/arch/m68k/platform/68360/head-rom.S
+++ b/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
*/
.global _stext
-.global _sbss
+.global __bss_start
.global _start
.global _rambase
@@ -229,8 +229,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -244,7 +244,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 4e0c9eb3bd1f..b88f5716f357 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@ _vstart:
/*
* Move ROM filesystem above bss :-)
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* set up destination */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
@@ -249,7 +249,7 @@ _copy_romfs:
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
- lea _ebss,%a1
+ lea __bss_stop,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
@@ -257,8 +257,8 @@ _copy_romfs:
/*
* Zero out the bss region.
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* get end of bss */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
index d8e6349336b4..eeba067d565f 100644
--- a/arch/m68k/sun3/prom/init.c
+++ b/arch/m68k/sun3/prom/init.c
@@ -22,57 +22,13 @@ int prom_root_node;
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library. It returns 0 on success, 1 on
- * failure. It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
*/
-extern void prom_meminit(void);
-extern void prom_ranges_init(void);
-
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
-#ifndef CONFIG_SUN3
- switch(romvec->pv_romvers) {
- case 0:
- prom_vers = PROM_V0;
- break;
- case 2:
- prom_vers = PROM_V2;
- break;
- case 3:
- prom_vers = PROM_V3;
- break;
- case 4:
- prom_vers = PROM_P1275;
- prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
- prom_halt();
- break;
- default:
- prom_printf("PROMLIB: Bad PROM version %d\n",
- romvec->pv_romvers);
- prom_halt();
- break;
- };
-
- prom_rev = romvec->pv_plugin_revision;
- prom_prev = romvec->pv_printrev;
- prom_nodeops = romvec->pv_nodeops;
-
- prom_root_node = prom_getsibling(0);
- if((prom_root_node == 0) || (prom_root_node == -1))
- prom_halt();
-
- if((((unsigned long) prom_nodeops) == 0) ||
- (((unsigned long) prom_nodeops) == -1))
- prom_halt();
-
- prom_meminit();
-
- prom_ranges_init();
-#endif
-// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
-// romvec->pv_romvers, prom_rev);
/* Initialization successful. */
return;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 0bf44231aaf9..ab9afcaa7f6a 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -15,6 +15,7 @@ config MICROBLAZE
select TRACING_SUPPORT
select OF
select OF_EARLY_FLATTREE
+ select ARCH_WANT_IPC_PARSE_VERSION
select IRQ_DOMAIN
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 4487e150b455..c07ed5d2a820 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];
-# ifdef CONFIG_MTD_UCLINUX
-extern char *_ebss;
-# endif
-
extern u32 _fdt_start[], _fdt_end[];
# endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index d20ffbc86beb..6985e6e9d826 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -400,7 +400,6 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
-#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
/* #define __ARCH_WANT_OLD_STAT */
#define __ARCH_WANT_STAT64
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bb4907c828dc..2b25bcf05c00 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,9 +21,6 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
-extern char *_ebss;
-EXPORT_SYMBOL_GPL(_ebss);
-
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 16d8dfd9094b..4da971d4392f 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
/* Move ROMFS out of BSS before clearing it */
if (romfs_size > 0) {
- memmove(&_ebss, (int *)romfs_base, romfs_size);
+ memmove(&__bss_stop, (int *)romfs_base, romfs_size);
klimit += romfs_size;
}
#endif
@@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
BUG_ON(romfs_size < 0); /* What else can we do? */
printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
- romfs_size, romfs_base, (unsigned)&_ebss);
+ romfs_size, romfs_base, (unsigned)&__bss_stop);
printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 109e9d86ade4..936d01a689d7 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,7 +131,6 @@ SECTIONS {
*(COMMON)
. = ALIGN (4) ;
__bss_stop = . ;
- _ebss = . ;
}
. = ALIGN(PAGE_SIZE);
_end = .;
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 5ce8029f558b..d64786d5e2f3 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -14,6 +14,7 @@ platforms += jz4740
platforms += lantiq
platforms += lasat
platforms += loongson
+platforms += loongson1
platforms += mipssim
platforms += mti-malta
platforms += netlogic
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b3e10fdd3898..faf65286574e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -20,12 +20,14 @@ config MIPS
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
+ select ARCH_WANT_IPC_PARSE_VERSION
select IRQ_FORCED_THREADING
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
@@ -75,6 +77,7 @@ config AR7
select SYS_SUPPORTS_ZBOOT_UART16550
select ARCH_REQUIRE_GPIOLIB
select VLYNQ
+ select HAVE_CLK
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
@@ -86,6 +89,7 @@ config ATH79
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
+ select HAVE_CLK
select IRQ_CPU
select MIPS_MACHINE
select SYS_HAS_CPU_MIPS32_R2
@@ -122,6 +126,7 @@ config BCM63XX
select SYS_HAS_EARLY_PRINTK
select SWAP_IO_SPACE
select ARCH_REQUIRE_GPIOLIB
+ select HAVE_CLK
help
Support for BCM63XX based boards
@@ -209,6 +214,7 @@ config MACH_JZ4740
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_ZBOOT_UART16550
select DMA_NONCOHERENT
select IRQ_CPU
select GENERIC_GPIO
@@ -264,6 +270,16 @@ config MACH_LOONGSON
Chinese Academy of Sciences (CAS) in the People's Republic
of China. The chief architect is Professor Weiwu Hu.
+config MACH_LOONGSON1
+ bool "Loongson 1 family of machines"
+ select SYS_SUPPORTS_ZBOOT
+ help
+ This enables support for the Loongson 1 based machines.
+
+ Loongson 1 is a family of 32-bit MIPS-compatible SoCs developed by
+ the ICT (Institute of Computing Technology) and the Chinese Academy
+ of Sciences.
+
config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
@@ -787,6 +803,8 @@ config NLM_XLR_BOARD
select ZONE_DMA if 64BIT
select SYNC_R4K
select SYS_HAS_EARLY_PRINTK
+ select USB_ARCH_HAS_OHCI if USB_SUPPORT
+ select USB_ARCH_HAS_EHCI if USB_SUPPORT
help
Support for systems based on Netlogic XLR and XLS processors.
Say Y here if you have a XLR or XLS based board.
@@ -799,7 +817,6 @@ config NLM_XLP_BOARD
select SYS_HAS_CPU_XLP
select SYS_SUPPORTS_SMP
select HW_HAS_PCI
- select SWAP_IO_SPACE
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select 64BIT_PHYS_ADDR
@@ -836,6 +853,7 @@ source "arch/mips/txx9/Kconfig"
source "arch/mips/vr41xx/Kconfig"
source "arch/mips/cavium-octeon/Kconfig"
source "arch/mips/loongson/Kconfig"
+source "arch/mips/loongson1/Kconfig"
source "arch/mips/netlogic/Kconfig"
endmenu
@@ -1217,6 +1235,14 @@ config CPU_LOONGSON2F
have a similar programming interface with FPGA northbridge used in
Loongson2E.
+config CPU_LOONGSON1B
+ bool "Loongson 1B"
+ depends on SYS_HAS_CPU_LOONGSON1B
+ select CPU_LOONGSON1
+ help
+ The Loongson 1B is a 32-bit SoC, which implements the MIPS32
+ release 2 instruction set.
+
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
depends on SYS_HAS_CPU_MIPS32_R1
@@ -1432,6 +1458,8 @@ config CPU_CAVIUM_OCTEON
select WEAK_ORDERING
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select LIBFDT
+ select USE_OF
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
@@ -1544,6 +1572,14 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+config CPU_LOONGSON1
+ bool
+ select CPU_MIPS32
+ select CPU_MIPSR2
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+
config CPU_BMIPS
bool
select CPU_MIPS32
@@ -1562,6 +1598,9 @@ config SYS_HAS_CPU_LOONGSON2F
select CPU_SUPPORTS_ADDRWINCFG if 64BIT
select CPU_SUPPORTS_UNCACHED_ACCELERATED
+config SYS_HAS_CPU_LOONGSON1B
+ bool
+
config SYS_HAS_CPU_MIPS32_R1
bool
@@ -2366,6 +2405,8 @@ config PCI_DOMAINS
source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+
#
# ISA support is now enabled via select. Too many systems still have the one
# or other ISA chip on the board that users don't know about so don't expect
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 295f1a95f745..a124c251c0c9 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -81,10 +81,10 @@ static void mtx1_power_off(void)
void __init board_setup(void)
{
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* Enable USB power switch */
alchemy_gpio_direction_output(204, 0);
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+#endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */
/* Initialize sys_pinfunc */
au_writel(SYS_PF_NI2, SYS_PINFUNC);
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
* adapter on the mtx-1 "singleboard" variant. It triggers a custom
* logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
*/
+ udelay(1);
+
if (assert && devsel != 0)
/* Suppress signal to Cardbus */
alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 95cb9113b12c..c0f3ce6dcb56 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -334,13 +334,12 @@ static void __init alchemy_setup_macs(int ctype)
if (alchemy_get_macs(ctype) < 1)
return;
- macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ macres = kmemdup(au1xxx_eth0_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC0 resources\n");
return;
}
- memcpy(macres, au1xxx_eth0_resources[ctype],
- sizeof(struct resource) * MAC_RES_COUNT);
au1xxx_eth0_device.resource = macres;
i = prom_get_ethernet_addr(ethaddr);
@@ -356,13 +355,12 @@ static void __init alchemy_setup_macs(int ctype)
if (alchemy_get_macs(ctype) < 2)
return;
- macres = kmalloc(sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
+ macres = kmemdup(au1xxx_eth1_resources[ctype],
+ sizeof(struct resource) * MAC_RES_COUNT, GFP_KERNEL);
if (!macres) {
printk(KERN_INFO "Alchemy: no memory for MAC1 resources\n");
return;
}
- memcpy(macres, au1xxx_eth1_resources[ctype],
- sizeof(struct resource) * MAC_RES_COUNT);
au1xxx_eth1_device.resource = macres;
ethaddr[5] += 1; /* next addr for 2nd MAC */
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
index 3c37fb303364..c9e747dd9fc2 100644
--- a/arch/mips/alchemy/devboards/Makefile
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -2,7 +2,7 @@
# Alchemy Develboards
#
-obj-y += prom.o bcsr.o platform.o
+obj-y += bcsr.o platform.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_MIPS_PB1100) += pb1100.o
obj-$(CONFIG_MIPS_PB1500) += pb1500.o
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 1e83ce2e1147..f2039ef2c293 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -90,10 +90,7 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
disable_irq_nosync(irq);
-
- for ( ; bisr; bisr &= bisr - 1)
- generic_handle_irq(bcsr_csc_base + __ffs(bisr));
-
+ generic_handle_irq(bcsr_csc_base + __ffs(bisr));
enable_irq(irq);
}
diff --git a/arch/mips/alchemy/devboards/pb1100.c b/arch/mips/alchemy/devboards/pb1100.c
index cff50d05ddd4..78c77a44a317 100644
--- a/arch/mips/alchemy/devboards/pb1100.c
+++ b/arch/mips/alchemy/devboards/pb1100.c
@@ -46,7 +46,7 @@ void __init board_setup(void)
alchemy_gpio1_input_enable();
udelay(100);
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
{
u32 pin_func, sys_freqctrl, sys_clksrc;
@@ -93,7 +93,7 @@ void __init board_setup(void)
pin_func |= SYS_PF_USB;
au_writel(pin_func, SYS_PINFUNC);
}
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+#endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */
/* Enable sys bus clock divider when IDLE state or no bus activity. */
au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL);
diff --git a/arch/mips/alchemy/devboards/pb1500.c b/arch/mips/alchemy/devboards/pb1500.c
index e7b807b3ec51..232fee942000 100644
--- a/arch/mips/alchemy/devboards/pb1500.c
+++ b/arch/mips/alchemy/devboards/pb1500.c
@@ -53,7 +53,7 @@ void __init board_setup(void)
alchemy_gpio_direction_input(201);
alchemy_gpio_direction_input(203);
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+#if IS_ENABLED(CONFIG_USB_OHCI_HCD)
/* Zero and disable FREQ2 */
sys_freqctrl = au_readl(SYS_FREQCTRL0);
@@ -87,7 +87,7 @@ void __init board_setup(void)
/* 2nd USB port is USB host */
pin_func |= SYS_PF_USB;
au_writel(pin_func, SYS_PINFUNC);
-#endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */
+#endif /* IS_ENABLED(CONFIG_USB_OHCI_HCD) */
#ifdef CONFIG_PCI
{
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
index 621f70afb63a..f39042e99d0d 100644
--- a/arch/mips/alchemy/devboards/platform.c
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -10,9 +10,39 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <asm/bootinfo.h>
#include <asm/reboot.h>
+#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/bcsr.h>
+#include <prom.h>
+
+void __init prom_init(void)
+{
+ unsigned char *memsize_str;
+ unsigned long memsize;
+
+ prom_argc = (int)fw_arg0;
+ prom_argv = (char **)fw_arg1;
+ prom_envp = (char **)fw_arg2;
+
+ prom_init_cmdline();
+ memsize_str = prom_getenv("memsize");
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
+ memsize = 64 << 20; /* all devboards have at least 64MB RAM */
+
+ add_memory_region(0, memsize, BOOT_MEM_RAM);
+}
+
+void prom_putchar(unsigned char c)
+{
+#ifdef CONFIG_MIPS_DB1300
+ alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
+#else
+ alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+#endif
+}
+
static struct platform_device db1x00_rtc_dev = {
.name = "rtc-au1xxx",
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c
deleted file mode 100644
index 93a22107cc41..000000000000
--- a/arch/mips/alchemy/devboards/prom.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Common code used by all Alchemy develboards.
- *
- * Extracted from files which had this to say:
- *
- * Copyright 2000, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <asm/bootinfo.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <prom.h>
-
-#if defined(CONFIG_MIPS_DB1000) || \
- defined(CONFIG_MIPS_PB1100) || \
- defined(CONFIG_MIPS_PB1500)
-#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x04000000
-
-#else /* Au1550/Au1200-based develboards */
-#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x08000000
-#endif
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = (int)fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
-
- prom_init_cmdline();
- memsize_str = prom_getenv("memsize");
- if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
- memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
-
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
-
-void prom_putchar(unsigned char c)
-{
-#ifdef CONFIG_MIPS_DB1300
- alchemy_uart_putchar(AU1300_UART2_PHYS_ADDR, c);
-#else
- alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
-#endif
-}
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index 36e9570e7bc4..b2a2311ec85b 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
+ ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
+ ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
platform_device_register(&ath79_ohci_device);
}
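The hunk above fills in the second entry of the OHCI resource array at runtime; an IRQ resource spans a single line, which is why both start and end are set to ATH79_CPU_IRQ_USB. Roughly the shape of such a table (the real ath79_ohci_resources definition is not part of this diff; the example_* name is hypothetical):

#include <linux/ioport.h>

/* Illustrative MEM + IRQ resource pair, patched in at runtime as above. */
static struct resource example_ohci_resources[] = {
	{
		.flags = IORESOURCE_MEM,	/* [0]: filled from AR7240_OHCI_BASE/SIZE */
	},
	{
		.flags = IORESOURCE_IRQ,	/* [1]: start == end == the USB IRQ number */
	},
};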
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 29054f211832..48fe762d2526 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
if (soc_is_ar71xx())
ath79_gpio_count = AR71XX_GPIO_COUNT;
- else if (soc_is_ar724x())
- ath79_gpio_count = AR724X_GPIO_COUNT;
+ else if (soc_is_ar7240())
+ ath79_gpio_count = AR7240_GPIO_COUNT;
+ else if (soc_is_ar7241() || soc_is_ar7242())
+ ath79_gpio_count = AR7241_GPIO_COUNT;
else if (soc_is_ar913x())
ath79_gpio_count = AR913X_GPIO_COUNT;
else if (soc_is_ar933x())
diff --git a/arch/mips/bcm63xx/Kconfig b/arch/mips/bcm63xx/Kconfig
index 6b1b9ad8d857..d03e8799d1cf 100644
--- a/arch/mips/bcm63xx/Kconfig
+++ b/arch/mips/bcm63xx/Kconfig
@@ -1,6 +1,10 @@
menu "CPU support"
depends on BCM63XX
+config BCM63XX_CPU_6328
+ bool "support 6328 CPU"
+ select HW_HAS_PCI
+
config BCM63XX_CPU_6338
bool "support 6338 CPU"
select HW_HAS_PCI
diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
index 6dfdc69928ac..833af72c852a 100644
--- a/arch/mips/bcm63xx/Makefile
+++ b/arch/mips/bcm63xx/Makefile
@@ -1,5 +1,6 @@
obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
- dev-dsp.o dev-enet.o dev-pcmcia.o dev-uart.o dev-wdt.o
+ dev-dsp.o dev-enet.o dev-flash.o dev-pcmcia.o dev-rng.o \
+ dev-spi.o dev-uart.o dev-wdt.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += boards/
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 2f1773f3fb7a..feb05258a4d1 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -11,9 +11,6 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
#include <linux/ssb/ssb.h>
#include <asm/addrspace.h>
#include <bcm63xx_board.h>
@@ -24,7 +21,9 @@
#include <bcm63xx_dev_pci.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_dev_dsp.h>
+#include <bcm63xx_dev_flash.h>
#include <bcm63xx_dev_pcmcia.h>
+#include <bcm63xx_dev_spi.h>
#include <board_bcm963xx.h>
#define PFX "board_bcm963xx: "
@@ -34,6 +33,48 @@ static unsigned int mac_addr_used;
static struct board_info board;
/*
+ * known 6328 boards
+ */
+#ifdef CONFIG_BCM63XX_CPU_6328
+static struct board_info __initdata board_96328avng = {
+ .name = "96328avng",
+ .expected_cpu_id = 0x6328,
+
+ .has_uart0 = 1,
+ .has_pci = 1,
+
+ .leds = {
+ {
+ .name = "96328avng::ppp-fail",
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::power",
+ .gpio = 4,
+ .active_low = 1,
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "96328avng::power-fail",
+ .gpio = 8,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::wps",
+ .gpio = 9,
+ .active_low = 1,
+ },
+ {
+ .name = "96328avng::ppp",
+ .gpio = 11,
+ .active_low = 1,
+ },
+ },
+};
+#endif
+
+/*
* known 6338 boards
*/
#ifdef CONFIG_BCM63XX_CPU_6338
@@ -592,6 +633,9 @@ static struct board_info __initdata board_DWVS0 = {
* all boards
*/
static const struct board_info __initdata *bcm963xx_boards[] = {
+#ifdef CONFIG_BCM63XX_CPU_6328
+ &board_96328avng,
+#endif
#ifdef CONFIG_BCM63XX_CPU_6338
&board_96338gw,
&board_96338w,
@@ -709,9 +753,15 @@ void __init board_prom_init(void)
char cfe_version[32];
u32 val;
- /* read base address of boot chip select (0) */
- val = bcm_mpi_readl(MPI_CSBASE_REG(0));
- val &= MPI_CSBASE_BASE_MASK;
+ /* read base address of boot chip select (0)
+ * 6328 does not have MPI but boots from a fixed address
+ */
+ if (BCMCPU_IS_6328())
+ val = 0x18000000;
+ else {
+ val = bcm_mpi_readl(MPI_CSBASE_REG(0));
+ val &= MPI_CSBASE_BASE_MASK;
+ }
boot_addr = (u8 *)KSEG1ADDR(val);
/* dump cfe version */
@@ -808,40 +858,6 @@ void __init board_setup(void)
panic("unexpected CPU for bcm963xx board");
}
-static struct mtd_partition mtd_partitions[] = {
- {
- .name = "cfe",
- .offset = 0x0,
- .size = 0x40000,
- }
-};
-
-static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
-
-static struct physmap_flash_data flash_data = {
- .width = 2,
- .nr_parts = ARRAY_SIZE(mtd_partitions),
- .parts = mtd_partitions,
- .part_probe_types = bcm63xx_part_types,
-};
-
-static struct resource mtd_resources[] = {
- {
- .start = 0, /* filled at runtime */
- .end = 0, /* filled at runtime */
- .flags = IORESOURCE_MEM,
- }
-};
-
-static struct platform_device mtd_dev = {
- .name = "physmap-flash",
- .resource = mtd_resources,
- .num_resources = ARRAY_SIZE(mtd_resources),
- .dev = {
- .platform_data = &flash_data,
- },
-};
-
static struct gpio_led_platform_data bcm63xx_led_data;
static struct platform_device bcm63xx_gpio_leds = {
@@ -855,8 +871,6 @@ static struct platform_device bcm63xx_gpio_leds = {
*/
int __init board_register_devices(void)
{
- u32 val;
-
if (board.has_uart0)
bcm63xx_uart_register(0);
@@ -890,14 +904,9 @@ int __init board_register_devices(void)
}
#endif
- /* read base address of boot chip select (0) */
- val = bcm_mpi_readl(MPI_CSBASE_REG(0));
- val &= MPI_CSBASE_BASE_MASK;
-
- mtd_resources[0].start = val;
- mtd_resources[0].end = 0x1FFFFFFF;
+ bcm63xx_spi_register();
- platform_device_register(&mtd_dev);
+ bcm63xx_flash_register();
bcm63xx_led_data.num_leds = ARRAY_SIZE(board.leds);
bcm63xx_led_data.leds = board.leds;
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 9d57c71b7b58..1db48adb543a 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -120,7 +120,7 @@ static void enetsw_set(struct clk *clk, int enable)
{
if (!BCMCPU_IS_6368())
return;
- bcm_hwclock_set(CKCTL_6368_ROBOSW_CLK_EN |
+ bcm_hwclock_set(CKCTL_6368_ROBOSW_EN |
CKCTL_6368_SWPKT_USB_EN |
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
@@ -163,7 +163,7 @@ static void usbh_set(struct clk *clk, int enable)
if (BCMCPU_IS_6348())
bcm_hwclock_set(CKCTL_6348_USBH_EN, enable);
else if (BCMCPU_IS_6368())
- bcm_hwclock_set(CKCTL_6368_USBH_CLK_EN, enable);
+ bcm_hwclock_set(CKCTL_6368_USBH_EN, enable);
}
static struct clk clk_usbh = {
@@ -181,9 +181,11 @@ static void spi_set(struct clk *clk, int enable)
mask = CKCTL_6338_SPI_EN;
else if (BCMCPU_IS_6348())
mask = CKCTL_6348_SPI_EN;
- else
- /* BCMCPU_IS_6358 */
+ else if (BCMCPU_IS_6358())
mask = CKCTL_6358_SPI_EN;
+ else
+ /* BCMCPU_IS_6368 */
+ mask = CKCTL_6368_SPI_EN;
bcm_hwclock_set(mask, enable);
}
@@ -199,7 +201,7 @@ static void xtm_set(struct clk *clk, int enable)
if (!BCMCPU_IS_6368())
return;
- bcm_hwclock_set(CKCTL_6368_SAR_CLK_EN |
+ bcm_hwclock_set(CKCTL_6368_SAR_EN |
CKCTL_6368_SWPKT_SAR_EN, enable);
if (enable) {
@@ -222,6 +224,18 @@ static struct clk clk_xtm = {
};
/*
+ * IPsec clock
+ */
+static void ipsec_set(struct clk *clk, int enable)
+{
+ bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable);
+}
+
+static struct clk clk_ipsec = {
+ .set = ipsec_set,
+};
+
+/*
* Internal peripheral clock
*/
static struct clk clk_periph = {
@@ -278,6 +292,8 @@ struct clk *clk_get(struct device *dev, const char *id)
return &clk_periph;
if (BCMCPU_IS_6358() && !strcmp(id, "pcm"))
return &clk_pcm;
+ if (BCMCPU_IS_6368() && !strcmp(id, "ipsec"))
+ return &clk_ipsec;
return ERR_PTR(-ENOENT);
}
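
The new "ipsec" clock is consumed through the standard clk API; a minimal consumer sketch, assuming only the clk_get()/clk_enable() path added above (the function names here are hypothetical):

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *ipsec_clk;

static int example_ipsec_clock_on(void)
{
	/* Hits the BCMCPU_IS_6368() + "ipsec" case added to clk_get() above. */
	ipsec_clk = clk_get(NULL, "ipsec");
	if (IS_ERR(ipsec_clk))
		return PTR_ERR(ipsec_clk);

	/* ipsec_set() then sets CKCTL_6368_IPSEC_EN in the clock control register. */
	return clk_enable(ipsec_clk);
}

static void example_ipsec_clock_off(void)
{
	clk_disable(ipsec_clk);
	clk_put(ipsec_clk);
}
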
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 8f0d6c7725ea..a7afb289b15a 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -29,6 +29,14 @@ static u16 bcm63xx_cpu_rev;
static unsigned int bcm63xx_cpu_freq;
static unsigned int bcm63xx_memory_size;
+static const unsigned long bcm6328_regs_base[] = {
+ __GEN_CPU_REGS_TABLE(6328)
+};
+
+static const int bcm6328_irqs[] = {
+ __GEN_CPU_IRQ_TABLE(6328)
+};
+
static const unsigned long bcm6338_regs_base[] = {
__GEN_CPU_REGS_TABLE(6338)
};
@@ -99,6 +107,33 @@ unsigned int bcm63xx_get_memory_size(void)
static unsigned int detect_cpu_clock(void)
{
switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ {
+ unsigned int tmp, mips_pll_fcvo;
+
+ tmp = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
+ mips_pll_fcvo = (tmp & STRAPBUS_6328_FCVO_MASK)
+ >> STRAPBUS_6328_FCVO_SHIFT;
+
+ switch (mips_pll_fcvo) {
+ case 0x12:
+ case 0x14:
+ case 0x19:
+ return 160000000;
+ case 0x1c:
+ return 192000000;
+ case 0x13:
+ case 0x15:
+ return 200000000;
+ case 0x1a:
+ return 384000000;
+ case 0x16:
+ return 400000000;
+ default:
+ return 320000000;
+ }
+
+ }
case BCM6338_CPU_ID:
/* BCM6338 has a fixed 240 Mhz frequency */
return 240000000;
@@ -170,6 +205,9 @@ static unsigned int detect_memory_size(void)
unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0;
u32 val;
+ if (BCMCPU_IS_6328())
+ return bcm_ddr_readl(DDR_CSEND_REG) << 24;
+
if (BCMCPU_IS_6345()) {
val = bcm_sdram_readl(SDRAM_MBASE_REG);
return (val * 8 * 1024 * 1024);
@@ -228,17 +266,26 @@ void __init bcm63xx_cpu_init(void)
bcm63xx_irqs = bcm6345_irqs;
break;
case CPU_BMIPS4350:
- switch (read_c0_prid() & 0xf0) {
- case 0x10:
+ if ((read_c0_prid() & 0xf0) == 0x10) {
expected_cpu_id = BCM6358_CPU_ID;
bcm63xx_regs_base = bcm6358_regs_base;
bcm63xx_irqs = bcm6358_irqs;
- break;
- case 0x30:
- expected_cpu_id = BCM6368_CPU_ID;
- bcm63xx_regs_base = bcm6368_regs_base;
- bcm63xx_irqs = bcm6368_irqs;
- break;
+ } else {
+ /* all newer chips have the same chip id location */
+ u16 chip_id = bcm_readw(BCM_6368_PERF_BASE);
+
+ switch (chip_id) {
+ case BCM6328_CPU_ID:
+ expected_cpu_id = BCM6328_CPU_ID;
+ bcm63xx_regs_base = bcm6328_regs_base;
+ bcm63xx_irqs = bcm6328_irqs;
+ break;
+ case BCM6368_CPU_ID:
+ expected_cpu_id = BCM6368_CPU_ID;
+ bcm63xx_regs_base = bcm6368_regs_base;
+ bcm63xx_irqs = bcm6368_irqs;
+ break;
+ }
}
break;
}
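
The 6328 memory-size path above shifts DDR_CSEND_REG left by 24 bits, i.e. the register is treated as the end of the DDR chip select in 16 MiB units. A worked example under that assumption, with a hypothetical register value:

#include <linux/types.h>

/* 0x04 is an illustrative DDR_CSEND_REG value, not a dumped one. */
static unsigned int example_6328_memory_size(u32 csend)
{
	return csend << 24;	/* 0x04 << 24 = 0x04000000 = 64 MiB */
}
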
diff --git a/arch/mips/bcm63xx/dev-dsp.c b/arch/mips/bcm63xx/dev-dsp.c
index da46d1d3c77c..5bb5b154c9bd 100644
--- a/arch/mips/bcm63xx/dev-dsp.c
+++ b/arch/mips/bcm63xx/dev-dsp.c
@@ -31,7 +31,7 @@ static struct resource voip_dsp_resources[] = {
static struct platform_device bcm63xx_voip_dsp_device = {
.name = "bcm63xx-voip-dsp",
- .id = 0,
+ .id = -1,
.num_resources = ARRAY_SIZE(voip_dsp_resources),
.resource = voip_dsp_resources,
};
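
Switching .id from 0 to -1 only changes the device name: a platform device with id -1 is registered without an instance suffix, so it appears as "bcm63xx-voip-dsp" instead of "bcm63xx-voip-dsp.0". Driver matching is still done on the name string alone, as in this hypothetical skeleton:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_dsp_probe(struct platform_device *pdev)
{
	/* pdev->id is -1 here; dev_name(&pdev->dev) is "bcm63xx-voip-dsp". */
	return 0;
}

static struct platform_driver example_dsp_driver = {
	.probe	= example_dsp_probe,
	.driver	= {
		.name	= "bcm63xx-voip-dsp",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(example_dsp_driver);
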
diff --git a/arch/mips/bcm63xx/dev-flash.c b/arch/mips/bcm63xx/dev-flash.c
new file mode 100644
index 000000000000..58371c7deac2
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-flash.c
@@ -0,0 +1,123 @@
+/*
+ * Broadcom BCM63xx flash registration
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2012 Jonas Gorski <jonas.gorski@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_flash.h>
+#include <bcm63xx_regs.h>
+#include <bcm63xx_io.h>
+
+static struct mtd_partition mtd_partitions[] = {
+ {
+ .name = "cfe",
+ .offset = 0x0,
+ .size = 0x40000,
+ }
+};
+
+static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };
+
+static struct physmap_flash_data flash_data = {
+ .width = 2,
+ .parts = mtd_partitions,
+ .part_probe_types = bcm63xx_part_types,
+};
+
+static struct resource mtd_resources[] = {
+ {
+ .start = 0, /* filled at runtime */
+ .end = 0, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device mtd_dev = {
+ .name = "physmap-flash",
+ .resource = mtd_resources,
+ .num_resources = ARRAY_SIZE(mtd_resources),
+ .dev = {
+ .platform_data = &flash_data,
+ },
+};
+
+static int __init bcm63xx_detect_flash_type(void)
+{
+ u32 val;
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ val = bcm_misc_readl(MISC_STRAPBUS_6328_REG);
+ if (val & STRAPBUS_6328_BOOT_SEL_SERIAL)
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ else
+ return BCM63XX_FLASH_TYPE_NAND;
+ case BCM6338_CPU_ID:
+ case BCM6345_CPU_ID:
+ case BCM6348_CPU_ID:
+ /* no way to auto detect so assume parallel */
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ case BCM6358_CPU_ID:
+ val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
+ if (val & STRAPBUS_6358_BOOT_SEL_PARALLEL)
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ else
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ case BCM6368_CPU_ID:
+ val = bcm_gpio_readl(GPIO_STRAPBUS_REG);
+ switch (val & STRAPBUS_6368_BOOT_SEL_MASK) {
+ case STRAPBUS_6368_BOOT_SEL_NAND:
+ return BCM63XX_FLASH_TYPE_NAND;
+ case STRAPBUS_6368_BOOT_SEL_SERIAL:
+ return BCM63XX_FLASH_TYPE_SERIAL;
+ case STRAPBUS_6368_BOOT_SEL_PARALLEL:
+ return BCM63XX_FLASH_TYPE_PARALLEL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+int __init bcm63xx_flash_register(void)
+{
+ int flash_type;
+ u32 val;
+
+ flash_type = bcm63xx_detect_flash_type();
+
+ switch (flash_type) {
+ case BCM63XX_FLASH_TYPE_PARALLEL:
+ /* read base address of boot chip select (0) */
+ val = bcm_mpi_readl(MPI_CSBASE_REG(0));
+ val &= MPI_CSBASE_BASE_MASK;
+
+ mtd_resources[0].start = val;
+ mtd_resources[0].end = 0x1FFFFFFF;
+
+ return platform_device_register(&mtd_dev);
+ case BCM63XX_FLASH_TYPE_SERIAL:
+ pr_warn("unsupported serial flash detected\n");
+ return -ENODEV;
+ case BCM63XX_FLASH_TYPE_NAND:
+ pr_warn("unsupported NAND flash detected\n");
+ return -ENODEV;
+ default:
+ pr_err("flash detection failed for BCM%x: %d\n",
+ bcm63xx_get_cpu_id(), flash_type);
+ return -ENODEV;
+ }
+}
diff --git a/arch/mips/bcm63xx/dev-rng.c b/arch/mips/bcm63xx/dev-rng.c
new file mode 100644
index 000000000000..d277b4dc6c68
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-rng.c
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 Florian Fainelli <florian@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <bcm63xx_cpu.h>
+
+static struct resource rng_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device bcm63xx_rng_device = {
+ .name = "bcm63xx-rng",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rng_resources),
+ .resource = rng_resources,
+};
+
+int __init bcm63xx_rng_register(void)
+{
+ if (!BCMCPU_IS_6368())
+ return -ENODEV;
+
+ rng_resources[0].start = bcm63xx_regset_address(RSET_RNG);
+ rng_resources[0].end = rng_resources[0].start;
+ rng_resources[0].end += RSET_RNG_SIZE - 1;
+
+ return platform_device_register(&bcm63xx_rng_device);
+}
+arch_initcall(bcm63xx_rng_register);
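
The "bcm63xx-rng" platform device registered above is bound by name to a hw_random platform driver. The skeleton below only illustrates the shape of the consuming side; it is not the in-tree drivers/char/hw_random code, and the data-register layout is assumed:

#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_rng_base;

static int example_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = readl(example_rng_base);	/* assumes data register at offset 0 */
	return 4;				/* bytes of entropy returned */
}

static struct hwrng example_rng = {
	.name	   = "bcm63xx-rng",
	.data_read = example_rng_data_read,
};

static int example_rng_probe(struct platform_device *pdev)
{
	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!r)
		return -ENODEV;

	example_rng_base = ioremap(r->start, resource_size(r));
	if (!example_rng_base)
		return -ENOMEM;

	return hwrng_register(&example_rng);
}

static struct platform_driver example_rng_driver = {
	.probe	= example_rng_probe,
	.driver	= {
		.name = "bcm63xx-rng",
	},
};
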
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
new file mode 100644
index 000000000000..f1c9c3e2f678
--- /dev/null
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -0,0 +1,123 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
+#include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_spi.h>
+#include <bcm63xx_regs.h>
+
+#ifdef BCMCPU_RUNTIME_DETECT
+/*
+ * register offsets
+ */
+static const unsigned long bcm6338_regs_spi[] = {
+ __GEN_SPI_REGS_TABLE(6338)
+};
+
+static const unsigned long bcm6348_regs_spi[] = {
+ __GEN_SPI_REGS_TABLE(6348)
+};
+
+static const unsigned long bcm6358_regs_spi[] = {
+ __GEN_SPI_REGS_TABLE(6358)
+};
+
+static const unsigned long bcm6368_regs_spi[] = {
+ __GEN_SPI_REGS_TABLE(6368)
+};
+
+const unsigned long *bcm63xx_regs_spi;
+EXPORT_SYMBOL(bcm63xx_regs_spi);
+
+static __init void bcm63xx_spi_regs_init(void)
+{
+ if (BCMCPU_IS_6338())
+ bcm63xx_regs_spi = bcm6338_regs_spi;
+ if (BCMCPU_IS_6348())
+ bcm63xx_regs_spi = bcm6348_regs_spi;
+ if (BCMCPU_IS_6358())
+ bcm63xx_regs_spi = bcm6358_regs_spi;
+ if (BCMCPU_IS_6368())
+ bcm63xx_regs_spi = bcm6368_regs_spi;
+}
+#else
+static __init void bcm63xx_spi_regs_init(void) { }
+#endif
+
+static struct resource spi_resources[] = {
+ {
+ .start = -1, /* filled at runtime */
+ .end = -1, /* filled at runtime */
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = -1, /* filled at runtime */
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct bcm63xx_spi_pdata spi_pdata = {
+ .bus_num = 0,
+ .num_chipselect = 8,
+};
+
+static struct platform_device bcm63xx_spi_device = {
+ .name = "bcm63xx-spi",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(spi_resources),
+ .resource = spi_resources,
+ .dev = {
+ .platform_data = &spi_pdata,
+ },
+};
+
+int __init bcm63xx_spi_register(void)
+{
+ struct clk *periph_clk;
+
+ if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
+ return -ENODEV;
+
+ periph_clk = clk_get(NULL, "periph");
+ if (IS_ERR(periph_clk)) {
+ pr_err("unable to get periph clock\n");
+ return -ENODEV;
+ }
+
+ /* Set bus frequency */
+ spi_pdata.speed_hz = clk_get_rate(periph_clk);
+
+ spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
+ spi_resources[0].end = spi_resources[0].start;
+ spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
+
+ if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
+ spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
+ spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+ }
+
+ if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+ spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
+ spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
+ }
+
+ bcm63xx_spi_regs_init();
+
+ return platform_device_register(&bcm63xx_spi_device);
+}
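
bcm63xx_spi_register() only instantiates the SPI master; a serial flash chip (the case dev-flash.c still rejects) would then be attached as an SPI device on bus 0 from board code. A hypothetical sketch of such a registration; the chip type and speed are illustrative, not part of this series:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>

static struct flash_platform_data example_flash_data = {
	.name = "spi-flash",
	.type = "m25p64",			/* assumed flash chip */
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",	/* generic SPI NOR driver */
		.max_speed_hz	= 20000000,
		.bus_num	= 0,		/* matches spi_pdata.bus_num above */
		.chip_select	= 0,
		.platform_data	= &example_flash_data,
	},
};

static int __init example_board_spi_init(void)
{
	return spi_register_board_info(example_spi_devices,
				       ARRAY_SIZE(example_spi_devices));
}
arch_initcall(example_board_spi_init);
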
diff --git a/arch/mips/bcm63xx/dev-wdt.c b/arch/mips/bcm63xx/dev-wdt.c
index 3e6c716a4c11..2a2346a99bcb 100644
--- a/arch/mips/bcm63xx/dev-wdt.c
+++ b/arch/mips/bcm63xx/dev-wdt.c
@@ -21,7 +21,7 @@ static struct resource wdt_resources[] = {
static struct platform_device bcm63xx_wdt_device = {
.name = "bcm63xx-wdt",
- .id = 0,
+ .id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
};
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index 9a216a451d92..18e051ad18a5 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -27,6 +27,17 @@ static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused;
static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
#ifndef BCMCPU_RUNTIME_DETECT
+#ifdef CONFIG_BCM63XX_CPU_6328
+#define irq_stat_reg PERF_IRQSTAT_6328_REG
+#define irq_mask_reg PERF_IRQMASK_6328_REG
+#define irq_bits 64
+#define is_ext_irq_cascaded 1
+#define ext_irq_start (BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE)
+#define ext_irq_end (BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE)
+#define ext_irq_count 4
+#define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6328
+#define ext_irq_cfg_reg2 0
+#endif
#ifdef CONFIG_BCM63XX_CPU_6338
#define irq_stat_reg PERF_IRQSTAT_6338_REG
#define irq_mask_reg PERF_IRQMASK_6338_REG
@@ -118,6 +129,16 @@ static void bcm63xx_init_irq(void)
irq_mask_addr = bcm63xx_regset_address(RSET_PERF);
switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ irq_stat_addr += PERF_IRQSTAT_6328_REG;
+ irq_mask_addr += PERF_IRQMASK_6328_REG;
+ irq_bits = 64;
+ ext_irq_count = 4;
+ is_ext_irq_cascaded = 1;
+ ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
+ ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
+ ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
+ break;
case BCM6338_CPU_ID:
irq_stat_addr += PERF_IRQSTAT_6338_REG;
irq_mask_addr += PERF_IRQMASK_6338_REG;
diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
index 99d7f405cbeb..10eaff458071 100644
--- a/arch/mips/bcm63xx/prom.c
+++ b/arch/mips/bcm63xx/prom.c
@@ -26,7 +26,9 @@ void __init prom_init(void)
bcm_wdt_writel(WDT_STOP_2, WDT_CTL_REG);
/* disable all hardware blocks clock for now */
- if (BCMCPU_IS_6338())
+ if (BCMCPU_IS_6328())
+ mask = CKCTL_6328_ALL_SAFE_EN;
+ else if (BCMCPU_IS_6338())
mask = CKCTL_6338_ALL_SAFE_EN;
else if (BCMCPU_IS_6345())
mask = CKCTL_6345_ALL_SAFE_EN;
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index 356b05583e14..0e74a13639cd 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -68,6 +68,9 @@ void bcm63xx_machine_reboot(void)
/* mask and clear all external irq */
switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ perf_regs[0] = PERF_EXTIRQ_CFG_REG_6328;
+ break;
case BCM6338_CPU_ID:
perf_regs[0] = PERF_EXTIRQ_CFG_REG_6338;
break;
@@ -95,9 +98,13 @@ void bcm63xx_machine_reboot(void)
bcm6348_a1_reboot();
printk(KERN_INFO "triggering watchdog soft-reset...\n");
- reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
- reg |= SYS_PLL_SOFT_RESET;
- bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
+ if (BCMCPU_IS_6328()) {
+ bcm_wdt_writel(1, WDT_SOFTRESET_REG);
+ } else {
+ reg = bcm_perf_readl(PERF_SYS_PLL_CTL_REG);
+ reg |= SYS_PLL_SOFT_RESET;
+ bcm_perf_writel(reg, PERF_SYS_PLL_CTL_REG);
+ }
while (1)
;
}
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 5042d51b0512..c2a3fb0ffc87 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -58,8 +58,12 @@ $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
# Calculate the load address of the compressed kernel image
hostprogs-y := calc_vmlinuz_load_addr
+ifeq ($(CONFIG_MACH_JZ4740),y)
+VMLINUZ_LOAD_ADDRESS := 0x80600000
+else
VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \
$(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS))
+endif
vmlinuzobjs-y += $(obj)/piggy.o
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index c9caaf4fbf60..1c7b739b6a1d 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -18,6 +18,11 @@
#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
#endif
+#ifdef CONFIG_MACH_JZ4740
+#define UART0_BASE 0xB0030000
+#define PORT(offset) (UART0_BASE + (4 * offset))
+#endif
+
#ifndef PORT
#error please define the serial port address for your own machine
#endif
diff --git a/arch/mips/cavium-octeon/.gitignore b/arch/mips/cavium-octeon/.gitignore
new file mode 100644
index 000000000000..39c968605ff6
--- /dev/null
+++ b/arch/mips/cavium-octeon/.gitignore
@@ -0,0 +1,2 @@
+*.dtb.S
+*.dtb
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 19eb0434269f..bc96e2908f14 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -9,9 +9,25 @@
# Copyright (C) 2005-2009 Cavium Networks
#
+CFLAGS_octeon-platform.o = -I$(src)/../../../scripts/dtc/libfdt
+CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt
+
obj-y := cpu.o setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o
obj-y += dma-octeon.o flash_setup.o
obj-y += octeon-memcpy.o
obj-y += executive/
obj-$(CONFIG_SMP) += smp.o
+
+DTS_FILES = octeon_3xxx.dts octeon_68xx.dts
+DTB_FILES = $(patsubst %.dts, %.dtb, $(DTS_FILES))
+
+obj-y += $(patsubst %.dts, %.dtb.o, $(DTS_FILES))
+
+$(obj)/%.dtb: $(src)/%.dts FORCE
+ $(call if_changed_dep,dtc)
+
+# Let's keep the .dtb files around in case we want to look at them.
+.SECONDARY: $(addprefix $(obj)/, $(DTB_FILES))
+
+clean-files += $(DTB_FILES) $(patsubst %.dtb, %.dtb.S, $(DTB_FILES))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-fpa.c b/arch/mips/cavium-octeon/executive/cvmx-fpa.c
deleted file mode 100644
index ad44b8bd8057..000000000000
--- a/arch/mips/cavium-octeon/executive/cvmx-fpa.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Support library for the hardware Free Pool Allocator.
- *
- *
- */
-
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-fpa.h"
-#include "cvmx-ipd.h"
-
-/**
- * Current state of all the pools. Use access functions
- * instead of using it directly.
- */
-CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
-
-/**
- * Setup a FPA pool to control a new block of memory. The
- * buffer pointer must be a physical address.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks)
-{
- char *ptr;
- if (!buffer) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
- return -1;
- }
- if (pool >= CVMX_FPA_NUM_POOLS) {
- cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
- return -1;
- }
-
- if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
- return -1;
- }
-
- if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
- return -1;
- }
-
- cvmx_fpa_pool_info[pool].name = name;
- cvmx_fpa_pool_info[pool].size = block_size;
- cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
- cvmx_fpa_pool_info[pool].base = buffer;
-
- ptr = (char *)buffer;
- while (num_blocks--) {
- cvmx_fpa_free(ptr, pool, 0);
- ptr += block_size;
- }
- return 0;
-}
-
-/**
- * Shutdown a Memory pool and validate that it had all of
- * the buffers originally placed in it.
- *
- * @pool: Pool to shutdown
- * Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
- */
-uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
-{
- uint64_t errors = 0;
- uint64_t count = 0;
- uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
- uint64_t finish =
- base +
- cvmx_fpa_pool_info[pool].size *
- cvmx_fpa_pool_info[pool].starting_element_count;
- void *ptr;
- uint64_t address;
-
- count = 0;
- do {
- ptr = cvmx_fpa_alloc(pool);
- if (ptr)
- address = cvmx_ptr_to_phys(ptr);
- else
- address = 0;
- if (address) {
- if ((address >= base) && (address < finish) &&
- (((address -
- base) % cvmx_fpa_pool_info[pool].size) == 0)) {
- count++;
- } else {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
- (unsigned long long)address,
- cvmx_fpa_pool_info[pool].name, (int)pool);
- errors++;
- }
- }
- } while (address);
-
-#ifdef CVMX_ENABLE_PKO_FUNCTIONS
- if (pool == 0)
- cvmx_ipd_free_ptr();
-#endif
-
- if (errors) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
- cvmx_fpa_pool_info[pool].name, (int)pool,
- (unsigned long long)base, (unsigned long long)finish,
- (unsigned long long)cvmx_fpa_pool_info[pool].size);
- return -errors;
- } else
- return 0;
-}
-
-uint64_t cvmx_fpa_get_block_size(uint64_t pool)
-{
- switch (pool) {
- case 0:
- return CVMX_FPA_POOL_0_SIZE;
- case 1:
- return CVMX_FPA_POOL_1_SIZE;
- case 2:
- return CVMX_FPA_POOL_2_SIZE;
- case 3:
- return CVMX_FPA_POOL_3_SIZE;
- case 4:
- return CVMX_FPA_POOL_4_SIZE;
- case 5:
- return CVMX_FPA_POOL_5_SIZE;
- case 6:
- return CVMX_FPA_POOL_6_SIZE;
- case 7:
- return CVMX_FPA_POOL_7_SIZE;
- default:
- return 0;
- }
-}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-fpa.c b/arch/mips/cavium-octeon/executive/cvmx-helper-fpa.c
deleted file mode 100644
index c239e5f4ab9a..000000000000
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-fpa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#include "executive-config.h"
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-bootmem.h"
-#include "cvmx-fpa.h"
-#include "cvmx-helper-fpa.h"
-
-/**
- * Allocate memory for and initialize a single FPA pool.
- *
- * @pool: Pool to initialize
- * @buffer_size: Size of buffers to allocate in bytes
- * @buffers: Number of buffers to put in the pool. Zero is allowed
- * @name: String name of the pool for debugging purposes
- * Returns Zero on success, non-zero on failure
- */
-static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
- uint64_t buffers, const char *name)
-{
- uint64_t current_num;
- void *memory;
- uint64_t align = CVMX_CACHE_LINE_SIZE;
-
- /*
- * Align the allocation so that power of 2 size buffers are
- * naturally aligned.
- */
- while (align < buffer_size)
- align = align << 1;
-
- if (buffers == 0)
- return 0;
-
- current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
- if (current_num) {
- cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. "
- "Skipping setup.\n",
- pool, name, (unsigned long long)current_num);
- return 0;
- }
-
- memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
- if (memory == NULL) {
- cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n",
- pool, name);
- return -1;
- }
- cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
- return 0;
-}
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Specifying zero for the number of
- * buffers will cause that FPA pool to not be setup. This is
- * useful if you aren't using some of the hardware and want
- * to save memory. Use cvmx_helper_initialize_fpa instead of
- * this function directly.
- *
- * @pip_pool: Should always be CVMX_FPA_PACKET_POOL
- * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE
- * @pip_buffers:
- * Number of packet buffers.
- * @wqe_pool: Should always be CVMX_FPA_WQE_POOL
- * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE
- * @wqe_entries:
- * Number of work queue entries
- * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
- * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
- * @pko_buffers:
- * PKO Command buffers. You should at minimum have two per
- * each PKO queue.
- * @tim_pool: Should always be CVMX_FPA_TIMER_POOL
- * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * is recommened.
- * @dfa_pool: Should always be CVMX_FPA_DFA_POOL
- * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size,
- int pip_buffers, int wqe_pool,
- int wqe_size, int wqe_entries,
- int pko_pool, int pko_size,
- int pko_buffers, int tim_pool,
- int tim_size, int tim_buffers,
- int dfa_pool, int dfa_size,
- int dfa_buffers)
-{
- int status;
-
- cvmx_fpa_enable();
-
- if ((pip_buffers > 0) && (pip_buffers <= 64))
- cvmx_dprintf
- ("Warning: %d packet buffers may not be enough for hardware"
- " prefetch. 65 or more is recommended.\n", pip_buffers);
-
- if (pip_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size,
- pip_buffers,
- "Packet Buffers");
- if (status)
- return status;
- }
-
- if (wqe_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size,
- wqe_entries,
- "Work Queue Entries");
- if (status)
- return status;
- }
-
- if (pko_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size,
- pko_buffers,
- "PKO Command Buffers");
- if (status)
- return status;
- }
-
- if (tim_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size,
- tim_buffers,
- "TIM Command Buffers");
- if (status)
- return status;
- }
-
- if (dfa_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size,
- dfa_buffers,
- "DFA Command Buffers");
- if (status)
- return status;
- }
-
- return 0;
-}
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Sizes of each element in the pools is
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should at minimum have two per
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * is recommened.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
- int pko_buffers, int tim_buffers,
- int dfa_buffers)
-{
-#ifndef CVMX_FPA_PACKET_POOL
-#define CVMX_FPA_PACKET_POOL -1
-#define CVMX_FPA_PACKET_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_WQE_POOL
-#define CVMX_FPA_WQE_POOL -1
-#define CVMX_FPA_WQE_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
-#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_TIMER_POOL
-#define CVMX_FPA_TIMER_POOL -1
-#define CVMX_FPA_TIMER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_DFA_POOL
-#define CVMX_FPA_DFA_POOL -1
-#define CVMX_FPA_DFA_POOL_SIZE 0
-#endif
- return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- packet_buffers, CVMX_FPA_WQE_POOL,
- CVMX_FPA_WQE_POOL_SIZE,
- work_queue_entries,
- CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
- pko_buffers, CVMX_FPA_TIMER_POOL,
- CVMX_FPA_TIMER_POOL_SIZE,
- tim_buffers, CVMX_FPA_DFA_POOL,
- CVMX_FPA_DFA_POOL_SIZE,
- dfa_buffers);
-}
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index ffd4ae660f79..274cd4fad30c 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,14 +3,17 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks
+ * Copyright (C) 2004-2012 Cavium, Inc.
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
+#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
+#include <linux/of.h>
#include <asm/octeon/octeon.h>
@@ -42,9 +45,9 @@ struct octeon_core_chip_data {
static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
-static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
- struct irq_chip *chip,
- irq_flow_handler_t handler)
+static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
+ struct irq_chip *chip,
+ irq_flow_handler_t handler)
{
union octeon_ciu_chip_data cd;
@@ -58,6 +61,12 @@ static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
octeon_irq_ciu_to_irq[line][bit] = irq;
}
+static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+ int irq, int line, int bit)
+{
+ irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
@@ -180,19 +189,9 @@ static void __init octeon_irq_init_core(void)
mutex_init(&cd->core_irq_mutex);
irq = OCTEON_IRQ_SW0 + i;
- switch (irq) {
- case OCTEON_IRQ_TIMER:
- case OCTEON_IRQ_SW0:
- case OCTEON_IRQ_SW1:
- case OCTEON_IRQ_5:
- case OCTEON_IRQ_PERF:
- irq_set_chip_data(irq, cd);
- irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
- handle_percpu_irq);
- break;
- default:
- break;
- }
+ irq_set_chip_data(irq, cd);
+ irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
}
}
@@ -505,6 +504,85 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
}
}
+static void octeon_irq_gpio_setup(struct irq_data *data)
+{
+ union cvmx_gpio_bit_cfgx cfg;
+ union octeon_ciu_chip_data cd;
+ u32 t = irqd_get_trigger_type(data);
+
+ cd.p = irq_data_get_irq_chip_data(data);
+
+ cfg.u64 = 0;
+ cfg.s.int_en = 1;
+ cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
+ cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
+
+ /* 140 nS glitch filter*/
+ cfg.s.fil_cnt = 7;
+ cfg.s.fil_sel = 3;
+
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), cfg.u64);
+}
+
+static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
+{
+ octeon_irq_gpio_setup(data);
+ octeon_irq_ciu_enable_v2(data);
+}
+
+static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
+{
+ octeon_irq_gpio_setup(data);
+ octeon_irq_ciu_enable(data);
+}
+
+static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
+{
+ irqd_set_trigger_type(data, t);
+ octeon_irq_gpio_setup(data);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
+{
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0);
+
+ octeon_irq_ciu_disable_all_v2(data);
+}
+
+static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
+{
+ union octeon_ciu_chip_data cd;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0);
+
+ octeon_irq_ciu_disable_all(data);
+}
+
+static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
+{
+ union octeon_ciu_chip_data cd;
+ u64 mask;
+
+ cd.p = irq_data_get_irq_chip_data(data);
+ mask = 1ull << (cd.s.bit - 16);
+
+ cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
+}
+
+static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+{
+ if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
+ handle_edge_irq(irq, desc);
+ else
+ handle_level_irq(irq, desc);
+}
+
#ifdef CONFIG_SMP
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
@@ -650,18 +728,6 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable_v2,
.irq_disable = octeon_irq_ciu_disable_all_v2,
- .irq_mask = octeon_irq_ciu_disable_local_v2,
- .irq_unmask = octeon_irq_ciu_enable_v2,
-#ifdef CONFIG_SMP
- .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
- .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
-#endif
-};
-
-static struct irq_chip octeon_irq_chip_ciu_edge_v2 = {
- .name = "CIU-E",
- .irq_enable = octeon_irq_ciu_enable_v2,
- .irq_disable = octeon_irq_ciu_disable_all_v2,
.irq_ack = octeon_irq_ciu_ack,
.irq_mask = octeon_irq_ciu_disable_local_v2,
.irq_unmask = octeon_irq_ciu_enable_v2,
@@ -675,19 +741,8 @@ static struct irq_chip octeon_irq_chip_ciu = {
.name = "CIU",
.irq_enable = octeon_irq_ciu_enable,
.irq_disable = octeon_irq_ciu_disable_all,
- .irq_mask = octeon_irq_dummy_mask,
-#ifdef CONFIG_SMP
- .irq_set_affinity = octeon_irq_ciu_set_affinity,
- .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
-#endif
-};
-
-static struct irq_chip octeon_irq_chip_ciu_edge = {
- .name = "CIU-E",
- .irq_enable = octeon_irq_ciu_enable,
- .irq_disable = octeon_irq_ciu_disable_all,
- .irq_mask = octeon_irq_dummy_mask,
.irq_ack = octeon_irq_ciu_ack,
+ .irq_mask = octeon_irq_dummy_mask,
#ifdef CONFIG_SMP
.irq_set_affinity = octeon_irq_ciu_set_affinity,
.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
@@ -717,6 +772,33 @@ static struct irq_chip octeon_irq_chip_ciu_mbox = {
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
+static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
+ .name = "CIU-GPIO",
+ .irq_enable = octeon_irq_ciu_enable_gpio_v2,
+ .irq_disable = octeon_irq_ciu_disable_gpio_v2,
+ .irq_ack = octeon_irq_ciu_gpio_ack,
+ .irq_mask = octeon_irq_ciu_disable_local_v2,
+ .irq_unmask = octeon_irq_ciu_enable_v2,
+ .irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static struct irq_chip octeon_irq_chip_ciu_gpio = {
+ .name = "CIU-GPIO",
+ .irq_enable = octeon_irq_ciu_enable_gpio,
+ .irq_disable = octeon_irq_ciu_disable_gpio,
+ .irq_mask = octeon_irq_dummy_mask,
+ .irq_ack = octeon_irq_ciu_gpio_ack,
+ .irq_set_type = octeon_irq_ciu_gpio_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu_set_affinity,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
/*
* Watchdog interrupts are special. They are associated with a single
* core, so we hardwire the affinity to that core.
@@ -764,6 +846,178 @@ static struct irq_chip octeon_irq_chip_ciu_wd = {
.irq_mask = octeon_irq_dummy_mask,
};
+static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
+{
+ bool edge = false;
+
+ if (line == 0)
+ switch (bit) {
+ case 48 ... 49: /* GMX DRP */
+ case 50: /* IPD_DRP */
+ case 52 ... 55: /* Timers */
+ case 58: /* MPI */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ else /* line == 1 */
+ switch (bit) {
+ case 47: /* PTP */
+ edge = true;
+ break;
+ default:
+ break;
+ }
+ return edge;
+}
+
+struct octeon_irq_gpio_domain_data {
+ unsigned int base_hwirq;
+};
+
+static int octeon_irq_gpio_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int type;
+ unsigned int pin;
+ unsigned int trigger;
+
+ if (d->of_node != node)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ pin = intspec[0];
+ if (pin >= 16)
+ return -EINVAL;
+
+ trigger = intspec[1];
+
+ switch (trigger) {
+ case 1:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case 2:
+ type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ case 4:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case 8:
+ type = IRQ_TYPE_LEVEL_LOW;
+ break;
+ default:
+ pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
+ node->name,
+ trigger);
+ type = IRQ_TYPE_LEVEL_LOW;
+ break;
+ }
+ *out_type = type;
+ *out_hwirq = pin;
+
+ return 0;
+}
+
+static int octeon_irq_ciu_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ unsigned int ciu, bit;
+
+ ciu = intspec[0];
+ bit = intspec[1];
+
+ if (ciu > 1 || bit > 63)
+ return -EINVAL;
+
+ /* These are the GPIO lines */
+ if (ciu == 0 && bit >= 16 && bit < 32)
+ return -EINVAL;
+
+ *out_hwirq = (ciu << 6) | bit;
+ *out_type = 0;
+
+ return 0;
+}
+
+static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_gpio_chip;
+
+static bool octeon_irq_virq_in_range(unsigned int virq)
+{
+ /* We cannot let it overflow the mapping array. */
+ if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
+ return true;
+
+ WARN_ONCE(true, "virq out of range %u.\n", virq);
+ return false;
+}
+
+static int octeon_irq_ciu_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ unsigned int line = hw >> 6;
+ unsigned int bit = hw & 63;
+
+ if (!octeon_irq_virq_in_range(virq))
+ return -EINVAL;
+
+ if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ return -EINVAL;
+
+ if (octeon_irq_ciu_is_edge(line, bit))
+ octeon_irq_set_ciu_mapping(virq, line, bit,
+ octeon_irq_ciu_chip,
+ handle_edge_irq);
+ else
+ octeon_irq_set_ciu_mapping(virq, line, bit,
+ octeon_irq_ciu_chip,
+ handle_level_irq);
+
+ return 0;
+}
+
+static int octeon_irq_gpio_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+ unsigned int line, bit;
+
+ if (!octeon_irq_virq_in_range(virq))
+ return -EINVAL;
+
+ hw += gpiod->base_hwirq;
+ line = hw >> 6;
+ bit = hw & 63;
+ if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ return -EINVAL;
+
+ octeon_irq_set_ciu_mapping(virq, line, bit,
+ octeon_irq_gpio_chip,
+ octeon_irq_handle_gpio);
+ return 0;
+}
+
+static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
+ .map = octeon_irq_ciu_map,
+ .xlate = octeon_irq_ciu_xlat,
+};
+
+static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
+ .map = octeon_irq_gpio_map,
+ .xlate = octeon_irq_gpio_xlat,
+};
+
static void octeon_irq_ip2_v1(void)
{
const unsigned long core_id = cvmx_get_core_num();
@@ -887,9 +1141,11 @@ static void __init octeon_irq_init_ciu(void)
{
unsigned int i;
struct irq_chip *chip;
- struct irq_chip *chip_edge;
struct irq_chip *chip_mbox;
struct irq_chip *chip_wd;
+ struct device_node *gpio_node;
+ struct device_node *ciu_node;
+ struct irq_domain *ciu_domain = NULL;
octeon_irq_init_ciu_percpu();
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -901,99 +1157,69 @@ static void __init octeon_irq_init_ciu(void)
octeon_irq_ip2 = octeon_irq_ip2_v2;
octeon_irq_ip3 = octeon_irq_ip3_v2;
chip = &octeon_irq_chip_ciu_v2;
- chip_edge = &octeon_irq_chip_ciu_edge_v2;
chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
chip_wd = &octeon_irq_chip_ciu_wd_v2;
+ octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
} else {
octeon_irq_ip2 = octeon_irq_ip2_v1;
octeon_irq_ip3 = octeon_irq_ip3_v1;
chip = &octeon_irq_chip_ciu;
- chip_edge = &octeon_irq_chip_ciu_edge;
chip_mbox = &octeon_irq_chip_ciu_mbox;
chip_wd = &octeon_irq_chip_ciu_wd;
+ octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
}
+ octeon_irq_ciu_chip = chip;
octeon_irq_ip4 = octeon_irq_ip4_mask;
/* Mips internal */
octeon_irq_init_core();
+ gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
+ if (gpio_node) {
+ struct octeon_irq_gpio_domain_data *gpiod;
+
+ gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+ if (gpiod) {
+ /* gpio domain host_data is the base hwirq number. */
+ gpiod->base_hwirq = 16;
+ irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+ of_node_put(gpio_node);
+ } else
+ pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+ } else
+ pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
+
+ ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
+ if (ciu_node) {
+ ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
+ of_node_put(ciu_node);
+ } else
+ panic("Cannot find device node for cavium,octeon-3860-ciu.");
+
/* CIU_0 */
for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
- for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq);
-
for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq);
-
- for (i = 0; i < 2; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
/* CIU_1 */
for (i = 0; i < 16; i++)
octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
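
With the CIU and GPIO irq_domains in place, consumers resolve these interrupts from the device tree instead of relying on the removed fixed OCTEON_IRQ_* mappings; the lookup goes through the xlate and map callbacks added above. A hypothetical consumer sketch (node and handler names are made up):

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_dt_irq(struct device_node *np)
{
	unsigned int virq;

	/*
	 * For a GPIO interrupt the DT specifier is <pin trigger>; it is
	 * decoded by octeon_irq_gpio_xlat() and mapped by
	 * octeon_irq_gpio_map() before a Linux virq is returned.
	 */
	virq = irq_of_parse_and_map(np, 0);
	if (!virq)
		return -EINVAL;

	return request_irq(virq, example_isr, 0, "example", NULL);
}
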
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 88e0cddca205..db478dbb9c7b 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -164,6 +164,14 @@
.set noat
/*
+ * t7 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+ b __copy_user_common
+ li t7, 1
+ END(__copy_user_inatomic)
+
+/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -174,6 +182,8 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
move v0, dst /* return value */
__memcpy:
FEXPORT(__copy_user)
+ li t7, 0 /* not inatomic */
+__copy_user_common:
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -412,7 +422,6 @@ l_exc_copy:
* Assumes src < THREAD_BUADDR($28)
*/
LOAD t0, TI_TASK($28)
- nop
LOAD t0, THREAD_BUADDR(t0)
1:
EXC( lb t1, 0(src), l_exc)
@@ -422,10 +431,9 @@ EXC( lb t1, 0(src), l_exc)
ADD dst, dst, 1
l_exc:
LOAD t0, TI_TASK($28)
- nop
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
SUB len, AT, t0 # len number of uncopied bytes
+ bnez t7, 2f /* Skip the zeroing out part if inatomic */
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
@@ -443,7 +451,7 @@ l_exc:
ADD dst, dst, 1
bnez src, 1b
SUB src, src, 1
- jr ra
+2: jr ra
nop
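
The new __copy_user_inatomic entry point sets t7 so that the fault fixup skips zeroing the uncopied tail, which is the contract expected by callers that copy from user space with page faults disabled and handle short copies themselves. A hypothetical caller sketch:

#include <linux/uaccess.h>

/* Copy up to @len bytes without sleeping; returns the number of bytes NOT copied. */
static unsigned long example_copy_nofault(void *dst, const void __user *src,
					  unsigned long len)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	/* On a fault, left > 0 and the destination tail is left untouched. */
	return left;
}
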
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index cd61d7281d91..0938df10a71c 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2010 Cavium Networks
+ * Copyright (C) 2004-2011 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
@@ -13,10 +13,16 @@
#include <linux/usb.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-rnm-defs.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-board.h>
static struct octeon_cf_data octeon_cf_data;
@@ -162,182 +168,6 @@ out:
}
device_initcall(octeon_rng_device_init);
-static struct i2c_board_info __initdata octeon_i2c_devices[] = {
- {
- I2C_BOARD_INFO("ds1337", 0x68),
- },
-};
-
-static int __init octeon_i2c_devices_init(void)
-{
- return i2c_register_board_info(0, octeon_i2c_devices,
- ARRAY_SIZE(octeon_i2c_devices));
-}
-arch_initcall(octeon_i2c_devices_init);
-
-#define OCTEON_I2C_IO_BASE 0x1180000001000ull
-#define OCTEON_I2C_IO_UNIT_OFFSET 0x200
-
-static struct octeon_i2c_data octeon_i2c_data[2];
-
-static int __init octeon_i2c_device_init(void)
-{
- struct platform_device *pd;
- int ret = 0;
- int port, num_ports;
-
- struct resource i2c_resources[] = {
- {
- .flags = IORESOURCE_MEM,
- }, {
- .flags = IORESOURCE_IRQ,
- }
- };
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
- num_ports = 2;
- else
- num_ports = 1;
-
- for (port = 0; port < num_ports; port++) {
- octeon_i2c_data[port].sys_freq = octeon_get_io_clock_rate();
- /*FIXME: should be examined. At the moment is set for 100Khz */
- octeon_i2c_data[port].i2c_freq = 100000;
-
- pd = platform_device_alloc("i2c-octeon", port);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- pd->dev.platform_data = octeon_i2c_data + port;
-
- i2c_resources[0].start =
- OCTEON_I2C_IO_BASE + (port * OCTEON_I2C_IO_UNIT_OFFSET);
- i2c_resources[0].end = i2c_resources[0].start + 0x1f;
- switch (port) {
- case 0:
- i2c_resources[1].start = OCTEON_IRQ_TWSI;
- i2c_resources[1].end = OCTEON_IRQ_TWSI;
- break;
- case 1:
- i2c_resources[1].start = OCTEON_IRQ_TWSI2;
- i2c_resources[1].end = OCTEON_IRQ_TWSI2;
- break;
- default:
- BUG();
- }
-
- ret = platform_device_add_resources(pd,
- i2c_resources,
- ARRAY_SIZE(i2c_resources));
- if (ret)
- goto fail;
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
- }
- return ret;
-fail:
- platform_device_put(pd);
-out:
- return ret;
-}
-device_initcall(octeon_i2c_device_init);
-
-/* Octeon SMI/MDIO interface. */
-static int __init octeon_mdiobus_device_init(void)
-{
- struct platform_device *pd;
- int ret = 0;
-
- if (octeon_is_simulation())
- return 0; /* No mdio in the simulator. */
-
- /* The bus number is the platform_device id. */
- pd = platform_device_alloc("mdio-octeon", 0);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
-
- return ret;
-fail:
- platform_device_put(pd);
-
-out:
- return ret;
-
-}
-device_initcall(octeon_mdiobus_device_init);
-
-/* Octeon mgmt port Ethernet interface. */
-static int __init octeon_mgmt_device_init(void)
-{
- struct platform_device *pd;
- int ret = 0;
- int port, num_ports;
-
- struct resource mgmt_port_resource = {
- .flags = IORESOURCE_IRQ,
- .start = -1,
- .end = -1
- };
-
- if (!OCTEON_IS_MODEL(OCTEON_CN56XX) && !OCTEON_IS_MODEL(OCTEON_CN52XX))
- return 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- num_ports = 1;
- else
- num_ports = 2;
-
- for (port = 0; port < num_ports; port++) {
- pd = platform_device_alloc("octeon_mgmt", port);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
- /* No DMA restrictions */
- pd->dev.coherent_dma_mask = DMA_BIT_MASK(64);
- pd->dev.dma_mask = &pd->dev.coherent_dma_mask;
-
- switch (port) {
- case 0:
- mgmt_port_resource.start = OCTEON_IRQ_MII0;
- break;
- case 1:
- mgmt_port_resource.start = OCTEON_IRQ_MII1;
- break;
- default:
- BUG();
- }
- mgmt_port_resource.end = mgmt_port_resource.start;
-
- ret = platform_device_add_resources(pd, &mgmt_port_resource, 1);
-
- if (ret)
- goto fail;
-
- ret = platform_device_add(pd);
- if (ret)
- goto fail;
- }
- return ret;
-fail:
- platform_device_put(pd);
-
-out:
- return ret;
-
-}
-device_initcall(octeon_mgmt_device_init);
-
#ifdef CONFIG_USB
static int __init octeon_ehci_device_init(void)
@@ -440,6 +270,521 @@ device_initcall(octeon_ohci_device_init);
#endif /* CONFIG_USB */
+static struct of_device_id __initdata octeon_ids[] = {
+ { .compatible = "simple-bus", },
+ { .compatible = "cavium,octeon-6335-uctl", },
+ { .compatible = "cavium,octeon-3860-bootbus", },
+ { .compatible = "cavium,mdio-mux", },
+ { .compatible = "gpio-leds", },
+ {},
+};
+
+static bool __init octeon_has_88e1145(void)
+{
+ return !OCTEON_IS_MODEL(OCTEON_CN52XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN6XXX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN56XX);
+}
+
+static void __init octeon_fdt_set_phy(int eth, int phy_addr)
+{
+ const __be32 *phy_handle;
+ const __be32 *alt_phy_handle;
+ const __be32 *reg;
+ u32 phandle;
+ int phy;
+ int alt_phy;
+ const char *p;
+ int current_len;
+ char new_name[20];
+
+ phy_handle = fdt_getprop(initial_boot_params, eth, "phy-handle", NULL);
+ if (!phy_handle)
+ return;
+
+ phandle = be32_to_cpup(phy_handle);
+ phy = fdt_node_offset_by_phandle(initial_boot_params, phandle);
+
+ alt_phy_handle = fdt_getprop(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+ if (alt_phy_handle) {
+ u32 alt_phandle = be32_to_cpup(alt_phy_handle);
+ alt_phy = fdt_node_offset_by_phandle(initial_boot_params, alt_phandle);
+ } else {
+ alt_phy = -1;
+ }
+
+ if (phy_addr < 0 || phy < 0) {
+ /* Delete the PHY things */
+ fdt_nop_property(initial_boot_params, eth, "phy-handle");
+ /* This one may fail */
+ fdt_nop_property(initial_boot_params, eth, "cavium,alt-phy-handle");
+ if (phy >= 0)
+ fdt_nop_node(initial_boot_params, phy);
+ if (alt_phy >= 0)
+ fdt_nop_node(initial_boot_params, alt_phy);
+ return;
+ }
+
+ if (phy_addr >= 256 && alt_phy > 0) {
+ const struct fdt_property *phy_prop;
+ struct fdt_property *alt_prop;
+ u32 phy_handle_name;
+
+ /* Use the alt phy node instead.*/
+ phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
+ phy_handle_name = phy_prop->nameoff;
+ fdt_nop_node(initial_boot_params, phy);
+ fdt_nop_property(initial_boot_params, eth, "phy-handle");
+ alt_prop = fdt_get_property_w(initial_boot_params, eth, "cavium,alt-phy-handle", NULL);
+ alt_prop->nameoff = phy_handle_name;
+ phy = alt_phy;
+ }
+
+ phy_addr &= 0xff;
+
+ if (octeon_has_88e1145()) {
+ fdt_nop_property(initial_boot_params, phy, "marvell,reg-init");
+ memset(new_name, 0, sizeof(new_name));
+ strcpy(new_name, "marvell,88e1145");
+ p = fdt_getprop(initial_boot_params, phy, "compatible",
+ &current_len);
+ if (p && current_len >= strlen(new_name))
+ fdt_setprop_inplace(initial_boot_params, phy,
+ "compatible", new_name, current_len);
+ }
+
+ reg = fdt_getprop(initial_boot_params, phy, "reg", NULL);
+ if (phy_addr == be32_to_cpup(reg))
+ return;
+
+ fdt_setprop_inplace_cell(initial_boot_params, phy, "reg", phy_addr);
+
+ snprintf(new_name, sizeof(new_name), "ethernet-phy@%x", phy_addr);
+
+ p = fdt_get_name(initial_boot_params, phy, &current_len);
+ if (p && current_len == strlen(new_name))
+ fdt_set_name(initial_boot_params, phy, new_name);
+ else
+ pr_err("Error: could not rename ethernet phy: <%s>", p);
+}
+
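+/*
+ * Write the next MAC address into the node's local-mac-address property
+ * and, on success, advance *pmac for the following port.
+ */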
+static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
+{
+ u8 new_mac[6];
+ u64 mac = *pmac;
+ int r;
+
+ new_mac[0] = (mac >> 40) & 0xff;
+ new_mac[1] = (mac >> 32) & 0xff;
+ new_mac[2] = (mac >> 24) & 0xff;
+ new_mac[3] = (mac >> 16) & 0xff;
+ new_mac[4] = (mac >> 8) & 0xff;
+ new_mac[5] = mac & 0xff;
+
+ r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address",
+ new_mac, sizeof(new_mac));
+
+ if (r) {
+ pr_err("Setting \"local-mac-address\" failed %d", r);
+ return;
+ }
+ *pmac = mac + 1;
+}
+
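+/*
+ * Remove an Ethernet node along with the PHY node referenced by its
+ * phy-handle, if any.
+ */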
+static void __init octeon_fdt_rm_ethernet(int node)
+{
+ const __be32 *phy_handle;
+
+ phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
+ if (phy_handle) {
+ u32 ph = be32_to_cpup(phy_handle);
+ int p = fdt_node_offset_by_phandle(initial_boot_params, ph);
+ if (p >= 0)
+ fdt_nop_node(initial_boot_params, p);
+ }
+ fdt_nop_node(initial_boot_params, node);
+}
+
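+/*
+ * Prune or patch a single PIP port node: delete ports beyond the
+ * interface's port count, otherwise compute the IPD port number, look up
+ * the board's MII address and fill in the PHY and MAC address.
+ */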
+static void __init octeon_fdt_pip_port(int iface, int i, int p, int max, u64 *pmac)
+{
+ char name_buffer[20];
+ int eth;
+ int phy_addr;
+ int ipd_port;
+
+ snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
+ eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
+ if (eth < 0)
+ return;
+ if (p > max) {
+ pr_debug("Deleting port %x:%x\n", i, p);
+ octeon_fdt_rm_ethernet(eth);
+ return;
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
+ else
+ ipd_port = 16 * i + p;
+
+ phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
+ octeon_fdt_set_phy(eth, phy_addr);
+ octeon_fdt_set_mac_addr(eth, pmac);
+}
+
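+/*
+ * Walk the (up to 16) port nodes of one PIP interface and prune or patch
+ * each against the number of ports the interface really has.
+ */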
+static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
+{
+ char name_buffer[20];
+ int iface;
+ int p;
+ int count;
+
+ count = cvmx_helper_interface_enumerate(idx);
+
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
+ iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
+ if (iface < 0)
+ return;
+
+ for (p = 0; p < 16; p++)
+ octeon_fdt_pip_port(iface, idx, p, count - 1, pmac);
+}
+
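+/*
+ * Trim the super-set skeleton device tree down to the devices actually
+ * present on this chip and board: MIX, PIP, TWSI, SMI/MDIO, UART,
+ * compact flash, LED display and USB UCTL nodes.
+ */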
+int __init octeon_prune_device_tree(void)
+{
+ int i, max_port, uart_mask;
+ const char *pip_path;
+ const char *alias_prop;
+ char name_buffer[20];
+ int aliases;
+ u64 mac_addr_base;
+
+ if (fdt_check_header(initial_boot_params))
+ panic("Corrupt Device Tree.");
+
+ aliases = fdt_path_offset(initial_boot_params, "/aliases");
+ if (aliases < 0) {
+ pr_err("Error: No /aliases node in device tree.");
+ return -EINVAL;
+ }
+
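+ /* Pack the bootloader-supplied 6-byte MAC base into a u64 allocator. */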
+ mac_addr_base =
+ ((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
+ ((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
+ ((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
+ ((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
+ ((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
+ (octeon_bootinfo->mac_addr_base[5] & 0xffull);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
+ max_port = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
+ max_port = 1;
+ else
+ max_port = 0;
+
+ if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E)
+ max_port = 0;
+
+ for (i = 0; i < 2; i++) {
+ int mgmt;
+ snprintf(name_buffer, sizeof(name_buffer),
+ "mix%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+ if (alias_prop) {
+ mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+ if (mgmt < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting mix%d\n", i);
+ octeon_fdt_rm_ethernet(mgmt);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ } else {
+ int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
+ octeon_fdt_set_phy(mgmt, phy_addr);
+ octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
+ }
+ }
+ }
+
+ pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+ if (pip_path) {
+ int pip = fdt_path_offset(initial_boot_params, pip_path);
+ if (pip >= 0)
+ for (i = 0; i <= 4; i++)
+ octeon_fdt_pip_iface(pip, i, &mac_addr_base);
+ }
+
+ /* I2C */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN68XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX))
+ max_port = 2;
+ else
+ max_port = 1;
+
+ for (i = 0; i < 2; i++) {
+ int i2c;
+ snprintf(name_buffer, sizeof(name_buffer),
+ "twsi%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+
+ if (alias_prop) {
+ i2c = fdt_path_offset(initial_boot_params, alias_prop);
+ if (i2c < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting twsi%d\n", i);
+ fdt_nop_node(initial_boot_params, i2c);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+ }
+
+ /* SMI/MDIO */
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ max_port = 4;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN63XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN56XX))
+ max_port = 2;
+ else
+ max_port = 1;
+
+ for (i = 0; i < 2; i++) {
+ int i2c;
+ snprintf(name_buffer, sizeof(name_buffer),
+ "smi%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+
+ if (alias_prop) {
+ i2c = fdt_path_offset(initial_boot_params, alias_prop);
+ if (i2c < 0)
+ continue;
+ if (i >= max_port) {
+ pr_debug("Deleting smi%d\n", i);
+ fdt_nop_node(initial_boot_params, i2c);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+ }
+
+ /* Serial */
+ uart_mask = 3;
+
+ /* Right now CN52XX is the only chip with a third uart */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ uart_mask |= 4; /* uart2 */
+
+ for (i = 0; i < 3; i++) {
+ int uart;
+ snprintf(name_buffer, sizeof(name_buffer),
+ "uart%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+
+ if (alias_prop) {
+ uart = fdt_path_offset(initial_boot_params, alias_prop);
+ if (uart_mask & (1 << i))
+ continue;
+ pr_debug("Deleting uart%d\n", i);
+ fdt_nop_node(initial_boot_params, uart);
+ fdt_nop_property(initial_boot_params, aliases,
+ name_buffer);
+ }
+ }
+
+ /* Compact Flash */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "cf0", NULL);
+ if (alias_prop) {
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ unsigned long region1_base = 0;
+ unsigned long region1_size = 0;
+ int cs, bootbus;
+ bool is_16bit = false;
+ bool is_true_ide = false;
+ __be32 new_reg[6];
+ __be32 *ranges;
+ int len;
+
+ int cf = fdt_path_offset(initial_boot_params, alias_prop);
+ base_ptr = 0;
+ if (octeon_bootinfo->major_version == 1
+ && octeon_bootinfo->minor_version >= 1) {
+ if (octeon_bootinfo->compact_flash_common_base_addr)
+ base_ptr = octeon_bootinfo->compact_flash_common_base_addr;
+ } else {
+ base_ptr = 0x1d000800;
+ }
+
+ if (!base_ptr)
+ goto no_cf;
+
+ /* Find CS0 region. */
+ for (cs = 0; cs < 8; cs++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size) {
+ is_16bit = mio_boot_reg_cfg.s.width;
+ break;
+ }
+ }
+ if (cs >= 7) {
+ /* The CF uses chip selects cs and cs + 1; both must be less than 8. */
+ goto no_cf;
+ }
+
+ if (!(base_ptr & 0xfffful)) {
+ /*
+ * Boot loader signals availability of DMA (true_ide
+ * mode) by setting low order bits of base_ptr to
+ * zero.
+ */
+
+ /* Assume that CS1 immediately follows. */
+ mio_boot_reg_cfg.u64 =
+ cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
+ region1_base = mio_boot_reg_cfg.s.base << 16;
+ region1_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (!mio_boot_reg_cfg.s.en)
+ goto no_cf;
+ is_true_ide = true;
+
+ } else {
+ fdt_nop_property(initial_boot_params, cf, "cavium,true-ide");
+ fdt_nop_property(initial_boot_params, cf, "cavium,dma-engine-handle");
+ if (!is_16bit) {
+ __be32 width = cpu_to_be32(8);
+ fdt_setprop_inplace(initial_boot_params, cf,
+ "cavium,bus-width", &width, sizeof(width));
+ }
+ }
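+ /* 'reg' entries are <cs offset size>; give CS n and CS n + 1 a 64K window each. */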
+ new_reg[0] = cpu_to_be32(cs);
+ new_reg[1] = cpu_to_be32(0);
+ new_reg[2] = cpu_to_be32(0x10000);
+ new_reg[3] = cpu_to_be32(cs + 1);
+ new_reg[4] = cpu_to_be32(0);
+ new_reg[5] = cpu_to_be32(0x10000);
+ fdt_setprop_inplace(initial_boot_params, cf,
+ "reg", new_reg, sizeof(new_reg));
+
+ bootbus = fdt_parent_offset(initial_boot_params, cf);
+ if (bootbus < 0)
+ goto no_cf;
+ ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+ if (!ranges || len < (5 * 8 * sizeof(__be32)))
+ goto no_cf;
+
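+ /*
+ * Each bootbus 'ranges' entry is 5 cells: <cs offset addr-hi addr-lo size>;
+ * patch in the region base and size the bootloader actually configured.
+ */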
+ ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+ if (is_true_ide) {
+ cs++;
+ ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region1_size);
+ }
+ goto end_cf;
+no_cf:
+ fdt_nop_node(initial_boot_params, cf);
+
+end_cf:
+ ;
+ }
+
+ /* 8 char LED */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "led0", NULL);
+ if (alias_prop) {
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ int cs, bootbus;
+ __be32 new_reg[6];
+ __be32 *ranges;
+ int len;
+ int led = fdt_path_offset(initial_boot_params, alias_prop);
+
+ base_ptr = octeon_bootinfo->led_display_base_addr;
+ if (base_ptr == 0)
+ goto no_led;
+ /* Find CS0 region. */
+ for (cs = 0; cs < 8; cs++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size)
+ break;
+ }
+
+ if (cs > 7)
+ goto no_led;
+
+ new_reg[0] = cpu_to_be32(cs);
+ new_reg[1] = cpu_to_be32(0x20);
+ new_reg[2] = cpu_to_be32(0x20);
+ new_reg[3] = cpu_to_be32(cs);
+ new_reg[4] = cpu_to_be32(0);
+ new_reg[5] = cpu_to_be32(0x20);
+ fdt_setprop_inplace(initial_boot_params, led,
+ "reg", new_reg, sizeof(new_reg));
+
+ bootbus = fdt_parent_offset(initial_boot_params, led);
+ if (bootbus < 0)
+ goto no_led;
+ ranges = fdt_getprop_w(initial_boot_params, bootbus, "ranges", &len);
+ if (!ranges || len < (5 * 8 * sizeof(__be32)))
+ goto no_led;
+
+ ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
+ ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
+ ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
+ goto end_led;
+
+no_led:
+ fdt_nop_node(initial_boot_params, led);
+end_led:
+ ;
+ }
+
+ /* OHCI/UHCI USB */
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ "uctl", NULL);
+ if (alias_prop) {
+ int uctl = fdt_path_offset(initial_boot_params, alias_prop);
+
+ if (uctl >= 0 && (!OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
+ octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC2E)) {
+ pr_debug("Deleting uctl\n");
+ fdt_nop_node(initial_boot_params, uctl);
+ fdt_nop_property(initial_boot_params, aliases, "uctl");
+ } else if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E ||
+ octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC4E) {
+ /* Missing "refclk-type" defaults to crystal. */
+ fdt_nop_property(initial_boot_params, uctl, "refclk-type");
+ }
+ }
+
+ return 0;
+}
+
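+/*
+ * Scan the pruned device tree and create platform devices for the buses
+ * and devices matched by octeon_ids.
+ */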
+static int __init octeon_publish_devices(void)
+{
+ return of_platform_bus_probe(NULL, octeon_ids, NULL);
+}
+device_initcall(octeon_publish_devices);
+
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Platform driver for Octeon SOC");
diff --git a/arch/mips/cavium-octeon/octeon_3xxx.dts b/arch/mips/cavium-octeon/octeon_3xxx.dts
new file mode 100644
index 000000000000..f28b2d0fde22
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon_3xxx.dts
@@ -0,0 +1,571 @@
+/dts-v1/;
+/*
+ * OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
+ *
+ * This device tree is pruned and patched by early boot code before
+ * use. Because of this, it contains a super-set of the available
+ * devices and properties.
+ */
+/ {
+ compatible = "cavium,octeon-3860";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&ciu>;
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu: interrupt-controller@1070000000000 {
+ compatible = "cavium,octeon-3860-ciu";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 1)
+ * 2) Bit within the register (0..63)
+ */
+ #interrupt-cells = <2>;
+ reg = <0x10700 0x00000000 0x0 0x7000>;
+ };
+
+ gpio: gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ /* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
+ <0 20>, <0 21>, <0 22>, <0 23>,
+ <0 24>, <0 25>, <0 26>, <0 27>,
+ <0 28>, <0 29>, <0 30>, <0 31>;
+ };
+
+ smi0: mdio@1180000001800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001800 0x0 0x40>;
+
+ phy0: ethernet-phy@0 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <1>;
+ };
+
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi1: mdio@1180000001900 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001900 0x0 0x40>;
+
+ phy100: ethernet-phy@1 {
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy101: ethernet-phy@2 {
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy102: ethernet-phy@3 {
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ phy103: ethernet-phy@4 {
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ };
+ };
+
+ mix0: ethernet@1070000100000 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100000 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000000 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002000 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <0>;
+ interrupts = <0 62>, <1 46>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy0>;
+ };
+
+ mix1: ethernet@1070000100800 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100800 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000800 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002008 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <1>;
+ interrupts = <1 18>, <1 46>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy1>;
+ };
+
+ pip: pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy2>;
+ cavium,alt-phy-handle = <&phy100>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy3>;
+ cavium,alt-phy-handle = <&phy101>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy4>;
+ cavium,alt-phy-handle = <&phy102>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy5>;
+ cavium,alt-phy-handle = <&phy103>;
+ };
+ ethernet@4 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x4>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@5 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x5>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@6 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x6>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@7 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x7>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@8 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x8>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@9 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x9>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@a {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xa>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@b {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xb>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@c {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xc>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@d {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xd>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@e {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xe>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@f {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0xf>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy6>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy7>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy8>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy9>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <0 45>;
+ clock-frequency = <100000>;
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ tmp@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+ };
+
+ twsi1: i2c@1180000001200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001200 0x0 0x200>;
+ interrupts = <0 59>;
+ clock-frequency = <100000>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0 34>;
+ };
+
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0 35>;
+ };
+
+ uart2: serial@1180000000400 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000400 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <1 16>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0x0 0x1d020000 0x10000>,
+ <5 0 0x0 0x1d040000 0x10000>,
+ <6 0 0x0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <20>;
+ cavium,t-ce = <60>;
+ cavium,t-oe = <60>;
+ cavium,t-we = <45>;
+ cavium,t-rd-hld = <35>;
+ cavium,t-wr-hld = <45>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <320>;
+ cavium,t-ce = <320>;
+ cavium,t-oe = <320>;
+ cavium,t-we = <320>;
+ cavium,t-rd-hld = <320>;
+ cavium,t-wr-hld = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@5 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <5>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <30>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <16>;
+ };
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <270>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <70>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+
+ led0: led-display@4,0 {
+ compatible = "avago,hdsp-253x";
+ reg = <4 0x20 0x20>, <4 0 0x20>;
+ };
+
+ cf0: compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
+ };
+
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+ dma1: dma-engine@1180000000108 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000108 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ uctl: uctl@118006f000000 {
+ compatible = "cavium,octeon-6335-uctl";
+ reg = <0x11800 0x6f000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <12000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+
+ ehci@16f0000000000 {
+ compatible = "cavium,octeon-6335-ehci","usb-ehci";
+ reg = <0x16f00 0x00000000 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ ohci@16f0000000400 {
+ compatible = "cavium,octeon-6335-ohci","usb-ohci";
+ reg = <0x16f00 0x00000400 0x0 0x100>;
+ interrupts = <0 56>;
+ big-endian-regs;
+ };
+ };
+ };
+
+ aliases {
+ mix0 = &mix0;
+ mix1 = &mix1;
+ pip = &pip;
+ smi0 = &smi0;
+ smi1 = &smi1;
+ twsi0 = &twsi0;
+ twsi1 = &twsi1;
+ uart0 = &uart0;
+ uart1 = &uart1;
+ uart2 = &uart2;
+ flash0 = &flash0;
+ cf0 = &cf0;
+ uctl = &uctl;
+ led0 = &led0;
+ };
+ };
diff --git a/arch/mips/cavium-octeon/octeon_68xx.dts b/arch/mips/cavium-octeon/octeon_68xx.dts
new file mode 100644
index 000000000000..1839468932b6
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon_68xx.dts
@@ -0,0 +1,625 @@
+/dts-v1/;
+/*
+ * OCTEON 68XX device tree skeleton.
+ *
+ * This device tree is pruned and patched by early boot code before
+ * use. Because of this, it contains a super-set of the available
+ * devices and properties.
+ */
+/ {
+ compatible = "cavium,octeon-6880";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&ciu2>;
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu2: interrupt-controller@1070100000000 {
+ compatible = "cavium,octeon-6880-ciu2";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 7)
+ * 2) Bit within the register (0..63)
+ */
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x10701 0x00000000 0x0 0x4000000>;
+ };
+
+ gpio: gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ /* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <7 0>, <7 1>, <7 2>, <7 3>,
+ <7 4>, <7 5>, <7 6>, <7 7>,
+ <7 8>, <7 9>, <7 10>, <7 11>,
+ <7 12>, <7 13>, <7 14>, <7 15>;
+ };
+
+ smi0: mdio@1180000003800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003800 0x0 0x40>;
+
+ phy0: ethernet-phy@6 {
+ compatible = "marvell,88e1118";
+ marvell,reg-init =
+ /* Fix rx and tx clock transition timing */
+ <2 0x15 0xffcf 0>, /* Reg 2,21 Clear bits 4, 5 */
+ /* Adjust LED drive. */
+ <3 0x11 0 0x442a>, /* Reg 3,17 <- 0x442a */
+ /* irq, blink-activity, blink-link */
+ <3 0x10 0 0x0242>; /* Reg 3,16 <- 0x0242 */
+ reg = <6>;
+ };
+
+ phy1: ethernet-phy@1 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy2: ethernet-phy@2 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy3: ethernet-phy@3 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy4: ethernet-phy@4 {
+ cavium,qlm-trim = "4,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi1: mdio@1180000003880 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003880 0x0 0x40>;
+
+ phy41: ethernet-phy@1 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy42: ethernet-phy@2 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy43: ethernet-phy@3 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy44: ethernet-phy@4 {
+ cavium,qlm-trim = "0,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi2: mdio@1180000003900 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003900 0x0 0x40>;
+
+ phy21: ethernet-phy@1 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy22: ethernet-phy@2 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy23: ethernet-phy@3 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy24: ethernet-phy@4 {
+ cavium,qlm-trim = "2,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ smi3: mdio@1180000003980 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00003980 0x0 0x40>;
+
+ phy11: ethernet-phy@1 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <1>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy12: ethernet-phy@2 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <2>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy13: ethernet-phy@3 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <3>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ phy14: ethernet-phy@4 {
+ cavium,qlm-trim = "3,sgmii";
+ reg = <4>;
+ compatible = "marvell,88e1149r";
+ marvell,reg-init = <3 0x10 0 0x5777>,
+ <3 0x11 0 0x00aa>,
+ <3 0x12 0 0x4105>,
+ <3 0x13 0 0x0a60>;
+ };
+ };
+
+ mix0: ethernet@1070000100000 {
+ compatible = "cavium,octeon-5750-mix";
+ reg = <0x10700 0x00100000 0x0 0x100>, /* MIX */
+ <0x11800 0xE0000000 0x0 0x300>, /* AGL */
+ <0x11800 0xE0000400 0x0 0x400>, /* AGL_SHARED */
+ <0x11800 0xE0002000 0x0 0x8>; /* AGL_PRT_CTL */
+ cell-index = <0>;
+ interrupts = <6 40>, <6 32>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy0>;
+ };
+
+ pip: pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@4 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy1>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy2>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy3>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy4>;
+ };
+ };
+
+ interface@3 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x3>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy11>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy12>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy13>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy14>;
+ };
+ };
+
+ interface@2 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy21>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy22>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy23>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy24>;
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy41>;
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy42>;
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy43>;
+ };
+ ethernet@3 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x3>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-handle = <&phy44>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <3 32>;
+ clock-frequency = <100000>;
+
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ tmp@4c {
+ compatible = "ti,tmp421";
+ reg = <0x4c>;
+ };
+ };
+
+ twsi1: i2c@1180000001200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001200 0x0 0x200>;
+ interrupts = <3 33>;
+ clock-frequency = <100000>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <3 36>;
+ };
+
+ uart1: serial@1180000000c00 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000c00 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <3 37>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0 0x1d020000 0x10000>,
+ <5 0 0 0x1d040000 0x10000>,
+ <6 0 0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <10>;
+ cavium,t-ce = <50>;
+ cavium,t-oe = <50>;
+ cavium,t-we = <35>;
+ cavium,t-rd-hld = <25>;
+ cavium,t-wr-hld = <35>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <25>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <320>;
+ cavium,t-ce = <320>;
+ cavium,t-oe = <320>;
+ cavium,t-we = <320>;
+ cavium,t-rd-hld = <320>;
+ cavium,t-wr-hld = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@5 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <5>;
+ cavium,t-adr = <0>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <300>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <310>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <16>;
+ };
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <0>;
+ cavium,t-ce = <30>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <30>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <310>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0 0x200000>;
+ read-only;
+ };
+ partition@200000 {
+ label = "kernel";
+ reg = <0x200000 0x200000>;
+ };
+ partition@400000 {
+ label = "cramfs";
+ reg = <0x400000 0x3fe000>;
+ };
+ partition@7fe000 {
+ label = "environment";
+ reg = <0x7fe000 0x2000>;
+ read-only;
+ };
+ };
+
+ led0: led-display@4,0 {
+ compatible = "avago,hdsp-253x";
+ reg = <4 0x20 0x20>, <4 0 0x20>;
+ };
+
+ compact-flash@5,0 {
+ compatible = "cavium,ebt3000-compact-flash";
+ reg = <5 0 0x10000>, <6 0 0x10000>;
+ cavium,bus-width = <16>;
+ cavium,true-ide;
+ cavium,dma-engine-handle = <&dma0>;
+ };
+ };
+
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+ dma1: dma-engine@1180000000108 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000108 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ uctl: uctl@118006f000000 {
+ compatible = "cavium,octeon-6335-uctl";
+ reg = <0x11800 0x6f000000 0x0 0x100>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ /* 12MHz, 24MHz and 48MHz allowed */
+ refclk-frequency = <12000000>;
+ /* Either "crystal" or "external" */
+ refclk-type = "crystal";
+
+ ehci@16f0000000000 {
+ compatible = "cavium,octeon-6335-ehci","usb-ehci";
+ reg = <0x16f00 0x00000000 0x0 0x100>;
+ interrupts = <3 44>;
+ big-endian-regs;
+ };
+ ohci@16f0000000400 {
+ compatible = "cavium,octeon-6335-ohci","usb-ohci";
+ reg = <0x16f00 0x00000400 0x0 0x100>;
+ interrupts = <3 44>;
+ big-endian-regs;
+ };
+ };
+ };
+
+ aliases {
+ mix0 = &mix0;
+ pip = &pip;
+ smi0 = &smi0;
+ smi1 = &smi1;
+ smi2 = &smi2;
+ smi3 = &smi3;
+ twsi0 = &twsi0;
+ twsi1 = &twsi1;
+ uart0 = &uart0;
+ uart1 = &uart1;
+ uctl = &uctl;
+ led0 = &led0;
+ flash0 = &flash0;
+ };
+ };
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 057f0ae88c99..138b2216b4f8 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -43,95 +43,67 @@ void octeon_serial_out(struct uart_port *up, int offset, int value)
cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
}
-/*
- * Allocated in .bss, so it is all zeroed.
- */
-#define OCTEON_MAX_UARTS 3
-static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
-static struct platform_device octeon_uart8250_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = octeon_uart8250_data,
- },
-};
-
-static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
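+/*
+ * Probe one UART described in the device tree: take its IRQ and MMIO
+ * resource from the platform device and hand the port to the 8250 core.
+ */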
+static int __devinit octeon_serial_probe(struct platform_device *pdev)
{
- p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
- p->type = PORT_OCTEON;
- p->iotype = UPIO_MEM;
- p->regshift = 3; /* I/O addresses are every 8 bytes */
+ int irq, res;
+ struct resource *res_mem;
+ struct uart_port port;
+
+ /* Every port has an IRQ. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ memset(&port, 0, sizeof(port));
+
+ port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+ port.type = PORT_OCTEON;
+ port.iotype = UPIO_MEM;
+ port.regshift = 3;
+ port.dev = &pdev->dev;
+
if (octeon_is_simulation())
+ /* Make simulator output fast */
- p->uartclk = 115200 * 16;
+ port.uartclk = 115200 * 16;
else
- p->uartclk = octeon_get_io_clock_rate();
- p->serial_in = octeon_serial_in;
- p->serial_out = octeon_serial_out;
-}
+ port.uartclk = octeon_get_io_clock_rate();
-static int __init octeon_serial_init(void)
-{
- int enable_uart0;
- int enable_uart1;
- int enable_uart2;
- struct plat_serial8250_port *p;
-
-#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
- /*
- * If we are configured to run as the second of two kernels,
- * disable uart0 and enable uart1. Uart0 is owned by the first
- * kernel
- */
- enable_uart0 = 0;
- enable_uart1 = 1;
-#else
- /*
- * We are configured for the first kernel. We'll enable uart0
- * if the bootloader told us to use 0, otherwise will enable
- * uart 1.
- */
- enable_uart0 = (octeon_get_boot_uart() == 0);
- enable_uart1 = (octeon_get_boot_uart() == 1);
-#ifdef CONFIG_KGDB
- enable_uart1 = 1;
-#endif
-#endif
-
- /* Right now CN52XX is the only chip with a third uart */
- enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
-
- p = octeon_uart8250_data;
- if (enable_uart0) {
- /* Add a ttyS device for hardware uart 0 */
- octeon_uart_set_common(p);
- p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
- p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
- p->irq = OCTEON_IRQ_UART0;
- p++;
- }
+ port.serial_in = octeon_serial_in;
+ port.serial_out = octeon_serial_out;
+ port.irq = irq;
- if (enable_uart1) {
- /* Add a ttyS device for hardware uart 1 */
- octeon_uart_set_common(p);
- p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
- p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
- p->irq = OCTEON_IRQ_UART1;
- p++;
- }
- if (enable_uart2) {
- /* Add a ttyS device for hardware uart 2 */
- octeon_uart_set_common(p);
- p->membase = (void *) CVMX_MIO_UART2_RBR;
- p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
- p->irq = OCTEON_IRQ_UART2;
- p++;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ return -ENXIO;
}
+ port.mapbase = res_mem->start;
+ port.membase = ioremap(res_mem->start, resource_size(res_mem));
- BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
+ res = serial8250_register_port(&port);
- return platform_device_register(&octeon_uart8250_device);
+ return res >= 0 ? 0 : res;
}
-device_initcall(octeon_serial_init);
+static struct of_device_id octeon_serial_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-uart",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_serial_match);
+
+static struct platform_driver octeon_serial_driver = {
+ .probe = octeon_serial_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "octeon_serial",
+ .of_match_table = octeon_serial_match,
+ },
+};
+
+static int __init octeon_serial_init(void)
+{
+ return platform_driver_register(&octeon_serial_driver);
+}
+late_initcall(octeon_serial_init);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 260dc247c052..919b0fb7bb1a 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -21,6 +21,8 @@
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/processor.h>
#include <asm/reboot.h>
@@ -775,3 +777,46 @@ void prom_free_prom_memory(void)
}
#endif
}
+
+int octeon_prune_device_tree(void);
+
+extern const char __dtb_octeon_3xxx_begin;
+extern const char __dtb_octeon_3xxx_end;
+extern const char __dtb_octeon_68xx_begin;
+extern const char __dtb_octeon_68xx_end;
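+/*
+ * Use the device tree passed by the bootloader when one is provided
+ * (bootinfo minor version >= 3); otherwise fall back to the built-in
+ * skeleton for this chip and prune it to match the board.
+ */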
+void __init device_tree_init(void)
+{
+ int dt_size;
+ struct boot_param_header *fdt;
+ bool do_prune;
+
+ if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
+ if (fdt_check_header(fdt))
+ panic("Corrupt Device Tree passed to kernel.");
+ dt_size = be32_to_cpu(fdt->totalsize);
+ do_prune = false;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+ fdt = (struct boot_param_header *)&__dtb_octeon_68xx_begin;
+ dt_size = &__dtb_octeon_68xx_end - &__dtb_octeon_68xx_begin;
+ do_prune = true;
+ } else {
+ fdt = (struct boot_param_header *)&__dtb_octeon_3xxx_begin;
+ dt_size = &__dtb_octeon_3xxx_end - &__dtb_octeon_3xxx_begin;
+ do_prune = true;
+ }
+
+ /* Copy the default tree from init memory. */
+ initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
+ if (initial_boot_params == NULL)
+ panic("Could not allocate initial_boot_params\n");
+ memcpy(initial_boot_params, fdt, dt_size);
+
+ if (do_prune) {
+ octeon_prune_device_tree();
+ pr_info("Using internal Device Tree.\n");
+ } else {
+ pr_info("Using passed Device Tree.\n");
+ }
+ unflatten_device_tree();
+}
diff --git a/arch/mips/configs/ls1b_defconfig b/arch/mips/configs/ls1b_defconfig
new file mode 100644
index 000000000000..80cff8bea8e8
--- /dev/null
+++ b/arch/mips/configs/ls1b_defconfig
@@ -0,0 +1,109 @@
+CONFIG_MACH_LOONGSON1=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+CONFIG_STMMAC_DA=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_USB_HID=m
+CONFIG_HID_GENERIC=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index d0b857d98c91..138f698d7c00 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -367,6 +367,10 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_TIMERIOMEM=m
CONFIG_RAW_DRIVER=m
+CONFIG_I2C=y
+CONFIG_I2C_XLR=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1374=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index e95ff3054ff6..8c62316f22f4 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -101,7 +101,7 @@ void __init prom_free_prom_memory(void)
* the first page reserved for the exception handlers.
*/
-#if defined(CONFIG_DECLANCE) || defined(CONFIG_DECLANCE_MODULE)
+#if IS_ENABLED(CONFIG_DECLANCE)
/*
* Leave 128 KB reserved for Lance memory for
* IOASIC DECstations.
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
index 83894aa7932c..c9456e7a7283 100644
--- a/arch/mips/include/asm/clock.h
+++ b/arch/mips/include/asm/clock.h
@@ -50,15 +50,4 @@ void clk_recalc_rate(struct clk *);
int clk_register(struct clk *);
void clk_unregister(struct clk *);
-/* the exported API, in addition to clk_set_rate */
-/**
- * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter
- * @clk: clock source
- * @rate: desired clock rate in Hz
- * @algo_id: algorithm id to be passed down to ops->set_rate
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id);
-
#endif /* __ASM_MIPS_CLOCK_H */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 95e40c1e8ed1..f21b7c04e95a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -197,6 +197,7 @@
#define PRID_REV_VR4181A 0x0070 /* Same as VR4122 */
#define PRID_REV_VR4130 0x0080
#define PRID_REV_34K_V1_0_2 0x0022
+#define PRID_REV_LOONGSON1B 0x0020
#define PRID_REV_LOONGSON2E 0x0002
#define PRID_REV_LOONGSON2F 0x0003
@@ -261,7 +262,7 @@ enum cpu_type_enum {
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
- CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
/*
* MIPS64 class processors
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 1caa78ad06d5..dde504477fac 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -393,7 +393,8 @@
#define AR71XX_GPIO_REG_FUNC 0x28
#define AR71XX_GPIO_COUNT 16
-#define AR724X_GPIO_COUNT 18
+#define AR7240_GPIO_COUNT 18
+#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 4476fa03bf36..6ddae926bf79 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -42,7 +42,6 @@
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
-#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
#define cpu_has_64bits 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index 5b8d15bb5fe8..e104ddb694a8 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -9,6 +9,7 @@
* compile time if only one CPU support is enabled (idea stolen from
* arm mach-types)
*/
+#define BCM6328_CPU_ID 0x6328
#define BCM6338_CPU_ID 0x6338
#define BCM6345_CPU_ID 0x6345
#define BCM6348_CPU_ID 0x6348
@@ -20,6 +21,19 @@ u16 __bcm63xx_get_cpu_id(void);
u16 bcm63xx_get_cpu_rev(void);
unsigned int bcm63xx_get_cpu_freq(void);
+#ifdef CONFIG_BCM63XX_CPU_6328
+# ifdef bcm63xx_get_cpu_id
+# undef bcm63xx_get_cpu_id
+# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id()
+# define BCMCPU_RUNTIME_DETECT
+# else
+# define bcm63xx_get_cpu_id() BCM6328_CPU_ID
+# endif
+# define BCMCPU_IS_6328() (bcm63xx_get_cpu_id() == BCM6328_CPU_ID)
+#else
+# define BCMCPU_IS_6328() (0)
+#endif
+
#ifdef CONFIG_BCM63XX_CPU_6338
# ifdef bcm63xx_get_cpu_id
# undef bcm63xx_get_cpu_id
@@ -102,13 +116,13 @@ enum bcm63xx_regs_set {
RSET_UART1,
RSET_GPIO,
RSET_SPI,
- RSET_SPI2,
RSET_UDC0,
RSET_OHCI0,
RSET_OHCI_PRIV,
RSET_USBH_PRIV,
RSET_MPI,
RSET_PCMCIA,
+ RSET_PCIE,
RSET_DSL,
RSET_ENET0,
RSET_ENET1,
@@ -130,11 +144,17 @@ enum bcm63xx_regs_set {
RSET_PCMDMA,
RSET_PCMDMAC,
RSET_PCMDMAS,
+ RSET_RNG,
+ RSET_MISC
};
#define RSET_DSL_LMEM_SIZE (64 * 1024 * 4)
#define RSET_DSL_SIZE 4096
#define RSET_WDT_SIZE 12
+#define BCM_6338_RSET_SPI_SIZE 64
+#define BCM_6348_RSET_SPI_SIZE 64
+#define BCM_6358_RSET_SPI_SIZE 1804
+#define BCM_6368_RSET_SPI_SIZE 1804
#define RSET_ENET_SIZE 2048
#define RSET_ENETDMA_SIZE 2048
#define RSET_ENETSW_SIZE 65536
@@ -149,8 +169,53 @@ enum bcm63xx_regs_set {
#define RSET_XTMDMA_SIZE 256
#define RSET_XTMDMAC_SIZE(chans) (16 * (chans))
#define RSET_XTMDMAS_SIZE(chans) (16 * (chans))
+#define RSET_RNG_SIZE 20
/*
+ * 6328 register sets base address
+ */
+#define BCM_6328_DSL_LMEM_BASE (0xdeadbeef)
+#define BCM_6328_PERF_BASE (0xb0000000)
+#define BCM_6328_TIMER_BASE (0xb0000040)
+#define BCM_6328_WDT_BASE (0xb000005c)
+#define BCM_6328_UART0_BASE (0xb0000100)
+#define BCM_6328_UART1_BASE (0xb0000120)
+#define BCM_6328_GPIO_BASE (0xb0000080)
+#define BCM_6328_SPI_BASE (0xdeadbeef)
+#define BCM_6328_UDC0_BASE (0xdeadbeef)
+#define BCM_6328_USBDMA_BASE (0xdeadbeef)
+#define BCM_6328_OHCI0_BASE (0xdeadbeef)
+#define BCM_6328_OHCI_PRIV_BASE (0xdeadbeef)
+#define BCM_6328_USBH_PRIV_BASE (0xdeadbeef)
+#define BCM_6328_MPI_BASE (0xdeadbeef)
+#define BCM_6328_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6328_PCIE_BASE (0xb0e40000)
+#define BCM_6328_SDRAM_REGS_BASE (0xdeadbeef)
+#define BCM_6328_DSL_BASE (0xb0001900)
+#define BCM_6328_UBUS_BASE (0xdeadbeef)
+#define BCM_6328_ENET0_BASE (0xdeadbeef)
+#define BCM_6328_ENET1_BASE (0xdeadbeef)
+#define BCM_6328_ENETDMA_BASE (0xb000d800)
+#define BCM_6328_ENETDMAC_BASE (0xb000da00)
+#define BCM_6328_ENETDMAS_BASE (0xb000dc00)
+#define BCM_6328_ENETSW_BASE (0xb0e00000)
+#define BCM_6328_EHCI0_BASE (0x10002500)
+#define BCM_6328_SDRAM_BASE (0xdeadbeef)
+#define BCM_6328_MEMC_BASE (0xdeadbeef)
+#define BCM_6328_DDR_BASE (0xb0003000)
+#define BCM_6328_M2M_BASE (0xdeadbeef)
+#define BCM_6328_ATM_BASE (0xdeadbeef)
+#define BCM_6328_XTM_BASE (0xdeadbeef)
+#define BCM_6328_XTMDMA_BASE (0xb000b800)
+#define BCM_6328_XTMDMAC_BASE (0xdeadbeef)
+#define BCM_6328_XTMDMAS_BASE (0xdeadbeef)
+#define BCM_6328_PCM_BASE (0xb000a800)
+#define BCM_6328_PCMDMA_BASE (0xdeadbeef)
+#define BCM_6328_PCMDMAC_BASE (0xdeadbeef)
+#define BCM_6328_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6328_RNG_BASE (0xdeadbeef)
+#define BCM_6328_MISC_BASE (0xb0001800)
+/*
* 6338 register sets base address
*/
#define BCM_6338_DSL_LMEM_BASE (0xfff00000)
@@ -162,7 +227,6 @@ enum bcm63xx_regs_set {
#define BCM_6338_UART1_BASE (0xdeadbeef)
#define BCM_6338_GPIO_BASE (0xfffe0400)
#define BCM_6338_SPI_BASE (0xfffe0c00)
-#define BCM_6338_SPI2_BASE (0xdeadbeef)
#define BCM_6338_UDC0_BASE (0xdeadbeef)
#define BCM_6338_USBDMA_BASE (0xfffe2400)
#define BCM_6338_OHCI0_BASE (0xdeadbeef)
@@ -170,6 +234,7 @@ enum bcm63xx_regs_set {
#define BCM_6338_USBH_PRIV_BASE (0xdeadbeef)
#define BCM_6338_MPI_BASE (0xfffe3160)
#define BCM_6338_PCMCIA_BASE (0xdeadbeef)
+#define BCM_6338_PCIE_BASE (0xdeadbeef)
#define BCM_6338_SDRAM_REGS_BASE (0xfffe3100)
#define BCM_6338_DSL_BASE (0xfffe1000)
#define BCM_6338_UBUS_BASE (0xdeadbeef)
@@ -193,6 +258,8 @@ enum bcm63xx_regs_set {
#define BCM_6338_PCMDMA_BASE (0xdeadbeef)
#define BCM_6338_PCMDMAC_BASE (0xdeadbeef)
#define BCM_6338_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6338_RNG_BASE (0xdeadbeef)
+#define BCM_6338_MISC_BASE (0xdeadbeef)
/*
* 6345 register sets base address
@@ -206,7 +273,6 @@ enum bcm63xx_regs_set {
#define BCM_6345_UART1_BASE (0xdeadbeef)
#define BCM_6345_GPIO_BASE (0xfffe0400)
#define BCM_6345_SPI_BASE (0xdeadbeef)
-#define BCM_6345_SPI2_BASE (0xdeadbeef)
#define BCM_6345_UDC0_BASE (0xdeadbeef)
#define BCM_6345_USBDMA_BASE (0xfffe2800)
#define BCM_6345_ENET0_BASE (0xfffe1800)
@@ -216,6 +282,7 @@ enum bcm63xx_regs_set {
#define BCM_6345_ENETSW_BASE (0xdeadbeef)
#define BCM_6345_PCMCIA_BASE (0xfffe2028)
#define BCM_6345_MPI_BASE (0xfffe2000)
+#define BCM_6345_PCIE_BASE (0xdeadbeef)
#define BCM_6345_OHCI0_BASE (0xfffe2100)
#define BCM_6345_OHCI_PRIV_BASE (0xfffe2200)
#define BCM_6345_USBH_PRIV_BASE (0xdeadbeef)
@@ -237,6 +304,8 @@ enum bcm63xx_regs_set {
#define BCM_6345_PCMDMA_BASE (0xdeadbeef)
#define BCM_6345_PCMDMAC_BASE (0xdeadbeef)
#define BCM_6345_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6345_RNG_BASE (0xdeadbeef)
+#define BCM_6345_MISC_BASE (0xdeadbeef)
/*
* 6348 register sets base address
@@ -249,13 +318,13 @@ enum bcm63xx_regs_set {
#define BCM_6348_UART1_BASE (0xdeadbeef)
#define BCM_6348_GPIO_BASE (0xfffe0400)
#define BCM_6348_SPI_BASE (0xfffe0c00)
-#define BCM_6348_SPI2_BASE (0xdeadbeef)
#define BCM_6348_UDC0_BASE (0xfffe1000)
#define BCM_6348_OHCI0_BASE (0xfffe1b00)
#define BCM_6348_OHCI_PRIV_BASE (0xfffe1c00)
#define BCM_6348_USBH_PRIV_BASE (0xdeadbeef)
#define BCM_6348_MPI_BASE (0xfffe2000)
#define BCM_6348_PCMCIA_BASE (0xfffe2054)
+#define BCM_6348_PCIE_BASE (0xdeadbeef)
#define BCM_6348_SDRAM_REGS_BASE (0xfffe2300)
#define BCM_6348_M2M_BASE (0xfffe2800)
#define BCM_6348_DSL_BASE (0xfffe3000)
@@ -278,6 +347,8 @@ enum bcm63xx_regs_set {
#define BCM_6348_PCMDMA_BASE (0xdeadbeef)
#define BCM_6348_PCMDMAC_BASE (0xdeadbeef)
#define BCM_6348_PCMDMAS_BASE (0xdeadbeef)
+#define BCM_6348_RNG_BASE (0xdeadbeef)
+#define BCM_6348_MISC_BASE (0xdeadbeef)
/*
* 6358 register sets base address
@@ -289,14 +360,14 @@ enum bcm63xx_regs_set {
#define BCM_6358_UART0_BASE (0xfffe0100)
#define BCM_6358_UART1_BASE (0xfffe0120)
#define BCM_6358_GPIO_BASE (0xfffe0080)
-#define BCM_6358_SPI_BASE (0xdeadbeef)
-#define BCM_6358_SPI2_BASE (0xfffe0800)
+#define BCM_6358_SPI_BASE (0xfffe0800)
#define BCM_6358_UDC0_BASE (0xfffe0800)
#define BCM_6358_OHCI0_BASE (0xfffe1400)
#define BCM_6358_OHCI_PRIV_BASE (0xdeadbeef)
#define BCM_6358_USBH_PRIV_BASE (0xfffe1500)
#define BCM_6358_MPI_BASE (0xfffe1000)
#define BCM_6358_PCMCIA_BASE (0xfffe1054)
+#define BCM_6358_PCIE_BASE (0xdeadbeef)
#define BCM_6358_SDRAM_REGS_BASE (0xfffe2300)
#define BCM_6358_M2M_BASE (0xdeadbeef)
#define BCM_6358_DSL_BASE (0xfffe3000)
@@ -319,6 +390,8 @@ enum bcm63xx_regs_set {
#define BCM_6358_PCMDMA_BASE (0xfffe1800)
#define BCM_6358_PCMDMAC_BASE (0xfffe1900)
#define BCM_6358_PCMDMAS_BASE (0xfffe1a00)
+#define BCM_6358_RNG_BASE (0xdeadbeef)
+#define BCM_6358_MISC_BASE (0xdeadbeef)
/*
@@ -331,14 +404,14 @@ enum bcm63xx_regs_set {
#define BCM_6368_UART0_BASE (0xb0000100)
#define BCM_6368_UART1_BASE (0xb0000120)
#define BCM_6368_GPIO_BASE (0xb0000080)
-#define BCM_6368_SPI_BASE (0xdeadbeef)
-#define BCM_6368_SPI2_BASE (0xb0000800)
+#define BCM_6368_SPI_BASE (0xb0000800)
#define BCM_6368_UDC0_BASE (0xdeadbeef)
#define BCM_6368_OHCI0_BASE (0xb0001600)
#define BCM_6368_OHCI_PRIV_BASE (0xdeadbeef)
#define BCM_6368_USBH_PRIV_BASE (0xb0001700)
#define BCM_6368_MPI_BASE (0xb0001000)
#define BCM_6368_PCMCIA_BASE (0xb0001054)
+#define BCM_6368_PCIE_BASE (0xdeadbeef)
#define BCM_6368_SDRAM_REGS_BASE (0xdeadbeef)
#define BCM_6368_M2M_BASE (0xdeadbeef)
#define BCM_6368_DSL_BASE (0xdeadbeef)
@@ -361,6 +434,8 @@ enum bcm63xx_regs_set {
#define BCM_6368_PCMDMA_BASE (0xb0005800)
#define BCM_6368_PCMDMAC_BASE (0xb0005a00)
#define BCM_6368_PCMDMAS_BASE (0xb0005c00)
+#define BCM_6368_RNG_BASE (0xb0004180)
+#define BCM_6368_MISC_BASE (0xdeadbeef)
extern const unsigned long *bcm63xx_regs_base;
@@ -379,13 +454,13 @@ extern const unsigned long *bcm63xx_regs_base;
__GEN_RSET_BASE(__cpu, UART1) \
__GEN_RSET_BASE(__cpu, GPIO) \
__GEN_RSET_BASE(__cpu, SPI) \
- __GEN_RSET_BASE(__cpu, SPI2) \
__GEN_RSET_BASE(__cpu, UDC0) \
__GEN_RSET_BASE(__cpu, OHCI0) \
__GEN_RSET_BASE(__cpu, OHCI_PRIV) \
__GEN_RSET_BASE(__cpu, USBH_PRIV) \
__GEN_RSET_BASE(__cpu, MPI) \
__GEN_RSET_BASE(__cpu, PCMCIA) \
+ __GEN_RSET_BASE(__cpu, PCIE) \
__GEN_RSET_BASE(__cpu, DSL) \
__GEN_RSET_BASE(__cpu, ENET0) \
__GEN_RSET_BASE(__cpu, ENET1) \
@@ -407,6 +482,8 @@ extern const unsigned long *bcm63xx_regs_base;
__GEN_RSET_BASE(__cpu, PCMDMA) \
__GEN_RSET_BASE(__cpu, PCMDMAC) \
__GEN_RSET_BASE(__cpu, PCMDMAS) \
+ __GEN_RSET_BASE(__cpu, RNG) \
+ __GEN_RSET_BASE(__cpu, MISC) \
}
#define __GEN_CPU_REGS_TABLE(__cpu) \
@@ -418,13 +495,13 @@ extern const unsigned long *bcm63xx_regs_base;
[RSET_UART1] = BCM_## __cpu ##_UART1_BASE, \
[RSET_GPIO] = BCM_## __cpu ##_GPIO_BASE, \
[RSET_SPI] = BCM_## __cpu ##_SPI_BASE, \
- [RSET_SPI2] = BCM_## __cpu ##_SPI2_BASE, \
[RSET_UDC0] = BCM_## __cpu ##_UDC0_BASE, \
[RSET_OHCI0] = BCM_## __cpu ##_OHCI0_BASE, \
[RSET_OHCI_PRIV] = BCM_## __cpu ##_OHCI_PRIV_BASE, \
[RSET_USBH_PRIV] = BCM_## __cpu ##_USBH_PRIV_BASE, \
[RSET_MPI] = BCM_## __cpu ##_MPI_BASE, \
[RSET_PCMCIA] = BCM_## __cpu ##_PCMCIA_BASE, \
+ [RSET_PCIE] = BCM_## __cpu ##_PCIE_BASE, \
[RSET_DSL] = BCM_## __cpu ##_DSL_BASE, \
[RSET_ENET0] = BCM_## __cpu ##_ENET0_BASE, \
[RSET_ENET1] = BCM_## __cpu ##_ENET1_BASE, \
@@ -446,6 +523,8 @@ extern const unsigned long *bcm63xx_regs_base;
[RSET_PCMDMA] = BCM_## __cpu ##_PCMDMA_BASE, \
[RSET_PCMDMAC] = BCM_## __cpu ##_PCMDMAC_BASE, \
[RSET_PCMDMAS] = BCM_## __cpu ##_PCMDMAS_BASE, \
+ [RSET_RNG] = BCM_## __cpu ##_RNG_BASE, \
+ [RSET_MISC] = BCM_## __cpu ##_MISC_BASE, \
static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
@@ -453,6 +532,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
#ifdef BCMCPU_RUNTIME_DETECT
return bcm63xx_regs_base[set];
#else
+#ifdef CONFIG_BCM63XX_CPU_6328
+ __GEN_RSET(6328)
+#endif
#ifdef CONFIG_BCM63XX_CPU_6338
__GEN_RSET(6338)
#endif
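Taken together with the __GEN_RSET_BASE and __GEN_CPU_REGS_TABLE macros in the hunks above, this gives the usual two-path lookup: with several SoCs enabled bcm63xx_regset_address() indexes the runtime bcm63xx_regs_base[] table, and with a single SoC it collapses to the matching BCM_<cpu>_*_BASE constant. A small self-contained illustration of the token-pasting table generation; the register names and addresses here are made up.

    #include <stdio.h>

    enum demo_regs_set { RSET_GPIO, RSET_UART0, RSET_LAST };

    #define DEMO_6328_GPIO_BASE   0xb0000080UL
    #define DEMO_6328_UART0_BASE  0xb0000100UL

    /* one macro stamps out a per-SoC table indexed by the shared enum */
    #define GEN_DEMO_REGS_TABLE(__cpu)                          \
        {                                                       \
            [RSET_GPIO]  = DEMO_## __cpu ##_GPIO_BASE,          \
            [RSET_UART0] = DEMO_## __cpu ##_UART0_BASE,         \
        }

    static const unsigned long demo_regs_6328[RSET_LAST] = GEN_DEMO_REGS_TABLE(6328);

    int main(void)
    {
        printf("UART0 base: 0x%lx\n", demo_regs_6328[RSET_UART0]);
        return 0;
    }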
@@ -478,6 +560,7 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set)
*/
enum bcm63xx_irq {
IRQ_TIMER = 0,
+ IRQ_SPI,
IRQ_UART0,
IRQ_UART1,
IRQ_DSL,
@@ -506,9 +589,51 @@ enum bcm63xx_irq {
};
/*
+ * 6328 irqs
+ */
+#define BCM_6328_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
+
+#define BCM_6328_TIMER_IRQ (IRQ_INTERNAL_BASE + 31)
+#define BCM_6328_SPI_IRQ 0
+#define BCM_6328_UART0_IRQ (IRQ_INTERNAL_BASE + 28)
+#define BCM_6328_UART1_IRQ (BCM_6328_HIGH_IRQ_BASE + 7)
+#define BCM_6328_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
+#define BCM_6328_UDC0_IRQ 0
+#define BCM_6328_ENET0_IRQ 0
+#define BCM_6328_ENET1_IRQ 0
+#define BCM_6328_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 12)
+#define BCM_6328_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9)
+#define BCM_6328_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10)
+#define BCM_6328_PCMCIA_IRQ 0
+#define BCM_6328_ENET0_RXDMA_IRQ 0
+#define BCM_6328_ENET0_TXDMA_IRQ 0
+#define BCM_6328_ENET1_RXDMA_IRQ 0
+#define BCM_6328_ENET1_TXDMA_IRQ 0
+#define BCM_6328_PCI_IRQ (IRQ_INTERNAL_BASE + 23)
+#define BCM_6328_ATM_IRQ 0
+#define BCM_6328_ENETSW_RXDMA0_IRQ (BCM_6328_HIGH_IRQ_BASE + 0)
+#define BCM_6328_ENETSW_RXDMA1_IRQ (BCM_6328_HIGH_IRQ_BASE + 1)
+#define BCM_6328_ENETSW_RXDMA2_IRQ (BCM_6328_HIGH_IRQ_BASE + 2)
+#define BCM_6328_ENETSW_RXDMA3_IRQ (BCM_6328_HIGH_IRQ_BASE + 3)
+#define BCM_6328_ENETSW_TXDMA0_IRQ (BCM_6328_HIGH_IRQ_BASE + 4)
+#define BCM_6328_ENETSW_TXDMA1_IRQ (BCM_6328_HIGH_IRQ_BASE + 5)
+#define BCM_6328_ENETSW_TXDMA2_IRQ (BCM_6328_HIGH_IRQ_BASE + 6)
+#define BCM_6328_ENETSW_TXDMA3_IRQ (BCM_6328_HIGH_IRQ_BASE + 7)
+#define BCM_6328_XTM_IRQ (BCM_6328_HIGH_IRQ_BASE + 31)
+#define BCM_6328_XTM_DMA0_IRQ (BCM_6328_HIGH_IRQ_BASE + 11)
+
+#define BCM_6328_PCM_DMA0_IRQ (IRQ_INTERNAL_BASE + 2)
+#define BCM_6328_PCM_DMA1_IRQ (IRQ_INTERNAL_BASE + 3)
+#define BCM_6328_EXT_IRQ0 (IRQ_INTERNAL_BASE + 24)
+#define BCM_6328_EXT_IRQ1 (IRQ_INTERNAL_BASE + 25)
+#define BCM_6328_EXT_IRQ2 (IRQ_INTERNAL_BASE + 26)
+#define BCM_6328_EXT_IRQ3 (IRQ_INTERNAL_BASE + 27)
+
+/*
* 6338 irqs
*/
#define BCM_6338_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6338_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
#define BCM_6338_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
#define BCM_6338_UART1_IRQ 0
#define BCM_6338_DSL_IRQ (IRQ_INTERNAL_BASE + 5)
@@ -539,6 +664,7 @@ enum bcm63xx_irq {
* 6345 irqs
*/
#define BCM_6345_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6345_SPI_IRQ 0
#define BCM_6345_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
#define BCM_6345_UART1_IRQ 0
#define BCM_6345_DSL_IRQ (IRQ_INTERNAL_BASE + 3)
@@ -569,6 +695,7 @@ enum bcm63xx_irq {
* 6348 irqs
*/
#define BCM_6348_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6348_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
#define BCM_6348_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
#define BCM_6348_UART1_IRQ 0
#define BCM_6348_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
@@ -599,6 +726,7 @@ enum bcm63xx_irq {
* 6358 irqs
*/
#define BCM_6358_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6358_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
#define BCM_6358_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
#define BCM_6358_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
#define BCM_6358_DSL_IRQ (IRQ_INTERNAL_BASE + 29)
@@ -638,6 +766,7 @@ enum bcm63xx_irq {
#define BCM_6368_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32)
#define BCM_6368_TIMER_IRQ (IRQ_INTERNAL_BASE + 0)
+#define BCM_6368_SPI_IRQ (IRQ_INTERNAL_BASE + 1)
#define BCM_6368_UART0_IRQ (IRQ_INTERNAL_BASE + 2)
#define BCM_6368_UART1_IRQ (IRQ_INTERNAL_BASE + 3)
#define BCM_6368_DSL_IRQ (IRQ_INTERNAL_BASE + 4)
@@ -677,6 +806,7 @@ extern const int *bcm63xx_irqs;
#define __GEN_CPU_IRQ_TABLE(__cpu) \
[IRQ_TIMER] = BCM_## __cpu ##_TIMER_IRQ, \
+ [IRQ_SPI] = BCM_## __cpu ##_SPI_IRQ, \
[IRQ_UART0] = BCM_## __cpu ##_UART0_IRQ, \
[IRQ_UART1] = BCM_## __cpu ##_UART1_IRQ, \
[IRQ_DSL] = BCM_## __cpu ##_DSL_IRQ, \
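With IRQ_SPI added to the shared enum, every per-SoC list gains a BCM_<cpu>_SPI_IRQ value and __GEN_CPU_IRQ_TABLE stamps it into the table exported as bcm63xx_irqs; an entry of 0 (as on 6328 and 6345) simply marks the line as absent. A sketch of how platform code could resolve it, assuming this header is included; the -1 return convention is illustrative, not something the patch defines.

    /* sketch only: IRQ_SPI and bcm63xx_irqs come from the header patched above */
    static inline int demo_bcm63xx_spi_irq(void)
    {
        int irq = bcm63xx_irqs[IRQ_SPI];

        return irq ? irq : -1;      /* 0 means "no SPI interrupt on this SoC" */
    }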
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h
new file mode 100644
index 000000000000..354b8481ec4a
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_flash.h
@@ -0,0 +1,12 @@
+#ifndef __BCM63XX_FLASH_H
+#define __BCM63XX_FLASH_H
+
+enum {
+ BCM63XX_FLASH_TYPE_PARALLEL,
+ BCM63XX_FLASH_TYPE_SERIAL,
+ BCM63XX_FLASH_TYPE_NAND,
+};
+
+int __init bcm63xx_flash_register(void);
+
+#endif /* __BCM63XX_FLASH_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
new file mode 100644
index 000000000000..c9bae1362606
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -0,0 +1,91 @@
+#ifndef BCM63XX_DEV_SPI_H
+#define BCM63XX_DEV_SPI_H
+
+#include <linux/types.h>
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+int __init bcm63xx_spi_register(void);
+
+struct bcm63xx_spi_pdata {
+ unsigned int fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
+ int bus_num;
+ int num_chipselect;
+ u32 speed_hz;
+};
+
+enum bcm63xx_regs_spi {
+ SPI_CMD,
+ SPI_INT_STATUS,
+ SPI_INT_MASK_ST,
+ SPI_INT_MASK,
+ SPI_ST,
+ SPI_CLK_CFG,
+ SPI_FILL_BYTE,
+ SPI_MSG_TAIL,
+ SPI_RX_TAIL,
+ SPI_MSG_CTL,
+ SPI_MSG_DATA,
+ SPI_RX_DATA,
+};
+
+#define __GEN_SPI_RSET_BASE(__cpu, __rset) \
+ case SPI_## __rset: \
+ return SPI_## __cpu ##_## __rset;
+
+#define __GEN_SPI_RSET(__cpu) \
+ switch (reg) { \
+ __GEN_SPI_RSET_BASE(__cpu, CMD) \
+ __GEN_SPI_RSET_BASE(__cpu, INT_STATUS) \
+ __GEN_SPI_RSET_BASE(__cpu, INT_MASK_ST) \
+ __GEN_SPI_RSET_BASE(__cpu, INT_MASK) \
+ __GEN_SPI_RSET_BASE(__cpu, ST) \
+ __GEN_SPI_RSET_BASE(__cpu, CLK_CFG) \
+ __GEN_SPI_RSET_BASE(__cpu, FILL_BYTE) \
+ __GEN_SPI_RSET_BASE(__cpu, MSG_TAIL) \
+ __GEN_SPI_RSET_BASE(__cpu, RX_TAIL) \
+ __GEN_SPI_RSET_BASE(__cpu, MSG_CTL) \
+ __GEN_SPI_RSET_BASE(__cpu, MSG_DATA) \
+ __GEN_SPI_RSET_BASE(__cpu, RX_DATA) \
+ }
+
+#define __GEN_SPI_REGS_TABLE(__cpu) \
+ [SPI_CMD] = SPI_## __cpu ##_CMD, \
+ [SPI_INT_STATUS] = SPI_## __cpu ##_INT_STATUS, \
+ [SPI_INT_MASK_ST] = SPI_## __cpu ##_INT_MASK_ST, \
+ [SPI_INT_MASK] = SPI_## __cpu ##_INT_MASK, \
+ [SPI_ST] = SPI_## __cpu ##_ST, \
+ [SPI_CLK_CFG] = SPI_## __cpu ##_CLK_CFG, \
+ [SPI_FILL_BYTE] = SPI_## __cpu ##_FILL_BYTE, \
+ [SPI_MSG_TAIL] = SPI_## __cpu ##_MSG_TAIL, \
+ [SPI_RX_TAIL] = SPI_## __cpu ##_RX_TAIL, \
+ [SPI_MSG_CTL] = SPI_## __cpu ##_MSG_CTL, \
+ [SPI_MSG_DATA] = SPI_## __cpu ##_MSG_DATA, \
+ [SPI_RX_DATA] = SPI_## __cpu ##_RX_DATA,
+
+static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg)
+{
+#ifdef BCMCPU_RUNTIME_DETECT
+ extern const unsigned long *bcm63xx_regs_spi;
+
+ return bcm63xx_regs_spi[reg];
+#else
+#ifdef CONFIG_BCM63XX_CPU_6338
+ __GEN_SPI_RSET(6338)
+#endif
+#ifdef CONFIG_BCM63XX_CPU_6348
+ __GEN_SPI_RSET(6348)
+#endif
+#ifdef CONFIG_BCM63XX_CPU_6358
+ __GEN_SPI_RSET(6358)
+#endif
+#ifdef CONFIG_BCM63XX_CPU_6368
+ __GEN_SPI_RSET(6368)
+#endif
+#endif
+ return 0;
+}
+
+#endif /* BCM63XX_DEV_SPI_H */
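bcm63xx_spireg() is the per-register counterpart of bcm63xx_regset_address(): one returns the SoC-specific offset of a logical SPI register, the other the base of the whole block, and a driver combines the two. A sketch, where spi_base stands for the ioremapped RSET_SPI region and readb() is the generic MMIO accessor from <linux/io.h>:

    #include <linux/io.h>

    /* sketch: read the SPI status register on whichever bcm63xx SoC is running */
    static inline u8 demo_bcm63xx_spi_status(void __iomem *spi_base)
    {
        return readb(spi_base + bcm63xx_spireg(SPI_ST));
    }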
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 1d7dd96aa460..0a9891f7580d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -9,6 +9,8 @@ int __init bcm63xx_gpio_init(void);
static inline unsigned long bcm63xx_gpio_count(void)
{
switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ return 32;
case BCM6358_CPU_ID:
return 40;
case BCM6338_CPU_ID:
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
index 72477a6441dd..9203d90e610c 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
@@ -40,6 +40,10 @@
#define BCM_CB_MEM_END_PA (BCM_CB_MEM_BASE_PA + \
BCM_CB_MEM_SIZE - 1)
+#define BCM_PCIE_MEM_BASE_PA 0x10f00000
+#define BCM_PCIE_MEM_SIZE (16 * 1024 * 1024)
+#define BCM_PCIE_MEM_END_PA (BCM_PCIE_MEM_BASE_PA + \
+ BCM_PCIE_MEM_SIZE - 1)
/*
* Internal registers are accessed through KSEG3
@@ -85,11 +89,15 @@
#define bcm_mpi_writel(v, o) bcm_rset_writel(RSET_MPI, (v), (o))
#define bcm_pcmcia_readl(o) bcm_rset_readl(RSET_PCMCIA, (o))
#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
+#define bcm_pcie_readl(o) bcm_rset_readl(RSET_PCIE, (o))
+#define bcm_pcie_writel(v, o) bcm_rset_writel(RSET_PCIE, (v), (o))
#define bcm_sdram_readl(o) bcm_rset_readl(RSET_SDRAM, (o))
#define bcm_sdram_writel(v, o) bcm_rset_writel(RSET_SDRAM, (v), (o))
#define bcm_memc_readl(o) bcm_rset_readl(RSET_MEMC, (o))
#define bcm_memc_writel(v, o) bcm_rset_writel(RSET_MEMC, (v), (o))
#define bcm_ddr_readl(o) bcm_rset_readl(RSET_DDR, (o))
#define bcm_ddr_writel(v, o) bcm_rset_writel(RSET_DDR, (v), (o))
+#define bcm_misc_readl(o) bcm_rset_readl(RSET_MISC, (o))
+#define bcm_misc_writel(v, o) bcm_rset_writel(RSET_MISC, (v), (o))
#endif /* ! BCM63XX_IO_H_ */
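The new bcm_pcie_* and bcm_misc_* helpers follow the existing bcm_rset_readl()/bcm_rset_writel() pattern, so setup code can touch the 6328-only blocks without carrying base addresses around. A hedged sketch; the register and bit names come from the bcm63xx_regs.h hunk later in this diff, and whether the eventual PCIe host driver does exactly this is not shown here:

    /* sketch: switch on the 6328 PCIe serdes through the MISC block */
    static void demo_bcm6328_enable_serdes(void)
    {
        u32 val;

        val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
        val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
        bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
    }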
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index fdcd78ca1b03..61f2a2a5099d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -15,6 +15,30 @@
/* Clock Control register */
#define PERF_CKCTL_REG 0x4
+#define CKCTL_6328_PHYMIPS_EN (1 << 0)
+#define CKCTL_6328_ADSL_QPROC_EN (1 << 1)
+#define CKCTL_6328_ADSL_AFE_EN (1 << 2)
+#define CKCTL_6328_ADSL_EN (1 << 3)
+#define CKCTL_6328_MIPS_EN (1 << 4)
+#define CKCTL_6328_SAR_EN (1 << 5)
+#define CKCTL_6328_PCM_EN (1 << 6)
+#define CKCTL_6328_USBD_EN (1 << 7)
+#define CKCTL_6328_USBH_EN (1 << 8)
+#define CKCTL_6328_HSSPI_EN (1 << 9)
+#define CKCTL_6328_PCIE_EN (1 << 10)
+#define CKCTL_6328_ROBOSW_EN (1 << 11)
+
+#define CKCTL_6328_ALL_SAFE_EN (CKCTL_6328_PHYMIPS_EN | \
+ CKCTL_6328_ADSL_QPROC_EN | \
+ CKCTL_6328_ADSL_AFE_EN | \
+ CKCTL_6328_ADSL_EN | \
+ CKCTL_6328_SAR_EN | \
+ CKCTL_6328_PCM_EN | \
+ CKCTL_6328_USBD_EN | \
+ CKCTL_6328_USBH_EN | \
+ CKCTL_6328_ROBOSW_EN | \
+ CKCTL_6328_PCIE_EN)
+
#define CKCTL_6338_ADSLPHY_EN (1 << 0)
#define CKCTL_6338_MPI_EN (1 << 1)
#define CKCTL_6338_DRAM_EN (1 << 2)
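CKCTL_6328_ALL_SAFE_EN groups the 6328 clock gates that boot code may toggle as a set, mirroring the *_ALL_SAFE_EN masks already defined for the other SoCs. A sketch of the usual read-modify-write; it assumes the bcm_perf_readl()/bcm_perf_writel() accessors from the unchanged part of bcm63xx_io.h:

    /* sketch: clear every "safe" 6328 clock gate in one go */
    static void demo_bcm6328_gate_unused_clocks(void)
    {
        u32 reg;

        reg = bcm_perf_readl(PERF_CKCTL_REG);
        reg &= ~CKCTL_6328_ALL_SAFE_EN;
        bcm_perf_writel(reg, PERF_CKCTL_REG);
    }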
@@ -90,35 +114,36 @@
#define CKCTL_6368_PHYMIPS_EN (1 << 6)
#define CKCTL_6368_SWPKT_USB_EN (1 << 7)
#define CKCTL_6368_SWPKT_SAR_EN (1 << 8)
-#define CKCTL_6368_SPI_CLK_EN (1 << 9)
-#define CKCTL_6368_USBD_CLK_EN (1 << 10)
-#define CKCTL_6368_SAR_CLK_EN (1 << 11)
-#define CKCTL_6368_ROBOSW_CLK_EN (1 << 12)
-#define CKCTL_6368_UTOPIA_CLK_EN (1 << 13)
-#define CKCTL_6368_PCM_CLK_EN (1 << 14)
-#define CKCTL_6368_USBH_CLK_EN (1 << 15)
+#define CKCTL_6368_SPI_EN (1 << 9)
+#define CKCTL_6368_USBD_EN (1 << 10)
+#define CKCTL_6368_SAR_EN (1 << 11)
+#define CKCTL_6368_ROBOSW_EN (1 << 12)
+#define CKCTL_6368_UTOPIA_EN (1 << 13)
+#define CKCTL_6368_PCM_EN (1 << 14)
+#define CKCTL_6368_USBH_EN (1 << 15)
#define CKCTL_6368_DISABLE_GLESS_EN (1 << 16)
-#define CKCTL_6368_NAND_CLK_EN (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN (1 << 18)
+#define CKCTL_6368_NAND_EN (1 << 17)
+#define CKCTL_6368_IPSEC_EN (1 << 18)
#define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \
CKCTL_6368_SWPKT_SAR_EN | \
- CKCTL_6368_SPI_CLK_EN | \
- CKCTL_6368_USBD_CLK_EN | \
- CKCTL_6368_SAR_CLK_EN | \
- CKCTL_6368_ROBOSW_CLK_EN | \
- CKCTL_6368_UTOPIA_CLK_EN | \
- CKCTL_6368_PCM_CLK_EN | \
- CKCTL_6368_USBH_CLK_EN | \
+ CKCTL_6368_SPI_EN | \
+ CKCTL_6368_USBD_EN | \
+ CKCTL_6368_SAR_EN | \
+ CKCTL_6368_ROBOSW_EN | \
+ CKCTL_6368_UTOPIA_EN | \
+ CKCTL_6368_PCM_EN | \
+ CKCTL_6368_USBH_EN | \
CKCTL_6368_DISABLE_GLESS_EN | \
- CKCTL_6368_NAND_CLK_EN | \
- CKCTL_6368_IPSEC_CLK_EN)
+ CKCTL_6368_NAND_EN | \
+ CKCTL_6368_IPSEC_EN)
/* System PLL Control register */
#define PERF_SYS_PLL_CTL_REG 0x8
#define SYS_PLL_SOFT_RESET 0x1
/* Interrupt Mask register */
+#define PERF_IRQMASK_6328_REG 0x20
#define PERF_IRQMASK_6338_REG 0xc
#define PERF_IRQMASK_6345_REG 0xc
#define PERF_IRQMASK_6348_REG 0xc
@@ -126,6 +151,7 @@
#define PERF_IRQMASK_6368_REG 0x20
/* Interrupt Status register */
+#define PERF_IRQSTAT_6328_REG 0x28
#define PERF_IRQSTAT_6338_REG 0x10
#define PERF_IRQSTAT_6345_REG 0x10
#define PERF_IRQSTAT_6348_REG 0x10
@@ -133,6 +159,7 @@
#define PERF_IRQSTAT_6368_REG 0x28
/* External Interrupt Configuration register */
+#define PERF_EXTIRQ_CFG_REG_6328 0x18
#define PERF_EXTIRQ_CFG_REG_6338 0x14
#define PERF_EXTIRQ_CFG_REG_6348 0x14
#define PERF_EXTIRQ_CFG_REG_6358 0x14
@@ -162,8 +189,21 @@
/* Soft Reset register */
#define PERF_SOFTRESET_REG 0x28
+#define PERF_SOFTRESET_6328_REG 0x10
#define PERF_SOFTRESET_6368_REG 0x10
+#define SOFTRESET_6328_SPI_MASK (1 << 0)
+#define SOFTRESET_6328_EPHY_MASK (1 << 1)
+#define SOFTRESET_6328_SAR_MASK (1 << 2)
+#define SOFTRESET_6328_ENETSW_MASK (1 << 3)
+#define SOFTRESET_6328_USBS_MASK (1 << 4)
+#define SOFTRESET_6328_USBH_MASK (1 << 5)
+#define SOFTRESET_6328_PCM_MASK (1 << 6)
+#define SOFTRESET_6328_PCIE_CORE_MASK (1 << 7)
+#define SOFTRESET_6328_PCIE_MASK (1 << 8)
+#define SOFTRESET_6328_PCIE_EXT_MASK (1 << 9)
+#define SOFTRESET_6328_PCIE_HARD_MASK (1 << 10)
+
#define SOFTRESET_6338_SPI_MASK (1 << 0)
#define SOFTRESET_6338_ENET_MASK (1 << 2)
#define SOFTRESET_6338_USBH_MASK (1 << 3)
@@ -307,6 +347,8 @@
/* Watchdog reset length register */
#define WDT_RSTLEN_REG 0x8
+/* Watchdog soft reset register (BCM6328 only) */
+#define WDT_SOFTRESET_REG 0xc
/*************************************************************************
* _REG relative to RSET_UARTx
@@ -507,6 +549,15 @@
#define GPIO_BASEMODE_6368_MASK 0x7
/* those bits must be kept as read in gpio basemode register*/
+#define GPIO_STRAPBUS_REG 0x40
+#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
+#define STRAPBUS_6358_BOOT_SEL_SERIAL (0 << 1)
+#define STRAPBUS_6368_BOOT_SEL_MASK 0x3
+#define STRAPBUS_6368_BOOT_SEL_NAND 0
+#define STRAPBUS_6368_BOOT_SEL_SERIAL 1
+#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
+
+
/*************************************************************************
* _REG relative to RSET_ENET
*************************************************************************/
@@ -924,6 +975,8 @@
* _REG relative to RSET_DDR
*************************************************************************/
+#define DDR_CSEND_REG 0x8
+
#define DDR_DMIPSPLLCFG_REG 0x18
#define DMIPSPLLCFG_M1_SHIFT 0
#define DMIPSPLLCFG_M1_MASK (0xff << DMIPSPLLCFG_M1_SHIFT)
@@ -973,4 +1026,208 @@
#define M2M_SRCID_REG(x) ((x) * 0x40 + 0x14)
#define M2M_DSTID_REG(x) ((x) * 0x40 + 0x18)
+/*************************************************************************
+ * _REG relative to RSET_RNG
+ *************************************************************************/
+
+#define RNG_CTRL 0x00
+#define RNG_EN (1 << 0)
+
+#define RNG_STAT 0x04
+#define RNG_AVAIL_MASK (0xff000000)
+
+#define RNG_DATA 0x08
+#define RNG_THRES 0x0c
+#define RNG_MASK 0x10
+
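These five offsets are all the new RSET_RNG block exposes; a driver gates its reads on the available-word count in RNG_STAT. A sketch, where rng_base stands for the ioremapped RSET_RNG region (RSET_RNG_SIZE, 20 bytes, is defined earlier in this series) and cpu_relax() is the usual busy-wait hint:

    #include <linux/io.h>

    /* sketch: enable the block, wait for a buffered word, then read it */
    static u32 demo_bcm63xx_rng_read(void __iomem *rng_base)
    {
        writel(readl(rng_base + RNG_CTRL) | RNG_EN, rng_base + RNG_CTRL);
        while (!(readl(rng_base + RNG_STAT) & RNG_AVAIL_MASK))
            cpu_relax();

        return readl(rng_base + RNG_DATA);
    }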
+/*************************************************************************
+ * _REG relative to RSET_SPI
+ *************************************************************************/
+
+/* BCM 6338 SPI core */
+#define SPI_6338_CMD 0x00 /* 16-bit register */
+#define SPI_6338_INT_STATUS 0x02
+#define SPI_6338_INT_MASK_ST 0x03
+#define SPI_6338_INT_MASK 0x04
+#define SPI_6338_ST 0x05
+#define SPI_6338_CLK_CFG 0x06
+#define SPI_6338_FILL_BYTE 0x07
+#define SPI_6338_MSG_TAIL 0x09
+#define SPI_6338_RX_TAIL 0x0b
+#define SPI_6338_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6338_MSG_CTL_WIDTH 8
+#define SPI_6338_MSG_DATA 0x41
+#define SPI_6338_MSG_DATA_SIZE 0x3f
+#define SPI_6338_RX_DATA 0x80
+#define SPI_6338_RX_DATA_SIZE 0x3f
+
+/* BCM 6348 SPI core */
+#define SPI_6348_CMD 0x00 /* 16-bit register */
+#define SPI_6348_INT_STATUS 0x02
+#define SPI_6348_INT_MASK_ST 0x03
+#define SPI_6348_INT_MASK 0x04
+#define SPI_6348_ST 0x05
+#define SPI_6348_CLK_CFG 0x06
+#define SPI_6348_FILL_BYTE 0x07
+#define SPI_6348_MSG_TAIL 0x09
+#define SPI_6348_RX_TAIL 0x0b
+#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
+#define SPI_6348_MSG_DATA 0x41
+#define SPI_6348_MSG_DATA_SIZE 0x3f
+#define SPI_6348_RX_DATA 0x80
+#define SPI_6348_RX_DATA_SIZE 0x3f
+
+/* BCM 6358 SPI core */
+#define SPI_6358_MSG_CTL 0x00 /* 16-bit register */
+#define SPI_6358_MSG_CTL_WIDTH 16
+#define SPI_6358_MSG_DATA 0x02
+#define SPI_6358_MSG_DATA_SIZE 0x21e
+#define SPI_6358_RX_DATA 0x400
+#define SPI_6358_RX_DATA_SIZE 0x220
+#define SPI_6358_CMD 0x700 /* 16-bit register */
+#define SPI_6358_INT_STATUS 0x702
+#define SPI_6358_INT_MASK_ST 0x703
+#define SPI_6358_INT_MASK 0x704
+#define SPI_6358_ST 0x705
+#define SPI_6358_CLK_CFG 0x706
+#define SPI_6358_FILL_BYTE 0x707
+#define SPI_6358_MSG_TAIL 0x709
+#define SPI_6358_RX_TAIL 0x70B
+
+/* BCM 6368 SPI core */
+#define SPI_6368_MSG_CTL 0x00 /* 16-bit register */
+#define SPI_6368_MSG_CTL_WIDTH 16
+#define SPI_6368_MSG_DATA 0x02
+#define SPI_6368_MSG_DATA_SIZE 0x21e
+#define SPI_6368_RX_DATA 0x400
+#define SPI_6368_RX_DATA_SIZE 0x220
+#define SPI_6368_CMD 0x700 /* 16-bit register */
+#define SPI_6368_INT_STATUS 0x702
+#define SPI_6368_INT_MASK_ST 0x703
+#define SPI_6368_INT_MASK 0x704
+#define SPI_6368_ST 0x705
+#define SPI_6368_CLK_CFG 0x706
+#define SPI_6368_FILL_BYTE 0x707
+#define SPI_6368_MSG_TAIL 0x709
+#define SPI_6368_RX_TAIL 0x70B
+
+/* Shared SPI definitions */
+
+/* Message configuration */
+#define SPI_FD_RW 0x00
+#define SPI_HD_W 0x01
+#define SPI_HD_R 0x02
+#define SPI_BYTE_CNT_SHIFT 0
+#define SPI_6338_MSG_TYPE_SHIFT 6
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+#define SPI_6368_MSG_TYPE_SHIFT 14
+
+/* Command */
+#define SPI_CMD_NOOP 0x00
+#define SPI_CMD_SOFT_RESET 0x01
+#define SPI_CMD_HARD_RESET 0x02
+#define SPI_CMD_START_IMMEDIATE 0x03
+#define SPI_CMD_COMMAND_SHIFT 0
+#define SPI_CMD_COMMAND_MASK 0x000f
+#define SPI_CMD_DEVICE_ID_SHIFT 4
+#define SPI_CMD_PREPEND_BYTE_CNT_SHIFT 8
+#define SPI_CMD_ONE_BYTE_SHIFT 11
+#define SPI_CMD_ONE_WIRE_SHIFT 12
+#define SPI_DEV_ID_0 0
+#define SPI_DEV_ID_1 1
+#define SPI_DEV_ID_2 2
+#define SPI_DEV_ID_3 3
+
+/* Interrupt mask */
+#define SPI_INTR_CMD_DONE 0x01
+#define SPI_INTR_RX_OVERFLOW 0x02
+#define SPI_INTR_TX_UNDERFLOW 0x04
+#define SPI_INTR_TX_OVERFLOW 0x08
+#define SPI_INTR_RX_UNDERFLOW 0x10
+#define SPI_INTR_CLEAR_ALL 0x1f
+
+/* Status */
+#define SPI_RX_EMPTY 0x02
+#define SPI_CMD_BUSY 0x04
+#define SPI_SERIAL_BUSY 0x08
+
+/* Clock configuration */
+#define SPI_CLK_20MHZ 0x00
+#define SPI_CLK_0_391MHZ 0x01
+#define SPI_CLK_0_781MHZ 0x02 /* default */
+#define SPI_CLK_1_563MHZ 0x03
+#define SPI_CLK_3_125MHZ 0x04
+#define SPI_CLK_6_250MHZ 0x05
+#define SPI_CLK_12_50MHZ 0x06
+#define SPI_CLK_MASK 0x07
+#define SPI_SSOFFTIME_MASK 0x38
+#define SPI_SSOFFTIME_SHIFT 3
+#define SPI_BYTE_SWAP 0x80
+
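The shared command and message-control encoding is identical across the SoCs; only the MSG_TYPE shift (6 on 6338/6348, 14 on 6358/6368) and the FIFO offsets differ, which is what bcm63xx_spi_pdata's msg_type_shift and msg_ctl_width carry. A sketch of composing the two words from the definitions above:

    /* sketch: a "start immediate" command for chip-select cs with 'prepend'
     * leading bytes, plus a half-duplex write of len bytes; msg_type_shift
     * is the per-SoC SPI_xxxx_MSG_TYPE_SHIFT value */
    static inline u16 demo_spi_cmd_word(unsigned int cs, unsigned int prepend)
    {
        return SPI_CMD_START_IMMEDIATE |
               (cs << SPI_CMD_DEVICE_ID_SHIFT) |
               (prepend << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
    }

    static inline u16 demo_spi_msg_ctl_word(unsigned int len,
                                            unsigned int msg_type_shift)
    {
        return (SPI_HD_W << msg_type_shift) | (len << SPI_BYTE_CNT_SHIFT);
    }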
+/*************************************************************************
+ * _REG relative to RSET_MISC
+ *************************************************************************/
+#define MISC_SERDES_CTRL_REG 0x0
+#define SERDES_PCIE_EN (1 << 0)
+#define SERDES_PCIE_EXD_EN (1 << 15)
+
+#define MISC_STRAPBUS_6328_REG 0x240
+#define STRAPBUS_6328_FCVO_SHIFT 7
+#define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT)
+#define STRAPBUS_6328_BOOT_SEL_SERIAL (1 << 28)
+#define STRAPBUS_6328_BOOT_SEL_NAND (0 << 28)
+
+/*************************************************************************
+ * _REG relative to RSET_PCIE
+ *************************************************************************/
+
+#define PCIE_CONFIG2_REG 0x408
+#define CONFIG2_BAR1_SIZE_EN 1
+#define CONFIG2_BAR1_SIZE_MASK 0xf
+
+#define PCIE_IDVAL3_REG 0x43c
+#define IDVAL3_CLASS_CODE_MASK 0xffffff
+#define IDVAL3_SUBCLASS_SHIFT 8
+#define IDVAL3_CLASS_SHIFT 16
+
+#define PCIE_DLSTATUS_REG 0x1048
+#define DLSTATUS_PHYLINKUP (1 << 13)
+
+#define PCIE_BRIDGE_OPT1_REG 0x2820
+#define OPT1_RD_BE_OPT_EN (1 << 7)
+#define OPT1_RD_REPLY_BE_FIX_EN (1 << 9)
+#define OPT1_PCIE_BRIDGE_HOLE_DET_EN (1 << 11)
+#define OPT1_L1_INT_STATUS_MASK_POL (1 << 12)
+
+#define PCIE_BRIDGE_OPT2_REG 0x2824
+#define OPT2_UBUS_UR_DECODE_DIS (1 << 2)
+#define OPT2_TX_CREDIT_CHK_EN (1 << 4)
+#define OPT2_CFG_TYPE1_BD_SEL (1 << 7)
+#define OPT2_CFG_TYPE1_BUS_NO_SHIFT 16
+#define OPT2_CFG_TYPE1_BUS_NO_MASK (0xff << OPT2_CFG_TYPE1_BUS_NO_SHIFT)
+
+#define PCIE_BRIDGE_BAR0_BASEMASK_REG 0x2828
+#define PCIE_BRIDGE_BAR1_BASEMASK_REG 0x2830
+#define BASEMASK_REMAP_EN (1 << 0)
+#define BASEMASK_SWAP_EN (1 << 1)
+#define BASEMASK_MASK_SHIFT 4
+#define BASEMASK_MASK_MASK (0xfff << BASEMASK_MASK_SHIFT)
+#define BASEMASK_BASE_SHIFT 20
+#define BASEMASK_BASE_MASK (0xfff << BASEMASK_BASE_SHIFT)
+
+#define PCIE_BRIDGE_BAR0_REBASE_ADDR_REG 0x282c
+#define PCIE_BRIDGE_BAR1_REBASE_ADDR_REG 0x2834
+#define REBASE_ADDR_BASE_SHIFT 20
+#define REBASE_ADDR_BASE_MASK (0xfff << REBASE_ADDR_BASE_SHIFT)
+
+#define PCIE_BRIDGE_RC_INT_MASK_REG 0x2854
+#define PCIE_RC_INT_A (1 << 0)
+#define PCIE_RC_INT_B (1 << 1)
+#define PCIE_RC_INT_C (1 << 2)
+#define PCIE_RC_INT_D (1 << 3)
+
+#define PCIE_DEVICE_OFFSET 0x8000
+
#endif /* BCM63XX_REGS_H_ */
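The RSET_PCIE definitions above are the building blocks for a 6328 PCIe host driver; the most basic check is whether the data-link layer came up. A sketch using the bcm_pcie_readl() accessor added to bcm63xx_io.h earlier in this diff:

    /* sketch: PCIe link-up test against the new RSET_PCIE block */
    static bool demo_bcm63xx_pcie_link_up(void)
    {
        return bcm_pcie_readl(PCIE_DLSTATUS_REG) & DLSTATUS_PHYLINKUP;
    }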
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index ef94ba73646e..30931c42379d 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -18,6 +18,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
if (offset >= 0xfff00000)
return 1;
break;
+ case BCM6328_CPU_ID:
case BCM6368_CPU_ID:
if (offset >= 0xb0000000 && offset < 0xb1000000)
return 1;
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 5b05f186e395..c22a3078bf11 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -21,14 +21,10 @@ enum octeon_irq {
OCTEON_IRQ_TIMER,
/* sources in CIU_INTX_EN0 */
OCTEON_IRQ_WORKQ0,
- OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
- OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
+ OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
OCTEON_IRQ_MBOX1,
- OCTEON_IRQ_UART0,
- OCTEON_IRQ_UART1,
- OCTEON_IRQ_UART2,
OCTEON_IRQ_PCI_INT0,
OCTEON_IRQ_PCI_INT1,
OCTEON_IRQ_PCI_INT2,
@@ -38,64 +34,25 @@ enum octeon_irq {
OCTEON_IRQ_PCI_MSI2,
OCTEON_IRQ_PCI_MSI3,
- OCTEON_IRQ_TWSI,
- OCTEON_IRQ_TWSI2,
OCTEON_IRQ_RML,
- OCTEON_IRQ_TRACE0,
- OCTEON_IRQ_GMX_DRP0 = OCTEON_IRQ_TRACE0 + 4,
- OCTEON_IRQ_IPD_DRP = OCTEON_IRQ_GMX_DRP0 + 5,
- OCTEON_IRQ_KEY_ZERO,
OCTEON_IRQ_TIMER0,
OCTEON_IRQ_TIMER1,
OCTEON_IRQ_TIMER2,
OCTEON_IRQ_TIMER3,
OCTEON_IRQ_USB0,
OCTEON_IRQ_USB1,
- OCTEON_IRQ_PCM,
- OCTEON_IRQ_MPI,
- OCTEON_IRQ_POWIQ,
- OCTEON_IRQ_IPDPPTHR,
- OCTEON_IRQ_MII0,
- OCTEON_IRQ_MII1,
OCTEON_IRQ_BOOTDMA,
-
- OCTEON_IRQ_NAND,
- OCTEON_IRQ_MIO, /* Summary of MIO_BOOT_ERR */
- OCTEON_IRQ_IOB, /* Summary of IOB_INT_SUM */
- OCTEON_IRQ_FPA, /* Summary of FPA_INT_SUM */
- OCTEON_IRQ_POW, /* Summary of POW_ECC_ERR */
- OCTEON_IRQ_L2C, /* Summary of L2C_INT_STAT */
- OCTEON_IRQ_IPD, /* Summary of IPD_INT_SUM */
- OCTEON_IRQ_PIP, /* Summary of PIP_INT_REG */
- OCTEON_IRQ_PKO, /* Summary of PKO_REG_ERROR */
- OCTEON_IRQ_ZIP, /* Summary of ZIP_ERROR */
- OCTEON_IRQ_TIM, /* Summary of TIM_REG_ERROR */
- OCTEON_IRQ_RAD, /* Summary of RAD_REG_ERROR */
- OCTEON_IRQ_KEY, /* Summary of KEY_INT_SUM */
- OCTEON_IRQ_DFA, /* Summary of DFA */
- OCTEON_IRQ_USBCTL, /* Summary of USBN0_INT_SUM */
- OCTEON_IRQ_SLI, /* Summary of SLI_INT_SUM */
- OCTEON_IRQ_DPI, /* Summary of DPI_INT_SUM */
- OCTEON_IRQ_AGX0, /* Summary of GMX0*+PCS0_INT*_REG */
- OCTEON_IRQ_AGL = OCTEON_IRQ_AGX0 + 5,
- OCTEON_IRQ_PTP,
- OCTEON_IRQ_PEM0,
- OCTEON_IRQ_PEM1,
- OCTEON_IRQ_SRIO0,
- OCTEON_IRQ_SRIO1,
- OCTEON_IRQ_LMC0,
- OCTEON_IRQ_DFM = OCTEON_IRQ_LMC0 + 4, /* Summary of DFM */
- OCTEON_IRQ_RST,
+#ifndef CONFIG_PCI_MSI
+ OCTEON_IRQ_LAST = 127
+#endif
};
#ifdef CONFIG_PCI_MSI
-/* 152 - 407 represent the MSI interrupts 0-255 */
-#define OCTEON_IRQ_MSI_BIT0 (OCTEON_IRQ_RST + 1)
+/* 256 - 511 represent the MSI interrupts 0-255 */
+#define OCTEON_IRQ_MSI_BIT0 (256)
#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
-#else
-#define OCTEON_IRQ_LAST (OCTEON_IRQ_RST + 1)
#endif
#endif
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index bb5b9a4e29c8..986982db7c38 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -19,6 +19,8 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#define JZ_NAND_NUM_BANKS 4
+
struct jz_nand_platform_data {
int num_partitions;
struct mtd_partition *partitions;
@@ -27,6 +29,8 @@ struct jz_nand_platform_data {
unsigned int busy_gpio;
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+
void (*ident_callback)(struct platform_device *, struct nand_chip *,
struct mtd_partition **, int *num_partitions);
};
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 1e29b9dd1d73..5222a007bc21 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/kconfig.h>
/* loongson internal northbridge initialization */
extern void bonito_irq_init(void);
@@ -66,7 +67,7 @@ extern int mach_i8259_irq(void);
#include <linux/interrupt.h>
static inline void do_perfcnt_IRQ(void)
{
-#if defined(CONFIG_OPROFILE) || defined(CONFIG_OPROFILE_MODULE)
+#if IS_ENABLED(CONFIG_OPROFILE)
do_IRQ(LOONGSON2_PERFCNT_IRQ);
#endif
}
@@ -244,7 +245,6 @@ static inline void do_perfcnt_IRQ(void)
#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
#include <linux/cpufreq.h>
-extern void loongson2_cpu_wait(void);
extern struct cpufreq_frequency_table loongson2_clockmod_table[];
/* Chip Config */
diff --git a/arch/mips/include/asm/mach-loongson1/irq.h b/arch/mips/include/asm/mach-loongson1/irq.h
new file mode 100644
index 000000000000..da96ed42f733
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/irq.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * IRQ mappings for Loongson 1
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+#ifndef __ASM_MACH_LOONGSON1_IRQ_H
+#define __ASM_MACH_LOONGSON1_IRQ_H
+
+/*
+ * CPU core Interrupt Numbers
+ */
+#define MIPS_CPU_IRQ_BASE 0
+#define MIPS_CPU_IRQ(x) (MIPS_CPU_IRQ_BASE + (x))
+
+#define SOFTINT0_IRQ MIPS_CPU_IRQ(0)
+#define SOFTINT1_IRQ MIPS_CPU_IRQ(1)
+#define INT0_IRQ MIPS_CPU_IRQ(2)
+#define INT1_IRQ MIPS_CPU_IRQ(3)
+#define INT2_IRQ MIPS_CPU_IRQ(4)
+#define INT3_IRQ MIPS_CPU_IRQ(5)
+#define INT4_IRQ MIPS_CPU_IRQ(6)
+#define TIMER_IRQ MIPS_CPU_IRQ(7) /* cpu timer */
+
+#define MIPS_CPU_IRQS (MIPS_CPU_IRQ(7) + 1 - MIPS_CPU_IRQ_BASE)
+
+/*
+ * INT0~3 Interrupt Numbers
+ */
+#define LS1X_IRQ_BASE MIPS_CPU_IRQS
+#define LS1X_IRQ(n, x) (LS1X_IRQ_BASE + (n << 5) + (x))
+
+#define LS1X_UART0_IRQ LS1X_IRQ(0, 2)
+#define LS1X_UART1_IRQ LS1X_IRQ(0, 3)
+#define LS1X_UART2_IRQ LS1X_IRQ(0, 4)
+#define LS1X_UART3_IRQ LS1X_IRQ(0, 5)
+#define LS1X_CAN0_IRQ LS1X_IRQ(0, 6)
+#define LS1X_CAN1_IRQ LS1X_IRQ(0, 7)
+#define LS1X_SPI0_IRQ LS1X_IRQ(0, 8)
+#define LS1X_SPI1_IRQ LS1X_IRQ(0, 9)
+#define LS1X_AC97_IRQ LS1X_IRQ(0, 10)
+#define LS1X_DMA0_IRQ LS1X_IRQ(0, 13)
+#define LS1X_DMA1_IRQ LS1X_IRQ(0, 14)
+#define LS1X_DMA2_IRQ LS1X_IRQ(0, 15)
+#define LS1X_PWM0_IRQ LS1X_IRQ(0, 17)
+#define LS1X_PWM1_IRQ LS1X_IRQ(0, 18)
+#define LS1X_PWM2_IRQ LS1X_IRQ(0, 19)
+#define LS1X_PWM3_IRQ LS1X_IRQ(0, 20)
+#define LS1X_RTC_INT0_IRQ LS1X_IRQ(0, 21)
+#define LS1X_RTC_INT1_IRQ LS1X_IRQ(0, 22)
+#define LS1X_RTC_INT2_IRQ LS1X_IRQ(0, 23)
+#define LS1X_TOY_INT0_IRQ LS1X_IRQ(0, 24)
+#define LS1X_TOY_INT1_IRQ LS1X_IRQ(0, 25)
+#define LS1X_TOY_INT2_IRQ LS1X_IRQ(0, 26)
+#define LS1X_RTC_TICK_IRQ LS1X_IRQ(0, 27)
+#define LS1X_TOY_TICK_IRQ LS1X_IRQ(0, 28)
+
+#define LS1X_EHCI_IRQ LS1X_IRQ(1, 0)
+#define LS1X_OHCI_IRQ LS1X_IRQ(1, 1)
+#define LS1X_GMAC0_IRQ LS1X_IRQ(1, 2)
+#define LS1X_GMAC1_IRQ LS1X_IRQ(1, 3)
+
+#define LS1X_IRQS (LS1X_IRQ(4, 31) + 1 - LS1X_IRQ_BASE)
+
+#define NR_IRQS (MIPS_CPU_IRQS + LS1X_IRQS)
+
+#endif /* __ASM_MACH_LOONGSON1_IRQ_H */
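The mapping above is pure arithmetic: the eight MIPS core IRQs come first and each cascade bank then gets a 32-entry window, so bank 1 bit 0 (the EHCI line) lands on virtual IRQ 8 + 32 + 0 = 40. A standalone check of the numbering:

    #include <stdio.h>

    #define MIPS_CPU_IRQS   8
    #define LS1X_IRQ_BASE   MIPS_CPU_IRQS
    #define LS1X_IRQ(n, x)  (LS1X_IRQ_BASE + ((n) << 5) + (x))

    int main(void)
    {
        printf("UART0 -> %d, EHCI -> %d, GMAC1 -> %d\n",
               LS1X_IRQ(0, 2), LS1X_IRQ(1, 0), LS1X_IRQ(1, 3));
        return 0;       /* prints: UART0 -> 10, EHCI -> 40, GMAC1 -> 43 */
    }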
diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h
new file mode 100644
index 000000000000..4e18e88cebbf
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/loongson1.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Register mappings for Loongson 1
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+#ifndef __ASM_MACH_LOONGSON1_LOONGSON1_H
+#define __ASM_MACH_LOONGSON1_LOONGSON1_H
+
+#define DEFAULT_MEMSIZE 256 /* If no memsize provided */
+
+/* Loongson 1 Register Bases */
+#define LS1X_INTC_BASE 0x1fd01040
+#define LS1X_EHCI_BASE 0x1fe00000
+#define LS1X_OHCI_BASE 0x1fe08000
+#define LS1X_GMAC0_BASE 0x1fe10000
+#define LS1X_GMAC1_BASE 0x1fe20000
+
+#define LS1X_UART0_BASE 0x1fe40000
+#define LS1X_UART1_BASE 0x1fe44000
+#define LS1X_UART2_BASE 0x1fe48000
+#define LS1X_UART3_BASE 0x1fe4c000
+#define LS1X_CAN0_BASE 0x1fe50000
+#define LS1X_CAN1_BASE 0x1fe54000
+#define LS1X_I2C0_BASE 0x1fe58000
+#define LS1X_I2C1_BASE 0x1fe68000
+#define LS1X_I2C2_BASE 0x1fe70000
+#define LS1X_PWM_BASE 0x1fe5c000
+#define LS1X_WDT_BASE 0x1fe5c060
+#define LS1X_RTC_BASE 0x1fe64000
+#define LS1X_AC97_BASE 0x1fe74000
+#define LS1X_NAND_BASE 0x1fe78000
+#define LS1X_CLK_BASE 0x1fe78030
+
+#include <regs-clk.h>
+#include <regs-wdt.h>
+
+#endif /* __ASM_MACH_LOONGSON1_LOONGSON1_H */
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
new file mode 100644
index 000000000000..2f171617bade
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+
+#ifndef __ASM_MACH_LOONGSON1_PLATFORM_H
+#define __ASM_MACH_LOONGSON1_PLATFORM_H
+
+#include <linux/platform_device.h>
+
+extern struct platform_device ls1x_uart_device;
+extern struct platform_device ls1x_eth0_device;
+extern struct platform_device ls1x_ehci_device;
+extern struct platform_device ls1x_rtc_device;
+
+void ls1x_serial_setup(void);
+
+#endif /* __ASM_MACH_LOONGSON1_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson1/prom.h b/arch/mips/include/asm/mach-loongson1/prom.h
new file mode 100644
index 000000000000..b871dc41b8d9
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/prom.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON1_PROM_H
+#define __ASM_MACH_LOONGSON1_PROM_H
+
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+/* environment arguments from bootloader */
+extern unsigned long memsize, highmemsize;
+
+/* loongson-specific command line, env and memory initialization */
+extern char *prom_getenv(char *name);
+extern void __init prom_init_cmdline(void);
+
+#endif /* __ASM_MACH_LOONGSON1_PROM_H */
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
new file mode 100644
index 000000000000..8efa7fb9f73a
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 Clock Register Definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON1_REGS_CLK_H
+#define __ASM_MACH_LOONGSON1_REGS_CLK_H
+
+#define LS1X_CLK_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_CLK_BASE + (x)))
+
+#define LS1X_CLK_PLL_FREQ LS1X_CLK_REG(0x0)
+#define LS1X_CLK_PLL_DIV LS1X_CLK_REG(0x4)
+
+/* Clock PLL Divisor Register Bits */
+#define DIV_DC_EN (0x1 << 31)
+#define DIV_DC (0x1f << 26)
+#define DIV_CPU_EN (0x1 << 25)
+#define DIV_CPU (0x1f << 20)
+#define DIV_DDR_EN (0x1 << 19)
+#define DIV_DDR (0x1f << 14)
+
+#define DIV_DC_SHIFT 26
+#define DIV_CPU_SHIFT 20
+#define DIV_DDR_SHIFT 14
+
+#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
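The divisor fields are plain mask/shift pairs on LS1X_CLK_PLL_DIV; pulling one out looks like the sketch below (readl() comes from <linux/io.h>, and the actual frequency computation lives in the clock driver rather than this header):

    #include <linux/io.h>

    /* sketch: current CPU divisor, or 0 when the divider override is off */
    static unsigned int demo_ls1x_cpu_div(void)
    {
        u32 div = readl(LS1X_CLK_PLL_DIV);

        return (div & DIV_CPU_EN) ? (div & DIV_CPU) >> DIV_CPU_SHIFT : 0;
    }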
diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
new file mode 100644
index 000000000000..f897de68c527
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 watchdog register definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON1_REGS_WDT_H
+#define __ASM_MACH_LOONGSON1_REGS_WDT_H
+
+#define LS1X_WDT_REG(x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_WDT_BASE + (x)))
+
+#define LS1X_WDT_EN LS1X_WDT_REG(0x0)
+#define LS1X_WDT_SET LS1X_WDT_REG(0x4)
+#define LS1X_WDT_TIMER LS1X_WDT_REG(0x8)
+
+#endif /* __ASM_MACH_LOONGSON1_REGS_WDT_H */
diff --git a/arch/mips/include/asm/mach-loongson1/war.h b/arch/mips/include/asm/mach-loongson1/war.h
new file mode 100644
index 000000000000..e3680a8fb349
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MACH_LOONGSON1_WAR_H
+#define __ASM_MACH_LOONGSON1_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MACH_LOONGSON1_WAR_H */
diff --git a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
index d193fb68cf27..966db4be377c 100644
--- a/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-netlogic/cpu-feature-overrides.h
@@ -48,7 +48,6 @@
#define cpu_has_userlocal 1
#define cpu_has_mips32r2 1
#define cpu_has_mips64r2 1
-#define cpu_has_dc_aliases 1
#else
#error "Unknown Netlogic CPU"
#endif
diff --git a/arch/mips/include/asm/mach-tx49xx/mangle-port.h b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
index 5e6912fdd0ed..490867b03c8f 100644
--- a/arch/mips/include/asm/mach-tx49xx/mangle-port.h
+++ b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
@@ -9,7 +9,7 @@
#define ioswabb(a, x) (x)
#define __mem_ioswabb(a, x) (x)
#if defined(CONFIG_TOSHIBA_RBTX4939) && \
- (defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)) && \
+ IS_ENABLED(CONFIG_SMC91X) && \
defined(__BIG_ENDIAN)
#define NEEDS_TXX9_IOSWABW
extern u16 (*ioswabw)(volatile u16 *a, u16 x);
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index e71ff4c317f2..5b3cb8553e9a 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -28,6 +28,9 @@
#define read_c0_vpeconf0() __read_32bit_c0_register($1, 2)
#define write_c0_vpeconf0(val) __write_32bit_c0_register($1, 2, val)
+#define read_c0_vpeconf1() __read_32bit_c0_register($1, 3)
+#define write_c0_vpeconf1(val) __write_32bit_c0_register($1, 3, val)
+
#define read_c0_tcstatus() __read_32bit_c0_register($2, 1)
#define write_c0_tcstatus(val) __write_32bit_c0_register($2, 1, val)
@@ -124,6 +127,14 @@
#define VPECONF0_XTC_SHIFT 21
#define VPECONF0_XTC (_ULCAST_(0xff) << VPECONF0_XTC_SHIFT)
+/* VPEConf1 fields (per VPE) */
+#define VPECONF1_NCP1_SHIFT 0
+#define VPECONF1_NCP1 (_ULCAST_(0xff) << VPECONF1_NCP1_SHIFT)
+#define VPECONF1_NCP2_SHIFT 10
+#define VPECONF1_NCP2 (_ULCAST_(0xff) << VPECONF1_NCP2_SHIFT)
+#define VPECONF1_NCX_SHIFT 20
+#define VPECONF1_NCX (_ULCAST_(0xff) << VPECONF1_NCX_SHIFT)
+
/* TCStatus fields (per TC) */
#define TCSTATUS_TASID (_ULCAST_(0xff))
#define TCSTATUS_IXMT_SHIFT 10
@@ -350,6 +361,8 @@ do { \
#define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val)
#define read_vpe_c0_vpeconf0() mftc0(1, 2)
#define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val)
+#define read_vpe_c0_vpeconf1() mftc0(1, 3)
+#define write_vpe_c0_vpeconf1(val) mttc0(1, 3, val)
#define read_vpe_c0_count() mftc0(9, 0)
#define write_vpe_c0_count(val) mttc0(9, 0, val)
#define read_vpe_c0_status() mftc0(12, 0)
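The new VPEConf1 accessors follow the existing mftc0()/mttc0() pattern, so callers use the familiar read-modify-write on whichever TC/VPE is currently targeted. A sketch; whether a given platform needs to touch NCX at all is outside the scope of this header change:

    /* sketch: clear the NCX coprocessor field in the targeted VPE's VPEConf1 */
    static inline void demo_clear_vpeconf1_ncx(void)
    {
        write_vpe_c0_vpeconf1(read_vpe_c0_vpeconf1() & ~VPECONF1_NCX);
    }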
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 530008048c62..dca8bce8c7ab 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -10,6 +10,7 @@ struct mod_arch_specific {
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
@@ -117,6 +118,8 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "RM9000 "
#elif defined CONFIG_CPU_SB1
#define MODULE_PROC_FAMILY "SB1 "
+#elif defined CONFIG_CPU_LOONGSON1
+#define MODULE_PROC_FAMILY "LOONGSON1 "
#elif defined CONFIG_CPU_LOONGSON2
#define MODULE_PROC_FAMILY "LOONGSON2 "
#elif defined CONFIG_CPU_CAVIUM_OCTEON
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
index bf7d41deb9be..7b63a6b722a0 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
@@ -47,7 +47,9 @@
#define CPU_BLOCKID_MAP 10
#define LSU_DEFEATURE 0x304
-#define LSU_CERRLOG_REGID 0x09
+#define LSU_DEBUG_ADDR 0x305
+#define LSU_DEBUG_DATA0 0x306
+#define LSU_CERRLOG_REGID 0x309
#define SCHED_DEFEATURE 0x700
/* Offsets of interest from the 'MAP' Block */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 86cc3391e50c..2c63f9754640 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -36,6 +36,9 @@
#define __NLM_HAL_IOMAP_H__
#define XLP_DEFAULT_IO_BASE 0x18000000
+#define XLP_DEFAULT_PCI_ECFG_BASE XLP_DEFAULT_IO_BASE
+#define XLP_DEFAULT_PCI_CFG_BASE 0x1c000000
+
#define NMI_BASE 0xbfc00000
#define XLP_IO_CLK 133333333
@@ -129,7 +132,7 @@
#define PCI_DEVICE_ID_NLM_PIC 0x1003
#define PCI_DEVICE_ID_NLM_PCIE 0x1004
#define PCI_DEVICE_ID_NLM_EHCI 0x1007
-#define PCI_DEVICE_ID_NLM_ILK 0x1008
+#define PCI_DEVICE_ID_NLM_OHCI 0x1008
#define PCI_DEVICE_ID_NLM_NAE 0x1009
#define PCI_DEVICE_ID_NLM_POE 0x100A
#define PCI_DEVICE_ID_NLM_FMN 0x100B
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
new file mode 100644
index 000000000000..66c323d1bd7d
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_PCIBUS_H__
+#define __NLM_HAL_PCIBUS_H__
+
+/* PCIE Memory and IO regions */
+#define PCIE_MEM_BASE 0xd0000000ULL
+#define PCIE_MEM_LIMIT 0xdfffffffULL
+#define PCIE_IO_BASE 0x14000000ULL
+#define PCIE_IO_LIMIT 0x15ffffffULL
+
+#define PCIE_BRIDGE_CMD 0x1
+#define PCIE_BRIDGE_MSI_CAP 0x14
+#define PCIE_BRIDGE_MSI_ADDRL 0x15
+#define PCIE_BRIDGE_MSI_ADDRH 0x16
+#define PCIE_BRIDGE_MSI_DATA 0x17
+
+/* XLP Global PCIE configuration space registers */
+#define PCIE_BYTE_SWAP_MEM_BASE 0x247
+#define PCIE_BYTE_SWAP_MEM_LIM 0x248
+#define PCIE_BYTE_SWAP_IO_BASE 0x249
+#define PCIE_BYTE_SWAP_IO_LIM 0x24A
+#define PCIE_MSI_STATUS 0x25A
+#define PCIE_MSI_EN 0x25B
+#define PCIE_INT_EN0 0x261
+
+/* PCIE_MSI_EN */
+#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
+
+/* PCIE_INT_EN0 */
+#define PCIE_MSI_INT_EN (1 << 9)
+
+#ifndef __ASSEMBLY__
+
+#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_pcie_base(node, inst) \
+ nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst))
+#define nlm_get_pcie_regbase(node, inst) \
+ (nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ)
+
+int xlp_pcie_link_irt(int link);
+#endif
+#endif /* __NLM_HAL_PCIBUS_H__ */
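A setup path built on these definitions would typically unmask the MSI vectors and the summary MSI interrupt per link, roughly as sketched below; node/link numbering and the uint64_t return type of nlm_get_pcie_base() are assumptions here, not something this header spells out:

    /* sketch: enable all MSI vectors plus the summary MSI interrupt on a link */
    static void demo_xlp_enable_link_msi(int node, int link)
    {
        uint64_t base = nlm_get_pcie_base(node, link);

        nlm_write_pcie_reg(base, PCIE_MSI_EN, PCIE_MSI_VECTOR_INT_EN);
        nlm_write_pcie_reg(base, PCIE_INT_EN0, PCIE_MSI_INT_EN);
    }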
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index b6628f7ccf74..ad8b80233a63 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -201,7 +201,11 @@
#define PIC_NUM_USB_IRTS 6
#define PIC_IRT_USB_0_INDEX 115
#define PIC_IRT_EHCI_0_INDEX 115
+#define PIC_IRT_OHCI_0_INDEX 116
+#define PIC_IRT_OHCI_1_INDEX 117
#define PIC_IRT_EHCI_1_INDEX 118
+#define PIC_IRT_OHCI_2_INDEX 119
+#define PIC_IRT_OHCI_3_INDEX 120
#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX)
/* 115 to 120 */
#define PIC_IRT_GDX_INDEX 121
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
new file mode 100644
index 000000000000..a9cd350dfb6c
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlp-hal/usb.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __NLM_HAL_USB_H__
+#define __NLM_HAL_USB_H__
+
+#define USB_CTL_0 0x01
+#define USB_PHY_0 0x0A
+#define USB_PHY_RESET 0x01
+#define USB_PHY_PORT_RESET_0 0x10
+#define USB_PHY_PORT_RESET_1 0x20
+#define USB_CONTROLLER_RESET 0x01
+#define USB_INT_STATUS 0x0E
+#define USB_INT_EN 0x0F
+#define USB_PHY_INTERRUPT_EN 0x01
+#define USB_OHCI_INTERRUPT_EN 0x02
+#define USB_OHCI_INTERRUPT1_EN 0x04
+#define USB_OHCI_INTERRUPT2_EN 0x08
+#define USB_CTRL_INTERRUPT_EN 0x10
+
+#ifndef __ASSEMBLY__
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_hcd_base(node, inst) \
+ nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
+#define nlm_get_usb_regbase(node, inst) \
+ (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+#endif
+#endif /* __NLM_HAL_USB_H__ */
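One plausible use of the bit definitions above is sketched below: take the PHY and both ports out of reset, then unmask the controller and OHCI interrupt sources. The real bring-up order is hardware specific and not dictated by this header; the sequence here is illustrative only:

    /* sketch: release USB PHY/port resets, then enable the interrupt sources */
    static void demo_xlp_usb_intr_enable(uint64_t usb_base)
    {
        uint32_t val;

        val = nlm_read_usb_reg(usb_base, USB_PHY_0);
        val &= ~(USB_PHY_RESET | USB_PHY_PORT_RESET_0 | USB_PHY_PORT_RESET_1);
        nlm_write_usb_reg(usb_base, USB_PHY_0, val);

        nlm_write_usb_reg(usb_base, USB_INT_EN,
                          USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
                          USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN);
    }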
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
index 1540588e396d..7e47209327a5 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/xlp.h
@@ -35,8 +35,21 @@
#ifndef _NLM_HAL_XLP_H
#define _NLM_HAL_XLP_H
-#define PIC_UART_0_IRQ 17
-#define PIC_UART_1_IRQ 18
+#define PIC_UART_0_IRQ 17
+#define PIC_UART_1_IRQ 18
+#define PIC_PCIE_LINK_0_IRQ 19
+#define PIC_PCIE_LINK_1_IRQ 20
+#define PIC_PCIE_LINK_2_IRQ 21
+#define PIC_PCIE_LINK_3_IRQ 22
+#define PIC_EHCI_0_IRQ 23
+#define PIC_EHCI_1_IRQ 24
+#define PIC_OHCI_0_IRQ 25
+#define PIC_OHCI_1_IRQ 26
+#define PIC_OHCI_2_IRQ 27
+#define PIC_OHCI_3_IRQ 28
+#define PIC_MMC_IRQ 29
+#define PIC_I2C_0_IRQ 30
+#define PIC_I2C_1_IRQ 31
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/asm/netlogic/xlr/bridge.h b/arch/mips/include/asm/netlogic/xlr/bridge.h
new file mode 100644
index 000000000000..2d02428c4f1b
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/bridge.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ASM_NLM_BRIDGE_H_
+#define _ASM_NLM_BRIDGE_H_
+
+#define BRIDGE_DRAM_0_BAR 0
+#define BRIDGE_DRAM_1_BAR 1
+#define BRIDGE_DRAM_2_BAR 2
+#define BRIDGE_DRAM_3_BAR 3
+#define BRIDGE_DRAM_4_BAR 4
+#define BRIDGE_DRAM_5_BAR 5
+#define BRIDGE_DRAM_6_BAR 6
+#define BRIDGE_DRAM_7_BAR 7
+#define BRIDGE_DRAM_CHN_0_MTR_0_BAR 8
+#define BRIDGE_DRAM_CHN_0_MTR_1_BAR 9
+#define BRIDGE_DRAM_CHN_0_MTR_2_BAR 10
+#define BRIDGE_DRAM_CHN_0_MTR_3_BAR 11
+#define BRIDGE_DRAM_CHN_0_MTR_4_BAR 12
+#define BRIDGE_DRAM_CHN_0_MTR_5_BAR 13
+#define BRIDGE_DRAM_CHN_0_MTR_6_BAR 14
+#define BRIDGE_DRAM_CHN_0_MTR_7_BAR 15
+#define BRIDGE_DRAM_CHN_1_MTR_0_BAR 16
+#define BRIDGE_DRAM_CHN_1_MTR_1_BAR 17
+#define BRIDGE_DRAM_CHN_1_MTR_2_BAR 18
+#define BRIDGE_DRAM_CHN_1_MTR_3_BAR 19
+#define BRIDGE_DRAM_CHN_1_MTR_4_BAR 20
+#define BRIDGE_DRAM_CHN_1_MTR_5_BAR 21
+#define BRIDGE_DRAM_CHN_1_MTR_6_BAR 22
+#define BRIDGE_DRAM_CHN_1_MTR_7_BAR 23
+#define BRIDGE_CFG_BAR 24
+#define BRIDGE_PHNX_IO_BAR 25
+#define BRIDGE_FLASH_BAR 26
+#define BRIDGE_SRAM_BAR 27
+#define BRIDGE_HTMEM_BAR 28
+#define BRIDGE_HTINT_BAR 29
+#define BRIDGE_HTPIC_BAR 30
+#define BRIDGE_HTSM_BAR 31
+#define BRIDGE_HTIO_BAR 32
+#define BRIDGE_HTCFG_BAR 33
+#define BRIDGE_PCIXCFG_BAR 34
+#define BRIDGE_PCIXMEM_BAR 35
+#define BRIDGE_PCIXIO_BAR 36
+#define BRIDGE_DEVICE_MASK 37
+#define BRIDGE_AERR_INTR_LOG1 38
+#define BRIDGE_AERR_INTR_LOG2 39
+#define BRIDGE_AERR_INTR_LOG3 40
+#define BRIDGE_AERR_DEV_STAT 41
+#define BRIDGE_AERR1_LOG1 42
+#define BRIDGE_AERR1_LOG2 43
+#define BRIDGE_AERR1_LOG3 44
+#define BRIDGE_AERR1_DEV_STAT 45
+#define BRIDGE_AERR_INTR_EN 46
+#define BRIDGE_AERR_UPG 47
+#define BRIDGE_AERR_CLEAR 48
+#define BRIDGE_AERR1_CLEAR 49
+#define BRIDGE_SBE_COUNTS 50
+#define BRIDGE_DBE_COUNTS 51
+#define BRIDGE_BITERR_INT_EN 52
+
+#define BRIDGE_SYS2IO_CREDITS 53
+#define BRIDGE_EVNT_CNT_CTRL1 54
+#define BRIDGE_EVNT_COUNTER1 55
+#define BRIDGE_EVNT_CNT_CTRL2 56
+#define BRIDGE_EVNT_COUNTER2 57
+#define BRIDGE_RESERVED1 58
+
+#define BRIDGE_DEFEATURE 59
+#define BRIDGE_SCRATCH0 60
+#define BRIDGE_SCRATCH1 61
+#define BRIDGE_SCRATCH2 62
+#define BRIDGE_SCRATCH3 63
+
+#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/flash.h b/arch/mips/include/asm/netlogic/xlr/flash.h
new file mode 100644
index 000000000000..f8aca5472b6c
--- /dev/null
+++ b/arch/mips/include/asm/netlogic/xlr/flash.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ASM_NLM_FLASH_H_
+#define _ASM_NLM_FLASH_H_
+
+#define FLASH_CSBASE_ADDR(cs) (cs)
+#define FLASH_CSADDR_MASK(cs) (0x10 + (cs))
+#define FLASH_CSDEV_PARM(cs) (0x20 + (cs))
+#define FLASH_CSTIME_PARMA(cs) (0x30 + (cs))
+#define FLASH_CSTIME_PARMB(cs) (0x40 + (cs))
+
+#define FLASH_INT_MASK 0x50
+#define FLASH_INT_STATUS 0x60
+#define FLASH_ERROR_STATUS 0x70
+#define FLASH_ERROR_ADDR 0x80
+
+#define FLASH_NAND_CLE(cs) (0x90 + (cs))
+#define FLASH_NAND_ALE(cs) (0xa0 + (cs))
+
+#define FLASH_NAND_CSDEV_PARAM 0x000041e6
+#define FLASH_NAND_CSTIME_PARAMA 0x4f400e22
+#define FLASH_NAND_CSTIME_PARAMB 0x000083cf
+
+#endif
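
The chip-select macros in this new header encode each register's offset as a fixed base plus the chip-select number. A quick standalone check of that layout (offsets copied from the header above; chip select 2 is an arbitrary example):

#include <stdio.h>

/* Offsets copied from the header above; cs = 2 is an arbitrary example. */
#define FLASH_CSBASE_ADDR(cs)	(cs)
#define FLASH_CSADDR_MASK(cs)	(0x10 + (cs))
#define FLASH_NAND_CLE(cs)	(0x90 + (cs))

int main(void)
{
	int cs = 2;

	printf("CSBASE_ADDR(%d): 0x%02x\n", cs, FLASH_CSBASE_ADDR(cs));
	printf("CSADDR_MASK(%d): 0x%02x\n", cs, FLASH_CSADDR_MASK(cs));
	printf("NAND_CLE(%d):    0x%02x\n", cs, FLASH_NAND_CLE(cs));
	return 0;
}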
diff --git a/arch/mips/include/asm/netlogic/xlr/gpio.h b/arch/mips/include/asm/netlogic/xlr/gpio.h
index 51f6ad4aeb14..8492e835b110 100644
--- a/arch/mips/include/asm/netlogic/xlr/gpio.h
+++ b/arch/mips/include/asm/netlogic/xlr/gpio.h
@@ -35,39 +35,40 @@
#ifndef _ASM_NLM_GPIO_H
#define _ASM_NLM_GPIO_H
-#define NETLOGIC_GPIO_INT_EN_REG 0
-#define NETLOGIC_GPIO_INPUT_INVERSION_REG 1
-#define NETLOGIC_GPIO_IO_DIR_REG 2
-#define NETLOGIC_GPIO_IO_DATA_WR_REG 3
-#define NETLOGIC_GPIO_IO_DATA_RD_REG 4
+#define GPIO_INT_EN_REG 0
+#define GPIO_INPUT_INVERSION_REG 1
+#define GPIO_IO_DIR_REG 2
+#define GPIO_IO_DATA_WR_REG 3
+#define GPIO_IO_DATA_RD_REG 4
-#define NETLOGIC_GPIO_SWRESET_REG 8
-#define NETLOGIC_GPIO_DRAM1_CNTRL_REG 9
-#define NETLOGIC_GPIO_DRAM1_RATIO_REG 10
-#define NETLOGIC_GPIO_DRAM1_RESET_REG 11
-#define NETLOGIC_GPIO_DRAM1_STATUS_REG 12
-#define NETLOGIC_GPIO_DRAM2_CNTRL_REG 13
-#define NETLOGIC_GPIO_DRAM2_RATIO_REG 14
-#define NETLOGIC_GPIO_DRAM2_RESET_REG 15
-#define NETLOGIC_GPIO_DRAM2_STATUS_REG 16
+#define GPIO_SWRESET_REG 8
+#define GPIO_DRAM1_CNTRL_REG 9
+#define GPIO_DRAM1_RATIO_REG 10
+#define GPIO_DRAM1_RESET_REG 11
+#define GPIO_DRAM1_STATUS_REG 12
+#define GPIO_DRAM2_CNTRL_REG 13
+#define GPIO_DRAM2_RATIO_REG 14
+#define GPIO_DRAM2_RESET_REG 15
+#define GPIO_DRAM2_STATUS_REG 16
-#define NETLOGIC_GPIO_PWRON_RESET_CFG_REG 21
-#define NETLOGIC_GPIO_BIST_ALL_GO_STATUS_REG 24
-#define NETLOGIC_GPIO_BIST_CPU_GO_STATUS_REG 25
-#define NETLOGIC_GPIO_BIST_DEV_GO_STATUS_REG 26
+#define GPIO_PWRON_RESET_CFG_REG 21
+#define GPIO_BIST_ALL_GO_STATUS_REG 24
+#define GPIO_BIST_CPU_GO_STATUS_REG 25
+#define GPIO_BIST_DEV_GO_STATUS_REG 26
-#define NETLOGIC_GPIO_FUSE_BANK_REG 35
-#define NETLOGIC_GPIO_CPU_RESET_REG 40
-#define NETLOGIC_GPIO_RNG_REG 43
+#define GPIO_FUSE_BANK_REG 35
+#define GPIO_CPU_RESET_REG 40
+#define GPIO_RNG_REG 43
-#define NETLOGIC_PWRON_RESET_PCMCIA_BOOT 17
-#define NETLOGIC_GPIO_LED_BITMAP 0x1700000
-#define NETLOGIC_GPIO_LED_0_SHIFT 20
-#define NETLOGIC_GPIO_LED_1_SHIFT 24
+#define PWRON_RESET_PCMCIA_BOOT 17
-#define NETLOGIC_GPIO_LED_OUTPUT_CODE_RESET 0x01
-#define NETLOGIC_GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02
-#define NETLOGIC_GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03
-#define NETLOGIC_GPIO_LED_OUTPUT_CODE_MAIN 0x04
+#define GPIO_LED_BITMAP 0x1700000
+#define GPIO_LED_0_SHIFT 20
+#define GPIO_LED_1_SHIFT 24
+
+#define GPIO_LED_OUTPUT_CODE_RESET 0x01
+#define GPIO_LED_OUTPUT_CODE_HARD_RESET 0x02
+#define GPIO_LED_OUTPUT_CODE_SOFT_RESET 0x03
+#define GPIO_LED_OUTPUT_CODE_MAIN 0x04
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-fpa.h b/arch/mips/include/asm/octeon/cvmx-helper-fpa.h
deleted file mode 100644
index 5ff8c93198de..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-helper-fpa.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#ifndef __CVMX_HELPER_H_FPA__
-#define __CVMX_HELPER_H_FPA__
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Sizes of each element in the pools is
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should at minimum have two per
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * is recommened.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-extern int cvmx_helper_initialize_fpa(int packet_buffers,
- int work_queue_entries, int pko_buffers,
- int tim_buffers, int dfa_buffers);
-
-#endif /* __CVMX_HELPER_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
index 3169cd79f2ac..0ac6b9f412be 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -61,8 +61,6 @@ typedef union {
} s;
} cvmx_helper_link_info_t;
-#include "cvmx-helper-fpa.h"
-
#include <asm/octeon/cvmx-helper-errata.h>
#include "cvmx-helper-loop.h"
#include "cvmx-helper-npi.h"
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index f72f768cd3a4..1e2486e23573 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -215,11 +215,6 @@ struct octeon_cf_data {
int dma_engine; /* -1 for no DMA */
};
-struct octeon_i2c_data {
- unsigned int sys_freq;
- unsigned int i2c_freq;
-};
-
extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
extern int octeon_get_boot_debug_flag(void);
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 7206d445bab8..8808bf548b99 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -20,9 +20,6 @@
extern int early_init_dt_scan_memory_arch(unsigned long node,
const char *uname, int depth, void *data);
-extern int reserve_mem_mach(unsigned long addr, unsigned long size);
-extern void free_mem_mach(unsigned long addr, unsigned long size);
-
extern void device_tree_init(void);
static inline unsigned long pci_address_to_pio(phys_addr_t address)
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
index a37d12b3b61c..afe9e0e03fe9 100644
--- a/arch/mips/include/asm/r4k-timer.h
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -12,16 +12,16 @@
#ifdef CONFIG_SYNC_R4K
-extern void synchronise_count_master(void);
-extern void synchronise_count_slave(void);
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
#else
-static inline void synchronise_count_master(void)
+static inline void synchronise_count_master(int cpu)
{
}
-static inline void synchronise_count_slave(void)
+static inline void synchronise_count_slave(int cpu)
{
}
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
index c9736fc06325..8935426a56ab 100644
--- a/arch/mips/include/asm/smtc.h
+++ b/arch/mips/include/asm/smtc.h
@@ -33,6 +33,12 @@ typedef long asiduse;
#endif
#endif
+/*
+ * VPE Management information
+ */
+
+#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
+
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
struct mm_struct;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 653a412c036c..3b92efef56d3 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -687,7 +687,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__MODULE_JAL(__copy_user) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
- : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
@@ -797,7 +797,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
".set\treorder" \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
- : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
@@ -820,7 +820,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
".set\treorder" \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
- : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
DADDI_SCRATCH, "memory"); \
__cu_len_r; \
})
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 440a21dab575..3d9f75f7ffc9 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -6,6 +6,7 @@
* Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*/
#include <linux/types.h>
@@ -62,8 +63,10 @@ void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b)
Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu);
-Ip_u2u1u3(_andi);
Ip_u3u1u2(_and);
+Ip_u2u1u3(_andi);
+Ip_u1u2s3(_bbit0);
+Ip_u1u2s3(_bbit1);
Ip_u1u2s3(_beq);
Ip_u1u2s3(_beql);
Ip_u1s2(_bgez);
@@ -72,55 +75,54 @@ Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
Ip_u2s3u1(_cache);
-Ip_u1u2u3(_dmfc0);
-Ip_u1u2u3(_dmtc0);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
+Ip_u2u1msbu3(_dins);
+Ip_u2u1msbu3(_dinsm);
+Ip_u1u2u3(_dmfc0);
+Ip_u1u2u3(_dmtc0);
+Ip_u2u1u3(_drotr);
+Ip_u2u1u3(_drotr32);
Ip_u2u1u3(_dsll);
Ip_u2u1u3(_dsll32);
Ip_u2u1u3(_dsra);
Ip_u2u1u3(_dsrl);
Ip_u2u1u3(_dsrl32);
-Ip_u2u1u3(_drotr);
-Ip_u2u1u3(_drotr32);
Ip_u3u1u2(_dsubu);
Ip_0(_eret);
Ip_u1(_j);
Ip_u1(_jal);
Ip_u1(_jr);
Ip_u2s3u1(_ld);
+Ip_u3u1u2(_ldx);
Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
Ip_u2s3u1(_lw);
+Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mtc0);
-Ip_u2u1u3(_ori);
Ip_u3u1u2(_or);
+Ip_u2u1u3(_ori);
Ip_u2s3u1(_pref);
Ip_0(_rfe);
+Ip_u2u1u3(_rotr);
Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
Ip_u2u1u3(_sll);
Ip_u2u1u3(_sra);
Ip_u2u1u3(_srl);
-Ip_u2u1u3(_rotr);
Ip_u3u1u2(_subu);
Ip_u2s3u1(_sw);
+Ip_u1(_syscall);
Ip_0(_tlbp);
Ip_0(_tlbr);
Ip_0(_tlbwi);
Ip_0(_tlbwr);
Ip_u3u1u2(_xor);
Ip_u2u1u3(_xori);
-Ip_u2u1msbu3(_dins);
-Ip_u2u1msbu3(_dinsm);
-Ip_u1(_syscall);
-Ip_u1u2s3(_bbit0);
-Ip_u1u2s3(_bbit1);
-Ip_u3u1u2(_lwx);
-Ip_u3u1u2(_ldx);
+
/* Handle labels. */
struct uasm_label {
@@ -145,37 +147,37 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
/* convenience macros for instructions */
#ifdef CONFIG_64BIT
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
# define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
+# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh)
# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh)
-# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd)
# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off)
-# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off)
#else
+# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
+# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
+# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
# define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off)
-# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
+# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
+# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
+# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
+# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
+# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
# define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh)
# define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh)
# define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
# define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh)
-# define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh)
-# define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd)
-# define UASM_i_MTC0(buf, rt, rd...) uasm_i_mtc0(buf, rt, rd)
-# define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val)
-# define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd)
# define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd)
-# define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off)
-# define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off)
-# define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd)
+# define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off)
#endif
#define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off)
@@ -183,19 +185,10 @@ static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
#define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off)
#define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off)
#define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off)
+#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
#define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b)
#define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0)
#define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
-#define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
-
-static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
- unsigned int a2, unsigned int a3)
-{
- if (a3 < 32)
- uasm_i_dsrl(p, a1, a2, a3);
- else
- uasm_i_dsrl32(p, a1, a2, a3 - 32);
-}
static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
@@ -215,6 +208,15 @@ static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
uasm_i_dsll32(p, a1, a2, a3 - 32);
}
+static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
+ unsigned int a2, unsigned int a3)
+{
+ if (a3 < 32)
+ uasm_i_dsrl(p, a1, a2, a3);
+ else
+ uasm_i_dsrl32(p, a1, a2, a3 - 32);
+}
+
/* Handle relocations. */
struct uasm_reloc {
u32 *addr;
@@ -234,16 +236,16 @@ void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
/* Convenience functions for labeled branches. */
-void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
+void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid);
+void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
+ unsigned int bit, int lid);
void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
unsigned int reg2, int lid);
void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
-void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid);
-void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
- unsigned int bit, int lid);
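
The reordered UASM_i_* wrappers select the 32-bit or 64-bit instruction emitter at compile time, and the *_safe helpers split shift amounts of 32 or more into the *32 opcode forms because the MIPS shift-immediate field is only 5 bits wide. A small user-space model of the uasm_i_dsrl_safe() selection logic; it only mimics the resulting shift, not instruction emission:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of uasm_i_dsrl_safe(): shifts of 0-31 would emit dsrl, shifts of
 * 32-63 would emit dsrl32 with (shift - 32).  Only the arithmetic result
 * is modelled here.
 */
static uint64_t dsrl_safe(uint64_t val, unsigned int shift)
{
	if (shift < 32)
		return val >> shift;			/* dsrl  */
	return (val >> 32) >> (shift - 32);		/* dsrl32 */
}

int main(void)
{
	uint64_t v = 0x123456789abcdef0ull;		/* arbitrary test value */

	printf("%016llx\n", (unsigned long long)dsrl_safe(v, 4));
	printf("%016llx\n", (unsigned long long)dsrl_safe(v, 36));
	return 0;
}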
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index d8dad5340ea3..bebbde01be92 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -1034,7 +1034,6 @@
#ifndef __ASSEMBLY__
#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 9a91fe9de696..9a3d9de4d04e 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -140,6 +140,7 @@ static void qi_lb60_nand_ident(struct platform_device *pdev,
static struct jz_nand_platform_data qi_lb60_nand_pdata = {
.ident_callback = qi_lb60_nand_ident,
.busy_gpio = 94,
+ .banks = { 1 },
};
/* Keyboard*/
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 10929e2bc6d8..e342ed4cbd43 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -157,11 +157,29 @@ static struct resource jz4740_nand_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .name = "bank",
+ .name = "bank1",
.start = 0x18000000,
.end = 0x180C0000 - 1,
.flags = IORESOURCE_MEM,
},
+ {
+ .name = "bank2",
+ .start = 0x14000000,
+ .end = 0x140C0000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "bank3",
+ .start = 0x0C000000,
+ .end = 0x0C0C0000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "bank4",
+ .start = 0x08000000,
+ .end = 0x080C0000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
};
struct platform_device jz4740_nand_device = {
diff --git a/arch/mips/jz4740/reset.c b/arch/mips/jz4740/reset.c
index 5f1fb95c0d0d..6c0da5afcf17 100644
--- a/arch/mips/jz4740/reset.c
+++ b/arch/mips/jz4740/reset.c
@@ -21,6 +21,9 @@
#include <asm/mach-jz4740/base.h>
#include <asm/mach-jz4740/timer.h>
+#include "reset.h"
+#include "clock.h"
+
static void jz4740_halt(void)
{
while (1) {
@@ -53,21 +56,57 @@ static void jz4740_restart(char *command)
jz4740_halt();
}
-#define JZ_REG_RTC_CTRL 0x00
-#define JZ_REG_RTC_HIBERNATE 0x20
+#define JZ_REG_RTC_CTRL 0x00
+#define JZ_REG_RTC_HIBERNATE 0x20
+#define JZ_REG_RTC_WAKEUP_FILTER 0x24
+#define JZ_REG_RTC_RESET_COUNTER 0x28
-#define JZ_RTC_CTRL_WRDY BIT(7)
+#define JZ_RTC_CTRL_WRDY BIT(7)
+#define JZ_RTC_WAKEUP_FILTER_MASK 0x0000FFE0
+#define JZ_RTC_RESET_COUNTER_MASK 0x00000FE0
-static void jz4740_power_off(void)
+static inline void jz4740_rtc_wait_ready(void __iomem *rtc_base)
{
- void __iomem *rtc_base = ioremap(JZ4740_RTC_BASE_ADDR, 0x24);
uint32_t ctrl;
do {
ctrl = readl(rtc_base + JZ_REG_RTC_CTRL);
} while (!(ctrl & JZ_RTC_CTRL_WRDY));
+}
+static void jz4740_power_off(void)
+{
+ void __iomem *rtc_base = ioremap(JZ4740_RTC_BASE_ADDR, 0x38);
+ unsigned long wakeup_filter_ticks;
+ unsigned long reset_counter_ticks;
+
+ /*
+ * Set minimum wakeup pin assertion time: 100 ms.
+ * Range is 0 to 2 sec if RTC is clocked at 32 kHz.
+ */
+ wakeup_filter_ticks = (100 * jz4740_clock_bdata.rtc_rate) / 1000;
+ if (wakeup_filter_ticks < JZ_RTC_WAKEUP_FILTER_MASK)
+ wakeup_filter_ticks &= JZ_RTC_WAKEUP_FILTER_MASK;
+ else
+ wakeup_filter_ticks = JZ_RTC_WAKEUP_FILTER_MASK;
+ jz4740_rtc_wait_ready(rtc_base);
+ writel(wakeup_filter_ticks, rtc_base + JZ_REG_RTC_WAKEUP_FILTER);
+
+ /*
+ * Set reset pin low-level assertion time after wakeup: 60 ms.
+ * Range is 0 to 125 ms if RTC is clocked at 32 kHz.
+ */
+ reset_counter_ticks = (60 * jz4740_clock_bdata.rtc_rate) / 1000;
+ if (reset_counter_ticks < JZ_RTC_RESET_COUNTER_MASK)
+ reset_counter_ticks &= JZ_RTC_RESET_COUNTER_MASK;
+ else
+ reset_counter_ticks = JZ_RTC_RESET_COUNTER_MASK;
+ jz4740_rtc_wait_ready(rtc_base);
+ writel(reset_counter_ticks, rtc_base + JZ_REG_RTC_RESET_COUNTER);
+
+ jz4740_rtc_wait_ready(rtc_base);
writel(1, rtc_base + JZ_REG_RTC_HIBERNATE);
+
jz4740_halt();
}
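
The new power-off path converts millisecond delays into RTC ticks and clamps them to the width of the wakeup-filter and reset-counter fields. A minimal user-space sketch of that arithmetic, assuming a 32768 Hz RTC clock in place of jz4740_clock_bdata.rtc_rate (the masks are copied from the hunk above):

#include <stdio.h>

#define JZ_RTC_WAKEUP_FILTER_MASK	0x0000FFE0
#define JZ_RTC_RESET_COUNTER_MASK	0x00000FE0

/* Convert a delay in milliseconds to RTC ticks, clamped to the field mask. */
static unsigned long ms_to_rtc_ticks(unsigned long ms, unsigned long rtc_rate,
				     unsigned long mask)
{
	unsigned long ticks = (ms * rtc_rate) / 1000;

	if (ticks < mask)
		return ticks & mask;	/* low bits are not part of the field */
	return mask;
}

int main(void)
{
	unsigned long rtc_rate = 32768;	/* assumed 32 kHz RTC clock */

	printf("wakeup filter: 0x%lx ticks\n",
	       ms_to_rtc_ticks(100, rtc_rate, JZ_RTC_WAKEUP_FILTER_MASK));
	printf("reset counter: 0x%lx ticks\n",
	       ms_to_rtc_ticks(60, rtc_rate, JZ_RTC_RESET_COUNTER_MASK));
	return 0;
}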
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index f4630e1082ab..1b51046191e8 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -190,6 +190,7 @@ void __init check_wait(void)
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_JZRISC:
+ case CPU_LOONGSON1:
case CPU_XLR:
case CPU_XLP:
cpu_wait = r4k_wait;
@@ -330,6 +331,154 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
#endif
}
+static char unknown_isa[] __cpuinitdata = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+ int isa;
+
+ config0 = read_c0_config();
+
+ if (((config0 & MIPS_CONF_MT) >> 7) == 1)
+ c->options |= MIPS_CPU_TLB;
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ c->isa_level = MIPS_CPU_ISA_M32R1;
+ break;
+ case 1:
+ c->isa_level = MIPS_CPU_ISA_M32R2;
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ case 2:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ c->isa_level = MIPS_CPU_ISA_M64R1;
+ break;
+ case 1:
+ c->isa_level = MIPS_CPU_ISA_M64R2;
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ default:
+ goto unknown;
+ }
+
+ return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
+
+ config1 = read_c0_config1();
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_WR)
+ c->options |= MIPS_CPU_WATCH;
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
+ c->options |= MIPS_CPU_EJTAG;
+ if (config1 & MIPS_CONF1_FP) {
+ c->options |= MIPS_CPU_FPU;
+ c->options |= MIPS_CPU_32FPR;
+ }
+ if (cpu_has_tlb)
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM)
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+
+ config4 = read_c0_config4();
+
+ if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
+ && cpu_has_tlb)
+ c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+
+ c->kscratch_mask = (config4 >> 16) & 0xff;
+
+ return config4 & MIPS_CONF_M;
+}
+
+static void __cpuinit decode_configs(struct cpuinfo_mips *c)
+{
+ int ok;
+
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
+ c->scache.flags = MIPS_CACHE_NOT_PRESENT;
+
+ ok = decode_config0(c); /* Read Config registers. */
+ BUG_ON(!ok); /* Arch spec violation! */
+ if (ok)
+ ok = decode_config1(c);
+ if (ok)
+ ok = decode_config2(c);
+ if (ok)
+ ok = decode_config3(c);
+ if (ok)
+ ok = decode_config4(c);
+
+ mips_probe_watch_registers(c);
+
+ if (cpu_has_mips_r2)
+ c->core = read_c0_ebase() & 0x3ff;
+}
+
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
@@ -638,155 +787,19 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
MIPS_CPU_32FPR;
c->tlbsize = 64;
break;
- }
-}
-
-static char unknown_isa[] __cpuinitdata = KERN_ERR \
- "Unsupported ISA type, c0.config0: %d.";
+ case PRID_IMP_LOONGSON1:
+ decode_configs(c);
-static inline unsigned int decode_config0(struct cpuinfo_mips *c)
-{
- unsigned int config0;
- int isa;
+ c->cputype = CPU_LOONGSON1;
- config0 = read_c0_config();
-
- if (((config0 & MIPS_CONF_MT) >> 7) == 1)
- c->options |= MIPS_CPU_TLB;
- isa = (config0 & MIPS_CONF_AT) >> 13;
- switch (isa) {
- case 0:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M32R1;
- break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M32R2;
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ __cpu_name[cpu] = "Loongson 1B";
break;
- default:
- goto unknown;
}
- break;
- case 2:
- switch ((config0 & MIPS_CONF_AR) >> 10) {
- case 0:
- c->isa_level = MIPS_CPU_ISA_M64R1;
- break;
- case 1:
- c->isa_level = MIPS_CPU_ISA_M64R2;
- break;
- default:
- goto unknown;
- }
- break;
- default:
- goto unknown;
- }
-
- return config0 & MIPS_CONF_M;
-
-unknown:
- panic(unknown_isa, config0);
-}
-static inline unsigned int decode_config1(struct cpuinfo_mips *c)
-{
- unsigned int config1;
-
- config1 = read_c0_config1();
-
- if (config1 & MIPS_CONF1_MD)
- c->ases |= MIPS_ASE_MDMX;
- if (config1 & MIPS_CONF1_WR)
- c->options |= MIPS_CPU_WATCH;
- if (config1 & MIPS_CONF1_CA)
- c->ases |= MIPS_ASE_MIPS16;
- if (config1 & MIPS_CONF1_EP)
- c->options |= MIPS_CPU_EJTAG;
- if (config1 & MIPS_CONF1_FP) {
- c->options |= MIPS_CPU_FPU;
- c->options |= MIPS_CPU_32FPR;
+ break;
}
- if (cpu_has_tlb)
- c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
-
- return config1 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config2(struct cpuinfo_mips *c)
-{
- unsigned int config2;
-
- config2 = read_c0_config2();
-
- if (config2 & MIPS_CONF2_SL)
- c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
-
- return config2 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config3(struct cpuinfo_mips *c)
-{
- unsigned int config3;
-
- config3 = read_c0_config3();
-
- if (config3 & MIPS_CONF3_SM)
- c->ases |= MIPS_ASE_SMARTMIPS;
- if (config3 & MIPS_CONF3_DSP)
- c->ases |= MIPS_ASE_DSP;
- if (config3 & MIPS_CONF3_VINT)
- c->options |= MIPS_CPU_VINT;
- if (config3 & MIPS_CONF3_VEIC)
- c->options |= MIPS_CPU_VEIC;
- if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
- if (config3 & MIPS_CONF3_ULRI)
- c->options |= MIPS_CPU_ULRI;
-
- return config3 & MIPS_CONF_M;
-}
-
-static inline unsigned int decode_config4(struct cpuinfo_mips *c)
-{
- unsigned int config4;
-
- config4 = read_c0_config4();
-
- if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
- && cpu_has_tlb)
- c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
-
- c->kscratch_mask = (config4 >> 16) & 0xff;
-
- return config4 & MIPS_CONF_M;
-}
-
-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
-{
- int ok;
-
- /* MIPS32 or MIPS64 compliant CPU. */
- c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
-
- c->scache.flags = MIPS_CACHE_NOT_PRESENT;
-
- ok = decode_config0(c); /* Read Config registers. */
- BUG_ON(!ok); /* Arch spec violation! */
- if (ok)
- ok = decode_config1(c);
- if (ok)
- ok = decode_config2(c);
- if (ok)
- ok = decode_config3(c);
- if (ok)
- ok = decode_config4(c);
-
- mips_probe_watch_registers(c);
-
- if (cpu_has_mips_r2)
- c->core = read_c0_ebase() & 0x3ff;
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
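
The relocated decode_configs() walks the Config register chain: each decode_configN() returns that register's M bit, which says whether ConfigN+1 is implemented. A standalone sketch of the Config0 decoding, with field masks assumed to match the kernel's <asm/mipsregs.h> (the shift amounts mirror decode_config0() above) and a hypothetical register value:

#include <stdio.h>

/* Field masks assumed to match <asm/mipsregs.h>. */
#define MIPS_CONF_M	(1u << 31)	/* Config1 register present */
#define MIPS_CONF_AT	(3u << 13)	/* architecture type */
#define MIPS_CONF_AR	(7u << 10)	/* architecture revision */
#define MIPS_CONF_MT	(7u << 7)	/* MMU type */

static const char *isa_name(unsigned int config0)
{
	unsigned int at = (config0 & MIPS_CONF_AT) >> 13;
	unsigned int ar = (config0 & MIPS_CONF_AR) >> 10;

	if (at == 0)
		return ar ? "MIPS32 R2" : "MIPS32 R1";
	if (at == 2)
		return ar ? "MIPS64 R2" : "MIPS64 R1";
	return "unknown";
}

int main(void)
{
	unsigned int config0 = 0x80004483;	/* hypothetical Config0 value */

	printf("ISA:             %s\n", isa_name(config0));
	printf("TLB MMU:         %s\n",
	       ((config0 & MIPS_CONF_MT) >> 7) == 1 ? "yes" : "no");
	printf("Config1 present: %s\n",
	       (config0 & MIPS_CONF_M) ? "yes" : "no");
	return 0;
}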
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile
index c3479a432efe..05a5715ee38c 100644
--- a/arch/mips/kernel/cpufreq/Makefile
+++ b/arch/mips/kernel/cpufreq/Makefile
@@ -2,4 +2,4 @@
# Makefile for the Linux/MIPS cpufreq.
#
-obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o loongson2_clock.o
+obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index ae5db206347c..e7c98e2b78b6 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -19,7 +19,7 @@
#include <asm/clock.h>
-#include <loongson.h>
+#include <asm/mach-loongson/loongson.h>
static uint nowait;
@@ -181,6 +181,25 @@ static struct platform_driver platform_driver = {
.id_table = platform_device_ids,
};
+/*
+ * This is the simple version of the Loongson-2 wait.  We may need to do
+ * this in interrupt-disabled context.
+ */
+
+static DEFINE_SPINLOCK(loongson2_wait_lock);
+
+static void loongson2_cpu_wait(void)
+{
+ unsigned long flags;
+ u32 cpu_freq;
+
+ spin_lock_irqsave(&loongson2_wait_lock, flags);
+ cpu_freq = LOONGSON_CHIPCFG0;
+ LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+}
+
static int __init cpufreq_init(void)
{
int ret;
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index 84d0639e4580..b77f56bbb477 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -323,7 +323,7 @@ static void sp_cleanup(void)
fdt = files_fdtable(files);
for (;;) {
unsigned long set;
- i = j * __NFDBITS;
+ i = j * BITS_PER_LONG;
if (i >= fdt->max_fds)
break;
set = fdt->open_fds[j++];
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de3..4f8c3cba8c0c 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
Elf_Addr value;
};
-static struct mips_hi16 *mips_hi16_list;
-
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
n->addr = (Elf_Addr *)location;
n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
return 0;
}
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+ struct mips_hi16 *next;
+
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+}
+
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
+ struct mips_hi16 *l;
Elf_Addr val, vallo;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
l = next;
}
- mips_hi16_list = NULL;
+ me->arch.r_mips_hi16_list = NULL;
}
/*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
out_danger:
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
+
pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return res;
}
+ /*
+ * Normally the hi16 list should be deallocated at this point. A
+ * malformed binary however could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation. In that
+ * case, free up the list and return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
+
+ return -ENOEXEC;
+ }
+
return 0;
}
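
R_MIPS_HI16 relocations are queued on r_mips_hi16_list because the matching R_MIPS_LO16 addend is sign-extended, so the value patched into the LUI may need a carry correction; a HI16 left on the list with no LO16 partner is therefore treated above as a malformed object and the chain is freed. A standalone illustration of the carry-corrected %hi/%lo split (the address is hypothetical; this is not the kernel relocation code itself):

#include <stdint.h>
#include <stdio.h>

/*
 * Carry-corrected %hi/%lo split for a MIPS lui/addiu pair.  addiu sign-
 * extends its 16-bit immediate, so the value placed in the lui must absorb
 * the carry; this is why the HI16 fixups wait for their LO16 partner.
 */
static void split_hi_lo(uint32_t addr, uint16_t *hi, int16_t *lo)
{
	*lo = (int16_t)(addr & 0xffff);
	*hi = (uint16_t)((addr - (uint32_t)(int32_t)*lo) >> 16);
}

int main(void)
{
	uint32_t addr = 0x12349abc;	/* hypothetical symbol address */
	uint16_t hi;
	int16_t lo;

	split_hi_lo(addr, &hi, &lo);
	printf("lui   $t0, 0x%04x\n", hi);
	printf("addiu $t0, $t0, %d\n", lo);
	printf("check: 0x%08x\n", (unsigned)(((uint32_t)hi << 16) + lo));
	return 0;
}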
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index eb5e394a4650..2f28d3b55687 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1559,6 +1559,11 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
+ case CPU_LOONGSON1:
+ mipspmu.name = "mips/loongson1";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index f11b2bbb826d..028f6f837ef9 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -35,16 +35,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
return add_memory_region(base, size, BOOT_MEM_RAM);
}
-int __init reserve_mem_mach(unsigned long addr, unsigned long size)
-{
- return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
-}
-
-void __init free_mem_mach(unsigned long addr, unsigned long size)
-{
- return free_bootmem(addr, size);
-}
-
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
@@ -77,25 +67,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL);
}
-void __init device_tree_init(void)
-{
- unsigned long base, size;
-
- if (!initial_boot_params)
- return;
-
- base = virt_to_phys((void *)initial_boot_params);
- size = be32_to_cpu(initial_boot_params->totalsize);
-
- /* Before we do anything, lets reserve the dt blob */
- reserve_mem_mach(base, size);
-
- unflatten_device_tree();
-
- /* free the space reserved for the dt blob */
- free_mem_mach(base, size);
-}
-
void __init __dt_setup_arch(struct boot_param_header *bph)
{
if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 1268392f1d27..9005bf9fb859 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -102,7 +102,9 @@ asmlinkage __cpuinit void start_secondary(void)
#ifdef CONFIG_MIPS_MT_SMTC
/* Only do cpu_probe for first TC of CPU */
- if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+ if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
+ __cpu_name[smp_processor_id()] = __cpu_name[0];
+ else
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
@@ -128,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_set(cpu, cpu_callin_map);
- synchronise_count_slave();
+ synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
@@ -171,7 +173,6 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
- synchronise_count_master();
}
/* called from main before smp_init() */
@@ -204,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
+ synchronise_count_master(cpu);
return 0;
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 15b5f3cfd20c..1d47843d3cc0 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -86,6 +86,13 @@ struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
+/*
+ * Number of FPU contexts for each VPE
+ */
+
+static int smtc_nconf1[MAX_SMTC_VPES];
+
+
/* Forward declarations */
void ipi_decode(struct smtc_ipi *);
@@ -174,9 +181,9 @@ static int __init tintq(char *str)
__setup("tintq=", tintq);
-static int imstuckcount[2][8];
+static int imstuckcount[MAX_SMTC_VPES][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-static int vpemask[2][8] = {
+static int vpemask[MAX_SMTC_VPES][8] = {
{0, 0, 1, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 1}
};
@@ -331,6 +338,22 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
+ static int cp1contexts[MAX_SMTC_VPES];
+
+ /*
+ * Make a local copy of the available FPU contexts in order
+ * to keep track of TCs that can have one.
+ */
+ if (tc == 1)
+ {
+ /*
+ * FIXME: Multi-core SMTC hasn't been tested and the
+ * maximum number of VPEs may change.
+ */
+ cp1contexts[0] = smtc_nconf1[0] - 1;
+ cp1contexts[1] = smtc_nconf1[1];
+ }
+
settc(tc);
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
@@ -343,22 +366,29 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
* an active IPI queue.
*/
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
- /* Bind tc to vpe */
+
+ /* Bind TC to VPE. */
write_tc_c0_tcbind(vpe);
+
/* In general, all TCs should have the same cpu_data indications. */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
- /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
- if (cpu_data[0].cputype == CPU_34K ||
- cpu_data[0].cputype == CPU_1004K)
+
+	/* Check to see if there is an FPU context available for this TC. */
+ if (!cp1contexts[vpe])
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
+ else
+ cp1contexts[vpe]--;
+
+ /* Store the TC and VPE into the cpu_data structure. */
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
- /* Multi-core SMTC hasn't been tested, but be prepared */
+
+ /* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
/*
- * Tweak to get Count registes in as close a sync as possible. The
+ * Tweak to get Count registers synced as closely as possible. The
* value seems good for 34K-class cores.
*/
@@ -466,6 +496,24 @@ void smtc_prepare_cpus(int cpus)
smtc_configure_tlb();
for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
+ /* Get number of CP1 contexts for each VPE. */
+ if (tc == 0)
+ {
+ /*
+ * Do not call settc() for TC0 or the FPU context
+ * value will be incorrect. Besides, we know that
+ * we are TC0 anyway.
+ */
+ smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
+ VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
+ if (nvpe == 2)
+ {
+ settc(1);
+ smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
+ VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
+ settc(0);
+ }
+ }
if (tcpervpe[vpe] == 0)
continue;
if (vpe != 0)
@@ -479,6 +527,18 @@ void smtc_prepare_cpus(int cpus)
*/
if (tc != 0) {
smtc_tc_setup(vpe, tc, cpu);
+ if (vpe != 0) {
+ /*
+ * Set MVP bit (possibly again). Do it
+ * here to catch CPUs that have no TCs
+ * bound to the VPE at reset. In that
+ * case, a TC must be bound to the VPE
+ * before we can set VPEControl[MVP]
+ */
+ write_vpe_c0_vpeconf0(
+ read_vpe_c0_vpeconf0() |
+ VPECONF0_MVP);
+ }
cpu++;
}
printk(" %d", tc);
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411fd..7f1eca3858de 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
unsigned int initcount;
- int nslaves;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
return;
#endif
- printk(KERN_INFO "Synchronize counters across %u CPUs: ",
- num_online_cpus());
+ printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
* Notify the slaves that it's time to start
*/
atomic_set(&count_reference, read_c0_count());
- atomic_set(&count_start_flag, 1);
+ atomic_set(&count_start_flag, cpu);
smp_wmb();
/* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
* two CPUs.
*/
- nslaves = num_online_cpus()-1;
for (i = 0; i < NR_LOOPS; i++) {
- /* slaves loop on '!= ncpus' */
- while (atomic_read(&count_count_start) != nslaves)
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
/*
* Wait for all slaves to leave the synchronization point:
*/
- while (atomic_read(&count_count_stop) != nslaves)
+ while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
+ atomic_set(&count_start_flag, 0);
local_irq_restore(flags);
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
printk("done.\n");
}
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
{
int i;
unsigned int initcount;
- int ncpus;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
* so we first wait for the master to say everyone is ready
*/
- while (!atomic_read(&count_start_flag))
+ while (atomic_read(&count_start_flag) != cpu)
mb();
/* Count will be initialised to next expire for all CPU's */
initcount = atomic_read(&count_reference);
- ncpus = num_online_cpus();
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
- while (atomic_read(&count_count_start) != ncpus)
+ while (atomic_read(&count_count_start) != 2)
mb();
/*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
- while (atomic_read(&count_count_stop) != ncpus)
+ while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
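
After this change the Count synchronisation is a one-master/one-slave rendezvous: the master publishes count_reference and the target CPU number in count_start_flag, then the two sides meet NR_LOOPS times through the count_count_start/stop counters, which is why the thresholds become the literals 1 and 2. A toy pthread model of that handshake (build with -pthread); the published counter value is just a stand-in for read_c0_count():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 5

static atomic_int count_start_flag, count_count_start, count_count_stop;
static atomic_uint count_reference;
static unsigned int slave_count;

static void *slave(void *arg)
{
	int cpu = *(int *)arg, i;

	while (atomic_load(&count_start_flag) != cpu)	/* wait for master */
		;
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&count_count_start, 1);
		while (atomic_load(&count_count_start) != 2)
			;
		if (i == NR_LOOPS - 1)			/* last pass: latch counter */
			slave_count = atomic_load(&count_reference);
		atomic_fetch_add(&count_count_stop, 1);
		while (atomic_load(&count_count_stop) != 2)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpu = 1, i;

	pthread_create(&t, NULL, slave, &cpu);
	atomic_store(&count_reference, 123456u);	/* read_c0_count() stand-in */
	atomic_store(&count_start_flag, cpu);		/* wake this one slave */
	for (i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&count_count_start) != 1)	/* slave arrived */
			;
		atomic_store(&count_count_stop, 0);
		atomic_fetch_add(&count_count_start, 1);	/* release slave */
		while (atomic_load(&count_count_stop) != 1)	/* slave done */
			;
		atomic_store(&count_count_start, 0);
		atomic_fetch_add(&count_count_stop, 1);		/* release slave */
	}
	atomic_store(&count_start_flag, 0);
	pthread_join(t, NULL);
	printf("slave synced to %u\n", slave_count);
	return 0;
}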
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index c3c293543703..9be3df1fa8a4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1253,6 +1253,7 @@ static inline void parity_protection_init(void)
case CPU_5KC:
case CPU_5KE:
+ case CPU_LOONGSON1:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index d3bcc33f4699..ce2f129b081f 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -135,6 +135,11 @@ void clk_deactivate(struct clk *clk)
}
EXPORT_SYMBOL(clk_deactivate);
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+ return NULL;
+}
+
static inline u32 get_counter_resolution(void)
{
u32 res;
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index d185e8477fdf..6cfd6117fbfd 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,7 +8,10 @@
#include <linux/export.h>
#include <linux/clk.h>
+#include <linux/bootmem.h>
#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+
#include <asm/bootinfo.h>
#include <asm/time.h>
@@ -70,6 +73,25 @@ void __init plat_mem_setup(void)
__dt_setup_arch(&__dtb_start);
}
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+ /* Before we do anything, lets reserve the dt blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_bootmem(base, size);
+}
+
void __init prom_init(void)
{
 	/* call the soc specific detection code and get it to fill soc_info */
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 83780f7c842b..befbb760ab76 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -20,10 +20,12 @@
/* clock control register */
#define CGU_IFCCR 0x0018
+#define CGU_IFCCR_VR9 0x0024
/* system clock register */
#define CGU_SYS 0x0010
/* pci control register */
#define CGU_PCICR 0x0034
+#define CGU_PCICR_VR9 0x0038
/* ephy configuration register */
#define CGU_EPHY 0x10
/* power control register */
@@ -80,6 +82,9 @@ static void __iomem *pmu_membase;
void __iomem *ltq_cgu_membase;
void __iomem *ltq_ebu_membase;
+static u32 ifccr = CGU_IFCCR;
+static u32 pcicr = CGU_PCICR;
+
/* legacy function kept alive to ease clkdev transition */
void ltq_pmu_enable(unsigned int module)
{
@@ -103,14 +108,14 @@ EXPORT_SYMBOL(ltq_pmu_disable);
/* enable a hw clock */
static int cgu_enable(struct clk *clk)
{
- ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr);
return 0;
}
/* disable a hw clock */
static void cgu_disable(struct clk *clk)
{
- ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr);
}
/* enable a clock gate */
@@ -138,22 +143,22 @@ static void pmu_disable(struct clk *clk)
/* the pci enable helper */
static int pci_enable(struct clk *clk)
{
- unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+ unsigned int val = ltq_cgu_r32(ifccr);
/* set bus clock speed */
if (of_machine_is_compatible("lantiq,ar9")) {
- ifccr &= ~0x1f00000;
+ val &= ~0x1f00000;
if (clk->rate == CLOCK_33M)
- ifccr |= 0xe00000;
+ val |= 0xe00000;
else
- ifccr |= 0x700000; /* 62.5M */
+ val |= 0x700000; /* 62.5M */
} else {
- ifccr &= ~0xf00000;
+ val &= ~0xf00000;
if (clk->rate == CLOCK_33M)
- ifccr |= 0x800000;
+ val |= 0x800000;
else
- ifccr |= 0x400000; /* 62.5M */
+ val |= 0x400000; /* 62.5M */
}
- ltq_cgu_w32(ifccr, CGU_IFCCR);
+ ltq_cgu_w32(val, ifccr);
pmu_enable(clk);
return 0;
}
@@ -161,18 +166,16 @@ static int pci_enable(struct clk *clk)
/* enable the external clock as a source */
static int pci_ext_enable(struct clk *clk)
{
- ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
- CGU_IFCCR);
- ltq_cgu_w32((1 << 30), CGU_PCICR);
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr);
+ ltq_cgu_w32((1 << 30), pcicr);
return 0;
}
/* disable the external clock as a source */
static void pci_ext_disable(struct clk *clk)
{
- ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
- CGU_IFCCR);
- ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr);
+ ltq_cgu_w32((1 << 31) | (1 << 30), pcicr);
}
/* enable a clockout source */
@@ -184,11 +187,11 @@ static int clkout_enable(struct clk *clk)
for (i = 0; i < 4; i++) {
if (clk->rates[i] == clk->rate) {
int shift = 14 - (2 * clk->module);
- unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+ unsigned int val = ltq_cgu_r32(ifccr);
- ifccr &= ~(3 << shift);
- ifccr |= i << shift;
- ltq_cgu_w32(ifccr, CGU_IFCCR);
+ val &= ~(3 << shift);
+ val |= i << shift;
+ ltq_cgu_w32(val, ifccr);
return 0;
}
}
@@ -336,8 +339,12 @@ void __init ltq_soc_init(void)
clkdev_add_clkout();
/* add the soc dependent clocks */
- if (!of_machine_is_compatible("lantiq,vr9"))
+ if (of_machine_is_compatible("lantiq,vr9")) {
+ ifccr = CGU_IFCCR_VR9;
+ pcicr = CGU_PCICR_VR9;
+ } else {
clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
+ }
if (!of_machine_is_compatible("lantiq,ase")) {
clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 2a7c74fc15fc..399a50a541d4 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for MIPS-specific library files..
#
-lib-y += csum_partial.o delay.o memcpy.o memcpy-inatomic.o memset.o \
+lib-y += csum_partial.o delay.o memcpy.o memset.o \
strlen_user.o strncpy_user.o strnlen_user.o uncached.o
obj-y += iomap.o
diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S
deleted file mode 100644
index 68853a038d3f..000000000000
--- a/arch/mips/lib/memcpy-inatomic.S
+++ /dev/null
@@ -1,451 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Unified implementation of memcpy, memmove and the __copy_user backend.
- *
- * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
- * Copyright (C) 2002 Broadcom, Inc.
- * memcpy/copy_user author: Mark Vandevoorde
- * Copyright (C) 2007 Maciej W. Rozycki
- *
- * Mnemonic names for arguments to memcpy/__copy_user
- */
-
-/*
- * Hack to resolve longstanding prefetch issue
- *
- * Prefetching may be fatal on some systems if we're prefetching beyond the
- * end of memory on some systems. It's also a seriously bad idea on non
- * dma-coherent systems.
- */
-#ifdef CONFIG_DMA_NONCOHERENT
-#undef CONFIG_CPU_HAS_PREFETCH
-#endif
-#ifdef CONFIG_MIPS_MALTA
-#undef CONFIG_CPU_HAS_PREFETCH
-#endif
-
-#include <asm/asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/regdef.h>
-
-#define dst a0
-#define src a1
-#define len a2
-
-/*
- * Spec
- *
- * memcpy copies len bytes from src to dst and sets v0 to dst.
- * It assumes that
- * - src and dst don't overlap
- * - src is readable
- * - dst is writable
- * memcpy uses the standard calling convention
- *
- * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
- * the number of uncopied bytes due to an exception caused by a read or write.
- * __copy_user assumes that src and dst don't overlap, and that the call is
- * implementing one of the following:
- * copy_to_user
- * - src is readable (no exceptions when reading src)
- * copy_from_user
- * - dst is writable (no exceptions when writing dst)
- * __copy_user uses a non-standard calling convention; see
- * include/asm-mips/uaccess.h
- *
- * When an exception happens on a load, the handler must
- # ensure that all of the destination buffer is overwritten to prevent
- * leaking information to user mode programs.
- */
-
-/*
- * Implementation
- */
-
-/*
- * The exception handler for loads requires that:
- * 1- AT contain the address of the byte just past the end of the source
- * of the copy,
- * 2- src_entry <= src < AT, and
- * 3- (dst - src) == (dst_entry - src_entry),
- * The _entry suffix denotes values when __copy_user was called.
- *
- * (1) is set up up by uaccess.h and maintained by not writing AT in copy_user
- * (2) is met by incrementing src by the number of bytes copied
- * (3) is met by not doing loads between a pair of increments of dst and src
- *
- * The exception handlers for stores adjust len (if necessary) and return.
- * These handlers do not need to overwrite any data.
- *
- * For __rmemcpy and memmove an exception is always a kernel bug, therefore
- * they're not protected.
- */
-
-#define EXC(inst_reg,addr,handler) \
-9: inst_reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
- .previous
-
-/*
- * Only on the 64-bit kernel we can made use of 64-bit registers.
- */
-#ifdef CONFIG_64BIT
-#define USE_DOUBLE
-#endif
-
-#ifdef USE_DOUBLE
-
-#define LOAD ld
-#define LOADL ldl
-#define LOADR ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE sd
-#define ADD daddu
-#define SUB dsubu
-#define SRL dsrl
-#define SRA dsra
-#define SLL dsll
-#define SLLV dsllv
-#define SRLV dsrlv
-#define NBYTES 8
-#define LOG_NBYTES 3
-
-/*
- * As we are sharing code base with the mips32 tree (which use the o32 ABI
- * register definitions). We need to redefine the register definitions from
- * the n64 ABI register naming to the o32 ABI register naming.
- */
-#undef t0
-#undef t1
-#undef t2
-#undef t3
-#define t0 $8
-#define t1 $9
-#define t2 $10
-#define t3 $11
-#define t4 $12
-#define t5 $13
-#define t6 $14
-#define t7 $15
-
-#else
-
-#define LOAD lw
-#define LOADL lwl
-#define LOADR lwr
-#define STOREL swl
-#define STORER swr
-#define STORE sw
-#define ADD addu
-#define SUB subu
-#define SRL srl
-#define SLL sll
-#define SRA sra
-#define SLLV sllv
-#define SRLV srlv
-#define NBYTES 4
-#define LOG_NBYTES 2
-
-#endif /* USE_DOUBLE */
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#define LDFIRST LOADR
-#define LDREST LOADL
-#define STFIRST STORER
-#define STREST STOREL
-#define SHIFT_DISCARD SLLV
-#else
-#define LDFIRST LOADL
-#define LDREST LOADR
-#define STFIRST STOREL
-#define STREST STORER
-#define SHIFT_DISCARD SRLV
-#endif
-
-#define FIRST(unit) ((unit)*NBYTES)
-#define REST(unit) (FIRST(unit)+NBYTES-1)
-#define UNIT(unit) FIRST(unit)
-
-#define ADDRMASK (NBYTES-1)
-
- .text
- .set noreorder
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
- .set noat
-#else
- .set at=v1
-#endif
-
-/*
- * A combined memcpy/__copy_user
- * __copy_user sets len to 0 for success; else to an upper bound of
- * the number of uncopied bytes.
- * memcpy sets v0 to dst.
- */
- .align 5
-LEAF(__copy_user_inatomic)
- /*
- * Note: dst & src may be unaligned, len may be 0
- * Temps
- */
-#define rem t8
-
- /*
- * The "issue break"s below are very approximate.
- * Issue delays for dcache fills will perturb the schedule, as will
- * load queue full replay traps, etc.
- *
- * If len < NBYTES use byte operations.
- */
- PREF( 0, 0(src) )
- PREF( 1, 0(dst) )
- sltu t2, len, NBYTES
- and t1, dst, ADDRMASK
- PREF( 0, 1*32(src) )
- PREF( 1, 1*32(dst) )
- bnez t2, .Lcopy_bytes_checklen
- and t0, src, ADDRMASK
- PREF( 0, 2*32(src) )
- PREF( 1, 2*32(dst) )
- bnez t1, .Ldst_unaligned
- nop
- bnez t0, .Lsrc_unaligned_dst_aligned
- /*
- * use delay slot for fall-through
- * src and dst are aligned; need to compute rem
- */
-.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
- beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
- and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
- PREF( 0, 3*32(src) )
- PREF( 1, 3*32(dst) )
- .align 4
-1:
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
- SUB len, len, 8*NBYTES
-EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy)
-EXC( LOAD t7, UNIT(5)(src), .Ll_exc_copy)
- STORE t0, UNIT(0)(dst)
- STORE t1, UNIT(1)(dst)
-EXC( LOAD t0, UNIT(6)(src), .Ll_exc_copy)
-EXC( LOAD t1, UNIT(7)(src), .Ll_exc_copy)
- ADD src, src, 8*NBYTES
- ADD dst, dst, 8*NBYTES
- STORE t2, UNIT(-6)(dst)
- STORE t3, UNIT(-5)(dst)
- STORE t4, UNIT(-4)(dst)
- STORE t7, UNIT(-3)(dst)
- STORE t0, UNIT(-2)(dst)
- STORE t1, UNIT(-1)(dst)
- PREF( 0, 8*32(src) )
- PREF( 1, 8*32(dst) )
- bne len, rem, 1b
- nop
-
- /*
- * len == rem == the number of bytes left to copy < 8*NBYTES
- */
-.Lcleanup_both_aligned:
- beqz len, .Ldone
- sltu t0, len, 4*NBYTES
- bnez t0, .Lless_than_4units
- and rem, len, (NBYTES-1) # rem = len % NBYTES
- /*
- * len >= 4*NBYTES
- */
-EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
-EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
-EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy)
-EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
- ADD src, src, 4*NBYTES
- STORE t0, UNIT(0)(dst)
- STORE t1, UNIT(1)(dst)
- STORE t2, UNIT(2)(dst)
- STORE t3, UNIT(3)(dst)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 4*NBYTES
- beqz len, .Ldone
- .set noreorder
-.Lless_than_4units:
- /*
- * rem = len % NBYTES
- */
- beq rem, len, .Lcopy_bytes
- nop
-1:
-EXC( LOAD t0, 0(src), .Ll_exc)
- ADD src, src, NBYTES
- SUB len, len, NBYTES
- STORE t0, 0(dst)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, NBYTES
- bne rem, len, 1b
- .set noreorder
-
- /*
- * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
- * A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
- * because can't assume read-access to dst. Instead, use
- * STREST dst, which doesn't require read access to dst.
- *
- * This code should perform better than a simple loop on modern,
- * wide-issue mips processors because the code has fewer branches and
- * more instruction-level parallelism.
- */
-#define bits t2
- beqz len, .Ldone
- ADD t1, dst, len # t1 is just past last byte of dst
- li bits, 8*NBYTES
- SLL rem, len, 3 # rem = number of bits to keep
-EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
- SHIFT_DISCARD t0, t0, bits
- STREST t0, -1(t1)
- jr ra
- move len, zero
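A hedged worked example for the 64-bit case (NBYTES = 8): with len = 3, rem = 3*8 = 24 bits are kept, bits = 64 - 24 = 40 bits are discarded by SHIFT_DISCARD, and the single STREST at dst + len - 1 stores exactly the 3 surviving bytes. A rough C analogue of the sequence above (illustrative, endian details elided):

/* Sketch only, assuming NBYTES == 8; not the kernel code itself. */
#include <string.h>

static void store_tail_sketch(unsigned char *dst, unsigned long word,
			      unsigned int len)	/* 0 < len < 8 */
{
	/* SHIFT_DISCARD drops (64 - len*8) bits so that only the first
	 * 'len' bytes of 'word' survive; STREST then writes them with a
	 * single partial unaligned store.  In portable C this is: */
	memcpy(dst, &word, len);	/* assumes a little-endian layout */
}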
-.Ldst_unaligned:
- /*
- * dst is unaligned
- * t0 = src & ADDRMASK
- * t1 = dst & ADDRMASK; T1 > 0
- * len >= NBYTES
- *
- * Copy enough bytes to align dst
- * Set match = (src and dst have same alignment)
- */
-#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
- ADD t2, zero, NBYTES
-EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
- SUB t2, t2, t1 # t2 = number of bytes copied
- xor match, t0, t1
- STFIRST t3, FIRST(0)(dst)
- beq len, t2, .Ldone
- SUB len, len, t2
- ADD dst, dst, t2
- beqz match, .Lboth_aligned
- ADD src, src, t2
-
-.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
- PREF( 0, 3*32(src) )
- beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
- PREF( 1, 3*32(dst) )
-1:
-/*
- * Avoid consecutive LD*'s to the same register since some mips
- * implementations can't issue them in the same cycle.
- * It's OK to load FIRST(N+1) before REST(N) because the two addresses
- * are to the same unit (unless src is aligned, but it's not).
- */
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
-EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
-EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
-EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
- PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
- ADD src, src, 4*NBYTES
-#ifdef CONFIG_CPU_SB1
- nop # improves slotting
-#endif
- STORE t0, UNIT(0)(dst)
- STORE t1, UNIT(1)(dst)
- STORE t2, UNIT(2)(dst)
- STORE t3, UNIT(3)(dst)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 4*NBYTES
- bne len, rem, 1b
- .set noreorder
-
-.Lcleanup_src_unaligned:
- beqz len, .Ldone
- and rem, len, NBYTES-1 # rem = len % NBYTES
- beq rem, len, .Lcopy_bytes
- nop
-1:
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
- ADD src, src, NBYTES
- SUB len, len, NBYTES
- STORE t0, 0(dst)
- .set reorder /* DADDI_WAR */
- ADD dst, dst, NBYTES
- bne len, rem, 1b
- .set noreorder
-
-.Lcopy_bytes_checklen:
- beqz len, .Ldone
- nop
-.Lcopy_bytes:
- /* 0 < len < NBYTES */
-#define COPY_BYTE(N) \
-EXC( lb t0, N(src), .Ll_exc); \
- SUB len, len, 1; \
- beqz len, .Ldone; \
- sb t0, N(dst)
-
- COPY_BYTE(0)
- COPY_BYTE(1)
-#ifdef USE_DOUBLE
- COPY_BYTE(2)
- COPY_BYTE(3)
- COPY_BYTE(4)
- COPY_BYTE(5)
-#endif
-EXC( lb t0, NBYTES-2(src), .Ll_exc)
- SUB len, len, 1
- jr ra
- sb t0, NBYTES-2(dst)
-.Ldone:
- jr ra
- nop
- END(__copy_user_inatomic)
-
-.Ll_exc_copy:
- /*
- * Copy bytes from src until faulting load address (or until a
- * lb faults)
- *
- * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
- * may be more than a byte beyond the last address.
- * Hence, the lb below may get an exception.
- *
- * Assumes src < THREAD_BUADDR($28)
- */
- LOAD t0, TI_TASK($28)
- nop
- LOAD t0, THREAD_BUADDR(t0)
-1:
-EXC( lb t1, 0(src), .Ll_exc)
- ADD src, src, 1
- sb t1, 0(dst) # can't fault -- we're copy_from_user
- .set reorder /* DADDI_WAR */
- ADD dst, dst, 1
- bne src, t0, 1b
- .set noreorder
-.Ll_exc:
- LOAD t0, TI_TASK($28)
- nop
- LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
- nop
- SUB len, AT, t0 # len number of uncopied bytes
- jr ra
- nop
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 56a1f85a1ce8..65192c06781e 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -183,6 +183,14 @@
#endif
/*
+ * t6 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+ b __copy_user_common
+ li t6, 1
+ END(__copy_user_inatomic)
+
+/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
@@ -193,6 +201,8 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
move v0, dst /* return value */
.L__memcpy:
FEXPORT(__copy_user)
+ li t6, 0 /* not inatomic */
+__copy_user_common:
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
@@ -458,6 +468,7 @@ EXC( lb t1, 0(src), .Ll_exc)
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
+ bnez t6, .Ldone /* Skip the zeroing part if inatomic */
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
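A hedged sketch of what the new t6 flag buys the caller: the inatomic entry point returns as soon as a load faults, without zero-filling the rest of the destination, so the caller must handle the shortfall itself. The names below are illustrative, not the real uaccess API.

/* Illustrative only: caller-side view of the inatomic entry point. */
#include <stddef.h>

size_t __copy_user_inatomic_sketch(void *dst, const void *src, size_t len);

static size_t copy_in_atomic_path(void *dst, const void *src, size_t len)
{
	size_t left = __copy_user_inatomic_sketch(dst, src, len);

	/* non-zero: a load faulted and, because t6 was set, the tail of
	 * dst was NOT zeroed; retry later outside the atomic section */
	return left;
}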
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index aca93eed8779..263beb9322a8 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -41,6 +41,7 @@ config LEMOTE_MACH2F
select CSRC_R4K if ! MIPS_EXTERNAL_TIMER
select DMA_NONCOHERENT
select GENERIC_ISA_DMA_SUPPORT_BROKEN
+ select HAVE_CLK
select HW_HAS_PCI
select I8259
select IRQ_CPU
diff --git a/arch/mips/loongson/lemote-2f/Makefile b/arch/mips/loongson/lemote-2f/Makefile
index 8699a53f0477..4f9eaa328a16 100644
--- a/arch/mips/loongson/lemote-2f/Makefile
+++ b/arch/mips/loongson/lemote-2f/Makefile
@@ -2,7 +2,7 @@
# Makefile for lemote loongson2f family machines
#
-obj-y += machtype.o irq.o reset.o ec_kb3310b.o
+obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o
#
# Suspend Support
diff --git a/arch/mips/kernel/cpufreq/loongson2_clock.c b/arch/mips/loongson/lemote-2f/clock.c
index 5426779d9fdb..bc739d4bab2e 100644
--- a/arch/mips/kernel/cpufreq/loongson2_clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -6,14 +6,17 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
-
-#include <linux/module.h>
+#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <asm/clock.h>
-
-#include <loongson.h>
+#include <asm/mach-loongson/loongson.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
@@ -89,12 +92,6 @@ EXPORT_SYMBOL(clk_put);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
- return clk_set_rate_ex(clk, rate, 0);
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
-{
int ret = 0;
int regval;
int i;
@@ -103,7 +100,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
unsigned long flags;
spin_lock_irqsave(&clock_lock, flags);
- ret = clk->ops->set_rate(clk, rate, algo_id);
+ ret = clk->ops->set_rate(clk, rate, 0);
spin_unlock_irqrestore(&clock_lock, flags);
}
@@ -129,7 +126,7 @@ int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
return ret;
}
-EXPORT_SYMBOL_GPL(clk_set_rate_ex);
+EXPORT_SYMBOL_GPL(clk_set_rate);
long clk_round_rate(struct clk *clk, unsigned long rate)
{
@@ -146,26 +143,3 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
return rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
-
-/*
- * This is the simple version of Loongson-2 wait, Maybe we need do this in
- * interrupt disabled content
- */
-
-DEFINE_SPINLOCK(loongson2_wait_lock);
-void loongson2_cpu_wait(void)
-{
- u32 cpu_freq;
- unsigned long flags;
-
- spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG0;
- LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
- spin_unlock_irqrestore(&loongson2_wait_lock, flags);
-}
-EXPORT_SYMBOL_GPL(loongson2_cpu_wait);
-
-MODULE_AUTHOR("Yanhua <yanh@lemote.com>");
-MODULE_DESCRIPTION("cpufreq driver for Loongson 2F");
-MODULE_LICENSE("GPL");
diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig
new file mode 100644
index 000000000000..a9a14d6e81af
--- /dev/null
+++ b/arch/mips/loongson1/Kconfig
@@ -0,0 +1,22 @@
+if MACH_LOONGSON1
+
+choice
+ prompt "Machine Type"
+
+config LOONGSON1_LS1B
+ bool "Loongson LS1B board"
+ select CEVT_R4K
+ select CSRC_R4K
+ select SYS_HAS_CPU_LOONGSON1B
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select IRQ_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select HAVE_CLK
+
+endchoice
+
+endif # MACH_LOONGSON1
diff --git a/arch/mips/loongson1/Makefile b/arch/mips/loongson1/Makefile
new file mode 100644
index 000000000000..9719c75886f5
--- /dev/null
+++ b/arch/mips/loongson1/Makefile
@@ -0,0 +1,11 @@
+#
+# Common code for all Loongson 1 based systems
+#
+
+obj-$(CONFIG_MACH_LOONGSON1) += common/
+
+#
+# Loongson LS1B board
+#
+
+obj-$(CONFIG_LOONGSON1_LS1B) += ls1b/
diff --git a/arch/mips/loongson1/Platform b/arch/mips/loongson1/Platform
new file mode 100644
index 000000000000..99bdefe627af
--- /dev/null
+++ b/arch/mips/loongson1/Platform
@@ -0,0 +1,7 @@
+cflags-$(CONFIG_CPU_LOONGSON1) += \
+ $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+ -Wa,-mips32r2 -Wa,--trap
+
+platform-$(CONFIG_MACH_LOONGSON1) += loongson1/
+cflags-$(CONFIG_MACH_LOONGSON1) += -I$(srctree)/arch/mips/include/asm/mach-loongson1
+load-$(CONFIG_LOONGSON1_LS1B) += 0xffffffff80100000
diff --git a/arch/mips/loongson1/common/Makefile b/arch/mips/loongson1/common/Makefile
new file mode 100644
index 000000000000..b2797709ef5b
--- /dev/null
+++ b/arch/mips/loongson1/common/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for common code of loongson1 based machines.
+#
+
+obj-y += clock.o irq.o platform.o prom.o reset.o setup.o
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
new file mode 100644
index 000000000000..1bbbbec12085
--- /dev/null
+++ b/arch/mips/loongson1/common/clock.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <asm/clock.h>
+#include <asm/time.h>
+
+#include <loongson1.h>
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+struct clk *clk_get(struct device *dev, const char *name)
+{
+ struct clk *c;
+ struct clk *ret = NULL;
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(c, &clocks, node) {
+ if (!strcmp(c->name, name)) {
+ ret = c;
+ break;
+ }
+ }
+ mutex_unlock(&clocks_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_get);
+
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+static void pll_clk_init(struct clk *clk)
+{
+ u32 pll;
+
+ pll = __raw_readl(LS1X_CLK_PLL_FREQ);
+ clk->rate = (12 + (pll & 0x3f)) * 33 / 2
+ + ((pll >> 8) & 0x3ff) * 33 / 1024 / 2;
+ clk->rate *= 1000000;
+}
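The computation above appears to assume a 33 MHz reference clock (an assumption; the patch itself does not say so). A hedged worked example with a made-up register value:

/* Worked example with an illustrative register value, not real hardware. */
#include <stdio.h>

int main(void)
{
	unsigned int pll = 0x21;	/* integer field = 33, fractional field = 0 */
	unsigned long mhz;

	mhz = (12 + (pll & 0x3f)) * 33 / 2		/* (12 + 33) * 33 / 2 = 742 */
	    + ((pll >> 8) & 0x3ff) * 33 / 1024 / 2;	/* + 0 */
	printf("pll rate = %lu MHz\n", mhz);		/* 742 MHz with these inputs */
	return 0;
}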
+
+static void cpu_clk_init(struct clk *clk)
+{
+ u32 pll, ctrl;
+
+ pll = clk_get_rate(clk->parent);
+ ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_CPU;
+ clk->rate = pll / (ctrl >> DIV_CPU_SHIFT);
+}
+
+static void ddr_clk_init(struct clk *clk)
+{
+ u32 pll, ctrl;
+
+ pll = clk_get_rate(clk->parent);
+ ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DDR;
+ clk->rate = pll / (ctrl >> DIV_DDR_SHIFT);
+}
+
+static void dc_clk_init(struct clk *clk)
+{
+ u32 pll, ctrl;
+
+ pll = clk_get_rate(clk->parent);
+ ctrl = __raw_readl(LS1X_CLK_PLL_DIV) & DIV_DC;
+ clk->rate = pll / (ctrl >> DIV_DC_SHIFT);
+}
+
+static struct clk_ops pll_clk_ops = {
+ .init = pll_clk_init,
+};
+
+static struct clk_ops cpu_clk_ops = {
+ .init = cpu_clk_init,
+};
+
+static struct clk_ops ddr_clk_ops = {
+ .init = ddr_clk_init,
+};
+
+static struct clk_ops dc_clk_ops = {
+ .init = dc_clk_init,
+};
+
+static struct clk pll_clk = {
+ .name = "pll",
+ .ops = &pll_clk_ops,
+};
+
+static struct clk cpu_clk = {
+ .name = "cpu",
+ .parent = &pll_clk,
+ .ops = &cpu_clk_ops,
+};
+
+static struct clk ddr_clk = {
+ .name = "ddr",
+ .parent = &pll_clk,
+ .ops = &ddr_clk_ops,
+};
+
+static struct clk dc_clk = {
+ .name = "dc",
+ .parent = &pll_clk,
+ .ops = &dc_clk_ops,
+};
+
+int clk_register(struct clk *clk)
+{
+ mutex_lock(&clocks_mutex);
+ list_add(&clk->node, &clocks);
+ if (clk->ops->init)
+ clk->ops->init(clk);
+ mutex_unlock(&clocks_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+static struct clk *ls1x_clks[] = {
+ &pll_clk,
+ &cpu_clk,
+ &ddr_clk,
+ &dc_clk,
+};
+
+int __init ls1x_clock_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ls1x_clks); i++)
+ clk_register(ls1x_clks[i]);
+
+ return 0;
+}
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ /* Initialize LS1X clocks */
+ ls1x_clock_init();
+
+ /* setup mips r4k timer */
+ clk = clk_get(NULL, "cpu");
+ if (IS_ERR(clk))
+ panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
+
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+}
diff --git a/arch/mips/loongson1/common/irq.c b/arch/mips/loongson1/common/irq.c
new file mode 100644
index 000000000000..41bc8ffe7bba
--- /dev/null
+++ b/arch/mips/loongson1/common/irq.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <asm/irq_cpu.h>
+
+#include <loongson1.h>
+#include <irq.h>
+
+#define LS1X_INTC_REG(n, x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_INTC_BASE + (n * 0x18) + (x)))
+
+#define LS1X_INTC_INTISR(n) LS1X_INTC_REG(n, 0x0)
+#define LS1X_INTC_INTIEN(n) LS1X_INTC_REG(n, 0x4)
+#define LS1X_INTC_INTSET(n) LS1X_INTC_REG(n, 0x8)
+#define LS1X_INTC_INTCLR(n) LS1X_INTC_REG(n, 0xc)
+#define LS1X_INTC_INTPOL(n) LS1X_INTC_REG(n, 0x10)
+#define LS1X_INTC_INTEDGE(n) LS1X_INTC_REG(n, 0x14)
+
+static void ls1x_irq_ack(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
+ | (1 << bit), LS1X_INTC_INTCLR(n));
+}
+
+static void ls1x_irq_mask(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ & ~(1 << bit), LS1X_INTC_INTIEN(n));
+}
+
+static void ls1x_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ & ~(1 << bit), LS1X_INTC_INTIEN(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTCLR(n))
+ | (1 << bit), LS1X_INTC_INTCLR(n));
+}
+
+static void ls1x_irq_unmask(struct irq_data *d)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ __raw_writel(__raw_readl(LS1X_INTC_INTIEN(n))
+ | (1 << bit), LS1X_INTC_INTIEN(n));
+}
+
+static struct irq_chip ls1x_irq_chip = {
+ .name = "LS1X-INTC",
+ .irq_ack = ls1x_irq_ack,
+ .irq_mask = ls1x_irq_mask,
+ .irq_mask_ack = ls1x_irq_mask_ack,
+ .irq_unmask = ls1x_irq_unmask,
+};
+
+static void ls1x_irq_dispatch(int n)
+{
+ u32 int_status, irq;
+
+ /* Get pending sources, masked by current enables */
+ int_status = __raw_readl(LS1X_INTC_INTISR(n)) &
+ __raw_readl(LS1X_INTC_INTIEN(n));
+
+ if (int_status) {
+ irq = LS1X_IRQ(n, __ffs(int_status));
+ do_IRQ(irq);
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending;
+
+ pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (pending & CAUSEF_IP7)
+ do_IRQ(TIMER_IRQ);
+ else if (pending & CAUSEF_IP2)
+ ls1x_irq_dispatch(0); /* INT0 */
+ else if (pending & CAUSEF_IP3)
+ ls1x_irq_dispatch(1); /* INT1 */
+ else if (pending & CAUSEF_IP4)
+ ls1x_irq_dispatch(2); /* INT2 */
+ else if (pending & CAUSEF_IP5)
+ ls1x_irq_dispatch(3); /* INT3 */
+ else if (pending & CAUSEF_IP6)
+ ls1x_irq_dispatch(4); /* INT4 */
+ else
+ spurious_interrupt();
+
+}
+
+struct irqaction cascade_irqaction = {
+ .handler = no_action,
+ .name = "cascade",
+ .flags = IRQF_NO_THREAD,
+};
+
+static void __init ls1x_irq_init(int base)
+{
+ int n;
+
+ /* Disable interrupts and clear pending interrupts;
+ * set up all IRQs as high-level triggered
+ */
+ for (n = 0; n < 4; n++) {
+ __raw_writel(0x0, LS1X_INTC_INTIEN(n));
+ __raw_writel(0xffffffff, LS1X_INTC_INTCLR(n));
+ __raw_writel(0xffffffff, LS1X_INTC_INTPOL(n));
+ /* set DMA0, DMA1 and DMA2 to edge trigger */
+ __raw_writel(n ? 0x0 : 0xe000, LS1X_INTC_INTEDGE(n));
+ }
+
+ for (n = base; n < LS1X_IRQS; n++) {
+ irq_set_chip_and_handler(n, &ls1x_irq_chip,
+ handle_level_irq);
+ }
+
+ setup_irq(INT0_IRQ, &cascade_irqaction);
+ setup_irq(INT1_IRQ, &cascade_irqaction);
+ setup_irq(INT2_IRQ, &cascade_irqaction);
+ setup_irq(INT3_IRQ, &cascade_irqaction);
+}
+
+void __init arch_init_irq(void)
+{
+ mips_cpu_irq_init();
+ ls1x_irq_init(LS1X_IRQ_BASE);
+}
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
new file mode 100644
index 000000000000..e92d59c4bd78
--- /dev/null
+++ b/arch/mips/loongson1/common/platform.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/phy.h>
+#include <linux/serial_8250.h>
+#include <linux/stmmac.h>
+#include <asm-generic/sizes.h>
+
+#include <loongson1.h>
+
+#define LS1X_UART(_id) \
+ { \
+ .mapbase = LS1X_UART ## _id ## _BASE, \
+ .irq = LS1X_UART ## _id ## _IRQ, \
+ .iotype = UPIO_MEM, \
+ .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
+ }
+
+static struct plat_serial8250_port ls1x_serial8250_port[] = {
+ LS1X_UART(0),
+ LS1X_UART(1),
+ LS1X_UART(2),
+ LS1X_UART(3),
+ {},
+};
+
+struct platform_device ls1x_uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = ls1x_serial8250_port,
+ },
+};
+
+void __init ls1x_serial_setup(void)
+{
+ struct clk *clk;
+ struct plat_serial8250_port *p;
+
+ clk = clk_get(NULL, "dc");
+ if (IS_ERR(clk))
+ panic("unable to get dc clock, err=%ld", PTR_ERR(clk));
+
+ for (p = ls1x_serial8250_port; p->flags != 0; ++p)
+ p->uartclk = clk_get_rate(clk);
+}
+
+/* Synopsys Ethernet GMAC */
+static struct resource ls1x_eth0_resources[] = {
+ [0] = {
+ .start = LS1X_GMAC0_BASE,
+ .end = LS1X_GMAC0_BASE + SZ_64K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "macirq",
+ .start = LS1X_GMAC0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
+ .bus_id = 0,
+ .phy_mask = 0,
+};
+
+static struct plat_stmmacenet_data ls1x_eth_data = {
+ .bus_id = 0,
+ .phy_addr = -1,
+ .mdio_bus_data = &ls1x_mdio_bus_data,
+ .has_gmac = 1,
+ .tx_coe = 1,
+};
+
+struct platform_device ls1x_eth0_device = {
+ .name = "stmmaceth",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ls1x_eth0_resources),
+ .resource = ls1x_eth0_resources,
+ .dev = {
+ .platform_data = &ls1x_eth_data,
+ },
+};
+
+/* USB EHCI */
+static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
+
+static struct resource ls1x_ehci_resources[] = {
+ [0] = {
+ .start = LS1X_EHCI_BASE,
+ .end = LS1X_EHCI_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = LS1X_EHCI_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ls1x_ehci_device = {
+ .name = "ls1x-ehci",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ls1x_ehci_resources),
+ .resource = ls1x_ehci_resources,
+ .dev = {
+ .dma_mask = &ls1x_ehci_dmamask,
+ },
+};
+
+/* Real Time Clock */
+struct platform_device ls1x_rtc_device = {
+ .name = "ls1x-rtc",
+ .id = -1,
+};
diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c
new file mode 100644
index 000000000000..1f8e49f9886d
--- /dev/null
+++ b/arch/mips/loongson1/common/prom.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Modified from arch/mips/pnx833x/common/prom.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/serial_reg.h>
+#include <asm/bootinfo.h>
+
+#include <loongson1.h>
+#include <prom.h>
+
+int prom_argc;
+char **prom_argv, **prom_envp;
+unsigned long memsize, highmemsize;
+
+char *prom_getenv(char *envname)
+{
+ char **env = prom_envp;
+ int i;
+
+ i = strlen(envname);
+
+ while (*env) {
+ if (strncmp(envname, *env, i) == 0 && *(*env+i) == '=')
+ return *env + i + 1;
+ env++;
+ }
+
+ return 0;
+}
+
+static inline unsigned long env_or_default(char *env, unsigned long dfl)
+{
+ char *str = prom_getenv(env);
+ return str ? simple_strtol(str, 0, 0) : dfl;
+}
+
+void __init prom_init_cmdline(void)
+{
+ char *c = &(arcs_cmdline[0]);
+ int i;
+
+ for (i = 1; i < prom_argc; i++) {
+ strcpy(c, prom_argv[i]);
+ c += strlen(prom_argv[i]);
+ if (i < prom_argc-1)
+ *c++ = ' ';
+ }
+ *c = 0;
+}
+
+void __init prom_init(void)
+{
+ prom_argc = fw_arg0;
+ prom_argv = (char **)fw_arg1;
+ prom_envp = (char **)fw_arg2;
+
+ prom_init_cmdline();
+
+ memsize = env_or_default("memsize", DEFAULT_MEMSIZE);
+ highmemsize = env_or_default("highmemsize", 0x0);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset))
+
+void __init prom_putchar(char c)
+{
+ int timeout;
+
+ timeout = 1024;
+
+ while (((readb(PORT(UART_LSR)) & UART_LSR_THRE) == 0)
+ && (timeout-- > 0))
+ ;
+
+ writeb(c, PORT(UART_TX));
+}
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
new file mode 100644
index 000000000000..fb979a784eca
--- /dev/null
+++ b/arch/mips/loongson1/common/reset.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+
+#include <loongson1.h>
+
+static void ls1x_restart(char *command)
+{
+ __raw_writel(0x1, LS1X_WDT_EN);
+ __raw_writel(0x5000000, LS1X_WDT_TIMER);
+ __raw_writel(0x1, LS1X_WDT_SET);
+}
+
+static void ls1x_halt(void)
+{
+ while (1) {
+ if (cpu_wait)
+ cpu_wait();
+ }
+}
+
+static void ls1x_power_off(void)
+{
+ ls1x_halt();
+}
+
+static int __init ls1x_reboot_setup(void)
+{
+ _machine_restart = ls1x_restart;
+ _machine_halt = ls1x_halt;
+ pm_power_off = ls1x_power_off;
+
+ return 0;
+}
+
+arch_initcall(ls1x_reboot_setup);
diff --git a/arch/mips/loongson1/common/setup.c b/arch/mips/loongson1/common/setup.c
new file mode 100644
index 000000000000..62128cc27e68
--- /dev/null
+++ b/arch/mips/loongson1/common/setup.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/bootinfo.h>
+
+#include <prom.h>
+
+void __init plat_mem_setup(void)
+{
+ add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
+}
+
+const char *get_system_type(void)
+{
+ unsigned int processor_id = (&current_cpu_data)->processor_id;
+
+ switch (processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ return "LOONGSON LS1B";
+ default:
+ return "LOONGSON (unknown)";
+ }
+}
diff --git a/arch/mips/loongson1/ls1b/Makefile b/arch/mips/loongson1/ls1b/Makefile
new file mode 100644
index 000000000000..891eac482b82
--- /dev/null
+++ b/arch/mips/loongson1/ls1b/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for loongson1B based machines.
+#
+
+obj-y += board.o
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
new file mode 100644
index 000000000000..295b1be893e3
--- /dev/null
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <platform.h>
+
+#include <linux/serial_8250.h>
+#include <loongson1.h>
+
+static struct platform_device *ls1b_platform_devices[] __initdata = {
+ &ls1x_uart_device,
+ &ls1x_eth0_device,
+ &ls1x_ehci_device,
+ &ls1x_rtc_device,
+};
+
+static int __init ls1b_platform_init(void)
+{
+ int err;
+
+ ls1x_serial_setup();
+
+ err = platform_add_devices(ls1b_platform_devices,
+ ARRAY_SIZE(ls1b_platform_devices));
+ return err;
+}
+
+arch_initcall(ls1b_platform_init);
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 5fa185151fc8..64a28e819064 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -58,18 +58,16 @@ enum fields {
enum opcode {
insn_invalid,
- insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
- insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
- insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
- insn_dsrl32, insn_drotr, insn_drotr32, insn_dsubu, insn_eret,
- insn_j, insn_jal, insn_jr, insn_ld, insn_ll, insn_lld,
- insn_lui, insn_lw, insn_mfc0, insn_mtc0, insn_or, insn_ori,
- insn_pref, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
- insn_sra, insn_srl, insn_rotr, insn_subu, insn_sw, insn_tlbp,
+ insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
+ insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
+ insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
+ insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
+ insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
+ insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld,
+ insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori,
+ insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp,
insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
- insn_dins, insn_dinsm, insn_syscall, insn_bbit0, insn_bbit1,
- insn_lwx, insn_ldx
};
struct insn {
@@ -90,65 +88,65 @@ struct insn {
static struct insn insn_table[] __uasminitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
- { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+ { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+ { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
+ { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
- { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
+ { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
+ { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+ { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
+ { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
+ { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
- { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
- { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
- { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
+ { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
- { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
- { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
- { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
- { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
- { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
- { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
- { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
- { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
- { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+ { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_invalid, 0, 0 }
};
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 284dea54faf5..2147cb34e705 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
register_pci_controller(controller);
}
-
-/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __devinit quirk_dlcsetup(struct pci_dev *dev)
-{
- u8 odlc, ndlc;
- (void) pci_read_config_byte(dev, 0x82, &odlc);
- /* Enable passive releases and delayed transaction */
- ndlc = odlc | 7;
- (void) pci_write_config_byte(dev, 0x82, ndlc);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
- quirk_dlcsetup);
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index f193f7b3bd81..1902fa22d277 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -54,7 +54,7 @@ void prom_putchar(char c)
#elif defined(CONFIG_CPU_XLR)
uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
#endif
- while (nlm_read_reg(uartbase, UART_LSR) == 0)
+ while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
;
nlm_write_reg(uartbase, UART_TX, c);
}
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index c138b1a6dec3..a13355cc97eb 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -54,28 +54,68 @@
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
-.macro __config_lsu
- li t0, LSU_DEFEATURE
- mfcr t1, t0
+#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
- lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
- or t1, t1, t2
- li t2, ~0xe /* S1RCM */
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+#ifdef XLP_AX_WORKAROUND
+ li t2, ~0xe /* S1RCM */
and t1, t1, t2
- mtcr t1, t0
+#endif
+ mtcr t1, t0
- li t0, SCHED_DEFEATURE
- lui t1, 0x0100 /* Experimental: Disable BRU accepting ALU ops */
- mtcr t1, t0
+#ifdef XLP_AX_WORKAROUND
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+ mtcr t1, t0
+#endif
+.endm
+
+/*
+ * This is the code that will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ */
+.macro xlp_flush_l1_dcache
+ li t0, LSU_DEBUG_DATA0
+ li t1, LSU_DEBUG_ADDR
+ li t2, 0 /* index */
+ li t3, 0x1000 /* loop count */
+1:
+ sll v0, t2, 5
+ mtcr zero, t0
+ ori v1, v0, 0x3 /* way0 | write_enable | write_active */
+ mtcr v1, t1
+2:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 2b
+ nop
+ mtcr zero, t0
+ ori v1, v0, 0x7 /* way1 | write_enable | write_active */
+ mtcr v1, t1
+3:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 3b
+ nop
+ addi t2, 1
+ bne t3, t2, 1b
+ nop
.endm
/*
* The cores start here when they are woken up. This is also the NMI
* entry, so check that first.
*
- * The data corresponding to reset is stored at RESET_DATA_PHYS location,
- * this will have the thread mask (used when core is woken up) and the
- * current NMI handler in case we reached here for an NMI.
+ * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
+ * location; this will have the thread mask (used when the core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
*
* When a core or thread is newly woken up, it loops in a 'wait'. When
* the CPU really needs waking up, we send an NMI to it, with the NMI
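A hedged C-level sketch of the wake-up handshake described in this comment; the structure layout and the helper below are assumptions for illustration only, not the actual RESET_DATA_PHYS layout or PIC interface.

/* Illustrative sketch of the protocol: publish the handler, then send NMI. */
struct reset_data_sketch {
	unsigned long boot_thread_mode;	/* threads to enable in a core */
	unsigned long boot_nmi_handler;	/* where a woken CPU should jump */
};

static void send_nmi_sketch(int cpu)
{
	(void)cpu;	/* stands in for the platform's PIC NMI write */
}

static void wake_cpu_sketch(volatile struct reset_data_sketch *rd,
			    unsigned long handler, int cpu)
{
	rd->boot_nmi_handler = handler;	/* publish the target first ... */
	send_nmi_sketch(cpu);		/* ... then kick the waiting CPU */
}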
@@ -89,12 +129,12 @@
FEXPORT(nlm_reset_entry)
dmtc0 k0, $22, 6
dmtc0 k1, $22, 7
- mfc0 k0, CP0_STATUS
- li k1, 0x80000
- and k1, k0, k1
- beqz k1, 1f /* go to real reset entry */
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
nop
- li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
ld k0, BOOT_NMI_HANDLER(k1)
jr k0
nop
@@ -114,21 +154,25 @@ FEXPORT(nlm_reset_entry)
li t2, SYS_CPU_COHERENT_BASE(0)
add t2, t2, t3 /* t2 <- SYS offset for node */
lw t1, 0(t2)
- and t1, t1, t0
- sw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
/* read back to ensure complete */
- lw t1, 0(t2)
+ lw t1, 0(t2)
sync
/* Configure LSU on Non-0 Cores. */
- __config_lsu
+ xlp_config_lsu
+ /* FALL THROUGH */
/*
* Wake up sibling threads from the initial thread in
* a core.
*/
EXPORT(nlm_boot_siblings)
+ /* flush the core's L1 D-cache before enabling threads */
+ xlp_flush_l1_dcache
+ /* Enable hw threads by writing to MAP_THREADMODE of the core */
li t0, CKSEG1ADDR(RESET_DATA_PHYS)
lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
@@ -139,31 +183,28 @@ EXPORT(nlm_boot_siblings)
/*
* The new hardware thread starts at the next instruction
* For all the cases other than core 0 thread 0, we will
- * jump to the secondary wait function.
- */
+ * jump to the secondary wait function.
+ */
mfc0 v0, CP0_EBASE, 1
andi v0, 0x7f /* v0 <- node/core */
-#if 1
- /* A0 errata - Write MMU_SETUP after changing thread mode register. */
+ /* Init MMU in the first thread after changing THREAD_MODE
+ * register (Ax Errata?)
+ */
andi v1, v0, 0x3 /* v1 <- thread id */
bnez v1, 2f
nop
- li t0, MMU_SETUP
- li t1, 0
- mtcr t1, t0
- ehb
-#endif
+ li t0, MMU_SETUP
+ li t1, 0
+ mtcr t1, t0
+ _ehb
-2: beqz v0, 4f
+2: beqz v0, 4f /* boot cpu (cpuid == 0)? */
nop
/* setup status reg */
- mfc0 t1, CP0_STATUS
- li t0, ST0_BEV
- or t1, t0
- xor t1, t0
+ move t1, zero
#ifdef CONFIG_64BIT
ori t1, ST0_KX
#endif
@@ -183,9 +224,9 @@ EXPORT(nlm_boot_siblings)
* For the boot CPU, we have to restore registers and
* return
*/
-4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
+4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
li t1, 0xfadebeef
- dmtc0 t1, $4, 2 /* restore SP from UserLocal */
+ dmtc0 t1, $4, 2 /* restore SP from UserLocal */
PTR_SUBU sp, t0, PT_SIZE
RESTORE_ALL
jr ra
@@ -193,7 +234,7 @@ EXPORT(nlm_boot_siblings)
EXPORT(nlm_reset_entry_end)
FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
- __config_lsu
+ xlp_config_lsu
dmtc0 sp, $4, 2 /* SP saved in UserLocal */
SAVE_ALL
sync
@@ -210,6 +251,12 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
__CPUINIT
NESTED(nlm_boot_secondary_cpus, 16, sp)
+ /* Initialize CP0 Status */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
PTR_LA t1, nlm_next_sp
PTR_L sp, 0(t1)
PTR_LA t1, nlm_next_gp
@@ -234,36 +281,36 @@ END(nlm_boot_secondary_cpus)
*/
__CPUINIT
NESTED(nlm_rmiboot_preboot, 16, sp)
- mfc0 t0, $15, 1 # read ebase
- andi t0, 0x1f # t0 has the processor_id()
- andi t2, t0, 0x3 # thread no
- sll t0, 2 # offset in cpu array
+ mfc0 t0, $15, 1 /* read ebase */
+ andi t0, 0x1f /* t0 has the processor_id() */
+ andi t2, t0, 0x3 /* thread num */
+ sll t0, 2 /* offset in cpu array */
- PTR_LA t1, nlm_cpu_ready # mark CPU ready
+ PTR_LA t1, nlm_cpu_ready /* mark CPU ready */
PTR_ADDU t1, t0
li t3, 1
sw t3, 0(t1)
- bnez t2, 1f # skip thread programming
- nop # for non zero hw threads
+ bnez t2, 1f /* skip thread programming */
+ nop /* for thread id != 0 */
/*
- * MMU setup only for first thread in core
+ * XLR MMU setup only for first thread in core
*/
li t0, 0x400
mfcr t1, t0
- li t2, 6 # XLR thread mode mask
+ li t2, 6 /* XLR thread mode mask */
nor t3, t2, zero
- and t2, t1, t2 # t2 - current thread mode
+ and t2, t1, t2 /* t2 - current thread mode */
li v0, CKSEG1ADDR(RESET_DATA_PHYS)
- lw v1, BOOT_THREAD_MODE(v0) # v1 - new thread mode
+ lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
sll v1, 1
- beq v1, t2, 1f # same as request value
- nop # nothing to do */
+ beq v1, t2, 1f /* same as request value */
+ nop /* nothing to do */
- and t2, t1, t3 # mask out old thread mode
- or t1, t2, v1 # put in new value
- mtcr t1, t0 # update core control
+ and t2, t1, t3 /* mask out old thread mode */
+ or t1, t2, v1 /* put in new value */
+ mtcr t1, t0 /* update core control */
1: wait
j 1b
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
index b93ed83474ec..6b4b972218f0 100644
--- a/arch/mips/netlogic/xlp/Makefile
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -1,2 +1,4 @@
obj-y += setup.o platform.o nlm_hal.o
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_SMP) += wakeup.o
+obj-$(CONFIG_USB) += usb-init.o
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 9428e7125fed..6c65ac701912 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -69,6 +69,32 @@ int nlm_irq_to_irt(int irq)
return PIC_IRT_UART_0_INDEX;
case PIC_UART_1_IRQ:
return PIC_IRT_UART_1_INDEX;
+ case PIC_PCIE_LINK_0_IRQ:
+ return PIC_IRT_PCIE_LINK_0_INDEX;
+ case PIC_PCIE_LINK_1_IRQ:
+ return PIC_IRT_PCIE_LINK_1_INDEX;
+ case PIC_PCIE_LINK_2_IRQ:
+ return PIC_IRT_PCIE_LINK_2_INDEX;
+ case PIC_PCIE_LINK_3_IRQ:
+ return PIC_IRT_PCIE_LINK_3_INDEX;
+ case PIC_EHCI_0_IRQ:
+ return PIC_IRT_EHCI_0_INDEX;
+ case PIC_EHCI_1_IRQ:
+ return PIC_IRT_EHCI_1_INDEX;
+ case PIC_OHCI_0_IRQ:
+ return PIC_IRT_OHCI_0_INDEX;
+ case PIC_OHCI_1_IRQ:
+ return PIC_IRT_OHCI_1_INDEX;
+ case PIC_OHCI_2_IRQ:
+ return PIC_IRT_OHCI_2_INDEX;
+ case PIC_OHCI_3_IRQ:
+ return PIC_IRT_OHCI_3_INDEX;
+ case PIC_MMC_IRQ:
+ return PIC_IRT_MMC_INDEX;
+ case PIC_I2C_0_IRQ:
+ return PIC_IRT_I2C_0_INDEX;
+ case PIC_I2C_1_IRQ:
+ return PIC_IRT_I2C_1_INDEX;
default:
return -1;
}
@@ -81,6 +107,32 @@ int nlm_irt_to_irq(int irt)
return PIC_UART_0_IRQ;
case PIC_IRT_UART_1_INDEX:
return PIC_UART_1_IRQ;
+ case PIC_IRT_PCIE_LINK_0_INDEX:
+ return PIC_PCIE_LINK_0_IRQ;
+ case PIC_IRT_PCIE_LINK_1_INDEX:
+ return PIC_PCIE_LINK_1_IRQ;
+ case PIC_IRT_PCIE_LINK_2_INDEX:
+ return PIC_PCIE_LINK_2_IRQ;
+ case PIC_IRT_PCIE_LINK_3_INDEX:
+ return PIC_PCIE_LINK_3_IRQ;
+ case PIC_IRT_EHCI_0_INDEX:
+ return PIC_EHCI_0_IRQ;
+ case PIC_IRT_EHCI_1_INDEX:
+ return PIC_EHCI_1_IRQ;
+ case PIC_IRT_OHCI_0_INDEX:
+ return PIC_OHCI_0_IRQ;
+ case PIC_IRT_OHCI_1_INDEX:
+ return PIC_OHCI_1_IRQ;
+ case PIC_IRT_OHCI_2_INDEX:
+ return PIC_OHCI_2_IRQ;
+ case PIC_IRT_OHCI_3_INDEX:
+ return PIC_OHCI_3_IRQ;
+ case PIC_IRT_MMC_INDEX:
+ return PIC_MMC_IRQ;
+ case PIC_IRT_I2C_0_INDEX:
+ return PIC_I2C_0_IRQ;
+ case PIC_IRT_I2C_1_INDEX:
+ return PIC_I2C_1_IRQ;
default:
return -1;
}
diff --git a/arch/mips/netlogic/xlp/of.c b/arch/mips/netlogic/xlp/of.c
new file mode 100644
index 000000000000..8e3921c0c201
--- /dev/null
+++ b/arch/mips/netlogic/xlp/of.c
@@ -0,0 +1,34 @@
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_fdt.h>
+#include <asm/byteorder.h>
+
+static int __init reserve_mem_mach(unsigned long addr, unsigned long size)
+{
+ return reserve_bootmem(addr, size, BOOTMEM_DEFAULT);
+}
+
+void __init free_mem_mach(unsigned long addr, unsigned long size)
+{
+ return free_bootmem(addr, size);
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+ /* Before we do anything, let's reserve the dt blob */
+ reserve_mem_mach(base, size);
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_mem_mach(base, size);
+}
diff --git a/arch/mips/netlogic/xlp/platform.c b/arch/mips/netlogic/xlp/platform.c
index 1f5e4cba891d..2c510d585447 100644
--- a/arch/mips/netlogic/xlp/platform.c
+++ b/arch/mips/netlogic/xlp/platform.c
@@ -53,7 +53,7 @@
static unsigned int nlm_xlp_uart_in(struct uart_port *p, int offset)
{
- return nlm_read_reg(p->iobase, offset);
+ return nlm_read_reg(p->iobase, offset);
}
static void nlm_xlp_uart_out(struct uart_port *p, int offset, int value)
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index b3df7c2aad1e..3dec9f28b65b 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -41,6 +41,8 @@
#include <asm/bootinfo.h>
#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
@@ -109,3 +111,17 @@ void __init prom_init(void)
register_smp_ops(&nlm_smp_ops);
#endif
}
+
+static struct of_device_id __initdata xlp_ids[] = {
+ { .compatible = "simple-bus", },
+ {},
+};
+
+int __init xlp8xx_ds_publish_devices(void)
+{
+ if (!of_have_populated_dt())
+ return 0;
+ return of_platform_bus_probe(NULL, xlp_ids, NULL);
+}
+
+device_initcall(xlp8xx_ds_publish_devices);
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
new file mode 100644
index 000000000000..dbe083a93538
--- /dev/null
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/usb.h>
+
+static void nlm_usb_intr_en(int node, int port)
+{
+ uint32_t val;
+ uint64_t port_addr;
+
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_INT_EN);
+ val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT_EN | USB_OHCI_INTERRUPT2_EN;
+ nlm_write_usb_reg(port_addr, USB_INT_EN, val);
+}
+
+static void nlm_usb_hw_reset(int node, int port)
+{
+ uint64_t port_addr;
+ uint32_t val;
+
+ /* reset USB phy */
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_PHY_0);
+ val &= ~(USB_PHY_RESET | USB_PHY_PORT_RESET_0 | USB_PHY_PORT_RESET_1);
+ nlm_write_usb_reg(port_addr, USB_PHY_0, val);
+
+ mdelay(100);
+ val = nlm_read_usb_reg(port_addr, USB_CTL_0);
+ val &= ~(USB_CONTROLLER_RESET);
+ val |= 0x4;
+ nlm_write_usb_reg(port_addr, USB_CTL_0, val);
+}
+
+static int __init nlm_platform_usb_init(void)
+{
+ pr_info("Initializing USB Interface\n");
+ nlm_usb_hw_reset(0, 0);
+ nlm_usb_hw_reset(0, 3);
+
+ /* Enable PHY interrupts */
+ nlm_usb_intr_en(0, 0);
+ nlm_usb_intr_en(0, 3);
+
+ return 0;
+}
+
+arch_initcall(nlm_platform_usb_init);
+
+static u64 xlp_usb_dmamask = ~(u32)0;
+
+/* Fix up the IRQs for the USB devices on the XLP SoC PCIe bus */
+static void nlm_usb_fixup_final(struct pci_dev *dev)
+{
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ switch (dev->devfn) {
+ case 0x10:
+ dev->irq = PIC_EHCI_0_IRQ;
+ break;
+ case 0x11:
+ dev->irq = PIC_OHCI_0_IRQ;
+ break;
+ case 0x12:
+ dev->irq = PIC_OHCI_1_IRQ;
+ break;
+ case 0x13:
+ dev->irq = PIC_EHCI_1_IRQ;
+ break;
+ case 0x14:
+ dev->irq = PIC_OHCI_2_IRQ;
+ break;
+ case 0x15:
+ dev->irq = PIC_OHCI_3_IRQ;
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
+ nlm_usb_fixup_final);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_OHCI,
+ nlm_usb_fixup_final);
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index f01e4d7a0600..c287dea87570 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -1,2 +1,2 @@
-obj-y += setup.o platform.o
+obj-y += setup.o platform.o platform-flash.o
obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
new file mode 100644
index 000000000000..340ab1601c42
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2011, Netlogic Microsystems.
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/resource.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/flash.h>
+#include <asm/netlogic/xlr/bridge.h>
+#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+/*
+ * Default NOR partition layout
+ */
+static struct mtd_partition xlr_nor_parts[] = {
+ {
+ .name = "User FS",
+ .offset = 0x800000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+/*
+ * Default NAND partition layout
+ */
+static struct mtd_partition xlr_nand_parts[] = {
+ {
+ .name = "Root Filesystem",
+ .offset = 64 * 64 * 2048,
+ .size = 432 * 64 * 2048,
+ },
+ {
+ .name = "Home Filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+/* Use PHYSMAP flash for NOR */
+struct physmap_flash_data xlr_nor_data = {
+ .width = 2,
+ .parts = xlr_nor_parts,
+ .nr_parts = ARRAY_SIZE(xlr_nor_parts),
+};
+
+static struct resource xlr_nor_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nor_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &xlr_nor_data,
+ },
+ .num_resources = ARRAY_SIZE(xlr_nor_res),
+ .resource = xlr_nor_res,
+};
+
+const char *xlr_part_probes[] = { "cmdlinepart", NULL };
+
+/*
+ * Use "gen_nand" driver for NAND flash
+ *
+ * There seems to be no way to store a private pointer containing
+ * platform-specific info in the gen_nand driver. We will use a global
+ * struct for now, since we currently have only one NAND chip per board.
+ */
+struct xlr_nand_flash_priv {
+ int cs;
+ uint64_t flash_mmio;
+};
+
+static struct xlr_nand_flash_priv nand_priv;
+
+static void xlr_nand_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ if (ctrl & NAND_CLE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_CLE(nand_priv.cs), cmd);
+ else if (ctrl & NAND_ALE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_ALE(nand_priv.cs), cmd);
+}
+
+struct platform_nand_data xlr_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(xlr_nand_parts),
+ .chip_delay = 50,
+ .partitions = xlr_nand_parts,
+ .part_probe_types = xlr_part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = xlr_nand_ctrl,
+ },
+};
+
+static struct resource xlr_nand_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nand_dev = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(xlr_nand_res),
+ .resource = xlr_nand_res,
+ .dev = {
+ .platform_data = &xlr_nand_data,
+ }
+};
+
+/*
+ * XLR/XLS supports up to 8 devices on its FLASH interface. The value in
+ * FLASH_BAR (on the MEM/IO bridge) gives the base for mapping all the
+ * flash devices.
+ * Under this, each flash device has an offset and size given by the
+ * CSBASE_ADDR and CSBASE_MASK registers for the device.
+ *
+ * The CSBASE_ registers are expected to be set up by the bootloader.
+ */
+static void setup_flash_resource(uint64_t flash_mmio,
+ uint64_t flash_map_base, int cs, struct resource *res)
+{
+ u32 base, mask;
+
+ base = nlm_read_reg(flash_mmio, FLASH_CSBASE_ADDR(cs));
+ mask = nlm_read_reg(flash_mmio, FLASH_CSADDR_MASK(cs));
+
+ res->start = flash_map_base + ((unsigned long)base << 16);
+ res->end = res->start + (mask + 1) * 64 * 1024;
+}
+
+static int __init xlr_flash_init(void)
+{
+ uint64_t gpio_mmio, flash_mmio, flash_map_base;
+ u32 gpio_resetcfg, flash_bar;
+ int cs, boot_nand, boot_nor;
+
+ /* Flash address bits 39:24 are in the bridge flash BAR */
+ flash_bar = nlm_read_reg(nlm_io_base, BRIDGE_FLASH_BAR);
+ flash_map_base = (flash_bar & 0xffff0000) << 8;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ flash_mmio = nlm_mmio_base(NETLOGIC_IO_FLASH_OFFSET);
+
+ /* Get the chip reset config */
+ gpio_resetcfg = nlm_read_reg(gpio_mmio, GPIO_PWRON_RESET_CFG_REG);
+
+ /* Check for boot flash type */
+ boot_nor = boot_nand = 0;
+ if (nlm_chip_is_xls()) {
+ /* On XLS, check boot from NAND bit (GPIO reset reg bit 16) */
+ if (gpio_resetcfg & (1 << 16))
+ boot_nand = 1;
+
+ /* check boot from PCMCIA (GPIO reset reg bit 15) */
+ if ((gpio_resetcfg & (1 << 15)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ } else { /* XLR */
+ /* check boot from PCMCIA (bit 16 in GPIO reset on XLR) */
+ if ((gpio_resetcfg & (1 << 16)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ }
+
+ /* boot flash at chip select 0 */
+ cs = 0;
+
+ if (boot_nand) {
+ nand_priv.cs = cs;
+ nand_priv.flash_mmio = flash_mmio;
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nand_res);
+
+ /* Initialize NAND flash at CS 0 */
+ nlm_write_reg(flash_mmio, FLASH_CSDEV_PARM(cs),
+ FLASH_NAND_CSDEV_PARAM);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMA(cs),
+ FLASH_NAND_CSTIME_PARAMA);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMB(cs),
+ FLASH_NAND_CSTIME_PARAMB);
+
+ pr_info("ChipSelect %d: NAND Flash %pR\n", cs, xlr_nand_res);
+ return platform_device_register(&xlr_nand_dev);
+ }
+
+ if (boot_nor) {
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nor_res);
+ pr_info("ChipSelect %d: NOR Flash %pR\n", cs, xlr_nor_res);
+ return platform_device_register(&xlr_nor_dev);
+ }
+ return 0;
+}
+
+arch_initcall(xlr_flash_init);
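
As an aside on the chip-select layout described in the comment above: CSBASE_ADDR and CSBASE_MASK are in 64 KiB units, so the window arithmetic in setup_flash_resource() can be checked with a small stand-alone sketch. The show_flash_window() helper and the register values below are illustrative only, not values read from real hardware.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as setup_flash_resource(): base and mask are in
 * 64 KiB units, so shift the base by 16 and size the window as
 * (mask + 1) * 64 KiB. */
static void show_flash_window(uint64_t flash_map_base,
			      uint32_t csbase, uint32_t csmask)
{
	uint64_t start = flash_map_base + ((uint64_t)csbase << 16);
	uint64_t size = ((uint64_t)csmask + 1) * 64 * 1024;

	printf("start 0x%llx, size %llu KiB\n",
	       (unsigned long long)start,
	       (unsigned long long)(size / 1024));
}

int main(void)
{
	/* Hypothetical values: flash map base 0x1c000000, CSBASE_ADDR = 0,
	 * CSBASE_MASK = 0xff -> a 16 MiB window at the flash base. */
	show_flash_window(0x1c000000ULL, 0x0, 0xff);
	return 0;
}
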
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
index eab64b45dffd..71b44d82621d 100644
--- a/arch/mips/netlogic/xlr/platform.c
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -14,6 +14,7 @@
#include <linux/resource.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
+#include <linux/i2c.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlr/iomap.h>
@@ -97,3 +98,142 @@ static int __init nlm_uart_init(void)
}
arch_initcall(nlm_uart_init);
+
+#ifdef CONFIG_USB
+/* Platform USB devices, only on XLS chips */
+static u64 xls_usb_dmamask = ~(u32)0;
+#define USB_PLATFORM_DEV(n, i, irq) \
+ { \
+ .name = n, \
+ .id = i, \
+ .num_resources = 2, \
+ .dev = { \
+ .dma_mask = &xls_usb_dmamask, \
+ .coherent_dma_mask = 0xffffffff, \
+ }, \
+ .resource = (struct resource[]) { \
+ { \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = irq, \
+ .end = irq, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ }
+
+static struct platform_device xls_usb_ehci_device =
+ USB_PLATFORM_DEV("ehci-xls", 0, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_0 =
+ USB_PLATFORM_DEV("ohci-xls-0", 1, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_1 =
+ USB_PLATFORM_DEV("ohci-xls-1", 2, PIC_USB_IRQ);
+
+static struct platform_device *xls_platform_devices[] = {
+ &xls_usb_ehci_device,
+ &xls_usb_ohci_device_0,
+ &xls_usb_ohci_device_1,
+};
+
+int xls_platform_usb_init(void)
+{
+ uint64_t usb_mmio, gpio_mmio;
+ unsigned long memres;
+ uint32_t val;
+
+ if (!nlm_chip_is_xls())
+ return 0;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_1_OFFSET);
+
+ /* Clear Rogue Phy INTs */
+ nlm_write_reg(usb_mmio, 49, 0x10000000);
+ /* Enable all interrupts */
+ nlm_write_reg(usb_mmio, 50, 0x1f000000);
+
+ /* Enable ports */
+ nlm_write_reg(usb_mmio, 1, 0x07000500);
+
+ val = nlm_read_reg(gpio_mmio, 21);
+ if (((val >> 22) & 0x01) == 0) {
+ pr_info("Detected USB Device mode - Not supported!\n");
+ nlm_write_reg(usb_mmio, 0, 0x01000000);
+ return 0;
+ }
+
+ pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
+ /* Clear reset, host mode */
+ nlm_write_reg(usb_mmio, 0, 0x02000000);
+
+ /* Memory resource for various XLS usb ports */
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
+ memres = CPHYSADDR((unsigned long)usb_mmio);
+ xls_usb_ehci_device.resource[0].start = memres;
+ xls_usb_ehci_device.resource[0].end = memres + 0x400 - 1;
+
+ memres += 0x400;
+ xls_usb_ohci_device_0.resource[0].start = memres;
+ xls_usb_ohci_device_0.resource[0].end = memres + 0x400 - 1;
+
+ memres += 0x400;
+ xls_usb_ohci_device_1.resource[0].start = memres;
+ xls_usb_ohci_device_1.resource[0].end = memres + 0x400 - 1;
+
+ return platform_add_devices(xls_platform_devices,
+ ARRAY_SIZE(xls_platform_devices));
+}
+
+arch_initcall(xls_platform_usb_init);
+#endif
+
+#ifdef CONFIG_I2C
+static struct i2c_board_info nlm_i2c_board_info1[] __initdata = {
+ /* All XLR boards have this RTC and Max6657 Temp Chip */
+ [0] = {
+ .type = "ds1374",
+ .addr = 0x68
+ },
+ [1] = {
+ .type = "lm90",
+ .addr = 0x4c
+ },
+};
+
+static struct resource i2c_resources[] = {
+ [0] = {
+ .start = 0, /* filled at init */
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device nlm_xlr_i2c_1 = {
+ .name = "xlr-i2cbus",
+ .id = 1,
+ .num_resources = 1,
+ .resource = i2c_resources,
+};
+
+static int __init nlm_i2c_init(void)
+{
+ int err = 0;
+ unsigned int offset;
+
+ /* I2C bus 0 does not have any useful devices, configure only bus 1 */
+ offset = NETLOGIC_IO_I2C_1_OFFSET;
+ nlm_xlr_i2c_1.resource[0].start = CPHYSADDR(nlm_mmio_base(offset));
+ nlm_xlr_i2c_1.resource[0].end = nlm_xlr_i2c_1.resource[0].start + 0xfff;
+
+ platform_device_register(&nlm_xlr_i2c_1);
+
+ err = i2c_register_board_info(1, nlm_i2c_board_info1,
+ ARRAY_SIZE(nlm_i2c_board_info1));
+ if (err < 0)
+ pr_err("nlm-i2c: cannot register board I2C devices\n");
+ return err;
+}
+
+arch_initcall(nlm_i2c_init);
+#endif
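
The USB resource setup above carves one MMIO block into three 0x400-byte windows (EHCI first, then the two OHCI controllers). Below is a stand-alone sketch of that stride calculation; the base address is a placeholder, not the real physical address derived from NETLOGIC_IO_USB_0_OFFSET.

#include <stdio.h>
#include <stdint.h>

struct mem_window {
	const char *name;
	uint64_t start;
	uint64_t end;
};

int main(void)
{
	/* Placeholder base; the kernel code computes it with
	 * CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET)). */
	uint64_t base = 0x1ef24000ULL;
	struct mem_window win[3] = {
		{ "ehci-xls",   0, 0 },
		{ "ohci-xls-0", 0, 0 },
		{ "ohci-xls-1", 0, 0 },
	};

	for (int i = 0; i < 3; i++) {
		win[i].start = base + i * 0x400;	/* 1 KiB per controller */
		win[i].end = win[i].start + 0x400 - 1;
		printf("%-10s 0x%llx-0x%llx\n", win[i].name,
		       (unsigned long long)win[i].start,
		       (unsigned long long)win[i].end);
	}
	return 0;
}
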
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index c9d066dedc4e..81b1d311834f 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -85,7 +85,7 @@ static void nlm_linux_exit(void)
gpiobase = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
/* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
- nlm_write_reg(gpiobase, NETLOGIC_GPIO_SWRESET_REG, 1);
+ nlm_write_reg(gpiobase, GPIO_SWRESET_REG, 1);
for ( ; ; )
cpu_wait();
}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index b6e378211a2c..f80480a5a032 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -85,6 +85,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_34K:
case CPU_1004K:
case CPU_74K:
+ case CPU_LOONGSON1:
case CPU_SB1:
case CPU_SB1A:
case CPU_R10000:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 4d80a856048d..28ea1a4cc576 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -339,12 +339,6 @@ static int __init mipsxx_init(void)
break;
case CPU_1004K:
-#if 0
- /* FIXME: report as 34K for now */
- op_model_mipsxx_ops.cpu_type = "mips/1004K";
- break;
-#endif
-
case CPU_34K:
op_model_mipsxx_ops.cpu_type = "mips/34K";
break;
@@ -374,6 +368,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/sb1";
break;
+ case CPU_LOONGSON1:
+ op_model_mipsxx_ops.cpu_type = "mips/loongson1";
+ break;
+
default:
printk(KERN_ERR "Profiling unsupported for this CPU\n");
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c703f43a9914..e13a71cbc3c7 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
obj-$(CONFIG_CPU_XLR) += pci-xlr.o
+obj-$(CONFIG_CPU_XLP) += pci-xlp.o
ifdef CONFIG_PCI_MSI
obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index 9553b14002dd..3e7ce65d776c 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -37,7 +37,7 @@
#define VIA_COBALT_BRD_ID_REG 0x94
#define VIA_COBALT_BRD_REG_to_ID(reg) ((unsigned char)(reg) >> 4)
-static void qube_raq_galileo_early_fixup(struct pci_dev *dev)
+static void __devinit qube_raq_galileo_early_fixup(struct pci_dev *dev)
{
if (dev->devfn == PCI_DEVFN(0, 0) &&
(dev->class >> 8) == PCI_CLASS_MEMORY_OTHER) {
@@ -51,7 +51,7 @@ static void qube_raq_galileo_early_fixup(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
qube_raq_galileo_early_fixup);
-static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
+static void __devinit qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
{
unsigned short cfgword;
unsigned char lt;
@@ -74,7 +74,7 @@ static void qube_raq_via_bmIDE_fixup(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1,
qube_raq_via_bmIDE_fixup);
-static void qube_raq_galileo_fixup(struct pci_dev *dev)
+static void __devinit qube_raq_galileo_fixup(struct pci_dev *dev)
{
if (dev->devfn != PCI_DEVFN(0, 0))
return;
@@ -129,7 +129,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_GT64111,
int cobalt_board_id;
-static void qube_raq_via_board_id_fixup(struct pci_dev *dev)
+static void __devinit qube_raq_via_board_id_fixup(struct pci_dev *dev)
{
u8 id;
int retval;
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 70073c98ed32..819622f93e9c 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -101,3 +101,17 @@ static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB,
malta_piix_func1_fixup);
+
+/* Enable PCI 2.1 compatibility in PIIX4 */
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
+{
+ u8 odlc, ndlc;
+
+ (void) pci_read_config_byte(dev, 0x82, &odlc);
+ /* Enable passive releases and delayed transaction */
+ ndlc = odlc | 7;
+ (void) pci_write_config_byte(dev, 0x82, ndlc);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
+ quirk_dlcsetup);
diff --git a/arch/mips/pci/fixup-rc32434.c b/arch/mips/pci/fixup-rc32434.c
index 3d86823d03a0..76bb1be99d43 100644
--- a/arch/mips/pci/fixup-rc32434.c
+++ b/arch/mips/pci/fixup-rc32434.c
@@ -47,7 +47,7 @@ int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq + GROUP4_IRQ_BASE + 4;
}
-static void rc32434_pci_early_fixup(struct pci_dev *dev)
+static void __devinit rc32434_pci_early_fixup(struct pci_dev *dev)
{
if (PCI_SLOT(dev->devfn) == 6 && dev->bus->number == 0) {
/* disable prefetched memory range */
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 822ae179bc56..65c7bd100486 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -411,7 +411,7 @@ struct pci_ops bcm63xx_cb_ops = {
* only one IO window, so it cannot be shared by PCI and cardbus, use
* fixup to choose and detect unhandled configuration
*/
-static void bcm63xx_fixup(struct pci_dev *dev)
+static void __devinit bcm63xx_fixup(struct pci_dev *dev)
{
static int io_window = -1;
int i, found, new_io_window;
@@ -465,3 +465,64 @@ static void bcm63xx_fixup(struct pci_dev *dev)
DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, bcm63xx_fixup);
#endif
+
+static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
+{
+ switch (bus->number) {
+ case PCIE_BUS_BRIDGE:
+ return (PCI_SLOT(devfn) == 0);
+ case PCIE_BUS_DEVICE:
+ if (PCI_SLOT(devfn) == 0)
+ return bcm_pcie_readl(PCIE_DLSTATUS_REG)
+ & DLSTATUS_PHYLINKUP;
+ default:
+ return false;
+ }
+}
+
+static int bcm63xx_pcie_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data;
+ u32 reg = where & ~3;
+
+ if (!bcm63xx_pcie_can_access(bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == PCIE_BUS_DEVICE)
+ reg += PCIE_DEVICE_OFFSET;
+
+ data = bcm_pcie_readl(reg);
+
+ *val = postprocess_read(data, where, size);
+
+ return PCIBIOS_SUCCESSFUL;
+
+}
+
+static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data;
+ u32 reg = where & ~3;
+
+ if (!bcm63xx_pcie_can_access(bus, devfn))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == PCIE_BUS_DEVICE)
+ reg += PCIE_DEVICE_OFFSET;
+
+
+ data = bcm_pcie_readl(reg);
+
+ data = preprocess_write(data, val, where, size);
+ bcm_pcie_writel(data, reg);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+struct pci_ops bcm63xx_pcie_ops = {
+ .read = bcm63xx_pcie_read,
+ .write = bcm63xx_pcie_write
+};
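
The new PCIe accessors reuse postprocess_read() and preprocess_write(), which are presumably defined earlier in ops-bcm63xx.c for the existing PCI/cardbus ops and are not shown in this hunk. The following is a stand-alone sketch of that shift-and-mask pattern for sub-word config accesses; demo_postprocess_read() and demo_preprocess_write() are illustrative stand-ins, not the kernel helpers.

#include <stdio.h>
#include <stdint.h>

/* Extract a 1-, 2- or 4-byte value from a 32-bit register read. */
static uint32_t demo_postprocess_read(uint32_t data, int where, int size)
{
	int shift = (where & 3) * 8;

	if (size == 1)
		return (data >> shift) & 0xff;
	if (size == 2)
		return (data >> shift) & 0xffff;
	return data;
}

/* Merge a 1-, 2- or 4-byte value into a 32-bit register image. */
static uint32_t demo_preprocess_write(uint32_t orig, uint32_t val,
				      int where, int size)
{
	int shift = (where & 3) * 8;

	if (size == 1)
		return (orig & ~(0xffu << shift)) | ((val & 0xff) << shift);
	if (size == 2)
		return (orig & ~(0xffffu << shift)) | ((val & 0xffff) << shift);
	return val;
}

int main(void)
{
	uint32_t reg = 0x12345678;

	/* Read the 16-bit field at byte offset 2 of the word: 0x1234. */
	printf("read16@2  = 0x%x\n", demo_postprocess_read(reg, 2, 2));
	/* Write 0xabcd into that field: the word becomes 0xabcd5678. */
	printf("write16@2 = 0x%x\n", demo_preprocess_write(reg, 0xabcd, 2, 2));
	return 0;
}
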
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 414a7459858d..86d77a666458 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -23,9 +23,12 @@
#define AR724X_PCI_MEM_BASE 0x10000000
#define AR724X_PCI_MEM_SIZE 0x08000000
+#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_RESET_LINK_UP BIT(0)
+
#define AR724X_PCI_INT_DEV0 BIT(14)
#define AR724X_PCI_IRQ_COUNT 1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
static u32 ar724x_pci_bar0_value;
static bool ar724x_pci_bar0_is_cached;
+static bool ar724x_pci_link_up;
+
+static inline bool ar724x_pci_check_link(void)
+{
+ u32 reset;
+
+ reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+ return reset & AR724X_PCI_RESET_LINK_UP;
+}
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
void __iomem *base;
u32 data;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
u32 data;
int s;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
if (ar724x_pci_ctrl_base == NULL)
goto err_unmap_devcfg;
+ ar724x_pci_link_up = ar724x_pci_check_link();
+ if (!ar724x_pci_link_up)
+ pr_warn("ar724x: PCIe link is down\n");
+
ar724x_pci_irq_init(irq);
register_pci_controller(&ar724x_pci_controller);
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index 39eb7c417e2f..8a48139d219c 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/bootinfo.h>
#include "pci-bcm63xx.h"
@@ -71,6 +72,26 @@ struct pci_controller bcm63xx_cb_controller = {
};
#endif
+static struct resource bcm_pcie_mem_resource = {
+ .name = "bcm63xx PCIe memory space",
+ .start = BCM_PCIE_MEM_BASE_PA,
+ .end = BCM_PCIE_MEM_END_PA,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource bcm_pcie_io_resource = {
+ .name = "bcm63xx PCIe IO space",
+ .start = 0,
+ .end = 0,
+ .flags = 0,
+};
+
+struct pci_controller bcm63xx_pcie_controller = {
+ .pci_ops = &bcm63xx_pcie_ops,
+ .io_resource = &bcm_pcie_io_resource,
+ .mem_resource = &bcm_pcie_mem_resource,
+};
+
static u32 bcm63xx_int_cfg_readl(u32 reg)
{
u32 tmp;
@@ -94,17 +115,99 @@ static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
void __iomem *pci_iospace_start;
-static int __init bcm63xx_pci_init(void)
+static void __init bcm63xx_reset_pcie(void)
{
- unsigned int mem_size;
u32 val;
- if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358() && !BCMCPU_IS_6368())
- return -ENODEV;
+ /* enable clock */
+ val = bcm_perf_readl(PERF_CKCTL_REG);
+ val |= CKCTL_6328_PCIE_EN;
+ bcm_perf_writel(val, PERF_CKCTL_REG);
+
+ /* enable SERDES */
+ val = bcm_misc_readl(MISC_SERDES_CTRL_REG);
+ val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN;
+ bcm_misc_writel(val, MISC_SERDES_CTRL_REG);
+
+ /* reset the PCIe core */
+ val = bcm_perf_readl(PERF_SOFTRESET_6328_REG);
+
+ val &= ~SOFTRESET_6328_PCIE_MASK;
+ val &= ~SOFTRESET_6328_PCIE_CORE_MASK;
+ val &= ~SOFTRESET_6328_PCIE_HARD_MASK;
+ val &= ~SOFTRESET_6328_PCIE_EXT_MASK;
+ bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ mdelay(10);
+
+ val |= SOFTRESET_6328_PCIE_MASK;
+ val |= SOFTRESET_6328_PCIE_CORE_MASK;
+ val |= SOFTRESET_6328_PCIE_HARD_MASK;
+ bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ mdelay(10);
+
+ val |= SOFTRESET_6328_PCIE_EXT_MASK;
+ bcm_perf_writel(val, PERF_SOFTRESET_6328_REG);
+ mdelay(200);
+}
- if (!bcm63xx_pci_enabled)
- return -ENODEV;
+static int __init bcm63xx_register_pcie(void)
+{
+ u32 val;
+ bcm63xx_reset_pcie();
+
+ /* configure the PCIe bridge */
+ val = bcm_pcie_readl(PCIE_BRIDGE_OPT1_REG);
+ val |= OPT1_RD_BE_OPT_EN;
+ val |= OPT1_RD_REPLY_BE_FIX_EN;
+ val |= OPT1_PCIE_BRIDGE_HOLE_DET_EN;
+ val |= OPT1_L1_INT_STATUS_MASK_POL;
+ bcm_pcie_writel(val, PCIE_BRIDGE_OPT1_REG);
+
+ /* setup the interrupts */
+ val = bcm_pcie_readl(PCIE_BRIDGE_RC_INT_MASK_REG);
+ val |= PCIE_RC_INT_A | PCIE_RC_INT_B | PCIE_RC_INT_C | PCIE_RC_INT_D;
+ bcm_pcie_writel(val, PCIE_BRIDGE_RC_INT_MASK_REG);
+
+ val = bcm_pcie_readl(PCIE_BRIDGE_OPT2_REG);
+ /* enable credit checking and error checking */
+ val |= OPT2_TX_CREDIT_CHK_EN;
+ val |= OPT2_UBUS_UR_DECODE_DIS;
+
+ /* set device bus/func for the pcie device */
+ val |= (PCIE_BUS_DEVICE << OPT2_CFG_TYPE1_BUS_NO_SHIFT);
+ val |= OPT2_CFG_TYPE1_BD_SEL;
+ bcm_pcie_writel(val, PCIE_BRIDGE_OPT2_REG);
+
+ /* setup class code as bridge */
+ val = bcm_pcie_readl(PCIE_IDVAL3_REG);
+ val &= ~IDVAL3_CLASS_CODE_MASK;
+ val |= (PCI_CLASS_BRIDGE_PCI << IDVAL3_SUBCLASS_SHIFT);
+ bcm_pcie_writel(val, PCIE_IDVAL3_REG);
+
+ /* disable bar1 size */
+ val = bcm_pcie_readl(PCIE_CONFIG2_REG);
+ val &= ~CONFIG2_BAR1_SIZE_MASK;
+ bcm_pcie_writel(val, PCIE_CONFIG2_REG);
+
+ /* set bar0 to little endian */
+ val = (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_BASE_SHIFT;
+ val |= (BCM_PCIE_MEM_BASE_PA >> 20) << BASEMASK_MASK_SHIFT;
+ val |= BASEMASK_REMAP_EN;
+ bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_BASEMASK_REG);
+
+ val = (BCM_PCIE_MEM_BASE_PA >> 20) << REBASE_ADDR_BASE_SHIFT;
+ bcm_pcie_writel(val, PCIE_BRIDGE_BAR0_REBASE_ADDR_REG);
+
+ register_pci_controller(&bcm63xx_pcie_controller);
+
+ return 0;
+}
+
+static int __init bcm63xx_register_pci(void)
+{
+ unsigned int mem_size;
+ u32 val;
/*
 * configuration accesses are done through IO space; remap the
 * first 4 bytes so the CPU can access them.
@@ -221,4 +324,22 @@ static int __init bcm63xx_pci_init(void)
return 0;
}
+
+static int __init bcm63xx_pci_init(void)
+{
+ if (!bcm63xx_pci_enabled)
+ return -ENODEV;
+
+ switch (bcm63xx_get_cpu_id()) {
+ case BCM6328_CPU_ID:
+ return bcm63xx_register_pcie();
+ case BCM6348_CPU_ID:
+ case BCM6358_CPU_ID:
+ case BCM6368_CPU_ID:
+ return bcm63xx_register_pci();
+ default:
+ return -ENODEV;
+ }
+}
+
arch_initcall(bcm63xx_pci_init);
diff --git a/arch/mips/pci/pci-bcm63xx.h b/arch/mips/pci/pci-bcm63xx.h
index a6e594ef3d6a..e6736d558ac7 100644
--- a/arch/mips/pci/pci-bcm63xx.h
+++ b/arch/mips/pci/pci-bcm63xx.h
@@ -13,11 +13,16 @@
*/
#define CARDBUS_PCI_IDSEL 0x8
+
+#define PCIE_BUS_BRIDGE 0
+#define PCIE_BUS_DEVICE 1
+
/*
* defined in ops-bcm63xx.c
*/
extern struct pci_ops bcm63xx_pci_ops;
extern struct pci_ops bcm63xx_cb_ops;
+extern struct pci_ops bcm63xx_pcie_ops;
/*
* defined in pci-bcm63xx.c
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c
new file mode 100644
index 000000000000..140557a20488
--- /dev/null
+++ b/arch/mips/pci/pci-xlp.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/msi.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/console.h>
+
+#include <asm/io.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pcibus.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+
+static void *pci_config_base;
+
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
+
+/* PCI ops */
+static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 data;
+ u32 *cfgaddr;
+
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where & ~3));
+ data = *cfgaddr;
+ return data;
+}
+
+static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 data)
+{
+ u32 *cfgaddr;
+
+ cfgaddr = (u32 *)(pci_config_base +
+ pci_cfg_addr(bus->number, devfn, where & ~3));
+ *cfgaddr = data;
+}
+
+static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ *val = (data >> ((where & 3) << 3)) & 0xff;
+ else if (size == 2)
+ *val = (data >> ((where & 3) << 3)) & 0xffff;
+ else
+ *val = data;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ u32 data;
+
+ if ((size == 2) && (where & 1))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ else if ((size == 4) && (where & 3))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ data = pci_cfg_read_32bit(bus, devfn, where);
+
+ if (size == 1)
+ data = (data & ~(0xff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else if (size == 2)
+ data = (data & ~(0xffff << ((where & 3) << 3))) |
+ (val << ((where & 3) << 3));
+ else
+ data = val;
+
+ pci_cfg_write_32bit(bus, devfn, where, data);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops nlm_pci_ops = {
+ .read = nlm_pcibios_read,
+ .write = nlm_pcibios_write
+};
+
+static struct resource nlm_pci_mem_resource = {
+ .name = "XLP PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource nlm_pci_io_resource = {
+ .name = "XLP IO MEM",
+ .start = 0x14000000UL, /* 64MB PCI IO @ 0x1400_0000 */
+ .end = 0x17ffffffUL,
+ .flags = IORESOURCE_IO,
+};
+
+struct pci_controller nlm_pci_controller = {
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
+};
+
+static int get_irq_vector(const struct pci_dev *dev)
+{
+ /*
+ * For XLP PCIe, there is an IRQ per link; find out which
+ * link the device is on to assign its interrupt.
+ */
+ if (dev->bus->self == NULL)
+ return 0;
+
+ switch (dev->bus->self->devfn) {
+ case 0x8:
+ return PIC_PCIE_LINK_0_IRQ;
+ case 0x9:
+ return PIC_PCIE_LINK_1_IRQ;
+ case 0xa:
+ return PIC_PCIE_LINK_2_IRQ;
+ case 0xb:
+ return PIC_PCIE_LINK_3_IRQ;
+ }
+ WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+ return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return get_irq_vector(dev);
+}
+
+/* Do platform specific device initialization at pci_enable_device() time */
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ return 0;
+}
+
+static int xlp_enable_pci_bswap(void)
+{
+ uint64_t pciebase, sysbase;
+ int node, i;
+ u32 reg;
+
+ /* Chip-0 so node set to 0 */
+ node = 0;
+ sysbase = nlm_get_bridge_regbase(node);
+ /*
+ * Enable byte swap in hardware. Program each link's PCIe SWAP regions
+ * from the link's address ranges.
+ */
+ for (i = 0; i < 4; i++) {
+ pciebase = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, i));
+ if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
+ continue;
+
+ reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_BASE0 + i);
+ nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+
+ reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_LIMIT0 + i);
+ nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_LIM,
+ reg | 0xfff);
+
+ reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_BASE0 + i);
+ nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_BASE, reg);
+
+ reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_LIMIT0 + i);
+ nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
+ }
+ return 0;
+}
+
+static int __init pcibios_init(void)
+{
+ /* Firmware assigns PCI resources */
+ pci_set_flags(PCI_PROBE_ONLY);
+ pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20);
+
+ /* Extend IO port for memory mapped io */
+ ioport_resource.start = 0;
+ ioport_resource.end = ~0;
+
+ xlp_enable_pci_bswap();
+ set_io_port_base(CKSEG1);
+ nlm_pci_controller.io_map_base = CKSEG1;
+
+ register_pci_controller(&nlm_pci_controller);
+ pr_info("XLP PCIe Controller %pR%pR.\n", &nlm_pci_io_resource,
+ &nlm_pci_mem_resource);
+
+ return 0;
+}
+arch_initcall(pcibios_init);
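
The ECFG address packing used by pci_cfg_addr() above puts the bus number in bits 20 and up, devfn in bits 19:12 and the register offset in the low 12 bits. Here is a stand-alone copy of that packing with arbitrary example values, for illustration only.

#include <stdio.h>
#include <stdint.h>

/* Same packing as the pci_cfg_addr() macro in pci-xlp.c. */
static uint32_t cfg_addr(unsigned int bus, unsigned int devfn,
			 unsigned int off)
{
	return (bus << 20) | (devfn << 12) | off;
}

int main(void)
{
	/* Bus 1, slot 2 function 0 (devfn = (2 << 3) | 0 = 0x10), reading
	 * the 32-bit word at config offset 0x04 lands at ECFG offset
	 * 0x110004 inside the mapped window. */
	printf("0x%x\n", cfg_addr(1, 0x10, 0x04));
	return 0;
}
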
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 172af1cd5867..18af021d289a 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -375,7 +375,3 @@ static int __init pcibios_init(void)
}
arch_initcall(pcibios_init);
-
-struct pci_fixup pcibios_fixups[] = {
- {0}
-};
diff --git a/arch/mips/pnx833x/stb22x/board.c b/arch/mips/pnx833x/stb22x/board.c
index 644eb7c3210f..4b328ac43050 100644
--- a/arch/mips/pnx833x/stb22x/board.c
+++ b/arch/mips/pnx833x/stb22x/board.c
@@ -91,7 +91,7 @@ void __init pnx833x_board_setup(void)
pnx833x_gpio_select_function_alt(32);
pnx833x_gpio_select_function_alt(33);
-#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM)
/* Setup MIU for NAND access on CS0...
*
* (it seems that we must also configure CS1 for reliable operation,
@@ -117,7 +117,7 @@ void __init pnx833x_board_setup(void)
pnx833x_gpio_select_output(5);
pnx833x_gpio_write(1, 5);
-#elif defined(CONFIG_MTD_CFI) || defined(CONFIG_MTD_CFI_MODULE)
+#elif IS_ENABLED(CONFIG_MTD_CFI)
/* Set up MIU for 16-bit NOR access on CS0 and CS1... */
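
This hunk (and several later ones in the txx9 and rbtx4939 files) replaces the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests with IS_ENABLED(CONFIG_FOO), which is true whether the option is built in or modular. The sketch below is a simplified re-creation of the trick from include/linux/kconfig.h, exercised with a made-up CONFIG_DEMO symbol; it is for illustration, not the kernel's exact header.

#include <stdio.h>

/*
 * A config symbol set to y or m is defined to 1 by kconfig; the
 * placeholder trick turns "defined to 1" into a 0/1 expression that
 * can be used outside of #ifdef.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

/* Pretend kconfig output for CONFIG_DEMO=m. */
#define CONFIG_DEMO_MODULE 1

int main(void)
{
	/* Replaces: defined(CONFIG_DEMO) || defined(CONFIG_DEMO_MODULE) */
	printf("IS_ENABLED(CONFIG_DEMO) = %d\n", IS_ENABLED(CONFIG_DEMO));
	return 0;
}
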
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index b105eca3c020..cd8fcab6b054 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -401,6 +401,7 @@ static void __init node_mem_init(cnodeid_t node)
* Allocate the node data structures on the node first.
*/
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
+ memset(__node_data[node], 0, PAGE_SIZE);
NODE_DATA(node)->bdata = &bootmem_node_data[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig
index 852ae4bb7a85..6d40bc783459 100644
--- a/arch/mips/txx9/Kconfig
+++ b/arch/mips/txx9/Kconfig
@@ -20,6 +20,7 @@ config MACH_TXX9
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_BIG_ENDIAN
+ select HAVE_CLK
config TOSHIBA_JMR3927
bool "Toshiba JMR-TX3927 board"
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 125db323ab1e..4efd9185f294 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -304,7 +304,7 @@ static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
smsc_fdc37m81x_config_end();
}
-static void quirk_slc90e66_ide(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_ide(struct pci_dev *dev)
{
unsigned char dat;
int regs[2] = {0x41, 0x43};
@@ -339,7 +339,7 @@ static void quirk_slc90e66_ide(struct pci_dev *dev)
}
#endif /* CONFIG_TOSHIBA_FPCIB0 */
-static void tc35815_fixup(struct pci_dev *dev)
+static void __devinit tc35815_fixup(struct pci_dev *dev)
{
/* This device may have PM registers but they are not supported. */
if (dev->pm_cap) {
@@ -348,7 +348,7 @@ static void tc35815_fixup(struct pci_dev *dev)
}
}
-static void final_fixup(struct pci_dev *dev)
+static void __devinit final_fixup(struct pci_dev *dev)
{
unsigned char bist;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index ae77a7916c03..560fe8991753 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -632,7 +632,7 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
unsigned long size,
const struct physmap_flash_data *pdata)
{
-#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP)
struct resource res = {
.start = addr,
.end = addr + size - 1,
@@ -670,8 +670,7 @@ void __init txx9_physmap_flash_init(int no, unsigned long addr,
void __init txx9_ndfmc_init(unsigned long baseaddr,
const struct txx9ndfmc_platform_data *pdata)
{
-#if defined(CONFIG_MTD_NAND_TXX9NDFMC) || \
- defined(CONFIG_MTD_NAND_TXX9NDFMC_MODULE)
+#if IS_ENABLED(CONFIG_MTD_NAND_TXX9NDFMC)
struct resource res = {
.start = baseaddr,
.end = baseaddr + 0x1000 - 1,
@@ -687,7 +686,7 @@ void __init txx9_ndfmc_init(unsigned long baseaddr,
#endif
}
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_GPIO)
static DEFINE_SPINLOCK(txx9_iocled_lock);
#define TXX9_IOCLED_MAXLEDS 8
@@ -810,7 +809,7 @@ void __init txx9_iocled_init(unsigned long baseaddr,
void __init txx9_dmac_init(int id, unsigned long baseaddr, int irq,
const struct txx9dmac_platform_data *pdata)
{
-#if defined(CONFIG_TXX9_DMAC) || defined(CONFIG_TXX9_DMAC_MODULE)
+#if IS_ENABLED(CONFIG_TXX9_DMAC)
struct resource res[] = {
{
.start = baseaddr,
@@ -866,8 +865,7 @@ void __init txx9_aclc_init(unsigned long baseaddr, int irq,
unsigned int dma_chan_out,
unsigned int dma_chan_in)
{
-#if defined(CONFIG_SND_SOC_TXX9ACLC) || \
- defined(CONFIG_SND_SOC_TXX9ACLC_MODULE)
+#if IS_ENABLED(CONFIG_SND_SOC_TXX9ACLC)
unsigned int dma_base = dmac_id * TXX9_DMA_MAX_NR_CHANNELS;
struct resource res[] = {
{
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 6567895d1f59..5ff7a9584daf 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -317,7 +317,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
}
}
-#if defined(CONFIG_TC35815) || defined(CONFIG_TC35815_MODULE)
+#if IS_ENABLED(CONFIG_TC35815)
static u32 tx4939_get_eth_speed(struct net_device *dev)
{
struct ethtool_cmd cmd;
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 2ad8973ba13d..e15641d93092 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -40,8 +40,7 @@ static void __init rbtx4939_time_init(void)
tx4939_time_init(0);
}
-#if defined(__BIG_ENDIAN) && \
- (defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE))
+#if defined(__BIG_ENDIAN) && IS_ENABLED(CONFIG_SMC91X)
#define HAVE_RBTX4939_IOSWAB
#define IS_CE1_ADDR(addr) \
((((unsigned long)(addr) - IO_BASE) & 0xfff00000) == TXX9_CE(1))
@@ -187,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
#define RBTX4939_MAX_7SEGLEDS 8
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
static u8 led_val[RBTX4939_MAX_7SEGLEDS];
struct rbtx4939_led_data {
struct led_classdev cdev;
@@ -263,7 +262,7 @@ static inline void rbtx4939_led_setup(void)
static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
{
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
unsigned long flags;
local_irq_save(flags);
/* bit7: reserved for LED class */
@@ -287,7 +286,7 @@ static void rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
__rbtx4939_7segled_putc(pos, val);
}
-#if defined(CONFIG_MTD_RBTX4939) || defined(CONFIG_MTD_RBTX4939_MODULE)
+#if IS_ENABLED(CONFIG_MTD_RBTX4939)
/* special mapping for boot rom */
static unsigned long rbtx4939_flash_fixup_ofs(unsigned long ofs)
{
@@ -463,7 +462,7 @@ static void __init rbtx4939_device_init(void)
.flags = SMC91X_USE_16BIT,
};
struct platform_device *pdev;
-#if defined(CONFIG_TC35815) || defined(CONFIG_TC35815_MODULE)
+#if IS_ENABLED(CONFIG_TC35815)
int i, j;
unsigned char ethaddr[2][6];
u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 687f9b4a2ed6..5cfb086b3903 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -3,6 +3,7 @@ config MN10300
select HAVE_OPROFILE
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER
diff --git a/arch/mn10300/include/asm/ipc.h b/arch/mn10300/include/asm/ipc.h
deleted file mode 100644
index a46e3d9c2a3f..000000000000
--- a/arch/mn10300/include/asm/ipc.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipc.h>
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 9051f921cbc7..866eb14749d7 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -358,7 +358,6 @@
/*
* specify the deprecated syscalls we want to support on this arch
*/
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 3f35c38d7b64..0922959663a0 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -13,7 +13,6 @@ generic-y += cacheflush.h
generic-y += checksum.h
generic-y += cmpxchg.h
generic-y += cmpxchg-local.h
-generic-y += cpumask.h
generic-y += cputime.h
generic-y += current.h
generic-y += device.h
@@ -43,7 +42,6 @@ generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
-generic-y += rmap.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
- if (p->personality == PER_HPUX) {
+ if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
long err;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
- if (err == PER_LINUX32)
- err = PER_LINUX;
+ if (personality(err) == PER_LINUX32)
+ err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
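
The parisc fix above compares and rewrites only the personality type bits, leaving flag bits such as ADDR_NO_RANDOMIZE untouched. Below is a stand-alone sketch of that masking; the constants mirror include/uapi/linux/personality.h and the starting value is a made-up example.

#include <stdio.h>

/* Constants as in include/uapi/linux/personality.h. */
#define ADDR_NO_RANDOMIZE	0x0040000
#define PER_MASK		0x00ff
#define PER_LINUX		0x0000
#define PER_LINUX32		0x0008

#define personality(pers)	((pers) & PER_MASK)

int main(void)
{
	unsigned long pers = PER_LINUX | ADDR_NO_RANDOMIZE;

	/* Comparing the full value would make the PER_LINUX test fail
	 * whenever a flag bit is set; comparing only the type bits works. */
	printf("type is PER_LINUX: %d\n", personality(pers) == PER_LINUX);

	/* Rewrite only the type, keeping ADDR_NO_RANDOMIZE intact. */
	pers = (pers & ~PER_MASK) | PER_LINUX32;
	printf("flags preserved:   %d\n", (pers & ADDR_NO_RANDOMIZE) != 0);
	return 0;
}
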
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9a5d3cdc3e12..352f416269ce 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -115,11 +115,13 @@ config PPC
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_IRQ_WORK
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_GENERIC_HARDIRQS
+ select ARCH_WANT_IPC_PARSE_VERSION
select SPARSE_IRQ
select IRQ_PER_CPU
select IRQ_DOMAIN
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 8d35d2c1f694..4f9c9f682ecf 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -345,6 +345,13 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
+ usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ port0;
+ };
/include/ "qoriq-usb2-dr-0.dtsi"
+ usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ };
/include/ "qoriq-sec4.0-0.dtsi"
};
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
index 852e5b27485d..57573bd52caa 100644
--- a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
@@ -56,7 +56,7 @@
ranges = <0x0 0x0 0xffe00000 0x100000>;
};
- pci0: pcie@ffe08000 {
+ pci2: pcie@ffe08000 {
reg = <0 0xffe08000 0 0x1000>;
status = "disabled";
};
@@ -76,7 +76,7 @@
};
};
- pci2: pcie@ffe0a000 {
+ pci0: pcie@ffe0a000 {
reg = <0 0xffe0a000 0 0x1000>;
ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
index b5a56ca51cf7..470247ea68b4 100644
--- a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
@@ -56,7 +56,7 @@
ranges = <0x0 0xf 0xffe00000 0x100000>;
};
- pci0: pcie@fffe08000 {
+ pci2: pcie@fffe08000 {
reg = <0xf 0xffe08000 0 0x1000>;
status = "disabled";
};
@@ -76,7 +76,7 @@
};
};
- pci2: pcie@fffe0a000 {
+ pci0: pcie@fffe0a000 {
reg = <0xf 0xffe0a000 0 0x1000>;
ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 22a215e94162..6cdcadc80c30 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -58,7 +58,7 @@
#size-cells = <1>;
compatible = "spansion,s25sl12801";
reg = <0>;
- spi-max-frequency = <40000000>; /* input clock */
+ spi-max-frequency = <35000000>; /* input clock */
partition@u-boot {
label = "u-boot";
reg = <0x00000000 0x00100000>;
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index f4337bacd0e7..26e541c4662b 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_EMBEDDED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P1023_RDS=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_CPM2=y
-CONFIG_GPIO_MPC8XXX=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_E1000E=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
+CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_FRAME_WARN=8092
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index b1f9597fe312..29bb11ec6c64 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -21,8 +21,8 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_NAMESPACES=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index cbb98c1234fd..8b3d57c1ebe8 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P2041_RDB=y
CONFIG_P3041_DS=y
CONFIG_P4080_DS=y
CONFIG_P5020_DS=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_FSL_LBC=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_HID=m
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_OF=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_RCU_TRACE=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index dd89de8b0b7f..0516e22ca3de 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5aac9a8bc53b..9352e4430c3b 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 03ee911c4577..8b5bda27d248 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_FSL=y
CONFIG_PATA_ALI=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fdfa84dc908f..b0974e7e98ae 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index f2fe0c2b41e4..db27c82e0542 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -279,7 +279,8 @@ CONFIG_HVC_RTAS=y
CONFIG_HVC_BEAT=y
CONFIG_HVCS=m
CONFIG_IBM_BSR=m
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM_PSERIES=m
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
@@ -485,7 +486,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_NX=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 187fb8d53605..1f65b3c9b59a 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -226,7 +226,8 @@ CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
CONFIG_IBM_BSR=m
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=m
+CONFIG_HW_RANDOM_PSERIES=m
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_MAX_RAW_DEVS=1024
@@ -367,7 +368,8 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_NX=m
CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 50d82c8a037f..b3c083de17ad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
& feature);
}
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
#define HBP_NUM 1
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 62678e365ca0..78160874809a 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
extern void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs);
-
+extern int dma_direct_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle,
+ size_t size, struct dma_attrs *attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
#define ARCH_HAS_DMA_MMAP_COHERENT
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index bca8fdcd2542..5acabbd7ac6f 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -10,36 +10,7 @@
* 2 of the License, or (at your option) any later version.
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_PPC_SYNC_PAGE,
- KM_PPC_SYNC_ICACHE,
- KM_KDB,
- KM_TYPE_NR
-};
-
-/*
- * This is a temporary build fix that (so they say on lkml....) should no longer
- * be required after 2.6.33, because of changes planned to the kmap code.
- * Let's try to remove this cruft then.
- */
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define KM_NMI (-1)
-#define KM_NMI_PTE (-1)
-#define KM_IRQ_PTE (-1)
-#endif
+#define KM_TYPE_NR 16
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf5..a8bf5c673a3c 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/cacheflush.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b9..e006f0bdea95 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+ /* Clear i-cache for new pages */
+ struct page *page;
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+}
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 326d33ca55cd..d4f471fb1031 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
+#include <asm/io.h>
struct mpic_msgr {
u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 53b6dfa83344..54b73a28c205 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index d3d1b5efd7eb..bd377a368611 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -389,7 +389,6 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fae..e8995727b1c1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd6..a892680668d8 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
void doorbell_cause_ipi(int cpu, unsigned long data)
{
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index bcfdcd22c766..e4897523de41 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
- dev_info(dev, "Warning: IOMMU window too big for device mask\n");
- dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
- mask, (tbl->it_offset + tbl->it_size) <<
- IOMMU_PAGE_SHIFT);
+ if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
+ dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
return 0;
} else
return 1;
@@ -109,6 +108,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 4ab88dafb235..46943651da23 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
struct dma_map_ops swiotlb_dma_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 289be751cd75..355b9d84b0f8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -67,6 +67,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
+int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+ pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+ return remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
@@ -156,6 +174,7 @@ static inline void dma_direct_sync_single(struct device *dev,
struct dma_map_ops dma_direct_ops = {
.alloc = dma_direct_alloc_coherent,
.free = dma_direct_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
@@ -219,20 +238,3 @@ static int __init dma_init(void)
}
fs_initcall(dma_init);
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t handle, size_t size)
-{
- unsigned long pfn;
-
-#ifdef CONFIG_NOT_COHERENT_CACHE
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
-#else
- pfn = page_to_pfn(virt_to_page(cpu_addr));
-#endif
- return remap_pfn_range(vma, vma->vm_start,
- pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
-}
-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 5207d5a405e2..ead5016b02d0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -89,10 +89,14 @@ crit_transfer_to_handler:
mfspr r0,SPRN_SRR1
stw r0,_SRR1(r11)
+ /* Set the stack limit to the current stack, adjusted so that
+ * the thread_info struct is protected.
+ */
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,SAVED_KSP_LIMIT(r11)
- CURRENT_THREAD_INFO(r0, r1)
+ rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -109,10 +113,14 @@ crit_transfer_to_handler:
mfspr r0,SPRN_SRR1
stw r0,crit_srr1@l(0)
+ /* Set the stack limit to the current stack, adjusted so that
+ * the thread_info struct is protected.
+ */
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,saved_ksp_limit@l(0)
- CURRENT_THREAD_INFO(r0, r1)
+ rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29ef..b40e0b4815b3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
+ ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
- cmpd r0,r25
- beq 1f
+ cmpwi r6,0
+ bne 1f
+ ld r0,0(r7)
+1: cmpd r0,r25
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77bb..39aa97d3ff88 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+ STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 91b46b7f6f0d..1fb78561096a 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -630,18 +630,17 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
- *parent = old;
- return;
- }
-
trace.func = self_addr;
+ trace.depth = current->curr_ret_stack + 1;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
*parent = old;
+ return;
}
+
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
+ *parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61db..956a4c496de9 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
- bp->ctx->task->thread.last_hit_ubp = bp;
+ current->thread.last_hit_ubp = bp;
regs->msr |= MSR_SE;
goto out;
}
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339e..e11863f4e595 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+ /* fall through */
+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
* stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f0..c470a40b29f5 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
+#include <linux/slab.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
return SIGHUP; /* default for things we don't know about */
}
+/**
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return kgdb_isremovedbreak(regs->nip);
+}
+
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
+ struct thread_info *backup_current_thread_info = \
+ (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
if (user_mode(regs))
return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
- if (thread_info != exception_thread_info)
+ if (thread_info != exception_thread_info) {
+ /* Save the original current_thread_info. */
+ memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+ }
kgdb_handle_exception(0, SIGTRAP, 0, regs);
if (thread_info != exception_thread_info)
- memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+ /* Restore current_thread_info lastly. */
+ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
return 1;
}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#else
linux_regs->msr |= MSR_SE;
#endif
- kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476de..1a1f2ddfb581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
- if (current->thread.dscr_inherit) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = current->thread.dscr;
- } else if (0 != dscr_default) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = dscr_default;
- } else {
- p->thread.dscr_inherit = 0;
- p->thread.dscr = 0;
- }
+ p->thread.dscr_inherit = current->thread.dscr_inherit;
+ p->thread.dscr = current->thread.dscr;
}
#endif
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 4174b4b23246..2c0ee6405633 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -709,7 +709,7 @@ static int __init rtas_flash_init(void)
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
- printk(KERN_ERR "rtas_flash: no firmware flash support\n");
+ pr_info("rtas_flash: no firmware flash support\n");
return 1;
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f7..8d4214afc21d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faecc..4e3cc47f26b9 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
long ret;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
#endif
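
The ppc64_personality() change above rewrites only the low personality-type bits (PER_MASK) and leaves flag bits such as ADDR_NO_RANDOMIZE untouched. A minimal userspace sketch of the entry-side mapping, before and after the fix (constant values as in linux/personality.h; old_mapping/new_mapping are illustrative helpers, not the kernel functions, and the current->personality check is omitted):

/* Sketch only: why the fix masks the type bits instead of the whole word. */
#include <stdio.h>

#define PER_MASK           0x00ff
#define PER_LINUX          0x0000
#define PER_LINUX32        0x0008
#define ADDR_NO_RANDOMIZE  0x0040000
#define personality(pers)  ((pers) & PER_MASK)

static unsigned long old_mapping(unsigned long p)
{
	/* pre-fix logic: compares the whole word, so a flagged PER_LINUX
	 * never matches and the 32-bit mapping is skipped */
	if (p == PER_LINUX)
		p = PER_LINUX32;
	return p;
}

static unsigned long new_mapping(unsigned long p)
{
	/* post-fix logic: only the personality-type bits are rewritten */
	if (personality(p) == PER_LINUX)
		p = (p & ~PER_MASK) | PER_LINUX32;
	return p;
}

int main(void)
{
	unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;

	printf("old: 0x%lx (mapping missed)\n", old_mapping(p)); /* 0x40000 */
	printf("new: 0x%lx (flags kept)\n", new_mapping(p));      /* 0x40008 */
	return 0;
}

With a flag bit set, the old comparison never treats the value as PER_LINUX at all, while the new code swaps only the type bits and preserves the flags on both the argument and the return value.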
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2abd..8302af649219 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf8..e49e93191b69 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
trace_timer_interrupt_exit(regs);
}
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2d..ae0843fa7a61 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 3052a931f2b5..02b32216bbc3 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -611,6 +611,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
+ .mmap = dma_direct_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb234..837f13e7b6bf 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
pteg1 |= PP_RWRX;
}
+ if (orig_pte->may_execute)
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
local_irq_disable();
if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a8..0688b6b39585 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
+ else
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d040..44b72feaff7d 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
sync /* order setting ceded vs. testing prodded */
lbz r5,VCPU_PRODDED(r3)
cmpwi r5,0
- bne 1f
+ bne kvm_cede_prodded
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
- b 2f /* just send it up to host on 970 */
+ b kvm_cede_exit /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
or r4,r4,r0
PPC_POPCNTW(R7,R4)
cmpw r7,r8
- bge 2f
+ bge kvm_cede_exit
stwcx. r4,0,r6
bne 31b
li r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
b hcall_real_fallback
/* cede when already previously prodded case */
-1: li r0,0
+kvm_cede_prodded:
+ li r0,0
stb r0,VCPU_PRODDED(r3)
sync /* order testing prodded vs. clearing ceded */
stb r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
blr
/* we've ceded but we want to give control to the host */
-2: li r3,H_TOO_HARD
+kvm_cede_exit:
+ li r3,H_TOO_HARD
blr
secondary_too_late:
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index ab523f3c1731..9ecf6e35cd8d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -67,7 +67,6 @@ kvmppc_skip_Hinterrupt:
#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name) name
-#define MTMSR_EERI(reg) mtmsr (reg)
.macro INTERRUPT_TRAMPOLINE intno
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index d28c2d43ac1b..099fe8272b57 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -50,8 +50,9 @@
#define HOST_R2 (3 * LONGBYTES)
#define HOST_CR (4 * LONGBYTES)
#define HOST_NV_GPRS (5 * LONGBYTES)
-#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
+#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
+#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
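
The HOST_NV_GPR() rework relies on token pasting: HOST_NV_GPR(R14) becomes __HOST_NV_GPR(__REG_R14), so the same symbolic register name used in the PPC_LL/PPC_STL lines below also drives the stack-offset arithmetic. A standalone sketch of the pattern (the numeric __REG_Rn values here are assumptions standing in for the definitions in asm/ppc_asm.h):

/* Sketch of the token-pasting indirection introduced by this hunk. */
#include <stdio.h>

#define LONGBYTES      8
#define HOST_NV_GPRS   (5 * LONGBYTES)

/* Assumed mapping from symbolic register names to register numbers. */
#define __REG_R14 14
#define __REG_R31 31

#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + (((n) - 14) * LONGBYTES))
#define HOST_NV_GPR(n)    __HOST_NV_GPR(__REG_##n)

int main(void)
{
	/* R14 is pasted into __REG_R14, which expands to 14 before the math */
	printf("HOST_NV_GPR(R14) = %d\n", HOST_NV_GPR(R14)); /* 40 */
	printf("HOST_NV_GPR(R31) = %d\n", HOST_NV_GPR(R31)); /* 176 */
	return 0;
}

Token pasting turns the symbolic name into __REG_R14, which then expands to its register number before the offset arithmetic runs, so the assembly can keep writing HOST_NV_GPR(R14) instead of a bare 14.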
@@ -410,24 +411,24 @@ heavyweight_exit:
PPC_STL r31, VCPU_GPR(R31)(r4)
/* Load host non-volatile register state from host stack. */
- PPC_LL r14, HOST_NV_GPR(r14)(r1)
- PPC_LL r15, HOST_NV_GPR(r15)(r1)
- PPC_LL r16, HOST_NV_GPR(r16)(r1)
- PPC_LL r17, HOST_NV_GPR(r17)(r1)
- PPC_LL r18, HOST_NV_GPR(r18)(r1)
- PPC_LL r19, HOST_NV_GPR(r19)(r1)
- PPC_LL r20, HOST_NV_GPR(r20)(r1)
- PPC_LL r21, HOST_NV_GPR(r21)(r1)
- PPC_LL r22, HOST_NV_GPR(r22)(r1)
- PPC_LL r23, HOST_NV_GPR(r23)(r1)
- PPC_LL r24, HOST_NV_GPR(r24)(r1)
- PPC_LL r25, HOST_NV_GPR(r25)(r1)
- PPC_LL r26, HOST_NV_GPR(r26)(r1)
- PPC_LL r27, HOST_NV_GPR(r27)(r1)
- PPC_LL r28, HOST_NV_GPR(r28)(r1)
- PPC_LL r29, HOST_NV_GPR(r29)(r1)
- PPC_LL r30, HOST_NV_GPR(r30)(r1)
- PPC_LL r31, HOST_NV_GPR(r31)(r1)
+ PPC_LL r14, HOST_NV_GPR(R14)(r1)
+ PPC_LL r15, HOST_NV_GPR(R15)(r1)
+ PPC_LL r16, HOST_NV_GPR(R16)(r1)
+ PPC_LL r17, HOST_NV_GPR(R17)(r1)
+ PPC_LL r18, HOST_NV_GPR(R18)(r1)
+ PPC_LL r19, HOST_NV_GPR(R19)(r1)
+ PPC_LL r20, HOST_NV_GPR(R20)(r1)
+ PPC_LL r21, HOST_NV_GPR(R21)(r1)
+ PPC_LL r22, HOST_NV_GPR(R22)(r1)
+ PPC_LL r23, HOST_NV_GPR(R23)(r1)
+ PPC_LL r24, HOST_NV_GPR(R24)(r1)
+ PPC_LL r25, HOST_NV_GPR(R25)(r1)
+ PPC_LL r26, HOST_NV_GPR(R26)(r1)
+ PPC_LL r27, HOST_NV_GPR(R27)(r1)
+ PPC_LL r28, HOST_NV_GPR(R28)(r1)
+ PPC_LL r29, HOST_NV_GPR(R29)(r1)
+ PPC_LL r30, HOST_NV_GPR(R30)(r1)
+ PPC_LL r31, HOST_NV_GPR(R31)(r1)
/* Return to kvm_vcpu_run(). */
mtlr r5
@@ -453,24 +454,24 @@ _GLOBAL(__kvmppc_vcpu_run)
stw r5, HOST_CR(r1)
/* Save host non-volatile register state to stack. */
- PPC_STL r14, HOST_NV_GPR(r14)(r1)
- PPC_STL r15, HOST_NV_GPR(r15)(r1)
- PPC_STL r16, HOST_NV_GPR(r16)(r1)
- PPC_STL r17, HOST_NV_GPR(r17)(r1)
- PPC_STL r18, HOST_NV_GPR(r18)(r1)
- PPC_STL r19, HOST_NV_GPR(r19)(r1)
- PPC_STL r20, HOST_NV_GPR(r20)(r1)
- PPC_STL r21, HOST_NV_GPR(r21)(r1)
- PPC_STL r22, HOST_NV_GPR(r22)(r1)
- PPC_STL r23, HOST_NV_GPR(r23)(r1)
- PPC_STL r24, HOST_NV_GPR(r24)(r1)
- PPC_STL r25, HOST_NV_GPR(r25)(r1)
- PPC_STL r26, HOST_NV_GPR(r26)(r1)
- PPC_STL r27, HOST_NV_GPR(r27)(r1)
- PPC_STL r28, HOST_NV_GPR(r28)(r1)
- PPC_STL r29, HOST_NV_GPR(r29)(r1)
- PPC_STL r30, HOST_NV_GPR(r30)(r1)
- PPC_STL r31, HOST_NV_GPR(r31)(r1)
+ PPC_STL r14, HOST_NV_GPR(R14)(r1)
+ PPC_STL r15, HOST_NV_GPR(R15)(r1)
+ PPC_STL r16, HOST_NV_GPR(R16)(r1)
+ PPC_STL r17, HOST_NV_GPR(R17)(r1)
+ PPC_STL r18, HOST_NV_GPR(R18)(r1)
+ PPC_STL r19, HOST_NV_GPR(R19)(r1)
+ PPC_STL r20, HOST_NV_GPR(R20)(r1)
+ PPC_STL r21, HOST_NV_GPR(R21)(r1)
+ PPC_STL r22, HOST_NV_GPR(R22)(r1)
+ PPC_STL r23, HOST_NV_GPR(R23)(r1)
+ PPC_STL r24, HOST_NV_GPR(R24)(r1)
+ PPC_STL r25, HOST_NV_GPR(R25)(r1)
+ PPC_STL r26, HOST_NV_GPR(R26)(r1)
+ PPC_STL r27, HOST_NV_GPR(R27)(r1)
+ PPC_STL r28, HOST_NV_GPR(R28)(r1)
+ PPC_STL r29, HOST_NV_GPR(R29)(r1)
+ PPC_STL r30, HOST_NV_GPR(R30)(r1)
+ PPC_STL r31, HOST_NV_GPR(R31)(r1)
/* Load guest non-volatiles. */
PPC_LL r14, VCPU_GPR(R14)(r4)
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc961302..a2b66717813d 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
if (vcpu_e500->g2h_tlb1_map)
- memset(vcpu_e500->g2h_tlb1_map,
- sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
if (vcpu_e500->h2g_tlb1_rmap)
- memset(vcpu_e500->h2g_tlb1_rmap,
- sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
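
The clear_tlb1_bitmap() fix above is a straight transposition of memset() arguments: the old calls passed the buffer length where the fill byte belongs and 0 as the length, so nothing was ever cleared. A quick standalone illustration (not kernel code) of the two orderings:

/* Illustrative only: the argument order memset() expects, which is what
 * the e500_tlb.c fix restores (fill value second, length third). */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long map[4] = { ~0ULL, ~0ULL, ~0ULL, ~0ULL };

	/* Swapped order: fill byte = sizeof(map), length = 0 -> a no-op. */
	memset(map, sizeof(map), 0);
	printf("after swapped args: map[0] = %llx\n", map[0]); /* still all ones */

	/* Correct order: pointer, fill byte, length. */
	memset(map, 0, sizeof(map));
	printf("after correct call: map[0] = %llx\n", map[0]); /* 0 */
	return 0;
}

Newer compilers flag this pattern (GCC's -Wmemset-transposed-args), but compilers of that era accepted the swapped call silently.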
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
+
+ /* Clear i-cache for new pages */
+ kvmppc_mmu_flush_icache(pfn);
}
/* XXX only map the one-one case, for now use TLB0 */
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb333..17e5b2364312 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
- err = __put_user(instr, addr);
+ __put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606e..0d24ff15f5f6 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_usercopy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- /*
- * We prefetch both the source and destination using enhanced touch
- * instructions. We use a stream ID of 0 for the load side and
- * 1 for the store side.
- */
- clrrdi r6,r4,7
- clrrdi r9,r3,7
- ori r9,r9,1 /* stream=1 */
-
- srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
- cmpldi cr1,r7,0x3FF
- ble cr1,1f
- li r7,0x3FF
-1: lis r0,0x0E00 /* depth=7 */
- sldi r7,r7,7
- or r7,r7,r0
- ori r10,r7,1 /* stream=1 */
-
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
-.machine push
-.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
- eieio
- dcbt r0,r8,0b01010 /* GO */
-.machine pop
-
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc716..7ba6c96de778 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d135..fbdad0e3929a 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
+EXPORT_SYMBOL(flush_dcache_icache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39b159751c35..59213cfaeca9 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
/*
* Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid;
+ int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev;
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ changed = 1;
}
- return 1;
+ return changed;
}
static void topology_work_fn(struct work_struct *work)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 77b49ddda9d3..7cd2dbd6e4c4 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
+ if (pmc_overflow(val)) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 89ee02c54561..3c732acf331d 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -208,6 +208,7 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
u8 __iomem *lbc_lcs0_ba = NULL;
u8 __iomem *lbc_lcs1_ba = NULL;
phys_addr_t cs0_addr, cs1_addr;
+ u32 br0, or0, br1, or1;
const __be32 *iprop;
unsigned int num_laws;
u8 b;
@@ -256,11 +257,70 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
}
num_laws = be32_to_cpup(iprop);
- cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
- cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
+ /*
+ * Indirect mode requires both BR0 and BR1 to be set to "GPCM",
+ * otherwise writes to these addresses won't actually appear on the
+ * local bus, and so the PIXIS won't see them.
+ *
+ * In FCM mode, writes go to the NAND controller, which does not pass
+ * them to the localbus directly. So we force BR0 and BR1 into GPCM
+ * mode, since we don't care about what's behind the localbus any
+ * more.
+ */
+ br0 = in_be32(&lbc->bank[0].br);
+ br1 = in_be32(&lbc->bank[1].br);
+ or0 = in_be32(&lbc->bank[0].or);
+ or1 = in_be32(&lbc->bank[1].or);
+
+ /* Make sure CS0 and CS1 are programmed */
+ if (!(br0 & BR_V) || !(br1 & BR_V)) {
+ pr_err("p1022ds: CS0 and/or CS1 is not programmed\n");
+ goto exit;
+ }
+
+ /*
+ * Use the existing BRx/ORx values if it's already GPCM. Otherwise,
+ * force the values to simple 32KB GPCM windows with the most
+ * conservative timing.
+ */
+ if ((br0 & BR_MSEL) != BR_MS_GPCM) {
+ br0 = (br0 & BR_BA) | BR_V;
+ or0 = 0xFFFF8000 | 0xFF7;
+ out_be32(&lbc->bank[0].br, br0);
+ out_be32(&lbc->bank[0].or, or0);
+ }
+ if ((br1 & BR_MSEL) != BR_MS_GPCM) {
+ br1 = (br1 & BR_BA) | BR_V;
+ or1 = 0xFFFF8000 | 0xFF7;
+ out_be32(&lbc->bank[1].br, br1);
+ out_be32(&lbc->bank[1].or, or1);
+ }
+
+ cs0_addr = lbc_br_to_phys(ecm, num_laws, br0);
+ if (!cs0_addr) {
+ pr_err("p1022ds: could not determine physical address for CS0"
+ " (BR0=%08x)\n", br0);
+ goto exit;
+ }
+ cs1_addr = lbc_br_to_phys(ecm, num_laws, br1);
+ if (!cs1_addr) {
+ pr_err("p1022ds: could not determine physical address for CS1"
+ " (BR1=%08x)\n", br1);
+ goto exit;
+ }
lbc_lcs0_ba = ioremap(cs0_addr, 1);
+ if (!lbc_lcs0_ba) {
+ pr_err("p1022ds: could not ioremap CS0 address %llx\n",
+ (unsigned long long)cs0_addr);
+ goto exit;
+ }
lbc_lcs1_ba = ioremap(cs1_addr, 1);
+ if (!lbc_lcs1_ba) {
+ pr_err("p1022ds: could not ioremap CS1 address %llx\n",
+ (unsigned long long)cs1_addr);
+ goto exit;
+ }
/* Make sure we're in indirect mode first. */
if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
@@ -419,18 +479,6 @@ void __init p1022_ds_pic_init(void)
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
-/*
- * Disables a node in the device tree.
- *
- * This function is called before kmalloc() is available, so the 'new' object
- * should be allocated in the global area. The easiest way is to do that is
- * to allocate one static local variable for each call to this function.
- */
-static void __init disable_one_node(struct device_node *np, struct property *new)
-{
- prom_update_property(np, new);
-}
-
/* TRUE if there is a "video=fslfb" command-line parameter. */
static bool fslfb;
@@ -493,28 +541,58 @@ static void __init p1022_ds_setup_arch(void)
diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
/*
- * Disable the NOR flash node if there is video=fslfb... command-line
- * parameter. When the DIU is active, NOR flash is unavailable, so we
- * have to disable the node before the MTD driver loads.
+ * Disable the NOR and NAND flash nodes if there is video=fslfb...
+ * command-line parameter. When the DIU is active, the localbus is
+ * unavailable, so we have to disable these nodes before the MTD
+ * driver loads.
*/
if (fslfb) {
struct device_node *np =
of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
if (np) {
- np = of_find_compatible_node(np, NULL, "cfi-flash");
- if (np) {
+ struct device_node *np2;
+
+ of_node_get(np);
+ np2 = of_find_compatible_node(np, NULL, "cfi-flash");
+ if (np2) {
static struct property nor_status = {
.name = "status",
.value = "disabled",
.length = sizeof("disabled"),
};
+ /*
+ * prom_update_property() is called before
+ * kmalloc() is available, so the 'new' object
+ * should be allocated in the global area.
+ * The easiest way to do that is to
+ * allocate one static local variable for each
+ * call to this function.
+ */
pr_info("p1022ds: disabling %s node",
- np->full_name);
- disable_one_node(np, &nor_status);
- of_node_put(np);
+ np2->full_name);
+ prom_update_property(np2, &nor_status);
+ of_node_put(np2);
}
+
+ of_node_get(np);
+ np2 = of_find_compatible_node(np, NULL,
+ "fsl,elbc-fcm-nand");
+ if (np2) {
+ static struct property nand_status = {
+ .name = "status",
+ .value = "disabled",
+ .length = sizeof("disabled"),
+ };
+
+ pr_info("p1022ds: disabling %s node",
+ np2->full_name);
+ prom_update_property(np2, &nand_status);
+ of_node_put(np2);
+ }
+
+ of_node_put(np);
}
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index d544d7816df3..dba1ce235da5 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -186,10 +186,13 @@ static void spufs_prune_dir(struct dentry *dir)
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
/* remove all entries */
+ int res;
spufs_prune_dir(dir);
d_drop(dir);
-
- return simple_rmdir(parent, dir);
+ res = simple_rmdir(parent, dir);
+ /* We have to give up the mm_struct */
+ spu_forget(SPUFS_I(dir->d_inode)->i_ctx);
+ return res;
}
static int spufs_fill_dir(struct dentry *dir,
@@ -245,9 +248,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file)
mutex_unlock(&parent->i_mutex);
WARN_ON(ret);
- /* We have to give up the mm_struct */
- spu_forget(ctx);
-
return dcache_dir_close(inode, file);
}
@@ -450,28 +450,24 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
struct spu_context *neighbor;
struct path path = {.mnt = mnt, .dentry = dentry};
- ret = -EPERM;
if ((flags & SPU_CREATE_NOSCHED) &&
!capable(CAP_SYS_NICE))
- goto out_unlock;
+ return -EPERM;
- ret = -EINVAL;
if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
== SPU_CREATE_ISOLATE)
- goto out_unlock;
+ return -EINVAL;
- ret = -ENODEV;
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
- goto out_unlock;
+ return -ENODEV;
gang = NULL;
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
- ret = -EINVAL;
if (!gang)
- goto out_unlock;
+ return -EINVAL;
mutex_lock(&gang->aff_mutex);
neighbor = spufs_assert_affinity(flags, gang, aff_filp);
if (IS_ERR(neighbor)) {
@@ -492,22 +488,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
}
ret = spufs_context_open(&path);
- if (ret < 0) {
+ if (ret < 0)
WARN_ON(spufs_rmdir(inode, dentry));
- if (affinity)
- mutex_unlock(&gang->aff_mutex);
- mutex_unlock(&inode->i_mutex);
- spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
- goto out;
- }
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
-out_unlock:
- mutex_unlock(&inode->i_mutex);
-out:
- dput(dentry);
return ret;
}
@@ -580,18 +566,13 @@ static int spufs_create_gang(struct inode *inode,
int ret;
ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
- if (ret)
- goto out;
-
- ret = spufs_gang_open(&path);
- if (ret < 0) {
- int err = simple_rmdir(inode, dentry);
- WARN_ON(err);
+ if (!ret) {
+ ret = spufs_gang_open(&path);
+ if (ret < 0) {
+ int err = simple_rmdir(inode, dentry);
+ WARN_ON(err);
+ }
}
-
-out:
- mutex_unlock(&inode->i_mutex);
- dput(dentry);
return ret;
}
@@ -601,40 +582,32 @@ static struct file_system_type spufs_type;
long spufs_create(struct path *path, struct dentry *dentry,
unsigned int flags, umode_t mode, struct file *filp)
{
+ struct inode *dir = path->dentry->d_inode;
int ret;
- ret = -EINVAL;
/* check if we are on spufs */
if (path->dentry->d_sb->s_type != &spufs_type)
- goto out;
+ return -EINVAL;
/* don't accept undefined flags */
if (flags & (~SPU_CREATE_FLAG_ALL))
- goto out;
+ return -EINVAL;
/* only threads can be underneath a gang */
- if (path->dentry != path->dentry->d_sb->s_root) {
- if ((flags & SPU_CREATE_GANG) ||
- !SPUFS_I(path->dentry->d_inode)->i_gang)
- goto out;
- }
+ if (path->dentry != path->dentry->d_sb->s_root)
+ if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang)
+ return -EINVAL;
mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
- ret = spufs_create_gang(path->dentry->d_inode,
- dentry, path->mnt, mode);
+ ret = spufs_create_gang(dir, dentry, path->mnt, mode);
else
- ret = spufs_create_context(path->dentry->d_inode,
- dentry, path->mnt, flags, mode,
+ ret = spufs_create_context(dir, dentry, path->mnt, flags, mode,
filp);
if (ret >= 0)
- fsnotify_mkdir(path->dentry->d_inode, dentry);
- return ret;
+ fsnotify_mkdir(dir, dentry);
-out:
- mutex_unlock(&path->dentry->d_inode->i_mutex);
- dput(dentry);
return ret;
}
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 5665dcc382c7..5b7d8ffbf890 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
ret = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
ret = spufs_create(&path, dentry, flags, mode, neighbor);
- path_put(&path);
+ done_path_create(&path, dentry);
}
return ret;
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3ef46254c35b..7698b6e13c57 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- /* If powersave_nap is enabled, use NAP mode, else just
- * spin aimlessly
- */
- if (!powersave_nap) {
- generic_mach_cpu_die();
- return;
- }
-
/* Standard hot unplug procedure */
local_irq_disable();
idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
- power7_idle();
+ power7_nap();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
index 60c9c0bd5ba2..2aa97ddb7b78 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2009-2010 Freescale Semiconductor, Inc
+ * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc
*
* QorIQ based Cache Controller Memory Mapped Registers
*
@@ -91,7 +91,7 @@ struct mpc85xx_l2ctlr {
struct sram_parameters {
unsigned int sram_size;
- uint64_t sram_offset;
+ phys_addr_t sram_offset;
};
extern int instantiate_cache_sram(struct platform_device *dev,
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index cedabd0f4bfe..68ac3aacb191 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc.
*
* QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation
*
@@ -31,24 +31,21 @@ static char *sram_size;
static char *sram_offset;
struct mpc85xx_l2ctlr __iomem *l2ctlr;
-static long get_cache_sram_size(void)
+static int get_cache_sram_params(struct sram_parameters *sram_params)
{
- unsigned long val;
+ unsigned long long addr;
+ unsigned int size;
- if (!sram_size || (strict_strtoul(sram_size, 0, &val) < 0))
+ if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0))
return -EINVAL;
- return val;
-}
-
-static long get_cache_sram_offset(void)
-{
- unsigned long val;
-
- if (!sram_offset || (strict_strtoul(sram_offset, 0, &val) < 0))
+ if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0))
return -EINVAL;
- return val;
+ sram_params->sram_offset = addr;
+ sram_params->sram_size = size;
+
+ return 0;
}
static int __init get_size_from_cmdline(char *str)
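
get_cache_sram_params() above folds the two legacy helpers into a single call that parses both command-line strings and fails as a unit, and it widens the offset to phys_addr_t so large physical addresses survive on CONFIG_PHYS_64BIT. A rough userspace analogue using the standard strtoul/strtoull in place of kstrtouint/kstrtoull (names and structure are illustrative, not the kernel code):

/* Sketch of the combined "parse both or fail" approach, in userspace. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sram_parameters {
	unsigned int sram_size;
	unsigned long long sram_offset;   /* phys_addr_t in the kernel */
};

static int get_cache_sram_params(const char *size_str, const char *offset_str,
				 struct sram_parameters *out)
{
	char *end;

	if (!size_str || !offset_str)
		return -EINVAL;

	errno = 0;
	out->sram_size = (unsigned int)strtoul(size_str, &end, 0);
	if (errno || *end)
		return -EINVAL;

	errno = 0;
	out->sram_offset = strtoull(offset_str, &end, 0);
	if (errno || *end)
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct sram_parameters p;

	if (!get_cache_sram_params("0x10000", "0xfff00000", &p))
		printf("size=%u offset=0x%llx\n", p.sram_size, p.sram_offset);
	return 0;
}

Parsing both values in one place is what lets the probe routine below report a single "provide valid sram offset and size" error instead of two separate sign-based checks.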
@@ -93,17 +90,9 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
}
l2cache_size = *prop;
- sram_params.sram_size = get_cache_sram_size();
- if ((int)sram_params.sram_size <= 0) {
- dev_err(&dev->dev,
- "Entire L2 as cache, Aborting Cache-SRAM stuff\n");
- return -EINVAL;
- }
-
- sram_params.sram_offset = get_cache_sram_offset();
- if ((int64_t)sram_params.sram_offset <= 0) {
+ if (get_cache_sram_params(&sram_params)) {
dev_err(&dev->dev,
- "Entire L2 as cache, provide a valid sram offset\n");
+ "Entire L2 as cache, provide valid sram offset and size\n");
return -EINVAL;
}
@@ -125,14 +114,14 @@ static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
* Write bits[0-17] to srbar0
*/
out_be32(&l2ctlr->srbar0,
- sram_params.sram_offset & L2SRAM_BAR_MSK_LO18);
+ lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18);
/*
* Write bits[18-21] to srbare0
*/
#ifdef CONFIG_PHYS_64BIT
out_be32(&l2ctlr->srbarea0,
- (sram_params.sram_offset >> 32) & L2SRAM_BARE_MSK_HI4);
+ upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4);
#endif
clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a7b2a600d0a4..c37f46136321 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
iounmap(hose->cfg_data);
iounmap(hose->cfg_addr);
pcibios_free_controller(hose);
- return 0;
+ return -ENODEV;
}
setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
void __devinit fsl_pci_init(void)
{
+ int ret;
struct device_node *node;
struct pci_controller *hose;
dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
if (!fsl_pci_primary)
fsl_pci_primary = node;
- fsl_add_bridge(node, fsl_pci_primary == node);
- hose = pci_find_hose_for_OF_device(node);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ ret = fsl_add_bridge(node, fsl_pci_primary == node);
+ if (ret == 0) {
+ hose = pci_find_hose_for_OF_device(node);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
}
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 483d8fa72e8b..e961f8c4a8f0 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -14,6 +14,9 @@
#include <linux/list.h>
#include <linux/of_platform.h>
#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 253dce98c16e..df0fc5821469 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
- long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+ long rc;
+
+ /* Make sure all previous accesses are ordered before IPI sending */
+ mb();
+ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
@@ -111,7 +115,7 @@ static unsigned int icp_hv_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
- irq = irq_radix_revmap_lookup(xics_host, vec);
+ irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 4c79b6fbee1c..48861d3fcd07 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -119,7 +119,7 @@ static unsigned int icp_native_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;
- irq = irq_radix_revmap_lookup(xics_host, vec);
+ irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index cd1d18db92c6..9049d9f44485 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -329,9 +329,6 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
- /* Insert the interrupt mapping into the radix tree for fast lookup */
- irq_radix_revmap_insert(xics_host, virq, hw);
-
/* They aren't all level sensitive but we just don't really know */
irq_set_status_flags(virq, IRQ_LEVEL);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index eab3492a45c5..9b49c65ee7a4 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,6 +17,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
+#include <linux/kmsg_dump.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
#endif
default:
printf("Unrecognized command: ");
- do {
+ do {
if (' ' < cmd && cmd <= '~')
putchar(cmd);
else
printf("\\x%x", cmd);
cmd = inchar();
- } while (cmd != '\n');
+ } while (cmd != '\n');
printf(" (type ? for help)\n");
break;
}
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
return 1;
}
-static char *breakpoint_help_string =
+static char *breakpoint_help_string =
"Breakpoint command usage:\n"
"b show breakpoints\n"
"b <addr> [cnt] set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
default:
termch = cmd;
- cmd = skipbl();
+ cmd = skipbl();
if (cmd == '?') {
printf(breakpoint_help_string);
break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
sp + REGS_OFFSET);
break;
}
- printf("--- Exception: %lx %s at ", regs.trap,
+ printf("--- Exception: %lx %s at ", regs.trap,
getvecname(TRAP(&regs)));
pc = regs.nip;
lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
cmd = skipbl();
if (cmd == '\n') {
- unsigned long sp, toc;
+ unsigned long sp, toc;
asm("mr %0,1" : "=r" (sp) :);
asm("mr %0,2" : "=r" (toc) :);
printf("msr = "REG" sprg0= "REG"\n",
mfmsr(), mfspr(SPRN_SPRG0));
printf("pvr = "REG" sprg1= "REG"\n",
- mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
+ mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
printf("dec = "REG" sprg2= "REG"\n",
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
static int brev;
static int mnoread;
-static char *memex_help_string =
+static char *memex_help_string =
"Memory examine command usage:\n"
"m [addr] [flags] examine/change memory\n"
" addr is optional. will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
"NOTE: flags are saved as defaults\n"
"";
-static char *memex_subcmd_help_string =
+static char *memex_subcmd_help_string =
"Memory examine subcommands:\n"
" hexval write this val to current location\n"
" 'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
nr = mread(adrs, temp, r);
adrs += nr;
for (m = 0; m < r; ++m) {
- if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+ if ((m & (sizeof(long) - 1)) == 0 && m > 0)
putchar(' ');
if (m < nr)
printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
printf("%s", fault_chars[fault_type]);
}
for (; m < 16; ++m) {
- if ((m & (sizeof(long) - 1)) == 0)
+ if ((m & (sizeof(long) - 1)) == 0)
putchar(' ');
printf(" ");
}
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
void
dump_log_buf(void)
{
- const unsigned long size = 128;
- unsigned long end, addr;
- unsigned char buf[size + 1];
-
- addr = 0;
- buf[size] = '\0';
-
- if (setjmp(bus_error_jmp) != 0) {
- printf("Unable to lookup symbol __log_buf!\n");
- return;
- }
-
- catch_memory_errors = 1;
- sync();
- addr = kallsyms_lookup_name("__log_buf");
-
- if (! addr)
- printf("Symbol __log_buf not found!\n");
- else {
- end = addr + (1 << CONFIG_LOG_BUF_SHIFT);
- while (addr < end) {
- if (! mread(addr, buf, size)) {
- printf("Can't read memory at address 0x%lx\n", addr);
- break;
- }
-
- printf("%s", buf);
-
- if (strlen(buf) < size)
- break;
-
- addr += size;
- }
- }
-
- sync();
- /* wait a little while to see if we get a machine check */
- __delay(200);
- catch_memory_errors = 0;
+ struct kmsg_dumper dumper = { .active = 1 };
+ unsigned char buf[128];
+ size_t len;
+
+ if (setjmp(bus_error_jmp) != 0) {
+ printf("Error dumping printk buffer!\n");
+ return;
+ }
+
+ catch_memory_errors = 1;
+ sync();
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf) - 1, &len)) {
+ buf[len] = '\0';
+ printf("%s", buf);
+ }
+
+ sync();
+ /* wait a little while to see if we get a machine check */
+ __delay(200);
+ catch_memory_errors = 0;
}
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a39b4690c171..107610e01a29 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -85,10 +85,12 @@ config S390
select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_CMPXCHG_LOCAL
select ARCH_DISCARD_MEMBLOCK
+ select BUILDTIME_EXTABLE_SORT
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK
@@ -117,10 +119,12 @@ config S390
select ARCH_INLINE_WRITE_UNLOCK_BH
select ARCH_INLINE_WRITE_UNLOCK_IRQ
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+ select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GENERIC_CLOCKEVENTS
select KTIME_SCALAR if 32BIT
+ select HAVE_ARCH_SECCOMP_FILTER
config SCHED_OMIT_FRAME_POINTER
def_bool y
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 37d2bf267964..f39cd710980b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -7,13 +7,16 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEMCG=y
CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
@@ -35,8 +38,6 @@ CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5c63615f1349..b749c5733657 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -11,7 +11,6 @@
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
-#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
@@ -58,7 +57,7 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- if (user_mode != HOME_SPACE_MODE) {
+ if (addressing_mode != HOME_SPACE_MODE) {
/* Load primary space page table origin. */
asm volatile(LCTL_OPCODE" 1,1,%0\n"
: : "m" (S390_lowcore.user_asce) );
@@ -91,4 +90,17 @@ static inline void activate_mm(struct mm_struct *prev,
switch_mm(prev, next, current);
}
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_64BIT
+ if (oldmm->context.asce_limit < mm->context.asce_limit)
+ crst_table_downgrade(mm, oldmm->context.asce_limit);
+#endif
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
#endif /* __S390_MMU_CONTEXT_H */
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
*/
typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c40fa91e38a8..11e4e3236937 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -120,7 +120,9 @@ struct stack_frame {
regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
regs->gprs[15] = new_stackp; \
+ __tlb_flush_mm(current->mm); \
crst_table_downgrade(current->mm, 1UL << 31); \
+ update_mm(current->mm, current); \
} while (0)
/* Forward declaration, a strange C thing */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 57e80534375a..e6859d16ee2d 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -60,7 +60,7 @@ void create_mem_hole(struct mem_chunk memory_chunk[], unsigned long addr,
#define SECONDARY_SPACE_MODE 2
#define HOME_SPACE_MODE 3
-extern unsigned int user_mode;
+extern unsigned int addressing_mode;
/*
* Machine features detected in head.S
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daafa..ce26ac3cb162 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
}
static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 0fb34027d3f6..a60d085ddb4d 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,13 +4,11 @@
#ifdef CONFIG_64BIT
#define SECTION_SIZE_BITS 28
-#define MAX_PHYSADDR_BITS 46
#define MAX_PHYSMEM_BITS 46
#else
#define SECTION_SIZE_BITS 25
-#define MAX_PHYSADDR_BITS 31
#define MAX_PHYSMEM_BITS 31
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fb214dd9b7e0..fe7b99759e12 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
#ifndef _ASM_SYSCALL_H
#define _ASM_SYSCALL_H 1
+#include <linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/ptrace.h>
@@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->orig_gpr2 = args[0];
}
+static inline int syscall_get_arch(struct task_struct *task,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return AUDIT_ARCH_S390;
+#endif
+ return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 2e37157ba6a9..6756e78f4808 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -388,7 +388,6 @@
#define __IGNORE_recvmmsg
#define __IGNORE_sendmmsg
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index d1225089a4bb..f606d935f495 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
@@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index e835d6d5b7fd..2d82cfcbce5b 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_readv
+ jg compat_sys_process_vm_readv
ENTRY(compat_sys_process_vm_writev_wrapper)
lgfr %r2,%r2 # compat_pid_t
@@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_writev
+ jg compat_sys_process_vm_writev
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 21be961e8a43..ba500d8dc392 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -110,6 +110,7 @@ struct debug_view debug_raw_view = {
NULL,
NULL
};
+EXPORT_SYMBOL(debug_raw_view);
struct debug_view debug_hex_ascii_view = {
"hex_ascii",
@@ -119,6 +120,7 @@ struct debug_view debug_hex_ascii_view = {
NULL,
NULL
};
+EXPORT_SYMBOL(debug_hex_ascii_view);
static struct debug_view debug_level_view = {
"level",
@@ -155,6 +157,7 @@ struct debug_view debug_sprintf_view = {
NULL,
NULL
};
+EXPORT_SYMBOL(debug_sprintf_view);
/* used by dump analysis tools to determine version of debug feature */
static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
@@ -730,6 +733,7 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
S_IRUSR | S_IWUSR, 0, 0);
}
+EXPORT_SYMBOL(debug_register);
/*
* debug_unregister:
@@ -748,6 +752,7 @@ debug_unregister(debug_info_t * id)
out:
return;
}
+EXPORT_SYMBOL(debug_unregister);
/*
* debug_set_size:
@@ -810,7 +815,7 @@ debug_set_level(debug_info_t* id, int new_level)
}
spin_unlock_irqrestore(&id->lock,flags);
}
-
+EXPORT_SYMBOL(debug_set_level);
/*
* proceed_active_entry:
@@ -930,7 +935,7 @@ debug_stop_all(void)
if (debug_stoppable)
debug_active = 0;
}
-
+EXPORT_SYMBOL(debug_stop_all);
void debug_set_critical(void)
{
@@ -963,6 +968,7 @@ debug_event_common(debug_info_t * id, int level, const void *buf, int len)
return active;
}
+EXPORT_SYMBOL(debug_event_common);
/*
* debug_exception_common:
@@ -990,6 +996,7 @@ debug_entry_t
return active;
}
+EXPORT_SYMBOL(debug_exception_common);
/*
* counts arguments in format string for sprintf view
@@ -1043,6 +1050,7 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
return active;
}
+EXPORT_SYMBOL(debug_sprintf_event);
/*
* debug_sprintf_exception:
@@ -1081,25 +1089,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
return active;
}
-
-/*
- * debug_init:
- * - is called exactly once to initialize the debug feature
- */
-
-static int
-__init debug_init(void)
-{
- int rc = 0;
-
- s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
- mutex_lock(&debug_mutex);
- debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
- initialized = 1;
- mutex_unlock(&debug_mutex);
-
- return rc;
-}
+EXPORT_SYMBOL(debug_sprintf_exception);
/*
* debug_register_view:
@@ -1147,6 +1137,7 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
out:
return rc;
}
+EXPORT_SYMBOL(debug_register_view);
/*
* debug_unregister_view:
@@ -1176,6 +1167,7 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
out:
return rc;
}
+EXPORT_SYMBOL(debug_unregister_view);
static inline char *
debug_get_user_string(const char __user *user_buf, size_t user_len)
@@ -1485,6 +1477,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
except_str, entry->id.fields.cpuid, (void *) caller);
return rc;
}
+EXPORT_SYMBOL(debug_dflt_header_fn);
/*
* prints debug data sprintf-formated:
@@ -1533,33 +1526,16 @@ out:
}
/*
- * clean up module
+ * debug_init:
+ * - is called exactly once to initialize the debug feature
*/
-static void __exit debug_exit(void)
+static int __init debug_init(void)
{
- debugfs_remove(debug_debugfs_root_entry);
- unregister_sysctl_table(s390dbf_sysctl_header);
- return;
+ s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
+ mutex_lock(&debug_mutex);
+ debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT, NULL);
+ initialized = 1;
+ mutex_unlock(&debug_mutex);
+ return 0;
}
-
-/*
- * module definitions
- */
postcore_initcall(debug_init);
-module_exit(debug_exit);
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(debug_register);
-EXPORT_SYMBOL(debug_unregister);
-EXPORT_SYMBOL(debug_set_level);
-EXPORT_SYMBOL(debug_stop_all);
-EXPORT_SYMBOL(debug_register_view);
-EXPORT_SYMBOL(debug_unregister_view);
-EXPORT_SYMBOL(debug_event_common);
-EXPORT_SYMBOL(debug_exception_common);
-EXPORT_SYMBOL(debug_hex_ascii_view);
-EXPORT_SYMBOL(debug_raw_view);
-EXPORT_SYMBOL(debug_dflt_header_fn);
-EXPORT_SYMBOL(debug_sprintf_view);
-EXPORT_SYMBOL(debug_sprintf_exception);
-EXPORT_SYMBOL(debug_sprintf_event);
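The debug.c changes above move each EXPORT_SYMBOL() next to the symbol it exports and drop the module-exit path, since the debug feature is always built in and set up via postcore_initcall(). For orientation only, a hypothetical in-tree user of these exports looks roughly like this (names and sizes are illustrative; see Documentation/s390/s390dbf.txt for the precise buffer-size rules of the sprintf view):

	#include <linux/module.h>
	#include <asm/debug.h>

	static debug_info_t *my_dbf;

	static int __init my_drv_init(void)
	{
		/* name, pages per area, number of areas, bytes per entry */
		my_dbf = debug_register("my_drv", 4, 1, 200);
		if (!my_dbf)
			return -ENOMEM;
		debug_register_view(my_dbf, &debug_sprintf_view);
		debug_set_level(my_dbf, 3);
		debug_sprintf_event(my_dbf, 1, "initialised, rc=%d\n", 0);
		return 0;
	}

	static void __exit my_drv_exit(void)
	{
		debug_unregister(my_dbf);
	}

	module_init(my_drv_init);
	module_exit(my_drv_exit);
	MODULE_LICENSE("GPL");
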
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 1f6b428e2762..619c5d350726 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1531,7 +1531,7 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
void show_code(struct pt_regs *regs)
{
- char *mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+ char *mode = user_mode(regs) ? "User" : "Krnl";
unsigned char code[64];
char buffer[64], *ptr;
mm_segment_t old_fs;
@@ -1540,7 +1540,7 @@ void show_code(struct pt_regs *regs)
/* Get a snapshot of the 64 bytes surrounding the fault address. */
old_fs = get_fs();
- set_fs((regs->psw.mask & PSW_MASK_PSTATE) ? USER_DS : KERNEL_DS);
+ set_fs(user_mode(regs) ? USER_DS : KERNEL_DS);
for (start = 32; start && regs->psw.addr >= 34 - start; start -= 2) {
addr = regs->psw.addr - 34 + start;
if (__copy_from_user(code + start - 2,
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index bc95a8ebd9cc..83c3271c442b 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -455,7 +455,6 @@ void __init startup_init(void)
init_kernel_storage_key();
lockdep_init();
lockdep_off();
- sort_main_extable();
setup_lowcore_early();
setup_facility_list();
detect_machine_type();
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e64d141555ce..6ffcd3203215 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1583,7 +1583,7 @@ static struct kset *vmcmd_kset;
static void vmcmd_run(struct shutdown_trigger *trigger)
{
- char *cmd, *next_cmd;
+ char *cmd;
if (strcmp(trigger->name, ON_REIPL_STR) == 0)
cmd = vmcmd_on_reboot;
@@ -1600,15 +1600,7 @@ static void vmcmd_run(struct shutdown_trigger *trigger)
if (strlen(cmd) == 0)
return;
- do {
- next_cmd = strchr(cmd, '\n');
- if (next_cmd) {
- next_cmd[0] = 0;
- next_cmd += 1;
- }
- __cpcmd(cmd, NULL, 0, NULL);
- cmd = next_cmd;
- } while (cmd != NULL);
+ __cpcmd(cmd, NULL, 0, NULL);
}
static int vmcmd_init(void)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f4eb37680b91..e4be113fbac6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
long ret = 0;
/* Do the secure computing check first. */
- secure_computing_strict(regs->gprs[2]);
+ if (secure_computing(regs->gprs[2])) {
+ /* seccomp failures shouldn't expose any additional code. */
+ ret = -1;
+ goto out;
+ }
/*
* The sysc_tracesys code in entry.S stored the system
@@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
+out:
return ret ?: regs->gprs[2];
}
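The ptrace hunk above, together with HAVE_ARCH_SECCOMP_FILTER in Kconfig and syscall_get_arch() in asm/syscall.h, wires up seccomp mode 2 on s390: a filter verdict other than allow now aborts the syscall with -1 instead of relying on the strict-mode kill. As a generic, self-contained illustration of what such a filter looks like from userspace (not part of this patch; AUDIT_ARCH_X86_64 and the blocked syscall are placeholders -- an s390x filter would check AUDIT_ARCH_S390X):

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <linux/audit.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>

	#ifndef PR_SET_NO_NEW_PRIVS
	#define PR_SET_NO_NEW_PRIVS 38
	#endif

	int main(void)
	{
		struct sock_filter filter[] = {
			/* Step 1: verify the syscall ABI before trusting the number. */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, arch)),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
			/* Step 2: deny getpid() with EPERM, allow everything else. */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
			.filter = filter,
		};

		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
		    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
			perror("prctl");
			return 1;
		}
		printf("getpid() -> %ld (errno %d)\n",
		       (long)syscall(SYS_getpid), errno);
		return 0;
	}

The arch check comes first because the syscall number is only meaningful relative to the ABI, which is exactly why syscall_get_arch() above has to report AUDIT_ARCH_S390 for 31-bit compat tasks.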
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 743c0f32fe3b..f86c81e13c37 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -302,8 +302,8 @@ static int __init parse_vmalloc(char *arg)
}
early_param("vmalloc", parse_vmalloc);
-unsigned int user_mode = HOME_SPACE_MODE;
-EXPORT_SYMBOL_GPL(user_mode);
+unsigned int addressing_mode = HOME_SPACE_MODE;
+EXPORT_SYMBOL_GPL(addressing_mode);
static int set_amode_primary(void)
{
@@ -328,7 +328,7 @@ static int set_amode_primary(void)
*/
static int __init early_parse_switch_amode(char *p)
{
- user_mode = PRIMARY_SPACE_MODE;
+ addressing_mode = PRIMARY_SPACE_MODE;
return 0;
}
early_param("switch_amode", early_parse_switch_amode);
@@ -336,9 +336,9 @@ early_param("switch_amode", early_parse_switch_amode);
static int __init early_parse_user_mode(char *p)
{
if (p && strcmp(p, "primary") == 0)
- user_mode = PRIMARY_SPACE_MODE;
+ addressing_mode = PRIMARY_SPACE_MODE;
else if (!p || strcmp(p, "home") == 0)
- user_mode = HOME_SPACE_MODE;
+ addressing_mode = HOME_SPACE_MODE;
else
return 1;
return 0;
@@ -347,7 +347,7 @@ early_param("user_mode", early_parse_user_mode);
static void setup_addressing_mode(void)
{
- if (user_mode == PRIMARY_SPACE_MODE) {
+ if (addressing_mode == PRIMARY_SPACE_MODE) {
if (set_amode_primary())
pr_info("Address spaces switched, "
"mvcos available\n");
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index b4a29eee41b8..d0964d22adb5 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
- if (current->personality == PER_LINUX32 && personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
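The sys_s390.c hunk above stops treating the personality word as a plain enum: only the base type (the low byte, extracted by the kernel's personality() macro, i.e. "& PER_MASK") is compared, and PER_LINUX32 is OR-ed in or cleared, so flag bits such as ADDR_NO_RANDOMIZE survive the syscall. A small userspace illustration of the distinction (constants come from <sys/personality.h>):

	#include <stdio.h>
	#include <sys/personality.h>

	int main(void)
	{
		unsigned int p = PER_LINUX32 | ADDR_NO_RANDOMIZE;

		/* Whole-word compare breaks as soon as any flag bit is set... */
		printf("p == PER_LINUX32             : %d\n", p == PER_LINUX32);
		/* ...masking with PER_MASK still identifies the base type. */
		printf("(p & PER_MASK) == PER_LINUX32: %d\n",
		       (p & PER_MASK) == PER_LINUX32);

		/* Clearing only the type bit keeps the flags, as the fixed
		 * wrapper now does with "ret &= ~PER_LINUX32". */
		p &= ~PER_LINUX32;
		printf("surviving flags: %#x\n", p & ~PER_MASK);
		return 0;
	}
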
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index af2421a0f315..01775c04a90e 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -185,7 +185,7 @@ void show_registers(struct pt_regs *regs)
{
char *mode;
- mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
+ mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p",
mode, (void *) regs->psw.mask,
(void *) regs->psw.addr);
@@ -225,7 +225,7 @@ void show_regs(struct pt_regs *regs)
(void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ if (!user_mode(regs))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_last_breaking_event(regs);
}
@@ -300,7 +300,7 @@ static void __kprobes do_trap(struct pt_regs *regs,
regs->int_code, si_signo) == NOTIFY_STOP)
return;
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
info.si_signo = si_signo;
info.si_errno = 0;
info.si_code = si_code;
@@ -341,7 +341,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
static void default_trap_handler(struct pt_regs *regs)
{
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV);
do_exit(SIGSEGV);
} else
@@ -410,7 +410,7 @@ static void __kprobes illegal_op(struct pt_regs *regs)
location = get_psw_address(regs);
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
@@ -478,7 +478,7 @@ void specification_exception(struct pt_regs *regs)
location = (__u16 __user *) get_psw_address(regs);
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
get_user(*((__u16 *) opcode), location);
switch (opcode[0]) {
case 0x28: /* LDR Rx,Ry */
@@ -531,7 +531,7 @@ static void data_exception(struct pt_regs *regs)
asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
#ifdef CONFIG_MATHEMU
- else if (regs->psw.mask & PSW_MASK_PSTATE) {
+ else if (user_mode(regs)) {
__u8 opcode[6];
get_user(*((__u16 *) opcode), location);
switch (opcode[0]) {
@@ -598,7 +598,7 @@ static void data_exception(struct pt_regs *regs)
static void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
- if (regs->psw.mask & PSW_MASK_PSTATE)
+ if (user_mode(regs))
regs->psw.mask |= PSW_ASC_HOME;
/* Send SIGILL. */
do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index ea5590fdca3b..9a19ca367c17 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -84,7 +84,8 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
*/
static void vdso_init_data(struct vdso_data *vd)
{
- vd->ectg_available = user_mode != HOME_SPACE_MODE && test_facility(31);
+ vd->ectg_available =
+ addressing_mode != HOME_SPACE_MODE && test_facility(31);
}
#ifdef CONFIG_64BIT
@@ -101,7 +102,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
lowcore->vdso_per_cpu_data = __LC_PASTE;
- if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
return 0;
segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -146,7 +147,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
unsigned long segment_table, page_table, page_frame;
u32 *psal, *aste;
- if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -164,7 +165,7 @@ static void vdso_init_cr5(void)
{
unsigned long cr5;
- if (user_mode == HOME_SPACE_MODE || !vdso_enabled)
+ if (addressing_mode == HOME_SPACE_MODE || !vdso_enabled)
return;
cr5 = offsetof(struct _lowcore, paste);
__ctl_load(cr5, 5, 5);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 21109c63eb12..de8fa9bbd35e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -45,7 +45,7 @@ SECTIONS
.dummy : { *(.dummy) } :data
- RODATA
+ RO_DATA_SECTION(PAGE_SIZE)
#ifdef CONFIG_SHARED_KERNEL
. = ALIGN(0x100000); /* VM shared segments are 1MB aligned */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6a12d1bb6e09..6c013f544146 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,6 +49,7 @@
#define VM_FAULT_BADCONTEXT 0x010000
#define VM_FAULT_BADMAP 0x020000
#define VM_FAULT_BADACCESS 0x040000
+#define VM_FAULT_SIGNAL 0x080000
static unsigned long store_indication;
@@ -110,7 +111,7 @@ static inline int user_space_fault(unsigned long trans_exc_code)
if (trans_exc_code == 2)
/* Access via secondary space, set_fs setting decides */
return current->thread.mm_segment.ar4;
- if (user_mode == HOME_SPACE_MODE)
+ if (addressing_mode == HOME_SPACE_MODE)
/* User space if the access has been done via home space. */
return trans_exc_code == 3;
/*
@@ -219,7 +220,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
case VM_FAULT_BADACCESS:
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
/* User mode accesses just cause a SIGSEGV */
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
@@ -229,15 +230,19 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
case VM_FAULT_BADCONTEXT:
do_no_context(regs);
break;
+ case VM_FAULT_SIGNAL:
+ if (!user_mode(regs))
+ do_no_context(regs);
+ break;
default: /* fault & VM_FAULT_ERROR */
if (fault & VM_FAULT_OOM) {
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ if (!user_mode(regs))
do_no_context(regs);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
- if (!(regs->psw.mask & PSW_MASK_PSTATE))
+ if (!user_mode(regs))
do_no_context(regs);
else
do_sigbus(regs);
@@ -286,7 +291,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- flags = FAULT_FLAG_ALLOW_RETRY;
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE;
down_read(&mm->mmap_sem);
@@ -335,6 +340,11 @@ retry:
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, flags);
+ /* No reason to continue if interrupted by SIGKILL. */
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ fault = VM_FAULT_SIGNAL;
+ goto out;
+ }
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
@@ -426,7 +436,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
}
/* User mode accesses just cause a SIGSEGV */
- if (regs->psw.mask & PSW_MASK_PSTATE) {
+ if (user_mode(regs)) {
do_sigsegv(regs, SEGV_MAPERR);
return;
}
@@ -441,6 +451,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
struct pt_regs regs;
int access, fault;
+ /* Emulate a uaccess fault from kernel mode. */
regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
if (!irqs_disabled())
regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
@@ -450,12 +461,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
access = write ? VM_WRITE : VM_READ;
fault = do_exception(&regs, access);
- if (unlikely(fault)) {
- if (fault & VM_FAULT_OOM)
- return -EFAULT;
- else if (fault & VM_FAULT_SIGBUS)
- do_sigbus(&regs);
- }
+ /*
+ * Since the fault happened in kernel mode while performing a uaccess
+ * all we need to do now is emulating a fixup in case "fault" is not
+ * zero.
+ * For the calling uaccess functions this results always in -EFAULT.
+ */
return fault ? -EFAULT : 0;
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 573384256c5c..c59a5efa58b1 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -103,9 +103,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
int s390_mmap_check(unsigned long addr, unsigned long len)
{
+ int rc;
+
if (!is_compat_task() &&
- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
- return crst_table_upgrade(current->mm, 1UL << 53);
+ len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
+ rc = crst_table_upgrade(current->mm, 1UL << 53);
+ if (rc)
+ return rc;
+ update_mm(current->mm, current);
+ }
return 0;
}
@@ -125,6 +131,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
+ update_mm(mm, current);
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
}
return area;
@@ -147,6 +154,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
rc = crst_table_upgrade(mm, 1UL << 53);
if (rc)
return (unsigned long) rc;
+ update_mm(mm, current);
area = arch_get_unmapped_area_topdown(filp, addr, len,
pgoff, flags);
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1cab221077cc..18df31d1f2c9 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -85,7 +85,6 @@ repeat:
crst_table_free(mm, table);
if (mm->context.asce_limit < limit)
goto repeat;
- update_mm(mm, current);
return 0;
}
@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
- if (mm->context.asce_limit <= limit)
- return;
- __tlb_flush_mm(mm);
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
mm->task_size = mm->context.asce_limit;
crst_table_free(mm, (unsigned long *) pgd);
}
- update_mm(mm, current);
}
#endif
@@ -801,7 +796,7 @@ int s390_enable_sie(void)
struct mm_struct *mm, *old_mm;
/* Do we have switched amode? If no, we cannot do sie */
- if (user_mode == HOME_SPACE_MODE)
+ if (addressing_mode == HOME_SPACE_MODE)
return -EINVAL;
/* Do we have pgstes? if yes, we are done */
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index c82f62fb9c28..8a6811b2cdb9 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -58,7 +58,7 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
unsigned long head;
struct stack_frame* head_sf;
- if (user_mode (regs))
+ if (user_mode(regs))
return;
head = regs->gprs[15];
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index a24595d83ad6..36f5141e8041 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -21,6 +21,7 @@ config SUPERH
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_XZ
select HAVE_KERNEL_LZO
+ select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_GENERIC_HARDIRQS
@@ -50,6 +51,7 @@ config SUPERH32
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
+ select ARCH_WANT_IPC_PARSE_VERSION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_ARCH_KGDB
select HAVE_HW_BREAKPOINT
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 7048c03490d9..fb5805745ace 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -57,6 +57,7 @@ config SH_7724_SOLUTION_ENGINE
depends on CPU_SUBTYPE_SH7724
select ARCH_REQUIRE_GPIOLIB
select SND_SOC_AK4642 if SND_SIMPLE_CARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select 7724 SolutionEngine if configuring for a Hitachi SH7724
evaluation board.
@@ -140,6 +141,7 @@ config SH_RSK
bool "Renesas Starter Kit"
depends on CPU_SUBTYPE_SH7201 || CPU_SUBTYPE_SH7203 || \
CPU_SUBTYPE_SH7264 || CPU_SUBTYPE_SH7269
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select this option if configuring for any of the RSK+ MCU
evaluation platforms.
@@ -159,6 +161,7 @@ config SH_SDK7786
select NO_IOPORT if !PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_SRAM_POOL
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select SDK7786 if configuring for a Renesas Technology Europe
SH7786-65nm board.
@@ -173,6 +176,7 @@ config SH_SH7757LCR
bool "SH7757LCR"
depends on CPU_SUBTYPE_SH7757
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
config SH_SH7785LCR
bool "SH7785LCR"
@@ -206,6 +210,7 @@ config SH_MIGOR
bool "Migo-R"
depends on CPU_SUBTYPE_SH7722
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select Migo-R if configuring for the SH7722 Migo-R platform
by Renesas System Solutions Asia Pte. Ltd.
@@ -214,6 +219,7 @@ config SH_AP325RXA
bool "AP-325RXA"
depends on CPU_SUBTYPE_SH7723
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Renesas "AP-325RXA" support.
Compatible with ALGO SYSTEM CO.,LTD. "AP-320A"
@@ -222,6 +228,7 @@ config SH_KFR2R09
bool "KFR2R09"
depends on CPU_SUBTYPE_SH7724
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
"Kit For R2R for 2009" support.
@@ -230,6 +237,7 @@ config SH_ECOVEC
depends on CPU_SUBTYPE_SH7724
select ARCH_REQUIRE_GPIOLIB
select SND_SOC_DA7210 if SND_SIMPLE_CARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Renesas "R0P7724LC0011/21RL (EcoVec)" support.
@@ -305,6 +313,7 @@ config SH_MAGIC_PANEL_R2
bool "Magic Panel R2"
depends on CPU_SUBTYPE_SH7720
select ARCH_REQUIRE_GPIOLIB
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
Select Magic Panel R2 if configuring for Magic Panel R2.
@@ -316,6 +325,7 @@ config SH_CAYMAN
config SH_POLARIS
bool "SMSC Polaris"
select CPU_HAS_IPR_IRQ
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on CPU_SUBTYPE_SH7709
help
Select if configuring for an SMSC Polaris development board
@@ -323,6 +333,7 @@ config SH_POLARIS
config SH_SH2007
bool "SH-2007 board"
select NO_IOPORT
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on CPU_SUBTYPE_SH7780
help
SH-2007 is a single-board computer based around SH7780 chip
@@ -334,6 +345,7 @@ config SH_SH2007
config SH_APSH4A3A
bool "AP-SH4A-3A"
select SH_ALPHA_BOARD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on CPU_SUBTYPE_SH7785
help
Select AP-SH4A-3A if configuring for an ALPHAPROJECT AP-SH4A-3A.
@@ -342,6 +354,7 @@ config SH_APSH4AD0A
bool "AP-SH4AD-0A"
select SH_ALPHA_BOARD
select SYS_SUPPORTS_PCI
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
depends on CPU_SUBTYPE_SH7786
help
Select AP-SH4AD-0A if configuring for an ALPHAPROJECT AP-SH4AD-0A.
diff --git a/arch/sh/boards/board-apsh4a3a.c b/arch/sh/boards/board-apsh4a3a.c
index 2823619c6006..0a39c241628a 100644
--- a/arch/sh/boards/board-apsh4a3a.c
+++ b/arch/sh/boards/board-apsh4a3a.c
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mtd/physmap.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
@@ -66,6 +68,12 @@ static struct platform_device nor_flash_device = {
.resource = nor_flash_resources,
};
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
@@ -105,6 +113,8 @@ static struct platform_device *apsh4a3a_devices[] __initdata = {
static int __init apsh4a3a_devices_setup(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
return platform_add_devices(apsh4a3a_devices,
ARRAY_SIZE(apsh4a3a_devices));
}
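Most of the SH board files in this series gain the same boilerplate: the smsc911x driver now requests "vddvario" and "vdd33a" supplies, so with CONFIG_REGULATOR enabled its probe cannot complete on boards that register no such regulators. Registering dummy fixed supplies, as done above, is the minimal fix; the consumer dev_name must match the platform device, including the ".<id>" suffix when one is set (compare the "smsc911x.0" entries for Polaris and SH-2007 later in this diff). Condensed, the pattern is:

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/regulator/fixed.h>
	#include <linux/regulator/machine.h>

	/* Dummy supplies: presence matters, the voltage does not. */
	static struct regulator_consumer_supply dummy_supplies[] = {
		REGULATOR_SUPPLY("vddvario", "smsc911x"),
		REGULATOR_SUPPLY("vdd33a", "smsc911x"),
	};

	static int __init board_regulator_setup(void)
	{
		/* id 0 must be unique among fixed regulators on the board */
		regulator_register_fixed(0, dummy_supplies,
					 ARRAY_SIZE(dummy_supplies));
		return 0;
	}
	device_initcall(board_regulator_setup);
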
diff --git a/arch/sh/boards/board-apsh4ad0a.c b/arch/sh/boards/board-apsh4ad0a.c
index b4d6292a9247..92eac3a99187 100644
--- a/arch/sh/boards/board-apsh4ad0a.c
+++ b/arch/sh/boards/board-apsh4ad0a.c
@@ -12,12 +12,20 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <asm/machvec.h>
#include <asm/sizes.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
@@ -56,6 +64,8 @@ static struct platform_device *apsh4ad0a_devices[] __initdata = {
static int __init apsh4ad0a_devices_setup(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
return platform_add_devices(apsh4ad0a_devices,
ARRAY_SIZE(apsh4ad0a_devices));
}
diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c
index 90568f9de3a4..20500858b56c 100644
--- a/arch/sh/boards/board-magicpanelr2.c
+++ b/arch/sh/boards/board-magicpanelr2.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -24,6 +26,12 @@
#include <asm/heartbeat.h>
#include <cpu/sh7720.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
#define LAN9115_READY (__raw_readl(0xA8000084UL) & 0x00000001UL)
/* Wait until reset finished. Timeout is 100ms. */
@@ -348,6 +356,8 @@ static struct platform_device *mpr2_devices[] __initdata = {
static int __init mpr2_devices_setup(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
return platform_add_devices(mpr2_devices, ARRAY_SIZE(mpr2_devices));
}
device_initcall(mpr2_devices_setup);
diff --git a/arch/sh/boards/board-polaris.c b/arch/sh/boards/board-polaris.c
index 0978ae2e4847..37a08d094727 100644
--- a/arch/sh/boards/board-polaris.c
+++ b/arch/sh/boards/board-polaris.c
@@ -9,6 +9,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -22,6 +24,12 @@
#define AREA5_WAIT_CTRL (0x1C00)
#define WAIT_STATES_10 (0x7)
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+};
+
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
@@ -88,6 +96,8 @@ static int __init polaris_initialise(void)
printk(KERN_INFO "Configuring Polaris external bus\n");
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
/* Configure area 5 with 2 wait states */
wcr = __raw_readw(WCR2);
wcr &= (~AREA5_WAIT_CTRL);
diff --git a/arch/sh/boards/board-sh2007.c b/arch/sh/boards/board-sh2007.c
index b90b78f6a829..1980bb7e5780 100644
--- a/arch/sh/boards/board-sh2007.c
+++ b/arch/sh/boards/board-sh2007.c
@@ -6,6 +6,8 @@
*/
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
@@ -13,6 +15,14 @@
#include <asm/machvec.h>
#include <mach/sh2007.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
+ REGULATOR_SUPPLY("vddvario", "smsc911x.1"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x.1"),
+};
+
struct smsc911x_platform_config smc911x_info = {
.flags = SMSC911X_USE_32BIT,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
@@ -98,6 +108,8 @@ static struct platform_device *sh2007_devices[] __initdata = {
static int __init sh2007_io_init(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
platform_add_devices(sh2007_devices, ARRAY_SIZE(sh2007_devices));
return 0;
}
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 5087f8bb4cff..41f86702eb9f 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/irq.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/io.h>
@@ -199,6 +201,15 @@ static struct platform_device sh7757_eth_giga1_device = {
},
};
+/* Fixed 3.3V regulator to be used by SDHI0, MMCIF */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+};
+
/* SH_MMCIF */
static struct resource sh_mmcif_resources[] = {
[0] = {
@@ -329,6 +340,9 @@ static struct spi_board_info spi_board_info[] = {
static int __init sh7757lcr_devices_setup(void)
{
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
/* RGMII (PTA) */
gpio_request(GPIO_FN_ET0_MDC, NULL);
gpio_request(GPIO_FN_ET0_MDIO, NULL);
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index f33ebf447073..9e963c1d1447 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -20,6 +20,8 @@
#include <linux/mtd/sh_flctl.h>
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/gpio.h>
#include <linux/videodev2.h>
@@ -34,6 +36,12 @@
#include <asm/suspend.h>
#include <cpu/sh7723.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct smsc911x_platform_config smsc911x_config = {
.phy_interface = PHY_INTERFACE_MODE_MII,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
@@ -423,6 +431,15 @@ static struct platform_device ceu_device = {
},
};
+/* Fixed 3.3V regulators to be used by SDHI0, SDHI1 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
static struct resource sdhi0_cn3_resources[] = {
[0] = {
.name = "SDHI0",
@@ -544,6 +561,10 @@ static int __init ap325rxa_devices_setup(void)
&ap325rxa_sdram_leave_start,
&ap325rxa_sdram_leave_end);
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
/* LD3 and LD4 LEDs */
gpio_request(GPIO_PTX5, NULL); /* RUN */
gpio_direction_output(GPIO_PTX5, 1);
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 4158d70c0dea..64559e8af14b 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -19,6 +19,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/usb/r8a66597.h>
#include <linux/usb/renesas_usbhs.h>
#include <linux/i2c.h>
@@ -242,9 +244,17 @@ static int usbhs_get_id(struct platform_device *pdev)
return gpio_get_value(GPIO_PTB3);
}
+static void usbhs_phy_reset(struct platform_device *pdev)
+{
+ /* enable vbus if HOST */
+ if (!gpio_get_value(GPIO_PTB3))
+ gpio_set_value(GPIO_PTB5, 1);
+}
+
static struct renesas_usbhs_platform_info usbhs_info = {
.platform_callback = {
.get_id = usbhs_get_id,
+ .phy_reset = usbhs_phy_reset,
},
.driver_param = {
.buswait_bwait = 4,
@@ -518,10 +528,86 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0,
};
+static struct regulator_consumer_supply cn12_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
+static struct regulator_init_data cn12_power_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(cn12_power_consumers),
+ .consumer_supplies = cn12_power_consumers,
+};
+
+static struct fixed_voltage_config cn12_power_info = {
+ .supply_name = "CN12 SD/MMC Vdd",
+ .microvolts = 3300000,
+ .gpio = GPIO_PTB7,
+ .enable_high = 1,
+ .init_data = &cn12_power_init_data,
+};
+
+static struct platform_device cn12_power = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &cn12_power_info,
+ },
+};
+
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
/* SDHI0 */
+static struct regulator_consumer_supply sdhi0_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+};
+
+static struct regulator_init_data sdhi0_power_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(sdhi0_power_consumers),
+ .consumer_supplies = sdhi0_power_consumers,
+};
+
+static struct fixed_voltage_config sdhi0_power_info = {
+ .supply_name = "CN11 SD/MMC Vdd",
+ .microvolts = 3300000,
+ .gpio = GPIO_PTB6,
+ .enable_high = 1,
+ .init_data = &sdhi0_power_init_data,
+};
+
+static struct platform_device sdhi0_power = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &sdhi0_power_info,
+ },
+};
+
static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{
+ static int power_gpio = -EINVAL;
+
+ if (power_gpio < 0) {
+ int ret = gpio_request(GPIO_PTB6, NULL);
+ if (!ret) {
+ power_gpio = GPIO_PTB6;
+ gpio_direction_output(power_gpio, 0);
+ }
+ }
+
+ /*
+ * Toggle the GPIO regardless, whether we managed to grab it above or
+ * the fixed regulator driver did.
+ */
gpio_set_value(GPIO_PTB6, state);
}
@@ -562,13 +648,27 @@ static struct platform_device sdhi0_device = {
},
};
-#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
-/* SDHI1 */
-static void sdhi1_set_pwr(struct platform_device *pdev, int state)
+static void cn12_set_pwr(struct platform_device *pdev, int state)
{
+ static int power_gpio = -EINVAL;
+
+ if (power_gpio < 0) {
+ int ret = gpio_request(GPIO_PTB7, NULL);
+ if (!ret) {
+ power_gpio = GPIO_PTB7;
+ gpio_direction_output(power_gpio, 0);
+ }
+ }
+
+ /*
+ * Toggle the GPIO regardless, whether we managed to grab it above or
+ * the fixed regulator driver did.
+ */
gpio_set_value(GPIO_PTB7, state);
}
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+/* SDHI1 */
static int sdhi1_get_cd(struct platform_device *pdev)
{
return !gpio_get_value(GPIO_PTW7);
@@ -579,7 +679,7 @@ static struct sh_mobile_sdhi_info sdhi1_info = {
.dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
.tmio_caps = MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_NEEDS_POLL,
- .set_pwr = sdhi1_set_pwr,
+ .set_pwr = cn12_set_pwr,
.get_cd = sdhi1_get_cd,
};
@@ -899,14 +999,9 @@ static struct platform_device vou_device = {
#if defined(CONFIG_MMC_SH_MMCIF) || defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* SH_MMCIF */
-static void mmcif_set_pwr(struct platform_device *pdev, int state)
-{
- gpio_set_value(GPIO_PTB7, state);
-}
-
static void mmcif_down_pwr(struct platform_device *pdev)
{
- gpio_set_value(GPIO_PTB7, 0);
+ cn12_set_pwr(pdev, 0);
}
static struct resource sh_mmcif_resources[] = {
@@ -929,7 +1024,7 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {
- .set_pwr = mmcif_set_pwr,
+ .set_pwr = cn12_set_pwr,
.down_pwr = mmcif_down_pwr,
.sup_pclk = 0, /* SH7724: Max Pclk/2 */
.caps = MMC_CAP_4_BIT_DATA |
@@ -960,7 +1055,9 @@ static struct platform_device *ecovec_devices[] __initdata = {
&ceu0_device,
&ceu1_device,
&keysc_device,
+ &cn12_power,
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
+ &sdhi0_power,
&sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
&sdhi1_device,
@@ -1258,8 +1355,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_SDHI0D2, NULL);
gpio_request(GPIO_FN_SDHI0D1, NULL);
gpio_request(GPIO_FN_SDHI0D0, NULL);
- gpio_request(GPIO_PTB6, NULL);
- gpio_direction_output(GPIO_PTB6, 0);
#else
/* enable MSIOF0 on CN11 (needs DS2.4 set to OFF) */
gpio_request(GPIO_FN_MSIOF0_TXD, NULL);
@@ -1288,8 +1383,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_MMC_D0, NULL);
gpio_request(GPIO_FN_MMC_CLK, NULL);
gpio_request(GPIO_FN_MMC_CMD, NULL);
- gpio_request(GPIO_PTB7, NULL);
- gpio_direction_output(GPIO_PTB7, 0);
cn12_enabled = true;
#elif defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
@@ -1301,8 +1394,6 @@ static int __init arch_setup(void)
gpio_request(GPIO_FN_SDHI1D2, NULL);
gpio_request(GPIO_FN_SDHI1D1, NULL);
gpio_request(GPIO_FN_SDHI1D0, NULL);
- gpio_request(GPIO_PTB7, NULL);
- gpio_direction_output(GPIO_PTB7, 0);
/* Card-detect, used on CN12 with SDHI1 */
gpio_request(GPIO_PTW7, NULL);
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 43a179ce9afc..f2a4304fbe23 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -21,6 +21,8 @@
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/i2c.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
#include <linux/sh_intc.h>
@@ -341,6 +343,13 @@ static struct platform_device kfr2r09_camera = {
},
};
+/* Fixed 3.3V regulator to be used by SDHI0 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+};
+
static struct resource kfr2r09_sh_sdhi0_resources[] = {
[0] = {
.name = "SDHI0",
@@ -523,6 +532,9 @@ static int __init kfr2r09_devices_setup(void)
&kfr2r09_sdram_leave_start,
&kfr2r09_sdram_leave_end);
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
/* enable SCIF1 serial port for YC401 console support */
gpio_request(GPIO_FN_SCIF1_RXD, NULL);
gpio_request(GPIO_FN_SCIF1_TXD, NULL);
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index a8a1ca741c85..8b73194ed2ce 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -17,6 +17,8 @@
#include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smc91x.h>
#include <linux/delay.h>
#include <linux/clk.h>
@@ -386,6 +388,13 @@ static struct platform_device migor_ceu_device = {
},
};
+/* Fixed 3.3V regulator to be used by SDHI0 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+};
+
static struct resource sdhi_cn9_resources[] = {
[0] = {
.name = "SDHI",
@@ -498,6 +507,10 @@ static int __init migor_devices_setup(void)
&migor_sdram_enter_end,
&migor_sdram_leave_start,
&migor_sdram_leave_end);
+
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
/* Let D11 LED show STATUS0 */
gpio_request(GPIO_FN_STATUS0, NULL);
diff --git a/arch/sh/boards/mach-rsk/setup.c b/arch/sh/boards/mach-rsk/setup.c
index 895f030070d3..2685ea03b064 100644
--- a/arch/sh/boards/mach-rsk/setup.c
+++ b/arch/sh/boards/mach-rsk/setup.c
@@ -16,9 +16,17 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/map.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <asm/machvec.h>
#include <asm/io.h>
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static const char *part_probes[] = { "cmdlinepart", NULL };
static struct mtd_partition rsk_partitions[] = {
@@ -67,6 +75,8 @@ static struct platform_device *rsk_devices[] __initdata = {
static int __init rsk_devices_setup(void)
{
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
return platform_add_devices(rsk_devices,
ARRAY_SIZE(rsk_devices));
}
diff --git a/arch/sh/boards/mach-sdk7786/setup.c b/arch/sh/boards/mach-sdk7786/setup.c
index 27a2314f50ac..c29268bfd34a 100644
--- a/arch/sh/boards/mach-sdk7786/setup.c
+++ b/arch/sh/boards/mach-sdk7786/setup.c
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smsc911x.h>
#include <linux/i2c.h>
#include <linux/irq.h>
@@ -38,6 +40,12 @@ static struct platform_device heartbeat_device = {
.resource = &heartbeat_resource,
};
+/* Dummy supplies, where voltage doesn't matter */
+static struct regulator_consumer_supply dummy_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct resource smsc911x_resources[] = {
[0] = {
.name = "smsc911x-memory",
@@ -236,6 +244,8 @@ static void __init sdk7786_setup(char **cmdline_p)
{
pr_info("Renesas Technology Europe SDK7786 support:\n");
+ regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+
sdk7786_fpga_init();
sdk7786_nmi_init();
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index ffbf5bc7366b..35f6efa3ac0e 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -18,6 +18,8 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h>
#include <linux/delay.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <linux/smc91x.h>
#include <linux/gpio.h>
#include <linux/input.h>
@@ -454,6 +456,15 @@ static struct platform_device sh7724_usb1_gadget_device = {
.resource = sh7724_usb1_gadget_resources,
};
+/* Fixed 3.3V regulator to be used by SDHI0, SDHI1 */
+static struct regulator_consumer_supply fixed3v3_power_consumers[] =
+{
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
+ REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"),
+};
+
static struct resource sdhi0_cn7_resources[] = {
[0] = {
.name = "SDHI0",
@@ -684,6 +695,10 @@ static int __init devices_setup(void)
&ms7724se_sdram_enter_end,
&ms7724se_sdram_leave_start,
&ms7724se_sdram_leave_end);
+
+ regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
+ ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+
/* Reset Release */
fpga_out = __raw_readw(FPGA_OUT);
/* bit4: NTSC_PDN, bit5: NTSC_RESET */
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index e7583484cc07..95ae23fcfdd6 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -11,7 +11,7 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index 8a7dd7b59c5c..76a76a295d74 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -18,8 +18,8 @@ CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 72c3fad7383f..6bc30ab9fd18 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -11,7 +11,7 @@ CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEMCG=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig
index 6bb413036892..cd6c519f8fad 100644
--- a/arch/sh/configs/shx3_defconfig
+++ b/arch/sh/configs/shx3_defconfig
@@ -13,7 +13,7 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_MEMCG=y
CONFIG_RELAY=y
CONFIG_NAMESPACES=y
CONFIG_UTS_NS=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index 8bfa4d056d7a..d7f89be9f474 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -15,8 +15,8 @@ CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 4c171f13b0e8..b22565623142 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -335,7 +335,7 @@ static int dmae_irq_init(void)
for (n = 0; n < NR_DMAE; n++) {
int i = request_irq(get_dma_error_irq(n), dma_err,
- IRQF_SHARED, dmae_name[n], NULL);
+ IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
if (unlikely(i < 0)) {
printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
return i;
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 4a5350037c8f..1b6199740e98 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -6,7 +6,6 @@
extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
-extern char _ebss[];
extern char __start_eh_frame[], __stop_eh_frame[];
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index e800a38c9f8d..7bc67076baac 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -6,7 +6,6 @@
# endif
# define __ARCH_WANT_SYS_RT_SIGSUSPEND
-# define __ARCH_WANT_IPC_PARSE_VERSION
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
# define __ARCH_WANT_STAT64
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 48d14498e774..2a0ca8780f0d 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -183,18 +183,30 @@ enum {
GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0,
GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK,
GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE,
- GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22,
- GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20,
- GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18,
- GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16,
- GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
- GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
- GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
- GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
- GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
- GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
- GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
- GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+ GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22,
+ GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20,
+ GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18,
+ GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16,
+ GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14,
+ GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12,
+ GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10,
+ GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8,
+ GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6,
+ GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4,
+ GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2,
+ GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0,
+ GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22,
+ GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20,
+ GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18,
+ GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16,
+ GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14,
+ GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12,
+ GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10,
+ GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8,
+ GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6,
+ GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4,
+ GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2,
+ GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0,
GPIO_FN_LCD_M_DISP,
};
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 41f9f8b9db73..5340f3bc1863 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -283,5 +283,7 @@ enum {
SHDMA_SLAVE_RIIC8_RX,
SHDMA_SLAVE_RIIC9_TX,
SHDMA_SLAVE_RIIC9_RX,
+ SHDMA_SLAVE_RSPI_TX,
+ SHDMA_SLAVE_RSPI_RX,
};
#endif /* __ASM_SH7757_H__ */
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
index f25127c46eca..039e4587dd9b 100644
--- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
@@ -758,12 +758,22 @@ enum {
DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
LCD_CLK_MARK, LCD_EXTCLK_MARK,
LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
- LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK,
- LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK,
- LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
- LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
- LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
- LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+ LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+ LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+ LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+ LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+ LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+ LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+ LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+ LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+ LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+ LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+ LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+ LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+ LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+ LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+ LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
LCD_M_DISP_MARK,
@@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PF1_DATA, PF1MD_000),
PINMUX_DATA(BACK_MARK, PF1MD_001),
+ PINMUX_DATA(SSL10_MARK, PF1MD_011),
PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
PINMUX_DATA(DACK0_MARK, PF1MD_101),
@@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PG27_DATA, PG27MD_00),
PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
PINMUX_DATA(PG26_DATA, PG26MD_00),
PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
PINMUX_DATA(PG25_DATA, PG25MD_00),
PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
PINMUX_DATA(PG24_DATA, PG24MD_00),
PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
PINMUX_DATA(PG23_DATA, PG23MD_000),
- PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010),
+ PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
PINMUX_DATA(TXD5_MARK, PG23MD_100),
PINMUX_DATA(PG22_DATA, PG22MD_000),
- PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010),
+ PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
PINMUX_DATA(RXD5_MARK, PG22MD_100),
PINMUX_DATA(PG21_DATA, PG21MD_000),
PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
PINMUX_DATA(TXD4_MARK, PG21MD_100),
PINMUX_DATA(PG20_DATA, PG20MD_000),
PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA20_PG20_MARK, PG20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
PINMUX_DATA(RXD4_MARK, PG20MD_100),
PINMUX_DATA(PG19_DATA, PG19MD_000),
PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010),
+ PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
PINMUX_DATA(SCK5_MARK, PG19MD_100),
PINMUX_DATA(PG18_DATA, PG18MD_000),
PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010),
+ PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
PINMUX_DATA(SCK4_MARK, PG18MD_100),
@@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = {
// we're going with 2 bits
PINMUX_DATA(PG17_DATA, PG17MD_00),
PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
- PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10),
+ PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
// TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description
// we're going with 2 bits
PINMUX_DATA(PG16_DATA, PG16MD_00),
PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
- PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10),
+ PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
PINMUX_DATA(PG15_DATA, PG15MD_00),
PINMUX_DATA(D31_MARK, PG15MD_01),
- PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10),
+ PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
PINMUX_DATA(PG14_DATA, PG14MD_00),
PINMUX_DATA(D30_MARK, PG14MD_01),
- PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10),
+ PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
PINMUX_DATA(PG13_DATA, PG13MD_00),
PINMUX_DATA(D29_MARK, PG13MD_01),
- PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10),
+ PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
PINMUX_DATA(PG12_DATA, PG12MD_00),
PINMUX_DATA(D28_MARK, PG12MD_01),
- PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10),
+ PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
PINMUX_DATA(PG11_DATA, PG11MD_000),
PINMUX_DATA(D27_MARK, PG11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010),
+ PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
PINMUX_DATA(PG10_DATA, PG10MD_000),
PINMUX_DATA(D26_MARK, PG10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010),
+ PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
PINMUX_DATA(PG9_DATA, PG9MD_000),
PINMUX_DATA(D25_MARK, PG9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010),
+ PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
PINMUX_DATA(PG8_DATA, PG8MD_000),
PINMUX_DATA(D24_MARK, PG8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010),
+ PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
PINMUX_DATA(PG7_DATA, PG7MD_000),
PINMUX_DATA(D23_MARK, PG7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010),
+ PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
PINMUX_DATA(PG6_DATA, PG6MD_000),
PINMUX_DATA(D22_MARK, PG6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010),
+ PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
PINMUX_DATA(PG5_DATA, PG5MD_000),
PINMUX_DATA(D21_MARK, PG5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010),
+ PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
PINMUX_DATA(PG4_DATA, PG4MD_000),
PINMUX_DATA(D20_MARK, PG4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010),
+ PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
PINMUX_DATA(PG3_DATA, PG3MD_000),
PINMUX_DATA(D19_MARK, PG3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010),
+ PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
PINMUX_DATA(PG2_DATA, PG2MD_000),
PINMUX_DATA(D18_MARK, PG2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010),
+ PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
PINMUX_DATA(PG1_DATA, PG1MD_000),
PINMUX_DATA(D17_MARK, PG1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010),
+ PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
PINMUX_DATA(PG0_DATA, PG0MD_000),
PINMUX_DATA(D16_MARK, PG0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010),
+ PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
@@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ23_DATA, PJ23MD_000),
PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
- PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010),
+ PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
PINMUX_DATA(CTX1_MARK, PJ23MD_101),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
- PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010),
+ PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
PINMUX_DATA(CRX1_MARK, PJ22MD_101),
@@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010),
+ PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
PINMUX_DATA(CTX2_MARK, PJ21MD_101),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010),
+ PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
PINMUX_DATA(CRX2_MARK, PJ20MD_101),
@@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010),
+ PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
@@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ18_DATA, PJ18MD_000),
PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010),
+ PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
PINMUX_DATA(PJ17_DATA, PJ17MD_000),
PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
- PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010),
+ PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
PINMUX_DATA(PJ16_DATA, PJ16MD_000),
PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
- PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010),
+ PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
PINMUX_DATA(PJ15_DATA, PJ15MD_000),
PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
- PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010),
+ PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
PINMUX_DATA(TXD7_MARK, PJ15MD_101),
PINMUX_DATA(PJ14_DATA, PJ14MD_000),
PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
- PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010),
+ PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
PINMUX_DATA(TXD6_MARK, PJ14MD_101),
PINMUX_DATA(PJ13_DATA, PJ13MD_000),
PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
- PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010),
+ PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
PINMUX_DATA(TXD5_MARK, PJ13MD_101),
PINMUX_DATA(PJ12_DATA, PJ12MD_000),
PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
- PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010),
+ PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
PINMUX_DATA(SCK7_MARK, PJ12MD_101),
PINMUX_DATA(PJ11_DATA, PJ11MD_000),
PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010),
+ PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
PINMUX_DATA(SCK6_MARK, PJ11MD_101),
PINMUX_DATA(PJ10_DATA, PJ10MD_000),
PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010),
+ PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
PINMUX_DATA(SCK5_MARK, PJ10MD_101),
PINMUX_DATA(PJ9_DATA, PJ9MD_000),
PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010),
+ PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
PINMUX_DATA(RTS5_MARK, PJ9MD_101),
PINMUX_DATA(PJ8_DATA, PJ8MD_000),
PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010),
+ PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
PINMUX_DATA(CTS5_MARK, PJ8MD_101),
PINMUX_DATA(PJ7_DATA, PJ7MD_000),
PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010),
+ PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
PINMUX_DATA(PJ6_DATA, PJ6MD_000),
PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010),
+ PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
PINMUX_DATA(PJ5_DATA, PJ5MD_000),
PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010),
+ PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
PINMUX_DATA(PJ4_DATA, PJ4MD_000),
PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010),
+ PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
PINMUX_DATA(PJ3_DATA, PJ3MD_000),
PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010),
+ PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010),
+ PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
PINMUX_DATA(PJ1_DATA, PJ1MD_000),
PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010),
+ PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
PINMUX_DATA(PJ0_DATA, PJ0MD_000),
PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010),
+ PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
};
@@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
};
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index c87e78f73234..5f30f805d2f2 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -334,8 +334,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]),
CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]),
CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]),
- CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]),
- CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]),
+ CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]),
+ CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]),
CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]),
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]),
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]),
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 65786c7f5ded..6a868b091c2d 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_dma.h>
#include <linux/sh_timer.h>
#include <linux/sh_intc.h>
#include <linux/uio_driver.h>
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index a7708425afa9..4a2f357f4df8 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -216,6 +216,20 @@ static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
TS_INDEX2VAL(XMIT_SZ_8BIT),
.mid_rid = 0x42,
},
+ {
+ .slave_id = SHDMA_SLAVE_RSPI_TX,
+ .addr = 0xfe480004,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RSPI_RX,
+ .addr = 0xfe480004,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc2,
+ },
};
static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7b57bf1dc855..ebe7a7d97215 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p)
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(__bss_start);
- bss_resource.end = virt_to_phys(_ebss)-1;
+ bss_resource.end = virt_to_phys(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
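
With _ebss gone, the bss bounds come from the generic linker symbols exported through asm-generic/sections.h; a condensed sketch of the resource setup using only those symbols (resource name and helper are illustrative, not from this patch):

#include <linux/init.h>
#include <linux/ioport.h>
#include <asm/sections.h>	/* __bss_start[], __bss_stop[] */
#include <asm/io.h>		/* virt_to_phys() */

static struct resource demo_bss_resource = { .name = "Kernel bss" };

static void __init demo_init_bss_resource(void)
{
	/* Same computation as the hunk above, expressed with the common
	 * __bss_start/__bss_stop pair instead of a private _ebss. */
	demo_bss_resource.start = virt_to_phys(__bss_start);
	demo_bss_resource.end   = virt_to_phys(__bss_stop) - 1;
}
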
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 3896f26efa4a..2a0a596ebf67 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
EXPORT_SYMBOL(empty_zero_page);
#define DECLARE_EXPORT(name) \
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c98905f71e28..db88cbf9eafd 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -78,7 +78,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, PAGE_SIZE, 4)
- _ebss = .; /* uClinux MTD sucks */
_end = . ;
STABS_DEBUG
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 84a57761f17e..60164e65d665 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
*
* Make sure the stack pointer contains a valid address. Valid
* addresses for kernel stacks are anywhere after the bss
- * (after _ebss) and anywhere in init_thread_union (init_stack).
+ * (after __bss_stop) and anywhere in init_thread_union (init_stack).
*/
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
@@ -60,7 +60,7 @@
cmp/hi r2, r1; \
bf stack_panic; \
\
- /* If sp > _ebss then we're OK. */ \
+ /* If sp > __bss_stop then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
@@ -70,7 +70,7 @@
cmp/hs r1, r15; \
bf stack_panic; \
\
- /* If sp > init_stack && sp < _ebss, not OK. */ \
+ /* If sp > init_stack && sp < __bss_stop, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
@@ -292,8 +292,6 @@ stack_panic:
nop
.align 2
-.L_ebss:
- .long _ebss
.L_init_thread_union:
.long init_thread_union
.Lpanic:
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 1fc25d85e515..3bdc1ad9a341 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -58,11 +58,15 @@ static void show_pte(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
- if (mm)
+ if (mm) {
pgd = mm->pgd;
- else
+ } else {
pgd = get_TTB();
+ if (unlikely(!pgd))
+ pgd = swapper_pg_dir;
+ }
+
printk(KERN_ALERT "pgd = %p\n", pgd);
pgd += pgd_index(addr);
printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index e74ff1377626..67f1f6f5f4e1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -27,6 +27,7 @@ config SPARC
select HAVE_ARCH_JUMP_LABEL
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select ARCH_WANT_IPC_PARSE_VERSION
select USE_GENERIC_SMP_HELPERS if SMP
select GENERIC_PCI_IOMAP
select HAVE_NMI_WATCHDOG if SPARC64
diff --git a/arch/sparc/include/asm/fixmap.h b/arch/sparc/include/asm/fixmap.h
deleted file mode 100644
index f18fc0755adf..000000000000
--- a/arch/sparc/include/asm/fixmap.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/kernel.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the top of unused virtual memory (0xfd000000 - 1 page) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanism,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
- FIX_HOLE,
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN,
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between IO pages at 0xfd000000 and
- * the start of the fixmap.
- */
-#define FIXADDR_TOP (0xfcfff000UL)
-#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3b6e00dd96e5..4f9e15c757e2 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -21,7 +21,6 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
-#include <asm/fixmap.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
@@ -29,7 +28,6 @@
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
-extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
@@ -72,7 +70,6 @@ static inline void kunmap(struct page *page)
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
-extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 3375c6293893..15a716934e4d 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -82,7 +82,6 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
-extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
diff --git a/arch/sparc/include/asm/mmu_context_32.h b/arch/sparc/include/asm/mmu_context_32.h
index 01456c900720..2df2a9be8f6d 100644
--- a/arch/sparc/include/asm/mmu_context_32.h
+++ b/arch/sparc/include/asm/mmu_context_32.h
@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * Initialize a new mmu context. This is invoked when a new
+/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
*/
-#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-/*
- * Destroy a dead context. This occurs when mmput drops the
+/* Destroy a dead context. This occurs when mmput drops the
* mm_users count to zero, the mmaps have been released, and
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state.
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index fab78a308ebf..f82a1f36b655 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -107,8 +107,7 @@ typedef unsigned long iopgprot_t;
typedef struct page *pgtable_t;
-extern unsigned long sparc_unmapped_base;
-#define TASK_UNMAPPED_BASE sparc_unmapped_base
+#define TASK_UNMAPPED_BASE 0x50000000
#else /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index e5b169b46d21..9b1c36de0f18 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -11,28 +11,15 @@
struct page;
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned long pgtable_cache_sz;
- unsigned long pgd_cache_sz;
-} pgt_quicklists;
-
-unsigned long srmmu_get_nocache(int size, int align);
-void srmmu_free_nocache(unsigned long vaddr, int size);
-
-#define pgd_quicklist (pgt_quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (pgt_quicklists.pte_cache)
-#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
-#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
+void *srmmu_get_nocache(int size, int align);
+void srmmu_free_nocache(void *addr, int size);
#define check_pgt_cache() do { } while (0)
pgd_t *get_pgd_fast(void);
static inline void free_pgd_fast(pgd_t *pgd)
{
- srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
+ srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
}
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
@@ -50,13 +37,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
- SRMMU_PMD_TABLE_SIZE);
+ return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+ SRMMU_PMD_TABLE_SIZE);
}
static inline void free_pmd_fast(pmd_t * pmd)
{
- srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+ srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
}
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
@@ -73,13 +60,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
static inline void free_pte_fast(pte_t *pte)
{
- srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
+ srmmu_free_nocache(pte, PTE_SIZE);
}
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index cbbbed5cb3aa..6fc13483f702 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -52,8 +52,9 @@ extern unsigned long calc_highpages(void);
#define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL
-/* Top-level page directory */
-extern pgd_t swapper_pg_dir[1024];
+/* Top-level page directory - dummy used by init-mm.
+ * srmmu.c will assign the real one (which is dynamically sized) */
+#define swapper_pg_dir NULL
extern void paging_init(void);
@@ -78,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
-extern int num_contexts;
-
/* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations.
@@ -88,18 +87,11 @@ extern unsigned long phys_base;
extern unsigned long pfn_base;
/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern pte_t * __bad_pagetable(void);
-extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
/*
@@ -398,36 +390,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
*/
#define PTE_FILE_MAX_BITS 24
-/*
- */
-struct ctx_list {
- struct ctx_list *next;
- struct ctx_list *prev;
- unsigned int ctx_number;
- struct mm_struct *ctx_mm;
-};
-
-extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
-extern struct ctx_list ctx_free; /* Head of free list */
-extern struct ctx_list ctx_used; /* Head of used contexts list */
-
-#define NO_CONTEXT -1
-
-static inline void remove_from_ctx_list(struct ctx_list *entry)
-{
- entry->next->prev = entry->prev;
- entry->prev->next = entry->next;
-}
-
-static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
-{
- entry->next = head;
- (entry->prev = head->prev)->next = entry;
- head->prev = entry;
-}
-#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
-#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
-
static inline unsigned long
__get_phys (unsigned long addr)
{
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index c7cb0af0eb59..fb2693464807 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -423,7 +423,6 @@
#endif
#ifdef __KERNEL__
-#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h
index da6535d88a72..c3dbcf902034 100644
--- a/arch/sparc/include/asm/vaddrs.h
+++ b/arch/sparc/include/asm/vaddrs.h
@@ -30,6 +30,28 @@
*/
#define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
+#ifndef __ASSEMBLY__
+#include <asm/kmap_types.h>
+
+enum fixed_addresses {
+ FIX_HOLE,
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS),
+#endif
+ __end_of_fixed_addresses
+};
+#endif
+
+/* Leave one empty page between IO pages at 0xfd000000 and
+ * the top of the fixmap.
+ */
+#define FIXADDR_TOP (0xfcfff000UL)
+#define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
#define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */
#define IOBASE_VADDR 0xfe000000
#define IOBASE_END 0xfe600000
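
Under these definitions the fixmap occupies a small window just below 0xfd000000; with PAGE_SHIFT = 12 on sparc32 the slot addresses work out to:

	__fix_to_virt(FIX_HOLE)       = 0xfcfff000 - (0 << 12) = 0xfcfff000
	__fix_to_virt(FIX_KMAP_BEGIN) = 0xfcfff000 - (1 << 12) = 0xfcffe000

and FIXADDR_START = FIXADDR_TOP - ((FIX_KMAP_END + 1) << 12): one 4 KiB page per fixed_addresses index, growing downwards from FIXADDR_TOP as the index increases.
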
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index afeb1d770303..3d92c0a8f6c4 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -58,8 +58,6 @@ sun4e_notsup:
/* This was the only reasonable way I could think of to properly align
* these page-table data structures.
*/
- .globl swapper_pg_dir
-swapper_pg_dir: .skip PAGE_SIZE
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 435e406fdec3..81d92fc9983b 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1250,14 +1250,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name);
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
- err = request_irq(lp->cfg.rx_irq, ldc_rx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ err = request_irq(lp->cfg.rx_irq, ldc_rx, IRQF_DISABLED,
lp->rx_irq_name, lp);
if (err)
return err;
- err = request_irq(lp->cfg.tx_irq, ldc_tx,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
+ err = request_irq(lp->cfg.tx_irq, ldc_tx, IRQF_DISABLED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index e34e2c40c060..f8b6eee40bde 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -486,17 +486,6 @@ void __init leon_trans_init(struct device_node *dp)
}
}
-void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0;
-
-void __init leon_node_init(struct device_node *dp, struct device_node ***nextp)
-{
- if (prom_amba_init &&
- strcmp(dp->type, "ambapp") == 0 &&
- strcmp(dp->name, "ambapp0") == 0) {
- prom_amba_init(dp, nextp);
- }
-}
-
#ifdef CONFIG_SMP
void leon_clear_profile_irq(int cpu)
{
@@ -522,8 +511,3 @@ void __init leon_init_IRQ(void)
sparc_config.clear_clock_irq = leon_clear_clock_irq;
sparc_config.load_profile_irq = leon_load_profile_irq;
}
-
-void __init leon_init(void)
-{
- of_pdt_build_more = &leon_node_init;
-}
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index cb36e82dcd5d..14006d8aca28 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,9 +333,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
put_psr(get_psr() | PSR_EF);
fpsave(&p->thread.float_regs[0], &p->thread.fsr,
&p->thread.fpqueue[0], &p->thread.fpqdepth);
-#ifdef CONFIG_SMP
- clear_thread_flag(TIF_USEDFPU);
-#endif
}
/*
@@ -413,6 +410,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
#ifdef CONFIG_SMP
/* FPU must be disabled on SMP. */
childregs->psr &= ~PSR_EF;
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
#endif
/* Set the return value for the child. */
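
The point of the TIF_USEDFPU move above is that clear_thread_flag() always operates on current (the parent inside copy_thread()), whereas clear_tsk_thread_flag() takes the task to modify, so it is the child's flag that gets cleared. A minimal sketch (demo helper only, not from this patch):

#include <linux/sched.h>

static void demo_mark_child_fpu_unused(struct task_struct *child)
{
	/* Acts on the task it is given -- the child -- not on current;
	 * clear_thread_flag(TIF_USEDFPU) would hit the parent instead. */
	clear_tsk_thread_flag(child, TIF_USEDFPU);
}
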
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index efe3e64bba38..38bf80a22f02 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p)
(*(linux_dbvec->teach_debugger))();
}
- init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs;
/* Run-time patch instructions to match the cpu model */
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 275f74fd6f6a..11c6c9603e71 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -66,23 +66,6 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
return 0;
}
-/* Does start,end straddle the VA-space hole? */
-static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
-{
- unsigned long va_exclude_start, va_exclude_end;
-
- va_exclude_start = VA_EXCLUDE_START;
- va_exclude_end = VA_EXCLUDE_END;
-
- if (likely(start < va_exclude_start && end < va_exclude_start))
- return 0;
-
- if (likely(start >= va_exclude_end && end >= va_exclude_end))
- return 0;
-
- return 1;
-}
-
/* These functions differ from the default implementations in
* mm/mmap.c in two ways:
*
@@ -487,7 +470,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
switch (call) {
case SHMAT: {
ulong raddr;
- err = do_shmat(first, ptr, (int)second, &raddr);
+ err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
if (!err) {
if (put_user(raddr,
(ulong __user *) third))
@@ -519,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
int ret;
- if (current->personality == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
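
The personality() helper masks the value with PER_MASK, so the checks above compare only the base personality and leave flag bits such as ADDR_NO_RANDOMIZE untouched; ORing in PER_LINUX32 (and clearing it again on the way out) preserves those flags where a plain assignment would have dropped them. A small sketch of the same masking outside the syscall (hypothetical helper name):

#include <linux/personality.h>

static unsigned long demo_promote_to_linux32(unsigned long pers)
{
	/* personality() strips the flag bits above PER_MASK, so the test
	 * still matches when callers have ORed in ADDR_NO_RANDOMIZE etc. */
	if (personality(pers) == PER_LINUX)
		pers |= PER_LINUX32;	/* flag bits are preserved */
	return pers;
}
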
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 0aed75653b50..03eadf66b0d3 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -90,49 +90,49 @@
faligndata %x7, %x8, %f14;
#define FREG_MOVE_1(x0) \
- fmovd %x0, %f0;
+ fsrc2 %x0, %f0;
#define FREG_MOVE_2(x0, x1) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2;
#define FREG_MOVE_3(x0, x1, x2) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4;
#define FREG_MOVE_4(x0, x1, x2, x3) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4; \
- fmovd %x3, %f6;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4; \
+ fsrc2 %x3, %f6;
#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4; \
- fmovd %x3, %f6; \
- fmovd %x4, %f8;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4; \
+ fsrc2 %x3, %f6; \
+ fsrc2 %x4, %f8;
#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4; \
- fmovd %x3, %f6; \
- fmovd %x4, %f8; \
- fmovd %x5, %f10;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4; \
+ fsrc2 %x3, %f6; \
+ fsrc2 %x4, %f8; \
+ fsrc2 %x5, %f10;
#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4; \
- fmovd %x3, %f6; \
- fmovd %x4, %f8; \
- fmovd %x5, %f10; \
- fmovd %x6, %f12;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4; \
+ fsrc2 %x3, %f6; \
+ fsrc2 %x4, %f8; \
+ fsrc2 %x5, %f10; \
+ fsrc2 %x6, %f12;
#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
- fmovd %x0, %f0; \
- fmovd %x1, %f2; \
- fmovd %x2, %f4; \
- fmovd %x3, %f6; \
- fmovd %x4, %f8; \
- fmovd %x5, %f10; \
- fmovd %x6, %f12; \
- fmovd %x7, %f14;
+ fsrc2 %x0, %f0; \
+ fsrc2 %x1, %f2; \
+ fsrc2 %x2, %f4; \
+ fsrc2 %x3, %f6; \
+ fsrc2 %x4, %f8; \
+ fsrc2 %x5, %f10; \
+ fsrc2 %x6, %f12; \
+ fsrc2 %x7, %f14;
#define FREG_LOAD_1(base, x0) \
EX_LD(LOAD(ldd, base + 0x00, %x0))
#define FREG_LOAD_2(base, x0, x1) \
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index bafd2fc07acb..b67142b7768e 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -109,7 +109,7 @@
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
subcc %left, 8, %left; \
bl,pn %xcc, 95f; \
- fsrc1 %f0, %f1;
+ fsrc2 %f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
@@ -201,7 +201,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
andn %o1, (0x40 - 1), %o1
and %g2, 7, %g2
andncc %g3, 0x7, %g3
- fmovd %f0, %f2
+ fsrc2 %f0, %f2
sub %g3, 0x8, %g3
sub %o2, %GLOBAL_SPARE, %o2
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index b243d3b606ba..4d2df328e514 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -34,10 +34,10 @@
#endif
#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \
- fmovd %reg0, %f48; fmovd %reg1, %f50; \
- fmovd %reg2, %f52; fmovd %reg3, %f54; \
- fmovd %reg4, %f56; fmovd %reg5, %f58; \
- fmovd %reg6, %f60; fmovd %reg7, %f62;
+ fsrc2 %reg0, %f48; fsrc2 %reg1, %f50; \
+ fsrc2 %reg2, %f52; fsrc2 %reg3, %f54; \
+ fsrc2 %reg4, %f56; fsrc2 %reg5, %f58; \
+ fsrc2 %reg6, %f60; fsrc2 %reg7, %f62;
.text
@@ -104,60 +104,60 @@ cheetah_copy_page_insn:
prefetch [%o1 + 0x140], #one_read
ldd [%o1 + 0x010], %f4
prefetch [%o1 + 0x180], #one_read
- fmovd %f0, %f16
+ fsrc2 %f0, %f16
ldd [%o1 + 0x018], %f6
- fmovd %f2, %f18
+ fsrc2 %f2, %f18
ldd [%o1 + 0x020], %f8
- fmovd %f4, %f20
+ fsrc2 %f4, %f20
ldd [%o1 + 0x028], %f10
- fmovd %f6, %f22
+ fsrc2 %f6, %f22
ldd [%o1 + 0x030], %f12
- fmovd %f8, %f24
+ fsrc2 %f8, %f24
ldd [%o1 + 0x038], %f14
- fmovd %f10, %f26
+ fsrc2 %f10, %f26
ldd [%o1 + 0x040], %f0
1: ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
+ fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
+ fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
+ fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
+ fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
+ fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
+ fsrc2 %f6, %f22
ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
+ fsrc2 %f8, %f24
ldd [%o1 + 0x080], %f0
prefetch [%o1 + 0x180], #one_read
- fmovd %f10, %f26
+ fsrc2 %f10, %f26
subcc %o2, 1, %o2
add %o0, 0x40, %o0
bne,pt %xcc, 1b
add %o1, 0x40, %o1
ldd [%o1 + 0x048], %f2
- fmovd %f12, %f28
+ fsrc2 %f12, %f28
ldd [%o1 + 0x050], %f4
- fmovd %f14, %f30
+ fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
ldd [%o1 + 0x058], %f6
- fmovd %f0, %f16
+ fsrc2 %f0, %f16
ldd [%o1 + 0x060], %f8
- fmovd %f2, %f18
+ fsrc2 %f2, %f18
ldd [%o1 + 0x068], %f10
- fmovd %f4, %f20
+ fsrc2 %f4, %f20
ldd [%o1 + 0x070], %f12
- fmovd %f6, %f22
+ fsrc2 %f6, %f22
add %o0, 0x40, %o0
ldd [%o1 + 0x078], %f14
- fmovd %f8, %f24
- fmovd %f10, %f26
- fmovd %f12, %f28
- fmovd %f14, %f30
+ fsrc2 %f8, %f24
+ fsrc2 %f10, %f26
+ fsrc2 %f12, %f28
+ fsrc2 %f14, %f30
stda %f16, [%o0] ASI_BLK_P
membar #Sync
VISExitHalf
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index f46cf6be3370..77ac917be152 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -32,24 +32,6 @@
int show_unhandled_signals = 1;
-/* At boot time we determine these two values necessary for setting
- * up the segment maps and page table entries (pte's).
- */
-
-int num_contexts;
-
-/* Return how much physical memory we have. */
-unsigned long probe_memory(void)
-{
- unsigned long total = 0;
- int i;
-
- for (i = 0; sp_banks[i].num_bytes; i++)
- total += sp_banks[i].num_bytes;
-
- return total;
-}
-
static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn));
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 055c66cf1bf4..449f864f0cef 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -22,13 +22,31 @@
* shared by CPUs, and so precious, and establishing them requires IPI.
* Atomic kmaps are lightweight and we may have NCPUS more of them.
*/
-#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/export.h>
-#include <asm/pgalloc.h>
+#include <linux/mm.h>
+
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+#include <asm/vaddrs.h>
+
+pgprot_t kmap_prot;
+
+static pte_t *kmap_pte;
+
+void __init kmap_init(void)
+{
+ unsigned long address;
+ pmd_t *dir;
+
+ address = __fix_to_virt(FIX_KMAP_BEGIN);
+ dir = pmd_offset(pgd_offset_k(address), address);
+
+ /* cache the first kmap pte */
+ kmap_pte = pte_offset_kernel(dir, address);
+ kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
+}
void *kmap_atomic(struct page *page)
{
@@ -110,21 +128,3 @@ void __kunmap_atomic(void *kvaddr)
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
-
-/* We may be fed a pagetable here by ptep_to_xxx and others. */
-struct page *kmap_atomic_to_page(void *ptr)
-{
- unsigned long idx, vaddr = (unsigned long)ptr;
- pte_t *pte;
-
- if (vaddr < SRMMU_NOCACHE_VADDR)
- return virt_to_page(ptr);
- if (vaddr < PKMAP_BASE)
- return pfn_to_page(__nocache_pa(vaddr) >> PAGE_SHIFT);
- BUG_ON(vaddr < FIXADDR_START);
- BUG_ON(vaddr > FIXADDR_TOP);
-
- idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
- return pte_page(*pte);
-}
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index ef5c779ec855..dde85ef1c56d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -45,9 +45,6 @@ unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
-unsigned long sparc_unmapped_base;
-
-struct pgtable_cache_struct pgt_quicklists;
/* Initial ramdisk setup */
extern unsigned int sparc_ramdisk_image;
@@ -55,19 +52,6 @@ extern unsigned int sparc_ramdisk_size;
unsigned long highstart_pfn, highend_pfn;
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-#define kmap_get_fixmap_pte(vaddr) \
- pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
- /* cache the first kmap pte */
- kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
-}
-
void show_mem(unsigned int filter)
{
printk("Mem-info:\n");
@@ -76,33 +60,8 @@ void show_mem(unsigned int filter)
nr_swap_pages << (PAGE_SHIFT-10));
printk("%ld pages of RAM\n", totalram_pages);
printk("%ld free pages\n", nr_free_pages());
-#if 0 /* undefined pgtable_cache_size, pgd_cache_size */
- printk("%ld pages in page table cache\n",pgtable_cache_size);
-#ifndef CONFIG_SMP
- if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
- printk("%ld entries in page dir cache\n",pgd_cache_size);
-#endif
-#endif
}
-void __init sparc_context_init(int numctx)
-{
- int ctx;
-
- ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
-
- for(ctx = 0; ctx < numctx; ctx++) {
- struct ctx_list *clist;
-
- clist = (ctx_list_pool + ctx);
- clist->ctx_number = ctx;
- clist->ctx_mm = NULL;
- }
- ctx_free.next = ctx_free.prev = &ctx_free;
- ctx_used.next = ctx_used.prev = &ctx_used;
- for(ctx = 0; ctx < numctx; ctx++)
- add_to_free_ctxlist(ctx_list_pool + ctx);
-}
extern unsigned long cmdline_memory_size;
unsigned long last_valid_pfn;
@@ -292,22 +251,7 @@ extern void device_scan(void);
void __init paging_init(void)
{
- switch(sparc_cpu_model) {
- case sparc_leon:
- leon_init();
- /* fall through */
- case sun4m:
- case sun4d:
- srmmu_paging_init();
- sparc_unmapped_base = 0x50000000;
- break;
- default:
- prom_printf("paging_init: Cannot init paging on this Sparc\n");
- prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
- prom_printf("paging_init: Halting...\n");
- prom_halt();
- }
-
+ srmmu_paging_init();
prom_build_devicetree();
of_fill_in_cpu_data();
device_scan();
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6026fdd1b2ed..d58edf5fefdb 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long vstart = (unsigned long) start;
@@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
*vmem_pp = pte_base | __pa(block);
- printk(KERN_INFO "[%p-%p] page_structs=%lu "
- "node=%d entry=%lu/%lu\n", start, block, nr,
- node,
- addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE);
+ /* check to see if we have contiguous blocks */
+ if (addr_end != addr || node_start != node) {
+ if (addr_start)
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ }
+ addr_end = addr + VMEMMAP_CHUNK;
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (addr_start) {
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = 0;
+ addr_end = 0;
+ node_start = 0;
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 62e3f5773303..c38bb72e3e80 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -8,45 +8,45 @@
* Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
*/
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
-#include <linux/fs.h>
-#include <linux/seq_file.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
#include <linux/kdebug.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
-#include <asm/bitext.h>
-#include <asm/page.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/io.h>
+#include <asm/bitext.h>
#include <asm/vaddrs.h>
-#include <asm/traps.h>
-#include <asm/smp.h>
-#include <asm/mbus.h>
#include <asm/cache.h>
+#include <asm/traps.h>
#include <asm/oplib.h>
+#include <asm/mbus.h>
+#include <asm/page.h>
#include <asm/asi.h>
#include <asm/msi.h>
-#include <asm/mmu_context.h>
-#include <asm/io-unit.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
+#include <asm/smp.h>
+#include <asm/io.h>
/* Now the cpu specific definitions. */
-#include <asm/viking.h>
-#include <asm/mxcc.h>
-#include <asm/ross.h>
+#include <asm/turbosparc.h>
#include <asm/tsunami.h>
+#include <asm/viking.h>
#include <asm/swift.h>
-#include <asm/turbosparc.h>
#include <asm/leon.h>
+#include <asm/mxcc.h>
+#include <asm/ross.h>
#include "srmmu.h"
@@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;
-struct ctx_list *ctx_list_pool;
-struct ctx_list ctx_free;
-struct ctx_list ctx_used;
-
extern struct resource sparc_iomap;
extern unsigned long last_valid_pfn;
@@ -136,8 +132,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
}
}
-/* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
+/* Find an entry in the third-level page table.. */
+pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
void *pte;
@@ -151,55 +147,61 @@ pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
* align: bytes, number to align at.
* Returns the virtual address of the allocated area.
*/
-static unsigned long __srmmu_get_nocache(int size, int align)
+static void *__srmmu_get_nocache(int size, int align)
{
int offset;
+ unsigned long addr;
if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
- printk("Size 0x%x too small for nocache request\n", size);
+ printk(KERN_ERR "Size 0x%x too small for nocache request\n",
+ size);
size = SRMMU_NOCACHE_BITMAP_SHIFT;
}
- if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
- printk("Size 0x%x unaligned int nocache request\n", size);
- size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
+ if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
+ printk(KERN_ERR "Size 0x%x unaligned int nocache request\n",
+ size);
+ size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
}
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
offset = bit_map_string_get(&srmmu_nocache_map,
- size >> SRMMU_NOCACHE_BITMAP_SHIFT,
- align >> SRMMU_NOCACHE_BITMAP_SHIFT);
+ size >> SRMMU_NOCACHE_BITMAP_SHIFT,
+ align >> SRMMU_NOCACHE_BITMAP_SHIFT);
if (offset == -1) {
- printk("srmmu: out of nocache %d: %d/%d\n",
- size, (int) srmmu_nocache_size,
- srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
+ printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
+ size, (int) srmmu_nocache_size,
+ srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
return 0;
}
- return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
+ addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
+ return (void *)addr;
}
-unsigned long srmmu_get_nocache(int size, int align)
+void *srmmu_get_nocache(int size, int align)
{
- unsigned long tmp;
+ void *tmp;
tmp = __srmmu_get_nocache(size, align);
if (tmp)
- memset((void *)tmp, 0, size);
+ memset(tmp, 0, size);
return tmp;
}
-void srmmu_free_nocache(unsigned long vaddr, int size)
+void srmmu_free_nocache(void *addr, int size)
{
+ unsigned long vaddr;
int offset;
+ vaddr = (unsigned long)addr;
if (vaddr < SRMMU_NOCACHE_VADDR) {
printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
BUG();
}
- if (vaddr+size > srmmu_nocache_end) {
+ if (vaddr + size > srmmu_nocache_end) {
printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
vaddr, srmmu_nocache_end);
BUG();
@@ -212,7 +214,7 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
printk("Size 0x%x is too small\n", size);
BUG();
}
- if (vaddr & (size-1)) {
+ if (vaddr & (size - 1)) {
printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
BUG();
}
@@ -226,13 +228,23 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end);
-extern unsigned long probe_memory(void); /* in fault.c */
+/* Return how much physical memory we have. */
+static unsigned long __init probe_memory(void)
+{
+ unsigned long total = 0;
+ int i;
+
+ for (i = 0; sp_banks[i].num_bytes; i++)
+ total += sp_banks[i].num_bytes;
+
+ return total;
+}
/*
* Reserve nocache dynamically proportionally to the amount of
* system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
*/
-static void srmmu_nocache_calcsize(void)
+static void __init srmmu_nocache_calcsize(void)
{
unsigned long sysmemavail = probe_memory() / 1024;
int srmmu_nocache_npages;
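/*
 * Illustrative aside (not part of the patch): the probe_memory() helper added
 * above simply walks a zero-terminated bank table and sums the bank sizes.
 * A minimal standalone sketch of the same idiom, with a made-up bank layout
 * standing in for sp_banks[]:
 */
#include <stdio.h>

struct mem_bank {
	unsigned long base_addr;
	unsigned long num_bytes;	/* 0 terminates the table */
};

static struct mem_bank banks[] = {
	{ 0x00000000UL, 64UL << 20 },
	{ 0x04000000UL, 64UL << 20 },
	{ 0, 0 },			/* sentinel entry */
};

static unsigned long probe_memory_total(const struct mem_bank *tbl)
{
	unsigned long total = 0;
	int i;

	for (i = 0; tbl[i].num_bytes; i++)
		total += tbl[i].num_bytes;

	return total;
}

int main(void)
{
	printf("total RAM: %lu MB\n", probe_memory_total(banks) >> 20);
	return 0;
}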
@@ -271,7 +283,7 @@ static void __init srmmu_nocache_init(void)
srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
- srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
init_mm.pgd = srmmu_swapper_pg_dir;
@@ -304,7 +316,7 @@ pgd_t *get_pgd_fast(void)
{
pgd_t *pgd = NULL;
- pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
if (pgd) {
pgd_t *init = pgd_offset_k(0);
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
@@ -330,7 +342,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
return NULL;
- page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
+ page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
pgtable_page_ctor(page);
return page;
}
@@ -344,18 +356,50 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
if (p == 0)
BUG();
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
- p = (unsigned long) __nocache_va(p); /* Nocached virtual */
- srmmu_free_nocache(p, PTE_SIZE);
+
+ /* free non-cached virtual address */
+ srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}
-/*
- */
+/* context handling - a dynamically sized pool is used */
+#define NO_CONTEXT -1
+
+struct ctx_list {
+ struct ctx_list *next;
+ struct ctx_list *prev;
+ unsigned int ctx_number;
+ struct mm_struct *ctx_mm;
+};
+
+static struct ctx_list *ctx_list_pool;
+static struct ctx_list ctx_free;
+static struct ctx_list ctx_used;
+
+/* At boot time we determine the number of contexts */
+static int num_contexts;
+
+static inline void remove_from_ctx_list(struct ctx_list *entry)
+{
+ entry->next->prev = entry->prev;
+ entry->prev->next = entry->next;
+}
+
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+{
+ entry->next = head;
+ (entry->prev = head->prev)->next = entry;
+ head->prev = entry;
+}
+#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
+#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
+
+
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
+ if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number;
@@ -363,9 +407,9 @@ static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == old_mm)
+ if (ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
- if(ctxp == &ctx_used)
+ if (ctxp == &ctx_used)
panic("out of mmu contexts");
flush_cache_mm(ctxp->ctx_mm);
flush_tlb_mm(ctxp->ctx_mm);
@@ -385,11 +429,31 @@ static inline void free_context(int context)
add_to_free_ctxlist(ctx_old);
}
+static void __init sparc_context_init(int numctx)
+{
+ int ctx;
+ unsigned long size;
+
+ size = numctx * sizeof(struct ctx_list);
+ ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+
+ for (ctx = 0; ctx < numctx; ctx++) {
+ struct ctx_list *clist;
+
+ clist = (ctx_list_pool + ctx);
+ clist->ctx_number = ctx;
+ clist->ctx_mm = NULL;
+ }
+ ctx_free.next = ctx_free.prev = &ctx_free;
+ ctx_used.next = ctx_used.prev = &ctx_used;
+ for (ctx = 0; ctx < numctx; ctx++)
+ add_to_free_ctxlist(ctx_list_pool + ctx);
+}
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk)
{
- if(mm->context == NO_CONTEXT) {
+ if (mm->context == NO_CONTEXT) {
spin_lock(&srmmu_context_spinlock);
alloc_context(old_mm, mm);
spin_unlock(&srmmu_context_spinlock);
@@ -407,7 +471,7 @@ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
- unsigned long virt_addr, int bus_type)
+ unsigned long virt_addr, int bus_type)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -420,8 +484,7 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
ptep = pte_offset_kernel(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE;
- /*
- * I need to test whether this is consistent over all
+ /* I need to test whether this is consistent over all
* sun4m's. The bus_type represents the upper 4 bits of
* 36-bit physical address on the I/O space lines...
*/
@@ -591,10 +654,10 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pmd_t *pmdp;
pte_t *ptep;
- while(start < end) {
+ while (start < end) {
pgdp = pgd_offset_k(start);
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
- pmdp = (pmd_t *) __srmmu_get_nocache(
+ pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
@@ -602,8 +665,8 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
pgd_set(__nocache_fix(pgdp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
- if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
- ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, PTE_SIZE);
@@ -622,18 +685,18 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
pmd_t *pmdp;
pte_t *ptep;
- while(start < end) {
+ while (start < end) {
pgdp = pgd_offset_k(start);
if (pgd_none(*pgdp)) {
- pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(pgdp, pmdp);
}
pmdp = pmd_offset(pgdp, start);
- if(srmmu_pmd_none(*pmdp)) {
- ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
+ if (srmmu_pmd_none(*pmdp)) {
+ ptep = __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
@@ -671,72 +734,76 @@ static inline unsigned long srmmu_probe(unsigned long vaddr)
static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long end)
{
+ unsigned long probed;
+ unsigned long addr;
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
- unsigned long prompte;
+ int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
- while(start <= end) {
+ while (start <= end) {
if (start == 0)
break; /* probably wrap around */
- if(start == 0xfef00000)
+ if (start == 0xfef00000)
start = KADB_DEBUGGER_BEGVM;
- if(!(prompte = srmmu_probe(start))) {
+ probed = srmmu_probe(start);
+ if (!probed) {
+ /* continue probing until we find an entry */
start += PAGE_SIZE;
continue;
}
-
+
/* A red snapper, see what it really is. */
what = 0;
-
- if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
- if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
+ addr = start - PAGE_SIZE;
+
+ if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
+ if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
what = 1;
}
-
- if(!(start & ~(SRMMU_PGDIR_MASK))) {
- if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
- prompte)
+
+ if (!(start & ~(SRMMU_PGDIR_MASK))) {
+ if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
what = 2;
}
-
+
pgdp = pgd_offset_k(start);
- if(what == 2) {
- *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
+ if (what == 2) {
+ *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE;
continue;
}
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
- pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+ SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp);
}
pmdp = pmd_offset(__nocache_fix(pgdp), start);
- if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
- ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
- PTE_SIZE);
+ if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep);
}
- if(what == 1) {
- /*
- * We bend the rule where all 16 PTPs in a pmd_t point
+ if (what == 1) {
+ /* We bend the rule where all 16 PTPs in a pmd_t point
* inside the same PTE page, and we leak a perfectly
* good hardware PTE piece. Alternatives seem worse.
*/
unsigned int x; /* Index of HW PMD in soft cluster */
+ unsigned long *val;
x = (start >> PMD_SHIFT) & 15;
- *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
+ val = &pmdp->pmdv[x];
+ *(unsigned long *)__nocache_fix(val) = probed;
start += SRMMU_REAL_PMD_SIZE;
continue;
}
ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
- *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
+ *(pte_t *)__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE;
}
}
@@ -765,18 +832,18 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
if (vstart < min_vaddr || vstart >= max_vaddr)
return vstart;
-
+
if (vend > max_vaddr || vend < min_vaddr)
vend = max_vaddr;
- while(vstart < vend) {
+ while (vstart < vend) {
do_large_mapping(vstart, pstart);
vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
}
return vstart;
}
-static inline void map_kernel(void)
+static void __init map_kernel(void)
{
int i;
@@ -789,9 +856,6 @@ static inline void map_kernel(void)
}
}
-/* Paging initialization on the Sparc Reference MMU. */
-extern void sparc_context_init(int);
-
void (*poke_srmmu)(void) __cpuinitdata = NULL;
extern unsigned long bootmem_init(unsigned long *pages_avail);
@@ -806,6 +870,7 @@ void __init srmmu_paging_init(void)
pte_t *pte;
unsigned long pages_avail;
+ init_mm.context = (unsigned long) NO_CONTEXT;
sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
if (sparc_cpu_model == sun4d)
@@ -814,9 +879,9 @@ void __init srmmu_paging_init(void)
/* Find the number of contexts on the srmmu. */
cpunode = prom_getchild(prom_root_node);
num_contexts = 0;
- while(cpunode != 0) {
+ while (cpunode != 0) {
prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
- if(!strcmp(node_str, "cpu")) {
+ if (!strcmp(node_str, "cpu")) {
num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
break;
}
@@ -824,7 +889,7 @@ void __init srmmu_paging_init(void)
}
}
- if(!num_contexts) {
+ if (!num_contexts) {
prom_printf("Something wrong, can't find cpu node in paging_init.\n");
prom_halt();
}
@@ -834,14 +899,14 @@ void __init srmmu_paging_init(void)
srmmu_nocache_calcsize();
srmmu_nocache_init();
- srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
+ srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
map_kernel();
/* ctx table has to be physically aligned to its size */
- srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
+ srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
- for(i = 0; i < num_contexts; i++)
+ for (i = 0; i < num_contexts; i++)
srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
flush_cache_all();
@@ -897,7 +962,7 @@ void __init srmmu_paging_init(void)
void mmu_info(struct seq_file *m)
{
- seq_printf(m,
+ seq_printf(m,
"MMU type\t: %s\n"
"contexts\t: %d\n"
"nocache total\t: %ld\n"
@@ -908,10 +973,16 @@ void mmu_info(struct seq_file *m)
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context = NO_CONTEXT;
+ return 0;
+}
+
void destroy_context(struct mm_struct *mm)
{
- if(mm->context != NO_CONTEXT) {
+ if (mm->context != NO_CONTEXT) {
flush_cache_mm(mm);
srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
flush_tlb_mm(mm);
@@ -941,13 +1012,12 @@ static void __init init_vac_layout(void)
#endif
nd = prom_getchild(prom_root_node);
- while((nd = prom_getsibling(nd)) != 0) {
+ while ((nd = prom_getsibling(nd)) != 0) {
prom_getstring(nd, "device_type", node_str, sizeof(node_str));
- if(!strcmp(node_str, "cpu")) {
+ if (!strcmp(node_str, "cpu")) {
vac_line_size = prom_getint(nd, "cache-line-size");
if (vac_line_size == -1) {
- prom_printf("can't determine cache-line-size, "
- "halting.\n");
+ prom_printf("can't determine cache-line-size, halting.\n");
prom_halt();
}
cache_lines = prom_getint(nd, "cache-nlines");
@@ -958,9 +1028,9 @@ static void __init init_vac_layout(void)
vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
- if(vac_cache_size > max_size)
+ if (vac_cache_size > max_size)
max_size = vac_cache_size;
- if(vac_line_size < min_line_size)
+ if (vac_line_size < min_line_size)
min_line_size = vac_line_size;
//FIXME: cpus not contiguous!!
cpu++;
@@ -971,7 +1041,7 @@ static void __init init_vac_layout(void)
#endif
}
}
- if(nd == 0) {
+ if (nd == 0) {
prom_printf("No CPU nodes found, halting.\n");
prom_halt();
}
@@ -1082,7 +1152,7 @@ static void __init init_swift(void)
"=r" (swift_rev) :
"r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
srmmu_name = "Fujitsu Swift";
- switch(swift_rev) {
+ switch (swift_rev) {
case 0x11:
case 0x20:
case 0x23:
@@ -1222,10 +1292,11 @@ static void __cpuinit poke_turbosparc(void)
/* Clear any crap from the cache or else... */
turbosparc_flush_cache_all();
- mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
+ /* Temporarily disable I & D caches */
+ mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
srmmu_set_mmureg(mreg);
-
+
ccreg = turbosparc_get_ccreg();
#ifdef TURBOSPARC_WRITEBACK
@@ -1248,7 +1319,7 @@ static void __cpuinit poke_turbosparc(void)
default:
ccreg |= (TURBOSPARC_SCENABLE);
}
- turbosparc_set_ccreg (ccreg);
+ turbosparc_set_ccreg(ccreg);
mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
@@ -1342,7 +1413,7 @@ static void __cpuinit poke_viking(void)
unsigned long bpreg;
mreg &= ~(VIKING_TCENABLE);
- if(smp_catch++) {
+ if (smp_catch++) {
/* Must disable mixed-cmd mode here for other cpu's. */
bpreg = viking_get_bpreg();
bpreg &= ~(VIKING_ACTION_MIX);
@@ -1411,7 +1482,7 @@ static void __init init_viking(void)
unsigned long mreg = srmmu_get_mmureg();
/* Ahhh, the viking. SRMMU VLSI abortion number two... */
- if(mreg & VIKING_MMODE) {
+ if (mreg & VIKING_MMODE) {
srmmu_name = "TI Viking";
viking_mxcc_present = 0;
msi_set_sync();
@@ -1467,8 +1538,8 @@ static void __init get_srmmu_type(void)
}
/* Second, check for HyperSparc or Cypress. */
- if(mod_typ == 1) {
- switch(mod_rev) {
+ if (mod_typ == 1) {
+ switch (mod_rev) {
case 7:
/* UP or MP Hypersparc */
init_hypersparc();
@@ -1488,9 +1559,8 @@ static void __init get_srmmu_type(void)
}
return;
}
-
- /*
- * Now Fujitsu TurboSparc. It might happen that it is
+
+ /* Now Fujitsu TurboSparc. It might happen that it is
* in Swift emulation mode, so we will check later...
*/
if (psr_typ == 0 && psr_vers == 5) {
@@ -1499,15 +1569,15 @@ static void __init get_srmmu_type(void)
}
/* Next check for Fujitsu Swift. */
- if(psr_typ == 0 && psr_vers == 4) {
+ if (psr_typ == 0 && psr_vers == 4) {
phandle cpunode;
char node_str[128];
/* Look if it is not a TurboSparc emulating Swift... */
cpunode = prom_getchild(prom_root_node);
- while((cpunode = prom_getsibling(cpunode)) != 0) {
+ while ((cpunode = prom_getsibling(cpunode)) != 0) {
prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
- if(!strcmp(node_str, "cpu")) {
+ if (!strcmp(node_str, "cpu")) {
if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
prom_getintdefault(cpunode, "psr-version", 1) == 5) {
init_turbosparc();
@@ -1516,13 +1586,13 @@ static void __init get_srmmu_type(void)
break;
}
}
-
+
init_swift();
return;
}
/* Now the Viking family of srmmu. */
- if(psr_typ == 4 &&
+ if (psr_typ == 4 &&
((psr_vers == 0) ||
((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
init_viking();
@@ -1530,7 +1600,7 @@ static void __init get_srmmu_type(void)
}
/* Finally the Tsunami. */
- if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
+ if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
init_tsunami();
return;
}
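/*
 * Illustrative aside (not part of the patch): the context pool moved into
 * srmmu.c above is a sentinel-headed doubly linked list -- ctx_free and
 * ctx_used are list heads whose next/prev point back to themselves when
 * empty, so insert and remove need no NULL checks. A minimal userspace
 * sketch of the same idiom (toy names, no locking):
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	struct ctx *next, *prev;
	int number;
};

static struct ctx free_list = { &free_list, &free_list, -1 };
static struct ctx used_list = { &used_list, &used_list, -1 };

static void list_remove(struct ctx *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

static void list_add(struct ctx *head, struct ctx *e)
{
	e->next = head;
	(e->prev = head->prev)->next = e;
	head->prev = e;
}

int main(void)
{
	struct ctx *pool = calloc(4, sizeof(*pool));
	int i;

	if (!pool)
		return 1;
	for (i = 0; i < 4; i++) {
		pool[i].number = i;
		list_add(&free_list, &pool[i]);
	}
	/* "allocate" a context: move the first free entry to the used list */
	if (free_list.next != &free_list) {
		struct ctx *c = free_list.next;

		list_remove(c);
		list_add(&used_list, c);
		printf("allocated context %d\n", c->number);
	}
	free(pool);
	return 0;
}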
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 26c64cea3c9c..9ac30c2b7dba 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -27,13 +27,10 @@ EXPORT_SYMBOL(prom_root_node);
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library. It returns 0 on success, 1 on
- * failure. It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
*/
-extern void prom_meminit(void);
-extern void prom_ranges_init(void);
-
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
index 5016c5e20575..d95db755828f 100644
--- a/arch/sparc/prom/init_64.c
+++ b/arch/sparc/prom/init_64.c
@@ -22,8 +22,8 @@ int prom_stdout;
phandle prom_chosen_node;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library. It returns 0 on success, 1 on
- * failure. It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
*/
extern void prom_cif_init(void *, void *);
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index b8d99aca5431..0270620a1692 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -18,8 +18,8 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index 2b1fd31894f1..c11de27a9bcb 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -17,8 +17,8 @@ CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index fb7c65ae8de0..5bd71994452d 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += fb.h
generic-y += fcntl.h
generic-y += ioctl.h
generic-y += ioctls.h
-generic-y += ipc.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 3d0f20246260..92b28e3e9972 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -23,35 +23,6 @@
* adds 4MB of required address-space. For now we leave KM_TYPE_NR
* set to depth 8.
*/
-enum km_type {
- KM_TYPE_NR = 8
-};
-
-/*
- * We provide dummy definitions of all the stray values that used to be
- * required for kmap_atomic() and no longer are.
- */
-enum {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_SYNC_ICACHE,
- KM_SYNC_DCACHE,
- KM_UML_USERCOPY,
- KM_IRQ_PTE,
- KM_NMI,
- KM_NMI_PTE,
- KM_KDB
-};
+#define KM_TYPE_NR 8
#endif /* _ASM_TILE_KMAP_TYPES_H */
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 0fdd99d0d8b7..33c10864d2f7 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -369,7 +369,7 @@ int __init pcibios_init(void)
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
(PCI_SLOT(dev->devfn) == 0)) {
- next_bus = dev->busn_res.end;
+ next_bus = dev->subordinate;
controllers[i].mem_resources[0] =
*next_bus->resource[0];
controllers[i].mem_resources[1] =
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index fa75264a82ae..0e213e35ffc3 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -853,7 +853,7 @@ int __init pcibios_init(void)
bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
controller, &resources);
controller->root_bus = bus;
- next_busno = bus->subordinate + 1;
+ next_busno = bus->busn_res.end + 1;
}
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index ef8e5a62b6e3..347d123b14be 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -93,7 +93,7 @@ static DEFINE_PER_CPU(struct kmap_amps, amps);
* If we examine it earlier we are exposed to a race where it looks
* writable earlier, but becomes immutable before we write the PTE.
*/
-static void kmap_atomic_register(struct page *page, enum km_type type,
+static void kmap_atomic_register(struct page *page, int type,
unsigned long va, pte_t *ptep, pte_t pteval)
{
unsigned long flags;
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 7823ab12e6a4..08107a795062 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -155,15 +155,15 @@ CONFIG_CPUSETS=y
CONFIG_PROC_PID_CPUSET=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_RESOURCE_COUNTERS=y
-CONFIG_CGROUP_MEM_RES_CTLR=y
-CONFIG_CGROUP_MEM_RES_CTLR_SWAP=y
-# CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED is not set
-# CONFIG_CGROUP_MEM_RES_CTLR_KMEM is not set
+CONFIG_CGROUP_MEMCG=y
+CONFIG_CGROUP_MEMCG_SWAP=y
+# CONFIG_CGROUP_MEMCG_SWAP_ENABLED is not set
+# CONFIG_CGROUP_MEMCG_KMEM is not set
CONFIG_CGROUP_SCHED=y
CONFIG_FAIR_GROUP_SCHED=y
# CONFIG_CFS_BANDWIDTH is not set
# CONFIG_RT_GROUP_SCHED is not set
-CONFIG_BLK_CGROUP=m
+CONFIG_BLK_CGROUP=y
# CONFIG_DEBUG_BLK_CGROUP is not set
# CONFIG_CHECKPOINT_RESTORE is not set
CONFIG_NAMESPACES=y
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 45e248c2f43c..87eebfe03c61 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -150,9 +150,11 @@ void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
static void line_timer_cb(struct work_struct *work)
{
struct line *line = container_of(work, struct line, task.work);
+ struct tty_struct *tty = tty_port_tty_get(&line->port);
if (!line->throttled)
- chan_interrupt(line, line->tty, line->driver->read_irq);
+ chan_interrupt(line, tty, line->driver->read_irq);
+ tty_kref_put(tty);
}
int enable_chan(struct line *line)
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index acfd0e0fd0c9..bbaf2c59830a 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -19,9 +19,11 @@ static irqreturn_t line_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
+ struct tty_struct *tty = tty_port_tty_get(&line->port);
if (line)
- chan_interrupt(line, line->tty, irq);
+ chan_interrupt(line, tty, irq);
+ tty_kref_put(tty);
return IRQ_HANDLED;
}
@@ -219,92 +221,6 @@ void line_set_termios(struct tty_struct *tty, struct ktermios * old)
/* nothing */
}
-static const struct {
- int cmd;
- char *level;
- char *name;
-} tty_ioctls[] = {
- /* don't print these, they flood the log ... */
- { TCGETS, NULL, "TCGETS" },
- { TCSETS, NULL, "TCSETS" },
- { TCSETSW, NULL, "TCSETSW" },
- { TCFLSH, NULL, "TCFLSH" },
- { TCSBRK, NULL, "TCSBRK" },
-
- /* general tty stuff */
- { TCSETSF, KERN_DEBUG, "TCSETSF" },
- { TCGETA, KERN_DEBUG, "TCGETA" },
- { TIOCMGET, KERN_DEBUG, "TIOCMGET" },
- { TCSBRKP, KERN_DEBUG, "TCSBRKP" },
- { TIOCMSET, KERN_DEBUG, "TIOCMSET" },
-
- /* linux-specific ones */
- { TIOCLINUX, KERN_INFO, "TIOCLINUX" },
- { KDGKBMODE, KERN_INFO, "KDGKBMODE" },
- { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" },
- { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
-};
-
-int line_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
- int i;
-
- ret = 0;
- switch(cmd) {
-#ifdef TIOCGETP
- case TIOCGETP:
- case TIOCSETP:
- case TIOCSETN:
-#endif
-#ifdef TIOCGETC
- case TIOCGETC:
- case TIOCSETC:
-#endif
-#ifdef TIOCGLTC
- case TIOCGLTC:
- case TIOCSLTC:
-#endif
- /* Note: these are out of date as we now have TCGETS2 etc but this
- whole lot should probably go away */
- case TCGETS:
- case TCSETSF:
- case TCSETSW:
- case TCSETS:
- case TCGETA:
- case TCSETAF:
- case TCSETAW:
- case TCSETA:
- case TCXONC:
- case TCFLSH:
- case TIOCOUTQ:
- case TIOCINQ:
- case TIOCGLCKTRMIOS:
- case TIOCSLCKTRMIOS:
- case TIOCPKT:
- case TIOCGSOFTCAR:
- case TIOCSSOFTCAR:
- return -ENOIOCTLCMD;
-#if 0
- case TCwhatever:
- /* do something */
- break;
-#endif
- default:
- for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
- if (cmd == tty_ioctls[i].cmd)
- break;
- if (i == ARRAY_SIZE(tty_ioctls)) {
- printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
- __func__, tty->name, cmd);
- }
- ret = -ENOIOCTLCMD;
- break;
- }
- return ret;
-}
-
void line_throttle(struct tty_struct *tty)
{
struct line *line = tty->driver_data;
@@ -333,7 +249,7 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
{
struct chan *chan = data;
struct line *line = chan->line;
- struct tty_struct *tty = line->tty;
+ struct tty_struct *tty;
int err;
/*
@@ -352,68 +268,42 @@ static irqreturn_t line_write_interrupt(int irq, void *data)
}
spin_unlock(&line->lock);
+ tty = tty_port_tty_get(&line->port);
if (tty == NULL)
return IRQ_NONE;
tty_wakeup(tty);
+ tty_kref_put(tty);
+
return IRQ_HANDLED;
}
int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
const struct line_driver *driver = line->driver;
- int err = 0, flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
+ int err = 0;
if (input)
err = um_request_irq(driver->read_irq, fd, IRQ_READ,
- line_interrupt, flags,
- driver->read_irq_name, data);
+ line_interrupt, IRQF_SHARED,
+ driver->read_irq_name, data);
if (err)
return err;
if (output)
err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
- line_write_interrupt, flags,
- driver->write_irq_name, data);
+ line_write_interrupt, IRQF_SHARED,
+ driver->write_irq_name, data);
return err;
}
-/*
- * Normally, a driver like this can rely mostly on the tty layer
- * locking, particularly when it comes to the driver structure.
- * However, in this case, mconsole requests can come in "from the
- * side", and race with opens and closes.
- *
- * mconsole config requests will want to be sure the device isn't in
- * use, and get_config, open, and close will want a stable
- * configuration. The checking and modification of the configuration
- * is done under a spinlock. Checking whether the device is in use is
- * line->tty->count > 1, also under the spinlock.
- *
- * line->count serves to decide whether the device should be enabled or
- * disabled on the host. If it's equal to 0, then we are doing the
- * first open or last close. Otherwise, open and close just return.
- */
-
-int line_open(struct line *lines, struct tty_struct *tty)
+static int line_activate(struct tty_port *port, struct tty_struct *tty)
{
- struct line *line = &lines[tty->index];
- int err = -ENODEV;
-
- mutex_lock(&line->count_lock);
- if (!line->valid)
- goto out_unlock;
-
- err = 0;
- if (line->count++)
- goto out_unlock;
-
- BUG_ON(tty->driver_data);
- tty->driver_data = line;
- line->tty = tty;
+ int ret;
+ struct line *line = tty->driver_data;
- err = enable_chan(line);
- if (err) /* line_close() will be called by our caller */
- goto out_unlock;
+ ret = enable_chan(line);
+ if (ret)
+ return ret;
if (!line->sigio) {
chan_enable_winch(line->chan_out, tty);
@@ -421,44 +311,60 @@ int line_open(struct line *lines, struct tty_struct *tty)
}
chan_window_size(line, &tty->winsize.ws_row,
- &tty->winsize.ws_col);
-out_unlock:
- mutex_unlock(&line->count_lock);
- return err;
+ &tty->winsize.ws_col);
+
+ return 0;
}
-static void unregister_winch(struct tty_struct *tty);
+static const struct tty_port_operations line_port_ops = {
+ .activate = line_activate,
+};
-void line_close(struct tty_struct *tty, struct file * filp)
+int line_open(struct tty_struct *tty, struct file *filp)
{
struct line *line = tty->driver_data;
- /*
- * If line_open fails (and tty->driver_data is never set),
- * tty_open will call line_close. So just return in this case.
- */
- if (line == NULL)
- return;
+ return tty_port_open(&line->port, tty, filp);
+}
- /* We ignore the error anyway! */
- flush_buffer(line);
+int line_install(struct tty_driver *driver, struct tty_struct *tty,
+ struct line *line)
+{
+ int ret;
- mutex_lock(&line->count_lock);
- BUG_ON(!line->valid);
+ ret = tty_standard_install(driver, tty);
+ if (ret)
+ return ret;
- if (--line->count)
- goto out_unlock;
+ tty->driver_data = line;
- line->tty = NULL;
- tty->driver_data = NULL;
+ return 0;
+}
+
+static void unregister_winch(struct tty_struct *tty);
+
+void line_cleanup(struct tty_struct *tty)
+{
+ struct line *line = tty->driver_data;
if (line->sigio) {
unregister_winch(tty);
line->sigio = 0;
}
+}
+
+void line_close(struct tty_struct *tty, struct file * filp)
+{
+ struct line *line = tty->driver_data;
-out_unlock:
- mutex_unlock(&line->count_lock);
+ tty_port_close(&line->port, tty, filp);
+}
+
+void line_hangup(struct tty_struct *tty)
+{
+ struct line *line = tty->driver_data;
+
+ tty_port_hangup(&line->port);
}
void close_lines(struct line *lines, int nlines)
@@ -476,9 +382,7 @@ int setup_one_line(struct line *lines, int n, char *init,
struct tty_driver *driver = line->driver->driver;
int err = -EINVAL;
- mutex_lock(&line->count_lock);
-
- if (line->count) {
+ if (line->port.count) {
*error_out = "Device is already open";
goto out;
}
@@ -519,7 +423,6 @@ int setup_one_line(struct line *lines, int n, char *init,
}
}
out:
- mutex_unlock(&line->count_lock);
return err;
}
@@ -607,13 +510,17 @@ int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
line = &lines[dev];
- mutex_lock(&line->count_lock);
if (!line->valid)
CONFIG_CHUNK(str, size, n, "none", 1);
- else if (line->tty == NULL)
- CONFIG_CHUNK(str, size, n, line->init_str, 1);
- else n = chan_config_string(line, str, size, error_out);
- mutex_unlock(&line->count_lock);
+ else {
+ struct tty_struct *tty = tty_port_tty_get(&line->port);
+ if (tty == NULL) {
+ CONFIG_CHUNK(str, size, n, line->init_str, 1);
+ } else {
+ n = chan_config_string(line, str, size, error_out);
+ tty_kref_put(tty);
+ }
+ }
return n;
}
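/*
 * Illustrative aside (not part of the patch): the conversions above replace
 * direct line->tty dereferences with a take-reference / use / drop-reference
 * pattern -- tty_port_tty_get() may return NULL, tty_kref_put() releases the
 * reference. A toy userspace analogy of that discipline, with invented names;
 * this is not the kernel API, only the shape of it:
 */
#include <stdio.h>
#include <stdlib.h>

struct handle {
	int refcount;
	int id;
};

/* may return NULL when no object is currently attached */
static struct handle *handle_get(struct handle *h)
{
	if (!h)
		return NULL;
	h->refcount++;
	return h;
}

static void handle_put(struct handle *h)
{
	if (h && --h->refcount == 0)
		free(h);
}

static void use_handle(struct handle *attached)
{
	struct handle *h = handle_get(attached);

	if (h == NULL)
		return;		/* nothing attached right now */
	printf("using handle %d\n", h->id);
	handle_put(h);
}

int main(void)
{
	struct handle *h = malloc(sizeof(*h));

	if (!h)
		return 1;
	h->refcount = 1;	/* the "attached" reference */
	h->id = 42;
	use_handle(h);
	handle_put(h);		/* drop the attachment; frees the object */
	return 0;
}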
@@ -663,8 +570,9 @@ int register_lines(struct line_driver *line_driver,
driver->init_termios = tty_std_termios;
for (i = 0; i < nlines; i++) {
+ tty_port_init(&lines[i].port);
+ lines[i].port.ops = &line_port_ops;
spin_lock_init(&lines[i].lock);
- mutex_init(&lines[i].count_lock);
lines[i].driver = line_driver;
INIT_LIST_HEAD(&lines[i].chan_list);
}
@@ -779,8 +687,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
.stack = stack });
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "winch", winch) < 0) {
+ IRQF_SHARED, "winch", winch) < 0) {
printk(KERN_ERR "register_winch_irq - failed to register "
"IRQ\n");
goto out_free;
diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h
index 0a1834719dba..bae95611e7ab 100644
--- a/arch/um/drivers/line.h
+++ b/arch/um/drivers/line.h
@@ -32,9 +32,7 @@ struct line_driver {
};
struct line {
- struct tty_struct *tty;
- struct mutex count_lock;
- unsigned long count;
+ struct tty_port port;
int valid;
char *init_str;
@@ -59,7 +57,11 @@ struct line {
};
extern void line_close(struct tty_struct *tty, struct file * filp);
-extern int line_open(struct line *lines, struct tty_struct *tty);
+extern int line_open(struct tty_struct *tty, struct file *filp);
+extern int line_install(struct tty_driver *driver, struct tty_struct *tty,
+ struct line *line);
+extern void line_cleanup(struct tty_struct *tty);
+extern void line_hangup(struct tty_struct *tty);
extern int line_setup(char **conf, unsigned nlines, char **def,
char *init, char *name);
extern int line_write(struct tty_struct *tty, const unsigned char *buf,
@@ -70,8 +72,6 @@ extern int line_chars_in_buffer(struct tty_struct *tty);
extern void line_flush_buffer(struct tty_struct *tty);
extern void line_flush_chars(struct tty_struct *tty);
extern int line_write_room(struct tty_struct *tty);
-extern int line_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg);
extern void line_throttle(struct tty_struct *tty);
extern void line_unthrottle(struct tty_struct *tty);
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 43b39d61b538..664a60e8dfb4 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -774,8 +774,7 @@ static int __init mconsole_init(void)
register_reboot_notifier(&reboot_notifier);
err = um_request_irq(MCONSOLE_IRQ, sock, IRQ_READ, mconsole_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "mconsole", (void *)sock);
+ IRQF_SHARED, "mconsole", (void *)sock);
if (err) {
printk(KERN_ERR "Failed to get IRQ for management console\n");
goto out;
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index 11866ffd45a9..1d83d50236e1 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -100,8 +100,7 @@ static int port_accept(struct port_list *port)
.port = port });
if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "telnetd", conn)) {
+ IRQF_SHARED, "telnetd", conn)) {
printk(KERN_ERR "port_accept : failed to get IRQ for "
"telnetd\n");
goto out_free;
@@ -184,8 +183,7 @@ void *port_data(int port_num)
}
if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "port", port)) {
+ IRQF_SHARED, "port", port)) {
printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
goto out_close;
}
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index b25296e6218a..e32c6aa6396f 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -131,8 +131,7 @@ static int __init rng_init (void)
random_fd = err;
err = um_request_irq(RANDOM_IRQ, random_fd, IRQ_READ, random_interrupt,
- IRQF_SAMPLE_RANDOM, "random",
- NULL);
+ 0, "random", NULL);
if (err)
goto err_out_cleanup_hw;
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index e09801a1327b..7e86f0070123 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -87,40 +87,13 @@ static int ssl_remove(int n, char **error_out)
error_out);
}
-static int ssl_open(struct tty_struct *tty, struct file *filp)
-{
- int err = line_open(serial_lines, tty);
-
- if (err)
- printk(KERN_ERR "Failed to open serial line %d, err = %d\n",
- tty->index, err);
-
- return err;
-}
-
-#if 0
-static void ssl_flush_buffer(struct tty_struct *tty)
-{
- return;
-}
-
-static void ssl_stop(struct tty_struct *tty)
-{
- printk(KERN_ERR "Someone should implement ssl_stop\n");
-}
-
-static void ssl_start(struct tty_struct *tty)
-{
- printk(KERN_ERR "Someone should implement ssl_start\n");
-}
-
-void ssl_hangup(struct tty_struct *tty)
+static int ssl_install(struct tty_driver *driver, struct tty_struct *tty)
{
+ return line_install(driver, tty, &serial_lines[tty->index]);
}
-#endif
static const struct tty_operations ssl_ops = {
- .open = ssl_open,
+ .open = line_open,
.close = line_close,
.write = line_write,
.put_char = line_put_char,
@@ -129,14 +102,11 @@ static const struct tty_operations ssl_ops = {
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
.set_termios = line_set_termios,
- .ioctl = line_ioctl,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
-#if 0
- .stop = ssl_stop,
- .start = ssl_start,
- .hangup = ssl_hangup,
-#endif
+ .install = ssl_install,
+ .cleanup = line_cleanup,
+ .hangup = line_hangup,
};
/* Changed by ssl_init and referenced by ssl_exit, which are both serialized
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 7663541c372e..929b99a261f3 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -89,21 +89,17 @@ static int con_remove(int n, char **error_out)
return line_remove(vts, ARRAY_SIZE(vts), n, error_out);
}
-static int con_open(struct tty_struct *tty, struct file *filp)
-{
- int err = line_open(vts, tty);
- if (err)
- printk(KERN_ERR "Failed to open console %d, err = %d\n",
- tty->index, err);
-
- return err;
-}
-
/* Set in an initcall, checked in an exitcall */
static int con_init_done = 0;
+static int con_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ return line_install(driver, tty, &vts[tty->index]);
+}
+
static const struct tty_operations console_ops = {
- .open = con_open,
+ .open = line_open,
+ .install = con_install,
.close = line_close,
.write = line_write,
.put_char = line_put_char,
@@ -112,9 +108,10 @@ static const struct tty_operations console_ops = {
.flush_buffer = line_flush_buffer,
.flush_chars = line_flush_chars,
.set_termios = line_set_termios,
- .ioctl = line_ioctl,
.throttle = line_throttle,
.unthrottle = line_unthrottle,
+ .cleanup = line_cleanup,
+ .hangup = line_hangup,
};
static void uml_console_write(struct console *console, const char *string,
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 20505cafa299..0643e5bc9f41 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -514,7 +514,7 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
goto out;
}
- fd = os_open_file(ubd_dev->file, global_openflags, 0);
+ fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
if (fd < 0)
return fd;
diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c
index b68bbe269e01..e3031e69445d 100644
--- a/arch/um/drivers/xterm_kern.c
+++ b/arch/um/drivers/xterm_kern.c
@@ -50,8 +50,7 @@ int xterm_fd(int socket, int *pid_out)
init_completion(&data->ready);
err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- "xterm", data);
+ IRQF_SHARED, "xterm", data);
if (err) {
printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, "
"err = %d\n", err);
diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
index 6c03acdb4405..2e0a6b1d8300 100644
--- a/arch/um/include/asm/kmap_types.h
+++ b/arch/um/include/asm/kmap_types.h
@@ -8,22 +8,6 @@
/* No more #include "asm/arch/kmap_types.h" ! */
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#define KM_TYPE_NR 14
#endif
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index e786a6a3ec5e..442f1d025dc2 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -37,6 +37,8 @@ extern int putreg(struct task_struct *child, int regno, unsigned long value);
extern int arch_copy_tls(struct task_struct *new);
extern void clear_flushed_tls(struct task_struct *task);
+extern void syscall_trace_enter(struct pt_regs *regs);
+extern void syscall_trace_leave(struct pt_regs *regs);
#endif
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 896e16602176..86daa5461815 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -60,7 +60,8 @@ extern unsigned long host_task_size;
extern int linux_main(int argc, char **argv);
-extern void (*sig_info[])(int, struct uml_pt_regs *);
+struct siginfo;
+extern void (*sig_info[])(int, struct siginfo *si, struct uml_pt_regs *);
#endif
diff --git a/arch/um/include/shared/irq_user.h b/arch/um/include/shared/irq_user.h
index c6c784df2673..2b6d703925b5 100644
--- a/arch/um/include/shared/irq_user.h
+++ b/arch/um/include/shared/irq_user.h
@@ -20,7 +20,8 @@ struct irq_fd {
enum { IRQ_READ, IRQ_WRITE };
-extern void sigio_handler(int sig, struct uml_pt_regs *regs);
+struct siginfo;
+extern void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
extern void free_irq_by_fd(int fd);
extern void reactivate_fd(int fd, int irqnum);
extern void deactivate_fd(int fd, int irqnum);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 00965d06d2ca..af6b6dc868ba 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -9,6 +9,8 @@
#include "sysdep/ptrace.h"
#include "sysdep/faultinfo.h"
+struct siginfo;
+
extern int uml_exitcode;
extern int ncpus;
@@ -22,7 +24,7 @@ extern void free_stack(unsigned long stack, int order);
extern int do_signal(void);
extern void interrupt_end(void);
-extern void relay_signal(int sig, struct uml_pt_regs *regs);
+extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs);
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
int is_user, struct uml_pt_regs *regs);
@@ -33,9 +35,8 @@ extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs);
extern int smp_sigio_handler(void);
extern void initial_thread_cb(void (*proc)(void *), void *arg);
extern int is_syscall(unsigned long addr);
-extern void timer_handler(int sig, struct uml_pt_regs *regs);
-extern void timer_handler(int sig, struct uml_pt_regs *regs);
+extern void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
extern int start_uml(void);
extern void paging_init(void);
@@ -59,9 +60,9 @@ extern unsigned long from_irq_stack(int nested);
extern void syscall_trace(struct uml_pt_regs *regs, int entryexit);
extern int singlestepping(void *t);
-extern void segv_handler(int sig, struct uml_pt_regs *regs);
-extern void bus_handler(int sig, struct uml_pt_regs *regs);
-extern void winch(int sig, struct uml_pt_regs *regs);
+extern void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
+extern void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs);
+extern void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs);
extern void fatal_sigsegv(void) __attribute__ ((noreturn));
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 00506c3d5d6e..9883026f0730 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -30,7 +30,7 @@ static struct irq_fd **last_irq_ptr = &active_fds;
extern void free_irqs(void);
-void sigio_handler(int sig, struct uml_pt_regs *regs)
+void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
struct irq_fd *irq_fd;
int n;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index ccb9a9d283f1..57fc7028714a 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -151,12 +151,10 @@ void new_thread_handler(void)
* 0 if it just exits
*/
n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
- if (n == 1) {
- /* Handle any immediate reschedules or signals */
- interrupt_end();
+ if (n == 1)
userspace(&current->thread.regs.regs);
- }
- else do_exit(0);
+ else
+ do_exit(0);
}
/* Called magically, see new_thread_handler above */
@@ -175,9 +173,6 @@ void fork_handler(void)
current->thread.prev_sched = NULL;
- /* Handle any immediate reschedules or signals */
- interrupt_end();
-
userspace(&current->thread.regs.regs);
}
@@ -193,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
if (current->thread.forking) {
memcpy(&p->thread.regs.regs, &regs->regs,
sizeof(p->thread.regs.regs));
- UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0);
+ PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
if (sp != 0)
REGS_SP(p->thread.regs.regs.gp) = sp;
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 06b190390505..694d551c8899 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -3,11 +3,12 @@
* Licensed under the GPL
*/
-#include "linux/audit.h"
-#include "linux/ptrace.h"
-#include "linux/sched.h"
-#include "asm/uaccess.h"
-#include "skas_ptrace.h"
+#include <linux/audit.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/tracehook.h>
+#include <asm/uaccess.h>
+#include <skas_ptrace.h>
@@ -162,48 +163,36 @@ static void send_sigtrap(struct task_struct *tsk, struct uml_pt_regs *regs,
* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
-void syscall_trace(struct uml_pt_regs *regs, int entryexit)
+void syscall_trace_enter(struct pt_regs *regs)
{
- int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
- int tracesysgood;
-
- if (!entryexit)
- audit_syscall_entry(HOST_AUDIT_ARCH,
- UPT_SYSCALL_NR(regs),
- UPT_SYSCALL_ARG1(regs),
- UPT_SYSCALL_ARG2(regs),
- UPT_SYSCALL_ARG3(regs),
- UPT_SYSCALL_ARG4(regs));
- else
- audit_syscall_exit(regs);
-
- /* Fake a debug trap */
- if (is_singlestep)
- send_sigtrap(current, regs, 0);
+ audit_syscall_entry(HOST_AUDIT_ARCH,
+ UPT_SYSCALL_NR(&regs->regs),
+ UPT_SYSCALL_ARG1(&regs->regs),
+ UPT_SYSCALL_ARG2(&regs->regs),
+ UPT_SYSCALL_ARG3(&regs->regs),
+ UPT_SYSCALL_ARG4(&regs->regs));
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
- if (!(current->ptrace & PT_PTRACED))
- return;
+ tracehook_report_syscall_entry(regs);
+}
- /*
- * the 0x80 provides a way for the tracing parent to distinguish
- * between a syscall stop and SIGTRAP delivery
- */
- tracesysgood = (current->ptrace & PT_TRACESYSGOOD);
- ptrace_notify(SIGTRAP | (tracesysgood ? 0x80 : 0));
+void syscall_trace_leave(struct pt_regs *regs)
+{
+ int ptraced = current->ptrace;
- if (entryexit) /* force do_signal() --> is_syscall() */
- set_thread_flag(TIF_SIGPENDING);
+ audit_syscall_exit(regs);
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ /* Fake a debug trap */
+ if (ptraced & PT_DTRACE)
+ send_sigtrap(current, &regs->regs, 0);
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+
+ tracehook_report_syscall_exit(regs, 0);
+ /* force do_signal() --> is_syscall() */
+ if (ptraced & PT_PTRACED)
+ set_thread_flag(TIF_SIGPENDING);
}
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 2a1639255763..c88211139a51 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -25,8 +25,7 @@ int write_sigio_irq(int fd)
int err;
err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
- IRQF_SAMPLE_RANDOM, "write sigio",
- NULL);
+ 0, "write sigio", NULL);
if (err) {
printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
"err = %d\n", err);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 05fbeb480e0b..86368a025a96 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -18,7 +18,7 @@ void handle_syscall(struct uml_pt_regs *r)
long result;
int syscall;
- syscall_trace(r, 0);
+ syscall_trace_enter(regs);
/*
* This should go in the declaration of syscall, but when I do that,
@@ -34,7 +34,7 @@ void handle_syscall(struct uml_pt_regs *r)
result = -ENOSYS;
else result = EXECUTE_SYSCALL(syscall, regs);
- UPT_SET_SYSCALL_RETURN(r, result);
+ PT_REGS_SET_SYSCALL_RETURN(regs, result);
- syscall_trace(r, 1);
+ syscall_trace_leave(regs);
}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index d1a23fb3190d..5f76d4ba151c 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -13,7 +13,7 @@
#include "kern_util.h"
#include "os.h"
-void timer_handler(int sig, struct uml_pt_regs *regs)
+void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
unsigned long flags;
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 3be60765c0e2..0353b98ae35a 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -172,7 +172,7 @@ void fatal_sigsegv(void)
os_dump_core();
}
-void segv_handler(int sig, struct uml_pt_regs *regs)
+void segv_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -258,8 +258,11 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
return 0;
}
-void relay_signal(int sig, struct uml_pt_regs *regs)
+void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
+ struct faultinfo *fi;
+ struct siginfo clean_si;
+
if (!UPT_IS_USER(regs)) {
if (sig == SIGBUS)
printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
@@ -269,18 +272,40 @@ void relay_signal(int sig, struct uml_pt_regs *regs)
arch_examine_signal(sig, regs);
- current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
- force_sig(sig, current);
+ memset(&clean_si, 0, sizeof(clean_si));
+ clean_si.si_signo = si->si_signo;
+ clean_si.si_errno = si->si_errno;
+ clean_si.si_code = si->si_code;
+ switch (sig) {
+ case SIGILL:
+ case SIGFPE:
+ case SIGSEGV:
+ case SIGBUS:
+ case SIGTRAP:
+ fi = UPT_FAULTINFO(regs);
+ clean_si.si_addr = (void __user *) FAULT_ADDRESS(*fi);
+ current->thread.arch.faultinfo = *fi;
+#ifdef __ARCH_SI_TRAPNO
+ clean_si.si_trapno = si->si_trapno;
+#endif
+ break;
+ default:
+ printk(KERN_ERR "Attempted to relay unknown signal %d (si_code = %d)\n",
+ sig, si->si_code);
+ }
+
+ force_sig_info(sig, &clean_si, current);
}
-void bus_handler(int sig, struct uml_pt_regs *regs)
+void bus_handler(int sig, struct siginfo *si, struct uml_pt_regs *regs)
{
if (current->thread.fault_catcher != NULL)
UML_LONGJMP(current->thread.fault_catcher, 1);
- else relay_signal(sig, regs);
+ else
+ relay_signal(sig, si, regs);
}
-void winch(int sig, struct uml_pt_regs *regs)
+void winch(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
do_IRQ(WINCH_IRQ, regs);
}
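/*
 * Illustrative aside (not part of the patch): this series threads a siginfo_t
 * through the UML signal paths so relay_signal() above can forward si_code
 * and si_addr to the faulting process. On the host side that information
 * comes from a handler of the three-argument SA_SIGINFO form (hard_handler()
 * later in this series has that shape). A minimal standalone example of
 * installing such a handler:
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void segv_info_handler(int sig, siginfo_t *si, void *ucontext)
{
	/* async-signal-safety is ignored here; this is only a demo */
	(void)ucontext;
	printf("signal %d, si_code %d, fault address %p\n",
	       sig, si->si_code, si->si_addr);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_info_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGSEGV, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	*(volatile int *)0 = 0;		/* deliberately fault to trigger the handler */
	return 0;
}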
diff --git a/arch/um/os-Linux/internal.h b/arch/um/os-Linux/internal.h
index 2c3c3ecd8c01..0dc2c9f135f6 100644
--- a/arch/um/os-Linux/internal.h
+++ b/arch/um/os-Linux/internal.h
@@ -1 +1 @@
-void alarm_handler(int, mcontext_t *);
+void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc);
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 2d22f1fcd8e2..6366ce904b9b 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -13,8 +13,9 @@
#include "kern_util.h"
#include "os.h"
#include "sysdep/mcontext.h"
+#include "internal.h"
-void (*sig_info[NSIG])(int, struct uml_pt_regs *) = {
+void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
[SIGTRAP] = relay_signal,
[SIGFPE] = relay_signal,
[SIGILL] = relay_signal,
@@ -24,7 +25,7 @@ void (*sig_info[NSIG])(int, struct uml_pt_regs *) = {
[SIGIO] = sigio_handler,
[SIGVTALRM] = timer_handler };
-static void sig_handler_common(int sig, mcontext_t *mc)
+static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
{
struct uml_pt_regs r;
int save_errno = errno;
@@ -40,7 +41,7 @@ static void sig_handler_common(int sig, mcontext_t *mc)
if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
unblock_signals();
- (*sig_info[sig])(sig, &r);
+ (*sig_info[sig])(sig, si, &r);
errno = save_errno;
}
@@ -60,7 +61,7 @@ static void sig_handler_common(int sig, mcontext_t *mc)
static int signals_enabled;
static unsigned int signals_pending;
-void sig_handler(int sig, mcontext_t *mc)
+void sig_handler(int sig, siginfo_t *si, mcontext_t *mc)
{
int enabled;
@@ -72,7 +73,7 @@ void sig_handler(int sig, mcontext_t *mc)
block_signals();
- sig_handler_common(sig, mc);
+ sig_handler_common(sig, si, mc);
set_signals(enabled);
}
@@ -85,10 +86,10 @@ static void real_alarm_handler(mcontext_t *mc)
get_regs_from_mc(&regs, mc);
regs.is_user = 0;
unblock_signals();
- timer_handler(SIGVTALRM, &regs);
+ timer_handler(SIGVTALRM, NULL, &regs);
}
-void alarm_handler(int sig, mcontext_t *mc)
+void alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
int enabled;
@@ -119,7 +120,7 @@ void set_sigstack(void *sig_stack, int size)
panic("enabling signal stack failed, errno = %d\n", errno);
}
-static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = {
+static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = {
[SIGSEGV] = sig_handler,
[SIGBUS] = sig_handler,
[SIGILL] = sig_handler,
@@ -132,7 +133,7 @@ static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = {
};
-static void hard_handler(int sig, siginfo_t *info, void *p)
+static void hard_handler(int sig, siginfo_t *si, void *p)
{
struct ucontext *uc = p;
mcontext_t *mc = &uc->uc_mcontext;
@@ -161,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *info, void *p)
while ((sig = ffs(pending)) != 0){
sig--;
pending &= ~(1 << sig);
- (*handlers[sig])(sig, mc);
+ (*handlers[sig])(sig, si, mc);
}
/*
@@ -273,9 +274,12 @@ void unblock_signals(void)
* Deal with SIGIO first because the alarm handler might
* schedule, leaving the pending SIGIO stranded until we come
* back here.
+ *
+ * SIGIO's handler doesn't use siginfo or mcontext,
+ * so they can be NULL.
*/
if (save_pending & SIGIO_MASK)
- sig_handler_common(SIGIO, NULL);
+ sig_handler_common(SIGIO, NULL, NULL);
if (save_pending & SIGVTALRM_MASK)
real_alarm_handler(NULL);
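/*
 * Illustrative aside (not part of the patch): hard_handler() above drains a
 * bitmask of pending signals with an ffs() loop -- find the lowest set bit,
 * clear it, dispatch. A minimal standalone version of that loop:
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static void dispatch(int sig)
{
	printf("dispatching pending signal %d\n", sig);
}

int main(void)
{
	/* pretend signals 3, 10 and 14 were recorded while blocked */
	unsigned int pending = (1u << 3) | (1u << 10) | (1u << 14);
	int sig;

	while ((sig = ffs(pending)) != 0) {
		sig--;			/* ffs() is 1-based */
		pending &= ~(1u << sig);
		dispatch(sig);
	}
	return 0;
}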
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index cd65727854eb..d93bb40499f7 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -346,6 +346,10 @@ void userspace(struct uml_pt_regs *regs)
int err, status, op, pid = userspace_pid[0];
/* To prevent races if using_sysemu changes under us.*/
int local_using_sysemu;
+ siginfo_t si;
+
+ /* Handle any immediate reschedules or signals */
+ interrupt_end();
if (getitimer(ITIMER_VIRTUAL, &timer))
printk(UM_KERN_ERR "Failed to get itimer, errno = %d\n", errno);
@@ -404,13 +408,17 @@ void userspace(struct uml_pt_regs *regs)
if (WIFSTOPPED(status)) {
int sig = WSTOPSIG(status);
+
+ ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
+
switch (sig) {
case SIGSEGV:
if (PTRACE_FULL_FAULTINFO ||
!ptrace_faultinfo) {
get_skas_faultinfo(pid,
&regs->faultinfo);
- (*sig_info[SIGSEGV])(SIGSEGV, regs);
+ (*sig_info[SIGSEGV])(SIGSEGV, &si,
+ regs);
}
else handle_segv(pid, regs);
break;
@@ -418,14 +426,14 @@ void userspace(struct uml_pt_regs *regs)
handle_trap(pid, regs, local_using_sysemu);
break;
case SIGTRAP:
- relay_signal(SIGTRAP, regs);
+ relay_signal(SIGTRAP, &si, regs);
break;
case SIGVTALRM:
now = os_nsecs();
if (now < nsecs)
break;
block_signals();
- (*sig_info[sig])(sig, regs);
+ (*sig_info[sig])(sig, &si, regs);
unblock_signals();
nsecs = timer.it_value.tv_sec *
UM_NSEC_PER_SEC +
@@ -439,7 +447,7 @@ void userspace(struct uml_pt_regs *regs)
case SIGFPE:
case SIGWINCH:
block_signals();
- (*sig_info[sig])(sig, regs);
+ (*sig_info[sig])(sig, &si, regs);
unblock_signals();
break;
default:
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 910499d76a67..0748fe0c8a73 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -87,7 +87,7 @@ static int after_sleep_interval(struct timespec *ts)
static void deliver_alarm(void)
{
- alarm_handler(SIGVTALRM, NULL);
+ alarm_handler(SIGVTALRM, NULL, NULL);
}
static unsigned long long sleep_time(unsigned long long nsecs)
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
skew += this_tick - last_tick;
while (skew >= one_tick) {
- alarm_handler(SIGVTALRM, NULL);
+ alarm_handler(SIGVTALRM, NULL, NULL);
skew -= one_tick;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c70684f859e1..8ec3a1aa4abd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -70,6 +70,7 @@ config X86
select HAVE_ARCH_JUMP_LABEL
select HAVE_TEXT_POKE_SMP
select HAVE_GENERIC_HARDIRQS
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select SPARSE_IRQ
select GENERIC_FIND_FIRST_BIT
select GENERIC_IRQ_PROBE
@@ -84,6 +85,7 @@ config X86
select GENERIC_IOMAP
select DCACHE_WORD_ACCESS
select GENERIC_SMP_IDLE_THREAD
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select HAVE_ARCH_SECCOMP_FILTER
select BUILDTIME_EXTABLE_SORT
select GENERIC_CMOS_UPDATE
@@ -1525,7 +1527,7 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+ bool "Enable -fstack-protector buffer overflow detection"
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b0c5276861ec..682e9c210baa 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+ # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+ # with nonstandard options
+ KBUILD_CFLAGS += -fno-pic
+
# prevent gcc from keeping the stack 16 byte aligned
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5a747dd884db..f7535bedc33f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 441520e4174f..a3ac52b29cbf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -33,6 +33,14 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
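The MCACOD helpers moved into mce.h above are just masks over the low 16 bits of an MCi_STATUS value. A small self-contained sketch of how a caller might decode that field (the status value below is made up purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define MCACOD		0xffff	/* MCA error code: low 16 bits of MCi_STATUS */
#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF memory scrubbing */
#define MCACOD_SCRUBMSK	0xfff0
#define MCACOD_INSTR	0x0150	/* instruction fetch */

int main(void)
{
	/* hypothetical MCi_STATUS value; only the low 16 bits matter here */
	uint64_t status = 0x9400000000000150ULL;
	uint16_t mcacod = status & MCACOD;

	if ((mcacod & MCACOD_SCRUBMSK) == MCACOD_SCRUB)
		printf("memory scrubbing error (0x%04x)\n", mcacod);
	else if (mcacod == MCACOD_INSTR)
		printf("instruction fetch error (0x%04x)\n", mcacod);
	else
		printf("other MCA error code 0x%04x\n", mcacod);
	return 0;
}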
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 87bdbca72f94..72f9adf6eca4 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -100,25 +100,6 @@ extern void olpc_xo1_pm_wakeup_clear(u16 value);
extern int pci_olpc_init(void);
-/* EC related functions */
-
-extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
- unsigned char *outbuf, size_t outlen);
-
-/* EC commands */
-
-#define EC_FIRMWARE_REV 0x08
-#define EC_WRITE_SCI_MASK 0x1b
-#define EC_WAKE_UP_WLAN 0x24
-#define EC_WLAN_LEAVE_RESET 0x25
-#define EC_READ_EB_MODE 0x2a
-#define EC_SET_SCI_INHIBIT 0x32
-#define EC_SET_SCI_INHIBIT_RELEASE 0x34
-#define EC_WLAN_ENTER_RESET 0x35
-#define EC_WRITE_EXT_SCI_MASK 0x38
-#define EC_SCI_QUERY 0x84
-#define EC_EXT_SCI_QUERY 0x85
-
/* SCI source values */
#define EC_SCI_SRC_EMPTY 0x00
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index c78f14a0df00..cb4e43bce98a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; }
extern void perf_events_lapic_init(void);
/*
- * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
- * This flag is otherwise unused and ABI specified to be 0, so nobody should
- * care what we do with it.
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ * event (HW bugs exempt).
+ * VM - original X86_VM_MASK; see set_linear_ip().
*/
#define PERF_EFLAGS_EXACT (1UL << 3)
+#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
@@ -234,7 +239,7 @@ extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
-static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
+static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
*nr = 0;
return NULL;
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b315a33867f2..33692eaabab5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,8 +12,7 @@
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
- * These are fair FIFO ticket locks, which are currently limited to 256
- * CPUs.
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
*
* (the type definitions are in asm/spinlock_types.h)
*/
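The updated spinlock comment claims support for up to 2^16 CPUs; that bound falls out of the width of the ticket fields rather than anything algorithmic. Below is a toy ticket lock with 16-bit tickets, only a sketch of the idea and not the kernel's implementation, showing where the limit comes from:

#include <stdint.h>
#include <stdio.h>

/* Toy ticket lock with 16-bit "next" and "owner" tickets.  Because each
 * field is 16 bits wide, at most 2^16 CPUs can be queued before tickets
 * wrap onto each other -- which is where the 2^16 CPU limit comes from. */
struct ticket_lock {
	uint16_t next;		/* ticket handed to the next arriving CPU */
	uint16_t owner;		/* ticket currently allowed to hold the lock */
};

static void ticket_lock(struct ticket_lock *lock)
{
	uint16_t me = __atomic_fetch_add(&lock->next, 1, __ATOMIC_RELAXED);

	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != me)
		;	/* spin until our ticket is called */
}

static void ticket_unlock(struct ticket_lock *lock)
{
	/* only the lock holder updates owner, so a plain increment is fine
	 * for this sketch */
	__atomic_store_n(&lock->owner, lock->owner + 1, __ATOMIC_RELEASE);
}

int main(void)
{
	struct ticket_lock lock = { 0, 0 };

	ticket_lock(&lock);
	printf("lock held, owner=%u next=%u\n", lock.owner, lock.next);
	ticket_unlock(&lock);
	return 0;
}

Widening the ticket type is what raises the limit, which is what the reworded comment reflects.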
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 4437001d8e3d..0d9776e9e2dc 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -15,7 +15,6 @@
# ifdef CONFIG_X86_32
# include <asm/unistd_32.h>
-# define __ARCH_WANT_IPC_PARSE_VERSION
# define __ARCH_WANT_STAT64
# define __ARCH_WANT_SYS_IPC
# define __ARCH_WANT_SYS_OLD_MMAP
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 95bf99de9058..1b8e5a03d942 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags;
static char temp_stack[4096];
#endif
-asmlinkage void acpi_enter_s3(void)
-{
- acpi_enter_sleep_state(3, wake_sleep_flags);
-}
/**
* acpi_suspend_lowlevel - save kernel state
*
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 5653a5791ec9..67f59f8c6956 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,7 +2,6 @@
* Variables and functions used by the code in sleep.c
*/
-#include <linux/linkage.h>
#include <asm/realmode.h>
extern unsigned long saved_video_mode;
@@ -11,7 +10,6 @@ extern long saved_magic;
extern int wakeup_pmode_return;
extern u8 wake_sleep_flags;
-extern asmlinkage void acpi_enter_s3(void);
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 72610839f03b..13ab720573e3 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,7 +74,9 @@ restore_registers:
ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
- call acpi_enter_s3
+ pushl $3
+ call acpi_enter_sleep_state
+ addl $4, %esp
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 014d1d28c397..8ea5164cbd04 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel)
movq %rsi, saved_rsi
addq $8, %rsp
- call acpi_enter_s3
+ movl $3, %edi
+ xorl %eax, %eax
+ call acpi_enter_sleep_state
/* in case something went wrong, restore the machine status and go on */
jmp resume_point
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 931280ff8299..ced4534baed5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
#endif
#ifdef P6_NOP1
-static const unsigned char __initconst_or_module p6nops[] =
+static const unsigned char p6nops[] =
{
P6_NOP1,
P6_NOP2,
@@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void)
ideal_nops = intel_nops;
#endif
}
-
+ break;
default:
#ifdef CONFIG_X86_64
ideal_nops = k8_nops;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 406eee784684..c265593ec2cd 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
BUG_ON(!cfg->vector);
vector = cfg->vector;
- for_each_cpu(cpu, cfg->domain)
+ for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
@@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
- for_each_cpu(cpu, cfg->old_domain) {
+ for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (!IO_APIC_IRQ(irq))
return;
+ /*
+ * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
+ * can handle this irq and the apic driver is finalized at this point,
+ * update the cfg->domain.
+ */
+ if (irq < legacy_pic->nr_legacy_irqs &&
+ cpumask_equal(cfg->domain, cpumask_of(0)))
+ apic->vector_allocation_domain(0, cfg->domain,
+ apic->target_cpus());
+
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 46d8786d655e..a5fbc3c5fccc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ setup_clear_cpu_cap(X86_FEATURE_AVX2);
return 1;
}
__setup("noxsave", x86_xsave_setup);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 413c2ced887c..13017626f9a8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,13 +55,6 @@ static struct severity {
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
-#define MCACOD 0xffff
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
MCESEV(
NO, "Invalid",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5e095f873e3e..292d0258311c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
static DEFINE_PER_CPU(struct work_struct, mce_work);
+static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ struct pt_regs *regs)
{
int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (m->status & MCI_STATUS_VAL)
+ if (m->status & MCI_STATUS_VAL) {
__set_bit(i, validp);
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
+ }
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
ret = 1;
}
@@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*final = m;
memset(valid_banks, 0, sizeof(valid_banks));
- no_way_out = mce_no_way_out(&m, &msg, valid_banks);
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
barrier();
@@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void)
}
}
+/*
+ * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
+ * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
+ * Vol 3B Table 15-20). But this confuses both the code that determines
+ * whether the machine check occurred in kernel or user mode, and also
+ * the severity assessment code. Pretend that EIPV was set, and take the
+ * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
+ */
+static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+ if (bank != 0)
+ return;
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+ return;
+ if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
+ MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
+ MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
+ MCACOD)) !=
+ (MCI_STATUS_UC|MCI_STATUS_EN|
+ MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
+ MCI_STATUS_AR|MCACOD_INSTR))
+ return;
+
+ m->mcgstatus |= MCG_STATUS_EIPV;
+ m->ip = regs->ip;
+ m->cs = regs->cs;
+}
+
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
@@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
mce_bootlog = 0;
+
+ if (c->x86 == 6 && c->x86_model == 45)
+ quirk_no_way_out = quirk_sandybridge_ifu;
}
if (monarch_timeout < 0)
monarch_timeout = 0;
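quirk_no_way_out above is an optional hook: it stays NULL on unaffected CPUs, is set only for the one model that needs it, and mce_no_way_out() calls it only when non-NULL. A generic sketch of that pattern follows; every name in it is illustrative, none are kernel APIs:

#include <stdio.h>

struct fake_mce { int bank; unsigned long long status; };

/* Optional per-platform hook, NULL unless a quirk applies -- the same
 * shape as quirk_no_way_out in the patch above. */
static void (*quirk_hook)(int bank, struct fake_mce *m);

static void quirk_example(int bank, struct fake_mce *m)
{
	if (bank == 0)
		m->status |= 1ULL << 63;	/* pretend-fix missing bits */
}

static void scan_banks(struct fake_mce *m, int nbanks)
{
	for (int bank = 0; bank < nbanks; bank++) {
		m->bank = bank;
		if (quirk_hook)
			quirk_hook(bank, m);	/* only if a quirk registered */
	}
}

int main(void)
{
	struct fake_mce m = { 0, 0 };

	quirk_hook = quirk_example;	/* registered for the affected model */
	scan_banks(&m, 4);
	printf("status after quirks: %#llx\n", m.status);
	return 0;
}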
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 29557aa06dda..915b876edd1e 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
#include "perf_event.h"
@@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size)
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
+static unsigned long get_segment_base(unsigned int segment)
+{
+ struct desc_struct *desc;
+ int idx = segment >> 3;
+
+ if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ if (idx > LDT_ENTRIES)
+ return 0;
+
+ if (idx > current->active_mm->context.size)
+ return 0;
+
+ desc = current->active_mm->context.ldt;
+ } else {
+ if (idx > GDT_ENTRIES)
+ return 0;
+
+ desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+ }
+
+ return get_desc_base(desc + idx);
+}
+
#ifdef CONFIG_COMPAT
#include <asm/compat.h>
@@ -1746,13 +1771,17 @@ static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
+ unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
- fp = compat_ptr(regs->bp);
+ cs_base = get_segment_base(regs->cs);
+ ss_base = get_segment_base(regs->ss);
+
+ fp = compat_ptr(ss_base + regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
@@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (!valid_user_frame(fp, sizeof(frame)))
break;
- perf_callchain_store(entry, frame.return_address);
- fp = compat_ptr(frame.next_frame);
+ perf_callchain_store(entry, cs_base + frame.return_address);
+ fp = compat_ptr(ss_base + frame.next_frame);
}
return 1;
}
@@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ /*
+ * We don't know what to do with VM86 stacks.. ignore them for now.
+ */
+ if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+ return;
+
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
}
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
+/*
+ * Deal with code segment offsets for the various execution modes:
+ *
+ * VM86 - the good olde 16 bit days, where the linear address is
+ * 20 bits and we use regs->ip + 0x10 * regs->cs.
+ *
+ * IA32 - Where we need to look at GDT/LDT segment descriptor tables
+ * to figure out what the 32bit base address is.
+ *
+ * X32 - has TIF_X32 set, but is running in x86_64
+ *
+ * X86_64 - CS,DS,SS,ES are all zero based.
+ */
+static unsigned long code_segment_base(struct pt_regs *regs)
{
- unsigned long ip;
+ /*
+ * If we are in VM86 mode, add the segment offset to convert to a
+ * linear address.
+ */
+ if (regs->flags & X86_VM_MASK)
+ return 0x10 * regs->cs;
+
+ /*
+ * For IA32 we look at the GDT/LDT segment base to convert the
+ * effective IP to a linear address.
+ */
+#ifdef CONFIG_X86_32
+ if (user_mode(regs) && regs->cs != __USER_CS)
+ return get_segment_base(regs->cs);
+#else
+ if (test_thread_flag(TIF_IA32)) {
+ if (user_mode(regs) && regs->cs != __USER32_CS)
+ return get_segment_base(regs->cs);
+ }
+#endif
+ return 0;
+}
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- ip = perf_guest_cbs->get_guest_ip();
- else
- ip = instruction_pointer(regs);
+ return perf_guest_cbs->get_guest_ip();
- return ip;
+ return regs->ip + code_segment_base(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (!kernel_ip(regs->ip))
+ if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
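code_segment_base() added above turns an effective IP into a linear address: a VM86 code segment contributes 0x10 * cs, while a protected-mode segment contributes whatever base its GDT/LDT descriptor carries (zero for the flat 64-bit and common 32-bit cases). A userspace sketch of that arithmetic, with purely hypothetical register values:

#include <stdint.h>
#include <stdio.h>

/* Crude model of how perf turns (cs, ip) into a linear instruction
 * pointer: real-mode/VM86 segments are paragraph based, protected-mode
 * segments add the base taken from the GDT/LDT descriptor (zero for the
 * flat segments used by 64-bit and normal 32-bit user space). */
static unsigned long linear_ip(int vm86, unsigned long seg_base,
			       unsigned long cs, unsigned long ip)
{
	if (vm86)
		return 0x10 * cs + ip;	/* 20-bit real-mode style address */
	return seg_base + ip;		/* descriptor base + effective IP */
}

int main(void)
{
	/* hypothetical register values, purely for illustration */
	printf("vm86:  %#lx\n", linear_ip(1, 0, 0xb800, 0x0123));
	printf("flat:  %#lx\n", linear_ip(0, 0, 0x33, 0x400512));
	printf("based: %#lx\n", linear_ip(0, 0x10000, 0x23, 0x512));
	return 0;
}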
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index a15df4be151f..6605a81ba339 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -374,7 +374,7 @@ struct x86_pmu {
/*
* Intel DebugStore bits
*/
- int bts :1,
+ unsigned int bts :1,
bts_active :1,
pebs :1,
pebs_active :1,
@@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip)
#endif
}
+/*
+ * Not all PMUs provide the right context information to place the reported IP
+ * into full context. Specifically segment registers are typically not
+ * supplied.
+ *
+ * Assuming the address is a linear address (it is for IBS), we fake the CS and
+ * vm86 mode using the known zero-based code segment and 'fix up' the registers
+ * to reflect this.
+ *
+ * Intel PEBS/LBR appear to typically provide the effective address, nothing
+ * much we can do about that but pray and treat it like a linear address.
+ */
+static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+ if (regs->flags & X86_VM_MASK)
+ regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+ regs->ip = ip;
+}
+
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
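set_linear_ip() above stashes the vm86 state by XOR-ing the real VM flag against the spare PERF_EFLAGS_VM bit, so a sample still records that it came from vm86 mode even after the IP has been rewritten to a linear address. A standalone sketch of that bit swap; bit positions follow the defines in the patch and the helper name is made up:

#include <stdio.h>

#define X86_VM_MASK	(1UL << 17)	/* EFLAGS.VM, the hardware vm86 bit */
#define PERF_EFLAGS_VM	(1UL << 5)	/* spare, ABI-zero bit reused by perf */

/* Mirrors the trick in set_linear_ip(): when the sample came from vm86
 * mode, move the VM bit into an otherwise-unused flag bit so later code
 * (perf_callchain_user, code_segment_base) can still detect it even
 * though the saved regs no longer look like a vm86 frame. */
static unsigned long stash_vm_flag(unsigned long flags)
{
	if (flags & X86_VM_MASK)
		flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
	return flags;
}

int main(void)
{
	unsigned long flags = X86_VM_MASK | 0x2;	/* hypothetical eflags */

	flags = stash_vm_flag(flags);
	printf("VM cleared: %d, PERF_EFLAGS_VM set: %d\n",
	       !(flags & X86_VM_MASK), !!(flags & PERF_EFLAGS_VM));
	return 0;
}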
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index da9bcdcd9856..7bfb5bec8630 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
#include <asm/apic.h>
+#include "perf_event.h"
+
static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -536,7 +538,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
regs.flags &= ~PERF_EFLAGS_EXACT;
} else {
- instruction_pointer_set(&regs, ibs_data.regs[1]);
+ set_linear_ip(&regs, ibs_data.regs[1]);
regs.flags |= PERF_EFLAGS_EXACT;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 7a8b9d0abcaa..7f2739e03e79 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -138,6 +138,84 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}
+#define SNB_DMND_DATA_RD (1ULL << 0)
+#define SNB_DMND_RFO (1ULL << 1)
+#define SNB_DMND_IFETCH (1ULL << 2)
+#define SNB_DMND_WB (1ULL << 3)
+#define SNB_PF_DATA_RD (1ULL << 4)
+#define SNB_PF_RFO (1ULL << 5)
+#define SNB_PF_IFETCH (1ULL << 6)
+#define SNB_LLC_DATA_RD (1ULL << 7)
+#define SNB_LLC_RFO (1ULL << 8)
+#define SNB_LLC_IFETCH (1ULL << 9)
+#define SNB_BUS_LOCKS (1ULL << 10)
+#define SNB_STRM_ST (1ULL << 11)
+#define SNB_OTHER (1ULL << 15)
+#define SNB_RESP_ANY (1ULL << 16)
+#define SNB_NO_SUPP (1ULL << 17)
+#define SNB_LLC_HITM (1ULL << 18)
+#define SNB_LLC_HITE (1ULL << 19)
+#define SNB_LLC_HITS (1ULL << 20)
+#define SNB_LLC_HITF (1ULL << 21)
+#define SNB_LOCAL (1ULL << 22)
+#define SNB_REMOTE (0xffULL << 23)
+#define SNB_SNP_NONE (1ULL << 31)
+#define SNB_SNP_NOT_NEEDED (1ULL << 32)
+#define SNB_SNP_MISS (1ULL << 33)
+#define SNB_NO_FWD (1ULL << 34)
+#define SNB_SNP_FWD (1ULL << 35)
+#define SNB_HITM (1ULL << 36)
+#define SNB_NON_DRAM (1ULL << 37)
+
+#define SNB_DMND_READ (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
+#define SNB_DMND_WRITE (SNB_DMND_RFO|SNB_LLC_RFO)
+#define SNB_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
+
+#define SNB_SNP_ANY (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
+ SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
+ SNB_HITM)
+
+#define SNB_DRAM_ANY (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
+#define SNB_DRAM_REMOTE (SNB_REMOTE|SNB_SNP_ANY)
+
+#define SNB_L3_ACCESS SNB_RESP_ANY
+#define SNB_L3_MISS (SNB_DRAM_ANY|SNB_NON_DRAM)
+
+static __initconst const u64 snb_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
+ [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
+ [ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
+ [ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
+ [ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
+ },
+ },
+};
+
static __initconst const u64 snb_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -235,16 +313,16 @@ static __initconst const u64 snb_hw_cache_event_ids
},
[ C(NODE) ] = {
[ C(OP_READ) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
- [ C(RESULT_ACCESS) ] = -1,
- [ C(RESULT_MISS) ] = -1,
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ [ C(RESULT_MISS) ] = 0x01b7,
},
},
@@ -1444,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+ /*
+ * If PMU counter has PEBS enabled it is not enough to disable counter
+ * on a guest entry since PEBS memory write can overshoot guest entry
+ * and corrupt guest memory. Disabling PEBS solves the problem.
+ */
+ arr[1].msr = MSR_IA32_PEBS_ENABLE;
+ arr[1].host = cpuc->pebs_enabled;
+ arr[1].guest = 0;
- *nr = 1;
+ *nr = 2;
return arr;
}
@@ -1964,6 +2050,8 @@ __init int intel_pmu_init(void)
case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
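intel_guest_get_msrs() now hands back a second host/guest MSR pair so that MSR_IA32_PEBS_ENABLE is forced to zero while a guest runs, keeping stray PEBS writes out of guest memory. The sketch below models the value-pair pattern it uses; the struct and helper are modelled on the kernel code rather than taken from it, and the MSR numbers are the architectural ones from the SDM:

#include <stdint.h>
#include <stdio.h>

/* Same shape as struct perf_guest_switch_msr: for each MSR, the value
 * to load on guest entry and the value to restore on guest exit. */
struct switch_msr {
	uint32_t msr;
	uint64_t host, guest;
};

static struct switch_msr msrs[2];

static struct switch_msr *get_switch_msrs(int *nr, uint64_t global_ctrl,
					  uint64_t pebs_enabled)
{
	msrs[0].msr = 0x38f;		/* MSR_CORE_PERF_GLOBAL_CTRL */
	msrs[0].host = global_ctrl;
	msrs[0].guest = 0;		/* stop host counters in the guest */

	msrs[1].msr = 0x3f1;		/* MSR_IA32_PEBS_ENABLE */
	msrs[1].host = pebs_enabled;
	msrs[1].guest = 0;		/* PEBS writes must not hit guest memory */

	*nr = 2;
	return msrs;
}

int main(void)
{
	int nr;
	struct switch_msr *m = get_switch_msrs(&nr, 0xf, 0x1);

	for (int i = 0; i < nr; i++)
		printf("msr %#x: host=%#llx guest=%#llx\n", m[i].msr,
		       (unsigned long long)m[i].host,
		       (unsigned long long)m[i].guest);
	return 0;
}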
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 629ae0b7ad90..e38d97bf4259 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -499,7 +499,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
- regs->ip = from;
+ set_linear_ip(regs, from);
return 1;
}
@@ -529,7 +529,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} while (to < ip);
if (to == ip) {
- regs->ip = old_to;
+ set_linear_ip(regs, old_to);
return 1;
}
@@ -569,7 +569,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
- regs.ip = pebs->ip;
+ regs.flags = pebs->flags;
+ set_linear_ip(&regs, pebs->ip);
regs.bp = pebs->bp;
regs.sp = pebs->sp;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 19faffc60886..0a5571080e74 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -18,6 +18,7 @@ static struct event_constraint constraint_empty =
EVENT_CONSTRAINT(0, 0, 0);
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(event_ext, event, "config:0-7,21");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
@@ -33,10 +34,81 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
-DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
+
+static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+ u64 count;
+
+ rdmsrl(event->hw.event_base, count);
+
+ return count;
+}
+
+/*
+ * generic get constraint function for shared match/mask registers.
+ */
+static struct event_constraint *
+uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ unsigned long flags;
+ bool ok = false;
+
+ /*
+ * reg->alloc can be set due to existing state, so for fake box we
+ * need to ignore this, otherwise we might fail to allocate proper
+ * fake state for this extra reg constraint.
+ */
+ if (reg1->idx == EXTRA_REG_NONE ||
+ (!uncore_box_is_fake(box) && reg1->alloc))
+ return NULL;
+
+ er = &box->shared_regs[reg1->idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!atomic_read(&er->ref) ||
+ (er->config1 == reg1->config && er->config2 == reg2->config)) {
+ atomic_inc(&er->ref);
+ er->config1 = reg1->config;
+ er->config2 = reg2->config;
+ ok = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (ok) {
+ if (!uncore_box_is_fake(box))
+ reg1->alloc = 1;
+ return NULL;
+ }
+
+ return &constraint_empty;
+}
+
+static void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+ /*
+ * Only put constraint if extra reg was actually allocated. Also
+ * takes care of events which do not use an extra shared reg.
+ *
+ * Also, if this is a fake box we shouldn't touch any event state
+ * (reg->alloc) and we don't care about leaving inconsistent box
+ * state either since it will be thrown out.
+ */
+ if (uncore_box_is_fake(box) || !reg1->alloc)
+ return;
+
+ er = &box->shared_regs[reg1->idx];
+ atomic_dec(&er->ref);
+ reg1->alloc = 0;
+}
/* Sandy Bridge-EP uncore support */
static struct intel_uncore_type snbep_uncore_cbox;
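The new uncore_get_constraint()/uncore_put_constraint() helpers above let several events share one match/mask register: under a spinlock, a get succeeds if the register is unused or already programmed with an identical config, bumping a reference count that the corresponding put drops. A hedged userspace model of that allocate-if-compatible scheme (pthreads instead of raw spinlocks, all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal model of the shared extra-register scheme: many events may use
 * the register, but only with one config at a time.  Acquire succeeds if
 * the register is free or already programmed with the same config. */
struct shared_reg {
	pthread_mutex_t lock;
	int ref;
	uint64_t config;
};

static bool shared_reg_get(struct shared_reg *er, uint64_t config)
{
	bool ok = false;

	pthread_mutex_lock(&er->lock);
	if (!er->ref || er->config == config) {
		er->ref++;
		er->config = config;
		ok = true;
	}
	pthread_mutex_unlock(&er->lock);
	return ok;
}

static void shared_reg_put(struct shared_reg *er)
{
	pthread_mutex_lock(&er->lock);
	er->ref--;
	pthread_mutex_unlock(&er->lock);
}

int main(void)
{
	struct shared_reg er = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	printf("first user:       %d\n", shared_reg_get(&er, 0x1234));
	printf("same config:      %d\n", shared_reg_get(&er, 0x1234));
	printf("conflicting user: %d\n", shared_reg_get(&er, 0xbeef));
	shared_reg_put(&er);
	shared_reg_put(&er);
	return 0;
}

The fake-box handling in the real helpers (skipping reg1->alloc updates) is an extra wrinkle for event validation that the model above leaves out.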
@@ -64,18 +136,15 @@ static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
pci_write_config_dword(pdev, box_ctl, config);
}
-static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct pci_dev *pdev = box->pci_dev;
struct hw_perf_event *hwc = &event->hw;
- pci_write_config_dword(pdev, hwc->config_base, hwc->config |
- SNBEP_PMON_CTL_EN);
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
}
-static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct pci_dev *pdev = box->pci_dev;
struct hw_perf_event *hwc = &event->hw;
@@ -83,8 +152,7 @@ static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
pci_write_config_dword(pdev, hwc->config_base, hwc->config);
}
-static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
- struct perf_event *event)
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
struct pci_dev *pdev = box->pci_dev;
struct hw_perf_event *hwc = &event->hw;
@@ -92,14 +160,15 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+
return count;
}
static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
- pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
- SNBEP_PMON_BOX_CTL_INT);
+
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, SNBEP_PMON_BOX_CTL_INT);
}
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
@@ -112,7 +181,6 @@ static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
rdmsrl(msr, config);
config |= SNBEP_PMON_BOX_CTL_FRZ;
wrmsrl(msr, config);
- return;
}
}
@@ -126,12 +194,10 @@ static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
rdmsrl(msr, config);
config &= ~SNBEP_PMON_BOX_CTL_FRZ;
wrmsrl(msr, config);
- return;
}
}
-static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -150,68 +216,15 @@ static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
wrmsrl(hwc->config_base, hwc->config);
}
-static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
- struct perf_event *event)
-{
- struct hw_perf_event *hwc = &event->hw;
- u64 count;
-
- rdmsrl(hwc->event_base, count);
- return count;
-}
-
static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
unsigned msr = uncore_msr_box_ctl(box);
+
if (msr)
wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
-static struct event_constraint *
-snbep_uncore_get_constraint(struct intel_uncore_box *box,
- struct perf_event *event)
-{
- struct intel_uncore_extra_reg *er;
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- unsigned long flags;
- bool ok = false;
-
- if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
- return NULL;
-
- er = &box->shared_regs[reg1->idx];
- raw_spin_lock_irqsave(&er->lock, flags);
- if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
- atomic_inc(&er->ref);
- er->config1 = reg1->config;
- ok = true;
- }
- raw_spin_unlock_irqrestore(&er->lock, flags);
-
- if (ok) {
- if (box->phys_id >= 0)
- reg1->alloc = 1;
- return NULL;
- }
- return &constraint_empty;
-}
-
-static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
- struct perf_event *event)
-{
- struct intel_uncore_extra_reg *er;
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
-
- if (box->phys_id < 0 || !reg1->alloc)
- return;
-
- er = &box->shared_regs[reg1->idx];
- atomic_dec(&er->ref);
- reg1->alloc = 0;
-}
-
-static int snbep_uncore_hw_config(struct intel_uncore_box *box,
- struct perf_event *event)
+static int snbep_uncore_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
@@ -221,14 +234,16 @@ static int snbep_uncore_hw_config(struct intel_uncore_box *box,
SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
reg1->config = event->attr.config1 &
SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
- } else if (box->pmu->type == &snbep_uncore_pcu) {
- reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
- reg1->config = event->attr.config1 &
- SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
} else {
- return 0;
+ if (box->pmu->type == &snbep_uncore_pcu) {
+ reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->config = event->attr.config1 & SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+ } else {
+ return 0;
+ }
}
reg1->idx = 0;
+
return 0;
}
@@ -272,10 +287,19 @@ static struct attribute *snbep_uncore_pcu_formats_attr[] = {
&format_attr_thresh5.attr,
&format_attr_occ_invert.attr,
&format_attr_occ_edge.attr,
- &format_attr_filter_brand0.attr,
- &format_attr_filter_brand1.attr,
- &format_attr_filter_brand2.attr,
- &format_attr_filter_brand3.attr,
+ &format_attr_filter_band0.attr,
+ &format_attr_filter_band1.attr,
+ &format_attr_filter_band2.attr,
+ &format_attr_filter_band3.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_qpi_formats_attr[] = {
+ &format_attr_event_ext.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
NULL,
};
@@ -314,15 +338,20 @@ static struct attribute_group snbep_uncore_pcu_format_group = {
.attrs = snbep_uncore_pcu_formats_attr,
};
+static struct attribute_group snbep_uncore_qpi_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_qpi_formats_attr,
+};
+
static struct intel_uncore_ops snbep_uncore_msr_ops = {
.init_box = snbep_uncore_msr_init_box,
.disable_box = snbep_uncore_msr_disable_box,
.enable_box = snbep_uncore_msr_enable_box,
.disable_event = snbep_uncore_msr_disable_event,
.enable_event = snbep_uncore_msr_enable_event,
- .read_counter = snbep_uncore_msr_read_counter,
- .get_constraint = snbep_uncore_get_constraint,
- .put_constraint = snbep_uncore_put_constraint,
+ .read_counter = uncore_msr_read_counter,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
.hw_config = snbep_uncore_hw_config,
};
@@ -485,8 +514,13 @@ static struct intel_uncore_type snbep_uncore_qpi = {
.num_counters = 4,
.num_boxes = 2,
.perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .ops = &snbep_uncore_pci_ops,
.event_descs = snbep_uncore_qpi_events,
- SNBEP_UNCORE_PCI_COMMON_INIT(),
+ .format_group = &snbep_uncore_qpi_format_group,
};
@@ -603,10 +637,8 @@ static void snbep_pci2phy_map_init(void)
}
/* end of Sandy Bridge-EP uncore support */
-
/* Sandy Bridge uncore support */
-static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -616,20 +648,11 @@ static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}
-static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
wrmsrl(event->hw.config_base, 0);
}
-static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
- struct perf_event *event)
-{
- u64 count;
- rdmsrl(event->hw.event_base, count);
- return count;
-}
-
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
if (box->pmu->pmu_idx == 0) {
@@ -648,15 +671,15 @@ static struct attribute *snb_uncore_formats_attr[] = {
};
static struct attribute_group snb_uncore_format_group = {
- .name = "format",
- .attrs = snb_uncore_formats_attr,
+ .name = "format",
+ .attrs = snb_uncore_formats_attr,
};
static struct intel_uncore_ops snb_uncore_msr_ops = {
.init_box = snb_uncore_msr_init_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
- .read_counter = snb_uncore_msr_read_counter,
+ .read_counter = uncore_msr_read_counter,
};
static struct event_constraint snb_uncore_cbox_constraints[] = {
@@ -697,12 +720,10 @@ static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
- wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
- NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}
-static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -744,7 +765,7 @@ static struct intel_uncore_ops nhm_uncore_msr_ops = {
.enable_box = nhm_uncore_msr_enable_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = nhm_uncore_msr_enable_event,
- .read_counter = snb_uncore_msr_read_counter,
+ .read_counter = uncore_msr_read_counter,
};
static struct intel_uncore_type nhm_uncore = {
@@ -769,8 +790,1049 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
};
/* end of Nehalem uncore support */
-static void uncore_assign_hw_event(struct intel_uncore_box *box,
- struct perf_event *event, int idx)
+/* Nehalem-EX uncore support */
+#define __BITS_VALUE(x, i, n) ((typeof(x))(((x) >> ((i) * (n))) & \
+ ((1ULL << (n)) - 1)))
+
+DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
+DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
+DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
+
+static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
+}
+
+static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ u64 config;
+
+ if (msr) {
+ rdmsrl(msr, config);
+ config &= ~((1ULL << uncore_num_counters(box)) - 1);
+ /* WBox has a fixed counter */
+ if (uncore_msr_fixed_ctl(box))
+ config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
+ wrmsrl(msr, config);
+ }
+}
+
+static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ u64 config;
+
+ if (msr) {
+ rdmsrl(msr, config);
+ config |= (1ULL << uncore_num_counters(box)) - 1;
+ /* WBox has a fixed counter */
+ if (uncore_msr_fixed_ctl(box))
+ config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
+ wrmsrl(msr, config);
+ }
+}
+
+static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ wrmsrl(event->hw.config_base, 0);
+}
+
+static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
+ else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+ else
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+#define NHMEX_UNCORE_OPS_COMMON_INIT() \
+ .init_box = nhmex_uncore_msr_init_box, \
+ .disable_box = nhmex_uncore_msr_disable_box, \
+ .enable_box = nhmex_uncore_msr_enable_box, \
+ .disable_event = nhmex_uncore_msr_disable_event, \
+ .read_counter = uncore_msr_read_counter
+
+static struct intel_uncore_ops nhmex_uncore_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_uncore_msr_enable_event,
+};
+
+static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_edge.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_ubox_formats_attr,
+};
+
+static struct intel_uncore_type nhmex_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_U_MSR_PMON_EV_SEL,
+ .perf_ctr = NHMEX_U_MSR_PMON_CTR,
+ .event_mask = NHMEX_U_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_U_MSR_PMON_GLOBAL_CTL,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_ubox_format_group
+};
+
+static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_cbox_formats_attr,
+};
+
+/* msr offset for each instance of cbox */
+static unsigned nhmex_cbox_msr_offsets[] = {
+ 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
+};
+
+static struct intel_uncore_type nhmex_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 6,
+ .num_boxes = 10,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
+ .perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
+ .msr_offsets = nhmex_cbox_msr_offsets,
+ .pair_ctr_ctl = 1,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_cbox_format_group
+};
+
+static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_type nhmex_uncore_wbox = {
+ .name = "wbox",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_W_MSR_PMON_CNT0,
+ .perf_ctr = NHMEX_W_MSR_PMON_EVT_SEL0,
+ .fixed_ctr = NHMEX_W_MSR_PMON_FIXED_CTR,
+ .fixed_ctl = NHMEX_W_MSR_PMON_FIXED_CTL,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_W_MSR_GLOBAL_CTL,
+ .pair_ctr_ctl = 1,
+ .event_descs = nhmex_uncore_wbox_events,
+ .ops = &nhmex_uncore_ops,
+ .format_group = &nhmex_uncore_cbox_format_group
+};
+
+static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int ctr, ev_sel;
+
+ ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
+ NHMEX_B_PMON_CTR_SHIFT;
+ ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
+ NHMEX_B_PMON_CTL_EV_SEL_SHIFT;
+
+ /* events that do not use the match/mask registers */
+ if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
+ (ctr == 2 && ev_sel != 0x4) || ctr == 3)
+ return 0;
+
+ if (box->pmu->pmu_idx == 0)
+ reg1->reg = NHMEX_B0_MSR_MATCH;
+ else
+ reg1->reg = NHMEX_B1_MSR_MATCH;
+ reg1->idx = 0;
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
+ return 0;
+}
+
+static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, reg1->config);
+ wrmsrl(reg1->reg + 1, reg2->config);
+ }
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
+}
+
+/*
+ * The Bbox has 4 counters, but each counter monitors different events.
+ * Use bits 6-7 in the event config to select counter.
+ */
+static struct event_constraint nhmex_uncore_bbox_constraints[] = {
+ EVENT_CONSTRAINT(0 , 1, 0xc0),
+ EVENT_CONSTRAINT(0x40, 2, 0xc0),
+ EVENT_CONSTRAINT(0x80, 4, 0xc0),
+ EVENT_CONSTRAINT(0xc0, 8, 0xc0),
+ EVENT_CONSTRAINT_END,
+};
+
+static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
+ &format_attr_event5.attr,
+ &format_attr_counter.attr,
+ &format_attr_match.attr,
+ &format_attr_mask.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_bbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_bbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_bbox_msr_enable_event,
+ .hw_config = nhmex_bbox_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_bbox = {
+ .name = "bbox",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_B0_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_B0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_B_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_B0_MSR_PMON_GLOBAL_CTL,
+ .msr_offset = NHMEX_B_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 1,
+ .constraints = nhmex_uncore_bbox_constraints,
+ .ops = &nhmex_uncore_bbox_ops,
+ .format_group = &nhmex_uncore_bbox_format_group
+};
+
+static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ /* only TO_R_PROG_EV event uses the match/mask register */
+ if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
+ NHMEX_S_EVENT_TO_R_PROG_EV)
+ return 0;
+
+ if (box->pmu->pmu_idx == 0)
+ reg1->reg = NHMEX_S0_MSR_MM_CFG;
+ else
+ reg1->reg = NHMEX_S1_MSR_MM_CFG;
+ reg1->idx = 0;
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
+ return 0;
+}
+
+static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, 0);
+ wrmsrl(reg1->reg + 1, reg1->config);
+ wrmsrl(reg1->reg + 2, reg2->config);
+ wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
+ }
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
+}
+
+static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_match.attr,
+ &format_attr_mask.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_sbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_sbox_formats_attr,
+};
+
+static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_sbox_msr_enable_event,
+ .hw_config = nhmex_sbox_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_sbox = {
+ .name = "sbox",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_S0_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_S0_MSR_PMON_CTR0,
+ .event_mask = NHMEX_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_S0_MSR_PMON_GLOBAL_CTL,
+ .msr_offset = NHMEX_S_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 1,
+ .ops = &nhmex_uncore_sbox_ops,
+ .format_group = &nhmex_uncore_sbox_format_group
+};
+
+enum {
+ EXTRA_REG_NHMEX_M_FILTER,
+ EXTRA_REG_NHMEX_M_DSP,
+ EXTRA_REG_NHMEX_M_ISS,
+ EXTRA_REG_NHMEX_M_MAP,
+ EXTRA_REG_NHMEX_M_MSC_THR,
+ EXTRA_REG_NHMEX_M_PGT,
+ EXTRA_REG_NHMEX_M_PLD,
+ EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
+};
+
+static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
+ MBOX_INC_SEL_EXTAR_REG(0x0, DSP),
+ MBOX_INC_SEL_EXTAR_REG(0x4, MSC_THR),
+ MBOX_INC_SEL_EXTAR_REG(0x5, MSC_THR),
+ MBOX_INC_SEL_EXTAR_REG(0x9, ISS),
+ /* event 0xa uses two extra registers */
+ MBOX_INC_SEL_EXTAR_REG(0xa, ISS),
+ MBOX_INC_SEL_EXTAR_REG(0xa, PLD),
+ MBOX_INC_SEL_EXTAR_REG(0xb, PLD),
+ /* events 0xd ~ 0x10 use the same extra register */
+ MBOX_INC_SEL_EXTAR_REG(0xd, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0xe, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0xf, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0x10, ZDP_CTL_FVC),
+ MBOX_INC_SEL_EXTAR_REG(0x16, PGT),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
+ MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
+ EVENT_EXTRA_END
+};
+
+/* Nehalem-EX or Westmere-EX ? */
+bool uncore_nhmex;
+
+static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ bool ret = false;
+ u64 mask;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ er = &box->shared_regs[idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!atomic_read(&er->ref) || er->config == config) {
+ atomic_inc(&er->ref);
+ er->config = config;
+ ret = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return ret;
+ }
+ /*
+ * The ZDP_CTL_FVC MSR has 4 fields which are used to control
+ * events 0xd ~ 0x10. Besides these 4 fields, there are additional
+ * fields which are shared.
+ */
+ idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (WARN_ON_ONCE(idx >= 4))
+ return false;
+
+ /* mask of the shared fields */
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+
+ raw_spin_lock_irqsave(&er->lock, flags);
+ /* add mask of the non-shared field if it's in use */
+ if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
+ if (uncore_nhmex)
+ mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ }
+
+ if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
+ atomic_add(1 << (idx * 8), &er->ref);
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ er->config &= ~mask;
+ er->config |= (config & mask);
+ ret = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return ret;
+}
+
+static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ er = &box->shared_regs[idx];
+ atomic_dec(&er->ref);
+ return;
+ }
+
+ idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+ atomic_sub(1 << (idx * 8), &er->ref);
+}
+
+u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ int idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
+ u64 config = reg1->config;
+
+ /* get the non-shared control bits and shift them */
+ idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (uncore_nhmex)
+ config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (new_idx > orig_idx) {
+ idx = new_idx - orig_idx;
+ config <<= 3 * idx;
+ } else {
+ idx = orig_idx - new_idx;
+ config >>= 3 * idx;
+ }
+
+ /* add the shared control bits back */
+ if (uncore_nhmex)
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ else
+ config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ if (modify) {
+ /* adjust the main event selector */
+ if (new_idx > orig_idx)
+ hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+ else
+ hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
+ reg1->config = config;
+ reg1->idx = ~0xff | new_idx;
+ }
+ return config;
+}
+
+static struct event_constraint *
+nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ int i, idx[2], alloc = 0;
+ u64 config1 = reg1->config;
+
+ idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
+ idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
+again:
+ for (i = 0; i < 2; i++) {
+ if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
+ idx[i] = 0xff;
+
+ if (idx[i] == 0xff)
+ continue;
+
+ if (!nhmex_mbox_get_shared_reg(box, idx[i],
+ __BITS_VALUE(config1, i, 32)))
+ goto fail;
+ alloc |= (0x1 << i);
+ }
+
+ /* for the match/mask registers */
+ if (reg2->idx != EXTRA_REG_NONE &&
+ (uncore_box_is_fake(box) || !reg2->alloc) &&
+ !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
+ goto fail;
+
+ /*
+ * If it's a fake box -- as per validate_{group,event}() we
+ * shouldn't touch event state and we can avoid doing so
+ * since both will only call get_event_constraints() once
+ * on each event, this avoids the need for reg->alloc.
+ */
+ if (!uncore_box_is_fake(box)) {
+ if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
+ nhmex_mbox_alter_er(event, idx[0], true);
+ reg1->alloc |= alloc;
+ if (reg2->idx != EXTRA_REG_NONE)
+ reg2->alloc = 1;
+ }
+ return NULL;
+fail:
+ if (idx[0] != 0xff && !(alloc & 0x1) &&
+ idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
+ /*
+ * events 0xd ~ 0x10 are functionally identical, but are
+ * controlled by different fields in the ZDP_CTL_FVC
+ * register. If we failed to take one field, try the
+ * other 3 choices.
+ */
+ BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
+ idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ idx[0] = (idx[0] + 1) % 4;
+ idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
+ if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
+ config1 = nhmex_mbox_alter_er(event, idx[0], false);
+ goto again;
+ }
+ }
+
+ if (alloc & 0x1)
+ nhmex_mbox_put_shared_reg(box, idx[0]);
+ if (alloc & 0x2)
+ nhmex_mbox_put_shared_reg(box, idx[1]);
+ return &constraint_empty;
+}
+
+static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+
+ if (uncore_box_is_fake(box))
+ return;
+
+ if (reg1->alloc & 0x1)
+ nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
+ if (reg1->alloc & 0x2)
+ nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
+ reg1->alloc = 0;
+
+ if (reg2->alloc) {
+ nhmex_mbox_put_shared_reg(box, reg2->idx);
+ reg2->alloc = 0;
+ }
+}
+
+static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
+{
+ if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+ return er->idx;
+ return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
+}
+
+static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_type *type = box->pmu->type;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ struct extra_reg *er;
+ unsigned msr;
+ int reg_idx = 0;
+ /*
+ * The mbox events may require 2 extra MSRs at the most. But only
+ * the lower 32 bits in these MSRs are significant, so we can use
+ * config1 to pass two MSRs' config.
+ */
+ for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
+ if (er->event != (event->hw.config & er->config_mask))
+ continue;
+ if (event->attr.config1 & ~er->valid_mask)
+ return -EINVAL;
+
+ msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
+ if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
+ return -EINVAL;
+
+ /* always use the 32~63 bits to pass the PLD config */
+ if (er->idx == EXTRA_REG_NHMEX_M_PLD)
+ reg_idx = 1;
+ else if (WARN_ON_ONCE(reg_idx > 0))
+ return -EINVAL;
+
+ reg1->idx &= ~(0xff << (reg_idx * 8));
+ reg1->reg &= ~(0xffff << (reg_idx * 16));
+ reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
+ reg1->reg |= msr << (reg_idx * 16);
+ reg1->config = event->attr.config1;
+ reg_idx++;
+ }
+ /*
+ * The mbox only provides ability to perform address matching
+ * for the PLD events.
+ */
+ if (reg_idx == 2) {
+ reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+ if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+ reg2->config = event->attr.config2;
+ else
+ reg2->config = ~0ULL;
+ if (box->pmu->pmu_idx == 0)
+ reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+ else
+ reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+ }
+ return 0;
+}
+
+static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ u64 config;
+
+ if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
+ return box->shared_regs[idx].config;
+
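+	/*
+	 * All ZDP_CTL_FVC pseudo indices refer to the single FVC shared
+	 * register, so read its config under that register's lock.
+	 */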
+ er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ config = er->config;
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+ return config;
+}
+
+static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int idx;
+
+ idx = __BITS_VALUE(reg1->idx, 0, 8);
+ if (idx != 0xff)
+ wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
+ nhmex_mbox_shared_reg_config(box, idx));
+ idx = __BITS_VALUE(reg1->idx, 1, 8);
+ if (idx != 0xff)
+ wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
+ nhmex_mbox_shared_reg_config(box, idx));
+
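+	/*
+	 * Program the match/mask registers: clear MM_CFG first; if a filter
+	 * is configured, write the address match and mask values and then
+	 * set the enable bit.
+	 */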
+ if (reg2->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg2->reg, 0);
+ if (reg2->config != ~0ULL) {
+ wrmsrl(reg2->reg + 1,
+ reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+ wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+ (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+ wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ }
+ }
+
+ wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
+
+static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
+ &format_attr_count_mode.attr,
+ &format_attr_storage_mode.attr,
+ &format_attr_wrap_mode.attr,
+ &format_attr_flag_mode.attr,
+ &format_attr_inc_sel.attr,
+ &format_attr_set_flag_sel.attr,
+ &format_attr_filter_cfg_en.attr,
+ &format_attr_filter_match.attr,
+ &format_attr_filter_mask.attr,
+ &format_attr_dsp.attr,
+ &format_attr_thr.attr,
+ &format_attr_fvc.attr,
+ &format_attr_pgt.attr,
+ &format_attr_map.attr,
+ &format_attr_iss.attr,
+ &format_attr_pld.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_mbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_mbox_formats_attr,
+};
+
+static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
+ { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_mbox_msr_enable_event,
+ .hw_config = nhmex_mbox_hw_config,
+ .get_constraint = nhmex_mbox_get_constraint,
+ .put_constraint = nhmex_mbox_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_mbox = {
+ .name = "mbox",
+ .num_counters = 6,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_M0_MSR_PMU_CTL0,
+ .perf_ctr = NHMEX_M0_MSR_PMU_CNT0,
+ .event_mask = NHMEX_M_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_M0_MSR_GLOBAL_CTL,
+ .msr_offset = NHMEX_M_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 8,
+ .event_descs = nhmex_uncore_mbox_events,
+ .ops = &nhmex_uncore_mbox_ops,
+ .format_group = &nhmex_uncore_mbox_format_group,
+};
+
+void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ int port;
+
+ /* adjust the main event selector and extra register index */
+ if (reg1->idx % 2) {
+ reg1->idx--;
+ hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ } else {
+ reg1->idx++;
+ hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ }
+
+ /* adjust extra register config */
+ port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
+ switch (reg1->idx % 6) {
+ case 2:
+ /* shift the 8~15 bits to the 0~7 bits */
+ reg1->config >>= 8;
+ break;
+ case 3:
+ /* shift the 0~7 bits to the 8~15 bits */
+ reg1->config <<= 8;
+ break;
+	}
+}
+
+/*
+ * Each rbox has 4 event sets, which monitor PQI ports 0~3 or 4~7.
+ * An event set consists of 6 events; the 3rd and 4th events in
+ * an event set use the same extra register, so an event set uses
+ * 5 extra registers.
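+ * Within a set the 6 events therefore map onto shared registers
+ * 0, 1, 2, 2, 3, 4, and each set adds a further offset of 5.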
+ */
+static struct event_constraint *
+nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ int idx, er_idx;
+ u64 config1;
+ bool ok = false;
+
+ if (!uncore_box_is_fake(box) && reg1->alloc)
+ return NULL;
+
+ idx = reg1->idx % 6;
+ config1 = reg1->config;
+again:
+ er_idx = idx;
+ /* the 3rd and 4th events use the same extra register */
+ if (er_idx > 2)
+ er_idx--;
+ er_idx += (reg1->idx / 6) * 5;
+
+ er = &box->shared_regs[er_idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (idx < 2) {
+ if (!atomic_read(&er->ref) || er->config == reg1->config) {
+ atomic_inc(&er->ref);
+ er->config = reg1->config;
+ ok = true;
+ }
+ } else if (idx == 2 || idx == 3) {
+ /*
+		 * these two events use different fields in an extra register,
+		 * the 0~7 bits and the 8~15 bits respectively.
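+		 * The two bytes of er->ref act as independent reference
+		 * counts for the two fields.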
+ */
+ u64 mask = 0xff << ((idx - 2) * 8);
+ if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
+ !((er->config ^ config1) & mask)) {
+ atomic_add(1 << ((idx - 2) * 8), &er->ref);
+ er->config &= ~mask;
+ er->config |= config1 & mask;
+ ok = true;
+ }
+ } else {
+ if (!atomic_read(&er->ref) ||
+ (er->config == (hwc->config >> 32) &&
+ er->config1 == reg1->config &&
+ er->config2 == reg2->config)) {
+ atomic_inc(&er->ref);
+ er->config = (hwc->config >> 32);
+ er->config1 = reg1->config;
+ er->config2 = reg2->config;
+ ok = true;
+ }
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (!ok) {
+ /*
+ * The Rbox events are always in pairs. The paired
+		 * events are functionally identical, but use different
+ * extra registers. If we failed to take an extra
+ * register, try the alternative.
+ */
+ if (idx % 2)
+ idx--;
+ else
+ idx++;
+ if (idx != reg1->idx % 6) {
+ if (idx == 2)
+ config1 >>= 8;
+ else if (idx == 3)
+ config1 <<= 8;
+ goto again;
+ }
+ } else {
+ if (!uncore_box_is_fake(box)) {
+ if (idx != reg1->idx % 6)
+ nhmex_rbox_alter_er(box, event);
+ reg1->alloc = 1;
+ }
+ return NULL;
+ }
+ return &constraint_empty;
+}
+
+static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ int idx, er_idx;
+
+ if (uncore_box_is_fake(box) || !reg1->alloc)
+ return;
+
+ idx = reg1->idx % 6;
+ er_idx = idx;
+ if (er_idx > 2)
+ er_idx--;
+ er_idx += (reg1->idx / 6) * 5;
+
+ er = &box->shared_regs[er_idx];
+ if (idx == 2 || idx == 3)
+ atomic_sub(1 << ((idx - 2) * 8), &er->ref);
+ else
+ atomic_dec(&er->ref);
+
+ reg1->alloc = 0;
+}
+
+static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ int idx;
+
+ idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
+ NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
+ if (idx >= 0x18)
+ return -EINVAL;
+
+ reg1->idx = idx;
+ reg1->config = event->attr.config1;
+
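+	/* events 4 and 5 carry the XBR MM_CFG value in the upper 32 bits of config */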
+ switch (idx % 6) {
+ case 4:
+ case 5:
+ hwc->config |= event->attr.config & (~0ULL << 32);
+ reg2->config = event->attr.config2;
+ break;
+	}
+ return 0;
+}
+
+static u64 nhmex_rbox_shared_reg_config(struct intel_uncore_box *box, int idx)
+{
+ struct intel_uncore_extra_reg *er;
+ unsigned long flags;
+ u64 config;
+
+ er = &box->shared_regs[idx];
+
+ raw_spin_lock_irqsave(&er->lock, flags);
+ config = er->config;
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ return config;
+}
+
+static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+ int idx, port;
+
+ idx = reg1->idx;
+ port = idx / 6 + box->pmu->pmu_idx * 4;
+
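+	/*
+	 * events 0/1 use the per-port IPERF_CFG registers, events 2/3 share
+	 * the QLX_CFG register, and events 4/5 program the two XBR
+	 * match/mask register sets.
+	 */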
+ switch (idx % 6) {
+ case 0:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+ break;
+ case 1:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
+ break;
+ case 2:
+ case 3:
+ wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+ nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
+ break;
+ case 4:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+ break;
+ case 5:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
+ break;
+	}
+
+ wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
+ (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
+DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
+
+static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
+ &format_attr_event5.attr,
+ &format_attr_xbr_mm_cfg.attr,
+ &format_attr_xbr_match.attr,
+ &format_attr_xbr_mask.attr,
+ &format_attr_qlx_cfg.attr,
+ &format_attr_iperf_cfg.attr,
+ NULL,
+};
+
+static struct attribute_group nhmex_uncore_rbox_format_group = {
+ .name = "format",
+ .attrs = nhmex_uncore_rbox_formats_attr,
+};
+
+static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(qpi0_flit_send, "event=0x0,iperf_cfg=0x80000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_filt_send, "event=0x6,iperf_cfg=0x80000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt, "event=0x0,iperf_cfg=0x40000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt, "event=0x6,iperf_cfg=0x40000000"),
+ INTEL_UNCORE_EVENT_DESC(qpi0_date_response, "event=0x0,iperf_cfg=0xc4"),
+ INTEL_UNCORE_EVENT_DESC(qpi1_date_response, "event=0x6,iperf_cfg=0xc4"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
+ NHMEX_UNCORE_OPS_COMMON_INIT(),
+ .enable_event = nhmex_rbox_msr_enable_event,
+ .hw_config = nhmex_rbox_hw_config,
+ .get_constraint = nhmex_rbox_get_constraint,
+ .put_constraint = nhmex_rbox_put_constraint,
+};
+
+static struct intel_uncore_type nhmex_uncore_rbox = {
+ .name = "rbox",
+ .num_counters = 8,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_ctl = NHMEX_R_MSR_PMON_CTL0,
+ .perf_ctr = NHMEX_R_MSR_PMON_CNT0,
+ .event_mask = NHMEX_R_PMON_RAW_EVENT_MASK,
+ .box_ctl = NHMEX_R_MSR_GLOBAL_CTL,
+ .msr_offset = NHMEX_R_MSR_OFFSET,
+ .pair_ctr_ctl = 1,
+ .num_shared_regs = 20,
+ .event_descs = nhmex_uncore_rbox_events,
+ .ops = &nhmex_uncore_rbox_ops,
+ .format_group = &nhmex_uncore_rbox_format_group
+};
+
+static struct intel_uncore_type *nhmex_msr_uncores[] = {
+ &nhmex_uncore_ubox,
+ &nhmex_uncore_cbox,
+ &nhmex_uncore_bbox,
+ &nhmex_uncore_sbox,
+ &nhmex_uncore_mbox,
+ &nhmex_uncore_rbox,
+ &nhmex_uncore_wbox,
+ NULL,
+};
+/* end of Nehalem-EX uncore support */
+
+static void uncore_assign_hw_event(struct intel_uncore_box *box, struct perf_event *event, int idx)
{
struct hw_perf_event *hwc = &event->hw;
@@ -787,8 +1849,7 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box,
hwc->event_base = uncore_perf_ctr(box, hwc->idx);
}
-static void uncore_perf_event_update(struct intel_uncore_box *box,
- struct perf_event *event)
+static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
{
u64 prev_count, new_count, delta;
int shift;
@@ -858,14 +1919,12 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
box->hrtimer.function = uncore_pmu_hrtimer;
}
-struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
- int cpu)
+struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
{
struct intel_uncore_box *box;
int i, size;
- size = sizeof(*box) + type->num_shared_regs *
- sizeof(struct intel_uncore_extra_reg);
+ size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
if (!box)
@@ -915,12 +1974,11 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
* perf core schedules event on the basis of cpu, uncore events are
* collected by one of the cpus inside a physical package.
*/
- return uncore_pmu_to_box(uncore_event_to_pmu(event),
- smp_processor_id());
+ return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
}
-static int uncore_collect_events(struct intel_uncore_box *box,
- struct perf_event *leader, bool dogrp)
+static int
+uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
{
struct perf_event *event;
int n, max_count;
@@ -952,8 +2010,7 @@ static int uncore_collect_events(struct intel_uncore_box *box,
}
static struct event_constraint *
-uncore_get_event_constraint(struct intel_uncore_box *box,
- struct perf_event *event)
+uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
struct intel_uncore_type *type = box->pmu->type;
struct event_constraint *c;
@@ -977,15 +2034,13 @@ uncore_get_event_constraint(struct intel_uncore_box *box,
return &type->unconstrainted;
}
-static void uncore_put_event_constraint(struct intel_uncore_box *box,
- struct perf_event *event)
+static void uncore_put_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
if (box->pmu->type->ops->put_constraint)
box->pmu->type->ops->put_constraint(box, event);
}
-static int uncore_assign_events(struct intel_uncore_box *box,
- int assign[], int n)
+static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
{
unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
@@ -1256,6 +2311,7 @@ int uncore_pmu_event_init(struct perf_event *event)
event->hw.idx = -1;
event->hw.last_tag = ~0ULL;
event->hw.extra_reg.idx = EXTRA_REG_NONE;
+ event->hw.branch_reg.idx = EXTRA_REG_NONE;
if (event->attr.config == UNCORE_FIXED_EVENT) {
/* no fixed counter */
@@ -1326,7 +2382,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
type->attr_groups[1] = NULL;
}
-static void uncore_types_exit(struct intel_uncore_type **types)
+static void __init uncore_types_exit(struct intel_uncore_type **types)
{
int i;
for (i = 0; types[i]; i++)
@@ -1407,8 +2463,7 @@ static bool pcidrv_registered;
/*
* add a pci uncore device
*/
-static int __devinit uncore_pci_add(struct intel_uncore_type *type,
- struct pci_dev *pdev)
+static int __devinit uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
{
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
@@ -1485,6 +2540,7 @@ static int __devinit uncore_pci_probe(struct pci_dev *pdev,
struct intel_uncore_type *type;
type = (struct intel_uncore_type *)id->driver_data;
+
return uncore_pci_add(type, pdev);
}
@@ -1612,8 +2668,8 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
return 0;
}
-static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
- int old_cpu, int new_cpu)
+static void __cpuinit
+uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
{
struct intel_uncore_type *type;
struct intel_uncore_pmu *pmu;
@@ -1694,8 +2750,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
uncore_change_context(pci_uncores, -1, cpu);
}
-static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int
+ __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
@@ -1732,12 +2788,12 @@ static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
}
static struct notifier_block uncore_cpu_nb __cpuinitdata = {
- .notifier_call = uncore_cpu_notifier,
+ .notifier_call = uncore_cpu_notifier,
/*
* to migrate uncore events, our notifier should be executed
* before perf core's notifier.
*/
- .priority = CPU_PRI_PERF + 1,
+ .priority = CPU_PRI_PERF + 1,
};
static void __init uncore_cpu_setup(void *dummy)
@@ -1767,6 +2823,15 @@ static int __init uncore_cpu_init(void)
snbep_uncore_cbox.num_boxes = max_cores;
msr_uncores = snbep_msr_uncores;
break;
+ case 46: /* Nehalem-EX */
+ uncore_nhmex = true;
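+		/* fall through */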
+ case 47: /* Westmere-EX aka. Xeon E7 */
+ if (!uncore_nhmex)
+ nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+ if (nhmex_uncore_cbox.num_boxes > max_cores)
+ nhmex_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = nhmex_msr_uncores;
+ break;
default:
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index b13e9ea81def..5b81c1856aac 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,9 +5,7 @@
#include "perf_event.h"
#define UNCORE_PMU_NAME_LEN 32
-#define UNCORE_BOX_HASH_SIZE 8
-
-#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
@@ -115,6 +113,10 @@
SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+#define SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_PMON_CTL_EV_SEL_EXT)
+
/* SNB-EP pci control register */
#define SNBEP_PCI_PMON_BOX_CTL 0xf4
#define SNBEP_PCI_PMON_CTL0 0xd8
@@ -158,6 +160,188 @@
#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
+/* NHM-EX event control */
+#define NHMEX_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define NHMEX_PMON_CTL_UMASK_MASK 0x0000ff00
+#define NHMEX_PMON_CTL_EN_BIT0 (1 << 0)
+#define NHMEX_PMON_CTL_EDGE_DET (1 << 18)
+#define NHMEX_PMON_CTL_PMI_EN (1 << 20)
+#define NHMEX_PMON_CTL_EN_BIT22 (1 << 22)
+#define NHMEX_PMON_CTL_INVERT (1 << 23)
+#define NHMEX_PMON_CTL_TRESH_MASK 0xff000000
+#define NHMEX_PMON_RAW_EVENT_MASK (NHMEX_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_PMON_CTL_UMASK_MASK | \
+ NHMEX_PMON_CTL_EDGE_DET | \
+ NHMEX_PMON_CTL_INVERT | \
+ NHMEX_PMON_CTL_TRESH_MASK)
+
+/* NHM-EX Ubox */
+#define NHMEX_U_MSR_PMON_GLOBAL_CTL 0xc00
+#define NHMEX_U_MSR_PMON_CTR 0xc11
+#define NHMEX_U_MSR_PMON_EV_SEL 0xc10
+
+#define NHMEX_U_PMON_GLOBAL_EN (1 << 0)
+#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL 0x0000001e
+#define NHMEX_U_PMON_GLOBAL_EN_ALL (1 << 28)
+#define NHMEX_U_PMON_GLOBAL_RST_ALL (1 << 29)
+#define NHMEX_U_PMON_GLOBAL_FRZ_ALL (1 << 31)
+
+#define NHMEX_U_PMON_RAW_EVENT_MASK \
+ (NHMEX_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_PMON_CTL_EDGE_DET)
+
+/* NHM-EX Cbox */
+#define NHMEX_C0_MSR_PMON_GLOBAL_CTL 0xd00
+#define NHMEX_C0_MSR_PMON_CTR0 0xd11
+#define NHMEX_C0_MSR_PMON_EV_SEL0 0xd10
+#define NHMEX_C_MSR_OFFSET 0x20
+
+/* NHM-EX Bbox */
+#define NHMEX_B0_MSR_PMON_GLOBAL_CTL 0xc20
+#define NHMEX_B0_MSR_PMON_CTR0 0xc31
+#define NHMEX_B0_MSR_PMON_CTL0 0xc30
+#define NHMEX_B_MSR_OFFSET 0x40
+#define NHMEX_B0_MSR_MATCH 0xe45
+#define NHMEX_B0_MSR_MASK 0xe46
+#define NHMEX_B1_MSR_MATCH 0xe4d
+#define NHMEX_B1_MSR_MASK 0xe4e
+
+#define NHMEX_B_PMON_CTL_EN (1 << 0)
+#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT 1
+#define NHMEX_B_PMON_CTL_EV_SEL_MASK \
+ (0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
+#define NHMEX_B_PMON_CTR_SHIFT 6
+#define NHMEX_B_PMON_CTR_MASK \
+ (0x3 << NHMEX_B_PMON_CTR_SHIFT)
+#define NHMEX_B_PMON_RAW_EVENT_MASK \
+ (NHMEX_B_PMON_CTL_EV_SEL_MASK | \
+ NHMEX_B_PMON_CTR_MASK)
+
+/* NHM-EX Sbox */
+#define NHMEX_S0_MSR_PMON_GLOBAL_CTL 0xc40
+#define NHMEX_S0_MSR_PMON_CTR0 0xc51
+#define NHMEX_S0_MSR_PMON_CTL0 0xc50
+#define NHMEX_S_MSR_OFFSET 0x80
+#define NHMEX_S0_MSR_MM_CFG 0xe48
+#define NHMEX_S0_MSR_MATCH 0xe49
+#define NHMEX_S0_MSR_MASK 0xe4a
+#define NHMEX_S1_MSR_MM_CFG 0xe58
+#define NHMEX_S1_MSR_MATCH 0xe59
+#define NHMEX_S1_MSR_MASK 0xe5a
+
+#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
+#define NHMEX_S_EVENT_TO_R_PROG_EV 0
+
+/* NHM-EX Mbox */
+#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
+#define NHMEX_M0_MSR_PMU_DSP 0xca5
+#define NHMEX_M0_MSR_PMU_ISS 0xca6
+#define NHMEX_M0_MSR_PMU_MAP 0xca7
+#define NHMEX_M0_MSR_PMU_MSC_THR 0xca8
+#define NHMEX_M0_MSR_PMU_PGT 0xca9
+#define NHMEX_M0_MSR_PMU_PLD 0xcaa
+#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC 0xcab
+#define NHMEX_M0_MSR_PMU_CTL0 0xcb0
+#define NHMEX_M0_MSR_PMU_CNT0 0xcb1
+#define NHMEX_M_MSR_OFFSET 0x40
+#define NHMEX_M0_MSR_PMU_MM_CFG 0xe54
+#define NHMEX_M1_MSR_PMU_MM_CFG 0xe5c
+
+#define NHMEX_M_PMON_MM_CFG_EN (1ULL << 63)
+#define NHMEX_M_PMON_ADDR_MATCH_MASK 0x3ffffffffULL
+#define NHMEX_M_PMON_ADDR_MASK_MASK 0x7ffffffULL
+#define NHMEX_M_PMON_ADDR_MASK_SHIFT 34
+
+#define NHMEX_M_PMON_CTL_EN (1 << 0)
+#define NHMEX_M_PMON_CTL_PMI_EN (1 << 1)
+#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT 2
+#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK \
+ (0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
+#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT 4
+#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK \
+ (0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
+#define NHMEX_M_PMON_CTL_WRAP_MODE (1 << 6)
+#define NHMEX_M_PMON_CTL_FLAG_MODE (1 << 7)
+#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT 9
+#define NHMEX_M_PMON_CTL_INC_SEL_MASK \
+ (0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
+#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT 19
+#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK \
+ (0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
+#define NHMEX_M_PMON_RAW_EVENT_MASK \
+ (NHMEX_M_PMON_CTL_COUNT_MODE_MASK | \
+ NHMEX_M_PMON_CTL_STORAGE_MODE_MASK | \
+ NHMEX_M_PMON_CTL_WRAP_MODE | \
+ NHMEX_M_PMON_CTL_FLAG_MODE | \
+ NHMEX_M_PMON_CTL_INC_SEL_MASK | \
+ NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
+
+#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
+#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))
+
+#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n)))
+
+/*
+ * If the 7th bit is not set, use the 9~13 bits to select the event;
+ * otherwise use the 19~21 bits to select the event.
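+ * (The 7th bit is NHMEX_M_PMON_CTL_FLAG_MODE, which MBOX_SET_FLAG_SEL()
+ * sets.)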
+ */
+#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
+#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
+ NHMEX_M_PMON_CTL_FLAG_MODE)
+#define MBOX_INC_SEL_EXTAR_REG(c, r) \
+ EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
+ MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
+#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
+ EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
+ MBOX_SET_FLAG_SEL_MASK, \
+ (u64)-1, NHMEX_M_##r)
+
+/* NHM-EX Rbox */
+#define NHMEX_R_MSR_GLOBAL_CTL 0xe00
+#define NHMEX_R_MSR_PMON_CTL0 0xe10
+#define NHMEX_R_MSR_PMON_CNT0 0xe11
+#define NHMEX_R_MSR_OFFSET 0x20
+
+#define NHMEX_R_MSR_PORTN_QLX_CFG(n) \
+ ((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
+#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n) (0xe04 + (n))
+#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n) (0xe24 + (n))
+#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n) \
+ (((n) < 4 ? 0 : 0x10) + (n) * 4)
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) \
+ (0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
+#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) \
+ (0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
+#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n) \
+ (NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
+
+#define NHMEX_R_PMON_CTL_EN (1 << 0)
+#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT 1
+#define NHMEX_R_PMON_CTL_EV_SEL_MASK \
+ (0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
+#define NHMEX_R_PMON_CTL_PMI_EN (1 << 6)
+#define NHMEX_R_PMON_RAW_EVENT_MASK NHMEX_R_PMON_CTL_EV_SEL_MASK
+
+/* NHM-EX Wbox */
+#define NHMEX_W_MSR_GLOBAL_CTL 0xc80
+#define NHMEX_W_MSR_PMON_CNT0 0xc90
+#define NHMEX_W_MSR_PMON_EVT_SEL0 0xc91
+#define NHMEX_W_MSR_PMON_FIXED_CTR 0x394
+#define NHMEX_W_MSR_PMON_FIXED_CTL 0x395
+
+#define NHMEX_W_PMON_GLOBAL_FIXED_EN (1ULL << 31)
+
struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
@@ -178,6 +362,8 @@ struct intel_uncore_type {
unsigned msr_offset;
unsigned num_shared_regs:8;
unsigned single_fixed:1;
+ unsigned pair_ctr_ctl:1;
+ unsigned *msr_offsets;
struct event_constraint unconstrainted;
struct event_constraint *constraints;
struct intel_uncore_pmu *pmus;
@@ -213,7 +399,7 @@ struct intel_uncore_pmu {
struct intel_uncore_extra_reg {
raw_spinlock_t lock;
- u64 config1;
+ u64 config, config1, config2;
atomic_t ref;
};
@@ -295,43 +481,47 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
return idx * 8 + box->pmu->type->perf_ctr;
}
-static inline
-unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
+{
+ struct intel_uncore_pmu *pmu = box->pmu;
+ return pmu->type->msr_offsets ?
+ pmu->type->msr_offsets[pmu->pmu_idx] :
+ pmu->type->msr_offset * pmu->pmu_idx;
+}
+
+static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->box_ctl)
return 0;
- return box->pmu->type->box_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->fixed_ctl)
return 0;
- return box->pmu->type->fixed_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
- return box->pmu->type->fixed_ctr +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}
static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
- return idx + box->pmu->type->event_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->event_ctl +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+ uncore_msr_box_offset(box);
}
static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
- return idx + box->pmu->type->perf_ctr +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->perf_ctr +
+ (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+ uncore_msr_box_offset(box);
}
static inline
@@ -422,3 +612,8 @@ static inline void uncore_box_init(struct intel_uncore_box *box)
box->pmu->type->ops->init_box(box);
}
}
+
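+/*
+ * A fake box (negative phys_id) is only used by the validate_{group,event}()
+ * paths, so constraint code must not modify persistent event state when
+ * operating on one.
+ */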
+static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
+{
+ return (box->phys_id < 0);
+}
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 41857970517f..ed858e9e9a74 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -944,7 +944,7 @@ void __init e820_reserve_resources(void)
for (i = 0; i < e820_saved.nr_map; i++) {
struct e820entry *entry = &e820_saved.map[i];
firmware_map_add_early(entry->addr,
- entry->addr + entry->size - 1,
+ entry->addr + entry->size,
e820_type_to_string(entry->type));
}
}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1f5f1d5d2a02..d44f7829968e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -270,7 +270,7 @@ void fixup_irqs(void)
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
- affinity = cpu_all_mask;
+ affinity = cpu_online_mask;
}
chip = irq_data_get_irq_chip(data);
@@ -328,6 +328,7 @@ void fixup_irqs(void)
chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock);
}
+ __this_cpu_write(vector_irq[vector], -1);
}
}
#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 1d5d31ea686b..dc1404bf8e4b 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
{
struct setup_data_node *node;
struct setup_data *data;
- int error = -ENOMEM;
+ int error;
struct dentry *d;
struct page *pg;
u64 pa_data;
@@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent)
while (pa_data) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ if (!node) {
+ error = -ENOMEM;
goto err_dir;
+ }
pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 8a2ce8fd41c0..82746f942cd8 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
unsigned int *current_size)
{
struct microcode_header_amd *mc_hdr;
- unsigned int actual_size;
+ unsigned int actual_size, patch_size;
u16 equiv_cpu_id;
/* size of the current patch we're staring at */
- *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+ patch_size = *(u32 *)(ucode_ptr + 4);
+ *current_size = patch_size + SECTION_HDR_SIZE;
equiv_cpu_id = find_equiv_id();
if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
/*
* now that the header looks sane, verify its size
*/
- actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+ actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
if (!actual_size)
return 0;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba8..a3b57a27be88 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
return address_mask(ctxt, reg);
}
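+/* Add inc to *reg, updating only the bits selected by mask. */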
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+ assign_masked(reg, *reg + inc, mask);
+}
+
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
+ ulong mask;
+
if (ctxt->ad_bytes == sizeof(unsigned long))
- *reg += inc;
+ mask = ~0UL;
else
- *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ mask = ad_mask(ctxt);
+ masked_increment(reg, mask, inc);
+}
+
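+/* Adjust RSP, respecting the current stack address size (stack_mask). */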
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+ masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
}
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ rsp_increment(ctxt, -bytes);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
int rc;
struct segmented_address addr;
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+ rsp_increment(ctxt, len);
return rc;
}
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
- ctxt->op_bytes);
+ rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+ rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1df8fb9e1d5d..e498b18f010c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
addr &= 1;
if (addr == 0) {
if (val & 0x10) {
+ u8 edge_irr = s->irr & ~s->elcr;
+ int i;
+			bool found = false;
+ struct kvm_vcpu *vcpu;
+
s->init4 = val & 1;
s->last_irr = 0;
s->irr &= s->elcr;
@@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x08)
pr_pic_unimpl(
"level sensitive irq not supported");
+
+ kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+ if (kvm_apic_accept_pic_intr(vcpu)) {
+ found = true;
+ break;
+ }
+
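+			/*
+			 * If at least one vcpu accepts PIC interrupts, clear the
+			 * in-service bits of the edge-triggered IRQs that were
+			 * pending before this init.
+			 */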
+ if (found)
+ for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+ if (edge_irr & (1 << irq))
+ pic_clear_isr(s, irq);
} else if (val & 0x08) {
if (val & 0x04)
s->poll = 1;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca00423938..7fbd0d273ea8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
LIST_HEAD(invalid_list);
/*
+ * Never scan more than sc->nr_to_scan VM instances.
+	 * In practice this condition is never hit, since we do not try
+ * to shrink more than one VM and it is very unlikely to see
+ * !n_used_mmu_pages so many times.
+ */
+ if (!nr_to_scan--)
+ break;
+ /*
* n_used_mmu_pages is accessed without holding kvm->mmu_lock
	 * here. We may skip a VM instance erroneously, but we do not
* want to shrink a VM that only started to populate its MMU
* anyway.
*/
- if (kvm->arch.n_used_mmu_pages > 0) {
- if (!nr_to_scan--)
- break;
+ if (!kvm->arch.n_used_mmu_pages)
continue;
- }
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c39b60707e02..c00f03de1b79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
-#else
- /*
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
#endif
reload_tss();
#ifdef CONFIG_X86_64
@@ -6370,6 +6363,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+#ifndef CONFIG_X86_64
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ *
+ * We can't defer this to vmx_load_host_state() since that function
+	 * may be executed in interrupt context, which saves and restores segments
+ * around it, nullifying its effect.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
| (1 << VCPU_EXREG_CPL)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b59508ff07..148ed666e311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 9
+#define KVM_SAVE_MSRS_BEGIN 10
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
*/
getboottime(&boot);
+ if (kvm->arch.kvmclock_offset) {
+ struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
+ boot = timespec_sub(boot, ts);
+ }
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
@@ -1996,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
+ case MSR_KVM_PV_EOI_EN:
+ data = vcpu->arch.pv_eoi.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8ca..b91e48512425 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
}
/*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
struct vm_area_struct *vma = find_vma(mm, addr);
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
struct vm_area_struct *svma;
unsigned long saddr;
pte_t *spte = NULL;
+ pte_t *pte;
if (!vma_shareable(vma, addr))
- return;
+ return (pte_t *)pmd_alloc(mm, pud, addr);
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
put_page(virt_to_page(spte));
spin_unlock(&mm->page_table_lock);
out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
}
/*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
} else {
BUG_ON(sz != PMD_SIZE);
if (pud_none(*pud))
- huge_pmd_share(mm, addr, pud);
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a96160..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/*
* On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it, in the
- * error case, and during early boot (for EFI) we fall back
- * to cpa_flush_all (which uses wbinvd):
+	 * avoid the wbinvd. If the CPU does not support it, or in the
+	 * error case, we fall back to cpa_flush_all (which uses
+	 * wbinvd):
*/
- if (early_boot_irqs_disabled)
- __cpa_flush_all((void *)(long)cache);
- else if (!ret && cpu_has_clflush) {
+ if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb6..4ddf497ca65b 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
- return;
+ return -1;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
- return;
+ return -1;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return;
+ return -1;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return;
+ return -1;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
- return;
+ return -1;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
- return;
+ return -1;
}
node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
+ return 0;
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2dc29f51e75a..92660edaa1e7 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status;
}
-static int efi_set_rtc_mmss(unsigned long nowtime)
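+/*
+ * Call the firmware's GetTime service through the physical-mode thunk;
+ * this is needed before set_virtual_address_map has been invoked.
+ */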
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+ efi_time_cap_t *tc)
+{
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ efi_call_phys_prelog();
+ status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+ virt_to_phys(tc));
+ efi_call_phys_epilog();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes;
efi_status_t status;
@@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
return 0;
}
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
{
efi_status_t status;
efi_time_t eft;
@@ -606,13 +621,18 @@ static int __init efi_runtime_init(void)
}
/*
* We will only need *early* access to the following
- * EFI runtime service before set_virtual_address_map
+ * two EFI runtime services before set_virtual_address_map
* is invoked.
*/
+ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
runtime->set_virtual_address_map;
-
+ /*
+	 * Make sure efi_get_time can be called before entering
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
early_iounmap(runtime, sizeof(efi_runtime_services_t));
return 0;
@@ -700,10 +720,12 @@ void __init efi_init(void)
efi_enabled = 0;
return;
}
+#ifdef CONFIG_X86_32
if (efi_native) {
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
}
+#endif
#if EFI_DEBUG
print_efi_memmap();
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
index 0ce8616c88ae..d75582d1aa55 100644
--- a/arch/x86/platform/olpc/olpc-xo1-pm.c
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -18,6 +18,7 @@
#include <linux/pm.h>
#include <linux/mfd/core.h>
#include <linux/suspend.h>
+#include <linux/olpc-ec.h>
#include <asm/io.h>
#include <asm/olpc.h>
@@ -51,16 +52,11 @@ EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear);
static int xo1_power_state_enter(suspend_state_t pm_state)
{
unsigned long saved_sci_mask;
- int r;
/* Only STR is supported */
if (pm_state != PM_SUSPEND_MEM)
return -EINVAL;
- r = olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0);
- if (r)
- return r;
-
/*
* Save SCI mask (this gets lost since PM1_EN is used as a mask for
* wakeup events, which is not necessarily the same event set)
@@ -76,16 +72,6 @@ static int xo1_power_state_enter(suspend_state_t pm_state)
/* Restore SCI mask (using dword access to CS5536_PM1_EN) */
outl(saved_sci_mask, acpi_base + CS5536_PM1_STS);
- /* Tell the EC to stop inhibiting SCIs */
- olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0);
-
- /*
- * Tell the wireless module to restart USB communication.
- * Must be done twice.
- */
- olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
- olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
-
return 0;
}
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 04b8c73659c5..63d4aa40956e 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -23,6 +23,7 @@
#include <linux/power_supply.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
+#include <linux/olpc-ec.h>
#include <asm/io.h>
#include <asm/msr.h>
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 599be499fdf7..2fdca25905ae 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/power_supply.h>
+#include <linux/olpc-ec.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
index a4bee53c2e54..27376081ddec 100644
--- a/arch/x86/platform/olpc/olpc.c
+++ b/arch/x86/platform/olpc/olpc.c
@@ -14,14 +14,13 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
-#include <linux/debugfs.h>
#include <linux/mutex.h>
+#include <linux/olpc-ec.h>
#include <asm/geode.h>
#include <asm/setup.h>
@@ -31,17 +30,6 @@
struct olpc_platform_t olpc_platform_info;
EXPORT_SYMBOL_GPL(olpc_platform_info);
-static DEFINE_SPINLOCK(ec_lock);
-
-/* debugfs interface to EC commands */
-#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
-#define EC_MAX_CMD_REPLY (8)
-
-static struct dentry *ec_debugfs_dir;
-static DEFINE_MUTEX(ec_debugfs_cmd_lock);
-static unsigned char ec_debugfs_resp[EC_MAX_CMD_REPLY];
-static unsigned int ec_debugfs_resp_bytes;
-
/* EC event mask to be applied during suspend (defining wakeup sources). */
static u16 ec_wakeup_mask;
@@ -125,16 +113,13 @@ static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
* <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while
* OpenFirmware's source is available, the EC's is not.
*/
-int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
- unsigned char *outbuf, size_t outlen)
+static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen, void *arg)
{
- unsigned long flags;
int ret = -EIO;
int i;
int restarts = 0;
- spin_lock_irqsave(&ec_lock, flags);
-
/* Clear OBF */
for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
inb(0x68);
@@ -198,10 +183,8 @@ restart:
ret = 0;
err:
- spin_unlock_irqrestore(&ec_lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(olpc_ec_cmd);
void olpc_ec_wakeup_set(u16 value)
{
@@ -280,96 +263,6 @@ int olpc_ec_sci_query(u16 *sci_value)
}
EXPORT_SYMBOL_GPL(olpc_ec_sci_query);
-static ssize_t ec_debugfs_cmd_write(struct file *file, const char __user *buf,
- size_t size, loff_t *ppos)
-{
- int i, m;
- unsigned char ec_cmd[EC_MAX_CMD_ARGS];
- unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
- char cmdbuf[64];
- int ec_cmd_bytes;
-
- mutex_lock(&ec_debugfs_cmd_lock);
-
- size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
-
- m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
- &ec_debugfs_resp_bytes,
- &ec_cmd_int[1], &ec_cmd_int[2], &ec_cmd_int[3],
- &ec_cmd_int[4], &ec_cmd_int[5]);
- if (m < 2 || ec_debugfs_resp_bytes > EC_MAX_CMD_REPLY) {
- /* reset to prevent overflow on read */
- ec_debugfs_resp_bytes = 0;
-
- printk(KERN_DEBUG "olpc-ec: bad ec cmd: "
- "cmd:response-count [arg1 [arg2 ...]]\n");
- size = -EINVAL;
- goto out;
- }
-
- /* convert scanf'd ints to char */
- ec_cmd_bytes = m - 2;
- for (i = 0; i <= ec_cmd_bytes; i++)
- ec_cmd[i] = ec_cmd_int[i];
-
- printk(KERN_DEBUG "olpc-ec: debugfs cmd 0x%02x with %d args "
- "%02x %02x %02x %02x %02x, want %d returns\n",
- ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2], ec_cmd[3],
- ec_cmd[4], ec_cmd[5], ec_debugfs_resp_bytes);
-
- olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
- ec_cmd_bytes, ec_debugfs_resp, ec_debugfs_resp_bytes);
-
- printk(KERN_DEBUG "olpc-ec: response "
- "%02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n",
- ec_debugfs_resp[0], ec_debugfs_resp[1], ec_debugfs_resp[2],
- ec_debugfs_resp[3], ec_debugfs_resp[4], ec_debugfs_resp[5],
- ec_debugfs_resp[6], ec_debugfs_resp[7], ec_debugfs_resp_bytes);
-
-out:
- mutex_unlock(&ec_debugfs_cmd_lock);
- return size;
-}
-
-static ssize_t ec_debugfs_cmd_read(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
-{
- unsigned int i, r;
- char *rp;
- char respbuf[64];
-
- mutex_lock(&ec_debugfs_cmd_lock);
- rp = respbuf;
- rp += sprintf(rp, "%02x", ec_debugfs_resp[0]);
- for (i = 1; i < ec_debugfs_resp_bytes; i++)
- rp += sprintf(rp, ", %02x", ec_debugfs_resp[i]);
- mutex_unlock(&ec_debugfs_cmd_lock);
- rp += sprintf(rp, "\n");
-
- r = rp - respbuf;
- return simple_read_from_buffer(buf, size, ppos, respbuf, r);
-}
-
-static const struct file_operations ec_debugfs_genops = {
- .write = ec_debugfs_cmd_write,
- .read = ec_debugfs_cmd_read,
-};
-
-static void setup_debugfs(void)
-{
- ec_debugfs_dir = debugfs_create_dir("olpc-ec", 0);
- if (ec_debugfs_dir == ERR_PTR(-ENODEV))
- return;
-
- debugfs_create_file("cmd", 0600, ec_debugfs_dir, NULL,
- &ec_debugfs_genops);
-}
-
-static int olpc_ec_suspend(void)
-{
- return olpc_ec_mask_write(ec_wakeup_mask);
-}
-
static bool __init check_ofw_architecture(struct device_node *root)
{
const char *olpc_arch;
@@ -424,8 +317,59 @@ static int __init add_xo1_platform_devices(void)
return 0;
}
-static struct syscore_ops olpc_syscore_ops = {
- .suspend = olpc_ec_suspend,
+static int olpc_xo1_ec_probe(struct platform_device *pdev)
+{
+ /* get the EC revision */
+ olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
+ (unsigned char *) &olpc_platform_info.ecver, 1);
+
+ /* EC version 0x5f adds support for wide SCI mask */
+ if (olpc_platform_info.ecver >= 0x5f)
+ olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI;
+
+ pr_info("OLPC board revision %s%X (EC=%x)\n",
+ ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
+ olpc_platform_info.boardrev >> 4,
+ olpc_platform_info.ecver);
+
+ return 0;
+}
+static int olpc_xo1_ec_suspend(struct platform_device *pdev)
+{
+ olpc_ec_mask_write(ec_wakeup_mask);
+
+ /*
+ * Squelch SCIs while suspended. This is a fix for
+ * <http://dev.laptop.org/ticket/1835>.
+ */
+ return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0);
+}
+
+static int olpc_xo1_ec_resume(struct platform_device *pdev)
+{
+ /* Tell the EC to stop inhibiting SCIs */
+ olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0);
+
+ /*
+ * Tell the wireless module to restart USB communication.
+ * Must be done twice.
+ */
+ olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
+ olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
+
+ return 0;
+}
+
+static struct olpc_ec_driver ec_xo1_driver = {
+ .probe = olpc_xo1_ec_probe,
+ .suspend = olpc_xo1_ec_suspend,
+ .resume = olpc_xo1_ec_resume,
+ .ec_cmd = olpc_xo1_ec_cmd,
+};
+
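+/* The XO-1.5 EC uses the same command handler but has no suspend/resume hooks. */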
+static struct olpc_ec_driver ec_xo1_5_driver = {
+ .probe = olpc_xo1_ec_probe,
+ .ec_cmd = olpc_xo1_ec_cmd,
};
static int __init olpc_init(void)
@@ -435,16 +379,17 @@ static int __init olpc_init(void)
if (!olpc_ofw_present() || !platform_detect())
return 0;
- spin_lock_init(&ec_lock);
+ /* register the XO-1 and 1.5-specific EC handler */
+ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */
+ olpc_ec_driver_register(&ec_xo1_driver, NULL);
+ else
+ olpc_ec_driver_register(&ec_xo1_5_driver, NULL);
+ platform_device_register_simple("olpc-ec", -1, NULL, 0);
/* assume B1 and above models always have a DCON */
if (olpc_board_at_least(olpc_board(0xb1)))
olpc_platform_info.flags |= OLPC_F_DCON;
- /* get the EC revision */
- olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
- (unsigned char *) &olpc_platform_info.ecver, 1);
-
#ifdef CONFIG_PCI_OLPC
/* If the VSA exists let it emulate PCI, if not emulate in kernel.
* XO-1 only. */
@@ -452,14 +397,6 @@ static int __init olpc_init(void)
!cs5535_has_vsa2())
x86_init.pci.arch_init = pci_olpc_init;
#endif
- /* EC version 0x5f adds support for wide SCI mask */
- if (olpc_platform_info.ecver >= 0x5f)
- olpc_platform_info.flags |= OLPC_F_EC_WIDE_SCI;
-
- printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
- ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
- olpc_platform_info.boardrev >> 4,
- olpc_platform_info.ecver);
if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
r = add_xo1_platform_devices();
@@ -467,9 +404,6 @@ static int __init olpc_init(void)
return r;
}
- register_syscore_ops(&olpc_syscore_ops);
- setup_debugfs();
-
return 0;
}
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b2d534cab25f..88692871823f 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 51171aeff0dc..a582bfed95bb 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -60,8 +60,8 @@
51 common getsockname sys_getsockname
52 common getpeername sys_getpeername
53 common socketpair sys_socketpair
-54 common setsockopt sys_setsockopt
-55 common getsockopt sys_getsockopt
+54 64 setsockopt sys_setsockopt
+55 64 getsockopt sys_getsockopt
56 common clone stub_clone
57 common fork stub_fork
58 common vfork stub_vfork
@@ -318,7 +318,7 @@
309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
-312 64 kcmp sys_kcmp
+312 common kcmp sys_kcmp
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -353,3 +353,5 @@
538 x32 sendmmsg compat_sys_sendmmsg
539 x32 process_vm_readv compat_sys_process_vm_readv
540 x32 process_vm_writev compat_sys_process_vm_writev
+541 x32 setsockopt compat_sys_setsockopt
+542 x32 getsockopt compat_sys_getsockopt
diff --git a/arch/x86/um/asm/ptrace.h b/arch/x86/um/asm/ptrace.h
index 950dfb7b8417..e72cd0df5ba3 100644
--- a/arch/x86/um/asm/ptrace.h
+++ b/arch/x86/um/asm/ptrace.h
@@ -30,10 +30,10 @@
#define profile_pc(regs) PT_REGS_IP(regs)
#define UPT_RESTART_SYSCALL(r) (UPT_IP(r) -= 2)
-#define UPT_SET_SYSCALL_RETURN(r, res) (UPT_AX(r) = (res))
+#define PT_REGS_SET_SYSCALL_RETURN(r, res) (PT_REGS_AX(r) = (res))
-static inline long regs_return_value(struct uml_pt_regs *regs)
+static inline long regs_return_value(struct pt_regs *regs)
{
- return UPT_AX(regs);
+ return PT_REGS_AX(regs);
}
#endif /* __UM_X86_PTRACE_H */
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 8a3f8351f438..8ed64cfae4ff 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -7,6 +7,7 @@ config ZONE_DMA
config XTENSA
def_bool y
select HAVE_IDE
+ select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
diff --git a/arch/xtensa/include/asm/cpumask.h b/arch/xtensa/include/asm/cpumask.h
deleted file mode 100644
index ebeede397db3..000000000000
--- a/arch/xtensa/include/asm/cpumask.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/cpumask.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_CPUMASK_H
-#define _XTENSA_CPUMASK_H
-
-#include <asm-generic/cpumask.h>
-
-#endif /* _XTENSA_CPUMASK_H */
diff --git a/arch/xtensa/include/asm/rmap.h b/arch/xtensa/include/asm/rmap.h
deleted file mode 100644
index 649588b7e9ad..000000000000
--- a/arch/xtensa/include/asm/rmap.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/rmap.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_RMAP_H
-#define _XTENSA_RMAP_H
-
-#include <asm-generic/rmap.h>
-
-#endif
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 816e6d0d686c..05b3f093d5d7 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -44,7 +44,7 @@ asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
unsigned long ret;
long err;
- err = do_shmat(shmid, shmaddr, shmflg, &ret);
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
if (err)
return err;
return (long)ret;
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b17885a0b508..5a74c53bc69c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -44,6 +44,7 @@ void do_page_fault(struct pt_regs *regs)
int is_write, is_exec;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
info.si_code = SEGV_MAPERR;
@@ -71,6 +72,7 @@ void do_page_fault(struct pt_regs *regs)
address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
#endif
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -93,6 +95,7 @@ good_area:
if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
} else if (is_exec) {
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
@@ -104,7 +107,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -112,10 +119,22 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e7dee617358e..f3b44a65fc7a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -31,27 +31,6 @@ EXPORT_SYMBOL_GPL(blkcg_root);
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
-struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
-{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkcg, css);
-}
-EXPORT_SYMBOL_GPL(cgroup_to_blkcg);
-
-static struct blkcg *task_blkcg(struct task_struct *tsk)
-{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkcg, css);
-}
-
-struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_css)
- return container_of(bio->bi_css, struct blkcg, css);
- return task_blkcg(current);
-}
-EXPORT_SYMBOL_GPL(bio_blkcg);
-
static bool blkcg_policy_enabled(struct request_queue *q,
const struct blkcg_policy *pol)
{
@@ -84,6 +63,7 @@ static void blkg_free(struct blkcg_gq *blkg)
kfree(pd);
}
+ blk_exit_rl(&blkg->rl);
kfree(blkg);
}
@@ -91,16 +71,18 @@ static void blkg_free(struct blkcg_gq *blkg)
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
* @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
*
* Allocate a new blkg assocating @blkcg and @q.
*/
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+ gfp_t gfp_mask)
{
struct blkcg_gq *blkg;
int i;
/* alloc and init base part */
- blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+ blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
if (!blkg)
return NULL;
@@ -109,6 +91,13 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
blkg->blkcg = blkcg;
blkg->refcnt = 1;
+ /* root blkg uses @q->root_rl, init rl only for !root blkgs */
+ if (blkcg != &blkcg_root) {
+ if (blk_init_rl(&blkg->rl, q, gfp_mask))
+ goto err_free;
+ blkg->rl.blkg = blkg;
+ }
+
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd;
@@ -117,11 +106,9 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
continue;
/* alloc per-policy data and attach it to blkg */
- pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
- if (!pd) {
- blkg_free(blkg);
- return NULL;
- }
+ pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
+ if (!pd)
+ goto err_free;
blkg->pd[i] = pd;
pd->blkg = blkg;
@@ -132,6 +119,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
}
return blkg;
+
+err_free:
+ blkg_free(blkg);
+ return NULL;
}
static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
@@ -175,9 +166,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blkg_lookup);
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
+ */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
+ struct request_queue *q,
+ struct blkcg_gq *new_blkg)
{
struct blkcg_gq *blkg;
int ret;
@@ -189,24 +184,26 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
blkg = __blkg_lookup(blkcg, q);
if (blkg) {
rcu_assign_pointer(blkcg->blkg_hint, blkg);
- return blkg;
+ goto out_free;
}
/* blkg holds a reference to blkcg */
- if (!css_tryget(&blkcg->css))
- return ERR_PTR(-EINVAL);
+ if (!css_tryget(&blkcg->css)) {
+ blkg = ERR_PTR(-EINVAL);
+ goto out_free;
+ }
/* allocate */
- ret = -ENOMEM;
- blkg = blkg_alloc(blkcg, q);
- if (unlikely(!blkg))
- goto err_put;
+ if (!new_blkg) {
+ new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+ if (unlikely(!new_blkg)) {
+ blkg = ERR_PTR(-ENOMEM);
+ goto out_put;
+ }
+ }
+ blkg = new_blkg;
/* insert */
- ret = radix_tree_preload(GFP_ATOMIC);
- if (ret)
- goto err_free;
-
spin_lock(&blkcg->lock);
ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
if (likely(!ret)) {
@@ -215,15 +212,15 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
}
spin_unlock(&blkcg->lock);
- radix_tree_preload_end();
-
if (!ret)
return blkg;
-err_free:
- blkg_free(blkg);
-err_put:
+
+ blkg = ERR_PTR(ret);
+out_put:
css_put(&blkcg->css);
- return ERR_PTR(ret);
+out_free:
+ blkg_free(new_blkg);
+ return blkg;
}
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
@@ -235,7 +232,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
*/
if (unlikely(blk_queue_bypass(q)))
return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
- return __blkg_lookup_create(blkcg, q);
+ return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
@@ -313,6 +310,38 @@ void __blkg_release(struct blkcg_gq *blkg)
}
EXPORT_SYMBOL_GPL(__blkg_release);
+/*
+ * The next function used by blk_queue_for_each_rl(). It's a bit tricky
+ * because the root blkg uses @q->root_rl instead of its own rl.
+ */
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q)
+{
+ struct list_head *ent;
+ struct blkcg_gq *blkg;
+
+ /*
+ * Determine the current blkg list_head. The first entry is
+ * root_rl which is off @q->blkg_list and mapped to the head.
+ */
+ if (rl == &q->root_rl) {
+ ent = &q->blkg_list;
+ } else {
+ blkg = container_of(rl, struct blkcg_gq, rl);
+ ent = &blkg->q_node;
+ }
+
+ /* walk to the next list_head, skip root blkcg */
+ ent = ent->next;
+ if (ent == &q->root_blkg->q_node)
+ ent = ent->next;
+ if (ent == &q->blkg_list)
+ return NULL;
+
+ blkg = container_of(ent, struct blkcg_gq, q_node);
+ return &blkg->rl;
+}
+
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
u64 val)
{
@@ -734,24 +763,36 @@ int blkcg_activate_policy(struct request_queue *q,
struct blkcg_gq *blkg;
struct blkg_policy_data *pd, *n;
int cnt = 0, ret;
+ bool preloaded;
if (blkcg_policy_enabled(q, pol))
return 0;
+ /* preallocations for root blkg */
+ blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+ if (!blkg)
+ return -ENOMEM;
+
+ preloaded = !radix_tree_preload(GFP_KERNEL);
+
blk_queue_bypass_start(q);
/* make sure the root blkg exists and count the existing blkgs */
spin_lock_irq(q->queue_lock);
rcu_read_lock();
- blkg = __blkg_lookup_create(&blkcg_root, q);
+ blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
rcu_read_unlock();
+ if (preloaded)
+ radix_tree_preload_end();
+
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto out_unlock;
}
q->root_blkg = blkg;
+ q->root_rl.blkg = blkg;
list_for_each_entry(blkg, &q->blkg_list, q_node)
cnt++;
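The hunk above preallocates the root blkg with GFP_KERNEL and preloads the radix tree before any lock is taken, so the later radix_tree_insert() cannot fail for lack of memory while atomic. A minimal sketch of that preload idiom follows; everything except the radix-tree API itself is illustrative and not part of the patch.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static int preload_then_insert(struct radix_tree_root *tree, spinlock_t *lock,
			       unsigned long index, void *item)
{
	/* Preload may fail; the insert below then has to fend for itself. */
	bool preloaded = !radix_tree_preload(GFP_KERNEL);
	int ret;

	spin_lock(lock);
	ret = radix_tree_insert(tree, index, item);	/* consumes the preload */
	spin_unlock(lock);

	/* radix_tree_preload() disabled preemption; re-enable it exactly once. */
	if (preloaded)
		radix_tree_preload_end();
	return ret;
}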
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8ac457ce7783..24597309e23d 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -17,6 +17,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
+#include <linux/blkdev.h>
/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX
@@ -93,6 +94,8 @@ struct blkcg_gq {
struct list_head q_node;
struct hlist_node blkcg_node;
struct blkcg *blkcg;
+ /* request allocation list for this blkcg-q pair */
+ struct request_list rl;
/* reference count */
int refcnt;
@@ -120,8 +123,6 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
-struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
-struct blkcg *bio_blkcg(struct bio *bio);
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
@@ -160,6 +161,25 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+{
+ return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
+ struct blkcg, css);
+}
+
+static inline struct blkcg *task_blkcg(struct task_struct *tsk)
+{
+ return container_of(task_subsys_state(tsk, blkio_subsys_id),
+ struct blkcg, css);
+}
+
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_css)
+ return container_of(bio->bi_css, struct blkcg, css);
+ return task_blkcg(current);
+}
+
/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
@@ -234,6 +254,95 @@ static inline void blkg_put(struct blkcg_gq *blkg)
}
/**
+ * blk_get_rl - get request_list to use
+ * @q: request_queue of interest
+ * @bio: bio which will be attached to the allocated request (may be %NULL)
+ *
+ * The caller wants to allocate a request from @q to use for @bio. Find
+ * the request_list to use and obtain a reference on it. Should be called
+ * under queue_lock. This function is guaranteed to return non-%NULL
+ * request_list.
+ */
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio)
+{
+ struct blkcg *blkcg;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ blkcg = bio_blkcg(bio);
+
+ /* bypass blkg lookup and use @q->root_rl directly for root */
+ if (blkcg == &blkcg_root)
+ goto root_rl;
+
+ /*
+ * Try to use blkg->rl. blkg lookup may fail under memory pressure
+ * or if either the blkcg or queue is going away. Fall back to
+ * root_rl in such cases.
+ */
+ blkg = blkg_lookup_create(blkcg, q);
+ if (unlikely(IS_ERR(blkg)))
+ goto root_rl;
+
+ blkg_get(blkg);
+ rcu_read_unlock();
+ return &blkg->rl;
+root_rl:
+ rcu_read_unlock();
+ return &q->root_rl;
+}
+
+/**
+ * blk_put_rl - put request_list
+ * @rl: request_list to put
+ *
+ * Put the reference acquired by blk_get_rl(). Should be called under
+ * queue_lock.
+ */
+static inline void blk_put_rl(struct request_list *rl)
+{
+ /* root_rl may not have blkg set */
+ if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
+ blkg_put(rl->blkg);
+}
+
+/**
+ * blk_rq_set_rl - associate a request with a request_list
+ * @rq: request of interest
+ * @rl: target request_list
+ *
+ * Associate @rq with @rl so that accounting and freeing can know the
+ * request_list @rq came from.
+ */
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
+{
+ rq->rl = rl;
+}
+
+/**
+ * blk_rq_rl - return the request_list a request came from
+ * @rq: request of interest
+ *
+ * Return the request_list @rq is allocated from.
+ */
+static inline struct request_list *blk_rq_rl(struct request *rq)
+{
+ return rq->rl;
+}
+
+struct request_list *__blk_queue_next_rl(struct request_list *rl,
+ struct request_queue *q);
+/**
+ * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
+ *
+ * Should be used under queue_lock.
+ */
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
+
+/**
* blkg_stat_add - add a value to a blkg_stat
* @stat: target blkg_stat
* @val: value to add
@@ -351,6 +460,7 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
#else /* CONFIG_BLK_CGROUP */
struct cgroup;
+struct blkcg;
struct blkg_policy_data {
};
@@ -361,8 +471,6 @@ struct blkcg_gq {
struct blkcg_policy {
};
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
@@ -374,6 +482,9 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
+static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
+
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
@@ -381,5 +492,14 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
+static inline struct request_list *blk_get_rl(struct request_queue *q,
+ struct bio *bio) { return &q->root_rl; }
+static inline void blk_put_rl(struct request_list *rl) { }
+static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
+static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+
+#define blk_queue_for_each_rl(rl, q) \
+ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
+
#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
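The new helpers above tie each request to the request_list it was allocated from. A minimal sketch of how they compose, assuming the usual blk-core context (queue_lock held by the caller, rq_pool already set up by blk_init_rl()); the function names and the bare mempool calls are illustrative, only the blk_*_rl() helpers come from this hunk.

#include <linux/blkdev.h>
#include <linux/mempool.h>
#include "blk-cgroup.h"

static struct request *rl_alloc_sketch(struct request_queue *q,
				       struct bio *bio, gfp_t gfp_mask)
{
	/* Caller holds q->queue_lock, as the real __get_request() does. */
	struct request_list *rl = blk_get_rl(q, bio);	/* never NULL */
	struct request *rq = mempool_alloc(rl->rq_pool, gfp_mask);

	if (!rq) {
		blk_put_rl(rl);
		return NULL;
	}
	blk_rq_set_rl(rq, rl);		/* remember which rl to return it to */
	return rq;
}

static void rl_free_sketch(struct request *rq)
{
	struct request_list *rl = blk_rq_rl(rq);

	mempool_free(rq, rl->rq_pool);
	blk_put_rl(rl);			/* drops the blkg reference, if any */
}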
diff --git a/block/blk-core.c b/block/blk-core.c
index 93eb3e4f88ce..4b4dbdfbca89 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q);
- drain |= q->rq.elvpriv;
+ drain |= q->nr_rqs_elvpriv;
/*
* Unfortunately, requests are queued at and tracked from
@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all) {
drain |= !list_empty(&q->queue_head);
for (i = 0; i < 2; i++) {
- drain |= q->rq.count[i];
+ drain |= q->nr_rqs[i];
drain |= q->in_flight[i];
drain |= !list_empty(&q->flush_queue[i]);
}
@@ -416,9 +416,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
* left with hung waiters. We need to wake up those waiters.
*/
if (q->request_fn) {
+ struct request_list *rl;
+
spin_lock_irq(q->queue_lock);
- for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
- wake_up_all(&q->rq.wait[i]);
+
+ blk_queue_for_each_rl(rl, q)
+ for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
+ wake_up_all(&rl->wait[i]);
+
spin_unlock_irq(q->queue_lock);
}
}
@@ -517,28 +522,33 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);
-static int blk_init_free_list(struct request_queue *q)
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask)
{
- struct request_list *rl = &q->rq;
-
if (unlikely(rl->rq_pool))
return 0;
+ rl->q = q;
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
- rl->elvpriv = 0;
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
- mempool_free_slab, request_cachep, q->node);
-
+ mempool_free_slab, request_cachep,
+ gfp_mask, q->node);
if (!rl->rq_pool)
return -ENOMEM;
return 0;
}
+void blk_exit_rl(struct request_list *rl)
+{
+ if (rl->rq_pool)
+ mempool_destroy(rl->rq_pool);
+}
+
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
return blk_alloc_queue_node(gfp_mask, -1);
@@ -680,7 +690,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
- if (blk_init_free_list(q))
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
return NULL;
q->request_fn = rfn;
@@ -722,15 +732,15 @@ bool blk_get_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
if (rq->cmd_flags & REQ_ELVPRIV) {
- elv_put_request(q, rq);
+ elv_put_request(rl->q, rq);
if (rq->elv.icq)
put_io_context(rq->elv.icq->ioc);
}
- mempool_free(rq, q->rq.rq_pool);
+ mempool_free(rq, rl->rq_pool);
}
/*
@@ -767,18 +777,23 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
ioc->last_waited = jiffies;
}
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_list *rl, int sync)
{
- struct request_list *rl = &q->rq;
+ struct request_queue *q = rl->q;
- if (rl->count[sync] < queue_congestion_off_threshold(q))
+ /*
+ * bdi isn't aware of blkcg yet. As all async IOs end up root
+ * blkcg anyway, just use root blkcg state.
+ */
+ if (rl == &q->root_rl &&
+ rl->count[sync] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, sync);
if (rl->count[sync] + 1 <= q->nr_requests) {
if (waitqueue_active(&rl->wait[sync]))
wake_up(&rl->wait[sync]);
- blk_clear_queue_full(q, sync);
+ blk_clear_rl_full(rl, sync);
}
}
@@ -786,19 +801,20 @@ static void __freed_request(struct request_queue *q, int sync)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_queue *q, unsigned int flags)
+static void freed_request(struct request_list *rl, unsigned int flags)
{
- struct request_list *rl = &q->rq;
+ struct request_queue *q = rl->q;
int sync = rw_is_sync(flags);
+ q->nr_rqs[sync]--;
rl->count[sync]--;
if (flags & REQ_ELVPRIV)
- rl->elvpriv--;
+ q->nr_rqs_elvpriv--;
- __freed_request(q, sync);
+ __freed_request(rl, sync);
if (unlikely(rl->starved[sync ^ 1]))
- __freed_request(q, sync ^ 1);
+ __freed_request(rl, sync ^ 1);
}
/*
@@ -837,8 +853,8 @@ static struct io_context *rq_ioc(struct bio *bio)
}
/**
- * get_request - get a free request
- * @q: request_queue to allocate request from
+ * __get_request - get a free request
+ * @rl: request list to allocate from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
* @gfp_mask: allocation mask
@@ -850,20 +866,16 @@ static struct io_context *rq_ioc(struct bio *bio)
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request(struct request_queue *q, int rw_flags,
- struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
+ struct request_queue *q = rl->q;
struct request *rq;
- struct request_list *rl = &q->rq;
- struct elevator_type *et;
- struct io_context *ioc;
+ struct elevator_type *et = q->elevator->type;
+ struct io_context *ioc = rq_ioc(bio);
struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(rw_flags) != 0;
- bool retried = false;
int may_queue;
-retry:
- et = q->elevator->type;
- ioc = rq_ioc(bio);
if (unlikely(blk_queue_dead(q)))
return NULL;
@@ -875,28 +887,14 @@ retry:
if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
if (rl->count[is_sync]+1 >= q->nr_requests) {
/*
- * We want ioc to record batching state. If it's
- * not already there, creating a new one requires
- * dropping queue_lock, which in turn requires
- * retesting conditions to avoid queue hang.
- */
- if (!ioc && !retried) {
- spin_unlock_irq(q->queue_lock);
- create_io_context(gfp_mask, q->node);
- spin_lock_irq(q->queue_lock);
- retried = true;
- goto retry;
- }
-
- /*
* The queue will fill after this allocation, so set
* it as full, and mark this process as "batching".
* This process will be allowed to complete a batch of
* requests, others will be blocked.
*/
- if (!blk_queue_full(q, is_sync)) {
+ if (!blk_rl_full(rl, is_sync)) {
ioc_set_batching(q, ioc);
- blk_set_queue_full(q, is_sync);
+ blk_set_rl_full(rl, is_sync);
} else {
if (may_queue != ELV_MQUEUE_MUST
&& !ioc_batching(q, ioc)) {
@@ -909,7 +907,12 @@ retry:
}
}
}
- blk_set_queue_congested(q, is_sync);
+ /*
+ * bdi isn't aware of blkcg yet. As all async IOs end up
+ * root blkcg anyway, just use root blkcg state.
+ */
+ if (rl == &q->root_rl)
+ blk_set_queue_congested(q, is_sync);
}
/*
@@ -920,6 +923,7 @@ retry:
if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
return NULL;
+ q->nr_rqs[is_sync]++;
rl->count[is_sync]++;
rl->starved[is_sync] = 0;
@@ -935,7 +939,7 @@ retry:
*/
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV;
- rl->elvpriv++;
+ q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}
@@ -945,22 +949,19 @@ retry:
spin_unlock_irq(q->queue_lock);
/* allocate and init request */
- rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+ rq = mempool_alloc(rl->rq_pool, gfp_mask);
if (!rq)
goto fail_alloc;
blk_rq_init(q, rq);
+ blk_rq_set_rl(rq, rl);
rq->cmd_flags = rw_flags | REQ_ALLOCED;
/* init elvpriv */
if (rw_flags & REQ_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
- create_io_context(gfp_mask, q->node);
- ioc = rq_ioc(bio);
- if (!ioc)
- goto fail_elvpriv;
-
- icq = ioc_create_icq(ioc, q, gfp_mask);
+ if (ioc)
+ icq = ioc_create_icq(ioc, q, gfp_mask);
if (!icq)
goto fail_elvpriv;
}
@@ -1000,7 +1001,7 @@ fail_elvpriv:
rq->elv.icq = NULL;
spin_lock_irq(q->queue_lock);
- rl->elvpriv--;
+ q->nr_rqs_elvpriv--;
spin_unlock_irq(q->queue_lock);
goto out;
@@ -1013,7 +1014,7 @@ fail_alloc:
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
- freed_request(q, rw_flags);
+ freed_request(rl, rw_flags);
/*
* in the very unlikely event that allocation failed and no
@@ -1029,56 +1030,58 @@ rq_starved:
}
/**
- * get_request_wait - get a free request with retry
+ * get_request - get a free request
* @q: request_queue to allocate request from
* @rw_flags: RW and SYNC flags
* @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
*
- * Get a free request from @q. This function keeps retrying under memory
- * pressure and fails iff @q is dead.
+ * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
+ * function keeps retrying under memory pressure and fails iff @q is dead.
*
* Must be called with @q->queue_lock held and,
* Returns %NULL on failure, with @q->queue_lock held.
* Returns !%NULL on success, with @q->queue_lock *not held*.
*/
-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
- struct bio *bio)
+static struct request *get_request(struct request_queue *q, int rw_flags,
+ struct bio *bio, gfp_t gfp_mask)
{
const bool is_sync = rw_is_sync(rw_flags) != 0;
+ DEFINE_WAIT(wait);
+ struct request_list *rl;
struct request *rq;
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- while (!rq) {
- DEFINE_WAIT(wait);
- struct request_list *rl = &q->rq;
-
- if (unlikely(blk_queue_dead(q)))
- return NULL;
+ rl = blk_get_rl(q, bio); /* transferred to @rq on success */
+retry:
+ rq = __get_request(rl, rw_flags, bio, gfp_mask);
+ if (rq)
+ return rq;
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ blk_put_rl(rl);
+ return NULL;
+ }
- trace_block_sleeprq(q, bio, rw_flags & 1);
+ /* wait on @rl and retry */
+ prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
+ TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(q->queue_lock);
- io_schedule();
+ trace_block_sleeprq(q, bio, rw_flags & 1);
- /*
- * After sleeping, we become a "batching" process and
- * will be able to allocate at least one request, and
- * up to a big batch of them for a small period time.
- * See ioc_batching, ioc_set_batching
- */
- create_io_context(GFP_NOIO, q->node);
- ioc_set_batching(q, current->io_context);
+ spin_unlock_irq(q->queue_lock);
+ io_schedule();
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
+ /*
+ * After sleeping, we become a "batching" process and will be able
+ * to allocate at least one request, and up to a big batch of them
+ * for a small period of time. See ioc_batching, ioc_set_batching
+ */
+ ioc_set_batching(q, current->io_context);
- rq = get_request(q, rw_flags, bio, GFP_NOIO);
- };
+ spin_lock_irq(q->queue_lock);
+ finish_wait(&rl->wait[is_sync], &wait);
- return rq;
+ goto retry;
}
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
@@ -1087,11 +1090,11 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
BUG_ON(rw != READ && rw != WRITE);
+ /* create ioc upfront */
+ create_io_context(gfp_mask, q->node);
+
spin_lock_irq(q->queue_lock);
- if (gfp_mask & __GFP_WAIT)
- rq = get_request_wait(q, rw, NULL);
- else
- rq = get_request(q, rw, NULL, gfp_mask);
+ rq = get_request(q, rw, NULL, gfp_mask);
if (!rq)
spin_unlock_irq(q->queue_lock);
/* q->queue_lock is unlocked at this point */
@@ -1248,12 +1251,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
*/
if (req->cmd_flags & REQ_ALLOCED) {
unsigned int flags = req->cmd_flags;
+ struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
BUG_ON(!hlist_unhashed(&req->hash));
- blk_free_request(q, req);
- freed_request(q, flags);
+ blk_free_request(rl, req);
+ freed_request(rl, flags);
+ blk_put_rl(rl);
}
}
EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1481,7 +1486,7 @@ get_rq:
* Grab a free request. This is might sleep but can not fail.
* Returns with the queue unlocked.
*/
- req = get_request_wait(q, rw_flags, bio);
+ req = get_request(q, rw_flags, bio, GFP_NOIO);
if (unlikely(!req)) {
bio_endio(bio, -ENODEV); /* @q is dead */
goto out_unlock;
@@ -1702,6 +1707,14 @@ generic_make_request_checks(struct bio *bio)
goto end_io;
}
+ /*
+ * Various block parts want %current->io_context and lazy ioc
+ * allocation ends up trading a lot of pain for a small amount of
+ * memory. Just allocate it upfront. This may fail and block
+ * layer knows how to live with it.
+ */
+ create_io_context(GFP_ATOMIC, q->node);
+
if (blk_throtl_bio(q, bio))
return false; /* throttled, will be resubmitted later */
@@ -2896,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
}
-static void flush_plug_callbacks(struct blk_plug *plug)
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);
- if (list_empty(&plug->cb_list))
- return;
+ while (!list_empty(&plug->cb_list)) {
+ list_splice_init(&plug->cb_list, &callbacks);
- list_splice_init(&plug->cb_list, &callbacks);
-
- while (!list_empty(&callbacks)) {
- struct blk_plug_cb *cb = list_first_entry(&callbacks,
+ while (!list_empty(&callbacks)) {
+ struct blk_plug_cb *cb = list_first_entry(&callbacks,
struct blk_plug_cb,
list);
- list_del(&cb->list);
- cb->callback(cb);
+ list_del(&cb->list);
+ cb->callback(cb, from_schedule);
+ }
+ }
+}
+
+struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
+ int size)
+{
+ struct blk_plug *plug = current->plug;
+ struct blk_plug_cb *cb;
+
+ if (!plug)
+ return NULL;
+
+ list_for_each_entry(cb, &plug->cb_list, list)
+ if (cb->callback == unplug && cb->data == data)
+ return cb;
+
+ /* Not currently on the callback list */
+ BUG_ON(size < sizeof(*cb));
+ cb = kzalloc(size, GFP_ATOMIC);
+ if (cb) {
+ cb->data = data;
+ cb->callback = unplug;
+ list_add(&cb->list, &plug->cb_list);
}
+ return cb;
}
+EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
@@ -2924,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(plug->magic != PLUG_MAGIC);
- flush_plug_callbacks(plug);
+ flush_plug_callbacks(plug, from_schedule);
if (list_empty(&plug->list))
return;
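blk_check_plugged() is exported above so that stacking drivers can hang their own callback off the current task's plug. A hedged sketch of the intended embedding pattern; the my_* names and the bio_list payload are illustrative, only the blk_check_plugged()/blk_plug_cb API comes from the patch.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_plug_cb {
	struct blk_plug_cb cb;		/* embedded callback handle */
	struct bio_list pending;	/* illustrative per-plug state */
};

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_plug_cb *mp = container_of(cb, struct my_plug_cb, cb);
	struct bio *bio;

	while ((bio = bio_list_pop(&mp->pending)))
		generic_make_request(bio);
	kfree(mp);			/* blk_check_plugged() kzalloc'd it */
}

/* Returns true if @bio was parked on the current plug. */
static bool my_defer_bio(void *dev, struct bio *bio)
{
	struct blk_plug_cb *cb;
	struct my_plug_cb *mp;

	cb = blk_check_plugged(my_unplug, dev, sizeof(struct my_plug_cb));
	if (!cb)
		return false;		/* no plug active; submit directly */

	mp = container_of(cb, struct my_plug_cb, cb);
	bio_list_add(&mp->pending, bio);	/* kzalloc left the list empty */
	return true;
}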
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 893b8007c657..fab4cdd3f7bb 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -244,6 +244,7 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
/* initialize */
atomic_long_set(&ioc->refcount, 1);
+ atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2b461b496a78..19cc761cacb2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
unsigned int max_discard_sectors;
+ unsigned int granularity, alignment, mask;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
@@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
+ /* Zero-sector (unknown) and one-sector granularities are the same. */
+ granularity = max(q->limits.discard_granularity >> 9, 1U);
+ mask = granularity - 1;
+ alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
+
/*
* Ensure that max_discard_sectors is of the proper
- * granularity
+ * granularity, so that requests stay aligned after a split.
*/
max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ max_discard_sectors = round_down(max_discard_sectors, granularity);
if (unlikely(!max_discard_sectors)) {
/* Avoid infinite loop below. Being cautious never hurts. */
return -EOPNOTSUPP;
- } else if (q->limits.discard_granularity) {
- unsigned int disc_sects = q->limits.discard_granularity >> 9;
-
- max_discard_sectors &= ~(disc_sects - 1);
}
if (flags & BLKDEV_DISCARD_SECURE) {
@@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bb.wait = &wait;
while (nr_sects) {
+ unsigned int req_sects;
+ sector_t end_sect;
+
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
+ req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+
+ /*
+ * If splitting a request, and the next starting sector would be
+ * misaligned, stop the discard at the previous aligned sector.
+ */
+ end_sect = sector + req_sects;
+ if (req_sects < nr_sects && (end_sect & mask) != alignment) {
+ end_sect =
+ round_down(end_sect - alignment, granularity)
+ + alignment;
+ req_sects = end_sect - sector;
+ }
+
bio->bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
- if (nr_sects > max_discard_sectors) {
- bio->bi_size = max_discard_sectors << 9;
- nr_sects -= max_discard_sectors;
- sector += max_discard_sectors;
- } else {
- bio->bi_size = nr_sects << 9;
- nr_sects = 0;
- }
+ bio->bi_size = req_sects << 9;
+ nr_sects -= req_sects;
+ sector = end_sect;
atomic_inc(&bb.done);
submit_bio(type, bio);
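The rewritten loop above splits a large discard so that every split point honours the device's reported discard alignment. A worked example with illustrative numbers (none of them come from the patch):

/*
 * Assume discard_granularity = 64 sectors and an 8-sector discard
 * alignment, so granularity = 64, mask = 63, alignment = 8, and suppose
 * max_discard_sectors rounds down to 448.  Discarding 1000 sectors
 * starting at sector 10:
 *
 *   req_sects = min(1000, 448) = 448
 *   end_sect  = 10 + 448 = 458;  458 & 63 = 10 != 8, so realign:
 *   end_sect  = round_down(458 - 8, 64) + 8 = 448 + 8 = 456
 *   req_sects = 456 - 10 = 446
 *
 * The first bio covers sectors [10, 456); the next chunk starts at 456,
 * where 456 & 63 == 8 matches the alignment, so every later split lands
 * on an aligned boundary and no request straddles a granule.
 */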
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 160035f54882..e76279e41162 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
}
+static void
+__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
+ struct scatterlist *sglist, struct bio_vec **bvprv,
+ struct scatterlist **sg, int *nsegs, int *cluster)
+{
+
+ int nbytes = bvec->bv_len;
+
+ if (*bvprv && *cluster) {
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ goto new_segment;
+
+ if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
+ goto new_segment;
+
+ (*sg)->length += nbytes;
+ } else {
+new_segment:
+ if (!*sg)
+ *sg = sglist;
+ else {
+ /*
+ * If the driver previously mapped a shorter
+ * list, we could see a termination bit
+ * prematurely unless it fully inits the sg
+ * table on each mapping. We KNOW that there
+ * must be more entries here or the driver
+ * would be buggy, so force clear the
+ * termination bit to avoid doing a full
+ * sg_init_table() in drivers for each command.
+ */
+ (*sg)->page_link &= ~0x02;
+ *sg = sg_next(*sg);
+ }
+
+ sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+ (*nsegs)++;
+ }
+ *bvprv = bvec;
+}
+
/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
@@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
bvprv = NULL;
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
- int nbytes = bvec->bv_len;
-
- if (bvprv && cluster) {
- if (sg->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
-
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
- goto new_segment;
-
- sg->length += nbytes;
- } else {
-new_segment:
- if (!sg)
- sg = sglist;
- else {
- /*
- * If the driver previously mapped a shorter
- * list, we could see a termination bit
- * prematurely unless it fully inits the sg
- * table on each mapping. We KNOW that there
- * must be more entries here or the driver
- * would be buggy, so force clear the
- * termination bit to avoid doing a full
- * sg_init_table() in drivers for each command.
- */
- sg->page_link &= ~0x02;
- sg = sg_next(sg);
- }
-
- sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
- nsegs++;
- }
- bvprv = bvec;
+ __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+ &nsegs, &cluster);
} /* segments in rq */
@@ -199,6 +209,43 @@ new_segment:
}
EXPORT_SYMBOL(blk_rq_map_sg);
+/**
+ * blk_bio_map_sg - map a bio to a scatterlist
+ * @q: request_queue in question
+ * @bio: bio being mapped
+ * @sglist: scatterlist being mapped
+ *
+ * Note:
+ * Caller must make sure sg can hold bio->bi_phys_segments entries
+ *
+ * Will return the number of sg entries setup
+ */
+int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist)
+{
+ struct bio_vec *bvec, *bvprv;
+ struct scatterlist *sg;
+ int nsegs, cluster;
+ unsigned long i;
+
+ nsegs = 0;
+ cluster = blk_queue_cluster(q);
+
+ bvprv = NULL;
+ sg = NULL;
+ bio_for_each_segment(bvec, bio, i) {
+ __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
+ &nsegs, &cluster);
+ } /* segments in bio */
+
+ if (sg)
+ sg_mark_end(sg);
+
+ BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
+ return nsegs;
+}
+EXPORT_SYMBOL(blk_bio_map_sg);
+
static inline int ll_new_hw_segment(struct request_queue *q,
struct request *req,
struct bio *bio)
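blk_bio_map_sg() gives bio-based drivers the same scatterlist mapping helper that request-based drivers already had via blk_rq_map_sg(). A hedged usage sketch; the wrapper, its size check, and the assumption that bio->bi_phys_segments is already valid (e.g. after blk_recount_segments()) are illustrative, not from the patch.

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static int map_one_bio(struct request_queue *q, struct bio *bio,
		       struct scatterlist *sgl, unsigned int max_ents)
{
	if (bio->bi_phys_segments > max_ents)
		return -EINVAL;			/* caller's table is too small */

	sg_init_table(sgl, max_ents);
	return blk_bio_map_sg(q, bio, sgl);	/* marks the final entry itself */
}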
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d3234fc494ad..565a6786032f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -143,8 +143,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->discard_zeroes_data = 1;
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
-
- lim->max_sectors = BLK_DEF_MAX_SECTORS;
+ lim->max_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index aa41b47c22d2..9628b291f960 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
- struct request_list *rl = &q->rq;
+ struct request_list *rl;
unsigned long nr;
int ret;
@@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
+ /* congestion isn't cgroup aware and follows root blkcg for now */
+ rl = &q->root_rl;
+
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
@@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_SYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_SYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_SYNC);
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ }
+
+ if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+ blk_set_rl_full(rl, BLK_RW_ASYNC);
+ } else {
+ blk_clear_rl_full(rl, BLK_RW_ASYNC);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
}
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_queue_full(q, BLK_RW_ASYNC);
- } else {
- blk_clear_queue_full(q, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
spin_unlock_irq(q->queue_lock);
return ret;
}
@@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- struct request_list *rl = &q->rq;
blk_sync_queue(q);
@@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj)
elevator_exit(q->elevator);
}
- if (rl->rq_pool)
- mempool_destroy(rl->rq_pool);
+ blk_exit_rl(&q->root_rl);
if (q->queue_tags)
__blk_queue_free_tags(q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5b0659512047..e287c19908c8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1123,9 +1123,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
goto out;
}
- /* bio_associate_current() needs ioc, try creating */
- create_io_context(GFP_ATOMIC, q->node);
-
/*
* A throtl_grp pointer retrieved under rcu can be used to access
* basic fields like stats and io rates. If a group has no rules,
diff --git a/block/blk.h b/block/blk.h
index 85f6ae42f7d3..2a0ea32d249f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q)
kobject_get(&q->kobj);
}
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+ gfp_t gfp_mask);
+void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
@@ -33,7 +36,6 @@ bool __blk_end_bidi_request(struct request *rq, int error,
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
-void __generic_unplug_device(struct request_queue *);
/*
* Internal atomic flags for request handling
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 7ad49c88f6b1..deee61fbb741 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -243,56 +243,3 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
return 0;
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
-
-/**
- * bsg_remove_queue - Deletes the bsg dev from the q
- * @q: the request_queue that is to be torn down.
- *
- * Notes:
- * Before unregistering the queue empty any requests that are blocked
- */
-void bsg_remove_queue(struct request_queue *q)
-{
- struct request *req; /* block request */
- int counts; /* totals for request_list count and starved */
-
- if (!q)
- return;
-
- /* Stop taking in new requests */
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
-
- /* drain all requests in the queue */
- while (1) {
- /* need the lock to fetch a request
- * this may fetch the same reqeust as the previous pass
- */
- req = blk_fetch_request(q);
- /* save requests in use and starved */
- counts = q->rq.count[0] + q->rq.count[1] +
- q->rq.starved[0] + q->rq.starved[1];
- spin_unlock_irq(q->queue_lock);
- /* any requests still outstanding? */
- if (counts == 0)
- break;
-
- /* This may be the same req as the previous iteration,
- * always send the blk_end_request_all after a prefetch.
- * It is not okay to not end the request because the
- * prefetch started the request.
- */
- if (req) {
- /* return -ENXIO to indicate that this queue is
- * going away
- */
- req->errors = -ENXIO;
- blk_end_request_all(req, -ENXIO);
- }
-
- msleep(200); /* allow bsg to possibly finish */
- spin_lock_irq(q->queue_lock);
- }
- bsg_unregister_queue(q);
-}
-EXPORT_SYMBOL_GPL(bsg_remove_queue);
diff --git a/block/genhd.c b/block/genhd.c
index 9cf5583c90ff..d839723303c8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -154,7 +154,7 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
part = rcu_dereference(ptbl->part[piter->idx]);
if (!part)
continue;
- if (!part->nr_sects &&
+ if (!part_nr_sects_read(part) &&
!(piter->flags & DISK_PITER_INCL_EMPTY) &&
!(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
piter->idx == 0))
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit);
static inline int sector_in_part(struct hd_struct *part, sector_t sector)
{
return part->start_sect <= sector &&
- sector < part->start_sect + part->nr_sects;
+ sector < part->start_sect + part_nr_sects_read(part);
}
/**
@@ -769,8 +769,8 @@ void __init printk_all_partitions(void)
printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
bdevt_str(part_devt(part), devt_buf),
- (unsigned long long)part->nr_sects >> 1,
- disk_name(disk, part->partno, name_buf),
+ (unsigned long long)part_nr_sects_read(part) >> 1,
+ disk_name(disk, part->partno, name_buf),
uuid_buf);
if (is_part0) {
if (disk->driverfs_dev != NULL &&
@@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
- static void *p;
+ void *p;
p = disk_seqf_start(seqf, pos);
if (!IS_ERR_OR_NULL(p) && !*pos)
@@ -862,7 +862,7 @@ static int show_partition(struct seq_file *seqf, void *v)
while ((part = disk_part_iter_next(&piter)))
seq_printf(seqf, "%4d %7d %10llu %s\n",
MAJOR(part_devt(part)), MINOR(part_devt(part)),
- (unsigned long long)part->nr_sects >> 1,
+ (unsigned long long)part_nr_sects_read(part) >> 1,
disk_name(sgp, part->partno, buf));
disk_part_iter_exit(&piter);
@@ -1268,6 +1268,16 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
}
disk->part_tbl->part[0] = &disk->part0;
+ /*
+ * set_capacity() and get_capacity() currently don't use the
+ * seqcount to read/update part0->nr_sects. Still init the
+ * counter as we can read the sectors in the IO submission
+ * path using sequence counters.
+ *
+ * TODO: Ideally set_capacity() and get_capacity() should be
+ * converted to make use of bd_mutex and sequence counters.
+ */
+ seqcount_init(&disk->part0.nr_sects_seq);
hd_ref_init(&disk->part0);
disk->minors = minors;
diff --git a/block/ioctl.c b/block/ioctl.c
index ba15b2dbfb98..4476e0e85d16 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -13,7 +13,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
{
struct block_device *bdevp;
struct gendisk *disk;
- struct hd_struct *part;
+ struct hd_struct *part, *lpart;
struct blkpg_ioctl_arg a;
struct blkpg_partition p;
struct disk_part_iter piter;
@@ -36,8 +36,8 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
case BLKPG_ADD_PARTITION:
start = p.start >> 9;
length = p.length >> 9;
- /* check for fit in a hd_struct */
- if (sizeof(sector_t) == sizeof(long) &&
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
sizeof(long long) > sizeof(long)) {
long pstart = start, plength = length;
if (pstart != start || plength != length
@@ -92,6 +92,59 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
bdput(bdevp);
return 0;
+ case BLKPG_RESIZE_PARTITION:
+ start = p.start >> 9;
+ /* new length of partition in bytes */
+ length = p.length >> 9;
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
+ sizeof(long long) > sizeof(long)) {
+ long pstart = start, plength = length;
+ if (pstart != start || plength != length
+ || pstart < 0 || plength < 0)
+ return -EINVAL;
+ }
+ part = disk_get_part(disk, partno);
+ if (!part)
+ return -ENXIO;
+ bdevp = bdget(part_devt(part));
+ if (!bdevp) {
+ disk_put_part(part);
+ return -ENOMEM;
+ }
+ mutex_lock(&bdevp->bd_mutex);
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ if (start != part->start_sect) {
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return -EINVAL;
+ }
+ /* overlap? */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((lpart = disk_part_iter_next(&piter))) {
+ if (lpart->partno != partno &&
+ !(start + length <= lpart->start_sect ||
+ start >= lpart->start_sect + lpart->nr_sects)
+ ) {
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return -EBUSY;
+ }
+ }
+ disk_part_iter_exit(&piter);
+ part_nr_sects_write(part, (sector_t)length);
+ i_size_write(bdevp->bd_inode, p.length);
+ mutex_unlock(&bdevp->bd_mutex);
+ mutex_unlock(&bdev->bd_mutex);
+ bdput(bdevp);
+ disk_put_part(part);
+ return 0;
default:
return -EINVAL;
}
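A hedged userspace sketch of driving the new BLKPG_RESIZE_PARTITION case above. It assumes the matching uapi definitions (BLKPG, struct blkpg_ioctl_arg, struct blkpg_partition, and the BLKPG_RESIZE_PARTITION opcode added alongside this hunk) are present in <linux/blkpg.h>; the wrapper itself is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

int resize_partition(const char *whole_disk, int partno,
		     long long start_bytes, long long new_len_bytes)
{
	struct blkpg_partition part;
	struct blkpg_ioctl_arg arg;
	int fd, ret;

	fd = open(whole_disk, O_RDWR);		/* whole-disk node, not the partition */
	if (fd < 0)
		return -1;

	memset(&part, 0, sizeof(part));
	part.pno = partno;
	part.start = start_bytes;		/* must match the existing start or -EINVAL */
	part.length = new_len_bytes;		/* new size in bytes; overlap gives -EBUSY */

	memset(&arg, 0, sizeof(arg));
	arg.op = BLKPG_RESIZE_PARTITION;
	arg.datalen = sizeof(part);
	arg.data = &part;

	ret = ioctl(fd, BLKPG, &arg);
	if (ret < 0)
		perror("BLKPG_RESIZE_PARTITION");
	close(fd);
	return ret;
}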
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 6df5d6928a44..f1d14519cc04 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -84,7 +84,7 @@ ssize_t part_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
- return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
+ return sprintf(buf, "%llu\n",(unsigned long long)part_nr_sects_read(p));
}
static ssize_t part_ro_show(struct device *dev,
@@ -294,6 +294,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
err = -ENOMEM;
goto out_free;
}
+
+ seqcount_init(&p->nr_sects_seq);
pdev = part_to_dev(p);
p->start_sect = start;
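Both hunks above only initialize nr_sects_seq; the accessors that use it, part_nr_sects_read() and part_nr_sects_write(), live in include/linux/genhd.h and are not part of this excerpt. A hedged sketch of what such seqcount-protected accessors look like; the real helpers can skip the seqcount on 64-bit builds, where the load is atomic anyway.

#include <linux/genhd.h>
#include <linux/seqlock.h>

static inline sector_t example_nr_sects_read(struct hd_struct *part)
{
	unsigned int seq;
	sector_t nr;

	do {
		seq = read_seqcount_begin(&part->nr_sects_seq);
		nr = part->nr_sects;
	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
	return nr;
}

static inline void example_nr_sects_write(struct hd_struct *part, sector_t size)
{
	write_seqcount_begin(&part->nr_sects_seq);
	part->nr_sects = size;
	write_seqcount_end(&part->nr_sects_seq);
}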
diff --git a/drivers/Kconfig b/drivers/Kconfig
index bfc918633fd9..ece958d3762e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,6 +112,8 @@ source "drivers/auxdisplay/Kconfig"
source "drivers/uio/Kconfig"
+source "drivers/vfio/Kconfig"
+
source "drivers/vlynq/Kconfig"
source "drivers/virtio/Kconfig"
@@ -148,4 +150,6 @@ source "drivers/iio/Kconfig"
source "drivers/vme/Kconfig"
+source "drivers/pwm/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 2ba29ffef2cb..5b421840c48d 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -8,6 +8,7 @@
# GPIO must come after pinctrl as gpios may need to mux pins etc
obj-y += pinctrl/
obj-y += gpio/
+obj-y += pwm/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
@@ -59,6 +60,7 @@ obj-$(CONFIG_ATM) += atm/
obj-$(CONFIG_FUSION) += message/
obj-y += firewire/
obj-$(CONFIG_UIO) += uio/
+obj-$(CONFIG_VFIO) += vfio/
obj-y += cdrom/
obj-y += auxdisplay/
obj-$(CONFIG_PCCARD) += pcmcia/
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index ff9f6bd48301..d5fdd36190cc 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static struct acpi_driver acpi_ac_driver = {
@@ -294,7 +296,9 @@ static int acpi_ac_add(struct acpi_device *device)
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- power_supply_register(&ac->device->dev, &ac->charger);
+ result = power_supply_register(&ac->device->dev, &ac->charger);
+ if (result)
+ goto end;
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
@@ -311,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device)
return result;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
@@ -330,6 +335,7 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#endif
static int acpi_ac_remove(struct acpi_device *device, int type)
{
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index d98571385656..24c807f96636 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -341,7 +341,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_memory_device *mem_device;
struct acpi_device *device;
-
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -354,15 +354,20 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
"\nReceived DEVICE CHECK notification for device\n"));
if (acpi_memory_get_device(handle, &mem_device)) {
printk(KERN_ERR PREFIX "Cannot find driver data\n");
- return;
+ break;
}
- if (!acpi_memory_check_device(mem_device)) {
- if (acpi_memory_enable_device(mem_device))
- printk(KERN_ERR PREFIX
- "Cannot enable memory device\n");
+ if (acpi_memory_check_device(mem_device))
+ break;
+
+ if (acpi_memory_enable_device(mem_device)) {
+ printk(KERN_ERR PREFIX "Cannot enable memory device\n");
+ break;
}
+
+ ost_code = ACPI_OST_SC_SUCCESS;
break;
+
case ACPI_NOTIFY_EJECT_REQUEST:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"\nReceived EJECT REQUEST notification for device\n"));
@@ -383,19 +388,35 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
* TBD: Can also be disabled by Callback registration
* with generic sysfs driver
*/
- if (acpi_memory_disable_device(mem_device))
- printk(KERN_ERR PREFIX
- "Disable memory device\n");
+ if (acpi_memory_disable_device(mem_device)) {
+ printk(KERN_ERR PREFIX "Disable memory device\n");
+ /*
+ * If _EJ0 was called but failed, _OST is not
+ * necessary.
+ */
+ if (mem_device->state == MEMORY_INVALID_STATE)
+ return;
+
+ break;
+ }
+
/*
* TBD: Invoke acpi_bus_remove to cleanup data structures
*/
- break;
+
+ /* _EJ0 succeeded; _OST is not necessary */
+ return;
+
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
- break;
+
+ /* non-hotplug event; possibly handled by other handler */
+ return;
}
+ /* Inform firmware that the hotplug operation has completed */
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
return;
}
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 1502c50273b5..af4aad6ee2eb 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -145,7 +145,7 @@ static void exit_round_robin(unsigned int tsk_index)
}
static unsigned int idle_pct = 5; /* percentage */
-static unsigned int round_robin_time = 10; /* second */
+static unsigned int round_robin_time = 1; /* second */
static int power_saving_thread(void *data)
{
struct sched_param param = {.sched_priority = 1};
@@ -235,7 +235,7 @@ static int create_power_saving_task(void)
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
- "power_saving/%d", ps_tsk_num);
+ "acpi_pad/%d", ps_tsk_num);
rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
if (!rc)
ps_tsk_num++;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 793b8cc8e256..0a1b3435f920 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -134,12 +134,14 @@ acpi-y += \
tbinstal.o \
tbutils.o \
tbxface.o \
+ tbxfload.o \
tbxfroot.o
acpi-y += \
utaddress.o \
utalloc.o \
utcopy.o \
+ utexcep.o \
utdebug.o \
utdecode.o \
utdelete.o \
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index d700f63e4701..c0a43b38c6a3 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -237,7 +237,7 @@ u32 acpi_ev_install_sci_handler(void);
acpi_status acpi_ev_remove_sci_handler(void);
-u32 acpi_ev_initialize_sCI(u32 program_sCI);
+u32 acpi_ev_initialize_SCI(u32 program_SCI);
ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
#endif /* __ACEVENTS_H__ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 4f7d3f57d05c..ce79100fb5eb 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -278,8 +278,7 @@ ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
/* Global handlers */
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
-ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
+ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
@@ -327,14 +326,6 @@ extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
#endif
-/* Exception codes */
-
-extern char const *acpi_gbl_exception_names_env[];
-extern char const *acpi_gbl_exception_names_pgm[];
-extern char const *acpi_gbl_exception_names_tbl[];
-extern char const *acpi_gbl_exception_names_aml[];
-extern char const *acpi_gbl_exception_names_ctrl[];
-
/*****************************************************************************
*
* Namespace globals
@@ -463,4 +454,12 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
#endif /* ACPI_DEBUGGER */
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+extern const struct ah_predefined_name asl_predefined_info[];
+
#endif /* __ACGLOBAL_H__ */
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 5ccb99ae3a6f..5de4ec72766d 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void);
/*
* hwsleep - sleep/wake support (Legacy sleep registers)
*/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state);
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state);
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_legacy_wake(u8 sleep_state);
/*
* hwesleep - sleep/wake support (Extended FADT-V5 sleep registers)
*/
void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument);
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_sleep(u8 sleep_state);
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state);
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags);
+acpi_status acpi_hw_extended_wake(u8 sleep_state);
/*
* hwvalid - Port I/O with validation
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index e3922ca20e7f..cc80fe10e8ea 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -299,7 +299,7 @@ acpi_status(*ACPI_INTERNAL_METHOD) (struct acpi_walk_state * walk_state);
* Information structure for ACPI predefined names.
* Each entry in the table contains the following items:
*
- * Name - The ACPI reserved name
+ * name - The ACPI reserved name
* param_count - Number of arguments to the method
* expected_return_btypes - Allowed type(s) for the return value
*/
@@ -404,6 +404,13 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */
};
+/* Notify info for implicit notify, multiple device objects */
+
+struct acpi_gpe_notify_info {
+ struct acpi_namespace_node *device_node; /* Device to be notified */
+ struct acpi_gpe_notify_info *next;
+};
+
struct acpi_gpe_notify_object {
struct acpi_namespace_node *node;
struct acpi_gpe_notify_object *next;
@@ -412,7 +419,7 @@ struct acpi_gpe_notify_object {
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
- struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
+ struct acpi_gpe_notify_info *notify_list; /* List of _PRW devices for implicit notifies */
};
/*
@@ -420,7 +427,7 @@ union acpi_gpe_dispatch_info {
* NOTE: Important to keep this struct as small as possible.
*/
struct acpi_gpe_event_info {
- union acpi_gpe_dispatch_info dispatch; /* Either Method or Handler */
+ union acpi_gpe_dispatch_info dispatch; /* Either Method, Handler, or notify_list */
struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
u8 flags; /* Misc info about this GPE */
u8 gpe_number; /* This GPE */
@@ -600,13 +607,22 @@ acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+/* Global handlers for AML Notifies */
+
+struct acpi_global_notify_handler {
+ acpi_notify_handler handler;
+ void *context;
+};
+
/*
* Notify info - used to pass info to the deferred notify
* handler/dispatcher.
*/
struct acpi_notify_info {
- ACPI_STATE_COMMON struct acpi_namespace_node *node;
- union acpi_operand_object *handler_obj;
+ ACPI_STATE_COMMON u8 handler_list_id;
+ struct acpi_namespace_node *node;
+ union acpi_operand_object *handler_list_head;
+ struct acpi_global_notify_handler *global;
};
/* Generic state is union of structs above */
@@ -718,7 +734,7 @@ struct acpi_parse_obj_named {
u32 name; /* 4-byte name or zero if no name */
};
-/* This version is used by the i_aSL compiler only */
+/* This version is used by the iASL compiler only */
#define ACPI_MAX_PARSEOP_NAME 20
@@ -787,6 +803,7 @@ struct acpi_parse_state {
#define ACPI_PARSEOP_IGNORE 0x01
#define ACPI_PARSEOP_PARAMLIST 0x02
#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_PREDEF_CHECKED 0x08
#define ACPI_PARSEOP_SPECIAL 0x10
/*****************************************************************************
@@ -1075,4 +1092,18 @@ struct acpi_debug_mem_block {
#define ACPI_MEM_LIST_MAX 1
#define ACPI_NUM_MEM_LISTS 2
+/*****************************************************************************
+ *
+ * Info/help support
+ *
+ ****************************************************************************/
+
+struct ah_predefined_name {
+ char *name;
+ char *description;
+#ifndef ACPI_ASL_COMPILER
+ char *action;
+#endif
+};
+
#endif /* __ACLOCAL_H__ */
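The new acpi_gpe_notify_info type turns the implicit-notify target into a plain singly linked list of device nodes, one entry per _PRW device. A standalone sketch of how such a list might be built, walked on a GPE, and torn down; the types and device names here are stand-ins, not the kernel's:

#include <stdlib.h>
#include <stdio.h>

struct gpe_notify_info {
    const char *device_name;            /* stands in for the namespace node pointer */
    struct gpe_notify_info *next;
};

/* Prepend a device to a GPE's implicit-notify list, mirroring the new
 * dispatch.notify_list layout (names here are illustrative). */
static int add_implicit_notify(struct gpe_notify_info **head, const char *name)
{
    struct gpe_notify_info *entry = calloc(1, sizeof(*entry));
    if (!entry)
        return -1;
    entry->device_name = name;
    entry->next = *head;
    *head = entry;
    return 0;
}

int main(void)
{
    struct gpe_notify_info *list = NULL, *n, *next;

    add_implicit_notify(&list, "\\_SB.PCI0.LID0");
    add_implicit_notify(&list, "\\_SB.PCI0.EHC1");

    /* On a GPE, every device on the list would receive a wake notify */
    for (n = list; n; n = n->next)
        printf("queue DEVICE_WAKE notify for %s\n", n->device_name);

    /* Teardown pattern used when the GPE handlers are deleted */
    for (n = list; n; n = next) {
        next = n->next;
        free(n);
    }
    return 0;
}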
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index f119f473f71a..832b6198652e 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -62,7 +62,7 @@
* printf() format helpers
*/
-/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
+/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
@@ -283,8 +283,8 @@
#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))
/*
- * A struct acpi_namespace_node can appear in some contexts
- * where a pointer to a union acpi_operand_object can also
+ * An object of type struct acpi_namespace_node can appear in some contexts
+ * where a pointer to an object of type union acpi_operand_object can also
* appear. This macro is used to distinguish them.
*
* The "Descriptor" field is the first field in both structures.
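The comment above relies on both structures starting with the same descriptor-type field, so a pointer of unknown kind can be classified by reading that first byte. A minimal standalone illustration of the trick; the types and constant values are invented for the example:

#include <stdio.h>

enum desc_type { DESC_NAMESPACE_NODE = 1, DESC_OPERAND_OBJECT = 2 };

struct namespace_node { unsigned char descriptor_type; char name[5]; };
struct operand_object { unsigned char descriptor_type; int value; };

static int is_namespace_node(const void *ptr)
{
    /* Safe only because descriptor_type is the first member of both structs */
    return *(const unsigned char *)ptr == DESC_NAMESPACE_NODE;
}

int main(void)
{
    struct namespace_node node = { DESC_NAMESPACE_NODE, "_SB_" };
    struct operand_object obj  = { DESC_OPERAND_OBJECT, 42 };

    printf("node: %d, obj: %d\n", is_namespace_node(&node), is_namespace_node(&obj));
    return 0;
}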
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index c065078ca83b..364a1303fb8f 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -113,8 +113,8 @@ struct acpi_object_integer {
};
/*
- * Note: The String and Buffer object must be identical through the Pointer
- * and length elements. There is code that depends on this.
+ * Note: The String and Buffer object must be identical through the
+ * pointer and length elements. There is code that depends on this.
*
* Fields common to both Strings and Buffers
*/
@@ -206,8 +206,7 @@ struct acpi_object_method {
* Common fields for objects that support ASL notifications
*/
#define ACPI_COMMON_NOTIFY_INFO \
- union acpi_operand_object *system_notify; /* Handler for system notifies */\
- union acpi_operand_object *device_notify; /* Handler for driver notifies */\
+ union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
union acpi_operand_object *handler; /* Handler for Address space */
struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
@@ -296,10 +295,10 @@ struct acpi_object_buffer_field {
struct acpi_object_notify_handler {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *node; /* Parent device */
- u32 handler_type;
- acpi_notify_handler handler;
+ u32 handler_type; /* Type: Device/System/Both */
+ acpi_notify_handler handler; /* Handler address */
void *context;
- struct acpi_object_notify_handler *next;
+ union acpi_operand_object *next[2]; /* Device and System handler lists */
};
struct acpi_object_addr_handler {
@@ -382,7 +381,7 @@ struct acpi_object_cache_list {
/******************************************************************************
*
- * union acpi_operand_object Descriptor - a giant union of all of the above
+ * union acpi_operand_object descriptor - a giant union of all of the above
*
*****************************************************************************/
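With notify_list[2] on the object and next[2] on each handler object, one handler can sit on the system list, the device list, or both at once. A compact sketch of that dual-list insertion, using made-up types rather than the ACPICA unions:

#include <stdio.h>
#include <string.h>

#define SYSTEM_LIST 0
#define DEVICE_LIST 1

struct notify_handler_obj {
    const char *label;
    struct notify_handler_obj *next[2];     /* per-list link, like next[2] above */
};

struct notify_target {
    struct notify_handler_obj *notify_list[2];
};

static void install(struct notify_target *t, struct notify_handler_obj *h,
                    int on_system, int on_device)
{
    if (on_system) {
        h->next[SYSTEM_LIST] = t->notify_list[SYSTEM_LIST];
        t->notify_list[SYSTEM_LIST] = h;
    }
    if (on_device) {
        h->next[DEVICE_LIST] = t->notify_list[DEVICE_LIST];
        t->notify_list[DEVICE_LIST] = h;
    }
}

int main(void)
{
    struct notify_target dev;
    struct notify_handler_obj a = { "a" }, b = { "b" };
    struct notify_handler_obj *h;

    memset(&dev, 0, sizeof(dev));
    install(&dev, &a, 1, 1);    /* both lists, like ACPI_ALL_NOTIFY */
    install(&dev, &b, 1, 0);    /* system list only */

    for (h = dev.notify_list[SYSTEM_LIST]; h; h = h->next[SYSTEM_LIST])
        printf("system handler %s\n", h->label);
    return 0;
}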
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bbb34c9be4e8..3080c017f5ba 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -140,7 +140,7 @@ enum acpi_return_package_types {
*
* The main entries in the table each contain the following items:
*
- * Name - The ACPI reserved name
+ * name - The ACPI reserved name
* param_count - Number of arguments to the method
* expected_btypes - Allowed type(s) for the return value.
* 0 means that no return value is expected.
@@ -511,14 +511,14 @@ static const union acpi_predefined_info predefined_names[] =
{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
{{"_TPC", 0, ACPI_RTYPE_INTEGER}},
{{"_TPT", 1, 0}},
- {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2_ref/6_int */
+ {{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
{{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
- {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int with count */
+ {{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int with count */
{{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
{{"_TSP", 0, ACPI_RTYPE_INTEGER}},
- {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5_int */
+ {{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int */
{{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
{{"_TST", 0, ACPI_RTYPE_INTEGER}},
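The predefined-name entries described above (name, param_count, expected return types) are matched by a simple scan on the 4-character reserved name. A reduced standalone sketch of such a lookup; the struct layout and type-mask values are invented for the example:

#include <stdio.h>
#include <string.h>

struct predefined_info {
    char name[5];
    unsigned char param_count;
    unsigned int expected_btypes;   /* 0 = no return value expected */
};

static const struct predefined_info predefined_names[] = {
    { "_TMP", 0, 0x01 /* integer */ },
    { "_TPT", 1, 0x00 },
    { "_TSS", 0, 0x08 /* package */ },
};

static const struct predefined_info *match_predefined(const char *name)
{
    size_t i;

    for (i = 0; i < sizeof(predefined_names) / sizeof(predefined_names[0]); i++)
        if (strncmp(name, predefined_names[i].name, 4) == 0)
            return &predefined_names[i];
    return NULL;
}

int main(void)
{
    const struct predefined_info *info = match_predefined("_TSS");

    if (info)
        printf("%s: %u args, return mask 0x%X\n",
               info->name, info->param_count, info->expected_btypes);
    return 0;
}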
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 0404df605bc1..f196e2c9a71f 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -68,7 +68,7 @@
#define ACPI_WALK_METHOD 0x01
#define ACPI_WALK_METHOD_RESTART 0x02
-/* Flags for i_aSL compiler only */
+/* Flags for iASL compiler only */
#define ACPI_WALK_CONST_REQUIRED 0x10
#define ACPI_WALK_CONST_OPTIONAL 0x20
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 925ccf22101b..5035327ebccc 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -460,6 +460,8 @@ acpi_ut_short_divide(u64 in_dividend,
/*
* utmisc
*/
+void ut_convert_backslashes(char *pathname);
+
const char *acpi_ut_validate_exception(acpi_status status);
u8 acpi_ut_is_pci_root_bridge(char *id);
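Only the ut_convert_backslashes prototype is added here; a plausible standalone sketch of what such a pathname helper does, with the direction of the conversion stated as an assumption rather than taken from the header:

#include <stdio.h>

/* Assumed behavior: convert every backslash in a pathname to a forward
 * slash, in place.  Only the prototype appears in this header change. */
static void convert_backslashes(char *pathname)
{
    char *p;

    for (p = pathname; *p; p++)
        if (*p == '\\')
            *p = '/';
}

int main(void)
{
    char path[] = "tables\\dsdt.asl";

    convert_backslashes(path);
    printf("%s\n", path);   /* prints tables/dsdt.asl */
    return 0;
}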
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 905280fec0fa..c26f8ff6c3b9 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -182,7 +182,7 @@
/*
* Combination opcodes (actually two one-byte opcodes)
- * Used by the disassembler and i_aSL compiler
+ * Used by the disassembler and iASL compiler
*/
#define AML_LGREATEREQUAL_OP (u16) 0x9295
#define AML_LLESSEQUAL_OP (u16) 0x9294
@@ -280,7 +280,7 @@
/* Multiple/complex types */
-#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a Node - Used only by size_of operator */
+#define ARGI_DATAOBJECT 0x12 /* Buffer, String, package or reference to a node - Used only by size_of operator */
#define ARGI_COMPLEXOBJ 0x13 /* Buffer, String, or package (Used by INDEX op only) */
#define ARGI_REF_OR_STRING 0x14 /* Reference or String (Used by DEREFOF op only) */
#define ARGI_REGION_OR_BUFFER 0x15 /* Used by LOAD op only */
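A combination opcode is simply two one-byte AML opcodes packed into a u16, so the disassembler and compiler can split it back apart. A tiny standalone sketch of that decoding; whether 0x92/0x95 correspond to LNot/LLess is assumed from the surrounding names, not stated in the header:

#include <stdio.h>

int main(void)
{
    unsigned short lgreaterequal_op = 0x9295;   /* AML_LGREATEREQUAL_OP */
    unsigned char first  = (unsigned char)(lgreaterequal_op >> 8);
    unsigned char second = (unsigned char)(lgreaterequal_op & 0xFF);

    printf("combination 0x%04X = 0x%02X, 0x%02X\n",
           lgreaterequal_op, first, second);
    return 0;
}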
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 7b2128f274e7..af4947956ec2 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -98,7 +98,7 @@
#define ACPI_RESTAG_TRANSLATION "_TRA"
#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
-#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8And16(1), 16(2) */
#define ACPI_RESTAG_VENDORDATA "_VEN"
/* Default sizes for "small" resource descriptors */
@@ -235,7 +235,7 @@ AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_ADDRESS_COMMON};
struct aml_resource_extended_address64 {
AML_RESOURCE_LARGE_HEADER_COMMON
- AML_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+ AML_RESOURCE_ADDRESS_COMMON u8 revision_ID;
u8 reserved;
u64 granularity;
u64 minimum;
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 80eb1900297f..c8b5e2565b98 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -62,7 +62,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_ds_execute_arguments
*
- * PARAMETERS: Node - Object NS node
+ * PARAMETERS: node - Object NS node
* scope_node - Parent NS node
* aml_length - Length of executable AML
* aml_start - Pointer to the AML
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index effe4ca1133f..465f02134b89 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("dscontrol")
* FUNCTION: acpi_ds_exec_begin_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
- * Op - The control Op
+ * op - The control Op
*
* RETURN: Status
*
@@ -153,7 +153,7 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_exec_end_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
- * Op - The control Op
+ * op - The control Op
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index cd243cf2cab2..3da6fd8530c5 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -53,16 +53,84 @@
ACPI_MODULE_NAME("dsfield")
/* Local prototypes */
+#ifdef ACPI_ASL_COMPILER
+#include "acdisasm.h"
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+ union acpi_parse_object *op,
+ char *path,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **node);
+#endif
+
static acpi_status
acpi_ds_get_field_names(struct acpi_create_field_info *info,
struct acpi_walk_state *walk_state,
union acpi_parse_object *arg);
+#ifdef ACPI_ASL_COMPILER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_create_external_region (iASL Disassembler only)
+ *
+ * PARAMETERS: lookup_status - Status from ns_lookup operation
+ * op - Op containing the Field definition and args
+ * path - Pathname of the region
+ * walk_state - Current method state
+ * node - Where the new region node is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add region to the external list if NOT_FOUND. Create a new
+ * region node/object.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_create_external_region(acpi_status lookup_status,
+ union acpi_parse_object *op,
+ char *path,
+ struct acpi_walk_state *walk_state,
+ struct acpi_namespace_node **node)
+{
+ acpi_status status;
+ union acpi_operand_object *obj_desc;
+
+ if (lookup_status != AE_NOT_FOUND) {
+ return (lookup_status);
+ }
+
+ /*
+ * Table disassembly:
+ * operation_region not found. Generate an External for it, and
+ * insert the name into the namespace.
+ */
+ acpi_dm_add_to_external_list(op, path, ACPI_TYPE_REGION, 0);
+ status = acpi_ns_lookup(walk_state->scope_info, path, ACPI_TYPE_REGION,
+ ACPI_IMODE_LOAD_PASS1, ACPI_NS_SEARCH_PARENT,
+ walk_state, node);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Must create and install a region object for the new node */
+
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
+ if (!obj_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ obj_desc->region.node = *node;
+ status = acpi_ns_attach_object(*node, obj_desc, ACPI_TYPE_REGION);
+ return (status);
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ds_create_buffer_field
*
- * PARAMETERS: Op - Current parse op (create_xXField)
+ * PARAMETERS: op - Current parse op (create_XXField)
* walk_state - Current state
*
* RETURN: Status
@@ -99,7 +167,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
arg = acpi_ps_get_arg(op, 3);
} else {
- /* For all other create_xXXField operators, name is the 3rd argument */
+ /* For all other create_XXXField operators, name is the 3rd argument */
arg = acpi_ps_get_arg(op, 2);
}
@@ -203,9 +271,9 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
*
* FUNCTION: acpi_ds_get_field_names
*
- * PARAMETERS: Info - create_field info structure
+ * PARAMETERS: info - create_field info structure
* walk_state - Current method state
- * Arg - First parser arg for the field name list
+ * arg - First parser arg for the field name list
*
* RETURN: Status
*
@@ -234,10 +302,10 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
while (arg) {
/*
* Four types of field elements are handled:
- * 1) Name - Enters a new named field into the namespace
- * 2) Offset - specifies a bit offset
+ * 1) name - Enters a new named field into the namespace
+ * 2) offset - specifies a bit offset
* 3) access_as - changes the access mode/attributes
- * 4) Connection - Associate a resource template with the field
+ * 4) connection - Associate a resource template with the field
*/
switch (arg->common.aml_opcode) {
case AML_INT_RESERVEDFIELD_OP:
@@ -389,7 +457,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
*
* FUNCTION: acpi_ds_create_field
*
- * PARAMETERS: Op - Op containing the Field definition and args
+ * PARAMETERS: op - Op containing the Field definition and args
* region_node - Object for the containing Operation Region
* walk_state - Current method state
*
@@ -413,12 +481,19 @@ acpi_ds_create_field(union acpi_parse_object *op,
/* First arg is the name of the parent op_region (must already exist) */
arg = op->common.value.arg;
+
if (!region_node) {
status =
acpi_ns_lookup(walk_state->scope_info,
arg->common.value.name, ACPI_TYPE_REGION,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+ status = acpi_ds_create_external_region(status, arg,
+ arg->common.value.name,
+ walk_state,
+ &region_node);
+#endif
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
@@ -446,7 +521,7 @@ acpi_ds_create_field(union acpi_parse_object *op,
*
* FUNCTION: acpi_ds_init_field_objects
*
- * PARAMETERS: Op - Op containing the Field definition and args
+ * PARAMETERS: op - Op containing the Field definition and args
* walk_state - Current method state
*
* RETURN: Status
@@ -561,7 +636,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
*
* FUNCTION: acpi_ds_create_bank_field
*
- * PARAMETERS: Op - Op containing the Field definition and args
+ * PARAMETERS: op - Op containing the Field definition and args
* region_node - Object for the containing Operation Region
* walk_state - Current method state
*
@@ -591,6 +666,12 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
arg->common.value.name, ACPI_TYPE_REGION,
ACPI_IMODE_EXECUTE, ACPI_NS_SEARCH_PARENT,
walk_state, &region_node);
+#ifdef ACPI_ASL_COMPILER
+ status = acpi_ds_create_external_region(status, arg,
+ arg->common.value.name,
+ walk_state,
+ &region_node);
+#endif
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(arg->common.value.name, status);
return_ACPI_STATUS(status);
@@ -645,7 +726,7 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
*
* FUNCTION: acpi_ds_create_index_field
*
- * PARAMETERS: Op - Op containing the Field definition and args
+ * PARAMETERS: op - Op containing the Field definition and args
* region_node - Object for the containing Operation Region
* walk_state - Current method state
*
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 9e5ac7f780a7..87eff701ecfa 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -60,8 +60,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
* FUNCTION: acpi_ds_init_one_object
*
* PARAMETERS: obj_handle - Node for the object
- * Level - Current nesting level
- * Context - Points to a init info struct
+ * level - Current nesting level
+ * context - Points to a init info struct
* return_value - Not used
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 00f5dab5bcc0..aa9a5d4e4052 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -61,7 +61,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
*
* FUNCTION: acpi_ds_method_error
*
- * PARAMETERS: Status - Execution status
+ * PARAMETERS: status - Execution status
* walk_state - Current state
*
* RETURN: Status
@@ -306,9 +306,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
*
* FUNCTION: acpi_ds_call_control_method
*
- * PARAMETERS: Thread - Info for this thread
+ * PARAMETERS: thread - Info for this thread
* this_walk_state - Current walk state
- * Op - Current Op to be walked
+ * op - Current Op to be walked
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index b40bd507be5d..8d55cebaa656 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -177,7 +177,7 @@ void acpi_ds_method_data_delete_all(struct acpi_walk_state *walk_state)
*
* FUNCTION: acpi_ds_method_data_init_args
*
- * PARAMETERS: *Params - Pointer to a parameter list for the method
+ * PARAMETERS: *params - Pointer to a parameter list for the method
* max_param_count - The arg count for this method
* walk_state - Current walk state object
*
@@ -232,11 +232,11 @@ acpi_ds_method_data_init_args(union acpi_operand_object **params,
*
* FUNCTION: acpi_ds_method_data_get_node
*
- * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
- * Index - Which Local or Arg whose type to get
+ * index - Which Local or Arg whose type to get
* walk_state - Current walk state object
- * Node - Where the node is returned.
+ * node - Where the node is returned.
*
* RETURN: Status and node
*
@@ -296,10 +296,10 @@ acpi_ds_method_data_get_node(u8 type,
*
* FUNCTION: acpi_ds_method_data_set_value
*
- * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
- * Index - Which Local or Arg to get
- * Object - Object to be inserted into the stack entry
+ * index - Which Local or Arg to get
+ * object - Object to be inserted into the stack entry
* walk_state - Current walk state object
*
* RETURN: Status
@@ -336,7 +336,7 @@ acpi_ds_method_data_set_value(u8 type,
* Increment ref count so object can't be deleted while installed.
* NOTE: We do not copy the object in order to preserve the call by
* reference semantics of ACPI Control Method invocation.
- * (See ACPI Specification 2.0_c)
+ * (See ACPI Specification 2.0C)
*/
acpi_ut_add_reference(object);
@@ -350,9 +350,9 @@ acpi_ds_method_data_set_value(u8 type,
*
* FUNCTION: acpi_ds_method_data_get_value
*
- * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
- * Index - Which local_var or argument to get
+ * index - Which localVar or argument to get
* walk_state - Current walk state object
* dest_desc - Where Arg or Local value is returned
*
@@ -458,9 +458,9 @@ acpi_ds_method_data_get_value(u8 type,
*
* FUNCTION: acpi_ds_method_data_delete_value
*
- * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
- * Index - Which local_var or argument to delete
+ * index - Which localVar or argument to delete
* walk_state - Current walk state object
*
* RETURN: None
@@ -515,9 +515,9 @@ acpi_ds_method_data_delete_value(u8 type,
*
* FUNCTION: acpi_ds_store_object_to_local
*
- * PARAMETERS: Type - Either ACPI_REFCLASS_LOCAL or
+ * PARAMETERS: type - Either ACPI_REFCLASS_LOCAL or
* ACPI_REFCLASS_ARG
- * Index - Which Local or Arg to set
+ * index - Which Local or Arg to set
* obj_desc - Value to be stored
* walk_state - Current walk state
*
@@ -670,8 +670,8 @@ acpi_ds_store_object_to_local(u8 type,
*
* FUNCTION: acpi_ds_method_data_get_type
*
- * PARAMETERS: Opcode - Either AML_LOCAL_OP or AML_ARG_OP
- * Index - Which Local or Arg whose type to get
+ * PARAMETERS: opcode - Either AML_LOCAL_OP or AML_ARG_OP
+ * index - Which Local or Arg whose type to get
* walk_state - Current walk state object
*
* RETURN: Data type of current value of the selected Arg or Local
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index d7045ca3e32a..68592dd34960 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -64,7 +64,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_build_internal_object
*
* PARAMETERS: walk_state - Current walk state
- * Op - Parser object to be translated
+ * op - Parser object to be translated
* obj_desc_ptr - Where the ACPI internal object is returned
*
* RETURN: Status
@@ -250,7 +250,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_build_internal_buffer_obj
*
* PARAMETERS: walk_state - Current walk state
- * Op - Parser object to be translated
+ * op - Parser object to be translated
* buffer_length - Length of the buffer
* obj_desc_ptr - Where the ACPI internal object is returned
*
@@ -354,7 +354,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_build_internal_package_obj
*
* PARAMETERS: walk_state - Current walk state
- * Op - Parser object to be translated
+ * op - Parser object to be translated
* element_count - Number of elements in the package - this is
* the num_elements argument to Package()
* obj_desc_ptr - Where the ACPI internal object is returned
@@ -547,8 +547,8 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
- * Node - NS Node to be initialized
- * Op - Parser object to be translated
+ * node - NS Node to be initialized
+ * op - Parser object to be translated
*
* RETURN: Status
*
@@ -611,8 +611,8 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_init_object_from_op
*
* PARAMETERS: walk_state - Current walk state
- * Op - Parser op used to init the internal object
- * Opcode - AML opcode associated with the object
+ * op - Parser op used to init the internal object
+ * opcode - AML opcode associated with the object
* ret_obj_desc - Namespace object to be initialized
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index e5eff7585102..aa34d8984d34 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -286,7 +286,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
* FUNCTION: acpi_ds_eval_buffer_field_operands
*
* PARAMETERS: walk_state - Current walk
- * Op - A valid buffer_field Op object
+ * op - A valid buffer_field Op object
*
* RETURN: Status
*
@@ -370,7 +370,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_eval_region_operands
*
* PARAMETERS: walk_state - Current walk
- * Op - A valid region Op object
+ * op - A valid region Op object
*
* RETURN: Status
*
@@ -397,7 +397,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
*/
node = op->common.node;
- /* next_op points to the op that holds the space_iD */
+ /* next_op points to the op that holds the space_ID */
next_op = op->common.value.arg;
@@ -461,7 +461,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_eval_table_region_operands
*
* PARAMETERS: walk_state - Current walk
- * Op - A valid region Op object
+ * op - A valid region Op object
*
* RETURN: Status
*
@@ -560,7 +560,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_eval_data_object_operands
*
* PARAMETERS: walk_state - Current walk
- * Op - A valid data_object Op object
+ * op - A valid data_object Op object
* obj_desc - data_object
*
* RETURN: Status
@@ -662,7 +662,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ds_eval_bank_field_operands
*
* PARAMETERS: walk_state - Current walk
- * Op - A valid bank_field Op object
+ * op - A valid bank_field Op object
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 1abcda31037f..73a5447475f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -157,7 +157,7 @@ acpi_ds_do_implicit_return(union acpi_operand_object *return_desc,
*
* FUNCTION: acpi_ds_is_result_used
*
- * PARAMETERS: Op - Current Op
+ * PARAMETERS: op - Current Op
* walk_state - Current State
*
* RETURN: TRUE if result is used, FALSE otherwise
@@ -323,7 +323,7 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
*
* FUNCTION: acpi_ds_delete_result_if_not_used
*
- * PARAMETERS: Op - Current parse Op
+ * PARAMETERS: op - Current parse Op
* result_obj - Result of the operation
* walk_state - Current state
*
@@ -445,7 +445,7 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
* FUNCTION: acpi_ds_create_operand
*
* PARAMETERS: walk_state - Current walk state
- * Arg - Parse object for the argument
+ * arg - Parse object for the argument
* arg_index - Which argument (zero based)
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 9e9490a9cbf0..f6c4295470ae 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -85,8 +85,8 @@ void acpi_ds_scope_stack_clear(struct acpi_walk_state *walk_state)
*
* FUNCTION: acpi_ds_scope_stack_push
*
- * PARAMETERS: Node - Name to be made current
- * Type - Type of frame being pushed
+ * PARAMETERS: node - Name to be made current
+ * type - Type of frame being pushed
* walk_state - Current state
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index c9c2ac13e7cc..d0e6555061e4 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -58,7 +58,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
*
* FUNCTION: acpi_ds_result_pop
*
- * PARAMETERS: Object - Where to return the popped object
+ * PARAMETERS: object - Where to return the popped object
* walk_state - Current Walk state
*
* RETURN: Status
@@ -132,7 +132,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
*
* FUNCTION: acpi_ds_result_push
*
- * PARAMETERS: Object - Where to return the popped object
+ * PARAMETERS: object - Where to return the popped object
* walk_state - Current Walk state
*
* RETURN: Status
@@ -296,7 +296,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
*
* FUNCTION: acpi_ds_obj_stack_push
*
- * PARAMETERS: Object - Object to push
+ * PARAMETERS: object - Object to push
* walk_state - Current Walk state
*
* RETURN: Status
@@ -433,7 +433,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
*
* FUNCTION: acpi_ds_get_current_walk_state
*
- * PARAMETERS: Thread - Get current active state for this Thread
+ * PARAMETERS: thread - Get current active state for this Thread
*
* RETURN: Pointer to the current walk state
*
@@ -462,7 +462,7 @@ struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
* FUNCTION: acpi_ds_push_walk_state
*
* PARAMETERS: walk_state - State to push
- * Thread - Thread state object
+ * thread - Thread state object
*
* RETURN: None
*
@@ -486,7 +486,7 @@ acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
*
* FUNCTION: acpi_ds_pop_walk_state
*
- * PARAMETERS: Thread - Current thread state
+ * PARAMETERS: thread - Current thread state
*
* RETURN: A walk_state object popped from the thread's stack
*
@@ -525,9 +525,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
* FUNCTION: acpi_ds_create_walk_state
*
* PARAMETERS: owner_id - ID for object creation
- * Origin - Starting point for this walk
+ * origin - Starting point for this walk
* method_desc - Method object
- * Thread - Current thread state
+ * thread - Current thread state
*
* RETURN: Pointer to the new walk state.
*
@@ -578,11 +578,11 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
* FUNCTION: acpi_ds_init_aml_walk
*
* PARAMETERS: walk_state - New state to be initialized
- * Op - Current parse op
+ * op - Current parse op
* method_node - Control method NS node, if any
* aml_start - Start of AML
* aml_length - Length of AML
- * Info - Method info block (params, etc.)
+ * info - Method info block (params, etc.)
* pass_number - 1, 2, or 3
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 07e4dc44f81c..d4acfbbe5b29 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -251,7 +251,7 @@ u32 acpi_ev_fixed_event_detect(void)
*
* FUNCTION: acpi_ev_fixed_event_dispatch
*
- * PARAMETERS: Event - Event type
+ * PARAMETERS: event - Event type
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index cfeab38795d8..af14a7137632 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -135,7 +135,7 @@ acpi_status acpi_ev_remove_global_lock_handler(void)
*
* FUNCTION: acpi_ev_global_lock_handler
*
- * PARAMETERS: Context - From thread interface, not used
+ * PARAMETERS: context - From thread interface, not used
*
* RETURN: ACPI_INTERRUPT_HANDLED
*
@@ -182,7 +182,7 @@ static u32 acpi_ev_global_lock_handler(void *context)
*
* FUNCTION: acpi_ev_acquire_global_lock
*
- * PARAMETERS: Timeout - Max time to wait for the lock, in millisec.
+ * PARAMETERS: timeout - Max time to wait for the lock, in millisec.
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 8ba0e5f17091..afbd5cb391f6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -466,7 +466,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
- struct acpi_gpe_notify_object *notify_object;
+ struct acpi_gpe_notify_info *notify;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@@ -517,17 +517,17 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* completes. The notify handlers are NOT invoked synchronously
* from this thread -- because handlers may in turn run other
* control methods.
+ *
+ * June 2012: Expand implicit notify mechanism to support
+ * notifies on multiple device objects.
*/
- status = acpi_ev_queue_notify_request(
- local_gpe_event_info->dispatch.device.node,
- ACPI_NOTIFY_DEVICE_WAKE);
-
- notify_object = local_gpe_event_info->dispatch.device.next;
- while (ACPI_SUCCESS(status) && notify_object) {
- status = acpi_ev_queue_notify_request(
- notify_object->node,
- ACPI_NOTIFY_DEVICE_WAKE);
- notify_object = notify_object->next;
+ notify = local_gpe_event_info->dispatch.notify_list;
+ while (ACPI_SUCCESS(status) && notify) {
+ status =
+ acpi_ev_queue_notify_request(notify->device_node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+
+ notify = notify->next;
}
break;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 23a3ca86b2eb..8cf4c104c7b7 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -318,7 +318,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* FUNCTION: acpi_ev_create_gpe_block
*
* PARAMETERS: gpe_device - Handle to the parent GPE block
- * gpe_block_address - Address and space_iD
+ * gpe_block_address - Address and space_ID
* register_count - Number of GPE register pairs in the block
* gpe_block_base_number - Starting GPE number for the block
* interrupt_number - H/W interrupt for the block
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 3c43796b8361..cb50dd91bc18 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("evgpeutil")
* FUNCTION: acpi_ev_walk_gpe_list
*
* PARAMETERS: gpe_walk_callback - Routine called for each GPE block
- * Context - Value passed to callback
+ * context - Value passed to callback
*
* RETURN: Status
*
@@ -347,6 +347,8 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *context)
{
struct acpi_gpe_event_info *gpe_event_info;
+ struct acpi_gpe_notify_info *notify;
+ struct acpi_gpe_notify_info *next;
u32 i;
u32 j;
@@ -365,10 +367,28 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
+
+ /* Delete an installed handler block */
+
ACPI_FREE(gpe_event_info->dispatch.handler);
gpe_event_info->dispatch.handler = NULL;
gpe_event_info->flags &=
~ACPI_GPE_DISPATCH_MASK;
+ } else if ((gpe_event_info->
+ flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NOTIFY) {
+
+ /* Delete the implicit notification device list */
+
+ notify = gpe_event_info->dispatch.notify_list;
+ while (notify) {
+ next = notify->next;
+ ACPI_FREE(notify);
+ notify = next;
+ }
+ gpe_event_info->dispatch.notify_list = NULL;
+ gpe_event_info->flags &=
+ ~ACPI_GPE_DISPATCH_MASK;
}
}
}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 51ef9f5e002d..51f537937c1f 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -56,7 +56,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context);
*
* FUNCTION: acpi_ev_is_notify_object
*
- * PARAMETERS: Node - Node to check
+ * PARAMETERS: node - Node to check
*
* RETURN: TRUE if notifies allowed on this object
*
@@ -86,7 +86,7 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
*
* FUNCTION: acpi_ev_queue_notify_request
*
- * PARAMETERS: Node - NS node for the notified object
+ * PARAMETERS: node - NS node for the notified object
* notify_value - Value from the Notify() request
*
* RETURN: Status
@@ -101,102 +101,77 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
u32 notify_value)
{
union acpi_operand_object *obj_desc;
- union acpi_operand_object *handler_obj = NULL;
- union acpi_generic_state *notify_info;
+ union acpi_operand_object *handler_list_head = NULL;
+ union acpi_generic_state *info;
+ u8 handler_list_id = 0;
acpi_status status = AE_OK;
ACPI_FUNCTION_NAME(ev_queue_notify_request);
- /*
- * For value 0x03 (Ejection Request), may need to run a device method.
- * For value 0x02 (Device Wake), if _PRW exists, may need to run
- * the _PS0 method.
- * For value 0x80 (Status Change) on the power button or sleep button,
- * initiate soft-off or sleep operation.
- *
- * For all cases, simply dispatch the notify to the handler.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
- acpi_ut_get_node_name(node),
- acpi_ut_get_type_name(node->type), notify_value,
- acpi_ut_get_notify_name(notify_value), node));
+ /* Are Notifies allowed on this object? */
- /* Get the notify object attached to the NS Node */
-
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
-
- /* We have the notify object, Get the correct handler */
-
- switch (node->type) {
+ if (!acpi_ev_is_notify_object(node)) {
+ return (AE_TYPE);
+ }
- /* Notify is allowed only on these types */
+ /* Get the correct notify list type (System or Device) */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_THERMAL:
- case ACPI_TYPE_PROCESSOR:
+ if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+ handler_list_id = ACPI_SYSTEM_HANDLER_LIST;
+ } else {
+ handler_list_id = ACPI_DEVICE_HANDLER_LIST;
+ }
- if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
- handler_obj =
- obj_desc->common_notify.system_notify;
- } else {
- handler_obj =
- obj_desc->common_notify.device_notify;
- }
- break;
+ /* Get the notify object attached to the namespace Node */
- default:
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (obj_desc) {
- /* All other types are not supported */
+ /* We have an attached object, Get the correct handler list */
- return (AE_TYPE);
- }
+ handler_list_head =
+ obj_desc->common_notify.notify_list[handler_list_id];
}
/*
- * If there is a handler to run, schedule the dispatcher.
- * Check for:
- * 1) Global system notify handler
- * 2) Global device notify handler
- * 3) Per-device notify handler
+ * If there is no notify handler (Global or Local)
+ * for this object, just ignore the notify
*/
- if ((acpi_gbl_system_notify.handler &&
- (notify_value <= ACPI_MAX_SYS_NOTIFY)) ||
- (acpi_gbl_device_notify.handler &&
- (notify_value > ACPI_MAX_SYS_NOTIFY)) || handler_obj) {
- notify_info = acpi_ut_create_generic_state();
- if (!notify_info) {
- return (AE_NO_MEMORY);
- }
+ if (!acpi_gbl_global_notify[handler_list_id].handler
+ && !handler_list_head) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No notify handler for Notify, ignoring (%4.4s, %X) node %p\n",
+ acpi_ut_get_node_name(node), notify_value,
+ node));
- if (!handler_obj) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Executing system notify handler for Notify (%4.4s, %X) "
- "node %p\n",
- acpi_ut_get_node_name(node),
- notify_value, node));
- }
+ return (AE_OK);
+ }
- notify_info->common.descriptor_type =
- ACPI_DESC_TYPE_STATE_NOTIFY;
- notify_info->notify.node = node;
- notify_info->notify.value = (u16) notify_value;
- notify_info->notify.handler_obj = handler_obj;
+ /* Setup notify info and schedule the notify dispatcher */
- status =
- acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
- notify_info);
- if (ACPI_FAILURE(status)) {
- acpi_ut_delete_generic_state(notify_info);
- }
- } else {
- /* There is no notify handler (per-device or system) for this device */
+ info = acpi_ut_create_generic_state();
+ if (!info) {
+ return (AE_NO_MEMORY);
+ }
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "No notify handler for Notify (%4.4s, %X) node %p\n",
- acpi_ut_get_node_name(node), notify_value,
- node));
+ info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY;
+
+ info->notify.node = node;
+ info->notify.value = (u16)notify_value;
+ info->notify.handler_list_id = handler_list_id;
+ info->notify.handler_list_head = handler_list_head;
+ info->notify.global = &acpi_gbl_global_notify[handler_list_id];
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n",
+ acpi_ut_get_node_name(node),
+ acpi_ut_get_type_name(node->type), notify_value,
+ acpi_ut_get_notify_name(notify_value), node));
+
+ status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+ info);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_generic_state(info);
}
return (status);
@@ -206,7 +181,7 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
*
* FUNCTION: acpi_ev_notify_dispatch
*
- * PARAMETERS: Context - To be passed to the notify handler
+ * PARAMETERS: context - To be passed to the notify handler
*
* RETURN: None.
*
@@ -217,60 +192,34 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context)
{
- union acpi_generic_state *notify_info =
- (union acpi_generic_state *)context;
- acpi_notify_handler global_handler = NULL;
- void *global_context = NULL;
+ union acpi_generic_state *info = (union acpi_generic_state *)context;
union acpi_operand_object *handler_obj;
ACPI_FUNCTION_ENTRY();
- /*
- * We will invoke a global notify handler if installed. This is done
- * _before_ we invoke the per-device handler attached to the device.
- */
- if (notify_info->notify.value <= ACPI_MAX_SYS_NOTIFY) {
-
- /* Global system notification handler */
-
- if (acpi_gbl_system_notify.handler) {
- global_handler = acpi_gbl_system_notify.handler;
- global_context = acpi_gbl_system_notify.context;
- }
- } else {
- /* Global driver notification handler */
-
- if (acpi_gbl_device_notify.handler) {
- global_handler = acpi_gbl_device_notify.handler;
- global_context = acpi_gbl_device_notify.context;
- }
- }
-
- /* Invoke the system handler first, if present */
+ /* Invoke a global notify handler if installed */
- if (global_handler) {
- global_handler(notify_info->notify.node,
- notify_info->notify.value, global_context);
+ if (info->notify.global->handler) {
+ info->notify.global->handler(info->notify.node,
+ info->notify.value,
+ info->notify.global->context);
}
- /* Now invoke the per-device handler, if present */
+ /* Now invoke the local notify handler(s) if any are installed */
- handler_obj = notify_info->notify.handler_obj;
- if (handler_obj) {
- struct acpi_object_notify_handler *notifier;
+ handler_obj = info->notify.handler_list_head;
+ while (handler_obj) {
+ handler_obj->notify.handler(info->notify.node,
+ info->notify.value,
+ handler_obj->notify.context);
- notifier = &handler_obj->notify;
- while (notifier) {
- notifier->handler(notify_info->notify.node,
- notify_info->notify.value,
- notifier->context);
- notifier = notifier->next;
- }
+ handler_obj =
+ handler_obj->notify.next[info->notify.handler_list_id];
}
/* All done with the info object */
- acpi_ut_delete_generic_state(notify_info);
+ acpi_ut_delete_generic_state(info);
}
#if (!ACPI_REDUCED_HARDWARE)
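The reworked queue/dispatch path in evmisc.c boils down to: pick the handler list from the notify value, run the matching global handler first, then walk the per-object list for that same index. A standalone sketch of that flow with stand-in types and names:

#include <stdio.h>

#define MAX_SYS_NOTIFY  0x7F
#define SYSTEM_LIST     0
#define DEVICE_LIST     1

typedef void (*notify_fn)(const char *node, unsigned int value, void *ctx);

struct local_handler {
    notify_fn handler;
    void *context;
    struct local_handler *next[2];
};

struct global_handler {
    notify_fn handler;
    void *context;
};

static struct global_handler global_notify[2];

/* Global handler for the selected list runs first, then every local
 * handler chained on that same list index. */
static void dispatch_notify(const char *node, unsigned int value,
                            struct local_handler *list_head[2])
{
    int id = (value <= MAX_SYS_NOTIFY) ? SYSTEM_LIST : DEVICE_LIST;
    struct local_handler *h;

    if (global_notify[id].handler)
        global_notify[id].handler(node, value, global_notify[id].context);

    for (h = list_head[id]; h; h = h->next[id])
        h->handler(node, value, h->context);
}

static void print_handler(const char *node, unsigned int value, void *ctx)
{
    printf("%s: notify 0x%02X (%s)\n", node, value, (const char *)ctx);
}

int main(void)
{
    struct local_handler local = { print_handler, "local", { NULL, NULL } };
    struct local_handler *lists[2] = { NULL, NULL };

    lists[DEVICE_LIST] = &local;
    global_notify[DEVICE_LIST].handler = print_handler;
    global_notify[DEVICE_LIST].context = "global";

    dispatch_notify("\\_SB.LID0", 0x80, lists); /* global first, then local */
    return 0;
}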
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 1b0180a1b798..0cc6a16fedc7 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -150,7 +150,7 @@ acpi_status acpi_ev_install_region_handlers(void)
*
* FUNCTION: acpi_ev_has_default_handler
*
- * PARAMETERS: Node - Namespace node for the device
+ * PARAMETERS: node - Namespace node for the device
* space_id - The address space ID
*
* RETURN: TRUE if default handler is installed, FALSE otherwise
@@ -244,7 +244,7 @@ acpi_status acpi_ev_initialize_op_regions(void)
* FUNCTION: acpi_ev_execute_reg_method
*
* PARAMETERS: region_obj - Region object
- * Function - Passed to _REG: On (1) or Off (0)
+ * function - Passed to _REG: On (1) or Off (0)
*
* RETURN: Status
*
@@ -286,10 +286,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
/*
* The _REG method has two arguments:
*
- * Arg0 - Integer:
+ * arg0 - Integer:
* Operation region space ID Same value as region_obj->Region.space_id
*
- * Arg1 - Integer:
+ * arg1 - Integer:
* connection status 1 for connecting the handler, 0 for disconnecting
* the handler (Passed as a parameter)
*/
@@ -330,10 +330,10 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
*
* PARAMETERS: region_obj - Internal region object
* field_obj - Corresponding field. Can be NULL.
- * Function - Read or Write operation
+ * function - Read or Write operation
* region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
- * Value - Pointer to in or out value, must be
+ * value - Pointer to in or out value, must be
* a full 64-bit integer
*
* RETURN: Status
@@ -840,11 +840,11 @@ acpi_ev_install_handler(acpi_handle obj_handle,
*
* FUNCTION: acpi_ev_install_space_handler
*
- * PARAMETERS: Node - Namespace node for the device
+ * PARAMETERS: node - Namespace node for the device
* space_id - The address space ID
- * Handler - Address of the handler
- * Setup - Address of the setup function
- * Context - Value passed to the handler on each access
+ * handler - Address of the handler
+ * setup - Address of the setup function
+ * context - Value passed to the handler on each access
*
* RETURN: Status
*
@@ -1061,7 +1061,7 @@ acpi_ev_install_space_handler(struct acpi_namespace_node * node,
*
* FUNCTION: acpi_ev_execute_reg_methods
*
- * PARAMETERS: Node - Namespace node for the device
+ * PARAMETERS: node - Namespace node for the device
* space_id - The address space ID
*
* RETURN: Status
@@ -1104,7 +1104,7 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
*
* PARAMETERS: walk_namespace callback
*
- * DESCRIPTION: Run _REG method for region objects of the requested space_iD
+ * DESCRIPTION: Run _REG method for region objects of the requested spaceID
*
******************************************************************************/
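For reference, a hedged sketch of how a _REG evaluation with the two arguments described above could be expressed through the public acpi_evaluate_object() interface; ACPICA performs this internally, and the handle and space ID below are placeholders:

#include <linux/acpi.h>

static acpi_status run_reg(acpi_handle region_parent, u32 space_id, u32 connect)
{
    union acpi_object args[2];
    struct acpi_object_list arg_list = { .count = 2, .pointer = args };

    args[0].type = ACPI_TYPE_INTEGER;
    args[0].integer.value = space_id;   /* Arg0: operation region space ID */
    args[1].type = ACPI_TYPE_INTEGER;
    args[1].integer.value = connect;    /* Arg1: 1 = connect, 0 = disconnect */

    return acpi_evaluate_object(region_parent, "_REG", &arg_list, NULL);
}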
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 819c17f5897a..4c1c8261166f 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -56,8 +56,8 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
*
* FUNCTION: acpi_ev_system_memory_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
@@ -118,8 +118,8 @@ acpi_ev_system_memory_region_setup(acpi_handle handle,
*
* FUNCTION: acpi_ev_io_space_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
@@ -149,8 +149,8 @@ acpi_ev_io_space_region_setup(acpi_handle handle,
*
* FUNCTION: acpi_ev_pci_config_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
@@ -338,7 +338,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*
* FUNCTION: acpi_ev_is_pci_root_bridge
*
- * PARAMETERS: Node - Device node being examined
+ * PARAMETERS: node - Device node being examined
*
* RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
*
@@ -393,14 +393,14 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
*
* FUNCTION: acpi_ev_pci_bar_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
* RETURN: Status
*
- * DESCRIPTION: Setup a pci_bAR operation region
+ * DESCRIPTION: Setup a pci_BAR operation region
*
* MUTEX: Assumes namespace is not locked
*
@@ -420,8 +420,8 @@ acpi_ev_pci_bar_region_setup(acpi_handle handle,
*
* FUNCTION: acpi_ev_cmos_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
@@ -447,8 +447,8 @@ acpi_ev_cmos_region_setup(acpi_handle handle,
*
* FUNCTION: acpi_ev_default_region_setup
*
- * PARAMETERS: Handle - Region we are interested in
- * Function - Start or stop
+ * PARAMETERS: handle - Region we are interested in
+ * function - Start or stop
* handler_context - Address space handler context
* region_context - Region specific context
*
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 6a57aa2d70d1..f9661e2b46a9 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -56,7 +56,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
*
* FUNCTION: acpi_ev_sci_xrupt_handler
*
- * PARAMETERS: Context - Calling Context
+ * PARAMETERS: context - Calling Context
*
* RETURN: Status code indicates whether interrupt was handled.
*
@@ -96,7 +96,7 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
*
* FUNCTION: acpi_ev_gpe_xrupt_handler
*
- * PARAMETERS: Context - Calling Context
+ * PARAMETERS: context - Calling Context
*
* RETURN: Status code indicates whether interrupt was handled.
*
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 44bef5744ebb..7587eb6c9584 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -54,86 +54,25 @@ ACPI_MODULE_NAME("evxface")
/*******************************************************************************
*
- * FUNCTION: acpi_populate_handler_object
- *
- * PARAMETERS: handler_obj - Handler object to populate
- * handler_type - The type of handler:
- * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- * ACPI_ALL_NOTIFY: both system and device
- * handler - Address of the handler
- * context - Value passed to the handler on each GPE
- * next - Address of a handler object to link to
- *
- * RETURN: None
- *
- * DESCRIPTION: Populate a handler object.
- *
- ******************************************************************************/
-static void
-acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
- u32 handler_type,
- acpi_notify_handler handler, void *context,
- struct acpi_object_notify_handler *next)
-{
- handler_obj->handler_type = handler_type;
- handler_obj->handler = handler;
- handler_obj->context = context;
- handler_obj->next = next;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_add_handler_object
- *
- * PARAMETERS: parent_obj - Parent of the new object
- * handler - Address of the handler
- * context - Value passed to the handler on each GPE
- *
- * RETURN: Status
- *
- * DESCRIPTION: Create a new handler object and populate it.
- *
- ******************************************************************************/
-static acpi_status
-acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
- acpi_notify_handler handler, void *context)
-{
- struct acpi_object_notify_handler *handler_obj;
-
- /* The parent must not be a defice notify handler object. */
- if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
- return AE_BAD_PARAMETER;
-
- handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
- if (!handler_obj)
- return AE_NO_MEMORY;
-
- acpi_populate_handler_object(handler_obj,
- ACPI_SYSTEM_NOTIFY,
- handler, context,
- parent_obj->next);
- parent_obj->next = handler_obj;
-
- return AE_OK;
-}
-
-
-/*******************************************************************************
- *
* FUNCTION: acpi_install_notify_handler
*
* PARAMETERS: Device - The device for which notifies will be handled
* handler_type - The type of handler:
- * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- * ACPI_ALL_NOTIFY: both system and device
+ * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
+ * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
+ * ACPI_ALL_NOTIFY: Both System and Device
* Handler - Address of the handler
* Context - Value passed to the handler on each GPE
*
* RETURN: Status
*
- * DESCRIPTION: Install a handler for notifies on an ACPI device
+ * DESCRIPTION: Install a handler for notifications on an ACPI Device,
+ * thermal_zone, or Processor object.
+ *
+ * NOTES: The Root namespace object may have only one handler for each
+ * type of notify (System/Device). Device/Thermal/Processor objects
+ * may have one device notify handler, and multiple system notify
+ * handlers.
*
******************************************************************************/
acpi_status
@@ -141,17 +80,19 @@ acpi_install_notify_handler(acpi_handle device,
u32 handler_type,
acpi_notify_handler handler, void *context)
{
+ struct acpi_namespace_node *node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, device);
union acpi_operand_object *obj_desc;
- union acpi_operand_object *notify_obj;
- struct acpi_namespace_node *node;
+ union acpi_operand_object *handler_obj;
acpi_status status;
+ u32 i;
ACPI_FUNCTION_TRACE(acpi_install_notify_handler);
/* Parameter validation */
- if ((!device) ||
- (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ if ((!device) || (!handler) || (!handler_type) ||
+ (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -160,144 +101,112 @@ acpi_install_notify_handler(acpi_handle device,
return_ACPI_STATUS(status);
}
- /* Convert and validate the device handle */
-
- node = acpi_ns_validate_handle(device);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
- }
-
/*
* Root Object:
* Registering a notify handler on the root object indicates that the
* caller wishes to receive notifications for all objects. Note that
- * only one <external> global handler can be regsitered (per notify type).
+ * only one global handler can be registered per notify type.
+ * Ensure that a handler is not already installed.
*/
if (device == ACPI_ROOT_OBJECT) {
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ if (handler_type & (i + 1)) {
+ if (acpi_gbl_global_notify[i].handler) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
- /* Make sure the handler is not already installed */
-
- if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
- acpi_gbl_system_notify.handler) ||
- ((handler_type & ACPI_DEVICE_NOTIFY) &&
- acpi_gbl_device_notify.handler)) {
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- }
-
- if (handler_type & ACPI_SYSTEM_NOTIFY) {
- acpi_gbl_system_notify.node = node;
- acpi_gbl_system_notify.handler = handler;
- acpi_gbl_system_notify.context = context;
- }
-
- if (handler_type & ACPI_DEVICE_NOTIFY) {
- acpi_gbl_device_notify.node = node;
- acpi_gbl_device_notify.handler = handler;
- acpi_gbl_device_notify.context = context;
+ acpi_gbl_global_notify[i].handler = handler;
+ acpi_gbl_global_notify[i].context = context;
+ }
}
- /* Global notify handler installed */
+ goto unlock_and_exit; /* Global notify handler installed, all done */
}
/*
* All Other Objects:
- * Caller will only receive notifications specific to the target object.
- * Note that only certain object types can receive notifications.
+ * Caller will only receive notifications specific to the target
+ * object. Note that only certain object types are allowed to
+ * receive notifications.
*/
- else {
- /* Notifies allowed on this object? */
- if (!acpi_ev_is_notify_object(node)) {
- status = AE_TYPE;
- goto unlock_and_exit;
- }
+ /* Are Notifies allowed on this object? */
- /* Check for an existing internal object */
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
- obj_desc = acpi_ns_get_attached_object(node);
- if (obj_desc) {
+ /* Check for an existing internal object, might not exist */
- /* Object exists. */
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
- /* For a device notify, make sure there's no handler. */
- if ((handler_type & ACPI_DEVICE_NOTIFY) &&
- obj_desc->common_notify.device_notify) {
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- }
+ /* Create a new object */
- /* System notifies may have more handlers installed. */
- notify_obj = obj_desc->common_notify.system_notify;
+ obj_desc = acpi_ut_create_internal_object(node->type);
+ if (!obj_desc) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
- if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) {
- struct acpi_object_notify_handler *parent_obj;
+ /* Attach new object to the Node, remove local reference */
+
+ status = acpi_ns_attach_object(device, obj_desc, node->type);
+ acpi_ut_remove_reference(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+ }
- if (handler_type & ACPI_DEVICE_NOTIFY) {
+ /* Ensure that the handler is not already installed in the lists */
+
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ if (handler_type & (i + 1)) {
+ handler_obj = obj_desc->common_notify.notify_list[i];
+ while (handler_obj) {
+ if (handler_obj->notify.handler == handler) {
status = AE_ALREADY_EXISTS;
goto unlock_and_exit;
}
- parent_obj = &notify_obj->notify;
- status = acpi_add_handler_object(parent_obj,
- handler,
- context);
- goto unlock_and_exit;
- }
- } else {
- /* Create a new object */
-
- obj_desc = acpi_ut_create_internal_object(node->type);
- if (!obj_desc) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
-
- /* Attach new object to the Node */
-
- status =
- acpi_ns_attach_object(device, obj_desc, node->type);
-
- /* Remove local reference to the object */
-
- acpi_ut_remove_reference(obj_desc);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
+ handler_obj = handler_obj->notify.next[i];
}
}
+ }
- /* Install the handler */
+ /* Create and populate a new notify handler object */
- notify_obj =
- acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
- if (!notify_obj) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
- }
+ handler_obj = acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
+ if (!handler_obj) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
- acpi_populate_handler_object(&notify_obj->notify,
- handler_type,
- handler, context,
- NULL);
+ handler_obj->notify.node = node;
+ handler_obj->notify.handler_type = handler_type;
+ handler_obj->notify.handler = handler;
+ handler_obj->notify.context = context;
- if (handler_type & ACPI_SYSTEM_NOTIFY) {
- obj_desc->common_notify.system_notify = notify_obj;
- }
+ /* Install the handler at the list head(s) */
- if (handler_type & ACPI_DEVICE_NOTIFY) {
- obj_desc->common_notify.device_notify = notify_obj;
- }
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ if (handler_type & (i + 1)) {
+ handler_obj->notify.next[i] =
+ obj_desc->common_notify.notify_list[i];
- if (handler_type == ACPI_ALL_NOTIFY) {
+ obj_desc->common_notify.notify_list[i] = handler_obj;
+ }
+ }
- /* Extra ref if installed in both */
+ /* Add an extra reference if handler was installed in both lists */
- acpi_ut_add_reference(notify_obj);
- }
+ if (handler_type == ACPI_ALL_NOTIFY) {
+ acpi_ut_add_reference(handler_obj);
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}
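/*
 * Editor's note (illustrative sketch, not part of this patch): the rework
 * above keeps the public contract of acpi_install_notify_handler() intact.
 * A caller still passes a device handle (or ACPI_ROOT_OBJECT for a global
 * handler), a handler_type mask of ACPI_SYSTEM_NOTIFY and/or
 * ACPI_DEVICE_NOTIFY (zero is now rejected), a callback, and a context
 * pointer. The sketch assumes a Linux build where <linux/acpi.h> provides
 * the ACPICA externals; "my_driver_data", "my_notify" and
 * "my_register_notify" are hypothetical names.
 */
#include <linux/acpi.h>

struct my_driver_data {
	u32 last_notify;
};

static void my_notify(acpi_handle device, u32 value, void *context)
{
	struct my_driver_data *data = context;

	/* 0x00-0x7F are system notifies, 0x80-0xFF are device notifies */
	data->last_notify = value;
}

static acpi_status my_register_notify(acpi_handle device,
				      struct my_driver_data *data)
{
	acpi_status status;

	status = acpi_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
					     my_notify, data);
	if (ACPI_FAILURE(status))
		return status;	/* e.g. AE_ALREADY_EXISTS for a duplicate */

	return AE_OK;
}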
@@ -308,11 +217,11 @@ ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
*
* FUNCTION: acpi_remove_notify_handler
*
- * PARAMETERS: Device - The device for which notifies will be handled
+ * PARAMETERS: Device - The device for which the handler is installed
* handler_type - The type of handler:
- * ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
- * ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
- * ACPI_ALL_NOTIFY: both system and device
+ * ACPI_SYSTEM_NOTIFY: System Handler (00-7F)
+ * ACPI_DEVICE_NOTIFY: Device Handler (80-FF)
+ * ACPI_ALL_NOTIFY: Both System and Device
* Handler - Address of the handler
*
* RETURN: Status
@@ -324,165 +233,106 @@ acpi_status
acpi_remove_notify_handler(acpi_handle device,
u32 handler_type, acpi_notify_handler handler)
{
- union acpi_operand_object *notify_obj;
+ struct acpi_namespace_node *node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, device);
union acpi_operand_object *obj_desc;
- struct acpi_namespace_node *node;
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *previous_handler_obj;
acpi_status status;
+ u32 i;
ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);
/* Parameter validation */
- if ((!device) ||
- (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
- status = AE_BAD_PARAMETER;
- goto exit;
+ if ((!device) || (!handler) || (!handler_type) ||
+ (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
}
-
-
/* Make sure all deferred tasks are completed */
- acpi_os_wait_events_complete(NULL);
+
+ acpi_os_wait_events_complete();
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
- goto exit;
- }
-
- /* Convert and validate the device handle */
-
- node = acpi_ns_validate_handle(device);
- if (!node) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
+ return_ACPI_STATUS(status);
}
- /* Root Object */
+ /* Root Object. Global handlers are removed here */
if (device == ACPI_ROOT_OBJECT) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Removing notify handler for namespace root object\n"));
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ if (handler_type & (i + 1)) {
+ if (!acpi_gbl_global_notify[i].handler ||
+ (acpi_gbl_global_notify[i].handler !=
+ handler)) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
- if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
- !acpi_gbl_system_notify.handler) ||
- ((handler_type & ACPI_DEVICE_NOTIFY) &&
- !acpi_gbl_device_notify.handler)) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
- }
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Removing global notify handler\n"));
- if (handler_type & ACPI_SYSTEM_NOTIFY) {
- acpi_gbl_system_notify.node = NULL;
- acpi_gbl_system_notify.handler = NULL;
- acpi_gbl_system_notify.context = NULL;
+ acpi_gbl_global_notify[i].handler = NULL;
+ acpi_gbl_global_notify[i].context = NULL;
+ }
}
- if (handler_type & ACPI_DEVICE_NOTIFY) {
- acpi_gbl_device_notify.node = NULL;
- acpi_gbl_device_notify.handler = NULL;
- acpi_gbl_device_notify.context = NULL;
- }
+ goto unlock_and_exit;
}
- /* All Other Objects */
+ /* All other objects: Are Notifies allowed on this object? */
- else {
- /* Notifies allowed on this object? */
+ if (!acpi_ev_is_notify_object(node)) {
+ status = AE_TYPE;
+ goto unlock_and_exit;
+ }
- if (!acpi_ev_is_notify_object(node)) {
- status = AE_TYPE;
- goto unlock_and_exit;
- }
+ /* Must have an existing internal object */
- /* Check for an existing internal object */
+ obj_desc = acpi_ns_get_attached_object(node);
+ if (!obj_desc) {
+ status = AE_NOT_EXIST;
+ goto unlock_and_exit;
+ }
- obj_desc = acpi_ns_get_attached_object(node);
- if (!obj_desc) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
- }
+ /* Internal object exists. Find the handler and remove it */
- /* Object exists - make sure there's an existing handler */
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ if (handler_type & (i + 1)) {
+ handler_obj = obj_desc->common_notify.notify_list[i];
+ previous_handler_obj = NULL;
- if (handler_type & ACPI_SYSTEM_NOTIFY) {
- struct acpi_object_notify_handler *handler_obj;
- struct acpi_object_notify_handler *parent_obj;
+ /* Attempt to find the handler in the handler list */
- notify_obj = obj_desc->common_notify.system_notify;
- if (!notify_obj) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
- }
-
- handler_obj = &notify_obj->notify;
- parent_obj = NULL;
- while (handler_obj->handler != handler) {
- if (handler_obj->next) {
- parent_obj = handler_obj;
- handler_obj = handler_obj->next;
- } else {
- break;
- }
+ while (handler_obj &&
+ (handler_obj->notify.handler != handler)) {
+ previous_handler_obj = handler_obj;
+ handler_obj = handler_obj->notify.next[i];
}
- if (handler_obj->handler != handler) {
- status = AE_BAD_PARAMETER;
+ if (!handler_obj) {
+ status = AE_NOT_EXIST;
goto unlock_and_exit;
}
- /*
- * Remove the handler. There are three possible cases.
- * First, we may need to remove a non-embedded object.
- * Second, we may need to remove the embedded object's
- * handler data, while non-embedded objects exist.
- * Finally, we may need to remove the embedded object
- * entirely along with its container.
- */
- if (parent_obj) {
- /* Non-embedded object is being removed. */
- parent_obj->next = handler_obj->next;
- ACPI_FREE(handler_obj);
- } else if (notify_obj->notify.next) {
- /*
- * The handler matches the embedded object, but
- * there are more handler objects in the list.
- * Replace the embedded object's data with the
- * first next object's data and remove that
- * object.
- */
- parent_obj = &notify_obj->notify;
- handler_obj = notify_obj->notify.next;
- *parent_obj = *handler_obj;
- ACPI_FREE(handler_obj);
- } else {
- /* No more handler objects in the list. */
- obj_desc->common_notify.system_notify = NULL;
- acpi_ut_remove_reference(notify_obj);
- }
- }
+ /* Remove the handler object from the list */
- if (handler_type & ACPI_DEVICE_NOTIFY) {
- notify_obj = obj_desc->common_notify.device_notify;
- if (!notify_obj) {
- status = AE_NOT_EXIST;
- goto unlock_and_exit;
- }
+ if (previous_handler_obj) { /* Handler is not at the list head */
+ previous_handler_obj->notify.next[i] =
+ handler_obj->notify.next[i];
+ } else { /* Handler is at the list head */
- if (notify_obj->notify.handler != handler) {
- status = AE_BAD_PARAMETER;
- goto unlock_and_exit;
+ obj_desc->common_notify.notify_list[i] =
+ handler_obj->notify.next[i];
}
- /* Remove the handler */
- obj_desc->common_notify.device_notify = NULL;
- acpi_ut_remove_reference(notify_obj);
+ acpi_ut_remove_reference(handler_obj);
}
}
- unlock_and_exit:
+unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
- exit:
- if (ACPI_FAILURE(status))
- ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
return_ACPI_STATUS(status);
}
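/*
 * Editor's note (illustrative sketch, not part of this patch): removal is
 * symmetric with installation -- the caller passes the same handler_type
 * mask and the same function pointer that were registered. With the
 * list-based implementation above, a handler that cannot be found now
 * yields AE_NOT_EXIST. "my_notify" is the hypothetical handler from the
 * previous sketch.
 */
#include <linux/acpi.h>

static void my_unregister_notify(acpi_handle device)
{
	acpi_status status;

	status = acpi_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
					    my_notify);
	if (status == AE_NOT_EXIST) {
		/* Handler was never installed (or was already removed) */
	}
}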
@@ -492,7 +342,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
*
* FUNCTION: acpi_install_exception_handler
*
- * PARAMETERS: Handler - Pointer to the handler function for the
+ * PARAMETERS: handler - Pointer to the handler function for the
* event
*
* RETURN: Status
@@ -536,8 +386,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
*
* FUNCTION: acpi_install_global_event_handler
*
- * PARAMETERS: Handler - Pointer to the global event handler function
- * Context - Value passed to the handler on each event
+ * PARAMETERS: handler - Pointer to the global event handler function
+ * context - Value passed to the handler on each event
*
* RETURN: Status
*
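/*
 * Editor's note (illustrative sketch, not part of this patch): the global
 * event handler is invoked for every fixed event and GPE before the
 * per-event handlers run, which makes it suitable for event counting or
 * tracing. The sketch assumes the acpi_gbl_event_handler prototype from
 * acpixf.h; "count_event" and "my_install_event_counter" are hypothetical.
 */
#include <linux/acpi.h>

static u64 gpe_count, fixed_count;

static void count_event(u32 event_type, acpi_handle device,
			u32 event_number, void *context)
{
	if (event_type == ACPI_EVENT_TYPE_GPE)
		gpe_count++;
	else if (event_type == ACPI_EVENT_TYPE_FIXED)
		fixed_count++;
}

static acpi_status my_install_event_counter(void)
{
	/* Typically called once during platform initialization */
	return acpi_install_global_event_handler(count_event, NULL);
}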
@@ -586,10 +436,10 @@ ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
*
* FUNCTION: acpi_install_fixed_event_handler
*
- * PARAMETERS: Event - Event type to enable.
- * Handler - Pointer to the handler function for the
+ * PARAMETERS: event - Event type to enable.
+ * handler - Pointer to the handler function for the
* event
- * Context - Value passed to the handler on each GPE
+ * context - Value passed to the handler on each GPE
*
* RETURN: Status
*
@@ -656,8 +506,8 @@ ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
*
* FUNCTION: acpi_remove_fixed_event_handler
*
- * PARAMETERS: Event - Event type to disable.
- * Handler - Address of the handler
+ * PARAMETERS: event - Event type to disable.
+ * handler - Address of the handler
*
* RETURN: Status
*
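/*
 * Editor's note (illustrative sketch, not part of this patch): fixed
 * events (power button, sleep button, RTC, PM timer) take an
 * acpi_event_handler callback, which receives only the registered context
 * and reports whether it handled the interrupt. "my_power_button" and
 * "my_hook_power_button" are hypothetical names.
 */
#include <linux/acpi.h>

static u32 my_power_button(void *context)
{
	/* React to the event, then tell ACPICA the interrupt was handled */
	return ACPI_INTERRUPT_HANDLED;
}

static acpi_status my_hook_power_button(void)
{
	acpi_status status;

	status = acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
						  my_power_button, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/* Symmetric teardown:
	 * acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
	 *				   my_power_button);
	 */
	return AE_OK;
}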
@@ -713,10 +563,10 @@ ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
* defined GPEs)
* gpe_number - The GPE number within the GPE block
- * Type - Whether this GPE should be treated as an
+ * type - Whether this GPE should be treated as an
* edge- or level-triggered interrupt.
- * Address - Address of the handler
- * Context - Value passed to the handler on each GPE
+ * address - Address of the handler
+ * context - Value passed to the handler on each GPE
*
* RETURN: Status
*
@@ -823,7 +673,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
* defined GPEs)
* gpe_number - The event to remove a handler
- * Address - Address of the handler
+ * address - Address of the handler
*
* RETURN: Status
*
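/*
 * Editor's note (illustrative sketch, not part of this patch): a GPE
 * handler receives the GPE block device and GPE number and returns flags
 * telling ACPICA whether the interrupt was handled and whether the GPE
 * should be re-enabled. The sketch assumes the acpi_gpe_handler prototype
 * from acpixf.h; "my_gpe_handler" and "my_hook_gpe" are hypothetical.
 */
#include <linux/acpi.h>

static u32 my_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
			  void *context)
{
	/* Service the event source here */
	return ACPI_REENABLE_GPE | ACPI_INTERRUPT_HANDLED;
}

static acpi_status my_hook_gpe(u32 gpe_number, void *context)
{
	acpi_status status;

	/* NULL gpe_device selects the FADT-defined GPE0/GPE1 blocks */
	status = acpi_install_gpe_handler(NULL, gpe_number,
					  ACPI_GPE_EDGE_TRIGGERED,
					  my_gpe_handler, context);
	if (ACPI_FAILURE(status))
		return status;

	return acpi_enable_gpe(NULL, gpe_number);
}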
@@ -849,7 +699,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
/* Make sure all deferred tasks are completed */
- acpi_os_wait_events_complete(NULL);
+ acpi_os_wait_events_complete();
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
@@ -919,8 +769,8 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
*
* FUNCTION: acpi_acquire_global_lock
*
- * PARAMETERS: Timeout - How long the caller is willing to wait
- * Handle - Where the handle to the lock is returned
+ * PARAMETERS: timeout - How long the caller is willing to wait
+ * handle - Where the handle to the lock is returned
* (if acquired)
*
* RETURN: Status
@@ -967,7 +817,7 @@ ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
*
* FUNCTION: acpi_release_global_lock
*
- * PARAMETERS: Handle - Returned from acpi_acquire_global_lock
+ * PARAMETERS: handle - Returned from acpi_acquire_global_lock
*
* RETURN: Status
*
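/*
 * Editor's note (illustrative sketch, not part of this patch): the global
 * lock is acquired with a timeout in milliseconds (or ACPI_WAIT_FOREVER)
 * and returns a handle that must be passed back to
 * acpi_release_global_lock(). "my_do_locked_io" is hypothetical, and the
 * 100 ms timeout is an arbitrary illustrative value.
 */
#include <linux/acpi.h>

static acpi_status my_do_locked_io(void)
{
	acpi_status status;
	u32 glk;

	status = acpi_acquire_global_lock(100, &glk);
	if (ACPI_FAILURE(status))
		return status;	/* AE_TIME if the timeout expired */

	/* ... access hardware shared with the firmware ... */

	return acpi_release_global_lock(glk);
}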
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 77cee5a5e891..35520c6eeefb 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -153,8 +153,8 @@ ACPI_EXPORT_SYMBOL(acpi_disable)
*
* FUNCTION: acpi_enable_event
*
- * PARAMETERS: Event - The fixed eventto be enabled
- * Flags - Reserved
+ * PARAMETERS: event - The fixed event to be enabled
+ * flags - Reserved
*
* RETURN: Status
*
@@ -265,7 +265,7 @@ ACPI_EXPORT_SYMBOL(acpi_disable_event)
*
* FUNCTION: acpi_clear_event
*
- * PARAMETERS: Event - The fixed event to be cleared
+ * PARAMETERS: event - The fixed event to be cleared
*
* RETURN: Status
*
@@ -301,7 +301,7 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
*
* FUNCTION: acpi_get_event_status
*
- * PARAMETERS: Event - The fixed event
+ * PARAMETERS: event - The fixed event
* event_status - Where the current status of the event will
* be returned
*
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 86f9b343ebd4..6affbdb4b88c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -197,12 +197,12 @@ acpi_status
acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_handle gpe_device, u32 gpe_number)
{
- acpi_status status = AE_BAD_PARAMETER;
+ acpi_status status;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node;
- struct acpi_gpe_notify_object *notify_object;
+ struct acpi_gpe_notify_info *notify;
+ struct acpi_gpe_notify_info *new_notify;
acpi_cpu_flags flags;
- u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@@ -216,63 +216,95 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ /* Handle root object case */
+
+ if (wake_device == ACPI_ROOT_OBJECT) {
+ device_node = acpi_gbl_root_node;
+ } else {
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ }
+
+ /* Validate WakeDevice is of type Device */
+
+ if (device_node->type != ACPI_TYPE_DEVICE) {
+ return_ACPI_STATUS (AE_BAD_PARAMETER);
+ }
+
+ /*
+ * Allocate a new notify object up front, in case it is needed.
+ * Memory allocation while holding a spinlock is a big no-no
+ * on some hosts.
+ */
+ new_notify = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_notify_info));
+ if (!new_notify) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Ensure that we have a valid GPE number */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (!gpe_event_info) {
+ status = AE_BAD_PARAMETER;
goto unlock_and_exit;
}
- if (wake_device == ACPI_ROOT_OBJECT) {
- goto out;
- }
-
/*
* If there is no method or handler for this GPE, then the
- * wake_device will be notified whenever this GPE fires (aka
- * "implicit notify") Note: The GPE is assumed to be
+ * wake_device will be notified whenever this GPE fires. This is
+ * known as an "implicit notify". Note: The GPE is assumed to be
* level-triggered (for windows compatibility).
*/
- gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
- if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
- && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
- goto out;
- }
-
- /* Validate wake_device is of type Device */
-
- device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
- if (device_node->type != ACPI_TYPE_DEVICE) {
- goto unlock_and_exit;
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NONE) {
+ /*
+ * This is the first device for implicit notify on this GPE.
+ * Just set the flags here, and enter the NOTIFY block below.
+ */
+ gpe_event_info->flags =
+ (ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
}
- if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
- gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
- ACPI_GPE_LEVEL_TRIGGERED);
- gpe_event_info->dispatch.device.node = device_node;
- gpe_event_info->dispatch.device.next = NULL;
- } else {
- /* There are multiple devices to notify implicitly. */
-
- notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
- if (!notify_object) {
- status = AE_NO_MEMORY;
- goto unlock_and_exit;
+ /*
+ * If we already have an implicit notify on this GPE, add
+ * this device to the notify list.
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+ ACPI_GPE_DISPATCH_NOTIFY) {
+
+ /* Ensure that the device is not already in the list */
+
+ notify = gpe_event_info->dispatch.notify_list;
+ while (notify) {
+ if (notify->device_node == device_node) {
+ status = AE_ALREADY_EXISTS;
+ goto unlock_and_exit;
+ }
+ notify = notify->next;
}
- notify_object->node = device_node;
- notify_object->next = gpe_event_info->dispatch.device.next;
- gpe_event_info->dispatch.device.next = notify_object;
+ /* Add this device to the notify list for this GPE */
+
+ new_notify->device_node = device_node;
+ new_notify->next = gpe_event_info->dispatch.notify_list;
+ gpe_event_info->dispatch.notify_list = new_notify;
+ new_notify = NULL;
}
- out:
+ /* Mark the GPE as a possible wake event */
+
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
status = AE_OK;
- unlock_and_exit:
+unlock_and_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ /* Delete the notify object if it was not used above */
+
+ if (new_notify) {
+ ACPI_FREE(new_notify);
+ }
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
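/*
 * Editor's note (illustrative sketch, not part of this patch): with the
 * rework above, several wake devices can share one GPE. If the GPE has no
 * handler or _Lxx/_Exx method, each call adds the device to the GPE's
 * implicit-notify list (a duplicate yields AE_ALREADY_EXISTS); otherwise
 * the GPE is only marked wake-capable. "my_mark_wake_gpe" is hypothetical,
 * with the handle and GPE number typically taken from a device's _PRW.
 */
#include <linux/acpi.h>

static acpi_status my_mark_wake_gpe(acpi_handle dev_handle, u32 gpe_number)
{
	/* NULL gpe_device selects the FADT-defined GPE blocks */
	return acpi_setup_gpe_for_wake(dev_handle, NULL, gpe_number);
}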
@@ -283,7 +315,7 @@ ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake)
*
* PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
* gpe_number - GPE level within the GPE block
- * Action - Enable or Disable
+ * action - Enable or Disable
*
* RETURN: Status
*
@@ -508,7 +540,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
* FUNCTION: acpi_install_gpe_block
*
* PARAMETERS: gpe_device - Handle to the parent GPE Block Device
- * gpe_block_address - Address and space_iD
+ * gpe_block_address - Address and space_ID
* register_count - Number of GPE register pairs in the block
* interrupt_number - H/W interrupt for the block
*
@@ -653,7 +685,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
*
* FUNCTION: acpi_get_gpe_device
*
- * PARAMETERS: Index - System GPE index (0-current_gpe_count)
+ * PARAMETERS: index - System GPE index (0-current_gpe_count)
* gpe_device - Where the parent GPE Device is returned
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 6019208cd4b6..96b412d03950 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -55,11 +55,11 @@ ACPI_MODULE_NAME("evxfregn")
*
* FUNCTION: acpi_install_address_space_handler
*
- * PARAMETERS: Device - Handle for the device
+ * PARAMETERS: device - Handle for the device
* space_id - The address space ID
- * Handler - Address of the handler
- * Setup - Address of the setup function
- * Context - Value passed to the handler on each access
+ * handler - Address of the handler
+ * setup - Address of the setup function
+ * context - Value passed to the handler on each access
*
* RETURN: Status
*
@@ -112,16 +112,16 @@ acpi_install_address_space_handler(acpi_handle device,
}
/*
- * For the default space_iDs, (the IDs for which there are default region handlers
+ * For the default space_IDs, (the IDs for which there are default region handlers
* installed) Only execute the _REG methods if the global initialization _REG
* methods have already been run (via acpi_initialize_objects). In other words,
- * we will defer the execution of the _REG methods for these space_iDs until
+ * we will defer the execution of the _REG methods for these space_IDs until
* execution of acpi_initialize_objects. This is done because we need the handlers
* for the default spaces (mem/io/pci/table) to be installed before we can run
* any control methods (or _REG methods). There is known BIOS code that depends
* on this.
*
- * For all other space_iDs, we can safely execute the _REG methods immediately.
+ * For all other space_IDs, we can safely execute the _REG methods immediately.
* This means that for IDs like embedded_controller, this function should be called
* only after acpi_enable_subsystem has been called.
*/
@@ -157,9 +157,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler)
*
* FUNCTION: acpi_remove_address_space_handler
*
- * PARAMETERS: Device - Handle for the device
+ * PARAMETERS: device - Handle for the device
* space_id - The address space ID
- * Handler - Address of the handler
+ * handler - Address of the handler
*
* RETURN: Status
*
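/*
 * Editor's note (illustrative sketch, not part of this patch): an
 * operation region handler is installed per device and per address-space
 * ID; the setup callback may be NULL, and _REG methods run as described
 * above (immediately, or deferred for the default space IDs). The sketch
 * assumes the acpi_adr_space_handler prototype from acpixf.h;
 * "my_ec_space_handler" and "my_install_ec_handler" are hypothetical.
 */
#include <linux/acpi.h>

static acpi_status
my_ec_space_handler(u32 function, acpi_physical_address address,
		    u32 bit_width, u64 *value, void *handler_context,
		    void *region_context)
{
	if (function == ACPI_READ)
		*value = 0;	/* read the EC register at 'address' here */
	/* else ACPI_WRITE: write *value to the EC register */

	return AE_OK;
}

static acpi_status my_install_ec_handler(acpi_handle ec_handle, void *ec)
{
	return acpi_install_address_space_handler(ec_handle,
						  ACPI_ADR_SPACE_EC,
						  my_ec_space_handler,
						  NULL, ec);
}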
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index c86d44e41bc8..16219bde48da 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -66,7 +66,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_add_table
*
- * PARAMETERS: Table - Pointer to raw table
+ * PARAMETERS: table - Pointer to raw table
* parent_node - Where to load the table (scope)
* ddb_handle - Where to return the table handle.
*
@@ -276,8 +276,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ex_region_read
*
* PARAMETERS: obj_desc - Region descriptor
- * Length - Number of bytes to read
- * Buffer - Pointer to where to put the data
+ * length - Number of bytes to read
+ * buffer - Pointer to where to put the data
*
* RETURN: Status
*
@@ -318,7 +318,7 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
*
* PARAMETERS: obj_desc - Region or Buffer/Field where the table will be
* obtained
- * Target - Where a handle to the table will be stored
+ * target - Where a handle to the table will be stored
* walk_state - Current state
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index e385436bd424..bfb062e4c4b4 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -60,7 +60,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 max_length);
* PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the new Integer object is returned
- * Flags - Used for string conversion
+ * flags - Used for string conversion
*
* RETURN: Status
*
@@ -272,9 +272,9 @@ acpi_ex_convert_to_buffer(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_convert_to_ascii
*
- * PARAMETERS: Integer - Value to be converted
- * Base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
- * String - Where the string is returned
+ * PARAMETERS: integer - Value to be converted
+ * base - ACPI_STRING_DECIMAL or ACPI_STRING_HEX
+ * string - Where the string is returned
* data_width - Size of data item to be converted, in bytes
*
* RETURN: Actual string length
@@ -385,7 +385,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
* PARAMETERS: obj_desc - Object to be converted. Must be an
* Integer, Buffer, or String
* result_desc - Where the string object is returned
- * Type - String flags (base and conversion type)
+ * type - String flags (base and conversion type)
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3f5bc998c1cb..691d4763102c 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -369,7 +369,7 @@ acpi_ex_create_region(u8 * aml_start,
*
* DESCRIPTION: Create a new processor object and populate the fields
*
- * Processor (Name[0], cpu_iD[1], pblock_addr[2], pblock_length[3])
+ * Processor (Name[0], cpu_ID[1], pblock_addr[2], pblock_length[3])
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index e211e9c19215..bc5b9a6a1316 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -54,8 +54,8 @@ ACPI_MODULE_NAME("exdebug")
* FUNCTION: acpi_ex_do_debug_object
*
* PARAMETERS: source_desc - Object to be output to "Debug Object"
- * Level - Indentation level (used for packages)
- * Index - Current package element, zero if not pkg
+ * level - Indentation level (used for packages)
+ * index - Current package element, zero if not pkg
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 2a6ac0a3bc1e..213c081776fc 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -109,9 +109,9 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
static struct acpi_exdump_info acpi_ex_dump_device[4] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.system_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
"System Notify"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.device_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
"Device Notify"}
};
@@ -158,9 +158,9 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
"System Level"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.resource_order),
"Resource Order"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.system_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
"System Notify"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.device_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
"Device Notify"}
};
@@ -169,18 +169,18 @@ static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[0]),
"System Notify"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.device_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.notify_list[1]),
"Device Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.handler), "Handler"}
};
static struct acpi_exdump_info acpi_ex_dump_thermal[4] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_thermal), NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.system_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[0]),
"System Notify"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.device_notify),
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.notify_list[1]),
"Device Notify"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(thermal_zone.handler), "Handler"}
};
@@ -241,10 +241,15 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
};
-static struct acpi_exdump_info acpi_ex_dump_notify[3] = {
+static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"}
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[0]),
+ "Next System Notify"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
};
/* Miscellaneous tables */
@@ -318,7 +323,7 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
* FUNCTION: acpi_ex_dump_object
*
* PARAMETERS: obj_desc - Descriptor to dump
- * Info - Info table corresponding to this object
+ * info - Info table corresponding to this object
* type
*
* RETURN: None
@@ -444,7 +449,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_dump_operand
*
* PARAMETERS: *obj_desc - Pointer to entry to be dumped
- * Depth - Current nesting depth
+ * depth - Current nesting depth
*
* RETURN: None
*
@@ -726,7 +731,7 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
*
* FUNCTION: acpi_ex_dump_operands
*
- * PARAMETERS: Operands - A list of Operand objects
+ * PARAMETERS: operands - A list of Operand objects
* opcode_name - AML opcode name
* num_operands - Operand count for this opcode
*
@@ -769,8 +774,8 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
*
* FUNCTION: acpi_ex_out* functions
*
- * PARAMETERS: Title - Descriptive text
- * Value - Value to be displayed
+ * PARAMETERS: title - Descriptive text
+ * value - Value to be displayed
*
* DESCRIPTION: Object dump output formatting functions. These functions
* reduce the number of format strings required and keeps them
@@ -792,8 +797,8 @@ static void acpi_ex_out_pointer(char *title, void *value)
*
* FUNCTION: acpi_ex_dump_namespace_node
*
- * PARAMETERS: Node - Descriptor to dump
- * Flags - Force display if TRUE
+ * PARAMETERS: node - Descriptor to dump
+ * flags - Force display if TRUE
*
* DESCRIPTION: Dumps the members of the given Node
*
@@ -825,7 +830,7 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
*
* FUNCTION: acpi_ex_dump_reference_obj
*
- * PARAMETERS: Object - Descriptor to dump
+ * PARAMETERS: object - Descriptor to dump
*
* DESCRIPTION: Dumps a reference object
*
@@ -882,8 +887,8 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
* FUNCTION: acpi_ex_dump_package_obj
*
* PARAMETERS: obj_desc - Descriptor to dump
- * Level - Indentation Level
- * Index - Package index for this object
+ * level - Indentation Level
+ * index - Package index for this object
*
* DESCRIPTION: Dumps the elements of the package
*
@@ -926,9 +931,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
case ACPI_TYPE_STRING:
acpi_os_printf("[String] Value: ");
- for (i = 0; i < obj_desc->string.length; i++) {
- acpi_os_printf("%c", obj_desc->string.pointer[i]);
- }
+ acpi_ut_print_string(obj_desc->string.pointer, ACPI_UINT8_MAX);
acpi_os_printf("\n");
break;
@@ -977,7 +980,7 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_dump_object_descriptor
*
* PARAMETERS: obj_desc - Descriptor to dump
- * Flags - Force display if TRUE
+ * flags - Force display if TRUE
*
* DESCRIPTION: Dumps the members of the object descriptor given.
*
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 149de45fdadd..a7784152ed30 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -222,9 +222,9 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
* PARAMETERS: obj_desc - Field to be read
* field_datum_byte_offset - Byte offset of this datum within the
* parent field
- * Value - Where to store value (must at least
+ * value - Where to store value (must at least
* 64 bits)
- * Function - Read or Write flag plus other region-
+ * function - Read or Write flag plus other region-
* dependent flags
*
* RETURN: Status
@@ -315,7 +315,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_register_overflow
*
* PARAMETERS: obj_desc - Register(Field) to be written
- * Value - Value to be stored
+ * value - Value to be stored
*
* RETURN: TRUE if value overflows the field, FALSE otherwise
*
@@ -365,7 +365,7 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
* PARAMETERS: obj_desc - Field to be read
* field_datum_byte_offset - Byte offset of this datum within the
* parent field
- * Value - Where to store value (must be 64 bits)
+ * value - Where to store value (must be 64 bits)
* read_write - Read or Write flag
*
* RETURN: Status
@@ -574,7 +574,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_write_with_update_rule
*
* PARAMETERS: obj_desc - Field to be written
- * Mask - bitmask within field datum
+ * mask - bitmask within field datum
* field_value - Value to write
* field_datum_byte_offset - Offset of datum within field
*
@@ -678,7 +678,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_extract_from_field
*
* PARAMETERS: obj_desc - Field to be read
- * Buffer - Where to store the field data
+ * buffer - Where to store the field data
* buffer_length - Length of Buffer
*
* RETURN: Status
@@ -823,7 +823,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
* FUNCTION: acpi_ex_insert_into_field
*
* PARAMETERS: obj_desc - Field to be written
- * Buffer - Data to be written
+ * buffer - Data to be written
* buffer_length - Length of Buffer
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 0a0893310348..271c0c57ea10 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -144,8 +144,8 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_concat_template
*
- * PARAMETERS: Operand0 - First source object
- * Operand1 - Second source object
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
@@ -229,8 +229,8 @@ acpi_ex_concat_template(union acpi_operand_object *operand0,
*
* FUNCTION: acpi_ex_do_concatenate
*
- * PARAMETERS: Operand0 - First source object
- * Operand1 - Second source object
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
* actual_return_desc - Where to place the return object
* walk_state - Current walk state
*
@@ -397,9 +397,9 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
*
* FUNCTION: acpi_ex_do_math_op
*
- * PARAMETERS: Opcode - AML opcode
- * Integer0 - Integer operand #0
- * Integer1 - Integer operand #1
+ * PARAMETERS: opcode - AML opcode
+ * integer0 - Integer operand #0
+ * integer1 - Integer operand #1
*
* RETURN: Integer result of the operation
*
@@ -479,9 +479,9 @@ u64 acpi_ex_do_math_op(u16 opcode, u64 integer0, u64 integer1)
*
* FUNCTION: acpi_ex_do_logical_numeric_op
*
- * PARAMETERS: Opcode - AML opcode
- * Integer0 - Integer operand #0
- * Integer1 - Integer operand #1
+ * PARAMETERS: opcode - AML opcode
+ * integer0 - Integer operand #0
+ * integer1 - Integer operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
@@ -534,9 +534,9 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
*
* FUNCTION: acpi_ex_do_logical_op
*
- * PARAMETERS: Opcode - AML opcode
- * Operand0 - operand #0
- * Operand1 - operand #1
+ * PARAMETERS: opcode - AML opcode
+ * operand0 - operand #0
+ * operand1 - operand #1
* logical_result - TRUE/FALSE result of the operation
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 60933e9dc3c0..bcceda5be9e3 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -102,7 +102,7 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
* FUNCTION: acpi_ex_link_mutex
*
* PARAMETERS: obj_desc - The mutex to be linked
- * Thread - Current executing thread object
+ * thread - Current executing thread object
*
* RETURN: None
*
@@ -138,7 +138,7 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_acquire_mutex_object
*
- * PARAMETERS: Timeout - Timeout in milliseconds
+ * PARAMETERS: timeout - Timeout in milliseconds
* obj_desc - Mutex object
* thread_id - Current thread state
*
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_release_all_mutexes
*
- * PARAMETERS: Thread - Current executing thread object
+ * PARAMETERS: thread - Current executing thread object
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 30157f5a12d7..81eca60d2748 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -391,12 +391,12 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
*
* FUNCTION: acpi_ex_prep_field_value
*
- * PARAMETERS: Info - Contains all field creation info
+ * PARAMETERS: info - Contains all field creation info
*
* RETURN: Status
*
- * DESCRIPTION: Construct a union acpi_operand_object of type def_field and
- * connect it to the parent Node.
+ * DESCRIPTION: Construct an object of type union acpi_operand_object with a
+ * subtype of def_field and connect it to the parent Node.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 12d51df6d3bf..1f1ce0c3d2f8 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -53,10 +53,10 @@ ACPI_MODULE_NAME("exregion")
*
* FUNCTION: acpi_ex_system_memory_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
@@ -270,10 +270,10 @@ acpi_ex_system_memory_space_handler(u32 function,
*
* FUNCTION: acpi_ex_system_io_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
@@ -329,10 +329,10 @@ acpi_ex_system_io_space_handler(u32 function,
*
* FUNCTION: acpi_ex_pci_config_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
@@ -365,7 +365,7 @@ acpi_ex_pci_config_space_handler(u32 function,
* pci_function is the PCI device function number
* pci_register is the Config space register range 0-255 bytes
*
- * Value - input value for write, output address for read
+ * value - input value for write, output address for read
*
*/
pci_id = (struct acpi_pci_id *)region_context;
@@ -402,10 +402,10 @@ acpi_ex_pci_config_space_handler(u32 function,
*
* FUNCTION: acpi_ex_cmos_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
@@ -434,10 +434,10 @@ acpi_ex_cmos_space_handler(u32 function,
*
* FUNCTION: acpi_ex_pci_bar_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
@@ -466,10 +466,10 @@ acpi_ex_pci_bar_space_handler(u32 function,
*
* FUNCTION: acpi_ex_data_table_space_handler
*
- * PARAMETERS: Function - Read or Write operation
- * Address - Where in the space to read or write
+ * PARAMETERS: function - Read or Write operation
+ * address - Where in the space to read or write
* bit_width - Field width in bits (8, 16, or 32)
- * Value - Pointer to in or out value
+ * value - Pointer to in or out value
* handler_context - Pointer to Handler's context
* region_context - Pointer to context specific to the
* accessed region
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 6e335dc34528..bbf40ac27585 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -147,7 +147,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
stack_desc = *stack_ptr;
- /* This is a union acpi_operand_object */
+ /* This is an object of type union acpi_operand_object */
switch (stack_desc->common.type) {
case ACPI_TYPE_LOCAL_REFERENCE:
@@ -321,7 +321,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
* FUNCTION: acpi_ex_resolve_multiple
*
* PARAMETERS: walk_state - Current state (contains AML opcode)
- * Operand - Starting point for resolution
+ * operand - Starting point for resolution
* return_type - Where the object type is returned
* return_desc - Where the resolved object is returned
*
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index a67b1d925ddd..f232fbabdea8 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -113,7 +113,7 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
*
* FUNCTION: acpi_ex_resolve_operands
*
- * PARAMETERS: Opcode - Opcode being interpreted
+ * PARAMETERS: opcode - Opcode being interpreted
* stack_ptr - Pointer to the operand stack to be
* resolved
* walk_state - Current state
@@ -307,7 +307,7 @@ acpi_ex_resolve_operands(u16 opcode,
case ARGI_DEVICE_REF:
case ARGI_TARGETREF: /* Allows implicit conversion rules before store */
case ARGI_FIXED_TARGET: /* No implicit conversion before store to target */
- case ARGI_SIMPLE_TARGET: /* Name, Local, or Arg - no implicit conversion */
+ case ARGI_SIMPLE_TARGET: /* Name, Local, or arg - no implicit conversion */
/*
* Need an operand of type ACPI_TYPE_LOCAL_REFERENCE
@@ -410,7 +410,7 @@ acpi_ex_resolve_operands(u16 opcode,
/*
* Need an operand of type ACPI_TYPE_INTEGER,
* But we can implicitly convert from a STRING or BUFFER
- * Aka - "Implicit Source Operand Conversion"
+ * aka - "Implicit Source Operand Conversion"
*/
status =
acpi_ex_convert_to_integer(obj_desc, stack_ptr, 16);
@@ -437,7 +437,7 @@ acpi_ex_resolve_operands(u16 opcode,
/*
* Need an operand of type ACPI_TYPE_BUFFER,
* But we can implicitly convert from a STRING or INTEGER
- * Aka - "Implicit Source Operand Conversion"
+ * aka - "Implicit Source Operand Conversion"
*/
status = acpi_ex_convert_to_buffer(obj_desc, stack_ptr);
if (ACPI_FAILURE(status)) {
@@ -463,7 +463,7 @@ acpi_ex_resolve_operands(u16 opcode,
/*
* Need an operand of type ACPI_TYPE_STRING,
* But we can implicitly convert from a BUFFER or INTEGER
- * Aka - "Implicit Source Operand Conversion"
+ * aka - "Implicit Source Operand Conversion"
*/
status = acpi_ex_convert_to_string(obj_desc, stack_ptr,
ACPI_IMPLICIT_CONVERT_HEX);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c6cf843cc4c9..5fffe7ab5ece 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -62,8 +62,8 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
* FUNCTION: acpi_ex_store
*
* PARAMETERS: *source_desc - Value to be stored
- * *dest_desc - Where to store it. Must be an NS node
- * or a union acpi_operand_object of type
+ * *dest_desc - Where to store it. Must be an NS node
+ * or union acpi_operand_object of type
* Reference;
* walk_state - Current walk state
*
@@ -361,7 +361,7 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
* FUNCTION: acpi_ex_store_object_to_node
*
* PARAMETERS: source_desc - Value to be stored
- * Node - Named object to receive the value
+ * node - Named object to receive the value
* walk_state - Current walk state
* implicit_conversion - Perform implicit conversion (yes/no)
*
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 65a45d8335c8..53c248473547 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -110,7 +110,7 @@ acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc,
* NOTE: ACPI versions up to 3.0 specified that the buffer must be
* truncated if the string is smaller than the buffer. However, "other"
* implementations of ACPI never did this and thus became the defacto
- * standard. ACPI 3.0_a changes this behavior such that the buffer
+ * standard. ACPI 3.0A changes this behavior such that the buffer
* is no longer truncated.
*/
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 191a12945226..b760641e2fc6 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("exsystem")
*
* FUNCTION: acpi_ex_system_wait_semaphore
*
- * PARAMETERS: Semaphore - Semaphore to wait on
- * Timeout - Max time to wait
+ * PARAMETERS: semaphore - Semaphore to wait on
+ * timeout - Max time to wait
*
* RETURN: Status
*
@@ -98,8 +98,8 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
*
* FUNCTION: acpi_ex_system_wait_mutex
*
- * PARAMETERS: Mutex - Mutex to wait on
- * Timeout - Max time to wait
+ * PARAMETERS: mutex - Mutex to wait on
+ * timeout - Max time to wait
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index eb6798ba8b59..d1ab7917eed7 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void)
*
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
- * fatal system error. Used in conjunction with
+ * fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/
@@ -317,8 +317,8 @@ void acpi_ex_release_global_lock(u32 field_flags)
*
* FUNCTION: acpi_ex_digits_needed
*
- * PARAMETERS: Value - Value to be represented
- * Base - Base of representation
+ * PARAMETERS: value - Value to be represented
+ * base - Base of representation
*
* RETURN: The number of digits.
*
@@ -408,7 +408,7 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
* PARAMETERS: out_string - Where to put the converted string. At least
* 21 bytes are needed to hold the largest
* possible 64-bit integer.
- * Value - Value to be converted
+ * value - Value to be converted
*
* RETURN: None, string
*
@@ -443,7 +443,7 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
*
* RETURN: TRUE if valid/supported ID.
*
- * DESCRIPTION: Validate an operation region space_iD.
+ * DESCRIPTION: Validate an operation region space_ID.
*
******************************************************************************/
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index d0b9ed5df97e..a1e71d0ef57b 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("hwacpi")
*
* FUNCTION: acpi_hw_set_mode
*
- * PARAMETERS: Mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
+ * PARAMETERS: mode - SYS_MODE_ACPI or SYS_MODE_LEGACY
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 29e859293edd..94996f9ae3ad 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
* FUNCTION: acpi_hw_extended_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * Flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument)
*
******************************************************************************/
-acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_sleep(u8 sleep_state)
{
acpi_status status;
u8 sleep_type_value;
@@ -117,19 +116,14 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
/* Clear wake status (WAK_STS) */
- status = acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+ status =
+ acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_gbl_system_awake_and_running = FALSE;
- /* Optionally execute _GTS (Going To Sleep) */
-
- if (flags & ACPI_EXECUTE_GTS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
- }
-
/* Flush caches, as per ACPI specification */
ACPI_FLUSH_CPU_CACHE();
@@ -147,7 +141,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
ACPI_X_SLEEP_TYPE_MASK);
- status = acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+ status = acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
&acpi_gbl_FADT.sleep_control);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
@@ -171,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_extended_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * Flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -180,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
{
acpi_status status;
u8 sleep_type_value;
@@ -195,15 +188,10 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
ACPI_X_SLEEP_TYPE_MASK);
- (void)acpi_write((sleep_type_value | ACPI_X_SLEEP_ENABLE),
+ (void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
&acpi_gbl_FADT.sleep_control);
}
- /* Optionally execute _BFS (Back From Sleep) */
-
- if (flags & ACPI_EXECUTE_BFS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
- }
return_ACPI_STATUS(AE_OK);
}
@@ -212,7 +200,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_extended_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * Flags - Reserved, set to zero
+ * flags - Reserved, set to zero
*
* RETURN: Status
*
@@ -221,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_extended_wake(u8 sleep_state)
{
ACPI_FUNCTION_TRACE(hw_extended_wake);
@@ -239,7 +227,7 @@ acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags)
* and use it to determine whether the system is rebooting or
* resuming. Clear WAK_STS for compatibility.
*/
- (void)acpi_write(ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
+ (void)acpi_write((u64)ACPI_X_WAKE_STATUS, &acpi_gbl_FADT.sleep_status);
acpi_gbl_system_awake_and_running = TRUE;
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 6b6c83b87b52..4af6d20ef077 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -69,9 +69,9 @@ acpi_hw_write_multiple(u32 value,
*
* FUNCTION: acpi_hw_validate_register
*
- * PARAMETERS: Reg - GAS register structure
+ * PARAMETERS: reg - GAS register structure
* max_bit_width - Max bit_width supported (32 or 64)
- * Address - Pointer to where the gas->address
+ * address - Pointer to where the gas->address
* is returned
*
* RETURN: Status
@@ -102,7 +102,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
return (AE_BAD_ADDRESS);
}
- /* Validate the space_iD */
+ /* Validate the space_ID */
if ((reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
(reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
@@ -137,8 +137,8 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
*
* FUNCTION: acpi_hw_read
*
- * PARAMETERS: Value - Where the value is returned
- * Reg - GAS register structure
+ * PARAMETERS: value - Where the value is returned
+ * reg - GAS register structure
*
* RETURN: Status
*
@@ -148,7 +148,7 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
*
* LIMITATIONS: <These limitations also apply to acpi_hw_write>
* bit_width must be exactly 8, 16, or 32.
- * space_iD must be system_memory or system_iO.
+ * space_ID must be system_memory or system_IO.
* bit_offset and access_width are currently ignored, as there has
* not been a need to implement these.
*
@@ -200,8 +200,8 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
*
* FUNCTION: acpi_hw_write
*
- * PARAMETERS: Value - Value to be written
- * Reg - GAS register structure
+ * PARAMETERS: value - Value to be written
+ * reg - GAS register structure
*
* RETURN: Status
*
@@ -439,7 +439,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
* FUNCTION: acpi_hw_register_write
*
* PARAMETERS: register_id - ACPI Register ID
- * Value - The value to write
+ * value - The value to write
*
* RETURN: Status
*
@@ -571,7 +571,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
*
* FUNCTION: acpi_hw_read_multiple
*
- * PARAMETERS: Value - Where the register value is returned
+ * PARAMETERS: value - Where the register value is returned
* register_a - First ACPI register (required)
* register_b - Second ACPI register (optional)
*
@@ -624,7 +624,7 @@ acpi_hw_read_multiple(u32 *value,
*
* FUNCTION: acpi_hw_write_multiple
*
- * PARAMETERS: Value - The value to write
+ * PARAMETERS: value - The value to write
* register_a - First ACPI register (required)
* register_b - Second ACPI register (optional)
*
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 615996a36bed..3fddde056a5e 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep")
* FUNCTION: acpi_hw_legacy_sleep
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * Flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep")
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
{
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
@@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
return_ACPI_STATUS(status);
}
- /* Optionally execute _GTS (Going To Sleep) */
-
- if (flags & ACPI_EXECUTE_GTS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
- }
-
/* Get current value of PM1A control */
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
@@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_legacy_wake_prep
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * Flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
{
acpi_status status;
struct acpi_bit_register_info *sleep_type_reg_info;
@@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
}
}
- /* Optionally execute _BFS (Back From Sleep) */
-
- if (flags & ACPI_EXECUTE_BFS) {
- acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state);
- }
return_ACPI_STATUS(status);
}
@@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
* FUNCTION: acpi_hw_legacy_wake
*
* PARAMETERS: sleep_state - Which sleep state we just exited
- * Flags - Reserved, set to zero
*
* RETURN: Status
*
@@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags)
*
******************************************************************************/
-acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
+acpi_status acpi_hw_legacy_wake(u8 sleep_state)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index f1b2c3b94cac..b6411f16832f 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -54,7 +54,7 @@ ACPI_MODULE_NAME("hwtimer")
*
* FUNCTION: acpi_get_timer_resolution
*
- * PARAMETERS: Resolution - Where the resolution is returned
+ * PARAMETERS: resolution - Where the resolution is returned
*
* RETURN: Status and timer resolution
*
@@ -84,7 +84,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_timer_resolution)
*
* FUNCTION: acpi_get_timer
*
- * PARAMETERS: Ticks - Where the timer value is returned
+ * PARAMETERS: ticks - Where the timer value is returned
*
* RETURN: Status and current timer value (ticks)
*
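
The two renamed parameters above belong to the public PM-timer accessors in hwtimer.c. A minimal caller sketch of reading the 3.579545 MHz ACPI PM timer through this interface; acpi_get_timer_duration() is the companion helper from the same file and is assumed here to return the elapsed time in microseconds and to handle 24- vs 32-bit timer rollover itself. The udelay() is only a stand-in for work being measured.

#include <linux/acpi.h>
#include <linux/delay.h>

/* Sample the ACPI PM timer twice and convert the delta to microseconds. */
static void pm_timer_sample(void)
{
	u32 start, end, elapsed_us;

	if (ACPI_FAILURE(acpi_get_timer(&start)))
		return;

	udelay(100);				/* something to measure */

	if (ACPI_FAILURE(acpi_get_timer(&end)))
		return;

	/* Rollover (24- or 32-bit timer) is handled by the helper */
	if (ACPI_SUCCESS(acpi_get_timer_duration(start, end, &elapsed_us)))
		pr_info("PM timer delta: %u us\n", elapsed_us);
}
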
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 6e5c43a60bb7..c99d546b217f 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -58,7 +58,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some
- * ports are only illegal if the BIOS calls _OSI with a win_xP string or
+ * ports are only illegal if the BIOS calls _OSI with a win_XP string or
 * later (meaning that the BIOS itself is post-XP.)
*
* This provides ACPICA with the desired port protections and
@@ -66,7 +66,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* Description of port entries:
* DMA: DMA controller
- * PIC0: Programmable Interrupt Controller (8259_a)
+ * PIC0: Programmable Interrupt Controller (8259A)
* PIT1: System Timer 1
* PIT2: System Timer 2 failsafe
* RTC: Real-time clock
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index a716fede4f25..7bfd649d1996 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -104,8 +104,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
*
* FUNCTION: acpi_read
*
- * PARAMETERS: Value - Where the value is returned
- * Reg - GAS register structure
+ * PARAMETERS: value - Where the value is returned
+ * reg - GAS register structure
*
* RETURN: Status
*
@@ -113,7 +113,7 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
*
* LIMITATIONS: <These limitations also apply to acpi_write>
* bit_width must be exactly 8, 16, 32, or 64.
- * space_iD must be system_memory or system_iO.
+ * space_ID must be system_memory or system_IO.
* bit_offset and access_width are currently ignored, as there has
* not been a need to implement these.
*
@@ -196,8 +196,8 @@ ACPI_EXPORT_SYMBOL(acpi_read)
*
* FUNCTION: acpi_write
*
- * PARAMETERS: Value - Value to be written
- * Reg - GAS register structure
+ * PARAMETERS: value - Value to be written
+ * reg - GAS register structure
*
* RETURN: Status
*
@@ -441,7 +441,7 @@ ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
* *sleep_type_a - Where SLP_TYPa is returned
* *sleep_type_b - Where SLP_TYPb is returned
*
- * RETURN: Status - ACPI status
+ * RETURN: status - ACPI status
*
* DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested sleep
* state.
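
acpi_read()/acpi_write() documented above operate on a Generic Address Structure rather than a raw pointer or port number, with the stated limitations (bit_width of 8/16/32/64, system memory or system I/O space only). A hedged sketch of reading the FADT reset register through this interface, assuming the u64-value acpi_read() prototype of this ACPICA release; the choice of acpi_gbl_FADT.reset_register is only an example of a GAS descriptor that is usually available.

#include <linux/acpi.h>

/* Read the FADT reset register via its GAS descriptor. */
static void read_reset_register_sketch(void)
{
	struct acpi_generic_address *gas = &acpi_gbl_FADT.reset_register;
	u64 value;

	/* Only memory/IO spaces and 8/16/32/64-bit widths are accepted */
	if (ACPI_SUCCESS(acpi_read(&value, gas)))
		pr_info("reset register = 0x%llx\n",
			(unsigned long long)value);
}
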
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 762d059bb508..1f165a750ae2 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep")
/* Local prototypes */
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id);
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/*
* Dispatch table used to efficiently branch to the various sleep
@@ -205,7 +205,7 @@ acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)
ACPI_FLUSH_CPU_CACHE();
status = acpi_hw_write_port(acpi_gbl_FADT.smi_command,
- (u32)acpi_gbl_FADT.S4bios_request, 8);
+ (u32)acpi_gbl_FADT.s4_bios_request, 8);
do {
acpi_os_stall(1000);
@@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios)
*
******************************************************************************/
static acpi_status
-acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
+acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id)
{
acpi_status status;
struct acpi_sleep_functions *sleep_functions =
@@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
* use the extended sleep registers
*/
if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) {
- status = sleep_functions->extended_function(sleep_state, flags);
+ status = sleep_functions->extended_function(sleep_state);
} else {
/* Legacy sleep */
- status = sleep_functions->legacy_function(sleep_state, flags);
+ status = sleep_functions->legacy_function(sleep_state);
}
return (status);
@@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id)
* For the case where reduced-hardware-only code is being generated,
* we know that only the extended sleep registers are available
*/
- status = sleep_functions->extended_function(sleep_state, flags);
+ status = sleep_functions->extended_function(sleep_state);
return (status);
#endif /* !ACPI_REDUCED_HARDWARE */
@@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* FUNCTION: acpi_enter_sleep_state
*
* PARAMETERS: sleep_state - Which sleep state to enter
- * Flags - ACPI_EXECUTE_GTS to run optional method
*
* RETURN: Status
*
@@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
*
******************************************************************************/
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
@@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags)
}
status =
- acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID);
+ acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -382,7 +381,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* FUNCTION: acpi_leave_sleep_state_prep
*
* PARAMETERS: sleep_state - Which sleep state we are exiting
- * Flags - ACPI_EXECUTE_BFS to run optional method
+ * flags - ACPI_EXECUTE_BFS to run optional method
*
* RETURN: Status
*
@@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state)
* Called with interrupts DISABLED.
*
******************************************************************************/
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags)
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state)
{
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep);
status =
- acpi_hw_sleep_dispatch(sleep_state, flags,
+ acpi_hw_sleep_dispatch(sleep_state,
ACPI_WAKE_PREP_FUNCTION_ID);
return_ACPI_STATUS(status);
}
@@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
ACPI_FUNCTION_TRACE(acpi_leave_sleep_state);
-
- status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID);
+ status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID);
return_ACPI_STATUS(status);
}
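
With the _GTS/_BFS support gone, the sleep entry points above lose their flags argument. A minimal caller-side sketch of an S-state transition after this series, using only the exported interfaces shown in the hunks; acpi_enter_sleep_state_prep() is assumed to have already run with interrupts still enabled, and the headers above require the enter/wake-prep steps to run with interrupts disabled.

#include <linux/acpi.h>
#include <linux/irqflags.h>

/* Enter and leave an S-state with the post-series one-argument API. */
static void sleep_entry_sketch(u8 sleep_state)
{
	unsigned long flags;
	acpi_status status;

	local_irq_save(flags);
	ACPI_FLUSH_CPU_CACHE();

	/* Was acpi_enter_sleep_state(sleep_state, ACPI_EXECUTE_GTS) */
	status = acpi_enter_sleep_state(sleep_state);

	/* Execution resumes here on wakeup (or if the sleep failed) */
	if (ACPI_SUCCESS(status))
		acpi_leave_sleep_state_prep(sleep_state);	/* was ..., ACPI_EXECUTE_BFS */

	local_irq_restore(flags);

	/* Remaining wake-up work runs with interrupts enabled again */
	acpi_leave_sleep_state(sleep_state);
}
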
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 61623f3f6826..23db53ce2293 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -157,7 +157,7 @@ acpi_status acpi_ns_root_initialize(void)
#if defined (ACPI_ASL_COMPILER)
- /* Save the parameter count for the i_aSL compiler */
+ /* Save the parameter count for the iASL compiler */
new_node->value = obj_desc->method.param_count;
#else
@@ -258,11 +258,11 @@ acpi_status acpi_ns_root_initialize(void)
* FUNCTION: acpi_ns_lookup
*
* PARAMETERS: scope_info - Current scope info block
- * Pathname - Search pathname, in internal format
+ * pathname - Search pathname, in internal format
* (as represented in the AML stream)
- * Type - Type associated with name
+ * type - Type associated with name
* interpreter_mode - IMODE_LOAD_PASS2 => add name if not found
- * Flags - Flags describing the search restrictions
+ * flags - Flags describing the search restrictions
* walk_state - Current state of the walk
* return_node - Where the Node is placed (if found
* or created successfully)
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 7c3d3ceb98b3..ac389e5bb594 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("nsalloc")
*
* FUNCTION: acpi_ns_create_node
*
- * PARAMETERS: Name - Name of the new node (4 char ACPI name)
+ * PARAMETERS: name - Name of the new node (4 char ACPI name)
*
* RETURN: New namespace node (Null on failure)
*
@@ -92,7 +92,7 @@ struct acpi_namespace_node *acpi_ns_create_node(u32 name)
*
* FUNCTION: acpi_ns_delete_node
*
- * PARAMETERS: Node - Node to be deleted
+ * PARAMETERS: node - Node to be deleted
*
* RETURN: None
*
@@ -143,7 +143,7 @@ void acpi_ns_delete_node(struct acpi_namespace_node *node)
*
* FUNCTION: acpi_ns_remove_node
*
- * PARAMETERS: Node - Node to be removed/deleted
+ * PARAMETERS: node - Node to be removed/deleted
*
* RETURN: None
*
@@ -196,8 +196,8 @@ void acpi_ns_remove_node(struct acpi_namespace_node *node)
*
* PARAMETERS: walk_state - Current state of the walk
* parent_node - The parent of the new Node
- * Node - The new Node to install
- * Type - ACPI object type of the new Node
+ * node - The new Node to install
+ * type - ACPI object type of the new Node
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 3f7f3f6e7dd5..7ee4e6aeb0a2 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -63,7 +63,7 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
* FUNCTION: acpi_ns_print_pathname
*
* PARAMETERS: num_segments - Number of ACPI name segments
- * Pathname - The compressed (internal) path
+ * pathname - The compressed (internal) path
*
* RETURN: None
*
@@ -107,10 +107,10 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
*
* FUNCTION: acpi_ns_dump_pathname
*
- * PARAMETERS: Handle - Object
- * Msg - Prefix message
- * Level - Desired debug level
- * Component - Caller's component ID
+ * PARAMETERS: handle - Object
+ * msg - Prefix message
+ * level - Desired debug level
+ * component - Caller's component ID
*
* RETURN: None
*
@@ -143,8 +143,8 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
* FUNCTION: acpi_ns_dump_one_object
*
* PARAMETERS: obj_handle - Node to be dumped
- * Level - Nesting level of the handle
- * Context - Passed into walk_namespace
+ * level - Nesting level of the handle
+ * context - Passed into walk_namespace
* return_value - Not used
*
* RETURN: Status
@@ -615,7 +615,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
*
* FUNCTION: acpi_ns_dump_objects
*
- * PARAMETERS: Type - Object type to be dumped
+ * PARAMETERS: type - Object type to be dumped
* display_type - 0 or ACPI_DISPLAY_SUMMARY
* max_depth - Maximum depth of dump. Use ACPI_UINT32_MAX
* for an effectively unlimited depth.
@@ -671,7 +671,7 @@ acpi_ns_dump_objects(acpi_object_type type,
*
* FUNCTION: acpi_ns_dump_entry
*
- * PARAMETERS: Handle - Node to be dumped
+ * PARAMETERS: handle - Node to be dumped
* debug_level - Output level
*
* RETURN: None
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 3b5acb0eb406..944d4c8d9438 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -55,9 +55,9 @@ ACPI_MODULE_NAME("nsdumpdv")
*
* FUNCTION: acpi_ns_dump_one_device
*
- * PARAMETERS: Handle - Node to be dumped
- * Level - Nesting level of the handle
- * Context - Passed into walk_namespace
+ * PARAMETERS: handle - Node to be dumped
+ * level - Nesting level of the handle
+ * context - Passed into walk_namespace
* return_value - Not used
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f375cb82e321..69074be498e8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -59,11 +59,11 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
*
* FUNCTION: acpi_ns_evaluate
*
- * PARAMETERS: Info - Evaluation info block, contains:
+ * PARAMETERS: info - Evaluation info block, contains:
* prefix_node - Prefix or Method/Object Node to execute
- * Pathname - Name of method to execute, If NULL, the
+ * pathname - Name of method to execute, If NULL, the
* Node is the object to execute
- * Parameters - List of parameters to pass to the method,
+ * parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
* return_object - Where to put method's return value (if
@@ -71,7 +71,7 @@ acpi_ns_exec_module_code(union acpi_operand_object *method_obj,
* parameter_type - Type of Parameter list
* return_object - Where to put method's return value (if
* any). If NULL, no value is returned.
- * Flags - ACPI_IGNORE_RETURN_VALUE to delete return
+ * flags - ACPI_IGNORE_RETURN_VALUE to delete return
*
* RETURN: Status
*
@@ -351,7 +351,7 @@ void acpi_ns_exec_module_code_list(void)
* FUNCTION: acpi_ns_exec_module_code
*
* PARAMETERS: method_obj - Object container for the module-level code
- * Info - Info block for method evaluation
+ * info - Info block for method evaluation
*
* RETURN: None. Exceptions during method execution are ignored, since
* we cannot abort a table load.
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 9d84ec2f0211..95ffe8dfa1f1 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -224,8 +224,8 @@ acpi_status acpi_ns_initialize_devices(void)
* FUNCTION: acpi_ns_init_one_object
*
* PARAMETERS: obj_handle - Node
- * Level - Current nesting level
- * Context - Points to a init info struct
+ * level - Current nesting level
+ * context - Points to an init info struct
* return_value - Not used
*
* RETURN: Status
@@ -530,7 +530,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* we will not run _INI, but we continue to examine the children
* of this device.
*
- * From the ACPI spec, description of _STA: (Note - no mention
+ * From the ACPI spec, description of _STA: (note - no mention
* of whether to run _INI or not on the device in question)
*
* "_STA may return bit 0 clear (not present) with bit 3 set
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5cbf15ffe7d8..76935ff29289 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -63,7 +63,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
* FUNCTION: acpi_ns_load_table
*
* PARAMETERS: table_index - Index for table to be loaded
- * Node - Owning NS node
+ * node - Owning NS node
*
* RETURN: Status
*
@@ -278,7 +278,7 @@ static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle)
*
* FUNCTION: acpi_ns_unload_name_space
*
- * PARAMETERS: Handle - Root of namespace subtree to be deleted
+ * PARAMETERS: handle - Root of namespace subtree to be deleted
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index b20e7c8c3ffb..96e0eb609bb4 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("nsnames")
*
* FUNCTION: acpi_ns_build_external_path
*
- * PARAMETERS: Node - NS node whose pathname is needed
- * Size - Size of the pathname
+ * PARAMETERS: node - NS node whose pathname is needed
+ * size - Size of the pathname
* *name_buffer - Where to return the pathname
*
* RETURN: Status
@@ -120,7 +120,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_ns_get_external_pathname
*
- * PARAMETERS: Node - Namespace node whose pathname is needed
+ * PARAMETERS: node - Namespace node whose pathname is needed
*
* RETURN: Pointer to storage containing the fully qualified name of
* the node, In external format (name segments separated by path
@@ -168,7 +168,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
*
* FUNCTION: acpi_ns_get_pathname_length
*
- * PARAMETERS: Node - Namespace node
+ * PARAMETERS: node - Namespace node
*
* RETURN: Length of path, including prefix
*
@@ -214,7 +214,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
*
* PARAMETERS: target_handle - Handle of named object whose name is
* to be found
- * Buffer - Where the pathname is returned
+ * buffer - Where the pathname is returned
*
* RETURN: Status, Buffer is filled with pathname if status is AE_OK
*
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index dd77a3ce6e50..d6c9a3cc6716 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -53,9 +53,9 @@ ACPI_MODULE_NAME("nsobject")
*
* FUNCTION: acpi_ns_attach_object
*
- * PARAMETERS: Node - Parent Node
- * Object - Object to be attached
- * Type - Type of object, or ACPI_TYPE_ANY if not
+ * PARAMETERS: node - Parent Node
+ * object - Object to be attached
+ * type - Type of object, or ACPI_TYPE_ANY if not
* known
*
* RETURN: Status
@@ -191,7 +191,7 @@ acpi_ns_attach_object(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_ns_detach_object
*
- * PARAMETERS: Node - A Namespace node whose object will be detached
+ * PARAMETERS: node - A Namespace node whose object will be detached
*
* RETURN: None.
*
@@ -250,7 +250,7 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
*
* FUNCTION: acpi_ns_get_attached_object
*
- * PARAMETERS: Node - Namespace node
+ * PARAMETERS: node - Namespace node
*
* RETURN: Current value of the object field from the Node whose
* handle is passed
@@ -285,7 +285,7 @@ union acpi_operand_object *acpi_ns_get_attached_object(struct
*
* FUNCTION: acpi_ns_get_secondary_object
*
- * PARAMETERS: Node - Namespace node
+ * PARAMETERS: node - Namespace node
*
* RETURN: Current value of the object field from the Node whose
* handle is passed.
@@ -315,9 +315,9 @@ union acpi_operand_object *acpi_ns_get_secondary_object(union
*
* FUNCTION: acpi_ns_attach_data
*
- * PARAMETERS: Node - Namespace node
- * Handler - Handler to be associated with the data
- * Data - Data to be attached
+ * PARAMETERS: node - Namespace node
+ * handler - Handler to be associated with the data
+ * data - Data to be attached
*
* RETURN: Status
*
@@ -372,8 +372,8 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_ns_detach_data
*
- * PARAMETERS: Node - Namespace node
- * Handler - Handler associated with the data
+ * PARAMETERS: node - Namespace node
+ * handler - Handler associated with the data
*
* RETURN: Status
*
@@ -416,9 +416,9 @@ acpi_ns_detach_data(struct acpi_namespace_node * node,
*
* FUNCTION: acpi_ns_get_attached_data
*
- * PARAMETERS: Node - Namespace node
- * Handler - Handler associated with the data
- * Data - Where the data is returned
+ * PARAMETERS: node - Namespace node
+ * handler - Handler associated with the data
+ * data - Where the data is returned
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index fe6626035495..2419f417ea33 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -116,7 +116,7 @@ static const char *acpi_rtype_names[] = {
*
* FUNCTION: acpi_ns_check_predefined_names
*
- * PARAMETERS: Node - Namespace node for the method/object
+ * PARAMETERS: node - Namespace node for the method/object
* user_param_count - Number of parameters actually passed
* return_status - Status from the object evaluation
* return_object_ptr - Pointer to the object returned from the
@@ -275,10 +275,10 @@ cleanup:
*
* FUNCTION: acpi_ns_check_parameter_count
*
- * PARAMETERS: Pathname - Full pathname to the node (for error msgs)
- * Node - Namespace node for the method/object
+ * PARAMETERS: pathname - Full pathname to the node (for error msgs)
+ * node - Namespace node for the method/object
* user_param_count - Number of args passed in by the caller
- * Predefined - Pointer to entry in predefined name table
+ * predefined - Pointer to entry in predefined name table
*
* RETURN: None
*
@@ -364,7 +364,7 @@ acpi_ns_check_parameter_count(char *pathname,
*
* FUNCTION: acpi_ns_check_for_predefined_name
*
- * PARAMETERS: Node - Namespace node for the method/object
+ * PARAMETERS: node - Namespace node for the method/object
*
* RETURN: Pointer to entry in predefined table. NULL indicates not found.
*
@@ -410,7 +410,7 @@ const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
*
* FUNCTION: acpi_ns_check_package
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -685,11 +685,11 @@ package_too_small:
*
* FUNCTION: acpi_ns_check_package_list
*
- * PARAMETERS: Data - Pointer to validation data structure
- * Package - Pointer to package-specific info for method
- * Elements - Element list of parent package. All elements
+ * PARAMETERS: data - Pointer to validation data structure
+ * package - Pointer to package-specific info for method
+ * elements - Element list of parent package. All elements
* of this list should be of type Package.
- * Count - Count of subpackages
+ * count - Count of subpackages
*
* RETURN: Status
*
@@ -911,12 +911,12 @@ package_too_small:
*
* FUNCTION: acpi_ns_check_package_elements
*
- * PARAMETERS: Data - Pointer to validation data structure
- * Elements - Pointer to the package elements array
- * Type1 - Object type for first group
- * Count1 - Count for first group
- * Type2 - Object type for second group
- * Count2 - Count for second group
+ * PARAMETERS: data - Pointer to validation data structure
+ * elements - Pointer to the package elements array
+ * type1 - Object type for first group
+ * count1 - Count for first group
+ * type2 - Object type for second group
+ * count2 - Count for second group
* start_index - Start of the first group of elements
*
* RETURN: Status
@@ -968,7 +968,7 @@ acpi_ns_check_package_elements(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_check_object_type
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
* expected_btypes - Bitmap of expected return type(s)
@@ -1102,7 +1102,7 @@ acpi_ns_check_object_type(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_check_reference
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object - Object returned from the evaluation of a
* method or object
*
@@ -1140,7 +1140,7 @@ acpi_ns_check_reference(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_get_expected_types
*
- * PARAMETERS: Buffer - Pointer to where the string is returned
+ * PARAMETERS: buffer - Pointer to where the string is returned
* expected_btypes - Bitmap of expected return type(s)
*
* RETURN: Buffer is populated with type names.
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 5519a64a353f..8c5f292860fc 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -94,7 +94,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
*
* FUNCTION: acpi_ns_repair_object
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* expected_btypes - Object types expected
* package_index - Index of object within parent package (if
* applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -470,7 +470,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
*
* FUNCTION: acpi_ns_repair_null_element
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* expected_btypes - Object types expected
* package_index - Index of object within parent package (if
* applicable - ACPI_NOT_PACKAGE_ELEMENT
@@ -509,17 +509,17 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
*/
if (expected_btypes & ACPI_RTYPE_INTEGER) {
- /* Need an Integer - create a zero-value integer */
+ /* Need an integer - create a zero-value integer */
new_object = acpi_ut_create_integer_object((u64)0);
} else if (expected_btypes & ACPI_RTYPE_STRING) {
- /* Need a String - create a NULL string */
+ /* Need a string - create a NULL string */
new_object = acpi_ut_create_string_object(0);
} else if (expected_btypes & ACPI_RTYPE_BUFFER) {
- /* Need a Buffer - create a zero-length buffer */
+ /* Need a buffer - create a zero-length buffer */
new_object = acpi_ut_create_buffer_object(0);
} else {
@@ -552,7 +552,7 @@ acpi_ns_repair_null_element(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_remove_null_elements
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* package_type - An acpi_return_package_types value
* obj_desc - A Package object
*
@@ -635,7 +635,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_wrap_with_package
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* original_object - Pointer to the object to repair.
* obj_desc_ptr - The new package object is returned here
*
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 726bc8e687f7..90189251cdf0 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -149,8 +149,8 @@ static const struct acpi_repair_info acpi_ns_repairable_names[] = {
*
* FUNCTION: acpi_ns_complex_repairs
*
- * PARAMETERS: Data - Pointer to validation data structure
- * Node - Namespace node for the method/object
+ * PARAMETERS: data - Pointer to validation data structure
+ * node - Namespace node for the method/object
* validate_status - Original status of earlier validation
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
@@ -187,7 +187,7 @@ acpi_ns_complex_repairs(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_match_repairable_name
*
- * PARAMETERS: Node - Namespace node for the method/object
+ * PARAMETERS: node - Namespace node for the method/object
*
* RETURN: Pointer to entry in repair table. NULL indicates not found.
*
@@ -218,7 +218,7 @@ static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
*
* FUNCTION: acpi_ns_repair_ALR
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -247,7 +247,7 @@ acpi_ns_repair_ALR(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_repair_FDE
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -335,7 +335,7 @@ acpi_ns_repair_FDE(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_repair_CID
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -405,7 +405,7 @@ acpi_ns_repair_CID(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_repair_HID
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -487,7 +487,7 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_repair_TSS
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -531,7 +531,7 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_repair_PSS
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object_ptr - Pointer to the object returned from the
* evaluation of a method or object
*
@@ -600,7 +600,7 @@ acpi_ns_repair_PSS(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_check_sorted_list
*
- * PARAMETERS: Data - Pointer to validation data structure
+ * PARAMETERS: data - Pointer to validation data structure
* return_object - Pointer to the top-level returned object
* expected_count - Minimum length of each sub-package
* sort_index - Sub-package entry to sort on
@@ -707,9 +707,9 @@ acpi_ns_check_sorted_list(struct acpi_predefined_data *data,
*
* FUNCTION: acpi_ns_sort_list
*
- * PARAMETERS: Elements - Package object element list
- * Count - Element count for above
- * Index - Sort by which package element
+ * PARAMETERS: elements - Package object element list
+ * count - Element count for above
+ * index - Sort by which package element
* sort_direction - Ascending or Descending sort
*
* RETURN: None
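
acpi_ns_sort_list() documented above orders an array of sub-packages by one integer element (for example, sorting _PSS entries by power dissipation). A standalone sketch of that idea, deliberately written against plain C types rather than the ACPICA operand objects; the struct, the SORT_ASCENDING constant and the function name are ours, only the parameter roles (elements, count, index, sort_direction) follow the header above.

#include <stdint.h>

#define SORT_ASCENDING	0
#define SORT_DESCENDING	1

struct pkg {
	uint64_t element[8];	/* stand-in for a sub-package's integers */
};

/* Exchange sort on element[index], ascending or descending. */
static void sort_list_sketch(struct pkg *elements, uint32_t count,
			     uint32_t index, int sort_direction)
{
	uint32_t i, j;

	for (i = 1; i < count; i++) {
		for (j = i; j > 0; j--) {
			uint64_t prev = elements[j - 1].element[index];
			uint64_t curr = elements[j].element[index];
			struct pkg tmp;

			if (sort_direction == SORT_ASCENDING ?
			    prev <= curr : prev >= curr)
				break;

			/* Out of order: swap the whole sub-package */
			tmp = elements[j - 1];
			elements[j - 1] = elements[j];
			elements[j] = tmp;
		}
	}
}
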
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 507043d66114..456cc859f869 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -65,7 +65,7 @@ acpi_ns_search_parent_tree(u32 target_name,
*
* PARAMETERS: target_name - Ascii ACPI name to search for
* parent_node - Starting node where search will begin
- * Type - Object type to match
+ * type - Object type to match
* return_node - Where the matched Named obj is returned
*
* RETURN: Status
@@ -175,8 +175,8 @@ acpi_ns_search_one_scope(u32 target_name,
* FUNCTION: acpi_ns_search_parent_tree
*
* PARAMETERS: target_name - Ascii ACPI name to search for
- * Node - Starting node where search will begin
- * Type - Object type to match
+ * node - Starting node where search will begin
+ * type - Object type to match
* return_node - Where the matched Node is returned
*
* RETURN: Status
@@ -264,11 +264,11 @@ acpi_ns_search_parent_tree(u32 target_name,
*
* PARAMETERS: target_name - Ascii ACPI name to search for (4 chars)
* walk_state - Current state of the walk
- * Node - Starting node where search will begin
+ * node - Starting node where search will begin
* interpreter_mode - Add names only in ACPI_MODE_LOAD_PASS_x.
 * Otherwise, search only.
- * Type - Object type to match
- * Flags - Flags describing the search restrictions
+ * type - Object type to match
+ * flags - Flags describing the search restrictions
* return_node - Where the Node is returned
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 75113759f69d..ef753a41e087 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -62,8 +62,8 @@ acpi_name acpi_ns_find_parent_name(struct acpi_namespace_node *node_to_search);
*
* FUNCTION: acpi_ns_print_node_pathname
*
- * PARAMETERS: Node - Object
- * Message - Prefix message
+ * PARAMETERS: node - Object
+ * message - Prefix message
*
* DESCRIPTION: Print an object's full namespace pathname
* Manages allocation/freeing of a pathname buffer
@@ -101,7 +101,7 @@ acpi_ns_print_node_pathname(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_ns_valid_root_prefix
*
- * PARAMETERS: Prefix - Character to be checked
+ * PARAMETERS: prefix - Character to be checked
*
* RETURN: TRUE if a valid prefix
*
@@ -119,7 +119,7 @@ u8 acpi_ns_valid_root_prefix(char prefix)
*
* FUNCTION: acpi_ns_valid_path_separator
*
- * PARAMETERS: Sep - Character to be checked
+ * PARAMETERS: sep - Character to be checked
*
* RETURN: TRUE if a valid path separator
*
@@ -137,7 +137,7 @@ static u8 acpi_ns_valid_path_separator(char sep)
*
* FUNCTION: acpi_ns_get_type
*
- * PARAMETERS: Node - Parent Node to be examined
+ * PARAMETERS: node - Parent Node to be examined
*
* RETURN: Type field from Node whose handle is passed
*
@@ -161,7 +161,7 @@ acpi_object_type acpi_ns_get_type(struct acpi_namespace_node * node)
*
* FUNCTION: acpi_ns_local
*
- * PARAMETERS: Type - A namespace object type
+ * PARAMETERS: type - A namespace object type
*
* RETURN: LOCAL if names must be found locally in objects of the
* passed type, 0 if enclosing scopes should be searched
@@ -189,7 +189,7 @@ u32 acpi_ns_local(acpi_object_type type)
*
* FUNCTION: acpi_ns_get_internal_name_length
*
- * PARAMETERS: Info - Info struct initialized with the
+ * PARAMETERS: info - Info struct initialized with the
* external name pointer.
*
* RETURN: None
@@ -260,7 +260,7 @@ void acpi_ns_get_internal_name_length(struct acpi_namestring_info *info)
*
* FUNCTION: acpi_ns_build_internal_name
*
- * PARAMETERS: Info - Info struct fully initialized
+ * PARAMETERS: info - Info struct fully initialized
*
* RETURN: Status
*
@@ -371,7 +371,7 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
* FUNCTION: acpi_ns_internalize_name
*
* PARAMETERS: *external_name - External representation of name
- * **Converted Name - Where to return the resulting
+ * **Converted name - Where to return the resulting
 * internal representation of the name
*
* RETURN: Status
@@ -575,7 +575,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
*
* FUNCTION: acpi_ns_validate_handle
*
- * PARAMETERS: Handle - Handle to be validated and typecast to a
+ * PARAMETERS: handle - Handle to be validated and typecast to a
* namespace node.
*
* RETURN: A pointer to a namespace node
@@ -651,7 +651,7 @@ void acpi_ns_terminate(void)
*
* FUNCTION: acpi_ns_opens_scope
*
- * PARAMETERS: Type - A valid namespace type
+ * PARAMETERS: type - A valid namespace type
*
* RETURN: NEWSCOPE if the passed type "opens a name scope" according
* to the ACPI specification, else 0
@@ -677,14 +677,14 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
*
* FUNCTION: acpi_ns_get_node
*
- * PARAMETERS: *Pathname - Name to be found, in external (ASL) format. The
+ * PARAMETERS: *pathname - Name to be found, in external (ASL) format. The
* \ (backslash) and ^ (carat) prefixes, and the
* . (period) to separate segments are supported.
* prefix_node - Root of subtree to be searched, or NS_ALL for the
* root of the name space. If Name is fully
* qualified (first s8 is '\'), the passed value
* of Scope will not be accessed.
- * Flags - Used to indicate whether to perform upsearch or
+ * flags - Used to indicate whether to perform upsearch or
* not.
* return_node - Where the Node is returned
*
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index f69895a54895..730bccc5e7f7 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -88,7 +88,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
*
* FUNCTION: acpi_ns_get_next_node_typed
*
- * PARAMETERS: Type - Type of node to be searched for
+ * PARAMETERS: type - Type of node to be searched for
* parent_node - Parent node whose children we are
* getting
* child_node - Previous child that was found.
@@ -151,16 +151,16 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
*
* FUNCTION: acpi_ns_walk_namespace
*
- * PARAMETERS: Type - acpi_object_type to search for
+ * PARAMETERS: type - acpi_object_type to search for
* start_node - Handle in namespace where search begins
* max_depth - Depth to which search is to reach
- * Flags - Whether to unlock the NS before invoking
+ * flags - Whether to unlock the NS before invoking
* the callback routine
* pre_order_visit - Called during tree pre-order visit
* when an object of "Type" is found
* post_order_visit - Called during tree post-order visit
* when an object of "Type" is found
- * Context - Passed to user function(s) above
+ * context - Passed to user function(s) above
* return_value - from the user_function if terminated
* early. Otherwise, returns NULL.
* RETURNS: Status
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 71d15f61807b..9692e6702333 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -58,8 +58,8 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info);
*
* FUNCTION: acpi_evaluate_object_typed
*
- * PARAMETERS: Handle - Object handle (optional)
- * Pathname - Object pathname (optional)
+ * PARAMETERS: handle - Object handle (optional)
+ * pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
@@ -152,8 +152,8 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
*
* FUNCTION: acpi_evaluate_object
*
- * PARAMETERS: Handle - Object handle (optional)
- * Pathname - Object pathname (optional)
+ * PARAMETERS: handle - Object handle (optional)
+ * pathname - Object pathname (optional)
* external_params - List of parameters to pass to method,
* terminated by NULL. May be NULL
* if no parameters are being passed.
@@ -364,7 +364,7 @@ ACPI_EXPORT_SYMBOL(acpi_evaluate_object)
*
* FUNCTION: acpi_ns_resolve_references
*
- * PARAMETERS: Info - Evaluation info block
+ * PARAMETERS: info - Evaluation info block
*
* RETURN: Info->return_object is replaced with the dereferenced object
*
@@ -431,14 +431,14 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
*
* FUNCTION: acpi_walk_namespace
*
- * PARAMETERS: Type - acpi_object_type to search for
+ * PARAMETERS: type - acpi_object_type to search for
* start_object - Handle in namespace where search begins
* max_depth - Depth to which search is to reach
* pre_order_visit - Called during tree pre-order visit
* when an object of "Type" is found
* post_order_visit - Called during tree post-order visit
* when an object of "Type" is found
- * Context - Passed to user function(s) above
+ * context - Passed to user function(s) above
* return_value - Location where return value of
* user_function is put if terminated early
*
@@ -646,7 +646,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
*
* PARAMETERS: HID - HID to search for. Can be NULL.
* user_function - Called when a matching object is found
- * Context - Passed to user function
+ * context - Passed to user function
* return_value - Location where return value of
* user_function is put if terminated early
*
@@ -716,8 +716,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_devices)
* FUNCTION: acpi_attach_data
*
* PARAMETERS: obj_handle - Namespace node
- * Handler - Handler for this attachment
- * Data - Pointer to data to be attached
+ * handler - Handler for this attachment
+ * data - Pointer to data to be attached
*
* RETURN: Status
*
@@ -764,7 +764,7 @@ ACPI_EXPORT_SYMBOL(acpi_attach_data)
* FUNCTION: acpi_detach_data
*
* PARAMETERS: obj_handle - Namespace node handle
- * Handler - Handler used in call to acpi_attach_data
+ * handler - Handler used in call to acpi_attach_data
*
* RETURN: Status
*
@@ -810,8 +810,8 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
* FUNCTION: acpi_get_data
*
* PARAMETERS: obj_handle - Namespace node
- * Handler - Handler used in call to attach_data
- * Data - Where the data is returned
+ * handler - Handler used in call to attach_data
+ * data - Where the data is returned
*
* RETURN: Status
*
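
The renamed parameters above all belong to exported interfaces. A short sketch of acpi_walk_namespace() with a pre-order callback, combined with acpi_attach_data() on the nodes it visits; the tag structure, the release handler and the callback are ours, while the ACPI signatures are the ones documented in the hunks. Returning an error from the callback aborts the walk.

#include <linux/acpi.h>
#include <linux/slab.h>

struct my_tag {
	int seen;
};

/* Required by acpi_attach_data(): called when the node goes away. */
static void my_tag_release(acpi_handle handle, void *data)
{
	kfree(data);
}

/* Pre-order visit callback: tag every Device node we encounter. */
static acpi_status tag_device(acpi_handle handle, u32 nesting_level,
			      void *context, void **return_value)
{
	struct my_tag *tag = kzalloc(sizeof(*tag), GFP_KERNEL);

	if (!tag)
		return AE_NO_MEMORY;

	if (ACPI_FAILURE(acpi_attach_data(handle, my_tag_release, tag)))
		kfree(tag);

	return AE_OK;	/* keep walking */
}

static void walk_sketch(void)
{
	/* No post-order callback, no context, no early-return value */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX, tag_device, NULL, NULL, NULL);
}
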
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index af401c9c4dfc..08e9610b34ca 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -61,8 +61,8 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
*
* FUNCTION: acpi_get_handle
*
- * PARAMETERS: Parent - Object to search under (search scope).
- * Pathname - Pointer to an asciiz string containing the
+ * PARAMETERS: parent - Object to search under (search scope).
+ * pathname - Pointer to an asciiz string containing the
* name
* ret_handle - Where the return handle is returned
*
@@ -142,9 +142,9 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
*
* FUNCTION: acpi_get_name
*
- * PARAMETERS: Handle - Handle to be converted to a pathname
+ * PARAMETERS: handle - Handle to be converted to a pathname
* name_type - Full pathname or single segment
- * Buffer - Buffer for returned path
+ * buffer - Buffer for returned path
*
* RETURN: Pointer to a string containing the fully qualified Name.
*
@@ -219,8 +219,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_name)
*
* FUNCTION: acpi_ns_copy_device_id
*
- * PARAMETERS: Dest - Pointer to the destination DEVICE_ID
- * Source - Pointer to the source DEVICE_ID
+ * PARAMETERS: dest - Pointer to the destination DEVICE_ID
+ * source - Pointer to the source DEVICE_ID
* string_area - Pointer to where to copy the dest string
*
* RETURN: Pointer to the next string area
@@ -247,7 +247,7 @@ static char *acpi_ns_copy_device_id(struct acpica_device_id *dest,
*
* FUNCTION: acpi_get_object_info
*
- * PARAMETERS: Handle - Object Handle
+ * PARAMETERS: handle - Object Handle
* return_buffer - Where the info is returned
*
* RETURN: Status
@@ -493,7 +493,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_object_info)
*
* FUNCTION: acpi_install_method
*
- * PARAMETERS: Buffer - An ACPI table containing one control method
+ * PARAMETERS: buffer - An ACPI table containing one control method
*
* RETURN: Status
*
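
acpi_get_handle() and acpi_get_name() above are the usual round trip between a namespace path and a handle. A minimal sketch; "\_SB" is just an example path, and the ACPI_ALLOCATE_BUFFER result is released with kfree() as kernel callers normally do.

#include <linux/acpi.h>
#include <linux/slab.h>

static void name_round_trip_sketch(void)
{
	struct acpi_buffer path = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_handle handle;

	/* Path -> handle (the path is absolute, so parent may be NULL) */
	if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
		return;

	/* Handle -> fully qualified path */
	if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_FULL_PATHNAME, &path))) {
		pr_info("resolved to %s\n", (char *)path.pointer);
		kfree(path.pointer);
	}
}
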
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 880a605cee20..6766fc4f088f 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -98,7 +98,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_id)
*
* FUNCTION: acpi_get_type
*
- * PARAMETERS: Handle - Handle of object whose type is desired
+ * PARAMETERS: handle - Handle of object whose type is desired
* ret_type - Where the type will be placed
*
* RETURN: Status
@@ -151,7 +151,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
*
* FUNCTION: acpi_get_parent
*
- * PARAMETERS: Handle - Handle of object whose parent is desired
+ * PARAMETERS: handle - Handle of object whose parent is desired
* ret_handle - Where the parent handle will be placed
*
* RETURN: Status
@@ -212,8 +212,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
*
* FUNCTION: acpi_get_next_object
*
- * PARAMETERS: Type - Type of object to be searched for
- * Parent - Parent object whose children we are getting
+ * PARAMETERS: type - Type of object to be searched for
+ * parent - Parent object whose children we are getting
* last_child - Previous child that was found.
* The NEXT child will be returned
* ret_handle - Where handle to the next object is placed
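
acpi_get_next_object() above iterates children one at a time: pass NULL as the previous child to get the first one, then feed each result back in until AE_NOT_FOUND. A small sketch that walks the immediate children of a handle and reports their types via acpi_get_type(); the function name is ours.

#include <linux/acpi.h>

/* Enumerate the direct children of 'parent' (any object type). */
static void list_children_sketch(acpi_handle parent)
{
	acpi_handle child = NULL;
	acpi_object_type type;

	while (ACPI_SUCCESS(acpi_get_next_object(ACPI_TYPE_ANY, parent,
						 child, &child))) {
		if (ACPI_SUCCESS(acpi_get_type(child, &type)))
			pr_info("child of type %u\n", type);
	}
}
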
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 5ac36aba507c..844464c4f901 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -210,7 +210,7 @@ char *acpi_ps_get_next_namestring(struct acpi_parse_state *parser_state)
* FUNCTION: acpi_ps_get_next_namepath
*
* PARAMETERS: parser_state - Current parser state object
- * Arg - Where the namepath will be stored
+ * arg - Where the namepath will be stored
* arg_count - If the namepath points to a control method
* the method's argument is returned here.
* possible_method_call - Whether the namepath can possibly be the
@@ -379,7 +379,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
*
* PARAMETERS: parser_state - Current parser state object
* arg_type - The argument type (AML_*_ARG)
- * Arg - Where the argument is returned
+ * arg - Where the argument is returned
*
* RETURN: None
*
@@ -618,6 +618,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
if (!arg) {
+ acpi_ps_free_op(field);
return_PTR(NULL);
}
@@ -662,6 +663,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
} else {
arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
if (!arg) {
+ acpi_ps_free_op(field);
return_PTR(NULL);
}
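
The two one-line additions above are a leak fix: acpi_ps_get_next_field() has already allocated the "field" op, so bailing out when the second allocation fails must release it first. The same error-path discipline in a generic, self-contained form; all names here are illustrative, not ACPICA's.

#include <stdlib.h>

struct op {
	unsigned short opcode;
	struct op *next;
};

static struct op *alloc_op(unsigned short opcode)
{
	struct op *op = calloc(1, sizeof(*op));

	if (op)
		op->opcode = opcode;
	return op;
}

/* Allocate two linked ops; on partial failure, release what we own.
 * The free() on the early-exit path mirrors the acpi_ps_free_op(field)
 * calls added above. */
static struct op *get_field_sketch(void)
{
	struct op *field = alloc_op(1);
	struct op *arg;

	if (!field)
		return NULL;

	arg = alloc_op(2);
	if (!arg) {
		free(field);		/* the fix: don't leak 'field' */
		return NULL;
	}

	field->next = arg;
	return field;
}
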
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 9547ad8a620b..799162c1b6df 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -167,7 +167,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
* PARAMETERS: walk_state - Current state
* aml_op_start - Begin of named Op in AML
* unnamed_op - Early Op (not a named Op)
- * Op - Returned Op
+ * op - Returned Op
*
* RETURN: Status
*
@@ -323,7 +323,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
if (walk_state->op_info->flags & AML_CREATE) {
/*
- * Backup to beginning of create_xXXfield declaration
+ * Backup to beginning of create_XXXfield declaration
* body_length is unknown until we parse the body
*/
op->named.data = aml_op_start;
@@ -380,7 +380,7 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
*
* PARAMETERS: walk_state - Current state
* aml_op_start - Op start in AML
- * Op - Current Op
+ * op - Current Op
*
* RETURN: Status
*
@@ -679,8 +679,8 @@ acpi_ps_link_module_code(union acpi_parse_object *parent_op,
* FUNCTION: acpi_ps_complete_op
*
* PARAMETERS: walk_state - Current state
- * Op - Returned Op
- * Status - Parse status before complete Op
+ * op - Returned Op
+ * status - Parse status before complete Op
*
* RETURN: Status
*
@@ -853,8 +853,8 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
* FUNCTION: acpi_ps_complete_final_op
*
* PARAMETERS: walk_state - Current state
- * Op - Current Op
- * Status - Current parse status before complete last
+ * op - Current Op
+ * status - Current parse status before complete last
* Op
*
* RETURN: Status
@@ -1165,7 +1165,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
if (walk_state->op_info->flags & AML_CREATE) {
/*
- * Backup to beginning of create_xXXfield declaration (1 for
+ * Backup to beginning of create_XXXfield declaration (1 for
* Opcode)
*
* body_length is unknown until we parse the body
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index a0226fdcf75c..ed1d457bd5ca 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -724,7 +724,7 @@ static const u8 acpi_gbl_long_op_index[NUM_EXTENDED_OPCODE] = {
*
* FUNCTION: acpi_ps_get_opcode_info
*
- * PARAMETERS: Opcode - The AML opcode
+ * PARAMETERS: opcode - The AML opcode
*
* RETURN: A pointer to the info about the opcode.
*
@@ -769,7 +769,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
*
* FUNCTION: acpi_ps_get_opcode_name
*
- * PARAMETERS: Opcode - The AML opcode
+ * PARAMETERS: opcode - The AML opcode
*
* RETURN: A pointer to the name of the opcode (ASCII String)
* Note: Never returns NULL.
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 2ff9c35a1968..01985703bb98 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -64,7 +64,7 @@ ACPI_MODULE_NAME("psparse")
*
* FUNCTION: acpi_ps_get_opcode_size
*
- * PARAMETERS: Opcode - An AML opcode
+ * PARAMETERS: opcode - An AML opcode
*
* RETURN: Size of the opcode, in bytes (1 or 2)
*
@@ -121,7 +121,7 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
* FUNCTION: acpi_ps_complete_this_op
*
* PARAMETERS: walk_state - Current State
- * Op - Op to complete
+ * op - Op to complete
*
* RETURN: Status
*
@@ -311,7 +311,7 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
* FUNCTION: acpi_ps_next_parse_state
*
* PARAMETERS: walk_state - Current state
- * Op - Current parse op
+ * op - Current parse op
* callback_status - Status from previous operation
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index c872aa4b926e..608dc20dc173 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -93,7 +93,7 @@ u8 acpi_ps_has_completed_scope(struct acpi_parse_state * parser_state)
* FUNCTION: acpi_ps_init_scope
*
* PARAMETERS: parser_state - Current parser state object
- * Root - the Root Node of this new scope
+ * root - the Root Node of this new scope
*
* RETURN: Status
*
@@ -131,7 +131,7 @@ acpi_ps_init_scope(struct acpi_parse_state * parser_state,
* FUNCTION: acpi_ps_push_scope
*
* PARAMETERS: parser_state - Current parser state object
- * Op - Current op to be pushed
+ * op - Current op to be pushed
* remaining_args - List of args remaining
* arg_count - Fixed or variable number of args
*
@@ -184,7 +184,7 @@ acpi_ps_push_scope(struct acpi_parse_state *parser_state,
* FUNCTION: acpi_ps_pop_scope
*
* PARAMETERS: parser_state - Current parser state object
- * Op - Where the popped op is returned
+ * op - Where the popped op is returned
* arg_list - Where the popped "next argument" is
* returned
* arg_count - Count of objects in arg_list
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index 2b03cdbbe1c0..fdb2e71f3046 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -58,8 +58,8 @@ union acpi_parse_object *acpi_ps_get_child(union acpi_parse_object *op);
*
* FUNCTION: acpi_ps_get_arg
*
- * PARAMETERS: Op - Get an argument for this op
- * Argn - Nth argument to get
+ * PARAMETERS: op - Get an argument for this op
+ * argn - Nth argument to get
*
* RETURN: The argument (as an Op object). NULL if argument does not exist
*
@@ -114,8 +114,8 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
*
* FUNCTION: acpi_ps_append_arg
*
- * PARAMETERS: Op - Append an argument to this Op.
- * Arg - Argument Op to append
+ * PARAMETERS: op - Append an argument to this Op.
+ * arg - Argument Op to append
*
* RETURN: None.
*
@@ -188,8 +188,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
*
* FUNCTION: acpi_ps_get_depth_next
*
- * PARAMETERS: Origin - Root of subtree to search
- * Op - Last (previous) Op that was found
+ * PARAMETERS: origin - Root of subtree to search
+ * op - Last (previous) Op that was found
*
* RETURN: Next Op found in the search.
*
@@ -261,7 +261,7 @@ union acpi_parse_object *acpi_ps_get_depth_next(union acpi_parse_object *origin,
*
* FUNCTION: acpi_ps_get_child
*
- * PARAMETERS: Op - Get the child of this Op
+ * PARAMETERS: op - Get the child of this Op
*
* RETURN: Child Op, Null if none is found.
*
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 13bb131ae125..8736ad5f04d3 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -77,8 +77,8 @@ union acpi_parse_object *acpi_ps_create_scope_op(void)
*
* FUNCTION: acpi_ps_init_op
*
- * PARAMETERS: Op - A newly allocated Op object
- * Opcode - Opcode to store in the Op
+ * PARAMETERS: op - A newly allocated Op object
+ * opcode - Opcode to store in the Op
*
* RETURN: None
*
@@ -103,7 +103,7 @@ void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode)
*
* FUNCTION: acpi_ps_alloc_op
*
- * PARAMETERS: Opcode - Opcode that will be stored in the new Op
+ * PARAMETERS: opcode - Opcode that will be stored in the new Op
*
* RETURN: Pointer to the new Op, null on failure
*
@@ -160,7 +160,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
*
* FUNCTION: acpi_ps_free_op
*
- * PARAMETERS: Op - Op to be freed
+ * PARAMETERS: op - Op to be freed
*
* RETURN: None.
*
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 9d98c5ff66a5..963e16225797 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -66,7 +66,7 @@ acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
* PARAMETERS: method_name - Valid ACPI name string
* debug_level - Optional level mask. 0 to use default
* debug_layer - Optional layer mask. 0 to use default
- * Flags - bit 1: one shot(1) or persistent(0)
+ * flags - bit 1: one shot(1) or persistent(0)
*
* RETURN: Status
*
@@ -105,7 +105,7 @@ acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
* FUNCTION: acpi_ps_start_trace
*
- * PARAMETERS: Info - Method info struct
+ * PARAMETERS: info - Method info struct
*
* RETURN: None
*
@@ -150,7 +150,7 @@ static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
*
* FUNCTION: acpi_ps_stop_trace
*
- * PARAMETERS: Info - Method info struct
+ * PARAMETERS: info - Method info struct
*
* RETURN: None
*
@@ -193,10 +193,10 @@ static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
*
* FUNCTION: acpi_ps_execute_method
*
- * PARAMETERS: Info - Method info block, contains:
- * Node - Method Node to execute
+ * PARAMETERS: info - Method info block, contains:
+ * node - Method Node to execute
* obj_desc - Method object
- * Parameters - List of parameters to pass to the method,
+ * parameters - List of parameters to pass to the method,
* terminated by NULL. Params itself may be
* NULL if no parameters are being passed.
* return_object - Where to put method's return value (if
@@ -361,9 +361,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
*
* FUNCTION: acpi_ps_update_parameter_list
*
- * PARAMETERS: Info - See struct acpi_evaluate_info
+ * PARAMETERS: info - See struct acpi_evaluate_info
* (Used: parameter_type and Parameters)
- * Action - Add or Remove reference
+ * action - Add or Remove reference
*
* RETURN: Status
*
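
acpi_debug_trace() above arms AML tracing for one named control method. A hedged call sketch: the method name and the zero level/layer masks (meaning "use the defaults", per the header) are examples, and flags value 1 selects one-shot rather than persistent tracing as the header describes.

#include <linux/acpi.h>

/* Arm one-shot AML tracing for a control method. */
static void trace_method_sketch(void)
{
	char method[5] = "_TMP";	/* example method name */

	if (ACPI_FAILURE(acpi_debug_trace(method, 0, 0, 1)))
		pr_warn("could not arm method trace\n");
}
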
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index a0305652394f..856ff075b6ab 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -182,8 +182,8 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_address64[5] = {
/* Revision ID */
- {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_iD),
- AML_OFFSET(ext_address64.revision_iD),
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.ext_address64.revision_ID),
+ AML_OFFSET(ext_address64.revision_ID),
1},
/*
* These fields are contiguous in both the source and destination:
@@ -215,7 +215,7 @@ static struct acpi_rsconvert_info acpi_rs_convert_general_flags[6] = {
AML_OFFSET(address.resource_type),
1},
- /* General Flags - Consume, Decode, min_fixed, max_fixed */
+ /* General flags - Consume, Decode, min_fixed, max_fixed */
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.address.producer_consumer),
AML_OFFSET(address.flags),
@@ -293,8 +293,8 @@ static struct acpi_rsconvert_info acpi_rs_convert_io_flags[4] = {
*
* FUNCTION: acpi_rs_get_address_common
*
- * PARAMETERS: Resource - Pointer to the internal resource struct
- * Aml - Pointer to the AML resource descriptor
+ * PARAMETERS: resource - Pointer to the internal resource struct
+ * aml - Pointer to the AML resource descriptor
*
* RETURN: TRUE if the resource_type field is OK, FALSE otherwise
*
@@ -343,8 +343,8 @@ acpi_rs_get_address_common(struct acpi_resource *resource,
*
* FUNCTION: acpi_rs_set_address_common
*
- * PARAMETERS: Aml - Pointer to the AML resource descriptor
- * Resource - Pointer to the internal resource struct
+ * PARAMETERS: aml - Pointer to the AML resource descriptor
+ * resource - Pointer to the internal resource struct
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3c6df4b7eb2d..de12469d1c9c 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -173,7 +173,7 @@ acpi_rs_stream_option_length(u32 resource_length,
*
* FUNCTION: acpi_rs_get_aml_length
*
- * PARAMETERS: Resource - Pointer to the resource linked list
+ * PARAMETERS: resource - Pointer to the resource linked list
* size_needed - Where the required size is returned
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 46d6eb38ae66..311cbc4f05fa 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -190,8 +190,8 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
*
* FUNCTION: acpi_rs_create_pci_routing_table
*
- * PARAMETERS: package_object - Pointer to a union acpi_operand_object
- * package
+ * PARAMETERS: package_object - Pointer to a package containing one
+ * or more ACPI_OPERAND_OBJECTs
* output_buffer - Pointer to the user's buffer
*
* RETURN: Status AE_OK if okay, else a valid acpi_status code.
@@ -199,7 +199,7 @@ acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
* AE_BUFFER_OVERFLOW and output_buffer->Length will point
* to the size buffer needed.
*
- * DESCRIPTION: Takes the union acpi_operand_object package and creates a
+ * DESCRIPTION: Takes the union acpi_operand_object package and creates a
* linked list of PCI interrupt descriptions
*
* NOTE: It is the caller's responsibility to ensure that the start of the
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index b4c581132393..4d11b072388c 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -703,7 +703,7 @@ acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source)
*
* FUNCTION: acpi_rs_dump_address_common
*
- * PARAMETERS: Resource - Pointer to an internal resource descriptor
+ * PARAMETERS: resource - Pointer to an internal resource descriptor
*
* RETURN: None
*
@@ -850,8 +850,8 @@ void acpi_rs_dump_irq_list(u8 * route_table)
*
* FUNCTION: acpi_rs_out*
*
- * PARAMETERS: Title - Name of the resource field
- * Value - Value of the resource field
+ * PARAMETERS: title - Name of the resource field
+ * value - Value of the resource field
*
* RETURN: None
*
@@ -898,8 +898,8 @@ static void acpi_rs_out_title(char *title)
*
* FUNCTION: acpi_rs_dump*List
*
- * PARAMETERS: Length - Number of elements in the list
- * Data - Start of the list
+ * PARAMETERS: length - Number of elements in the list
+ * data - Start of the list
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 9be129f5d6f4..46b5324b22d6 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -139,7 +139,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
*
* FUNCTION: acpi_rs_convert_resources_to_aml
*
- * PARAMETERS: Resource - Pointer to the resource linked list
+ * PARAMETERS: resource - Pointer to the resource linked list
* aml_size_needed - Calculated size of the byte stream
* needed from calling acpi_rs_get_aml_length()
* The size of the output_buffer is
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 8073b371cc7c..c6f291c2bc83 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -57,9 +57,9 @@ ACPI_MODULE_NAME("rsmisc")
*
* FUNCTION: acpi_rs_convert_aml_to_resource
*
- * PARAMETERS: Resource - Pointer to the resource descriptor
- * Aml - Where the AML descriptor is returned
- * Info - Pointer to appropriate conversion table
+ * PARAMETERS: resource - Pointer to the resource descriptor
+ * aml - Where the AML descriptor is returned
+ * info - Pointer to appropriate conversion table
*
* RETURN: Status
*
@@ -406,7 +406,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
case ACPI_RSC_EXIT_NE:
/*
- * Control - Exit conversion if not equal
+ * control - Exit conversion if not equal
*/
switch (info->resource_offset) {
case ACPI_RSC_COMPARE_AML_LENGTH:
@@ -454,9 +454,9 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
*
* FUNCTION: acpi_rs_convert_resource_to_aml
*
- * PARAMETERS: Resource - Pointer to the resource descriptor
- * Aml - Where the AML descriptor is returned
- * Info - Pointer to appropriate conversion table
+ * PARAMETERS: resource - Pointer to the resource descriptor
+ * aml - Where the AML descriptor is returned
+ * info - Pointer to appropriate conversion table
*
* RETURN: Status
*
@@ -726,7 +726,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_EXIT_LE:
/*
- * Control - Exit conversion if less than or equal
+ * control - Exit conversion if less than or equal
*/
if (item_count <= info->value) {
goto exit;
@@ -735,7 +735,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_EXIT_NE:
/*
- * Control - Exit conversion if not equal
+ * control - Exit conversion if not equal
*/
switch (COMPARE_OPCODE(info)) {
case ACPI_RSC_COMPARE_VALUE:
@@ -757,7 +757,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
case ACPI_RSC_EXIT_EQ:
/*
- * Control - Exit conversion if equal
+ * control - Exit conversion if equal
*/
if (*ACPI_ADD_PTR(u8, resource,
COMPARE_TARGET(info)) ==
@@ -783,7 +783,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
#if 0
/* Previous resource validations */
-if (aml->ext_address64.revision_iD != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
+if (aml->ext_address64.revision_ID != AML_RESOURCE_EXTENDED_ADDRESS_REVISION) {
return_ACPI_STATUS(AE_SUPPORT);
}
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 433a375deb93..37d5241c0acf 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("rsutils")
*
* FUNCTION: acpi_rs_decode_bitmask
*
- * PARAMETERS: Mask - Bitmask to decode
- * List - Where the converted list is returned
+ * PARAMETERS: mask - Bitmask to decode
+ * list - Where the converted list is returned
*
* RETURN: Count of bits set (length of list)
*
@@ -86,8 +86,8 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
*
* FUNCTION: acpi_rs_encode_bitmask
*
- * PARAMETERS: List - List of values to encode
- * Count - Length of list
+ * PARAMETERS: list - List of values to encode
+ * count - Length of list
*
* RETURN: Encoded bitmask
*
@@ -115,8 +115,8 @@ u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
*
* FUNCTION: acpi_rs_move_data
*
- * PARAMETERS: Destination - Pointer to the destination descriptor
- * Source - Pointer to the source descriptor
+ * PARAMETERS: destination - Pointer to the destination descriptor
+ * source - Pointer to the source descriptor
* item_count - How many items to move
* move_type - Byte width
*
@@ -183,7 +183,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
*
* PARAMETERS: total_length - Length of the AML descriptor, including
* the header and length fields.
- * Aml - Pointer to the raw AML descriptor
+ * aml - Pointer to the raw AML descriptor
*
* RETURN: None
*
@@ -235,7 +235,7 @@ acpi_rs_set_resource_length(acpi_rsdesc_size total_length,
* PARAMETERS: descriptor_type - Byte to be inserted as the type
* total_length - Length of the AML descriptor, including
* the header and length fields.
- * Aml - Pointer to the raw AML descriptor
+ * aml - Pointer to the raw AML descriptor
*
* RETURN: None
*
@@ -265,8 +265,8 @@ acpi_rs_set_resource_header(u8 descriptor_type,
*
* FUNCTION: acpi_rs_strcpy
*
- * PARAMETERS: Destination - Pointer to the destination string
- * Source - Pointer to the source string
+ * PARAMETERS: destination - Pointer to the destination string
+ * source - Pointer to the source string
*
* RETURN: String length, including NULL terminator
*
@@ -300,7 +300,7 @@ static u16 acpi_rs_strcpy(char *destination, char *source)
* minimum_length - Minimum length of the descriptor (minus
* any optional fields)
* resource_source - Where the resource_source is returned
- * Aml - Pointer to the raw AML descriptor
+ * aml - Pointer to the raw AML descriptor
* string_ptr - (optional) where to store the actual
* resource_source string
*
@@ -386,7 +386,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
*
* FUNCTION: acpi_rs_set_resource_source
*
- * PARAMETERS: Aml - Pointer to the raw AML descriptor
+ * PARAMETERS: aml - Pointer to the raw AML descriptor
* minimum_length - Minimum length of the descriptor (minus
* any optional fields)
* resource_source - Internal resource_source
@@ -445,7 +445,7 @@ acpi_rs_set_resource_source(union aml_resource * aml,
*
* FUNCTION: acpi_rs_get_prt_method_data
*
- * PARAMETERS: Node - Device node
+ * PARAMETERS: node - Device node
* ret_buffer - Pointer to a buffer structure for the
* results
*
@@ -494,7 +494,7 @@ acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
*
* FUNCTION: acpi_rs_get_crs_method_data
*
- * PARAMETERS: Node - Device node
+ * PARAMETERS: node - Device node
* ret_buffer - Pointer to a buffer structure for the
* results
*
@@ -534,7 +534,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* On exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluateObject */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -544,7 +544,7 @@ acpi_rs_get_crs_method_data(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_rs_get_prs_method_data
*
- * PARAMETERS: Node - Device node
+ * PARAMETERS: node - Device node
* ret_buffer - Pointer to a buffer structure for the
* results
*
@@ -585,7 +585,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* On exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluateObject */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -596,7 +596,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_rs_get_aei_method_data
*
- * PARAMETERS: Node - Device node
+ * PARAMETERS: node - Device node
* ret_buffer - Pointer to a buffer structure for the
* results
*
@@ -636,7 +636,7 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
*/
status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
- /* On exit, we must delete the object returned by evaluate_object */
+ /* On exit, we must delete the object returned by evaluateObject */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
@@ -646,8 +646,8 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
*
* FUNCTION: acpi_rs_get_method_data
*
- * PARAMETERS: Handle - Handle to the containing object
- * Path - Path to method, relative to Handle
+ * PARAMETERS: handle - Handle to the containing object
+ * path - Path to method, relative to Handle
* ret_buffer - Pointer to a buffer structure for the
* results
*
@@ -697,7 +697,7 @@ acpi_rs_get_method_data(acpi_handle handle,
*
* FUNCTION: acpi_rs_set_srs_method_data
*
- * PARAMETERS: Node - Device node
+ * PARAMETERS: node - Device node
* in_buffer - Pointer to a buffer structure of the
* parameter
*
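
Most of the rsutils.c hunk is comment-case cleanup, but the helpers it documents are simple: acpi_rs_decode_bitmask expands a 16-bit IRQ-style mask into a list of set bit positions (returning the count), and acpi_rs_encode_bitmask rebuilds the mask from such a list. A standalone sketch of that pair, using plain C types rather than the ACPICA ones:

#include <stdint.h>
#include <stdio.h>

/* Expand a 16-bit mask into a list of set bit positions; return the count. */
static uint8_t decode_bitmask(uint16_t mask, uint8_t *list)
{
        uint8_t count = 0;

        for (uint8_t bit = 0; bit < 16; bit++) {
                if (mask & (1u << bit))
                        list[count++] = bit;
        }
        return count;
}

/* Build a 16-bit mask from a list of bit positions. */
static uint16_t encode_bitmask(const uint8_t *list, uint8_t count)
{
        uint16_t mask = 0;

        for (uint8_t i = 0; i < count; i++)
                mask |= (uint16_t)(1u << list[i]);
        return mask;
}

int main(void)
{
        uint8_t irqs[16];
        uint8_t n = decode_bitmask(0x0420, irqs);       /* bits 5 and 10 set */

        for (uint8_t i = 0; i < n; i++)
                printf("IRQ %u\n", irqs[i]);            /* IRQ 5, IRQ 10 */

        printf("mask 0x%04x\n", encode_bitmask(irqs, n));       /* 0x0420 */
        return 0;
}
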
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index f58c098c7aeb..5aad744b5b83 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -79,7 +79,7 @@ acpi_rs_validate_parameters(acpi_handle device_handle,
* FUNCTION: acpi_rs_validate_parameters
*
* PARAMETERS: device_handle - Handle to a device
- * Buffer - Pointer to a data buffer
+ * buffer - Pointer to a data buffer
* return_node - Pointer to where the device node is returned
*
* RETURN: Status
@@ -351,8 +351,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
*
* FUNCTION: acpi_resource_to_address64
*
- * PARAMETERS: Resource - Pointer to a resource
- * Out - Pointer to the users's return buffer
+ * PARAMETERS: resource - Pointer to a resource
+ * out - Pointer to the user's return buffer
* (a struct acpi_resource_address64)
*
* RETURN: Status
@@ -415,9 +415,9 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
* FUNCTION: acpi_get_vendor_resource
*
* PARAMETERS: device_handle - Handle for the parent device object
- * Name - Method name for the parent resource
+ * name - Method name for the parent resource
* (METHOD_NAME__CRS or METHOD_NAME__PRS)
- * Uuid - Pointer to the UUID to be matched.
+ * uuid - Pointer to the UUID to be matched.
* includes both subtype and 16-byte UUID
* ret_buffer - Where the vendor resource is returned
*
@@ -526,11 +526,11 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want.
+ * name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
* METHOD_NAME__AEI)
* user_function - Called for each resource
- * Context - Passed to user_function
+ * context - Passed to user_function
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 4c9c760db4a4..390651860bf0 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -49,9 +49,10 @@
ACPI_MODULE_NAME("tbfadt")
/* Local prototypes */
-static ACPI_INLINE void
+static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- u8 space_id, u8 byte_width, u64 address);
+ u8 space_id,
+ u8 byte_width, u64 address, char *register_name);
static void acpi_tb_convert_fadt(void);
@@ -172,7 +173,7 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
*
* PARAMETERS: generic_address - GAS struct to be initialized
* byte_width - Width of this register
- * Address - Address of the register
+ * address - Address of the register
*
* RETURN: None
*
@@ -182,10 +183,25 @@ static struct acpi_fadt_pm_info fadt_pm_info_table[] = {
*
******************************************************************************/
-static ACPI_INLINE void
+static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
- u8 space_id, u8 byte_width, u64 address)
+ u8 space_id,
+ u8 byte_width, u64 address, char *register_name)
{
+ u8 bit_width;
+
+ /* Bit width field in the GAS is only one byte long, 255 max */
+
+ bit_width = (u8)(byte_width * 8);
+
+ if (byte_width > 31) { /* (31*8)=248 */
+ ACPI_ERROR((AE_INFO,
+ "%s - 32-bit FADT register is too long (%u bytes, %u bits) "
+ "to convert to GAS struct - 255 bits max, truncating",
+ register_name, byte_width, (byte_width * 8)));
+
+ bit_width = 255;
+ }
/*
* The 64-bit Address field is non-aligned in the byte packed
@@ -196,7 +212,7 @@ acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
/* All other fields are byte-wide */
generic_address->space_id = space_id;
- generic_address->bit_width = (u8)ACPI_MUL_8(byte_width);
+ generic_address->bit_width = bit_width;
generic_address->bit_offset = 0;
generic_address->access_width = 0; /* Access width ANY */
}
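
Replacing ACPI_MUL_8(byte_width) with the computed bit_width guards the one-byte GAS BitWidth field: a legacy FADT register wider than 31 bytes (248 bits) would overflow it, so the width is clamped to 255 and reported against the register name that the function now receives. A standalone sketch of the same clamping rule; "Pm1aEventBlock" is just an illustrative register name:

#include <stdint.h>
#include <stdio.h>

/*
 * Cap a register width (given in bytes) to what fits in the one-byte
 * bit_width field of a Generic Address Structure. Mirrors the rule in
 * the hunk above: anything wider than 31 bytes is clamped to 255 bits
 * and reported. Standalone sketch, not the ACPICA function itself.
 */
static uint8_t gas_bit_width(uint8_t byte_width, const char *register_name)
{
        if (byte_width > 31) {          /* 31 * 8 = 248 is the last exact fit */
                fprintf(stderr,
                        "%s: %u bytes (%u bits) too wide for GAS, capping at 255 bits\n",
                        register_name, byte_width, byte_width * 8);
                return 255;
        }
        return (uint8_t)(byte_width * 8);
}

int main(void)
{
        printf("%u\n", gas_bit_width(4, "Pm1aEventBlock"));     /* 32 */
        printf("%u\n", gas_bit_width(64, "OversizedBlock"));    /* 255 */
        return 0;
}
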
@@ -267,8 +283,8 @@ void acpi_tb_parse_fadt(u32 table_index)
*
* FUNCTION: acpi_tb_create_local_fadt
*
- * PARAMETERS: Table - Pointer to BIOS FADT
- * Length - Length of the table
+ * PARAMETERS: table - Pointer to BIOS FADT
+ * length - Length of the table
*
* RETURN: None
*
@@ -287,11 +303,11 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
* a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
- ACPI_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 5.0 version, "
- "truncating length %u to %u",
- table->revision, length,
- (u32)sizeof(struct acpi_table_fadt)));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "FADT (revision %u) is longer than ACPI 5.0 version, "
+ "truncating length %u to %u",
+ table->revision, length,
+ (u32)sizeof(struct acpi_table_fadt)));
}
/* Clear the entire local FADT */
@@ -436,11 +452,13 @@ static void acpi_tb_convert_fadt(void)
* they must match.
*/
if (address64->address && address32 &&
- (address64->address != (u64) address32)) {
- ACPI_ERROR((AE_INFO,
- "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
- fadt_info_table[i].name, address32,
- ACPI_FORMAT_UINT64(address64->address)));
+ (address64->address != (u64)address32)) {
+ ACPI_BIOS_ERROR((AE_INFO,
+ "32/64X address mismatch in FADT/%s: "
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
+ fadt_info_table[i].name, address32,
+ ACPI_FORMAT_UINT64(address64->
+ address)));
}
/* Always use 32-bit address if it is valid (non-null) */
@@ -456,7 +474,8 @@ static void acpi_tb_convert_fadt(void)
&acpi_gbl_FADT,
fadt_info_table
[i].length),
- (u64) address32);
+ (u64) address32,
+ fadt_info_table[i].name);
}
}
}
@@ -465,7 +484,7 @@ static void acpi_tb_convert_fadt(void)
*
* FUNCTION: acpi_tb_validate_fadt
*
- * PARAMETERS: Table - Pointer to the FADT to be validated
+ * PARAMETERS: table - Pointer to the FADT to be validated
*
* RETURN: None
*
@@ -494,25 +513,25 @@ static void acpi_tb_validate_fadt(void)
* DSDT/X_DSDT) would indicate the presence of two FACS or two DSDT tables.
*/
if (acpi_gbl_FADT.facs &&
- (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
- ACPI_WARNING((AE_INFO,
- "32/64X FACS address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.facs,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
-
- acpi_gbl_FADT.Xfacs = (u64) acpi_gbl_FADT.facs;
+ (acpi_gbl_FADT.Xfacs != (u64)acpi_gbl_FADT.facs)) {
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X FACS address mismatch in FADT - "
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
+ acpi_gbl_FADT.facs,
+ ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
+
+ acpi_gbl_FADT.Xfacs = (u64)acpi_gbl_FADT.facs;
}
if (acpi_gbl_FADT.dsdt &&
- (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
- ACPI_WARNING((AE_INFO,
- "32/64X DSDT address mismatch in FADT - "
- "0x%8.8X/0x%8.8X%8.8X, using 32",
- acpi_gbl_FADT.dsdt,
- ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
-
- acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
+ (acpi_gbl_FADT.Xdsdt != (u64)acpi_gbl_FADT.dsdt)) {
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X DSDT address mismatch in FADT - "
+ "0x%8.8X/0x%8.8X%8.8X, using 32",
+ acpi_gbl_FADT.dsdt,
+ ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
+
+ acpi_gbl_FADT.Xdsdt = (u64)acpi_gbl_FADT.dsdt;
}
/* If Hardware Reduced flag is set, we are all done */
@@ -542,10 +561,10 @@ static void acpi_tb_validate_fadt(void)
*/
if (address64->address &&
(address64->bit_width != ACPI_MUL_8(length))) {
- ACPI_WARNING((AE_INFO,
- "32/64X length mismatch in %s: %u/%u",
- name, ACPI_MUL_8(length),
- address64->bit_width));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "32/64X length mismatch in FADT/%s: %u/%u",
+ name, ACPI_MUL_8(length),
+ address64->bit_width));
}
if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
@@ -554,29 +573,29 @@ static void acpi_tb_validate_fadt(void)
* Both the address and length must be non-zero.
*/
if (!address64->address || !length) {
- ACPI_ERROR((AE_INFO,
- "Required field %s has zero address and/or length:"
- " 0x%8.8X%8.8X/0x%X",
- name,
- ACPI_FORMAT_UINT64(address64->
- address),
- length));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "Required FADT field %s has zero address and/or length: "
+ "0x%8.8X%8.8X/0x%X",
+ name,
+ ACPI_FORMAT_UINT64(address64->
+ address),
+ length));
}
} else if (fadt_info_table[i].type & ACPI_FADT_SEPARATE_LENGTH) {
/*
- * Field is optional (PM2Control, GPE0, GPE1) AND has its own
+ * Field is optional (Pm2_control, GPE0, GPE1) AND has its own
* length field. If present, both the address and length must
* be valid.
*/
if ((address64->address && !length) ||
(!address64->address && length)) {
- ACPI_WARNING((AE_INFO,
- "Optional field %s has zero address or length: "
- "0x%8.8X%8.8X/0x%X",
- name,
- ACPI_FORMAT_UINT64(address64->
- address),
- length));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "Optional FADT field %s has zero address or length: "
+ "0x%8.8X%8.8X/0x%X",
+ name,
+ ACPI_FORMAT_UINT64
+ (address64->address),
+ length));
}
}
}
@@ -621,12 +640,12 @@ static void acpi_tb_setup_fadt_registers(void)
(fadt_info_table[i].default_length > 0) &&
(fadt_info_table[i].default_length !=
target64->bit_width)) {
- ACPI_WARNING((AE_INFO,
- "Invalid length for %s: %u, using default %u",
- fadt_info_table[i].name,
- target64->bit_width,
- fadt_info_table[i].
- default_length));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "Invalid length for FADT/%s: %u, using default %u",
+ fadt_info_table[i].name,
+ target64->bit_width,
+ fadt_info_table[i].
+ default_length));
/* Incorrect size, set width to the default */
@@ -670,7 +689,8 @@ static void acpi_tb_setup_fadt_registers(void)
source64->address +
(fadt_pm_info_table[i].
register_num *
- pm1_register_byte_width));
+ pm1_register_byte_width),
+ "PmRegisters");
}
}
}
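
The remaining tbfadt.c hunks reclassify the 32/64-bit address, FACS/DSDT and length mismatch messages as firmware problems (ACPI_BIOS_*). The reconciliation rule itself is unchanged: when both a 32-bit field and its 64-bit "X" counterpart are present and disagree, warn and prefer the 32-bit value. A small standalone sketch of that rule, with invented variable names:

#include <stdint.h>
#include <stdio.h>

/*
 * Pick between a legacy 32-bit address and its 64-bit "X" counterpart,
 * the way the FADT conversion above does: a non-zero 32-bit value wins,
 * and a disagreement between the two is reported as a firmware bug.
 */
static uint64_t fadt_pick_address(uint32_t addr32, uint64_t addr64,
                                  const char *name)
{
        if (addr32 && addr64 && (uint64_t)addr32 != addr64)
                fprintf(stderr,
                        "firmware bug: 32/64X address mismatch in FADT/%s, using 32\n",
                        name);

        return addr32 ? (uint64_t)addr32 : addr64;
}

int main(void)
{
        /* Mismatch: warns and returns the 32-bit value */
        printf("0x%llx\n",
               (unsigned long long)fadt_pick_address(0x400, 0x800, "Facs"));
        /* Only the 64-bit form present: use it as-is */
        printf("0x%llx\n",
               (unsigned long long)fadt_pick_address(0, 0xfed40000, "Dsdt"));
        return 0;
}
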
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 4903e36ea75a..57deae166577 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("tbfind")
*
* FUNCTION: acpi_tb_find_table
*
- * PARAMETERS: Signature - String with ACPI table signature
+ * PARAMETERS: signature - String with ACPI table signature
* oem_id - String with the table OEM ID
* oem_table_id - String with the OEM Table ID
* table_index - Where the table index is returned
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index c03500b4cc7a..74f97d74db1c 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -138,13 +138,14 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
if ((table_desc->pointer->signature[0] != 0x00) &&
(!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
&& (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
- ACPI_ERROR((AE_INFO,
- "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
- acpi_ut_valid_acpi_name(*(u32 *)table_desc->
- pointer->
- signature) ? table_desc->
- pointer->signature : "????",
- *(u32 *)table_desc->pointer->signature));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "Table has invalid signature [%4.4s] (0x%8.8X), "
+ "must be SSDT or OEMx",
+ acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+ pointer->
+ signature) ?
+ table_desc->pointer->signature : "????",
+ *(u32 *)table_desc->pointer->signature));
return_ACPI_STATUS(AE_BAD_SIGNATURE);
}
@@ -396,10 +397,10 @@ acpi_status acpi_tb_resize_root_table_list(void)
*
* FUNCTION: acpi_tb_store_table
*
- * PARAMETERS: Address - Table address
- * Table - Table header
- * Length - Table length
- * Flags - flags
+ * PARAMETERS: address - Table address
+ * table - Table header
+ * length - Table length
+ * flags - flags
*
* RETURN: Status and table index.
*
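
The tbinstal.c hunk reflows the invalid-signature message and turns it into an ACPI_BIOS_ERROR; the check itself still accepts only SSDT or OEMx signatures (or a null first byte) for dynamically added tables. A standalone sketch of that 4-character signature test, without the ACPICA macros:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Accept a dynamically loaded table only if its 4-character signature
 * is "SSDT" or starts with "OEM" (OEM1, OEMB, ...). A null first byte
 * is also tolerated, as in the hunk above.
 */
static bool signature_allowed(const char sig[4])
{
        if (sig[0] == '\0')
                return true;
        if (memcmp(sig, "SSDT", 4) == 0)
                return true;
        return strncmp(sig, "OEM", 3) == 0;
}

int main(void)
{
        printf("%d\n", signature_allowed("SSDT"));      /* 1 */
        printf("%d\n", signature_allowed("OEM1"));      /* 1 */
        printf("%d\n", signature_allowed("DSDT"));      /* 0 */
        return 0;
}
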
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0a706cac37de..b6cea30da638 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -178,8 +178,8 @@ u8 acpi_tb_tables_loaded(void)
*
* FUNCTION: acpi_tb_fix_string
*
- * PARAMETERS: String - String to be repaired
- * Length - Maximum length
+ * PARAMETERS: string - String to be repaired
+ * length - Maximum length
*
* RETURN: None
*
@@ -205,7 +205,7 @@ static void acpi_tb_fix_string(char *string, acpi_size length)
* FUNCTION: acpi_tb_cleanup_table_header
*
* PARAMETERS: out_header - Where the cleaned header is returned
- * Header - Input ACPI table header
+ * header - Input ACPI table header
*
* RETURN: Returns the cleaned header in out_header
*
@@ -231,8 +231,8 @@ acpi_tb_cleanup_table_header(struct acpi_table_header *out_header,
*
* FUNCTION: acpi_tb_print_table_header
*
- * PARAMETERS: Address - Table physical address
- * Header - Table header
+ * PARAMETERS: address - Table physical address
+ * header - Table header
*
* RETURN: None
*
@@ -296,8 +296,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
*
* FUNCTION: acpi_tb_validate_checksum
*
- * PARAMETERS: Table - ACPI table to verify
- * Length - Length of entire table
+ * PARAMETERS: table - ACPI table to verify
+ * length - Length of entire table
*
* RETURN: Status
*
@@ -317,10 +317,11 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
/* Checksum ok? (should be zero) */
if (checksum) {
- ACPI_WARNING((AE_INFO,
- "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
- table->signature, table->checksum,
- (u8) (table->checksum - checksum)));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "Incorrect checksum in table [%4.4s] - 0x%2.2X, "
+ "should be 0x%2.2X",
+ table->signature, table->checksum,
+ (u8)(table->checksum - checksum)));
#if (ACPI_CHECKSUM_ABORT)
@@ -335,8 +336,8 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
*
* FUNCTION: acpi_tb_checksum
*
- * PARAMETERS: Buffer - Pointer to memory region to be checked
- * Length - Length of this memory region
+ * PARAMETERS: buffer - Pointer to memory region to be checked
+ * length - Length of this memory region
*
* RETURN: Checksum (u8)
*
@@ -377,8 +378,9 @@ void acpi_tb_check_dsdt_header(void)
if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
- ACPI_ERROR((AE_INFO,
- "The DSDT has been corrupted or replaced - old, new headers below"));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "The DSDT has been corrupted or replaced - "
+ "old, new headers below"));
acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
acpi_tb_print_table_header(0, acpi_gbl_DSDT);
@@ -438,8 +440,8 @@ struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
*
* FUNCTION: acpi_tb_install_table
*
- * PARAMETERS: Address - Physical address of DSDT or FACS
- * Signature - Table signature, NULL if no need to
+ * PARAMETERS: address - Physical address of DSDT or FACS
+ * signature - Table signature, NULL if no need to
* match
* table_index - Index into root table array
*
@@ -480,9 +482,10 @@ acpi_tb_install_table(acpi_physical_address address,
/* If a particular signature is expected (DSDT/FACS), it must match */
if (signature && !ACPI_COMPARE_NAME(table->signature, signature)) {
- ACPI_ERROR((AE_INFO,
- "Invalid signature 0x%X for ACPI table, expected [%s]",
- *ACPI_CAST_PTR(u32, table->signature), signature));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "Invalid signature 0x%X for ACPI table, expected [%s]",
+ *ACPI_CAST_PTR(u32, table->signature),
+ signature));
goto unmap_and_exit;
}
@@ -589,10 +592,10 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
/* Will truncate 64-bit address to 32 bits, issue warning */
- ACPI_WARNING((AE_INFO,
- "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
- " truncating",
- ACPI_FORMAT_UINT64(address64)));
+ ACPI_BIOS_WARNING((AE_INFO,
+ "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
+ " truncating",
+ ACPI_FORMAT_UINT64(address64)));
}
#endif
return ((acpi_physical_address) (address64));
@@ -603,7 +606,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
*
* FUNCTION: acpi_tb_parse_root_table
*
- * PARAMETERS: Rsdp - Pointer to the RSDP
+ * PARAMETERS: rsdp - Pointer to the RSDP
*
* RETURN: Status
*
@@ -694,8 +697,9 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
if (length < sizeof(struct acpi_table_header)) {
- ACPI_ERROR((AE_INFO, "Invalid length 0x%X in RSDT/XSDT",
- length));
+ ACPI_BIOS_ERROR((AE_INFO,
+ "Invalid table length 0x%X in RSDT/XSDT",
+ length));
return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
}
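
Several tbutils.c messages likewise become ACPI_BIOS_* because a bad checksum, an oversized XSDT address or a truncated RSDT/XSDT is the firmware's fault, not the OS's. The rule acpi_tb_verify_checksum enforces is simply that every byte of the table, including the stored checksum byte, sums to zero modulo 256. A standalone sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Sum all bytes of an ACPI table; a valid table sums to 0 (mod 256). */
static uint8_t acpi_checksum(const uint8_t *buffer, size_t length)
{
        uint8_t sum = 0;

        while (length--)
                sum = (uint8_t)(sum + *buffer++);
        return sum;
}

int main(void)
{
        /* Toy 8-byte "table"; the last byte will hold the checksum */
        uint8_t table[8] = { 'F', 'A', 'K', 'E', 1, 2, 3, 0 };
        uint8_t sum = acpi_checksum(table, sizeof(table) - 1);

        table[7] = (uint8_t)(0x100 - sum);      /* fix up the checksum byte */
        printf("checksum byte 0x%02x, total %u\n",
               table[7], acpi_checksum(table, sizeof(table)));  /* total 0 */
        return 0;
}
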
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index abcc6412c244..29e51bc01383 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -1,7 +1,6 @@
/******************************************************************************
*
- * Module Name: tbxface - Public interfaces to the ACPI subsystem
- * ACPI table oriented interfaces
+ * Module Name: tbxface - ACPI table oriented external interfaces
*
*****************************************************************************/
@@ -51,11 +50,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxface")
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
-static int no_auto_ssdt;
-
/*******************************************************************************
*
* FUNCTION: acpi_allocate_root_table
@@ -65,11 +59,10 @@ static int no_auto_ssdt;
*
* RETURN: Status
*
- * DESCRIPTION: Allocate a root table array. Used by i_aSL compiler and
+ * DESCRIPTION: Allocate a root table array. Used by iASL compiler and
* acpi_initialize_tables.
*
******************************************************************************/
-
acpi_status acpi_allocate_root_table(u32 initial_table_count)
{
@@ -222,52 +215,10 @@ acpi_status acpi_reallocate_root_table(void)
/*******************************************************************************
*
- * FUNCTION: acpi_load_table
- *
- * PARAMETERS: table_ptr - pointer to a buffer containing the entire
- * table to be loaded
- *
- * RETURN: Status
- *
- * DESCRIPTION: This function is called to load a table from the caller's
- * buffer. The buffer must contain an entire ACPI Table including
- * a valid header. The header fields will be verified, and if it
- * is determined that the table is invalid, the call will fail.
- *
- ******************************************************************************/
-acpi_status acpi_load_table(struct acpi_table_header *table_ptr)
-{
- acpi_status status;
- u32 table_index;
- struct acpi_table_desc table_desc;
-
- if (!table_ptr)
- return AE_BAD_PARAMETER;
-
- ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
- table_desc.pointer = table_ptr;
- table_desc.length = table_ptr->length;
- table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
-
- /*
- * Install the new table into the local data structures
- */
- status = acpi_tb_add_table(&table_desc, &table_index);
- if (ACPI_FAILURE(status)) {
- return status;
- }
- status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
- return status;
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_table)
-
-/*******************************************************************************
- *
* FUNCTION: acpi_get_table_header
*
- * PARAMETERS: Signature - ACPI signature of needed table
- * Instance - Which instance (for SSDTs)
+ * PARAMETERS: signature - ACPI signature of needed table
+ * instance - Which instance (for SSDTs)
* out_table_header - The pointer to the table header to fill
*
* RETURN: Status and pointer to mapped table header
@@ -382,8 +333,8 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
*
* FUNCTION: acpi_get_table_with_size
*
- * PARAMETERS: Signature - ACPI signature of needed table
- * Instance - Which instance (for SSDTs)
+ * PARAMETERS: signature - ACPI signature of needed table
+ * instance - Which instance (for SSDTs)
* out_table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to table
@@ -436,6 +387,7 @@ acpi_get_table_with_size(char *signature,
return (AE_NOT_FOUND);
}
+ACPI_EXPORT_SYMBOL(acpi_get_table_with_size)
acpi_status
acpi_get_table(char *signature,
@@ -453,7 +405,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
* FUNCTION: acpi_get_table_by_index
*
* PARAMETERS: table_index - Table index
- * Table - Where the pointer to the table is returned
+ * table - Where the pointer to the table is returned
*
* RETURN: Status and pointer to the table
*
@@ -502,157 +454,13 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
ACPI_EXPORT_SYMBOL(acpi_get_table_by_index)
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_load_namespace
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
- * the RSDT/XSDT.
- *
- ******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
-{
- acpi_status status;
- u32 i;
- struct acpi_table_header *new_dsdt;
-
- ACPI_FUNCTION_TRACE(tb_load_namespace);
-
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
- /*
- * Load the namespace. The DSDT is required, but any SSDT and
- * PSDT tables are optional. Verify the DSDT.
- */
- if (!acpi_gbl_root_table_list.current_table_count ||
- !ACPI_COMPARE_NAME(&
- (acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT].signature),
- ACPI_SIG_DSDT)
- ||
- ACPI_FAILURE(acpi_tb_verify_table
- (&acpi_gbl_root_table_list.
- tables[ACPI_TABLE_INDEX_DSDT]))) {
- status = AE_NO_ACPI_TABLES;
- goto unlock_and_exit;
- }
-
- /*
- * Save the DSDT pointer for simple access. This is the mapped memory
- * address. We must take care here because the address of the .Tables
- * array can change dynamically as tables are loaded at run-time. Note:
- * .Pointer field is not validated until after call to acpi_tb_verify_table.
- */
- acpi_gbl_DSDT =
- acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
-
- /*
- * Optionally copy the entire DSDT to local memory (instead of simply
- * mapping it.) There are some BIOSs that corrupt or replace the original
- * DSDT, creating the need for this option. Default is FALSE, do not copy
- * the DSDT.
- */
- if (acpi_gbl_copy_dsdt_locally) {
- new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
- if (new_dsdt) {
- acpi_gbl_DSDT = new_dsdt;
- }
- }
-
- /*
- * Save the original DSDT header for detection of table corruption
- * and/or replacement of the DSDT from outside the OS.
- */
- ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
- sizeof(struct acpi_table_header));
-
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-
- /* Load and parse tables */
-
- status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
-
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if ((!ACPI_COMPARE_NAME
- (&(acpi_gbl_root_table_list.tables[i].signature),
- ACPI_SIG_SSDT)
- &&
- !ACPI_COMPARE_NAME(&
- (acpi_gbl_root_table_list.tables[i].
- signature), ACPI_SIG_PSDT))
- ||
- ACPI_FAILURE(acpi_tb_verify_table
- (&acpi_gbl_root_table_list.tables[i]))) {
- continue;
- }
-
- if (no_auto_ssdt) {
- printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
- continue;
- }
-
- /* Ignore errors while loading tables, get as many as possible */
-
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- (void)acpi_ns_load_table(i, acpi_gbl_root_node);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- }
-
- ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
-
- unlock_and_exit:
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_load_tables
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
- *
- ******************************************************************************/
-
-acpi_status acpi_load_tables(void)
-{
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(acpi_load_tables);
-
- /* Load the namespace from the tables */
-
- status = acpi_tb_load_namespace();
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "While loading namespace from ACPI tables"));
- }
-
- return_ACPI_STATUS(status);
-}
-
-ACPI_EXPORT_SYMBOL(acpi_load_tables)
-
/*******************************************************************************
*
* FUNCTION: acpi_install_table_handler
*
- * PARAMETERS: Handler - Table event handler
- * Context - Value passed to the handler on each event
+ * PARAMETERS: handler - Table event handler
+ * context - Value passed to the handler on each event
*
* RETURN: Status
*
@@ -698,7 +506,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
*
* FUNCTION: acpi_remove_table_handler
*
- * PARAMETERS: Handler - Table event handler that was installed
+ * PARAMETERS: handler - Table event handler that was installed
* previously.
*
* RETURN: Status
@@ -734,15 +542,3 @@ acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
}
ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
-
-
-static int __init acpi_no_auto_ssdt_setup(char *s) {
-
- printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
-
- no_auto_ssdt = 1;
-
- return 1;
-}
-
-__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
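
The tbxface.c hunk is mostly a move: acpi_load_table, acpi_load_tables, acpi_tb_load_namespace and the acpi_no_auto_ssdt handling migrate to the new tbxfload.c that follows, while acpi_get_table_with_size gains an ACPI_EXPORT_SYMBOL. What stays behind are lookups by signature/instance or by table index. A toy, standalone model of that signature-plus-instance walk; the table records are invented for illustration and this is not the ACPICA implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_table {
        char sig[5];
        uint32_t length;
};

/* "SSDT" instance 2 means: the second SSDT encountered in the list. */
static const struct toy_table *find_table(const struct toy_table *tables,
                                          size_t count, const char *sig,
                                          uint32_t instance)
{
        uint32_t seen = 0;

        for (size_t i = 0; i < count; i++) {
                if (strncmp(tables[i].sig, sig, 4) != 0)
                        continue;
                if (++seen == instance)
                        return &tables[i];
        }
        return NULL;
}

int main(void)
{
        static const struct toy_table tables[] = {
                { "DSDT", 40000 }, { "SSDT", 1200 }, { "SSDT", 800 },
        };
        const struct toy_table *t = find_table(tables, 3, "SSDT", 2);

        if (t)
                printf("second SSDT is %u bytes\n", t->length);  /* 800 */
        return 0;
}
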
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
new file mode 100644
index 000000000000..f87cc63e69a1
--- /dev/null
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -0,0 +1,389 @@
+/******************************************************************************
+ *
+ * Module Name: tbxfload - Table load/unload external interfaces
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "actables.h"
+
+#define _COMPONENT ACPI_TABLES
+ACPI_MODULE_NAME("tbxfload")
+
+/* Local prototypes */
+static acpi_status acpi_tb_load_namespace(void);
+
+static int no_auto_ssdt;
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_load_tables
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_tables(void)
+{
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_load_tables);
+
+ /* Load the namespace from the tables */
+
+ status = acpi_tb_load_namespace();
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "While loading namespace from ACPI tables"));
+ }
+
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_tables)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_load_namespace
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the namespace from the DSDT and all SSDTs/PSDTs found in
+ * the RSDT/XSDT.
+ *
+ ******************************************************************************/
+static acpi_status acpi_tb_load_namespace(void)
+{
+ acpi_status status;
+ u32 i;
+ struct acpi_table_header *new_dsdt;
+
+ ACPI_FUNCTION_TRACE(tb_load_namespace);
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+ /*
+ * Load the namespace. The DSDT is required, but any SSDT and
+ * PSDT tables are optional. Verify the DSDT.
+ */
+ if (!acpi_gbl_root_table_list.current_table_count ||
+ !ACPI_COMPARE_NAME(&
+ (acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT].signature),
+ ACPI_SIG_DSDT)
+ ||
+ ACPI_FAILURE(acpi_tb_verify_table
+ (&acpi_gbl_root_table_list.
+ tables[ACPI_TABLE_INDEX_DSDT]))) {
+ status = AE_NO_ACPI_TABLES;
+ goto unlock_and_exit;
+ }
+
+ /*
+ * Save the DSDT pointer for simple access. This is the mapped memory
+ * address. We must take care here because the address of the .Tables
+ * array can change dynamically as tables are loaded at run-time. Note:
+ * .Pointer field is not validated until after call to acpi_tb_verify_table.
+ */
+ acpi_gbl_DSDT =
+ acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
+
+ /*
+ * Optionally copy the entire DSDT to local memory (instead of simply
+ * mapping it.) There are some BIOSs that corrupt or replace the original
+ * DSDT, creating the need for this option. Default is FALSE, do not copy
+ * the DSDT.
+ */
+ if (acpi_gbl_copy_dsdt_locally) {
+ new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+ if (new_dsdt) {
+ acpi_gbl_DSDT = new_dsdt;
+ }
+ }
+
+ /*
+ * Save the original DSDT header for detection of table corruption
+ * and/or replacement of the DSDT from outside the OS.
+ */
+ ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
+ sizeof(struct acpi_table_header));
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+
+ /* Load and parse tables */
+
+ status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+ if ((!ACPI_COMPARE_NAME
+ (&(acpi_gbl_root_table_list.tables[i].signature),
+ ACPI_SIG_SSDT)
+ &&
+ !ACPI_COMPARE_NAME(&
+ (acpi_gbl_root_table_list.tables[i].
+ signature), ACPI_SIG_PSDT))
+ ||
+ ACPI_FAILURE(acpi_tb_verify_table
+ (&acpi_gbl_root_table_list.tables[i]))) {
+ continue;
+ }
+
+ if (no_auto_ssdt) {
+ printk(KERN_WARNING "ACPI: SSDT ignored due to \"acpi_no_auto_ssdt\"\n");
+ continue;
+ }
+
+ /* Ignore errors while loading tables, get as many as possible */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ (void)acpi_ns_load_table(i, acpi_gbl_root_node);
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI Tables successfully acquired\n"));
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_load_table
+ *
+ * PARAMETERS: table - Pointer to a buffer containing the ACPI
+ * table to be loaded.
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Dynamically load an ACPI table from the caller's buffer. Must
+ * be a valid ACPI table with a valid ACPI table header.
+ * Note1: Mainly intended to support hotplug addition of SSDTs.
+ * Note2: Does not copy the incoming table. User is responsible
+ * to ensure that the table is not deleted or unmapped.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_load_table(struct acpi_table_header *table)
+{
+ acpi_status status;
+ struct acpi_table_desc table_desc;
+ u32 table_index;
+
+ ACPI_FUNCTION_TRACE(acpi_load_table);
+
+ /* Parameter validation */
+
+ if (!table) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /* Init local table descriptor */
+
+ ACPI_MEMSET(&table_desc, 0, sizeof(struct acpi_table_desc));
+ table_desc.address = ACPI_PTR_TO_PHYSADDR(table);
+ table_desc.pointer = table;
+ table_desc.length = table->length;
+ table_desc.flags = ACPI_TABLE_ORIGIN_UNKNOWN;
+
+ /* Must acquire the interpreter lock during this operation */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Install the table and load it into the namespace */
+
+ ACPI_INFO((AE_INFO, "Host-directed Dynamic ACPI Table Load:"));
+ status = acpi_tb_add_table(&table_desc, &table_index);
+ if (ACPI_FAILURE(status)) {
+ goto unlock_and_exit;
+ }
+
+ status = acpi_ns_load_table(table_index, acpi_gbl_root_node);
+
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
+ acpi_gbl_table_handler_context);
+ }
+
+ unlock_and_exit:
+ (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_load_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_unload_parent_table
+ *
+ * PARAMETERS: object - Handle to any namespace object owned by
+ * the table to be unloaded
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Via any namespace object within an SSDT or OEMx table, unloads
+ * the table and deletes all namespace objects associated with
+ * that table. Unloading of the DSDT is not allowed.
+ * Note: Mainly intended to support hotplug removal of SSDTs.
+ *
+ ******************************************************************************/
+acpi_status acpi_unload_parent_table(acpi_handle object)
+{
+ struct acpi_namespace_node *node =
+ ACPI_CAST_PTR(struct acpi_namespace_node, object);
+ acpi_status status = AE_NOT_EXIST;
+ acpi_owner_id owner_id;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(acpi_unload_parent_table);
+
+ /* Parameter validation */
+
+ if (!object) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ /*
+ * The node owner_id is currently the same as the parent table ID.
+ * However, this could change in the future.
+ */
+ owner_id = node->owner_id;
+ if (!owner_id) {
+
+ /* owner_id==0 means DSDT is the owner. DSDT cannot be unloaded */
+
+ return_ACPI_STATUS(AE_TYPE);
+ }
+
+ /* Must acquire the interpreter lock during this operation */
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Find the table in the global table list */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
+ if (owner_id != acpi_gbl_root_table_list.tables[i].owner_id) {
+ continue;
+ }
+
+ /*
+ * Allow unload of SSDT and OEMx tables only. Do not allow unload
+ * of the DSDT. No other types of tables should get here, since
+ * only these types can contain AML and thus are the only types
+ * that can create namespace objects.
+ */
+ if (ACPI_COMPARE_NAME
+ (acpi_gbl_root_table_list.tables[i].signature.ascii,
+ ACPI_SIG_DSDT)) {
+ status = AE_TYPE;
+ break;
+ }
+
+ /* Ensure the table is actually loaded */
+
+ if (!acpi_tb_is_table_loaded(i)) {
+ status = AE_NOT_EXIST;
+ break;
+ }
+
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+ acpi_gbl_root_table_list.
+ tables[i].pointer,
+ acpi_gbl_table_handler_context);
+ }
+
+ /*
+ * Delete all namespace objects owned by this table. Note that
+ * these objects can appear anywhere in the namespace by virtue
+ * of the AML "Scope" operator. Thus, we need to track ownership
+ * by an ID, not simply a position within the hierarchy.
+ */
+ status = acpi_tb_delete_namespace_by_owner(i);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ status = acpi_tb_release_owner_id(i);
+ acpi_tb_set_table_loaded_flag(i, FALSE);
+ break;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_unload_parent_table)
+
+static int __init acpi_no_auto_ssdt_setup(char *s) {
+
+ printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
+
+ no_auto_ssdt = 1;
+
+ return 1;
+}
+
+__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
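
The new tbxfload.c collects the load/unload external interfaces: acpi_load_tables, acpi_tb_load_namespace, a reworked acpi_load_table that takes the interpreter lock and invokes the table handler, and a new acpi_unload_parent_table for removing an SSDT via any namespace object it owns. A hedged usage sketch of that hotplug pairing; my_ssdt_image and my_dev_handle are placeholders, and the buffer must stay valid because acpi_load_table() does not copy it:

#include <linux/acpi.h>
#include <linux/printk.h>

static acpi_status example_hotplug_ssdt(struct acpi_table_header *my_ssdt_image,
                                        acpi_handle my_dev_handle)
{
        acpi_status status;

        /* Install the caller-owned SSDT and load it into the namespace */
        status = acpi_load_table(my_ssdt_image);
        if (ACPI_FAILURE(status)) {
                pr_warn("SSDT load failed: %s\n",
                        acpi_format_exception(status));
                return status;
        }

        /* ... use the objects the SSDT defined ... */

        /* Unload via a handle owned by that table (never the DSDT) */
        return acpi_unload_parent_table(my_dev_handle);
}
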
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 4258f647ca3d..74e720800037 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -57,7 +57,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
*
* FUNCTION: acpi_tb_validate_rsdp
*
- * PARAMETERS: Rsdp - Pointer to unvalidated RSDP
+ * PARAMETERS: rsdp - Pointer to unvalidated RSDP
*
* RETURN: Status
*
@@ -107,10 +107,10 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
*
* RETURN: Status, RSDP physical address
*
- * DESCRIPTION: Search lower 1_mbyte of memory for the root system descriptor
+ * DESCRIPTION: Search lower 1Mbyte of memory for the root system descriptor
* pointer structure. If it is found, set *RSDP to point to it.
*
- * NOTE1: The RSDP must be either in the first 1_k of the Extended
+ * NOTE1: The RSDP must be either in the first 1K of the Extended
* BIOS Data Area or between E0000 and FFFFF (From ACPI Spec.)
* Only a 32-bit physical address is necessary.
*
@@ -152,7 +152,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
if (physical_address > 0x400) {
/*
* 1b) Search EBDA paragraphs (EBDA is required to be a
- * minimum of 1_k length)
+ * minimum of 1K length)
*/
table_ptr = acpi_os_map_memory((acpi_physical_address)
physical_address,
@@ -216,7 +216,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
/* A valid RSDP was not found */
- ACPI_ERROR((AE_INFO, "A valid RSDP was not found"));
+ ACPI_BIOS_ERROR((AE_INFO, "A valid RSDP was not found"));
return_ACPI_STATUS(AE_NOT_FOUND);
}
@@ -225,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
* FUNCTION: acpi_tb_scan_memory_for_rsdp
*
* PARAMETERS: start_address - Starting pointer for search
- * Length - Maximum length to search
+ * length - Maximum length to search
*
* RETURN: Pointer to the RSDP if found, otherwise NULL.
*
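
The tbxfroot.c hunk mainly fixes comment spellings ("1Mbyte", "1K") and turns the missing-RSDP error into an ACPI_BIOS_ERROR. The search it documents scans the EBDA and the E0000h-FFFFFh BIOS window for the "RSD PTR " signature, which the ACPI spec places on 16-byte boundaries. A standalone sketch of that scan over an in-memory buffer; the real code maps physical memory first and then validates the candidate's checksum and revision:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSDP_SIGNATURE  "RSD PTR "      /* 8 bytes, includes trailing space */
#define RSDP_STRIDE     16              /* RSDP is 16-byte aligned */

/* Walk a memory range on 16-byte boundaries looking for the signature. */
static const uint8_t *scan_for_rsdp(const uint8_t *start, size_t length)
{
        for (size_t offset = 0; offset + 8 <= length; offset += RSDP_STRIDE) {
                if (memcmp(start + offset, RSDP_SIGNATURE, 8) == 0)
                        return start + offset;
        }
        return NULL;
}

int main(void)
{
        uint8_t area[64] = { 0 };
        const uint8_t *rsdp;

        memcpy(area + 32, RSDP_SIGNATURE, 8);   /* plant a fake RSDP */
        rsdp = scan_for_rsdp(area, sizeof(area));

        printf("found at offset %td\n", rsdp ? rsdp - area : -1);  /* 32 */
        return 0;
}
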
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 67932aebe6dd..64880306133d 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -53,8 +53,8 @@ ACPI_MODULE_NAME("utaddress")
* FUNCTION: acpi_ut_add_address_range
*
* PARAMETERS: space_id - Address space ID
- * Address - op_region start address
- * Length - op_region length
+ * address - op_region start address
+ * length - op_region length
* region_node - op_region namespace node
*
* RETURN: Status
@@ -186,9 +186,9 @@ acpi_ut_remove_address_range(acpi_adr_space_type space_id,
* FUNCTION: acpi_ut_check_address_range
*
* PARAMETERS: space_id - Address space ID
- * Address - Start address
- * Length - Length of address range
- * Warn - TRUE if warning on overlap desired
+ * address - Start address
+ * length - Length of address range
+ * warn - TRUE if warning on overlap desired
*
* RETURN: Count of the number of conflicts detected. Zero is always
* returned for Space IDs other than Memory or I/O.
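
The utaddress.c change is again comment-case only, but the function it documents answers a simple question: does a new operation region [address, address+length) overlap any previously registered range for that space ID? A standalone sketch of the overlap test at the heart of that conflict count:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Two half-open ranges [a, a+alen) and [b, b+blen) overlap when each
 * starts before the other ends.
 */
static bool ranges_overlap(uint64_t a, uint32_t alen, uint64_t b, uint32_t blen)
{
        return a < b + blen && b < a + alen;
}

int main(void)
{
        /* Opregion at 0x1000..0x10FF vs. one at 0x1080..0x117F: conflict */
        printf("%d\n", ranges_overlap(0x1000, 0x100, 0x1080, 0x100));   /* 1 */
        /* Adjacent but non-overlapping ranges: no conflict */
        printf("%d\n", ranges_overlap(0x1000, 0x100, 0x1100, 0x100));   /* 0 */
        return 0;
}
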
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 9982d2ea66fb..ed29d474095e 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -189,7 +189,7 @@ acpi_status acpi_ut_delete_caches(void)
*
* FUNCTION: acpi_ut_validate_buffer
*
- * PARAMETERS: Buffer - Buffer descriptor to be validated
+ * PARAMETERS: buffer - Buffer descriptor to be validated
*
* RETURN: Status
*
@@ -227,7 +227,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
*
* FUNCTION: acpi_ut_initialize_buffer
*
- * PARAMETERS: Buffer - Buffer to be validated
+ * PARAMETERS: buffer - Buffer to be validated
* required_length - Length needed
*
* RETURN: Status
@@ -308,10 +308,10 @@ acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
*
* FUNCTION: acpi_ut_allocate
*
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
@@ -352,10 +352,10 @@ void *acpi_ut_allocate(acpi_size size,
*
* FUNCTION: acpi_ut_allocate_zeroed
*
- * PARAMETERS: Size - Size of the allocation
- * Component - Component type of caller
- * Module - Source file name of caller
- * Line - Line number of caller
+ * PARAMETERS: size - Size of the allocation
+ * component - Component type of caller
+ * module - Source file name of caller
+ * line - Line number of caller
*
* RETURN: Address of the allocated memory on success, NULL on failure.
*
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 3317c0a406ee..294692ae76e9 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -317,7 +317,7 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
* FUNCTION: acpi_ut_copy_ipackage_to_epackage
*
* PARAMETERS: internal_object - Pointer to the object we are returning
- * Buffer - Where the object is returned
+ * buffer - Where the object is returned
* space_used - Where the object length is returned
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a0998a886318..e810894149ae 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -145,7 +145,7 @@ static const char *acpi_ut_trim_function_name(const char *function_name)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Format - Printf format field
+ * format - Printf format field
* ... - Optional printf arguments
*
* RETURN: None
@@ -217,7 +217,7 @@ ACPI_EXPORT_SYMBOL(acpi_debug_print)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Format - Printf format field
+ * format - Printf format field
* ... - Optional printf arguments
*
* RETURN: None
@@ -286,7 +286,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Pointer - Pointer to display
+ * pointer - Pointer to display
*
* RETURN: None
*
@@ -315,7 +315,7 @@ acpi_ut_trace_ptr(u32 line_number,
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * String - Additional string to display
+ * string - Additional string to display
*
* RETURN: None
*
@@ -346,7 +346,7 @@ acpi_ut_trace_str(u32 line_number,
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Integer - Integer to display
+ * integer - Integer to display
*
* RETURN: None
*
@@ -408,7 +408,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_exit)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Status - Exit status code
+ * status - Exit status code
*
* RETURN: None
*
@@ -449,7 +449,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Value - Value to be printed with exit msg
+ * value - Value to be printed with exit msg
*
* RETURN: None
*
@@ -481,7 +481,7 @@ ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
- * Ptr - Pointer to display
+ * ptr - Pointer to display
*
* RETURN: None
*
@@ -508,10 +508,10 @@ acpi_ut_ptr_exit(u32 line_number,
*
* FUNCTION: acpi_ut_dump_buffer
*
- * PARAMETERS: Buffer - Buffer to dump
- * Count - Amount to dump, in bytes
- * Display - BYTE, WORD, DWORD, or QWORD display
- * component_iD - Caller's component ID
+ * PARAMETERS: buffer - Buffer to dump
+ * count - Amount to dump, in bytes
+ * display - BYTE, WORD, DWORD, or QWORD display
+ * component_ID - Caller's component ID
*
* RETURN: None
*
@@ -625,10 +625,10 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
*
* FUNCTION: acpi_ut_dump_buffer
*
- * PARAMETERS: Buffer - Buffer to dump
- * Count - Amount to dump, in bytes
- * Display - BYTE, WORD, DWORD, or QWORD display
- * component_iD - Caller's component ID
+ * PARAMETERS: buffer - Buffer to dump
+ * count - Amount to dump, in bytes
+ * display - BYTE, WORD, DWORD, or QWORD display
+ * component_ID - Caller's component ID
*
* RETURN: None
*
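
The utdebug.c hunk only lowercases parameter names, but acpi_ut_dump_buffer's contract is worth a concrete picture: dump "count" bytes of a buffer in the requested BYTE/WORD/DWORD/QWORD grouping. A minimal standalone byte-granularity dump, assuming nothing about the ACPICA display flags:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Dump a buffer as hex bytes, 16 per line, offsets on the left. */
static void dump_bytes(const uint8_t *buffer, size_t count)
{
        for (size_t i = 0; i < count; i += 16) {
                printf("%04zx:", i);
                for (size_t j = i; j < i + 16 && j < count; j++)
                        printf(" %02x", buffer[j]);
                putchar('\n');
        }
}

int main(void)
{
        uint8_t sample[20];

        for (size_t i = 0; i < sizeof(sample); i++)
                sample[i] = (uint8_t)i;
        dump_bytes(sample, sizeof(sample));
        return 0;
}
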
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 684849949bf3..60a158472d82 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -49,41 +49,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdecode")
-/*******************************************************************************
- *
- * FUNCTION: acpi_format_exception
- *
- * PARAMETERS: Status - The acpi_status code to be formatted
- *
- * RETURN: A string containing the exception text. A valid pointer is
- * always returned.
- *
- * DESCRIPTION: This function translates an ACPI exception into an ASCII string
- * It is here instead of utxface.c so it is always present.
- *
- ******************************************************************************/
-const char *acpi_format_exception(acpi_status status)
-{
- const char *exception = NULL;
-
- ACPI_FUNCTION_ENTRY();
-
- exception = acpi_ut_validate_exception(status);
- if (!exception) {
-
- /* Exception code was not recognized */
-
- ACPI_ERROR((AE_INFO,
- "Unknown exception code: 0x%8.8X", status));
-
- exception = "UNKNOWN_STATUS_CODE";
- }
-
- return (ACPI_CAST_PTR(const char, exception));
-}
-
-ACPI_EXPORT_SYMBOL(acpi_format_exception)
-
/*
* Properties of the ACPI Object Types, both internal and external.
* The table is indexed by values of acpi_object_type
@@ -126,8 +91,8 @@ const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
*
* FUNCTION: acpi_ut_hex_to_ascii_char
*
- * PARAMETERS: Integer - Contains the hex digit
- * Position - bit position of the digit within the
+ * PARAMETERS: integer - Contains the hex digit
+ * position - bit position of the digit within the
* integer (multiple of 4)
*
* RETURN: The converted Ascii character
@@ -164,16 +129,17 @@ char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
/* Region type decoding */
const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
- "SystemMemory",
- "SystemIO",
- "PCI_Config",
- "EmbeddedControl",
- "SMBus",
- "SystemCMOS",
- "PCIBARTarget",
- "IPMI",
- "GeneralPurposeIo",
- "GenericSerialBus"
+ "SystemMemory", /* 0x00 */
+ "SystemIO", /* 0x01 */
+ "PCI_Config", /* 0x02 */
+ "EmbeddedControl", /* 0x03 */
+ "SMBus", /* 0x04 */
+ "SystemCMOS", /* 0x05 */
+ "PCIBARTarget", /* 0x06 */
+ "IPMI", /* 0x07 */
+ "GeneralPurposeIo", /* 0x08 */
+ "GenericSerialBus", /* 0x09 */
+ "PCC" /* 0x0A */
};
char *acpi_ut_get_region_name(u8 space_id)
@@ -228,7 +194,7 @@ char *acpi_ut_get_event_name(u32 event_id)
*
* FUNCTION: acpi_ut_get_type_name
*
- * PARAMETERS: Type - An ACPI object type
+ * PARAMETERS: type - An ACPI object type
*
* RETURN: Decoded ACPI object type name
*
@@ -306,7 +272,7 @@ char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
*
* FUNCTION: acpi_ut_get_node_name
*
- * PARAMETERS: Object - A namespace node
+ * PARAMETERS: object - A namespace node
*
* RETURN: ASCII name of the node
*
@@ -351,7 +317,7 @@ char *acpi_ut_get_node_name(void *object)
*
* FUNCTION: acpi_ut_get_descriptor_name
*
- * PARAMETERS: Object - An ACPI object
+ * PARAMETERS: object - An ACPI object
*
* RETURN: Decoded name of the descriptor type
*
@@ -401,7 +367,7 @@ char *acpi_ut_get_descriptor_name(void *object)
*
* FUNCTION: acpi_ut_get_reference_name
*
- * PARAMETERS: Object - An ACPI reference object
+ * PARAMETERS: object - An ACPI reference object
*
* RETURN: Decoded name of the type of reference
*
@@ -532,7 +498,7 @@ const char *acpi_ut_get_notify_name(u32 notify_value)
*
* FUNCTION: acpi_ut_valid_object_type
*
- * PARAMETERS: Type - Object type to be validated
+ * PARAMETERS: type - Object type to be validated
*
* RETURN: TRUE if valid object type, FALSE otherwise
*
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 2a6c3e183697..798105443d0f 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -60,7 +60,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action);
*
* FUNCTION: acpi_ut_delete_internal_obj
*
- * PARAMETERS: Object - Object to be deleted
+ * PARAMETERS: object - Object to be deleted
*
* RETURN: None
*
@@ -152,7 +152,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
- /* Walk the notify handler list for this object */
+ /* Walk the address handler list for this object */
handler_desc = object->common_notify.handler;
while (handler_desc) {
@@ -358,8 +358,8 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list)
*
* FUNCTION: acpi_ut_update_ref_count
*
- * PARAMETERS: Object - Object whose ref count is to be updated
- * Action - What to do
+ * PARAMETERS: object - Object whose ref count is to be updated
+ * action - What to do
*
* RETURN: New ref count
*
@@ -456,9 +456,9 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
*
* FUNCTION: acpi_ut_update_object_reference
*
- * PARAMETERS: Object - Increment ref count for this object
+ * PARAMETERS: object - Increment ref count for this object
* and all sub-objects
- * Action - Either REF_INCREMENT or REF_DECREMENT or
+ * action - Either REF_INCREMENT or REF_DECREMENT or
* REF_FORCE_DELETE
*
* RETURN: Status
@@ -480,6 +480,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_operand_object *next_object = NULL;
+ union acpi_operand_object *prev_object;
union acpi_generic_state *state;
u32 i;
@@ -505,12 +506,21 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
case ACPI_TYPE_POWER:
case ACPI_TYPE_THERMAL:
- /* Update the notify objects for these types (if present) */
-
- acpi_ut_update_ref_count(object->common_notify.
- system_notify, action);
- acpi_ut_update_ref_count(object->common_notify.
- device_notify, action);
+ /*
+ * Update the notify objects for these types (if present)
+ * Two lists, system and device notify handlers.
+ */
+ for (i = 0; i < ACPI_NUM_NOTIFY_TYPES; i++) {
+ prev_object =
+ object->common_notify.notify_list[i];
+ while (prev_object) {
+ next_object =
+ prev_object->notify.next[i];
+ acpi_ut_update_ref_count(prev_object,
+ action);
+ prev_object = next_object;
+ }
+ }
break;
case ACPI_TYPE_PACKAGE:
@@ -630,7 +640,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
*
* FUNCTION: acpi_ut_add_reference
*
- * PARAMETERS: Object - Object whose reference count is to be
+ * PARAMETERS: object - Object whose reference count is to be
* incremented
*
* RETURN: None
@@ -664,7 +674,7 @@ void acpi_ut_add_reference(union acpi_operand_object *object)
*
* FUNCTION: acpi_ut_remove_reference
*
- * PARAMETERS: Object - Object whose ref count will be decremented
+ * PARAMETERS: object - Object whose ref count will be decremented
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 479f32b33415..a9c65fbea5f4 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("uteval")
* FUNCTION: acpi_ut_evaluate_object
*
* PARAMETERS: prefix_node - Starting node
- * Path - Path to object from starting node
+ * path - Path to object from starting node
* expected_return_types - Bitmap of allowed return types
* return_desc - Where a return value is stored
*
@@ -187,7 +187,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
*
* PARAMETERS: object_name - Object name to be evaluated
* device_node - Node for the device
- * Value - Where the value is returned
+ * value - Where the value is returned
*
* RETURN: Status
*
@@ -229,7 +229,7 @@ acpi_ut_evaluate_numeric_object(char *object_name,
* FUNCTION: acpi_ut_execute_STA
*
* PARAMETERS: device_node - Node for the device
- * Flags - Where the status flags are returned
+ * flags - Where the status flags are returned
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
new file mode 100644
index 000000000000..23b98945f6b7
--- /dev/null
+++ b/drivers/acpi/acpica/utexcep.c
@@ -0,0 +1,153 @@
+/*******************************************************************************
+ *
+ * Module Name: utexcep - Exception code support
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#define ACPI_DEFINE_EXCEPTION_TABLE
+#include <linux/export.h>
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utexcep")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_format_exception
+ *
+ * PARAMETERS: status - The acpi_status code to be formatted
+ *
+ * RETURN: A string containing the exception text. A valid pointer is
+ * always returned.
+ *
+ * DESCRIPTION: This function translates an ACPI exception into an ASCII
+ * string. Returns "unknown status" string for invalid codes.
+ *
+ ******************************************************************************/
+const char *acpi_format_exception(acpi_status status)
+{
+ const char *exception = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ exception = acpi_ut_validate_exception(status);
+ if (!exception) {
+
+ /* Exception code was not recognized */
+
+ ACPI_ERROR((AE_INFO,
+ "Unknown exception code: 0x%8.8X", status));
+
+ exception = "UNKNOWN_STATUS_CODE";
+ }
+
+ return (ACPI_CAST_PTR(const char, exception));
+}
+
+ACPI_EXPORT_SYMBOL(acpi_format_exception)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_validate_exception
+ *
+ * PARAMETERS: status - The acpi_status code to be formatted
+ *
+ * RETURN: A string containing the exception text. NULL if exception is
+ * not valid.
+ *
+ * DESCRIPTION: This function validates and translates an ACPI exception into
+ * an ASCII string.
+ *
+ ******************************************************************************/
+const char *acpi_ut_validate_exception(acpi_status status)
+{
+ u32 sub_status;
+ const char *exception = NULL;
+
+ ACPI_FUNCTION_ENTRY();
+
+ /*
+ * Status is composed of two parts, a "type" and an actual code
+ */
+ sub_status = (status & ~AE_CODE_MASK);
+
+ switch (status & AE_CODE_MASK) {
+ case AE_CODE_ENVIRONMENTAL:
+
+ if (sub_status <= AE_CODE_ENV_MAX) {
+ exception = acpi_gbl_exception_names_env[sub_status];
+ }
+ break;
+
+ case AE_CODE_PROGRAMMER:
+
+ if (sub_status <= AE_CODE_PGM_MAX) {
+ exception = acpi_gbl_exception_names_pgm[sub_status];
+ }
+ break;
+
+ case AE_CODE_ACPI_TABLES:
+
+ if (sub_status <= AE_CODE_TBL_MAX) {
+ exception = acpi_gbl_exception_names_tbl[sub_status];
+ }
+ break;
+
+ case AE_CODE_AML:
+
+ if (sub_status <= AE_CODE_AML_MAX) {
+ exception = acpi_gbl_exception_names_aml[sub_status];
+ }
+ break;
+
+ case AE_CODE_CONTROL:
+
+ if (sub_status <= AE_CODE_CTRL_MAX) {
+ exception = acpi_gbl_exception_names_ctrl[sub_status];
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return (ACPI_CAST_PTR(const char, exception));
+}
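
[Editor's note, not part of the patch] The new utexcep.c keeps acpi_format_exception() and acpi_ut_validate_exception() together so the exception-name tables are always linked in. A minimal usage sketch under that assumption; the wrapper function and pr_err() call are illustrative, only acpi_format_exception() comes from the hunk above:

	#include <linux/printk.h>
	#include <acpi/acpi.h>

	/* Report an ACPI failure as readable text plus the raw status code. */
	static void report_acpi_failure(acpi_status status)
	{
		if (ACPI_FAILURE(status))
			pr_err("ACPI call failed: %s (0x%8.8X)\n",
			       acpi_format_exception(status), status);
	}
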
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 90f53b42eca9..ed1893155f8b 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -247,8 +247,9 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
*
* RETURN: Status
*
- * DESCRIPTION: Init library globals. All globals that require specific
- * initialization should be initialized here!
+ * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
+ * initialization should be initialized here. This allows for
+ * a warm restart.
*
******************************************************************************/
@@ -284,7 +285,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_owner_id_mask[i] = 0;
}
- /* Last owner_iD is never valid */
+ /* Last owner_ID is never valid */
acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
@@ -304,8 +305,8 @@ acpi_status acpi_ut_init_globals(void)
/* Global handlers */
- acpi_gbl_system_notify.handler = NULL;
- acpi_gbl_device_notify.handler = NULL;
+ acpi_gbl_global_notify[0].handler = NULL;
+ acpi_gbl_global_notify[1].handler = NULL;
acpi_gbl_exception_handler = NULL;
acpi_gbl_init_handler = NULL;
acpi_gbl_table_handler = NULL;
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index c92eb1d93785..5d84e1954575 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Module Name: utids - support for device IDs - HID, UID, CID
+ * Module Name: utids - support for device Ids - HID, UID, CID
*
*****************************************************************************/
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index 155fd786d0f2..b1eb7f17e110 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("utlock")
* FUNCTION: acpi_ut_create_rw_lock
* acpi_ut_delete_rw_lock
*
- * PARAMETERS: Lock - Pointer to a valid RW lock
+ * PARAMETERS: lock - Pointer to a valid RW lock
*
* RETURN: Status
*
@@ -89,7 +89,7 @@ void acpi_ut_delete_rw_lock(struct acpi_rw_lock *lock)
* FUNCTION: acpi_ut_acquire_read_lock
* acpi_ut_release_read_lock
*
- * PARAMETERS: Lock - Pointer to a valid RW lock
+ * PARAMETERS: lock - Pointer to a valid RW lock
*
* RETURN: Status
*
@@ -149,7 +149,7 @@ acpi_status acpi_ut_release_read_lock(struct acpi_rw_lock *lock)
* FUNCTION: acpi_ut_acquire_write_lock
* acpi_ut_release_write_lock
*
- * PARAMETERS: Lock - Pointer to a valid RW lock
+ * PARAMETERS: lock - Pointer to a valid RW lock
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 2491a552b0e6..d88a8aaab2a6 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -73,8 +73,8 @@ typedef union uint64_overlay {
*
* FUNCTION: acpi_ut_short_divide
*
- * PARAMETERS: Dividend - 64-bit dividend
- * Divisor - 32-bit divisor
+ * PARAMETERS: dividend - 64-bit dividend
+ * divisor - 32-bit divisor
* out_quotient - Pointer to where the quotient is returned
* out_remainder - Pointer to where the remainder is returned
*
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 86f19db74e05..33c6cf7ff467 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -50,79 +50,41 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmisc")
+#if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
/*******************************************************************************
*
- * FUNCTION: acpi_ut_validate_exception
+ * FUNCTION: ut_convert_backslashes
*
- * PARAMETERS: Status - The acpi_status code to be formatted
+ * PARAMETERS: pathname - File pathname string to be converted
*
- * RETURN: A string containing the exception text. NULL if exception is
- * not valid.
+ * RETURN: Modifies the input Pathname
*
- * DESCRIPTION: This function validates and translates an ACPI exception into
- * an ASCII string.
+ * DESCRIPTION: Convert all backslashes (0x5C) to forward slashes (0x2F) within
+ * the entire input file pathname string.
*
******************************************************************************/
-const char *acpi_ut_validate_exception(acpi_status status)
+void ut_convert_backslashes(char *pathname)
{
- u32 sub_status;
- const char *exception = NULL;
- ACPI_FUNCTION_ENTRY();
-
- /*
- * Status is composed of two parts, a "type" and an actual code
- */
- sub_status = (status & ~AE_CODE_MASK);
-
- switch (status & AE_CODE_MASK) {
- case AE_CODE_ENVIRONMENTAL:
-
- if (sub_status <= AE_CODE_ENV_MAX) {
- exception = acpi_gbl_exception_names_env[sub_status];
- }
- break;
-
- case AE_CODE_PROGRAMMER:
-
- if (sub_status <= AE_CODE_PGM_MAX) {
- exception = acpi_gbl_exception_names_pgm[sub_status];
- }
- break;
-
- case AE_CODE_ACPI_TABLES:
-
- if (sub_status <= AE_CODE_TBL_MAX) {
- exception = acpi_gbl_exception_names_tbl[sub_status];
- }
- break;
-
- case AE_CODE_AML:
-
- if (sub_status <= AE_CODE_AML_MAX) {
- exception = acpi_gbl_exception_names_aml[sub_status];
- }
- break;
-
- case AE_CODE_CONTROL:
+ if (!pathname) {
+ return;
+ }
- if (sub_status <= AE_CODE_CTRL_MAX) {
- exception = acpi_gbl_exception_names_ctrl[sub_status];
+ while (*pathname) {
+ if (*pathname == '\\') {
+ *pathname = '/';
}
- break;
- default:
- break;
+ pathname++;
}
-
- return (ACPI_CAST_PTR(const char, exception));
}
+#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_is_pci_root_bridge
*
- * PARAMETERS: Id - The HID/CID in string format
+ * PARAMETERS: id - The HID/CID in string format
*
* RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
*
@@ -150,7 +112,7 @@ u8 acpi_ut_is_pci_root_bridge(char *id)
*
* FUNCTION: acpi_ut_is_aml_table
*
- * PARAMETERS: Table - An ACPI table
+ * PARAMETERS: table - An ACPI table
*
* RETURN: TRUE if table contains executable AML; FALSE otherwise
*
@@ -284,7 +246,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
*
* FUNCTION: acpi_ut_release_owner_id
*
- * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_iD
+ * PARAMETERS: owner_id_ptr - Pointer to a previously allocated owner_ID
*
* RETURN: None. No error is returned because we are either exiting a
* control method or unloading a table. Either way, we would
@@ -307,7 +269,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
*owner_id_ptr = 0;
- /* Zero is not a valid owner_iD */
+ /* Zero is not a valid owner_ID */
if (owner_id == 0) {
ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
@@ -381,7 +343,7 @@ void acpi_ut_strupr(char *src_string)
*
* FUNCTION: acpi_ut_print_string
*
- * PARAMETERS: String - Null terminated ASCII string
+ * PARAMETERS: string - Null terminated ASCII string
* max_length - Maximum output length
*
* RETURN: None
@@ -467,7 +429,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
*
* FUNCTION: acpi_ut_dword_byte_swap
*
- * PARAMETERS: Value - Value to be converted
+ * PARAMETERS: value - Value to be converted
*
* RETURN: u32 integer with bytes swapped
*
@@ -537,9 +499,9 @@ void acpi_ut_set_integer_width(u8 revision)
*
* FUNCTION: acpi_ut_display_init_pathname
*
- * PARAMETERS: Type - Object type of the node
+ * PARAMETERS: type - Object type of the node
* obj_handle - Handle whose pathname will be displayed
- * Path - Additional path string to be appended.
+ * path - Additional path string to be appended.
* (NULL if no extra path)
*
* RETURN: acpi_status
@@ -604,8 +566,8 @@ acpi_ut_display_init_pathname(u8 type,
*
* FUNCTION: acpi_ut_valid_acpi_char
*
- * PARAMETERS: Char - The character to be examined
- * Position - Byte position (0-3)
+ * PARAMETERS: char - The character to be examined
+ * position - Byte position (0-3)
*
* RETURN: TRUE if the character is valid, FALSE otherwise
*
@@ -640,7 +602,7 @@ u8 acpi_ut_valid_acpi_char(char character, u32 position)
*
* FUNCTION: acpi_ut_valid_acpi_name
*
- * PARAMETERS: Name - The name to be examined
+ * PARAMETERS: name - The name to be examined
*
* RETURN: TRUE if the name is valid, FALSE otherwise
*
@@ -671,7 +633,7 @@ u8 acpi_ut_valid_acpi_name(u32 name)
*
* FUNCTION: acpi_ut_repair_name
*
- * PARAMETERS: Name - The ACPI name to be repaired
+ * PARAMETERS: name - The ACPI name to be repaired
*
* RETURN: Repaired version of the name
*
@@ -705,8 +667,8 @@ acpi_name acpi_ut_repair_name(char *name)
*
* FUNCTION: acpi_ut_strtoul64
*
- * PARAMETERS: String - Null terminated string
- * Base - Radix of the string: 16 or ACPI_ANY_BASE;
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or ACPI_ANY_BASE;
* ACPI_ANY_BASE means 'in behalf of to_integer'
* ret_integer - Where the converted integer is returned
*
@@ -755,7 +717,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
if (to_integer_op) {
/*
- * Base equal to ACPI_ANY_BASE means 'to_integer operation case'.
+ * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
* We need to determine if it is decimal or hexadecimal.
*/
if ((*string == '0') && (ACPI_TOLOWER(*(string + 1)) == 'x')) {
@@ -878,8 +840,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 * ret_integer)
*
* FUNCTION: acpi_ut_create_update_state_and_push
*
- * PARAMETERS: Object - Object to be added to the new state
- * Action - Increment/Decrement
+ * PARAMETERS: object - Object to be added to the new state
+ * action - Increment/Decrement
* state_list - List the state will be added to
*
* RETURN: Status
@@ -919,7 +881,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
* PARAMETERS: source_object - The package to walk
* target_object - Target object (if package is being copied)
* walk_callback - Called once for each package element
- * Context - Passed to the callback function
+ * context - Passed to the callback function
*
* RETURN: Status
*
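
[Editor's note, not part of the patch] ut_convert_backslashes(), added above for iASL/acpiexec builds only, rewrites a file pathname in place. A small sketch under that assumption; the filename is made up:

	/* Only compiled for ACPI_ASL_COMPILER / ACPI_EXEC_APP builds. */
	static void normalize_input_path(void)
	{
		char pathname[] = "tests\\aslts\\main.asl";

		ut_convert_backslashes(pathname);	/* now "tests/aslts/main.asl" */
	}
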
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 43174df33121..296baa676bc5 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -147,7 +147,7 @@ void acpi_ut_mutex_terminate(void)
*
* FUNCTION: acpi_ut_create_mutex
*
- * PARAMETERS: mutex_iD - ID of the mutex to be created
+ * PARAMETERS: mutex_ID - ID of the mutex to be created
*
* RETURN: Status
*
@@ -176,7 +176,7 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
*
* FUNCTION: acpi_ut_delete_mutex
*
- * PARAMETERS: mutex_iD - ID of the mutex to be deleted
+ * PARAMETERS: mutex_ID - ID of the mutex to be deleted
*
* RETURN: Status
*
@@ -199,7 +199,7 @@ static void acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
*
* FUNCTION: acpi_ut_acquire_mutex
*
- * PARAMETERS: mutex_iD - ID of the mutex to be acquired
+ * PARAMETERS: mutex_ID - ID of the mutex to be acquired
*
* RETURN: Status
*
@@ -283,7 +283,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
*
* FUNCTION: acpi_ut_release_mutex
*
- * PARAMETERS: mutex_iD - ID of the mutex to be released
+ * PARAMETERS: mutex_ID - ID of the mutex to be released
*
* RETURN: Status
*
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index b112744fc9ae..655f0799a391 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -69,7 +69,7 @@ acpi_ut_get_element_length(u8 object_type,
* PARAMETERS: module_name - Source file name of caller
* line_number - Line number of caller
* component_id - Component type of caller
- * Type - ACPI Type of the new object
+ * type - ACPI Type of the new object
*
* RETURN: A new internal object, null on failure
*
@@ -150,7 +150,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(const char
*
* FUNCTION: acpi_ut_create_package_object
*
- * PARAMETERS: Count - Number of package elements
+ * PARAMETERS: count - Number of package elements
*
* RETURN: Pointer to a new Package object, null on failure
*
@@ -323,11 +323,11 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
*
* FUNCTION: acpi_ut_valid_internal_object
*
- * PARAMETERS: Object - Object to be validated
+ * PARAMETERS: object - Object to be validated
*
* RETURN: TRUE if object is valid, FALSE otherwise
*
- * DESCRIPTION: Validate a pointer to be a union acpi_operand_object
+ * DESCRIPTION: Validate a pointer to be of type union acpi_operand_object
*
******************************************************************************/
@@ -348,7 +348,7 @@ u8 acpi_ut_valid_internal_object(void *object)
switch (ACPI_GET_DESCRIPTOR_TYPE(object)) {
case ACPI_DESC_TYPE_OPERAND:
- /* The object appears to be a valid union acpi_operand_object */
+ /* The object appears to be a valid union acpi_operand_object */
return (TRUE);
@@ -407,7 +407,7 @@ void *acpi_ut_allocate_object_desc_dbg(const char *module_name,
*
* FUNCTION: acpi_ut_delete_object_desc
*
- * PARAMETERS: Object - An Acpi internal object to be deleted
+ * PARAMETERS: object - An Acpi internal object to be deleted
*
* RETURN: None.
*
@@ -419,7 +419,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
{
ACPI_FUNCTION_TRACE_PTR(ut_delete_object_desc, object);
- /* Object must be a union acpi_operand_object */
+ /* Object must be a union acpi_operand_object */
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
ACPI_ERROR((AE_INFO,
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 2360cf70c18c..34ef0bd7e4b4 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -68,7 +68,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2001.1", NULL, 0, ACPI_OSI_WINSRV_2003}, /* Windows Server 2003 */
{"Windows 2001 SP2", NULL, 0, ACPI_OSI_WIN_XP_SP2}, /* Windows XP SP2 */
{"Windows 2001.1 SP1", NULL, 0, ACPI_OSI_WINSRV_2003_SP1}, /* Windows Server 2003 SP1 - Added 03/2006 */
- {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows Vista - Added 03/2006 */
+ {"Windows 2006", NULL, 0, ACPI_OSI_WIN_VISTA}, /* Windows vista - Added 03/2006 */
{"Windows 2006.1", NULL, 0, ACPI_OSI_WINSRV_2008}, /* Windows Server 2008 - Added 09/2009 */
{"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */
{"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 9d441ea70305..e38bef4980bc 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -356,13 +356,13 @@ static const u8 acpi_gbl_resource_types[] = {
ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
ACPI_FIXED_LENGTH, /* 08 IO */
- ACPI_FIXED_LENGTH, /* 09 fixed_iO */
- ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
+ ACPI_FIXED_LENGTH, /* 09 fixed_IO */
+ ACPI_FIXED_LENGTH, /* 0A fixed_DMA */
0,
0,
0,
- ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
- ACPI_FIXED_LENGTH, /* 0_f end_tag */
+ ACPI_VARIABLE_LENGTH, /* 0E vendor_short */
+ ACPI_FIXED_LENGTH, /* 0F end_tag */
/* Large descriptors */
@@ -375,16 +375,16 @@ static const u8 acpi_gbl_resource_types[] = {
ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
ACPI_VARIABLE_LENGTH, /* 08 Word* address */
- ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
- ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
- ACPI_FIXED_LENGTH, /* 0_b Extended* address */
- ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
+ ACPI_VARIABLE_LENGTH, /* 09 extended_IRQ */
+ ACPI_VARIABLE_LENGTH, /* 0A Qword* address */
+ ACPI_FIXED_LENGTH, /* 0B Extended* address */
+ ACPI_VARIABLE_LENGTH, /* 0C Gpio* */
0,
- ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
+ ACPI_VARIABLE_LENGTH /* 0E *serial_bus */
};
/*
- * For the i_aSL compiler/disassembler, we don't want any error messages
+ * For the iASL compiler/disassembler, we don't want any error messages
* because the disassembler uses the resource validation code to determine
* if Buffer objects are actually Resource Templates.
*/
@@ -398,11 +398,11 @@ static const u8 acpi_gbl_resource_types[] = {
*
* FUNCTION: acpi_ut_walk_aml_resources
*
- * PARAMETERS: Aml - Pointer to the raw AML resource template
+ * PARAMETERS: aml - Pointer to the raw AML resource template
* aml_length - Length of the entire template
* user_function - Called once for each descriptor found. If
* NULL, a pointer to the end_tag is returned
- * Context - Passed to user_function
+ * context - Passed to user_function
*
* RETURN: Status
*
@@ -513,7 +513,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
*
* FUNCTION: acpi_ut_validate_resource
*
- * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * PARAMETERS: aml - Pointer to the raw AML resource descriptor
* return_index - Where the resource index is returned. NULL
* if the index is not required.
*
@@ -664,7 +664,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
*
* FUNCTION: acpi_ut_get_resource_type
*
- * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: The Resource Type with no extraneous bits (except the
* Large/Small descriptor bit -- this is left alone)
@@ -698,7 +698,7 @@ u8 acpi_ut_get_resource_type(void *aml)
*
* FUNCTION: acpi_ut_get_resource_length
*
- * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte Length
*
@@ -738,7 +738,7 @@ u16 acpi_ut_get_resource_length(void *aml)
*
* FUNCTION: acpi_ut_get_resource_header_length
*
- * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Length of the AML header (depends on large/small descriptor)
*
@@ -763,7 +763,7 @@ u8 acpi_ut_get_resource_header_length(void *aml)
*
* FUNCTION: acpi_ut_get_descriptor_length
*
- * PARAMETERS: Aml - Pointer to the raw AML resource descriptor
+ * PARAMETERS: aml - Pointer to the raw AML resource descriptor
*
* RETURN: Byte length
*
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 4267477c2797..a1c988260073 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -51,8 +51,8 @@ ACPI_MODULE_NAME("utstate")
*
* FUNCTION: acpi_ut_create_pkg_state_and_push
*
- * PARAMETERS: Object - Object to be added to the new state
- * Action - Increment/Decrement
+ * PARAMETERS: object - Object to be added to the new state
+ * action - Increment/Decrement
* state_list - List the state will be added to
*
* RETURN: Status
@@ -85,7 +85,7 @@ acpi_ut_create_pkg_state_and_push(void *internal_object,
* FUNCTION: acpi_ut_push_generic_state
*
* PARAMETERS: list_head - Head of the state stack
- * State - State object to push
+ * state - State object to push
*
* RETURN: None
*
@@ -214,8 +214,8 @@ struct acpi_thread_state *acpi_ut_create_thread_state(void)
*
* FUNCTION: acpi_ut_create_update_state
*
- * PARAMETERS: Object - Initial Object to be installed in the state
- * Action - Update action to be performed
+ * PARAMETERS: object - Initial Object to be installed in the state
+ * action - Update action to be performed
*
* RETURN: New state object, null on failure
*
@@ -252,8 +252,8 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
*
* FUNCTION: acpi_ut_create_pkg_state
*
- * PARAMETERS: Object - Initial Object to be installed in the state
- * Action - Update action to be performed
+ * PARAMETERS: object - Initial Object to be installed in the state
+ * action - Update action to be performed
*
* RETURN: New state object, null on failure
*
@@ -325,7 +325,7 @@ union acpi_generic_state *acpi_ut_create_control_state(void)
*
* FUNCTION: acpi_ut_delete_generic_state
*
- * PARAMETERS: State - The state object to be deleted
+ * PARAMETERS: state - The state object to be deleted
*
* RETURN: None
*
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index afa94f51ff0b..534179f1177b 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -131,7 +131,7 @@ acpi_status __init acpi_initialize_subsystem(void)
*
* FUNCTION: acpi_enable_subsystem
*
- * PARAMETERS: Flags - Init/enable Options
+ * PARAMETERS: flags - Init/enable Options
*
* RETURN: Status
*
@@ -234,7 +234,7 @@ ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
*
* FUNCTION: acpi_initialize_objects
*
- * PARAMETERS: Flags - Init/enable Options
+ * PARAMETERS: flags - Init/enable Options
*
* RETURN: Status
*
@@ -409,7 +409,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
* PARAMETERS: out_buffer - A buffer to receive the resources for the
* device
*
- * RETURN: Status - the status of the call
+ * RETURN: status - the status of the call
*
* DESCRIPTION: This function is called to get information about the current
* state of the ACPI subsystem. It will return system information
@@ -480,8 +480,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_system_info)
*
* FUNCTION: acpi_install_initialization_handler
*
- * PARAMETERS: Handler - Callback procedure
- * Function - Not (currently) used, see below
+ * PARAMETERS: handler - Callback procedure
+ * function - Not (currently) used, see below
*
* RETURN: Status
*
@@ -618,7 +618,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_interface)
*
* FUNCTION: acpi_install_interface_handler
*
- * PARAMETERS: Handler - The _OSI interface handler to install
+ * PARAMETERS: handler - The _OSI interface handler to install
* NULL means "remove existing handler"
*
* RETURN: Status
@@ -651,9 +651,9 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
* FUNCTION: acpi_check_address_range
*
* PARAMETERS: space_id - Address space ID
- * Address - Start address
- * Length - Length
- * Warn - TRUE if warning on overlap desired
+ * address - Start address
+ * length - Length
+ * warn - TRUE if warning on overlap desired
*
* RETURN: Count of the number of conflicts detected.
*
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 52b568af1819..6d63cc39b9ae 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("utxferror")
* This module is used for the in-kernel ACPICA as well as the ACPICA
* tools/applications.
*
- * For the i_aSL compiler case, the output is redirected to stderr so that
+ * For the iASL compiler case, the output is redirected to stderr so that
* any of the various ACPI errors and warnings do not appear in the output
* files, for either the compiler or disassembler portions of the tool.
*/
@@ -70,7 +70,7 @@ extern FILE *acpi_gbl_output_file;
#else
/*
- * non-i_aSL case - no redirection, nothing to do
+ * non-iASL case - no redirection, nothing to do
*/
#define ACPI_MSG_REDIRECT_BEGIN
#define ACPI_MSG_REDIRECT_END
@@ -82,6 +82,8 @@ extern FILE *acpi_gbl_output_file;
#define ACPI_MSG_EXCEPTION "ACPI Exception: "
#define ACPI_MSG_WARNING "ACPI Warning: "
#define ACPI_MSG_INFO "ACPI: "
+#define ACPI_MSG_BIOS_ERROR "ACPI BIOS Bug: Error: "
+#define ACPI_MSG_BIOS_WARNING "ACPI BIOS Bug: Warning: "
/*
* Common message suffix
*/
@@ -93,7 +95,7 @@ extern FILE *acpi_gbl_output_file;
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
@@ -124,8 +126,8 @@ ACPI_EXPORT_SYMBOL(acpi_error)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Status - Status to be formatted
- * Format - Printf format string + additional args
+ * status - Status to be formatted
+ * format - Printf format string + additional args
*
* RETURN: None
*
@@ -159,7 +161,7 @@ ACPI_EXPORT_SYMBOL(acpi_exception)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
@@ -190,7 +192,7 @@ ACPI_EXPORT_SYMBOL(acpi_warning)
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
@@ -218,6 +220,72 @@ acpi_info(const char *module_name, u32 line_number, const char *format, ...)
ACPI_EXPORT_SYMBOL(acpi_info)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_bios_error
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Firmware Error" message with module/line/version
+ * info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_error(const char *module_name,
+ u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_bios_error)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_bios_warning
+ *
+ * PARAMETERS: module_name - Caller's module name (for error output)
+ * line_number - Caller's line number (for error output)
+ * format - Printf format string + additional args
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print "ACPI Firmware Warning" message with module/line/version
+ * info
+ *
+ ******************************************************************************/
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_warning(const char *module_name,
+ u32 line_number, const char *format, ...)
+{
+ va_list arg_list;
+
+ ACPI_MSG_REDIRECT_BEGIN;
+ acpi_os_printf(ACPI_MSG_BIOS_WARNING);
+
+ va_start(arg_list, format);
+ acpi_os_vprintf(format, arg_list);
+ ACPI_MSG_SUFFIX;
+ va_end(arg_list);
+
+ ACPI_MSG_REDIRECT_END;
+}
+
+ACPI_EXPORT_SYMBOL(acpi_bios_warning)
+
/*
* The remainder of this module contains internal error functions that may
* be configured out.
@@ -271,9 +339,9 @@ acpi_ut_predefined_warning(const char *module_name,
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Pathname - Full pathname to the node
+ * pathname - Full pathname to the node
* node_flags - From Namespace node for the method/object
- * Format - Printf format string + additional args
+ * format - Printf format string + additional args
*
* RETURN: None
*
@@ -373,9 +441,9 @@ acpi_ut_namespace_error(const char *module_name,
*
* PARAMETERS: module_name - Caller's module name (for error output)
* line_number - Caller's line number (for error output)
- * Message - Error message to use on failure
+ * message - Error message to use on failure
* prefix_node - Prefix relative to the path
- * Path - Path to the node (optional)
+ * path - Path to the node (optional)
* method_status - Execution status
*
* RETURN: None
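
[Editor's note, not part of the patch] acpi_bios_error() and acpi_bios_warning() take the same module/line/format arguments as acpi_error() and acpi_warning(), but prefix the message with "ACPI BIOS Bug:". A hedged usage sketch; the literal module name and line number stand in for the AE_INFO macro ACPICA callers normally use:

	static void report_firmware_bug(u32 declared, u32 actual)
	{
		/* Prints "ACPI BIOS Bug: Warning: ..." with module/line/version. */
		acpi_bios_warning("example", 1,
				  "Register length mismatch: declared %u, actual %u",
				  declared, actual);
	}
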
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 1427d191d15a..0a40a851b354 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -58,8 +58,8 @@ acpi_ut_get_mutex_object(acpi_handle handle,
*
* FUNCTION: acpi_ut_get_mutex_object
*
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
+ * PARAMETERS: handle - Mutex or prefix handle (optional)
+ * pathname - Mutex pathname (optional)
* ret_obj - Where the mutex object is returned
*
* RETURN: Status
@@ -118,9 +118,9 @@ acpi_ut_get_mutex_object(acpi_handle handle,
*
* FUNCTION: acpi_acquire_mutex
*
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
- * Timeout - Max time to wait for the lock (millisec)
+ * PARAMETERS: handle - Mutex or prefix handle (optional)
+ * pathname - Mutex pathname (optional)
+ * timeout - Max time to wait for the lock (millisec)
*
* RETURN: Status
*
@@ -155,8 +155,8 @@ acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
*
* FUNCTION: acpi_release_mutex
*
- * PARAMETERS: Handle - Mutex or prefix handle (optional)
- * Pathname - Mutex pathname (optional)
+ * PARAMETERS: handle - Mutex or prefix handle (optional)
+ * pathname - Mutex pathname (optional)
*
* RETURN: Status
*
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6686b1eaf13e..00a783661d0b 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -586,6 +586,11 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
}
*access_bit_width = 1UL << (access_size_code + 2);
+ /* Fixup common BIOS bug */
+ if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
+ *access_bit_width < 32)
+ *access_bit_width = 32;
+
if ((bit_width + bit_offset) > *access_bit_width) {
pr_warning(FW_BUG APEI_PFX
"Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 023f9c8534d0..45e3e1759fb8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -250,6 +250,13 @@ static int acpi_battery_get_property(struct power_supply *psy,
else
val->intval = battery->capacity_now * 1000;
break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (battery->capacity_now && battery->full_charge_capacity)
+ val->intval = battery->capacity_now * 100/
+ battery->full_charge_capacity;
+ else
+ val->intval = 0;
+ break;
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = battery->model_number;
break;
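
[Editor's note, not part of the patch] POWER_SUPPLY_PROP_CAPACITY reports remaining charge as an integer percentage of the last full charge, falling back to 0 when either value is unknown. A minimal sketch of the same calculation; the helper name is illustrative:

	static int battery_capacity_percent(int capacity_now, int full_charge_capacity)
	{
		if (!capacity_now || !full_charge_capacity)
			return 0;	/* unknown, mirroring the driver's fallback */

		return capacity_now * 100 / full_charge_capacity;
	}
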
@@ -276,6 +283,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -292,6 +300,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
@@ -1043,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
/* this is needed to learn about changes made in suspended state */
static int acpi_battery_resume(struct device *dev)
{
@@ -1059,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index adceafda9c17..9628652e080c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -574,6 +574,10 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
#endif
+#ifdef ACPI_HOTPLUG_OST
+ capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+#endif
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 79d4c22f7a6d..314a3b84bbc7 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device, int type);
static void acpi_button_notify(struct acpi_device *device, u32 event);
+#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
static struct acpi_driver acpi_button_driver = {
@@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
@@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev)
return acpi_lid_send_state(device);
return 0;
}
+#endif
static int acpi_button_add(struct acpi_device *device)
{
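
[Editor's note, not part of the patch] Several drivers in this series (battery, button, fan, power, sbs) wrap their suspend/resume callbacks in #ifdef CONFIG_PM_SLEEP because SIMPLE_DEV_PM_OPS only references them when sleep support is built in; without the guard the functions would be defined but unused. A generic sketch of the pattern; driver and function names are placeholders:

	#include <linux/device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int example_resume(struct device *dev)
	{
		/* Re-read state that may have changed while suspended. */
		return 0;
	}
	#endif

	/* Expands to an empty ops table when CONFIG_PM_SLEEP is not set. */
	static SIMPLE_DEV_PM_OPS(example_pm, NULL, example_resume);
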
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 45cd03b4630e..1f9f7d7d7bc5 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -158,9 +158,7 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
int result;
int present;
acpi_status status;
-
-
- present = is_device_present(handle);
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -169,32 +167,47 @@ static void container_notify_cb(acpi_handle handle, u32 type, void *context)
printk(KERN_WARNING "Container driver received %s event\n",
(type == ACPI_NOTIFY_BUS_CHECK) ?
"ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
+
+ present = is_device_present(handle);
status = acpi_bus_get_device(handle, &device);
- if (present) {
- if (ACPI_FAILURE(status) || !device) {
- result = container_device_add(&device, handle);
- if (!result)
- kobject_uevent(&device->dev.kobj,
- KOBJ_ONLINE);
- else
- printk(KERN_WARNING
- "Failed to add container\n");
- }
- } else {
+ if (!present) {
if (ACPI_SUCCESS(status)) {
/* device exist and this is a remove request */
+ device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ return;
}
+ break;
+ }
+
+ if (!ACPI_FAILURE(status) || device)
+ break;
+
+ result = container_device_add(&device, handle);
+ if (result) {
+ printk(KERN_WARNING "Failed to add container\n");
+ break;
}
+
+ kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
+ ost_code = ACPI_OST_SC_SUCCESS;
break;
+
case ACPI_NOTIFY_EJECT_REQUEST:
if (!acpi_bus_get_device(handle, &device) && device) {
+ device->flags.eject_pending = 1;
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ return;
}
break;
+
default:
- break;
+ /* non-hotplug event; possibly handled by other handler */
+ return;
}
+
+ /* Inform firmware that the hotplug operation has completed */
+ (void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
return;
}
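
[Editor's note, not part of the patch] Both the container and processor notify handlers now finish by telling firmware how the hotplug request went, via _OST. A hedged sketch of that completion step; the wrapper function is illustrative, while acpi_evaluate_hotplug_ost() and the ACPI_OST_SC_* codes come from this series:

	static void report_hotplug_result(acpi_handle handle, u32 event, bool success)
	{
		u32 ost_code = success ? ACPI_OST_SC_SUCCESS
				       : ACPI_OST_SC_NON_SPECIFIC_FAILURE;

		/* Inform firmware that the hotplug operation has completed. */
		(void)acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
	}
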
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 669d9ee80d16..bc36a476f1ab 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
static struct acpi_driver acpi_fan_driver = {
@@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev)
{
if (!dev)
@@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev)
return result;
}
+#endif
static int __init acpi_fan_init(void)
{
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e56f3be7b07d..cb31298ca684 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header,
return 0;
}
+static int __initdata parsed_numa_memblks;
+
static int __init
acpi_parse_memory_affinity(struct acpi_subtable_header * header,
const unsigned long end)
@@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
acpi_table_print_srat_entry(header);
/* let architecture-dependent part to do it */
- acpi_numa_memory_affinity_init(memory_affinity);
-
+ if (!acpi_numa_memory_affinity_init(memory_affinity))
+ parsed_numa_memblks++;
return 0;
}
@@ -304,8 +306,10 @@ int __init acpi_numa_init(void)
acpi_numa_arch_fixup();
- if (cnt <= 0)
- return cnt ?: -ENOENT;
+ if (cnt < 0)
+ return cnt;
+ else if (!parsed_numa_memblks)
+ return -ENOENT;
return 0;
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c3881b2eb8b2..9eaf708f5885 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -891,7 +891,7 @@ static void acpi_os_execute_deferred(struct work_struct *work)
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
if (dpc->wait)
- acpi_os_wait_events_complete(NULL);
+ acpi_os_wait_events_complete();
dpc->function(dpc->context);
kfree(dpc);
@@ -987,7 +987,7 @@ acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
return __acpi_os_execute(0, function, context, 1);
}
-void acpi_os_wait_events_complete(void *context)
+void acpi_os_wait_events_complete(void)
{
flush_workqueue(kacpid_wq);
flush_workqueue(kacpi_notify_wq);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ec54014c321c..72a2c98bc429 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
if (pci_msi_enabled())
flags |= OSC_MSI_SUPPORT;
- if (flags != base_flags)
- acpi_pci_osc_support(root, flags);
+ if (flags != base_flags) {
+ status = acpi_pci_osc_support(root, flags);
+ if (ACPI_FAILURE(status)) {
+ dev_info(root->bus->bridge, "ACPI _OSC support "
+ "notification failed, disabling PCIe ASPM\n");
+ pcie_no_aspm();
+ flags = base_flags;
+ }
+ }
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 215ecd097408..fc1803414629 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, power_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_power_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
static struct acpi_driver acpi_power_driver = {
@@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_power_resume(struct device *dev)
{
int result = 0, state;
@@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev)
return result;
}
+#endif
int __init acpi_power_init(void)
{
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 7048b97853e0..bfc31cb0dd3e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
/* Normal CPU soft online event */
} else {
acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_cst_has_changed(pr);
+ acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
@@ -696,9 +696,9 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
{
struct acpi_processor *pr;
struct acpi_device *device = NULL;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
int result;
-
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
@@ -710,14 +710,18 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!is_processor_present(handle))
break;
- if (acpi_bus_get_device(handle, &device)) {
- result = acpi_processor_device_add(handle, &device);
- if (result)
- printk(KERN_ERR PREFIX
- "Unable to add the device\n");
+ if (!acpi_bus_get_device(handle, &device))
+ break;
+
+ result = acpi_processor_device_add(handle, &device);
+ if (result) {
+ printk(KERN_ERR PREFIX "Unable to add the device\n");
break;
}
+
+ ost_code = ACPI_OST_SC_SUCCESS;
break;
+
case ACPI_NOTIFY_EJECT_REQUEST:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"received ACPI_NOTIFY_EJECT_REQUEST\n"));
@@ -731,15 +735,23 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
if (!pr) {
printk(KERN_ERR PREFIX
"Driver data is NULL, dropping EJECT\n");
- return;
+ break;
}
+
+ /* REVISIT: update when eject is supported */
+ ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
break;
+
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Unsupported event [0x%x]\n", event));
- break;
+
+ /* non-hotplug event; possibly handled by other handler */
+ return;
}
+ /* Inform firmware that the hotplug operation has completed */
+ (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
return;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e589c1985248..ad3730b4038b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -301,16 +301,16 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
/* determine latencies from FADT */
- pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
- pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
+ pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
+ pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
/*
* FADT specified C2 latency must be less than or equal to
* 100 microseconds.
*/
- if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+ if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
+ "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
/* invalidate C2 */
pr->power.states[ACPI_STATE_C2].address = 0;
}
@@ -319,9 +319,9 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
* FADT supplied C3 latency must be less than or equal to
* 1000 microseconds.
*/
- if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+ if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
+ "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
/* invalidate C3 */
pr->power.states[ACPI_STATE_C3].address = 0;
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index c0b9aa5faf4c..ff0740e0a9c2 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void)
#endif
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_sbs_resume(struct device *dev)
{
struct acpi_sbs *sbs;
@@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fdda49336560..d1ecca2b641a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -83,19 +83,29 @@ acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, cha
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-static void acpi_bus_hot_remove_device(void *context)
+/**
+ * acpi_bus_hot_remove_device: hot-remove a device and its children
+ * @context: struct acpi_eject_event pointer (freed in this func)
+ *
+ * Hot-remove a device and its children. This function frees up the
+ * memory space passed by arg context, so that the caller may call
+ * this function asynchronously through acpi_os_hotplug_execute().
+ */
+void acpi_bus_hot_remove_device(void *context)
{
+ struct acpi_eject_event *ej_event = (struct acpi_eject_event *) context;
struct acpi_device *device;
- acpi_handle handle = context;
+ acpi_handle handle = ej_event->handle;
struct acpi_object_list arg_list;
union acpi_object arg;
acpi_status status = AE_OK;
+ u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
if (acpi_bus_get_device(handle, &device))
- return;
+ goto err_out;
if (!device)
- return;
+ goto err_out;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Hot-removing device %s...\n", dev_name(&device->dev)));
@@ -103,7 +113,7 @@ static void acpi_bus_hot_remove_device(void *context)
if (acpi_bus_trim(device, 1)) {
printk(KERN_ERR PREFIX
"Removing device failed\n");
- return;
+ goto err_out;
}
/* power off device */
@@ -129,10 +139,21 @@ static void acpi_bus_hot_remove_device(void *context)
* TBD: _EJD support.
*/
status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
- if (ACPI_FAILURE(status))
- printk(KERN_WARNING PREFIX
- "Eject device failed\n");
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ printk(KERN_WARNING PREFIX
+ "Eject device failed\n");
+ goto err_out;
+ }
+
+ kfree(context);
+ return;
+err_out:
+ /* Inform firmware the hot-remove operation has completed w/ error */
+ (void) acpi_evaluate_hotplug_ost(handle,
+ ej_event->event, ost_code, NULL);
+ kfree(context);
return;
}
@@ -144,6 +165,7 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
acpi_status status;
acpi_object_type type = 0;
struct acpi_device *acpi_device = to_acpi_device(d);
+ struct acpi_eject_event *ej_event;
if ((!count) || (buf[0] != '1')) {
return -EINVAL;
@@ -160,7 +182,25 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
goto err;
}
- acpi_os_hotplug_execute(acpi_bus_hot_remove_device, acpi_device->handle);
+ ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
+ if (!ej_event) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ej_event->handle = acpi_device->handle;
+ if (acpi_device->flags.eject_pending) {
+ /* event originated from ACPI eject notification */
+ ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
+ acpi_device->flags.eject_pending = 0;
+ } else {
+ /* event originated from user */
+ ej_event->event = ACPI_OST_EC_OSPM_EJECT;
+ (void) acpi_evaluate_hotplug_ost(ej_event->handle,
+ ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+ }
+
+ acpi_os_hotplug_execute(acpi_bus_hot_remove_device, (void *)ej_event);
err:
return ret;
}
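
[Editor's note, not part of the patch] With acpi_bus_hot_remove_device() now taking a heap-allocated struct acpi_eject_event that it frees itself, callers can queue an eject asynchronously. A hedged sketch of such a caller; the helper name and error mapping are illustrative:

	static int queue_hot_remove(acpi_handle handle, u32 event)
	{
		struct acpi_eject_event *ej_event;

		ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
		if (!ej_event)
			return -ENOMEM;

		ej_event->handle = handle;
		ej_event->event = event;

		/* acpi_bus_hot_remove_device() frees ej_event when it runs. */
		if (ACPI_FAILURE(acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
							 ej_event))) {
			kfree(ej_event);
			return -EIO;
		}
		return 0;
	}
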
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 028dd425702c..fdcdbb652915 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,36 +28,7 @@
#include "internal.h"
#include "sleep.h"
-u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
-static unsigned int gts, bfs;
-static int set_param_wake_flag(const char *val, struct kernel_param *kp)
-{
- int ret = param_set_int(val, kp);
-
- if (ret)
- return ret;
-
- if (kp->arg == (const char *)&gts) {
- if (gts)
- wake_sleep_flags |= ACPI_EXECUTE_GTS;
- else
- wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
- }
- if (kp->arg == (const char *)&bfs) {
- if (bfs)
- wake_sleep_flags |= ACPI_EXECUTE_BFS;
- else
- wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
- }
- return ret;
-}
-module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
-module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
static u8 sleep_states[ACPI_S_STATE_COUNT];
-static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+static bool pwr_btn_event_pending;
/*
* The ACPI specification wants us to save NVS memory regions during hibernation
@@ -143,7 +115,7 @@ void __init acpi_old_suspend_ordering(void)
static int acpi_pm_freeze(void)
{
acpi_disable_all_gpes();
- acpi_os_wait_events_complete(NULL);
+ acpi_os_wait_events_complete();
acpi_ec_block_transactions();
return 0;
}
@@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
- status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
+ status = acpi_enter_sleep_state(acpi_state);
break;
case ACPI_STATE_S3:
@@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* This violates the spec but is required for bug compatibility. */
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(acpi_state);
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
@@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void)
ACPI_FLUSH_CPU_CACHE();
/* This shouldn't return. If it returns, we have a problem */
- status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+ status = acpi_enter_sleep_state(ACPI_STATE_S4);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
@@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void)
* enable it here.
*/
acpi_enable();
- /* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
+ /* Reprogram control registers */
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature) {
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -892,33 +864,7 @@ static void acpi_power_off(void)
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
-}
-
-/*
- * ACPI 2.0 created the optional _GTS and _BFS,
- * but industry adoption has been neither rapid nor broad.
- *
- * Linux gets into trouble when it executes poorly validated
- * paths through the BIOS, so disable _GTS and _BFS by default,
- * but do speak up and offer the option to enable them.
- */
-static void __init acpi_gts_bfs_check(void)
-{
- acpi_handle dummy;
-
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy)))
- {
- printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n");
- printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, "
- "please notify linux-acpi@vger.kernel.org\n");
- }
- if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy)))
- {
- printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n");
- printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, "
- "please notify linux-acpi@vger.kernel.org\n");
- }
+ acpi_enter_sleep_state(ACPI_STATE_S5);
}
int __init acpi_sleep_init(void)
@@ -979,6 +925,5 @@ int __init acpi_sleep_init(void)
* object can also be evaluated when the system enters S5.
*/
register_reboot_notifier(&tts_notifier);
- acpi_gts_bfs_check();
return 0;
}
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 240a24400976..7c3f98ba4afe 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable"))) {
+ if (!strncmp(val, "enable", sizeof("enable") - 1)) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable"))) {
+ if (!strncmp(val, "disable", sizeof("disable") - 1)) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
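The sysfs.c change swaps strlen() for sizeof() - 1 so the prefix length becomes a compile-time constant. A standalone illustration of why the two are equivalent for string literals (plain userspace C, not kernel code):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* sizeof a string literal counts the terminating NUL, hence -1. */
            assert(sizeof("enable") - 1 == strlen("enable"));
            printf("prefix length: %zu\n", sizeof("enable") - 1);
            return 0;
    }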
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 21dd4c268aef..edda74a43406 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
static struct acpi_driver acpi_thermal_driver = {
@@ -552,8 +554,6 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static const char enabled[] = "kernel";
-static const char disabled[] = "user";
static int thermal_get_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode *mode)
{
@@ -590,8 +590,8 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
if (enable != tz->tz_enabled) {
tz->tz_enabled = enable;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "%s ACPI thermal control\n",
- tz->tz_enabled ? enabled : disabled));
+ "%s kernel ACPI thermal control\n",
+ tz->tz_enabled ? "Enable" : "Disable"));
acpi_thermal_check(tz);
}
return 0;
@@ -847,7 +847,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
if (tz->trips.passive.flags.valid)
tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, tz,
+ thermal_zone_device_register("acpitz", trips, 0, tz,
&acpi_thermal_zone_ops,
tz->trips.passive.tc1,
tz->trips.passive.tc2,
@@ -855,7 +855,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
tz->polling_frequency*100);
else
tz->thermal_zone =
- thermal_zone_device_register("acpitz", trips, tz,
+ thermal_zone_device_register("acpitz", trips, 0, tz,
&acpi_thermal_zone_ops,
0, 0, 0,
tz->polling_frequency*100);
@@ -1043,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
@@ -1077,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev)
return AE_OK;
}
+#endif
static int thermal_act(const struct dmi_system_id *d) {
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index adbbc1c80a26..3e87c9c538aa 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -412,3 +412,45 @@ out:
return status;
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
+
+/**
+ * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
+ * @handle: ACPI device handle
+ * @source_event: source event code
+ * @status_code: status code
+ * @status_buf: optional detailed information (NULL if none)
+ *
+ * Evaluate _OST for hotplug operations. All ACPI hotplug handlers
+ * must call this function when evaluating _OST for hotplug operations.
+ * When the platform does not support _OST, this function has no effect.
+ */
+acpi_status
+acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
+ u32 status_code, struct acpi_buffer *status_buf)
+{
+#ifdef ACPI_HOTPLUG_OST
+ union acpi_object params[3] = {
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_INTEGER,},
+ {.type = ACPI_TYPE_BUFFER,}
+ };
+ struct acpi_object_list arg_list = {3, params};
+ acpi_status status;
+
+ params[0].integer.value = source_event;
+ params[1].integer.value = status_code;
+ if (status_buf != NULL) {
+ params[2].buffer.pointer = status_buf->pointer;
+ params[2].buffer.length = status_buf->length;
+ } else {
+ params[2].buffer.pointer = NULL;
+ params[2].buffer.length = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+ return status;
+#else
+ return AE_OK;
+#endif
+}
+EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
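A sketch of how a hotplug notify handler might use the new helper. The handler itself and its failure condition are illustrative assumptions; only acpi_evaluate_hotplug_ost() and the _OST status codes referenced elsewhere in this patch are taken from the series.

    static void example_eject_notify(acpi_handle handle, u32 event)
    {
            int err;

            if (event != ACPI_NOTIFY_EJECT_REQUEST)
                    return;

            /* Tell the platform the eject was accepted and is in progress. */
            acpi_evaluate_hotplug_ost(handle, event,
                                      ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);

            err = -ENODEV;  /* stand-in for the real hot-remove work */
            if (err)
                    /* Report failure so firmware does not wait forever. */
                    acpi_evaluate_hotplug_ost(handle, event,
                                              ACPI_OST_SC_NON_SPECIFIC_FAILURE,
                                              NULL);
    }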
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 45d8097ef4cf..b728880ef10e 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -132,6 +132,33 @@ find_video(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
+/* Force use of the vendor backlight driver when the ACPI video device is
+ * known to be buggy */
+static int video_detect_force_vendor(const struct dmi_system_id *d)
+{
+ acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+ return 0;
+}
+
+static struct dmi_system_id video_detect_dmi_table[] = {
+ /* On the Samsung X360, the BIOS sets a flag (VDRV) if the generic
+ * ACPI backlight device is used. This flag definitively breaks
+ * the backlight interface (even the vendor interface) until the next
+ * reboot. That is why we must prevent video.ko from being used here
+ * and cannot rely on a later call to acpi_video_unregister().
+ */
+ {
+ .callback = video_detect_force_vendor,
+ .ident = "X360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
+ DMI_MATCH(DMI_BOARD_NAME, "X360"),
+ },
+ },
+ { },
+};
+
/*
* Returns the video capabilities of a specific ACPI graphics device
*
@@ -164,6 +191,8 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
* ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
*}
*/
+
+ dmi_check_system(video_detect_dmi_table);
} else {
status = acpi_bus_get_device(graphics_handle, &tmp_dev);
if (ACPI_FAILURE(status)) {
@@ -182,8 +211,7 @@ long acpi_video_get_capabilities(acpi_handle graphics_handle)
}
EXPORT_SYMBOL(acpi_video_get_capabilities);
-/* Returns true if video.ko can do backlight switching */
-int acpi_video_backlight_support(void)
+static void acpi_video_caps_check(void)
{
/*
* We must check whether the ACPI graphics device is physically plugged
@@ -191,6 +219,34 @@ int acpi_video_backlight_support(void)
*/
if (!acpi_video_caps_checked)
acpi_video_get_capabilities(NULL);
+}
+
+/* Promote the vendor interface instead of the generic video module.
+ * This function allows DMI blacklists to be implemented by external
+ * platform drivers instead of putting a big blacklist in video_detect.c.
+ * After calling this function you will probably want to call
+ * acpi_video_unregister() to make sure the video module is not loaded.
+ */
+void acpi_video_dmi_promote_vendor(void)
+{
+ acpi_video_caps_check();
+ acpi_video_support |= ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+}
+EXPORT_SYMBOL(acpi_video_dmi_promote_vendor);
+
+/* To be called by a driver that previously promoted the vendor
+ * interface and wants to revert that promotion */
+void acpi_video_dmi_demote_vendor(void)
+{
+ acpi_video_caps_check();
+ acpi_video_support &= ~ACPI_VIDEO_BACKLIGHT_DMI_VENDOR;
+}
+EXPORT_SYMBOL(acpi_video_dmi_demote_vendor);
+
+/* Returns true if video.ko can do backlight switching */
+int acpi_video_backlight_support(void)
+{
+ acpi_video_caps_check();
/* First check for boot param -> highest prio */
if (acpi_video_support & ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR)
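A sketch of how an external platform driver might use the new vendor-promotion hook. The probe function and DMI table name are placeholders; acpi_video_unregister() is assumed to be the existing unregister helper already referred to in the comments above.

    static int example_platform_probe(struct platform_device *pdev)
    {
            /* Prefer this driver's own backlight on known-broken firmware. */
            if (dmi_check_system(example_backlight_dmi_table)) {
                    acpi_video_dmi_promote_vendor();
                    /* If video.ko already registered a backlight, drop it. */
                    acpi_video_unregister();
            }
            return 0;
    }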
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index b7e728517284..e8eb91bd0d28 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -16,9 +16,9 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
+#include <linux/sizes.h>
#include <asm/irq.h>
-#include <asm/sizes.h>
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2be8ef1d3093..27cecd313e75 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -115,7 +115,7 @@ config SATA_SIL24
If unsure, say N.
config ATA_SFF
- bool "ATA SFF support"
+ bool "ATA SFF support (for legacy IDE and PATA)"
default y
help
This option adds support for ATA controllers with SFF
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 062e6a1a248f..50d5dea0ff59 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -256,6 +256,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
{ PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index c2594ddf25b0..57eb1c212a4c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
extern struct ata_port_operations ahci_ops;
extern struct ata_port_operations ahci_pmp_retry_srst_ops;
+unsigned int ahci_dev_classify(struct ata_port *ap);
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 3c809bfbccf5..ef773e12af79 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (DH89xxCC) */
{ 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
{ } /* terminate list */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f9eaa82311a9..555c07afa05b 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev)
}
}
-static unsigned int ahci_dev_classify(struct ata_port *ap)
+unsigned int ahci_dev_classify(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
@@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
return ata_dev_classify(&tf);
}
+EXPORT_SYMBOL_GPL(ahci_dev_classify);
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts)
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 902b5a457170..fd9ecf74e631 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
if (ap->flags & ATA_FLAG_ACPI_SATA)
return NULL;
- /*
- * If acpi bind operation has already happened, we can get the handle
- * for the port by checking the corresponding scsi_host device's
- * firmware node, otherwise we will need to find out the handle from
- * its parent's acpi node.
- */
- if (ap->scsi_host)
- return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev);
- else
- return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev),
- ap->port_no);
+ return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
}
EXPORT_SYMBOL(ata_ap_acpi_handle);
@@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
if (!*handle)
return -ENODEV;
+ if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+ ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
+
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fadd5866d40f..8e1039c8e159 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
- { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
+ { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
@@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Devices that do not need bridging limits applied */
{ "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
+ { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
/* Devices which aren't very happy with higher link speeds */
{ "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index ac6a5beb28f3..bfaa5cb1629a 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -184,10 +184,8 @@
struct arasan_cf_dev {
/* pointer to ata_host structure */
struct ata_host *host;
- /* clk structure, only if HAVE_CLK is defined */
-#ifdef CONFIG_HAVE_CLK
+ /* clk structure */
struct clk *clk;
-#endif
/* physical base address of controller */
dma_addr_t pbase;
@@ -312,13 +310,11 @@ static int cf_init(struct arasan_cf_dev *acdev)
unsigned long flags;
int ret = 0;
-#ifdef CONFIG_HAVE_CLK
ret = clk_enable(acdev->clk);
if (ret) {
dev_dbg(acdev->host->dev, "clock enable failed");
return ret;
}
-#endif
spin_lock_irqsave(&acdev->host->lock, flags);
/* configure CF interface clock */
@@ -344,9 +340,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
acdev->vbase + OP_MODE);
spin_unlock_irqrestore(&acdev->host->lock, flags);
-#ifdef CONFIG_HAVE_CLK
clk_disable(acdev->clk);
-#endif
}
static void dma_callback(void *dev)
@@ -828,13 +822,11 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
return -ENOMEM;
}
-#ifdef CONFIG_HAVE_CLK
acdev->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(acdev->clk)) {
dev_warn(&pdev->dev, "Clock not found\n");
return PTR_ERR(acdev->clk);
}
-#endif
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
@@ -899,9 +891,7 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
&arasan_cf_sht);
free_clk:
-#ifdef CONFIG_HAVE_CLK
clk_put(acdev->clk);
-#endif
return ret;
}
@@ -912,9 +902,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
ata_host_detach(host);
cf_exit(acdev);
-#ifdef CONFIG_HAVE_CLK
clk_put(acdev->clk);
-#endif
return 0;
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 361c75cea57b..24e51056ac26 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
+#include <linux/dmi.h>
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.6"
@@ -33,11 +34,26 @@ enum {
ATIIXP_IDE_UDMA_MODE = 0x56
};
+static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
+ {
+ /* Board has onboard PATA<->SATA converters */
+ .ident = "MSI E350DM-E33",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
+ DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
+ },
+ },
+ { }
+};
+
static int atiixp_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udma;
+ if (dmi_check_system(attixp_cable_override_dmi_table))
+ return ATA_CBL_PATA40_SHORT;
+
/* Hack from drivers/ide/pci. Really we want to know how to do the
raw detection not play follow the bios mode guess */
pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 24712adf69df..311be18d3f03 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -65,6 +65,8 @@
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -4026,7 +4028,7 @@ static int mv_platform_probe(struct platform_device *pdev)
struct ata_host *host;
struct mv_host_priv *hpriv;
struct resource *res;
- int n_ports = 0;
+ int n_ports = 0, irq = 0;
int rc;
#if defined(CONFIG_HAVE_CLK)
int port;
@@ -4050,8 +4052,14 @@ static int mv_platform_probe(struct platform_device *pdev)
return -EINVAL;
/* allocate host */
- mv_platform_data = pdev->dev.platform_data;
- n_ports = mv_platform_data->n_ports;
+ if (pdev->dev.of_node) {
+ of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ } else {
+ mv_platform_data = pdev->dev.platform_data;
+ n_ports = mv_platform_data->n_ports;
+ irq = platform_get_irq(pdev, 0);
+ }
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
@@ -4109,8 +4117,7 @@ static int mv_platform_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "slots %u ports %d\n",
(unsigned)MV_MAX_Q_DEPTH, host->n_ports);
- rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
- IRQF_SHARED, &mv6_sht);
+ rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
if (!rc)
return 0;
@@ -4205,15 +4212,24 @@ static int mv_platform_resume(struct platform_device *pdev)
#define mv_platform_resume NULL
#endif
+#ifdef CONFIG_OF
+static struct of_device_id mv_sata_dt_ids[] __devinitdata = {
+ { .compatible = "marvell,orion-sata", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
+#endif
+
static struct platform_driver mv_platform_driver = {
- .probe = mv_platform_probe,
- .remove = __devexit_p(mv_platform_remove),
- .suspend = mv_platform_suspend,
- .resume = mv_platform_resume,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
+ .probe = mv_platform_probe,
+ .remove = __devexit_p(mv_platform_remove),
+ .suspend = mv_platform_suspend,
+ .resume = mv_platform_resume,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mv_sata_dt_ids),
+ },
};
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index d4386019af5d..96cce6d53195 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev)
{
printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
dev->number);
- return error;
+ return -ENOMEM;
}
IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
dev->number, iadev->pci->revision, base, iadev->irq);)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9b21469482ae..08b4c5209384 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -196,6 +196,7 @@ config CMA
bool "Contiguous Memory Allocator (EXPERIMENTAL)"
depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
select MIGRATION
+ select MEMORY_ISOLATION
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f338037a4f3d..5e6e00bc1652 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev,
struct va_format *vaf)
{
char dict[128];
+ const char *level_extra = "";
size_t dictlen = 0;
const char *subsys;
@@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev,
"DEVICE=+%s:%s", subsys, dev_name(dev));
}
skip:
+ if (level[2])
+ level_extra = &level[2]; /* skip past KERN_SOH "L" */
+
return printk_emit(0, level[1] - '0',
dictlen ? dict : NULL, dictlen,
- "%s %s: %pV",
- dev_driver_string(dev), dev_name(dev), vaf);
+ "%s %s: %s%pV",
+ dev_driver_string(dev), dev_name(dev),
+ level_extra, vaf);
}
EXPORT_SYMBOL(__dev_printk);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index d91a3a0b2325..deb4a456cf83 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (!err)
/* mark as kernel-created inode */
dentry->d_inode->i_private = &thread;
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
+ done_path_create(&path, dentry);
return err;
}
@@ -218,10 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
/* mark as kernel-created inode */
dentry->d_inode->i_private = &thread;
}
- dput(dentry);
-
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
+ done_path_create(&path, dentry);
return err;
}
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6f3676f1559f..3fbedc75e7c5 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <asm-generic/dma-coherent.h>
/*
* Managed DMA API
@@ -217,4 +218,52 @@ void dmam_release_declared_memory(struct device *dev)
}
EXPORT_SYMBOL(dmam_release_declared_memory);
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size)
+{
+ struct page *page = virt_to_page(cpu_addr);
+ int ret;
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (unlikely(ret))
+ return ret;
+
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+ return 0;
+}
+EXPORT_SYMBOL(dma_common_get_sgtable);
+
#endif
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ int ret = -ENXIO;
+#ifdef CONFIG_MMU
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+ unsigned long off = vma->vm_pgoff;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < count && user_count <= (count - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+#endif /* CONFIG_MMU */
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
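A sketch of how an architecture's dma_map_ops could delegate to the new generic helpers. The dma_map_ops layout (including the struct dma_attrs * argument of the .mmap hook) is assumed from this kernel generation rather than defined by this patch; only dma_common_mmap() itself comes from the hunk above.

    static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                                struct dma_attrs *attrs)
    {
            /* Fall back to the common PFN-remapping implementation. */
            return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
    }

    static struct dma_map_ops example_dma_ops = {
            .mmap   = example_dma_mmap,
            /* .alloc, .free, .map_page, ... as provided by the architecture */
    };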
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 869d7ff2227f..eb78e9640c4a 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev)
*/
int pm_clk_create(struct device *dev)
{
- int ret = dev_pm_get_subsys_data(dev);
- return ret < 0 ? ret : 0;
+ return dev_pm_get_subsys_data(dev);
}
/**
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index a14085cc613f..39c32529b833 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -24,7 +24,6 @@
int dev_pm_get_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
- int ret = 0;
psd = kzalloc(sizeof(*psd), GFP_KERNEL);
if (!psd)
@@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev)
dev->power.subsys_data = psd;
pm_clk_init(dev);
psd = NULL;
- ret = 1;
}
spin_unlock_irq(&dev->power.lock);
@@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev)
/* kfree() verifies that its argument is nonzero. */
kfree(psd);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 59894873a3b3..7d9c1cb1c39a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
+ else if (__dev_pm_qos_read_value(dev) < 0)
+ retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
@@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto repeat;
}
- dev->power.deferred_resume = false;
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
@@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
- if (__dev_pm_qos_read_value(dev) < 0) {
- /* Negative PM QoS constraint means "never suspend". */
- retval = -EPERM;
- goto out;
- }
-
__update_runtime_status(dev, RPM_SUSPENDING);
if (dev->pm_domain)
@@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
rpm_resume(dev, 0);
retval = -EAGAIN;
goto out;
@@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
|| dev->parent->power.runtime_status == RPM_ACTIVE) {
atomic_inc(&dev->parent->power.child_count);
spin_unlock(&dev->parent->power.lock);
+ retval = 1;
goto no_callback; /* Assume success. */
}
spin_unlock(&dev->parent->power.lock);
@@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
wake_up_all(&dev->power.wait_queue);
- if (!retval)
+ if (retval >= 0)
rpm_idle(dev, RPM_ASYNC);
out:
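With the QoS check moved into rpm_check_suspend_allowed(), a negative device PM QoS constraint now blocks runtime suspend before any callbacks run. A sketch of a driver expressing "never runtime-suspend" that way; the three-argument dev_pm_qos_add_request() form is an assumption about this kernel generation, not something this patch introduces.

    static struct dev_pm_qos_request example_qos_req;

    static void example_forbid_runtime_suspend(struct device *dev)
    {
            /* A negative constraint makes rpm_check_suspend_allowed()
             * return -EPERM, so rpm_suspend() refuses to proceed. */
            dev_pm_qos_add_request(dev, &example_qos_req, -1);
    }

    static void example_allow_runtime_suspend(struct device *dev)
    {
            dev_pm_qos_remove_request(&example_qos_req);
    }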
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index b013b049476d..cc65b45b4368 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -131,7 +131,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
/* backplane irq line is in use, find out who uses
* it and set user to irq 0
*/
- list_for_each_entry_reverse(core, &bus->cores, list) {
+ list_for_each_entry(core, &bus->cores, list) {
if ((1 << bcma_core_mips_irqflag(core)) ==
oldirqflag) {
bcma_core_mips_set_irq(core, 0);
@@ -161,7 +161,7 @@ static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
{
struct bcma_device *core;
- list_for_each_entry_reverse(core, &bus->cores, list) {
+ list_for_each_entry(core, &bus->cores, list) {
bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
}
}
@@ -224,7 +224,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
mcore->assigned_irqs = 1;
/* Assign IRQs to all cores on the bus */
- list_for_each_entry_reverse(core, &bus->cores, list) {
+ list_for_each_entry(core, &bus->cores, list) {
int mips_irq;
if (core->irq)
continue;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 11b32d2642df..a6e5672c67e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5672b13d0951..8d0b57164018 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -462,8 +462,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
while (eromptr < eromend) {
struct bcma_device *other_core;
struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
- if (!core)
- return -ENOMEM;
+ if (!core) {
+ err = -ENOMEM;
+ goto out;
+ }
INIT_LIST_HEAD(&core->list);
core->bus = bus;
@@ -478,7 +480,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
} else if (err == -ESPIPE) {
break;
}
- return err;
+ goto out;
}
core->core_index = core_num++;
@@ -494,10 +496,12 @@ int bcma_bus_scan(struct bcma_bus *bus)
list_add_tail(&core->list, &bus->cores);
}
+ err = 0;
+out:
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
iounmap(eromptr);
- return 0;
+ return err;
}
int __init bcma_bus_scan_early(struct bcma_bus *bus,
@@ -537,7 +541,7 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
else if (err == -ESPIPE)
break;
else if (err < 0)
- return err;
+ goto out;
core->core_index = core_num++;
bus->nr_cores++;
@@ -551,6 +555,7 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
break;
}
+out:
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
iounmap(eromptr);
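The bcma scan changes above convert the early returns into a single exit path so the SoC iounmap() always runs, even when a core allocation or scan step fails. The shape of that pattern, reduced to a minimal sketch with illustrative helper names:

    static int example_scan(struct example_bus *bus)
    {
            int err = 0;
            void __iomem *base = example_map(bus);  /* hypothetical helper */

            while (example_more_cores(bus)) {
                    err = example_scan_one(bus, base);
                    if (err)
                            goto out;       /* unmap even on failure */
            }
    out:
            example_unmap(base);
            return err;
    }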
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 26823d97fd9f..9ea4627dc0c2 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
/* for these chips OTP is always available */
present = true;
break;
-
+ case BCMA_CHIP_ID_BCM43228:
+ present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
+ break;
default:
present = false;
break;
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index acda773b3720..38aa6dda6b81 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -763,16 +763,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
{
case CMD_TARGET_STATUS:
/* Pass it up to the upper layers... */
- if( ei->ScsiStatus)
- {
-#if 0
- printk(KERN_WARNING "cciss: cmd %p "
- "has SCSI Status = %x\n",
- c, ei->ScsiStatus);
-#endif
- cmd->result |= (ei->ScsiStatus << 1);
- }
- else { /* scsi status is zero??? How??? */
+ if (!ei->ScsiStatus) {
/* Ordinarily, this case should never happen, but there is a bug
in some released firmware revisions that allows it to happen
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e54e31b02b88..3fbef018ce55 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
- drbd_chk_io_error(mdev, 1, true);
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
if (++mdev->al_tr_pos >
div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
unsigned int enr, count = 0;
struct lc_element *e;
- if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+ /* this should be an empty REQ_FLUSH */
+ if (size == 0)
+ return 0;
+
+ if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fcb956bb4b4c..d84566496746 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -889,6 +889,7 @@ struct bm_aio_ctx {
unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
+#define BM_WRITE_ALL_PAGES 2
int error;
struct kref kref;
};
@@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx)
break;
if (rw & WRITE) {
- if (bm_test_page_unchanged(b->bm_pages[i])) {
+ if (!(flags & BM_WRITE_ALL_PAGES) &&
+ bm_test_page_unchanged(b->bm_pages[i])) {
dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i);
continue;
}
@@ -1096,7 +1098,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
- drbd_chk_io_error(mdev, 1, true);
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}
@@ -1141,6 +1143,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
}
/**
+ * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
+ * @mdev: DRBD device.
+ *
+ * Will write all pages.
+ */
+int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
+}
+
+/**
* drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed.
* @mdev: DRBD device.
* @upper_idx: 0: write all changed pages; +ve: page index to stop scanning for changed pages
@@ -1212,7 +1225,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
if (ctx->error)
- drbd_chk_io_error(mdev, 1, true);
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 02f013a073a7..b953cc7c9c00 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -813,7 +813,6 @@ enum {
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */
- UNPLUG_QUEUED, /* only relevant with kernel 2.4 */
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
CRASHED_PRIMARY, /* This node was a crashed primary.
* Gets cleared when the state.conn
* goes into C_CONNECTED state. */
- NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,
MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
WAS_IO_ERROR, /* Local disk failed returned IO error */
+ FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
NET_CONGESTED, /* The data socket is congested */
@@ -851,6 +850,13 @@ enum {
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
STATE_SENT, /* Do not change state/UUIDs while this is set */
+
+ CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+ * pending, from drbd worker context.
+ * If set, bdi_write_congested() returns true,
+ * so shrink_page_list() would not recurse into,
+ * and potentially deadlock on, this drbd worker.
+ */
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
int rs_planed; /* resync sectors already planned */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
- int peer_max_bio_size;
- int local_max_bio_size;
+ unsigned int peer_max_bio_size;
+ unsigned int local_max_bio_size;
};
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
* hash table. */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
-#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
+#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15
@@ -1463,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
unsigned long al_enr);
@@ -1840,12 +1847,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}
+enum drbd_force_detach_flags {
+ DRBD_IO_ERROR,
+ DRBD_META_IO_ERROR,
+ DRBD_FORCE_DETACH,
+};
+
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+ enum drbd_force_detach_flags forcedetach,
+ const char *where)
{
switch (mdev->ldev->dc.on_io_error) {
case EP_PASS_ON:
- if (!forcedetach) {
+ if (forcedetach == DRBD_IO_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1871,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
case EP_DETACH:
case EP_CALL_HELPER:
set_bit(WAS_IO_ERROR, &mdev->flags);
+ if (forcedetach == DRBD_FORCE_DETACH)
+ set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
@@ -1875,7 +1892,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
*/
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
- int error, int forcedetach, const char *where)
+ int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
if (error) {
unsigned long flags;
@@ -2405,15 +2422,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
D_ASSERT(ap_bio >= 0);
+
+ if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
+ if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+ drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+ }
+
/* this currently does wake_up for every dec_ap_bio!
* maybe rather introduce some type of hysteresis?
* e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
if (ap_bio < mxb)
wake_up(&mdev->misc_wait);
- if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
- drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
- }
}
static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
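As a usage summary of the new enum drbd_force_detach_flags, drawn from the call sites converted later in this series (not new code): data-path errors, meta-data errors and administrative force-detach are now reported distinctly.

    /* Data-path error: may be passed on, depending on the on_io_error policy. */
    __drbd_chk_io_error(mdev, DRBD_IO_ERROR);

    /* Meta-data error: always escalates, but does not abort pending local IO. */
    drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);

    /* Administrative force-detach (drbdsetup detach --force / disk timeout):
     * also sets FORCE_DETACH so pending local IO gets aborted. */
    set_bit(FORCE_DETACH, &mdev->flags);
    drbd_force_state(mdev, NS(disk, D_FAILED));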
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 920ede2829d6..f93a0320e952 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static void _tl_clear(struct drbd_conf *mdev);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
@@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
/* Actions operating on the disk state, also want to work on
requests that got barrier acked. */
- switch (what) {
- case fail_frozen_disk_io:
- case restart_frozen_disk_io:
- list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
- req = list_entry(le, struct drbd_request, tl_requests);
- _req_mod(req, what);
- }
- case connection_lost_while_pending:
- case resend:
- break;
- default:
- dev_err(DEV, "what = %d in _tl_restart()\n", what);
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ _req_mod(req, what);
}
}
@@ -459,11 +451,16 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
*/
void tl_clear(struct drbd_conf *mdev)
{
+ spin_lock_irq(&mdev->req_lock);
+ _tl_clear(mdev);
+ spin_unlock_irq(&mdev->req_lock);
+}
+
+static void _tl_clear(struct drbd_conf *mdev)
+{
struct list_head *le, *tle;
struct drbd_request *r;
- spin_lock_irq(&mdev->req_lock);
-
_tl_restart(mdev, connection_lost_while_pending);
/* we expect this list to be empty. */
@@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev)
memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
- spin_unlock_irq(&mdev->req_lock);
}
void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
@@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (ns.susp_fen) {
/* case1: The outdate peer handler is successful: */
if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- tl_clear(mdev);
if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
drbd_uuid_new_current(mdev);
clear_bit(NEW_CUR_UUID, &mdev->flags);
}
spin_lock_irq(&mdev->req_lock);
+ _tl_clear(mdev);
_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
spin_unlock_irq(&mdev->req_lock);
}
@@ -1514,6 +1510,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
+ /* we probably will start a resync soon.
+ * make sure those things are properly reset. */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+ drbd_rs_cancel_all(mdev);
+
drbd_send_uuids(mdev);
drbd_send_state(mdev, ns);
}
@@ -1630,9 +1633,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
eh = mdev->ldev->dc.on_io_error;
was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
- /* Immediately allow completion of all application IO, that waits
- for completion from the local disk. */
- tl_abort_disk_io(mdev);
+ if (was_io_error && eh == EP_CALL_HELPER)
+ drbd_khelper(mdev, "local-io-error");
+
+ /* Immediately allow completion of all application IO,
+ * that waits for completion from the local disk,
+ * if this was a force-detach due to disk_timeout
+ * or administrator request (drbdsetup detach --force).
+ * Do NOT abort otherwise.
+ * Aborting local requests may cause serious problems,
+ * if requests are completed to upper layers already,
+ * and then later the already submitted local bio completes.
+ * This can cause DMA into former bio pages that meanwhile
+ * have been re-used for other things.
+ * So aborting local requests may cause crashes,
+ * or even worse, silent data corruption.
+ */
+ if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ tl_abort_disk_io(mdev);
/* current state still has to be D_FAILED,
* there is only one way out: to D_DISKLESS,
@@ -1653,9 +1671,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
drbd_md_sync(mdev);
}
put_ldev(mdev);
-
- if (was_io_error && eh == EP_CALL_HELPER)
- drbd_khelper(mdev, "local-io-error");
}
/* second half of local IO error, failure to attach,
@@ -1669,10 +1684,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
"ASSERT FAILED: disk is %s while going diskless\n",
drbd_disk_str(mdev->state.disk));
- mdev->rs_total = 0;
- mdev->rs_failed = 0;
- atomic_set(&mdev->rs_pending_cnt, 0);
-
if (ns.conn >= C_CONNECTED)
drbd_send_state(mdev, ns);
@@ -2194,7 +2205,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
{
struct p_sizes p;
sector_t d_size, u_size;
- int q_order_type, max_bio_size;
+ int q_order_type;
+ unsigned int max_bio_size;
int ok;
if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2215,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
u_size = mdev->ldev->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
- max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
+ max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
put_ldev(mdev);
} else {
d_size = 0;
@@ -2214,7 +2226,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
if (mdev->agreed_pro_version <= 94)
- max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
@@ -3521,9 +3533,9 @@ static void drbd_cleanup(void)
}
/**
- * drbd_congested() - Callback for pdflush
+ * drbd_congested() - Callback for the flusher thread
* @congested_data: User data
- * @bdi_bits: Bits pdflush is currently interested in
+ * @bdi_bits: Bits the BDI flusher thread is currently interested in
*
* Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
*/
@@ -3541,6 +3553,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
+ if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ r |= (1 << BDI_async_congested);
+ /* Without good local data, we would need to read from remote,
+ * and that would need the worker thread as well, which is
+ * currently blocked waiting for that usermode helper to
+ * finish.
+ */
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ r |= (1 << BDI_sync_congested);
+ else
+ put_ldev(mdev);
+ r &= bdi_bits;
+ reason = 'c';
+ goto out;
+ }
+
if (get_ldev(mdev)) {
q = bdev_get_queue(mdev->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3632,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
q->backing_dev_info.congested_data = mdev;
blk_queue_make_request(q, drbd_make_request);
+ blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3899,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
- drbd_chk_io_error(mdev, 1, true);
+ drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
}
/* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3979,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
spin_lock_irq(&mdev->req_lock);
if (mdev->state.conn < C_CONNECTED) {
- int peer;
+ unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
- peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
+ peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
spin_unlock_irq(&mdev->req_lock);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6d4de6a72e80..edb490aad8b4 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
char *argv[] = {usermode_helper, cmd, mb, NULL };
int ret;
+ if (current == mdev->worker.task)
+ set_bit(CALLBACK_PENDING, &mdev->flags);
+
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
if (get_net_conf(mdev)) {
@@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
+ if (current == mdev->worker.task)
+ clear_bit(CALLBACK_PENDING, &mdev->flags);
+
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -668,8 +674,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
- err = drbd_bitmap_io(mdev, &drbd_bm_write,
- "size changed", BM_LOCKED_MASK);
+ err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+ "size changed", BM_LOCKED_MASK);
if (err) {
rv = dev_size_error;
goto out;
@@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
struct request_queue * const q = mdev->rq_queue;
- int max_hw_sectors = max_bio_size >> 9;
- int max_segments = 0;
+ unsigned int max_hw_sectors = max_bio_size >> 9;
+ unsigned int max_segments = 0;
if (get_ldev_if_state(mdev, D_ATTACHING)) {
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
- int now, new, local, peer;
+ unsigned int now, new, local, peer;
now = queue_max_hw_sectors(mdev->rq_queue) << 9;
local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
mdev->local_max_bio_size = local;
put_ldev(mdev);
}
+ local = min(local, DRBD_MAX_BIO_SIZE);
/* We may ignore peer limits if the peer is modern enough.
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
if (mdev->agreed_pro_version < 94) {
- peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
} else if (mdev->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
peer = DRBD_MAX_BIO_SIZE;
}
- new = min_t(int, local, peer);
+ new = min(local, peer);
if (mdev->state.role == R_PRIMARY && new < now)
- dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+ dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
if (new != now)
dev_info(DEV, "max BIO size = %u\n", new);
@@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* to realize a "hot spare" feature (not that I'd recommend that) */
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ /* make sure there is no leftover from previous force-detach attempts */
+ clear_bit(FORCE_DETACH, &mdev->flags);
+
+ /* and no leftover from previously aborted resync or verify, either */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+
/* allocation not in the IO path, cqueue thread context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
@@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
if (dt.detach_force) {
+ set_bit(FORCE_DETACH, &mdev->flags);
drbd_force_state(mdev, NS(disk, D_FAILED));
reply->ret_code = SS_SUCCESS;
goto out;
@@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
int retcode;
/* If there is still bitmap IO pending, probably because of a previous
- * resync just being finished, wait for it before requesting a new resync. */
+ * resync just being finished, wait for it before requesting a new resync.
+ * Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
int retcode;
/* If there is still bitmap IO pending, probably because of a previous
- * resync just being finished, wait for it before requesting a new resync. */
+ * resync just being finished, wait for it before requesting a new resync.
+ * Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 869bada2ed06..5496104f90b9 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
mdev->state.role == R_SECONDARY) {
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
+ /* reset mdev->congestion_reason */
+ bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+
seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
" ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ea4836e0ae98..c74ca2df7431 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
int i;
+ if (page == NULL)
+ return;
+
if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
i = page_chain_free(page);
else {
@@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
gfp_t gfp_mask) __must_hold(local)
{
struct drbd_epoch_entry *e;
- struct page *page;
+ struct page *page = NULL;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
return NULL;
}
- page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
- if (!page)
- goto fail;
+ if (data_size) {
+ page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+ if (!page)
+ goto fail;
+ }
INIT_HLIST_NODE(&e->collision);
e->epoch = NULL;
@@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
data_size -= dgs;
- ERR_IF(data_size == 0) return NULL;
ERR_IF(data_size & 0x1ff) return NULL;
ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
@@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
if (!e)
return NULL;
+ if (!data_size)
+ return e;
+
ds = data_size;
page = e->pages;
page_chain_for_each(page) {
@@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
dp_flags = be32_to_cpu(p->dp_flags);
rw |= wire_flags_to_bio(mdev, dp_flags);
+ if (e->pages == NULL) {
+ D_ASSERT(e->size == 0);
+ D_ASSERT(dp_flags & DP_FLUSH);
+ }
if (dp_flags & DP_MAY_SET_IN_SYNC)
e->flags |= EE_MAY_SET_IN_SYNC;
@@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev)
mdev->ee_hash = NULL;
mdev->ee_hash_s = 0;
- /* paranoia code */
- for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
- if (h->first)
- dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
- (int)(h - mdev->tl_hash), h->first);
+ /* We may not have had the chance to wait for all locally pending
+ * application requests. The hlist_add_fake() prevents access after
+ * free on master bio completion. */
+ for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
+ struct drbd_request *req;
+ struct hlist_node *pos, *n;
+ hlist_for_each_entry_safe(req, pos, n, h, collision) {
+ hlist_del_init(&req->collision);
+ hlist_add_fake(&req->collision);
+ }
+ }
+
kfree(mdev->tl_hash);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
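For reference, a minimal userspace sketch of the hlist_add_fake() idea used in the drbd_free_tl_hash() hunk above: once a request is re-marked as "fake hashed", a later hlist_del() on it writes only into the node itself, so it can no longer touch the freed hash table. The stripped-down list helpers below are simplifications of the <linux/list.h> ones (poisoning and WRITE_ONCE omitted).

#include <stdio.h>
#include <stdlib.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    n->next = h->first;
    if (h->first)
        h->first->pprev = &n->next;
    h->first = n;
    n->pprev = &h->first;
}

static void hlist_del(struct hlist_node *n)
{
    *n->pprev = n->next;
    if (n->next)
        n->next->pprev = n->pprev;
}

/* After this, hlist_del(n) only touches n itself, never a list head. */
static void hlist_add_fake(struct hlist_node *n)
{
    n->pprev = &n->next;
}

int main(void)
{
    struct hlist_head *hash = calloc(1, sizeof(*hash));
    struct hlist_node req;            /* stands in for req->collision */

    hlist_add_head(&req, hash);

    /* tear-down path: unhook the request, then mark it fake-hashed so the
     * completion path may still call hlist_del(&req) after 'hash' is gone */
    hlist_del(&req);
    hlist_add_fake(&req);
    free(hash);                       /* the hash table goes away */

    hlist_del(&req);                  /* safe: writes only into 'req' */
    printf("late hlist_del did not touch freed memory\n");
    return 0;
}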
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8e93a6ac9bb6..01b2ac641c7b 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- __drbd_chk_io_error(mdev, false);
+ __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
_req_may_be_done_not_susp(req, m);
break;
@@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
}
- __drbd_chk_io_error(mdev, false);
+ __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
goto_queue_for_net_read:
@@ -695,6 +695,12 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case resend:
+ /* Simply complete (local only) READs. */
+ if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
+ _req_may_be_done(req, m);
+ break;
+ }
+
/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
before the connection loss (B&C only); only P_BARRIER_ACK was missing.
Throwing them out of the TL here by pretending we got a BARRIER_ACK
@@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
req->private_bio = NULL;
}
if (rw == WRITE) {
- remote = 1;
+ /* Need to replicate writes. Unless it is an empty flush,
+ * which is better mapped to a DRBD P_BARRIER packet,
+ * also for drbd wire protocol compatibility reasons. */
+ if (unlikely(size == 0)) {
+ /* The only size==0 bios we expect are empty flushes. */
+ D_ASSERT(bio->bi_rw & REQ_FLUSH);
+ remote = 0;
+ } else
+ remote = 1;
} else {
/* READ || READA */
if (local) {
@@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* extent. This waits for any resync activity in the corresponding
* resync extent to finish, and, if necessary, pulls in the target
* extent into the activity log, which involves further disk io because
- * of transactional on-disk meta data updates. */
- if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ * of transactional on-disk meta data updates.
+ * Empty flushes don't need to go into the activity log, they can only
+ * flush data for pending writes which are already in there. */
+ if (rw == WRITE && local && size
+ && !test_bit(AL_SUSPENDED, &mdev->flags)) {
req->rq_state |= RQ_IN_ACT_LOG;
drbd_al_begin_io(mdev, sector);
}
@@ -994,7 +1011,10 @@ allocate_barrier:
if (rw == WRITE && _req_conflicts(req))
goto fail_conflicting;
- list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
+ /* no point in adding empty flushes to the transfer log,
+ * they are mapped to drbd barriers already. */
+ if (likely(size!=0))
+ list_add_tail(&req->tl_requests, &mdev->newest_tle->requests);
/* NOTE remote first: to get the concurrent write detection right,
* we must register the request before start of local IO. */
@@ -1014,6 +1034,14 @@ allocate_barrier:
mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
maybe_pull_ahead(mdev);
+ /* If this was a flush, queue a drbd barrier/start a new epoch.
+ * Unless the current epoch was empty anyways, or we are not currently
+ * replicating, in which case there is no point. */
+ if (unlikely(bio->bi_rw & REQ_FLUSH)
+ && mdev->newest_tle->n_writes
+ && drbd_should_do_remote(mdev->state))
+ queue_barrier(mdev);
+
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
@@ -1111,13 +1139,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
/*
* what we "blindly" assume:
*/
- D_ASSERT(bio->bi_size > 0);
D_ASSERT((bio->bi_size & 0x1ff) == 0);
/* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
s_enr = bio->bi_sector >> HT_SHIFT;
- e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
+ e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
if (likely(s_enr == e_enr)) {
do {
@@ -1275,7 +1302,7 @@ void request_timer_fn(unsigned long data)
time_after(now, req->start_time + dt) &&
!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
- __drbd_chk_io_error(mdev, 1);
+ __drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
}
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&mdev->req_lock);
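A compact sketch of the empty-flush classification introduced in the drbd_make_request_common() hunks above, using made-up stand-ins for the block-layer types; the real code additionally requires a non-empty current epoch and active replication before queuing the barrier, and a local backing disk before using the activity log.

#include <stdbool.h>
#include <stdio.h>

#define REQ_FLUSH (1u << 0)            /* stand-in for the block-layer flag */

struct demo_bio { unsigned long rw; unsigned int size; };

/* Classify a write the way the hunks above do: a zero-sized REQ_FLUSH bio
 * is not replicated as data and stays out of the AL and transfer log; it
 * only (possibly) triggers a P_BARRIER, i.e. a new epoch on the peer. */
static void classify_write(const struct demo_bio *bio,
                           bool *remote, bool *use_al, bool *queue_barrier)
{
    bool empty_flush = bio->size == 0 && (bio->rw & REQ_FLUSH);

    *remote        = !empty_flush;     /* normal writes replicate */
    *use_al        = !empty_flush;     /* flushes have nothing to log */
    *queue_barrier = empty_flush;      /* map the flush to a drbd barrier */
}

int main(void)
{
    struct demo_bio flush = { .rw = REQ_FLUSH, .size = 0 };
    struct demo_bio write = { .rw = 0, .size = 4096 };
    bool remote, al, barrier;

    classify_write(&flush, &remote, &al, &barrier);
    printf("empty flush: remote=%d al=%d barrier=%d\n", remote, al, barrier);

    classify_write(&write, &remote, &al, &barrier);
    printf("4k write:    remote=%d al=%d barrier=%d\n", remote, al, barrier);
    return 0;
}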
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 620c70ff2231..6bce2cc179d4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, false);
+ __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
spin_unlock_irqrestore(&mdev->req_lock, flags);
drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
: list_empty(&mdev->active_ee);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, false);
+ __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
spin_unlock_irqrestore(&mdev->req_lock, flags);
if (is_syncer_req)
@@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
- if (mdev->state.conn < C_AHEAD) {
- /* In case a previous resync run was aborted by an IO error/detach on the peer. */
- drbd_rs_cancel_all(mdev);
- /* This should be done when we abort the resync. We definitely do not
- want to have this for connections going back and forth between
- Ahead/Behind and SyncSource/SyncTarget */
- }
-
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 553f43a90953..a7d6347aaa79 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -191,6 +191,7 @@ static int print_unex = 1;
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/async.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -2516,8 +2517,7 @@ static int make_raw_rw_request(void)
set_fdc((long)current_req->rq_disk->private_data);
raw_cmd = &default_raw_cmd;
- raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK |
- FD_RAW_NEED_SEEK;
+ raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
raw_cmd->cmd_count = NR_RW;
if (rq_data_dir(current_req) == READ) {
raw_cmd->flags |= FD_RAW_READ;
@@ -4123,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
return get_disk(disks[drive]);
}
-static int __init floppy_init(void)
+static int __init do_floppy_init(void)
{
int i, unit, drive;
int err, dr;
@@ -4338,6 +4338,24 @@ out_put_disk:
return err;
}
+#ifndef MODULE
+static __init void floppy_async_init(void *data, async_cookie_t cookie)
+{
+ do_floppy_init();
+}
+#endif
+
+static int __init floppy_init(void)
+{
+#ifdef MODULE
+ return do_floppy_init();
+#else
+ /* Don't hold up the bootup by the floppy initialization */
+ async_schedule(floppy_async_init, NULL);
+ return 0;
+#endif
+}
+
static const struct io_region {
int offset;
int size;
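The floppy hunk above defers the slow probe with async_schedule() when built in, and keeps it synchronous for the module case so modprobe still sees the error code. A rough userspace analogue of the built-in path, using a plain thread in place of the kernel's async machinery:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stands in for the real, slow probe/initialisation work. */
static void *do_floppy_init(void *unused)
{
    (void)unused;
    sleep(1);                       /* pretend the hardware probe is slow */
    printf("floppy: probe finished\n");
    return NULL;
}

/* Built-in flavour of floppy_init(): hand the slow probe to a worker and
 * return immediately, so the rest of "boot" is not held up. */
static int floppy_init(pthread_t *worker)
{
    return pthread_create(worker, NULL, do_floppy_init, NULL);
}

int main(void)
{
    pthread_t worker;

    if (floppy_init(&worker) != 0)
        return 1;
    printf("boot: floppy_init() returned, continuing with other drivers\n");

    pthread_join(worker, NULL);     /* only so the demo exits cleanly */
    return 0;
}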
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 061427a75d37..d07c9f7fded6 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -154,6 +154,7 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
struct msghdr msg;
struct kvec iov;
sigset_t blocked, oldset;
+ unsigned long pflags = current->flags;
if (unlikely(!sock)) {
dev_err(disk_to_dev(nbd->disk),
@@ -167,8 +168,9 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
siginitsetinv(&blocked, sigmask(SIGKILL));
sigprocmask(SIG_SETMASK, &blocked, &oldset);
+ current->flags |= PF_MEMALLOC;
do {
- sock->sk->sk_allocation = GFP_NOIO;
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
iov.iov_base = buf;
iov.iov_len = size;
msg.msg_name = NULL;
@@ -214,6 +216,7 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
} while (size > 0);
sigprocmask(SIG_SETMASK, &oldset, NULL);
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
return result;
}
@@ -405,6 +408,7 @@ static int nbd_do_it(struct nbd_device *nbd)
BUG_ON(nbd->magic != NBD_MAGIC);
+ sk_set_memalloc(nbd->sock->sk);
nbd->pid = task_pid_nr(current);
ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
if (ret) {
@@ -481,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
nbd_end_request(req);
} else {
spin_lock(&nbd->queue_lock);
- list_add(&req->queuelist, &nbd->queue_head);
+ list_add_tail(&req->queuelist, &nbd->queue_head);
spin_unlock(&nbd->queue_lock);
}
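The nbd changes above let the transmit path dip into memory reserves (PF_MEMALLOC, __GFP_MEMALLOC, sk_set_memalloc()) so writeback through the device cannot deadlock on allocation, and afterwards restore only that one flag bit. A small stand-alone illustration of the tsk_restore_flags()-style save/restore, with a stand-in flag value:

#include <stdio.h>

#define PF_MEMALLOC 0x00000800u        /* stand-in for the task flag */

/* Mirrors the kernel's tsk_restore_flags(): restore only the bits in
 * 'mask' from the saved copy, leaving every other flag as it is now. */
static unsigned int restore_flags(unsigned int cur, unsigned int saved,
                                  unsigned int mask)
{
    return (cur & ~mask) | (saved & mask);
}

int main(void)
{
    unsigned int flags  = 0x00000040u;  /* some unrelated flags already set */
    unsigned int pflags = flags;        /* snapshot, as sock_xmit() does    */

    flags |= PF_MEMALLOC;               /* allowed to use memory reserves   */
    /* ... transmit with GFP_NOIO | __GFP_MEMALLOC allocations here ...     */
    flags = restore_flags(flags, pflags, PF_MEMALLOC);

    printf("PF_MEMALLOC restored to %s\n",
           (flags & PF_MEMALLOC) ? "set" : "clear");
    return 0;
}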
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8f428a8ab003..9917943a3572 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -55,8 +55,6 @@
#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
-#define RBD_MAX_MD_NAME_LEN (RBD_MAX_OBJ_NAME_LEN + sizeof(RBD_SUFFIX))
-#define RBD_MAX_POOL_NAME_LEN 64
#define RBD_MAX_SNAP_NAME_LEN 32
#define RBD_MAX_OPT_LEN 1024
@@ -78,13 +76,12 @@
*/
struct rbd_image_header {
u64 image_size;
- char block_name[32];
+ char *object_prefix;
__u8 obj_order;
__u8 crypt_type;
__u8 comp_type;
struct ceph_snap_context *snapc;
size_t snap_names_len;
- u64 snap_seq;
u32 total_snaps;
char *snap_names;
@@ -150,7 +147,7 @@ struct rbd_snap {
* a single device
*/
struct rbd_device {
- int id; /* blkdev unique id */
+ int dev_id; /* blkdev unique id */
int major; /* blkdev assigned major */
struct gendisk *disk; /* blkdev's gendisk and rq */
@@ -163,20 +160,24 @@ struct rbd_device {
spinlock_t lock; /* queue lock */
struct rbd_image_header header;
- char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */
- int obj_len;
- char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */
- char pool_name[RBD_MAX_POOL_NAME_LEN];
- int poolid;
+ char *image_name;
+ size_t image_name_len;
+ char *header_name;
+ char *pool_name;
+ int pool_id;
struct ceph_osd_event *watch_event;
struct ceph_osd_request *watch_request;
/* protects updating the header */
struct rw_semaphore header_rwsem;
- char snap_name[RBD_MAX_SNAP_NAME_LEN];
+ /* name of the snapshot this device reads from */
+ char *snap_name;
+ /* id of the snapshot this device reads from */
u64 snap_id; /* current snapshot id */
- int read_only;
+ /* whether the snap_id this device reads from still exists */
+ bool snap_exists;
+ int read_only;
struct list_head node;
@@ -201,8 +202,7 @@ static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count);
-static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
- struct rbd_snap *snap);
+static void __rbd_remove_snap_dev(struct rbd_snap *snap);
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
size_t count);
@@ -240,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
put_device(&rbd_dev->dev);
}
-static int __rbd_refresh_header(struct rbd_device *rbd_dev);
+static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -273,9 +273,9 @@ static const struct block_device_operations rbd_bd_ops = {
/*
* Initialize an rbd client instance.
- * We own *opt.
+ * We own *ceph_opts.
*/
-static struct rbd_client *rbd_client_create(struct ceph_options *opt,
+static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts,
struct rbd_options *rbd_opts)
{
struct rbd_client *rbdc;
@@ -291,10 +291,10 @@ static struct rbd_client *rbd_client_create(struct ceph_options *opt,
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rbdc->client = ceph_create_client(opt, rbdc, 0, 0);
+ rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
if (IS_ERR(rbdc->client))
goto out_mutex;
- opt = NULL; /* Now rbdc->client is responsible for opt */
+ ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
ret = ceph_open_session(rbdc->client);
if (ret < 0)
@@ -317,23 +317,23 @@ out_mutex:
mutex_unlock(&ctl_mutex);
kfree(rbdc);
out_opt:
- if (opt)
- ceph_destroy_options(opt);
+ if (ceph_opts)
+ ceph_destroy_options(ceph_opts);
return ERR_PTR(ret);
}
/*
* Find a ceph client with specific addr and configuration.
*/
-static struct rbd_client *__rbd_client_find(struct ceph_options *opt)
+static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts)
{
struct rbd_client *client_node;
- if (opt->flags & CEPH_OPT_NOSHARE)
+ if (ceph_opts->flags & CEPH_OPT_NOSHARE)
return NULL;
list_for_each_entry(client_node, &rbd_client_list, node)
- if (ceph_compare_options(opt, client_node->client) == 0)
+ if (!ceph_compare_options(ceph_opts, client_node->client))
return client_node;
return NULL;
}
@@ -349,7 +349,7 @@ enum {
/* string args above */
};
-static match_table_t rbdopt_tokens = {
+static match_table_t rbd_opts_tokens = {
{Opt_notify_timeout, "notify_timeout=%d"},
/* int args above */
/* string args above */
@@ -358,11 +358,11 @@ static match_table_t rbdopt_tokens = {
static int parse_rbd_opts_token(char *c, void *private)
{
- struct rbd_options *rbdopt = private;
+ struct rbd_options *rbd_opts = private;
substring_t argstr[MAX_OPT_ARGS];
int token, intval, ret;
- token = match_token(c, rbdopt_tokens, argstr);
+ token = match_token(c, rbd_opts_tokens, argstr);
if (token < 0)
return -EINVAL;
@@ -383,7 +383,7 @@ static int parse_rbd_opts_token(char *c, void *private)
switch (token) {
case Opt_notify_timeout:
- rbdopt->notify_timeout = intval;
+ rbd_opts->notify_timeout = intval;
break;
default:
BUG_ON(token);
@@ -400,7 +400,7 @@ static struct rbd_client *rbd_get_client(const char *mon_addr,
char *options)
{
struct rbd_client *rbdc;
- struct ceph_options *opt;
+ struct ceph_options *ceph_opts;
struct rbd_options *rbd_opts;
rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL);
@@ -409,29 +409,29 @@ static struct rbd_client *rbd_get_client(const char *mon_addr,
rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT;
- opt = ceph_parse_options(options, mon_addr,
- mon_addr + mon_addr_len,
- parse_rbd_opts_token, rbd_opts);
- if (IS_ERR(opt)) {
+ ceph_opts = ceph_parse_options(options, mon_addr,
+ mon_addr + mon_addr_len,
+ parse_rbd_opts_token, rbd_opts);
+ if (IS_ERR(ceph_opts)) {
kfree(rbd_opts);
- return ERR_CAST(opt);
+ return ERR_CAST(ceph_opts);
}
spin_lock(&rbd_client_list_lock);
- rbdc = __rbd_client_find(opt);
+ rbdc = __rbd_client_find(ceph_opts);
if (rbdc) {
/* using an existing client */
kref_get(&rbdc->kref);
spin_unlock(&rbd_client_list_lock);
- ceph_destroy_options(opt);
+ ceph_destroy_options(ceph_opts);
kfree(rbd_opts);
return rbdc;
}
spin_unlock(&rbd_client_list_lock);
- rbdc = rbd_client_create(opt, rbd_opts);
+ rbdc = rbd_client_create(ceph_opts, rbd_opts);
if (IS_ERR(rbdc))
kfree(rbd_opts);
@@ -480,46 +480,60 @@ static void rbd_coll_release(struct kref *kref)
kfree(coll);
}
+static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
+{
+ return !memcmp(&ondisk->text,
+ RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT));
+}
+
/*
* Create a new header structure, translate header format from the on-disk
* header.
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
- u32 allocated_snaps,
- gfp_t gfp_flags)
+ u32 allocated_snaps)
{
- u32 i, snap_count;
+ u32 snap_count;
- if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
+ if (!rbd_dev_ondisk_valid(ondisk))
return -ENXIO;
snap_count = le32_to_cpu(ondisk->snap_count);
- if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
- / sizeof (*ondisk))
+ if (snap_count > (SIZE_MAX - sizeof(struct ceph_snap_context))
+ / sizeof (u64))
return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
snap_count * sizeof(u64),
- gfp_flags);
+ GFP_KERNEL);
if (!header->snapc)
return -ENOMEM;
- header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
+ header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snap_names = kmalloc(header->snap_names_len,
- gfp_flags);
+ GFP_KERNEL);
if (!header->snap_names)
goto err_snapc;
header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- gfp_flags);
+ GFP_KERNEL);
if (!header->snap_sizes)
goto err_names;
} else {
+ WARN_ON(ondisk->snap_names_len);
+ header->snap_names_len = 0;
header->snap_names = NULL;
header->snap_sizes = NULL;
}
- memcpy(header->block_name, ondisk->block_name,
+
+ header->object_prefix = kmalloc(sizeof (ondisk->block_name) + 1,
+ GFP_KERNEL);
+ if (!header->object_prefix)
+ goto err_sizes;
+
+ memcpy(header->object_prefix, ondisk->block_name,
sizeof(ondisk->block_name));
+ header->object_prefix[sizeof (ondisk->block_name)] = '\0';
header->image_size = le64_to_cpu(ondisk->image_size);
header->obj_order = ondisk->options.order;
@@ -527,11 +541,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->comp_type = ondisk->options.comp_type;
atomic_set(&header->snapc->nref, 1);
- header->snap_seq = le64_to_cpu(ondisk->snap_seq);
+ header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
header->snapc->num_snaps = snap_count;
header->total_snaps = snap_count;
if (snap_count && allocated_snaps == snap_count) {
+ int i;
+
for (i = 0; i < snap_count; i++) {
header->snapc->snaps[i] =
le64_to_cpu(ondisk->snaps[i].id);
@@ -540,16 +556,22 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
}
/* copy snapshot names */
- memcpy(header->snap_names, &ondisk->snaps[i],
+ memcpy(header->snap_names, &ondisk->snaps[snap_count],
header->snap_names_len);
}
return 0;
+err_sizes:
+ kfree(header->snap_sizes);
+ header->snap_sizes = NULL;
err_names:
kfree(header->snap_names);
+ header->snap_names = NULL;
err_snapc:
kfree(header->snapc);
+ header->snapc = NULL;
+
return -ENOMEM;
}
@@ -575,52 +597,50 @@ static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
return -ENOENT;
}
-static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
+static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size)
{
- struct rbd_image_header *header = &dev->header;
- struct ceph_snap_context *snapc = header->snapc;
- int ret = -ENOENT;
-
- BUILD_BUG_ON(sizeof (dev->snap_name) < sizeof (RBD_SNAP_HEAD_NAME));
+ int ret;
- down_write(&dev->header_rwsem);
+ down_write(&rbd_dev->header_rwsem);
- if (!memcmp(dev->snap_name, RBD_SNAP_HEAD_NAME,
+ if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME))) {
- if (header->total_snaps)
- snapc->seq = header->snap_seq;
- else
- snapc->seq = 0;
- dev->snap_id = CEPH_NOSNAP;
- dev->read_only = 0;
+ rbd_dev->snap_id = CEPH_NOSNAP;
+ rbd_dev->snap_exists = false;
+ rbd_dev->read_only = 0;
if (size)
- *size = header->image_size;
+ *size = rbd_dev->header.image_size;
} else {
- ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
+ u64 snap_id = 0;
+
+ ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name,
+ &snap_id, size);
if (ret < 0)
goto done;
- dev->snap_id = snapc->seq;
- dev->read_only = 1;
+ rbd_dev->snap_id = snap_id;
+ rbd_dev->snap_exists = true;
+ rbd_dev->read_only = 1;
}
ret = 0;
done:
- up_write(&dev->header_rwsem);
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
static void rbd_header_free(struct rbd_image_header *header)
{
- kfree(header->snapc);
- kfree(header->snap_names);
+ kfree(header->object_prefix);
kfree(header->snap_sizes);
+ kfree(header->snap_names);
+ ceph_put_snap_context(header->snapc);
}
/*
* get the actual striped segment name, offset and length
*/
static u64 rbd_get_segment(struct rbd_image_header *header,
- const char *block_name,
+ const char *object_prefix,
u64 ofs, u64 len,
char *seg_name, u64 *segofs)
{
@@ -628,7 +648,7 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
if (seg_name)
snprintf(seg_name, RBD_MAX_SEG_NAME_LEN,
- "%s.%012llx", block_name, seg);
+ "%s.%012llx", object_prefix, seg);
ofs = ofs & ((1 << header->obj_order) - 1);
len = min_t(u64, len, (1 << header->obj_order) - ofs);
@@ -726,9 +746,8 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
* split_bio will BUG_ON if this is not the case
*/
dout("bio_chain_clone split! total=%d remaining=%d"
- "bi_size=%d\n",
- (int)total, (int)len-total,
- (int)old_chain->bi_size);
+ "bi_size=%u\n",
+ total, len - total, old_chain->bi_size);
/* split the bio. We'll release it either in the next
call, or it will have to be released outside */
@@ -777,22 +796,24 @@ err_out:
/*
* helpers for osd request op vectors.
*/
-static int rbd_create_rw_ops(struct ceph_osd_req_op **ops,
- int num_ops,
- int opcode,
- u32 payload_len)
-{
- *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1),
- GFP_NOIO);
- if (!*ops)
- return -ENOMEM;
- (*ops)[0].op = opcode;
+static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
+ int opcode, u32 payload_len)
+{
+ struct ceph_osd_req_op *ops;
+
+ ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
+ if (!ops)
+ return NULL;
+
+ ops[0].op = opcode;
+
/*
* op extent offset and length will be set later on
* in calc_raw_layout()
*/
- (*ops)[0].payload_len = payload_len;
- return 0;
+ ops[0].payload_len = payload_len;
+
+ return ops;
}
static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
@@ -808,8 +829,8 @@ static void rbd_coll_end_req_index(struct request *rq,
struct request_queue *q;
int min, max, i;
- dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n",
- coll, index, ret, len);
+ dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
+ coll, index, ret, (unsigned long long) len);
if (!rq)
return;
@@ -848,16 +869,15 @@ static void rbd_coll_end_req(struct rbd_request *req,
* Send ceph osd request
*/
static int rbd_do_request(struct request *rq,
- struct rbd_device *dev,
+ struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
u64 snapid,
- const char *obj, u64 ofs, u64 len,
+ const char *object_name, u64 ofs, u64 len,
struct bio *bio,
struct page **pages,
int num_pages,
int flags,
struct ceph_osd_req_op *ops,
- int num_reply,
struct rbd_req_coll *coll,
int coll_index,
void (*rbd_cb)(struct ceph_osd_request *req,
@@ -887,15 +907,13 @@ static int rbd_do_request(struct request *rq,
req_data->coll_index = coll_index;
}
- dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
-
- down_read(&dev->header_rwsem);
+ dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
+ (unsigned long long) ofs, (unsigned long long) len);
- osdc = &dev->rbd_client->client->osdc;
+ osdc = &rbd_dev->rbd_client->client->osdc;
req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
false, GFP_NOIO, pages, bio);
if (!req) {
- up_read(&dev->header_rwsem);
ret = -ENOMEM;
goto done_pages;
}
@@ -912,7 +930,7 @@ static int rbd_do_request(struct request *rq,
reqhead = req->r_request->front.iov_base;
reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
- strncpy(req->r_oid, obj, sizeof(req->r_oid));
+ strncpy(req->r_oid, object_name, sizeof(req->r_oid));
req->r_oid_len = strlen(req->r_oid);
layout = &req->r_file_layout;
@@ -920,7 +938,7 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_pool = cpu_to_le32(dev->poolid);
+ layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
@@ -929,7 +947,6 @@ static int rbd_do_request(struct request *rq,
snapc,
&mtime,
req->r_oid, req->r_oid_len);
- up_read(&dev->header_rwsem);
if (linger_req) {
ceph_osdc_set_request_linger(osdc, req);
@@ -944,8 +961,9 @@ static int rbd_do_request(struct request *rq,
ret = ceph_osdc_wait_request(osdc, req);
if (ver)
*ver = le64_to_cpu(req->r_reassert_version.version);
- dout("reassert_ver=%lld\n",
- le64_to_cpu(req->r_reassert_version.version));
+ dout("reassert_ver=%llu\n",
+ (unsigned long long)
+ le64_to_cpu(req->r_reassert_version.version));
ceph_osdc_put_request(req);
}
return ret;
@@ -979,7 +997,8 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
bytes = le64_to_cpu(op->extent.length);
read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
- dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
+ dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
+ (unsigned long long) bytes, read_op, (int) rc);
if (rc == -ENOENT && read_op) {
zero_bio_chain(req_data->bio, 0);
@@ -1006,14 +1025,12 @@ static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg
/*
* Do a synchronous ceph osd operation
*/
-static int rbd_req_sync_op(struct rbd_device *dev,
+static int rbd_req_sync_op(struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
u64 snapid,
- int opcode,
int flags,
- struct ceph_osd_req_op *orig_ops,
- int num_reply,
- const char *obj,
+ struct ceph_osd_req_op *ops,
+ const char *object_name,
u64 ofs, u64 len,
char *buf,
struct ceph_osd_request **linger_req,
@@ -1022,45 +1039,28 @@ static int rbd_req_sync_op(struct rbd_device *dev,
int ret;
struct page **pages;
int num_pages;
- struct ceph_osd_req_op *ops = orig_ops;
- u32 payload_len;
+
+ BUG_ON(ops == NULL);
num_pages = calc_pages_for(ofs , len);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (!orig_ops) {
- payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0);
- ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
- if (ret < 0)
- goto done;
-
- if ((flags & CEPH_OSD_FLAG_WRITE) && buf) {
- ret = ceph_copy_to_page_vector(pages, buf, ofs, len);
- if (ret < 0)
- goto done_ops;
- }
- }
-
- ret = rbd_do_request(NULL, dev, snapc, snapid,
- obj, ofs, len, NULL,
+ ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
+ object_name, ofs, len, NULL,
pages, num_pages,
flags,
ops,
- 2,
NULL, 0,
NULL,
linger_req, ver);
if (ret < 0)
- goto done_ops;
+ goto done;
if ((flags & CEPH_OSD_FLAG_READ) && buf)
ret = ceph_copy_from_page_vector(pages, buf, ofs, ret);
-done_ops:
- if (!orig_ops)
- rbd_destroy_ops(ops);
done:
ceph_release_page_vector(pages, num_pages);
return ret;
@@ -1070,10 +1070,10 @@ done:
* Do an asynchronous ceph osd operation
*/
static int rbd_do_op(struct request *rq,
- struct rbd_device *rbd_dev ,
+ struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
u64 snapid,
- int opcode, int flags, int num_reply,
+ int opcode, int flags,
u64 ofs, u64 len,
struct bio *bio,
struct rbd_req_coll *coll,
@@ -1091,14 +1091,15 @@ static int rbd_do_op(struct request *rq,
return -ENOMEM;
seg_len = rbd_get_segment(&rbd_dev->header,
- rbd_dev->header.block_name,
+ rbd_dev->header.object_prefix,
ofs, len,
seg_name, &seg_ofs);
payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);
- ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len);
- if (ret < 0)
+ ret = -ENOMEM;
+ ops = rbd_create_rw_ops(1, opcode, payload_len);
+ if (!ops)
goto done;
/* we've taken care of segment sizes earlier when we
@@ -1112,7 +1113,6 @@ static int rbd_do_op(struct request *rq,
NULL, 0,
flags,
ops,
- num_reply,
coll, coll_index,
rbd_req_cb, 0, NULL);
@@ -1136,7 +1136,6 @@ static int rbd_req_write(struct request *rq,
return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- 2,
ofs, len, bio, coll, coll_index);
}
@@ -1155,55 +1154,58 @@ static int rbd_req_read(struct request *rq,
snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
- 2,
ofs, len, bio, coll, coll_index);
}
/*
* Request sync osd read
*/
-static int rbd_req_sync_read(struct rbd_device *dev,
- struct ceph_snap_context *snapc,
+static int rbd_req_sync_read(struct rbd_device *rbd_dev,
u64 snapid,
- const char *obj,
+ const char *object_name,
u64 ofs, u64 len,
char *buf,
u64 *ver)
{
- return rbd_req_sync_op(dev, NULL,
+ struct ceph_osd_req_op *ops;
+ int ret;
+
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
+ if (!ops)
+ return -ENOMEM;
+
+ ret = rbd_req_sync_op(rbd_dev, NULL,
snapid,
- CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
- NULL,
- 1, obj, ofs, len, buf, NULL, ver);
+ ops, object_name, ofs, len, buf, NULL, ver);
+ rbd_destroy_ops(ops);
+
+ return ret;
}
/*
* Request sync osd watch
*/
-static int rbd_req_sync_notify_ack(struct rbd_device *dev,
+static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
u64 ver,
- u64 notify_id,
- const char *obj)
+ u64 notify_id)
{
struct ceph_osd_req_op *ops;
- struct page **pages = NULL;
int ret;
- ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
- if (ret < 0)
- return ret;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
+ if (!ops)
+ return -ENOMEM;
- ops[0].watch.ver = cpu_to_le64(dev->header.obj_version);
+ ops[0].watch.ver = cpu_to_le64(ver);
ops[0].watch.cookie = notify_id;
ops[0].watch.flag = 0;
- ret = rbd_do_request(NULL, dev, NULL, CEPH_NOSNAP,
- obj, 0, 0, NULL,
- pages, 0,
+ ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
+ rbd_dev->header_name, 0, 0, NULL,
+ NULL, 0,
CEPH_OSD_FLAG_READ,
ops,
- 1,
NULL, 0,
rbd_simple_req_cb, 0, NULL);
@@ -1213,54 +1215,53 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
- struct rbd_device *dev = (struct rbd_device *)data;
+ struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ u64 hver;
int rc;
- if (!dev)
+ if (!rbd_dev)
return;
- dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
- notify_id, (int)opcode);
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_refresh_header(dev);
- mutex_unlock(&ctl_mutex);
+ dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
+ rbd_dev->header_name, (unsigned long long) notify_id,
+ (unsigned int) opcode);
+ rc = rbd_refresh_header(rbd_dev, &hver);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
- " update snaps: %d\n", dev->major, rc);
+ " update snaps: %d\n", rbd_dev->major, rc);
- rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name);
+ rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
}
/*
* Request sync osd watch
*/
-static int rbd_req_sync_watch(struct rbd_device *dev,
- const char *obj,
- u64 ver)
+static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
{
struct ceph_osd_req_op *ops;
- struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ int ret;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
- if (ret < 0)
- return ret;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
+ if (!ops)
+ return -ENOMEM;
ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
- (void *)dev, &dev->watch_event);
+ (void *)rbd_dev, &rbd_dev->watch_event);
if (ret < 0)
goto fail;
- ops[0].watch.ver = cpu_to_le64(ver);
- ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
+ ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
+ ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
ops[0].watch.flag = 1;
- ret = rbd_req_sync_op(dev, NULL,
+ ret = rbd_req_sync_op(rbd_dev, NULL,
CEPH_NOSNAP,
- 0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL,
- &dev->watch_request, NULL);
+ rbd_dev->header_name,
+ 0, 0, NULL,
+ &rbd_dev->watch_request, NULL);
if (ret < 0)
goto fail_event;
@@ -1269,8 +1270,8 @@ static int rbd_req_sync_watch(struct rbd_device *dev,
return 0;
fail_event:
- ceph_osdc_cancel_event(dev->watch_event);
- dev->watch_event = NULL;
+ ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_dev->watch_event = NULL;
fail:
rbd_destroy_ops(ops);
return ret;
@@ -1279,64 +1280,65 @@ fail:
/*
* Request sync osd unwatch
*/
-static int rbd_req_sync_unwatch(struct rbd_device *dev,
- const char *obj)
+static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
{
struct ceph_osd_req_op *ops;
+ int ret;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_WATCH, 0);
- if (ret < 0)
- return ret;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
+ if (!ops)
+ return -ENOMEM;
ops[0].watch.ver = 0;
- ops[0].watch.cookie = cpu_to_le64(dev->watch_event->cookie);
+ ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
ops[0].watch.flag = 0;
- ret = rbd_req_sync_op(dev, NULL,
+ ret = rbd_req_sync_op(rbd_dev, NULL,
CEPH_NOSNAP,
- 0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
+ rbd_dev->header_name,
+ 0, 0, NULL, NULL, NULL);
+
rbd_destroy_ops(ops);
- ceph_osdc_cancel_event(dev->watch_event);
- dev->watch_event = NULL;
+ ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_dev->watch_event = NULL;
return ret;
}
struct rbd_notify_info {
- struct rbd_device *dev;
+ struct rbd_device *rbd_dev;
};
static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
- struct rbd_device *dev = (struct rbd_device *)data;
- if (!dev)
+ struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ if (!rbd_dev)
return;
- dout("rbd_notify_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
- notify_id, (int)opcode);
+ dout("rbd_notify_cb %s notify_id=%llu opcode=%u\n",
+ rbd_dev->header_name, (unsigned long long) notify_id,
+ (unsigned int) opcode);
}
/*
* Request sync osd notify
*/
-static int rbd_req_sync_notify(struct rbd_device *dev,
- const char *obj)
+static int rbd_req_sync_notify(struct rbd_device *rbd_dev)
{
struct ceph_osd_req_op *ops;
- struct ceph_osd_client *osdc = &dev->rbd_client->client->osdc;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct ceph_osd_event *event;
struct rbd_notify_info info;
int payload_len = sizeof(u32) + sizeof(u32);
int ret;
- ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY, payload_len);
- if (ret < 0)
- return ret;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY, payload_len);
+ if (!ops)
+ return -ENOMEM;
- info.dev = dev;
+ info.rbd_dev = rbd_dev;
ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1,
(void *)&info, &event);
@@ -1349,12 +1351,12 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
ops[0].watch.prot_ver = RADOS_NOTIFY_VER;
ops[0].watch.timeout = 12;
- ret = rbd_req_sync_op(dev, NULL,
+ ret = rbd_req_sync_op(rbd_dev, NULL,
CEPH_NOSNAP,
- 0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
+ rbd_dev->header_name,
+ 0, 0, NULL, NULL, NULL);
if (ret < 0)
goto fail_event;
@@ -1373,36 +1375,37 @@ fail:
/*
* Request sync osd read
*/
-static int rbd_req_sync_exec(struct rbd_device *dev,
- const char *obj,
- const char *cls,
- const char *method,
+static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
+ const char *object_name,
+ const char *class_name,
+ const char *method_name,
const char *data,
int len,
u64 *ver)
{
struct ceph_osd_req_op *ops;
- int cls_len = strlen(cls);
- int method_len = strlen(method);
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL,
- cls_len + method_len + len);
- if (ret < 0)
- return ret;
+ int class_name_len = strlen(class_name);
+ int method_name_len = strlen(method_name);
+ int ret;
- ops[0].cls.class_name = cls;
- ops[0].cls.class_len = (__u8)cls_len;
- ops[0].cls.method_name = method;
- ops[0].cls.method_len = (__u8)method_len;
+ ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL,
+ class_name_len + method_name_len + len);
+ if (!ops)
+ return -ENOMEM;
+
+ ops[0].cls.class_name = class_name;
+ ops[0].cls.class_len = (__u8) class_name_len;
+ ops[0].cls.method_name = method_name;
+ ops[0].cls.method_len = (__u8) method_name_len;
ops[0].cls.argc = 0;
ops[0].cls.indata = data;
ops[0].cls.indata_len = len;
- ret = rbd_req_sync_op(dev, NULL,
+ ret = rbd_req_sync_op(rbd_dev, NULL,
CEPH_NOSNAP,
- 0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
- 1, obj, 0, 0, NULL, NULL, ver);
+ object_name, 0, 0, NULL, NULL, ver);
rbd_destroy_ops(ops);
@@ -1437,10 +1440,12 @@ static void rbd_rq_fn(struct request_queue *q)
struct bio *bio;
struct bio *rq_bio, *next_bio = NULL;
bool do_write;
- int size, op_size = 0;
+ unsigned int size;
+ u64 op_size = 0;
u64 ofs;
int num_segs, cur_seg = 0;
struct rbd_req_coll *coll;
+ struct ceph_snap_context *snapc;
/* peek at request from block layer */
if (!rq)
@@ -1467,23 +1472,38 @@ static void rbd_rq_fn(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
+ down_read(&rbd_dev->header_rwsem);
+
+ if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) {
+ up_read(&rbd_dev->header_rwsem);
+ dout("request for non-existent snapshot");
+ spin_lock_irq(q->queue_lock);
+ __blk_end_request_all(rq, -ENXIO);
+ continue;
+ }
+
+ snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+
+ up_read(&rbd_dev->header_rwsem);
+
dout("%s 0x%x bytes at 0x%llx\n",
do_write ? "write" : "read",
- size, blk_rq_pos(rq) * SECTOR_SIZE);
+ size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
coll = rbd_alloc_coll(num_segs);
if (!coll) {
spin_lock_irq(q->queue_lock);
__blk_end_request_all(rq, -ENOMEM);
+ ceph_put_snap_context(snapc);
continue;
}
do {
/* a bio clone to be passed down to OSD req */
- dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
+ dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
op_size = rbd_get_segment(&rbd_dev->header,
- rbd_dev->header.block_name,
+ rbd_dev->header.object_prefix,
ofs, size,
NULL, NULL);
kref_get(&coll->kref);
@@ -1499,7 +1519,7 @@ static void rbd_rq_fn(struct request_queue *q)
/* init OSD command: write or read */
if (do_write)
rbd_req_write(rq, rbd_dev,
- rbd_dev->header.snapc,
+ snapc,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1522,6 +1542,8 @@ next_seg:
if (bp)
bio_pair_release(bp);
spin_lock_irq(q->queue_lock);
+
+ ceph_put_snap_context(snapc);
}
}
@@ -1592,18 +1614,19 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
return -ENOMEM;
rc = rbd_req_sync_read(rbd_dev,
- NULL, CEPH_NOSNAP,
- rbd_dev->obj_md_name,
+ CEPH_NOSNAP,
+ rbd_dev->header_name,
0, len,
(char *)dh, &ver);
if (rc < 0)
goto out_dh;
- rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
+ rc = rbd_header_from_disk(header, dh, snap_count);
if (rc < 0) {
if (rc == -ENXIO)
pr_warning("unrecognized header format"
- " for image %s", rbd_dev->obj);
+ " for image %s\n",
+ rbd_dev->image_name);
goto out_dh;
}
@@ -1628,7 +1651,7 @@ out_dh:
/*
* create a snapshot
*/
-static int rbd_header_add_snap(struct rbd_device *dev,
+static int rbd_header_add_snap(struct rbd_device *rbd_dev,
const char *snap_name,
gfp_t gfp_flags)
{
@@ -1636,16 +1659,15 @@ static int rbd_header_add_snap(struct rbd_device *dev,
u64 new_snapid;
int ret;
void *data, *p, *e;
- u64 ver;
struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
- if (dev->snap_id != CEPH_NOSNAP)
+ if (rbd_dev->snap_id != CEPH_NOSNAP)
return -EINVAL;
- monc = &dev->rbd_client->client->monc;
- ret = ceph_monc_create_snapid(monc, dev->poolid, &new_snapid);
- dout("created snapid=%lld\n", new_snapid);
+ monc = &rbd_dev->rbd_client->client->monc;
+ ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid);
+ dout("created snapid=%llu\n", (unsigned long long) new_snapid);
if (ret < 0)
return ret;
@@ -1659,19 +1681,13 @@ static int rbd_header_add_snap(struct rbd_device *dev,
ceph_encode_string_safe(&p, e, snap_name, name_len, bad);
ceph_encode_64_safe(&p, e, new_snapid, bad);
- ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add",
- data, p - data, &ver);
+ ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+ "rbd", "snap_add",
+ data, p - data, NULL);
kfree(data);
- if (ret < 0)
- return ret;
-
- down_write(&dev->header_rwsem);
- dev->header.snapc->seq = new_snapid;
- up_write(&dev->header_rwsem);
-
- return 0;
+ return ret < 0 ? ret : 0;
bad:
return -ERANGE;
}
@@ -1679,52 +1695,52 @@ bad:
static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
struct rbd_snap *snap;
+ struct rbd_snap *next;
- while (!list_empty(&rbd_dev->snaps)) {
- snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node);
- __rbd_remove_snap_dev(rbd_dev, snap);
- }
+ list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
+ __rbd_remove_snap_dev(snap);
}
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_refresh_header(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
{
int ret;
struct rbd_image_header h;
- u64 snap_seq;
- int follow_seq = 0;
ret = rbd_read_header(rbd_dev, &h);
if (ret < 0)
return ret;
- /* resized? */
- set_capacity(rbd_dev->disk, h.image_size / SECTOR_SIZE);
-
down_write(&rbd_dev->header_rwsem);
- snap_seq = rbd_dev->header.snapc->seq;
- if (rbd_dev->header.total_snaps &&
- rbd_dev->header.snapc->snaps[0] == snap_seq)
- /* pointing at the head, will need to follow that
- if head moves */
- follow_seq = 1;
+ /* resized? */
+ if (rbd_dev->snap_id == CEPH_NOSNAP) {
+ sector_t size = (sector_t) h.image_size / SECTOR_SIZE;
- kfree(rbd_dev->header.snapc);
- kfree(rbd_dev->header.snap_names);
+ dout("setting size to %llu sectors", (unsigned long long) size);
+ set_capacity(rbd_dev->disk, size);
+ }
+
+ /* rbd_dev->header.object_prefix shouldn't change */
kfree(rbd_dev->header.snap_sizes);
+ kfree(rbd_dev->header.snap_names);
+ /* osd requests may still refer to snapc */
+ ceph_put_snap_context(rbd_dev->header.snapc);
+ if (hver)
+ *hver = h.obj_version;
+ rbd_dev->header.obj_version = h.obj_version;
+ rbd_dev->header.image_size = h.image_size;
rbd_dev->header.total_snaps = h.total_snaps;
rbd_dev->header.snapc = h.snapc;
rbd_dev->header.snap_names = h.snap_names;
rbd_dev->header.snap_names_len = h.snap_names_len;
rbd_dev->header.snap_sizes = h.snap_sizes;
- if (follow_seq)
- rbd_dev->header.snapc->seq = rbd_dev->header.snapc->snaps[0];
- else
- rbd_dev->header.snapc->seq = snap_seq;
+ /* Free the extra copy of the object prefix */
+ WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
+ kfree(h.object_prefix);
ret = __rbd_init_snaps_header(rbd_dev);
@@ -1733,6 +1749,17 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev)
return ret;
}
+static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
+{
+ int ret;
+
+ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ ret = __rbd_refresh_header(rbd_dev, hver);
+ mutex_unlock(&ctl_mutex);
+
+ return ret;
+}
+
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
@@ -1762,7 +1789,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
goto out;
snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
- rbd_dev->id);
+ rbd_dev->dev_id);
disk->major = rbd_dev->major;
disk->first_minor = 0;
disk->fops = &rbd_bd_ops;
@@ -1819,8 +1846,13 @@ static ssize_t rbd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ sector_t size;
+
+ down_read(&rbd_dev->header_rwsem);
+ size = get_capacity(rbd_dev->disk);
+ up_read(&rbd_dev->header_rwsem);
- return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
+ return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
}
static ssize_t rbd_major_show(struct device *dev,
@@ -1848,12 +1880,20 @@ static ssize_t rbd_pool_show(struct device *dev,
return sprintf(buf, "%s\n", rbd_dev->pool_name);
}
+static ssize_t rbd_pool_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+
+ return sprintf(buf, "%d\n", rbd_dev->pool_id);
+}
+
static ssize_t rbd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- return sprintf(buf, "%s\n", rbd_dev->obj);
+ return sprintf(buf, "%s\n", rbd_dev->image_name);
}
static ssize_t rbd_snap_show(struct device *dev,
@@ -1871,23 +1911,18 @@ static ssize_t rbd_image_refresh(struct device *dev,
size_t size)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- int rc;
- int ret = size;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+ int ret;
- rc = __rbd_refresh_header(rbd_dev);
- if (rc < 0)
- ret = rc;
+ ret = rbd_refresh_header(rbd_dev, NULL);
- mutex_unlock(&ctl_mutex);
- return ret;
+ return ret < 0 ? ret : size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
+static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
@@ -1898,6 +1933,7 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_major.attr,
&dev_attr_client_id.attr,
&dev_attr_pool.attr,
+ &dev_attr_pool_id.attr,
&dev_attr_name.attr,
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
@@ -1977,15 +2013,13 @@ static struct device_type rbd_snap_device_type = {
.release = rbd_snap_dev_release,
};
-static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
- struct rbd_snap *snap)
+static void __rbd_remove_snap_dev(struct rbd_snap *snap)
{
list_del(&snap->node);
device_unregister(&snap->dev);
}
-static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
- struct rbd_snap *snap,
+static int rbd_register_snap_dev(struct rbd_snap *snap,
struct device *parent)
{
struct device *dev = &snap->dev;
@@ -2000,29 +2034,36 @@ static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
return ret;
}
-static int __rbd_add_snap_dev(struct rbd_device *rbd_dev,
- int i, const char *name,
- struct rbd_snap **snapp)
+static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
+ int i, const char *name)
{
+ struct rbd_snap *snap;
int ret;
- struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL);
+
+ snap = kzalloc(sizeof (*snap), GFP_KERNEL);
if (!snap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ ret = -ENOMEM;
snap->name = kstrdup(name, GFP_KERNEL);
+ if (!snap->name)
+ goto err;
+
snap->size = rbd_dev->header.snap_sizes[i];
snap->id = rbd_dev->header.snapc->snaps[i];
if (device_is_registered(&rbd_dev->dev)) {
- ret = rbd_register_snap_dev(rbd_dev, snap,
- &rbd_dev->dev);
+ ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
if (ret < 0)
goto err;
}
- *snapp = snap;
- return 0;
+
+ return snap;
+
err:
kfree(snap->name);
kfree(snap);
- return ret;
+
+ return ERR_PTR(ret);
}
/*
@@ -2055,7 +2096,6 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
const char *name, *first_name;
int i = rbd_dev->header.total_snaps;
struct rbd_snap *snap, *old_snap = NULL;
- int ret;
struct list_head *p, *n;
first_name = rbd_dev->header.snap_names;
@@ -2070,8 +2110,15 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
cur_id = rbd_dev->header.snapc->snaps[i - 1];
if (!i || old_snap->id < cur_id) {
- /* old_snap->id was skipped, thus was removed */
- __rbd_remove_snap_dev(rbd_dev, old_snap);
+ /*
+ * old_snap->id was skipped, thus was
+ * removed. If this rbd_dev is mapped to
+ * the removed snapshot, record that it no
+ * longer exists, to prevent further I/O.
+ */
+ if (rbd_dev->snap_id == old_snap->id)
+ rbd_dev->snap_exists = false;
+ __rbd_remove_snap_dev(old_snap);
continue;
}
if (old_snap->id == cur_id) {
@@ -2091,9 +2138,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
if (cur_id >= old_snap->id)
break;
/* a new snapshot */
- ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
- if (ret < 0)
- return ret;
+ snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
+ if (IS_ERR(snap))
+ return PTR_ERR(snap);
/* note that we add it backward so using n and not p */
list_add(&snap->node, n);
@@ -2107,9 +2154,9 @@ static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
WARN_ON(1);
return -EINVAL;
}
- ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
- if (ret < 0)
- return ret;
+ snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
+ if (IS_ERR(snap))
+ return PTR_ERR(snap);
list_add(&snap->node, &rbd_dev->snaps);
}
@@ -2129,14 +2176,13 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
dev->type = &rbd_device_type;
dev->parent = &rbd_root_dev;
dev->release = rbd_dev_release;
- dev_set_name(dev, "%d", rbd_dev->id);
+ dev_set_name(dev, "%d", rbd_dev->dev_id);
ret = device_register(dev);
if (ret < 0)
goto out;
list_for_each_entry(snap, &rbd_dev->snaps, node) {
- ret = rbd_register_snap_dev(rbd_dev, snap,
- &rbd_dev->dev);
+ ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
if (ret < 0)
break;
}
@@ -2155,12 +2201,9 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
int ret, rc;
do {
- ret = rbd_req_sync_watch(rbd_dev, rbd_dev->obj_md_name,
- rbd_dev->header.obj_version);
+ ret = rbd_req_sync_watch(rbd_dev);
if (ret == -ERANGE) {
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_refresh_header(rbd_dev);
- mutex_unlock(&ctl_mutex);
+ rc = rbd_refresh_header(rbd_dev, NULL);
if (rc < 0)
return rc;
}
@@ -2177,7 +2220,7 @@ static atomic64_t rbd_id_max = ATOMIC64_INIT(0);
*/
static void rbd_id_get(struct rbd_device *rbd_dev)
{
- rbd_dev->id = atomic64_inc_return(&rbd_id_max);
+ rbd_dev->dev_id = atomic64_inc_return(&rbd_id_max);
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -2191,7 +2234,7 @@ static void rbd_id_get(struct rbd_device *rbd_dev)
static void rbd_id_put(struct rbd_device *rbd_dev)
{
struct list_head *tmp;
- int rbd_id = rbd_dev->id;
+ int rbd_id = rbd_dev->dev_id;
int max_id;
BUG_ON(rbd_id < 1);
@@ -2282,19 +2325,58 @@ static inline size_t copy_token(const char **buf,
}
/*
- * This fills in the pool_name, obj, obj_len, snap_name, obj_len,
+ * Finds the next token in *buf, dynamically allocates a buffer big
+ * enough to hold a copy of it, and copies the token into the new
+ * buffer. The copy is guaranteed to be terminated with '\0'. Note
+ * that a duplicate buffer is created even for a zero-length token.
+ *
+ * Returns a pointer to the newly-allocated duplicate, or a null
+ * pointer if memory for the duplicate was not available. If
+ * the lenp argument is a non-null pointer, the length of the token
+ * (not including the '\0') is returned in *lenp.
+ *
+ * If successful, the *buf pointer will be updated to point beyond
+ * the end of the found token.
+ *
+ * Note: uses GFP_KERNEL for allocation.
+ */
+static inline char *dup_token(const char **buf, size_t *lenp)
+{
+ char *dup;
+ size_t len;
+
+ len = next_token(buf);
+ dup = kmalloc(len + 1, GFP_KERNEL);
+ if (!dup)
+ return NULL;
+
+ memcpy(dup, *buf, len);
+ *(dup + len) = '\0';
+ *buf += len;
+
+ if (lenp)
+ *lenp = len;
+
+ return dup;
+}
+
+/*
+ * This fills in the pool_name, image_name, image_name_len, snap_name,
* rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based
* on the list of monitor addresses and other options provided via
* /sys/bus/rbd/add.
+ *
+ * Note: rbd_dev is assumed to have been initially zero-filled.
*/
static int rbd_add_parse_args(struct rbd_device *rbd_dev,
const char *buf,
const char **mon_addrs,
size_t *mon_addrs_size,
char *options,
- size_t options_size)
+ size_t options_size)
{
- size_t len;
+ size_t len;
+ int ret;
/* The first four tokens are required */
@@ -2310,56 +2392,74 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev,
if (!len || len >= options_size)
return -EINVAL;
- len = copy_token(&buf, rbd_dev->pool_name, sizeof (rbd_dev->pool_name));
- if (!len || len >= sizeof (rbd_dev->pool_name))
- return -EINVAL;
-
- len = copy_token(&buf, rbd_dev->obj, sizeof (rbd_dev->obj));
- if (!len || len >= sizeof (rbd_dev->obj))
- return -EINVAL;
+ ret = -ENOMEM;
+ rbd_dev->pool_name = dup_token(&buf, NULL);
+ if (!rbd_dev->pool_name)
+ goto out_err;
- /* We have the object length in hand, save it. */
+ rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
+ if (!rbd_dev->image_name)
+ goto out_err;
- rbd_dev->obj_len = len;
+ /* Create the name of the header object */
- BUILD_BUG_ON(RBD_MAX_MD_NAME_LEN
- < RBD_MAX_OBJ_NAME_LEN + sizeof (RBD_SUFFIX));
- sprintf(rbd_dev->obj_md_name, "%s%s", rbd_dev->obj, RBD_SUFFIX);
+ rbd_dev->header_name = kmalloc(rbd_dev->image_name_len
+ + sizeof (RBD_SUFFIX),
+ GFP_KERNEL);
+ if (!rbd_dev->header_name)
+ goto out_err;
+ sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
/*
- * The snapshot name is optional, but it's an error if it's
- * too long. If no snapshot is supplied, fill in the default.
+ * The snapshot name is optional. If none is supplied,
+ * we use the default value.
*/
- len = copy_token(&buf, rbd_dev->snap_name, sizeof (rbd_dev->snap_name));
- if (!len)
+ rbd_dev->snap_name = dup_token(&buf, &len);
+ if (!rbd_dev->snap_name)
+ goto out_err;
+ if (!len) {
+ /* Replace the empty name with the default */
+ kfree(rbd_dev->snap_name);
+ rbd_dev->snap_name
+ = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL);
+ if (!rbd_dev->snap_name)
+ goto out_err;
+
memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME,
sizeof (RBD_SNAP_HEAD_NAME));
- else if (len >= sizeof (rbd_dev->snap_name))
- return -EINVAL;
+ }
return 0;
+
+out_err:
+ kfree(rbd_dev->header_name);
+ kfree(rbd_dev->image_name);
+ kfree(rbd_dev->pool_name);
+ rbd_dev->pool_name = NULL;
+
+ return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
const char *buf,
size_t count)
{
- struct rbd_device *rbd_dev;
+ char *options;
+ struct rbd_device *rbd_dev = NULL;
const char *mon_addrs = NULL;
size_t mon_addrs_size = 0;
- char *options = NULL;
struct ceph_osd_client *osdc;
int rc = -ENOMEM;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
- if (!rbd_dev)
- goto err_nomem;
options = kmalloc(count, GFP_KERNEL);
if (!options)
goto err_nomem;
+ rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
+ if (!rbd_dev)
+ goto err_nomem;
/* static rbd_device initialization */
spin_lock_init(&rbd_dev->lock);
@@ -2367,15 +2467,13 @@ static ssize_t rbd_add(struct bus_type *bus,
INIT_LIST_HEAD(&rbd_dev->snaps);
init_rwsem(&rbd_dev->header_rwsem);
- init_rwsem(&rbd_dev->header_rwsem);
-
/* generate unique id: find highest unique id, add one */
rbd_id_get(rbd_dev);
/* Fill in the device name, now that we have its id. */
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
- sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->id);
+ sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
/* parse add command */
rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size,
@@ -2395,7 +2493,7 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
if (rc < 0)
goto err_out_client;
- rbd_dev->poolid = rc;
+ rbd_dev->pool_id = rc;
/* register our block device */
rc = register_blkdev(0, rbd_dev->name);
@@ -2435,10 +2533,16 @@ err_out_blkdev:
err_out_client:
rbd_put_client(rbd_dev);
err_put_id:
+ if (rbd_dev->pool_name) {
+ kfree(rbd_dev->snap_name);
+ kfree(rbd_dev->header_name);
+ kfree(rbd_dev->image_name);
+ kfree(rbd_dev->pool_name);
+ }
rbd_id_put(rbd_dev);
err_nomem:
- kfree(options);
kfree(rbd_dev);
+ kfree(options);
dout("Error adding device %s\n", buf);
module_put(THIS_MODULE);
@@ -2446,7 +2550,7 @@ err_nomem:
return (ssize_t) rc;
}
-static struct rbd_device *__rbd_get_dev(unsigned long id)
+static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
struct list_head *tmp;
struct rbd_device *rbd_dev;
@@ -2454,7 +2558,7 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
spin_lock(&rbd_dev_list_lock);
list_for_each(tmp, &rbd_dev_list) {
rbd_dev = list_entry(tmp, struct rbd_device, node);
- if (rbd_dev->id == id) {
+ if (rbd_dev->dev_id == dev_id) {
spin_unlock(&rbd_dev_list_lock);
return rbd_dev;
}
@@ -2474,7 +2578,7 @@ static void rbd_dev_release(struct device *dev)
rbd_dev->watch_request);
}
if (rbd_dev->watch_event)
- rbd_req_sync_unwatch(rbd_dev, rbd_dev->obj_md_name);
+ rbd_req_sync_unwatch(rbd_dev);
rbd_put_client(rbd_dev);
@@ -2483,6 +2587,10 @@ static void rbd_dev_release(struct device *dev)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
/* done with the id, and with the rbd_dev */
+ kfree(rbd_dev->snap_name);
+ kfree(rbd_dev->header_name);
+ kfree(rbd_dev->pool_name);
+ kfree(rbd_dev->image_name);
rbd_id_put(rbd_dev);
kfree(rbd_dev);
@@ -2544,7 +2652,7 @@ static ssize_t rbd_snap_add(struct device *dev,
if (ret < 0)
goto err_unlock;
- ret = __rbd_refresh_header(rbd_dev);
+ ret = __rbd_refresh_header(rbd_dev, NULL);
if (ret < 0)
goto err_unlock;
@@ -2553,7 +2661,7 @@ static ssize_t rbd_snap_add(struct device *dev,
mutex_unlock(&ctl_mutex);
/* make a best effort, don't error if failed */
- rbd_req_sync_notify(rbd_dev, rbd_dev->obj_md_name);
+ rbd_req_sync_notify(rbd_dev);
ret = count;
kfree(name);
diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h
index 950708688f17..0924e9e41a60 100644
--- a/drivers/block/rbd_types.h
+++ b/drivers/block/rbd_types.h
@@ -31,7 +31,6 @@
#define RBD_MIN_OBJ_ORDER 16
#define RBD_MAX_OBJ_ORDER 30
-#define RBD_MAX_OBJ_NAME_LEN 96
#define RBD_MAX_SEG_NAME_LEN 128
#define RBD_COMP_NONE 0
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9a72277a31df..eb0d8216f557 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,42 +513,19 @@ static void process_page(unsigned long data)
}
}
-struct mm_plug_cb {
- struct blk_plug_cb cb;
- struct cardinfo *card;
-};
-
-static void mm_unplug(struct blk_plug_cb *cb)
+static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+ struct cardinfo *card = cb->data;
- spin_lock_irq(&mmcb->card->lock);
- activate(mmcb->card);
- spin_unlock_irq(&mmcb->card->lock);
- kfree(mmcb);
+ spin_lock_irq(&card->lock);
+ activate(card);
+ spin_unlock_irq(&card->lock);
+ kfree(cb);
}
static int mm_check_plugged(struct cardinfo *card)
{
- struct blk_plug *plug = current->plug;
- struct mm_plug_cb *mmcb;
-
- if (!plug)
- return 0;
-
- list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
- if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
- return 1;
- }
- /* Not currently on the callback list */
- mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
- if (!mmcb)
- return 0;
-
- mmcb->card = card;
- mmcb->cb.callback = mm_unplug;
- list_add(&mmcb->cb.list, &plug->cb_list);
- return 1;
+ return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
}
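The open-coded plug tracking above is replaced by the block layer's blk_check_plugged() helper, whose prototype is not part of this hunk. An assumed declaration, inferred from the call here and from mm_unplug() reading cb->data:

/*
 * Assumed block-layer helper (sketch, not from this patch): look up a
 * callback already registered with (unplug, data) on current->plug, or
 * allocate and register one of at least @size bytes with cb->data set
 * to @data.  Returns NULL when the task has no active plug, which is
 * all mm_check_plugged() needs to distinguish.
 */
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size);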
static void mm_make_request(struct request_queue *q, struct bio *bio)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 693187df7601..c0bbeb470754 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -21,8 +21,6 @@ struct workqueue_struct *virtblk_wq;
struct virtio_blk
{
- spinlock_t lock;
-
struct virtio_device *vdev;
struct virtqueue *vq;
@@ -65,7 +63,7 @@ static void blk_done(struct virtqueue *vq)
unsigned int len;
unsigned long flags;
- spin_lock_irqsave(&vblk->lock, flags);
+ spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
int error;
@@ -99,7 +97,7 @@ static void blk_done(struct virtqueue *vq)
}
/* In case queue is stopped waiting for more buffers. */
blk_start_queue(vblk->disk->queue);
- spin_unlock_irqrestore(&vblk->lock, flags);
+ spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
}
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -397,6 +395,83 @@ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
return 0;
}
+static int virtblk_get_cache_mode(struct virtio_device *vdev)
+{
+ u8 writeback;
+ int err;
+
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+ offsetof(struct virtio_blk_config, wce),
+ &writeback);
+ if (err)
+ writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
+
+ return writeback;
+}
+
+static void virtblk_update_cache_mode(struct virtio_device *vdev)
+{
+ u8 writeback = virtblk_get_cache_mode(vdev);
+ struct virtio_blk *vblk = vdev->priv;
+
+ if (writeback)
+ blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
+ else
+ blk_queue_flush(vblk->disk->queue, 0);
+
+ revalidate_disk(vblk->disk);
+}
+
+static const char *const virtblk_cache_types[] = {
+ "write through", "write back"
+};
+
+static ssize_t
+virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct virtio_blk *vblk = disk->private_data;
+ struct virtio_device *vdev = vblk->vdev;
+ int i;
+ u8 writeback;
+
+ BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
+ for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
+ if (sysfs_streq(buf, virtblk_cache_types[i]))
+ break;
+
+ if (i < 0)
+ return -EINVAL;
+
+ writeback = i;
+ vdev->config->set(vdev,
+ offsetof(struct virtio_blk_config, wce),
+ &writeback, sizeof(writeback));
+
+ virtblk_update_cache_mode(vdev);
+ return count;
+}
+
+static ssize_t
+virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct virtio_blk *vblk = disk->private_data;
+ u8 writeback = virtblk_get_cache_mode(vblk->vdev);
+
+ BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
+ return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
+}
+
+static const struct device_attribute dev_attr_cache_type_ro =
+ __ATTR(cache_type, S_IRUGO,
+ virtblk_cache_type_show, NULL);
+static const struct device_attribute dev_attr_cache_type_rw =
+ __ATTR(cache_type, S_IRUGO|S_IWUSR,
+ virtblk_cache_type_show, virtblk_cache_type_store);
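The attribute pair above is selected at probe time (see the virtblk_probe() hunk below). A hedged usage note, with the sysfs path assumed rather than taken from this patch:

/*
 * Assumed sysfs view (illustrative only): /sys/block/vd*/cache_type
 * reads back "write through" or "write back".  Writes are accepted
 * only when VIRTIO_BLK_F_CONFIG_WCE was negotiated; otherwise the
 * read-only variant of the attribute is registered instead.
 */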
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -431,7 +506,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_free_index;
}
- spin_lock_init(&vblk->lock);
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
@@ -456,7 +530,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_mempool;
}
- q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+ q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
if (!q) {
err = -ENOMEM;
goto out_put_disk;
@@ -474,8 +548,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->index = index;
/* configure queue flush support */
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
- blk_queue_flush(q, REQ_FLUSH);
+ virtblk_update_cache_mode(vdev);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -553,6 +626,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_del_disk;
+ if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+ err = device_create_file(disk_to_dev(vblk->disk),
+ &dev_attr_cache_type_rw);
+ else
+ err = device_create_file(disk_to_dev(vblk->disk),
+ &dev_attr_cache_type_ro);
+ if (err)
+ goto out_del_disk;
return 0;
out_del_disk:
@@ -576,30 +657,20 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
- struct virtblk_req *vbr;
- unsigned long flags;
/* Prevent config work handler from accessing the device. */
mutex_lock(&vblk->config_lock);
vblk->config_enable = false;
mutex_unlock(&vblk->config_lock);
+ del_gendisk(vblk->disk);
+ blk_cleanup_queue(vblk->disk->queue);
+
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
flush_work(&vblk->config_work);
- del_gendisk(vblk->disk);
-
- /* Abort requests dispatched to driver. */
- spin_lock_irqsave(&vblk->lock, flags);
- while ((vbr = virtqueue_detach_unused_buf(vblk->vq))) {
- __blk_end_request_all(vbr->req, -EIO);
- mempool_free(vbr, vblk->pool);
- }
- spin_unlock_irqrestore(&vblk->lock, flags);
-
- blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
mempool_destroy(vblk->pool);
vdev->config->del_vqs(vdev);
@@ -655,7 +726,7 @@ static const struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
- VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
+ VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};
/*
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e4fb3374dcd2..2c2d2e5c1597 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -888,9 +888,8 @@ static int setup_blkring(struct xenbus_device *dev,
if (err)
goto fail;
- err = bind_evtchn_to_irqhandler(info->evtchn,
- blkif_interrupt,
- IRQF_SAMPLE_RANDOM, "blkif", info);
+ err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
+ "blkif", info);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 10308cd8a7ed..11f36e502136 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -79,6 +79,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0489, 0xe057) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -104,6 +105,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index e27221411036..cef3bac1a543 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -98,6 +98,7 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0a5c, 0x21e6) },
{ USB_DEVICE(0x0a5c, 0x21e8) },
{ USB_DEVICE(0x0a5c, 0x21f3) },
+ { USB_DEVICE(0x0a5c, 0x21f4) },
{ USB_DEVICE(0x413c, 0x8197) },
/* Foxconn - Hon Hai */
@@ -133,6 +134,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a4185279417..b130df0a1958 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
+#include <drm/intel-gtt.h>
int intel_agp_enabled;
EXPORT_SYMBOL(intel_agp_enabled);
@@ -747,7 +748,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
bridge->capndx = cap_ptr;
- if (intel_gmch_probe(pdev, bridge))
+ if (intel_gmch_probe(pdev, NULL, bridge))
goto found_gmch;
for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
@@ -824,7 +825,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
agp_remove_bridge(bridge);
- intel_gmch_remove(pdev);
+ intel_gmch_remove();
agp_put_bridge(bridge);
}
@@ -902,17 +903,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
- ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
- ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
- ID(PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_M_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_S_HB),
- ID(PCI_DEVICE_ID_INTEL_HASWELL_E_HB),
{ }
};
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 8e2d9140f300..6ec0fff79bc2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -64,6 +64,7 @@
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
+#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008
@@ -239,19 +240,45 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
+#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
+#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
+#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
+#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
+#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
-int intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge);
-void intel_gmch_remove(struct pci_dev *pdev);
#endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 1237e7575c3f..58e32f7c3229 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -66,7 +66,6 @@ static struct _intel_private {
struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
- phys_addr_t gma_bus_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
@@ -76,6 +75,7 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
+ int refcount;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@@ -648,6 +648,7 @@ static void intel_gtt_cleanup(void)
static int intel_gtt_init(void)
{
+ u32 gma_addr;
u32 gtt_map_size;
int ret;
@@ -694,6 +695,15 @@ static int intel_gtt_init(void)
return ret;
}
+ if (INTEL_GTT_GEN <= 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
+
+ intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+
return 0;
}
@@ -767,20 +777,10 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel(addr | pte_flags, intel_private.gtt + entry);
}
-static bool intel_enable_gtt(void)
+bool intel_enable_gtt(void)
{
- u32 gma_addr;
u8 __iomem *reg;
- if (INTEL_GTT_GEN <= 2)
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
- &gma_addr);
- else
- pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
- &gma_addr);
-
- intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
-
if (INTEL_GTT_GEN >= 6)
return true;
@@ -823,6 +823,7 @@ static bool intel_enable_gtt(void)
return true;
}
+EXPORT_SYMBOL(intel_enable_gtt);
static int i830_setup(void)
{
@@ -860,7 +861,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
- agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
+ agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
return 0;
}
@@ -1155,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags)
return true;
}
+static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
+{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
+ u32 pte_flags;
+
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
+ else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
+ pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ } else { /* set 'normal'/'cached' to LLC by default */
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
+
+ /* gen6 has bit11-4 for physical addr bit39-32 */
+ addr |= (addr >> 28) & 0xff0;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
@@ -1182,9 +1207,17 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
+ unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
+ unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
- pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ if (type_mask == AGP_USER_MEMORY)
+ pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
+ else {
+ pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+ if (gfdt)
+ pte_flags |= GEN6_PTE_GFDT;
+ }
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
@@ -1244,6 +1277,7 @@ static int i9xx_setup(void)
switch (INTEL_GTT_GEN) {
case 5:
case 6:
+ case 7:
gtt_offset = MB(2);
break;
case 4:
@@ -1372,6 +1406,15 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
+static const struct intel_gtt_driver haswell_gtt_driver = {
+ .gen = 6,
+ .setup = i9xx_setup,
+ .cleanup = gen6_cleanup,
+ .write_entry = haswell_write_entry,
+ .dma_mask_size = 40,
+ .check_flags = gen6_check_flags,
+ .chipset_flush = i9xx_chipset_flush,
+};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
@@ -1379,7 +1422,6 @@ static const struct intel_gtt_driver valleyview_gtt_driver = {
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
- .chipset_flush = i9xx_chipset_flush,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
@@ -1490,19 +1532,77 @@ static const struct intel_gtt_driver_description {
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
- "Haswell", &sandybridge_gtt_driver },
- { PCI_DEVICE_ID_INTEL_HASWELL_SDV,
- "Haswell", &sandybridge_gtt_driver },
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
+ "Haswell", &haswell_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
+ "Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1523,14 +1623,32 @@ static int find_gmch(u16 device)
return 1;
}
-int intel_gmch_probe(struct pci_dev *pdev,
- struct agp_bridge_data *bridge)
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge)
{
int i, mask;
- intel_private.driver = NULL;
+
+ /*
+ * Can be called from the fake agp driver but also directly from
+ * drm/i915.ko. Hence we need to check whether everything is set up
+ * already.
+ */
+ if (intel_private.driver) {
+ intel_private.refcount++;
+ return 1;
+ }
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
- if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+ if (gpu_pdev) {
+ if (gpu_pdev->device ==
+ intel_gtt_chipsets[i].gmch_chip_id) {
+ intel_private.pcidev = pci_dev_get(gpu_pdev);
+ intel_private.driver =
+ intel_gtt_chipsets[i].gtt_driver;
+
+ break;
+ }
+ } else if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
intel_private.driver =
intel_gtt_chipsets[i].gtt_driver;
break;
@@ -1540,13 +1658,17 @@ int intel_gmch_probe(struct pci_dev *pdev,
if (!intel_private.driver)
return 0;
- bridge->driver = &intel_fake_agp_driver;
- bridge->dev_private_data = &intel_private;
- bridge->dev = pdev;
+ intel_private.refcount++;
+
+ if (bridge) {
+ bridge->driver = &intel_fake_agp_driver;
+ bridge->dev_private_data = &intel_private;
+ bridge->dev = bridge_pdev;
+ }
- intel_private.bridge_dev = pci_dev_get(pdev);
+ intel_private.bridge_dev = pci_dev_get(bridge_pdev);
- dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+ dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
mask = intel_private.driver->dma_mask_size;
if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
@@ -1556,11 +1678,11 @@ int intel_gmch_probe(struct pci_dev *pdev,
pci_set_consistent_dma_mask(intel_private.pcidev,
DMA_BIT_MASK(mask));
- /*if (bridge->driver == &intel_810_driver)
- return 1;*/
+ if (intel_gtt_init() != 0) {
+ intel_gmch_remove();
- if (intel_gtt_init() != 0)
return 0;
+ }
return 1;
}
@@ -1579,12 +1701,16 @@ void intel_gtt_chipset_flush(void)
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);
-void intel_gmch_remove(struct pci_dev *pdev)
+void intel_gmch_remove(void)
{
+ if (--intel_private.refcount)
+ return;
+
if (intel_private.pcidev)
pci_dev_put(intel_private.pcidev);
if (intel_private.bridge_dev)
pci_dev_put(intel_private.bridge_dev);
+ intel_private.driver = NULL;
}
EXPORT_SYMBOL(intel_gmch_remove);
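With the reworked probe/remove interface, the second expected user is drm/i915, as the comment in intel_gmch_probe() notes; a hedged sketch of that caller (not part of this hunk, field names assumed):

	/*
	 * Assumed drm/i915-side usage (sketch only): probe the GTT
	 * against the GPU's own pci_dev and skip the fake-AGP bridge
	 * registration by passing a NULL bridge.
	 */
	if (!intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL))
		return -EIO;
	/* ... and on teardown ... */
	intel_gmch_remove();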
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index b01d67328243..7c0d391996b5 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -73,6 +73,20 @@ config HW_RANDOM_ATMEL
If unsure, say Y.
+config HW_RANDOM_BCM63XX
+ tristate "Broadcom BCM63xx Random Number Generator support"
+ depends on HW_RANDOM && BCM63XX
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on the Broadcom BCM63xx SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bcm63xx-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on HW_RANDOM && X86_32 && PCI
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 8d6d173b65e6..39a757ca15b6 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
new file mode 100644
index 000000000000..aec6a4277caa
--- /dev/null
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -0,0 +1,175 @@
+/*
+ * Broadcom BCM63xx Random Number Generator support
+ *
+ * Copyright (C) 2011, Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2009, Broadcom Corporation
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+
+#include <bcm63xx_io.h>
+#include <bcm63xx_regs.h>
+
+struct bcm63xx_rng_priv {
+ struct clk *clk;
+ void __iomem *regs;
+};
+
+#define to_rng_priv(rng) ((struct bcm63xx_rng_priv *)rng->priv)
+
+static int bcm63xx_rng_init(struct hwrng *rng)
+{
+ struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
+ u32 val;
+
+ val = bcm_readl(priv->regs + RNG_CTRL);
+ val |= RNG_EN;
+ bcm_writel(val, priv->regs + RNG_CTRL);
+
+ return 0;
+}
+
+static void bcm63xx_rng_cleanup(struct hwrng *rng)
+{
+ struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
+ u32 val;
+
+ val = bcm_readl(priv->regs + RNG_CTRL);
+ val &= ~RNG_EN;
+ bcm_writel(val, priv->regs + RNG_CTRL);
+}
+
+static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
+{
+ struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
+
+ return bcm_readl(priv->regs + RNG_STAT) & RNG_AVAIL_MASK;
+}
+
+static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
+
+ *data = bcm_readl(priv->regs + RNG_DATA);
+
+ return 4;
+}
+
+static int __devinit bcm63xx_rng_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct clk *clk;
+ int ret;
+ struct bcm63xx_rng_priv *priv;
+ struct hwrng *rng;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no iomem resource\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "no memory for private structure\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+ if (!rng) {
+ dev_err(&pdev->dev, "no memory for rng structure\n");
+ ret = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ platform_set_drvdata(pdev, rng);
+ rng->priv = (unsigned long)priv;
+ rng->name = pdev->name;
+ rng->init = bcm63xx_rng_init;
+ rng->cleanup = bcm63xx_rng_cleanup;
+ rng->data_present = bcm63xx_rng_data_present;
+ rng->data_read = bcm63xx_rng_data_read;
+
+ clk = clk_get(&pdev->dev, "ipsec");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock for device\n");
+ ret = PTR_ERR(clk);
+ goto out_free_rng;
+ }
+
+ priv->clk = clk;
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ resource_size(r), pdev->name)) {
+ dev_err(&pdev->dev, "request mem failed\n");
+ ret = -ENOMEM;
+ goto out_free_rng;
+ }
+
+ priv->regs = devm_ioremap_nocache(&pdev->dev, r->start,
+ resource_size(r));
+ if (!priv->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto out_free_rng;
+ }
+
+ clk_enable(clk);
+
+ ret = hwrng_register(rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rng device\n");
+ goto out_clk_disable;
+ }
+
+ dev_info(&pdev->dev, "registered RNG driver\n");
+
+ return 0;
+
+out_clk_disable:
+ clk_disable(clk);
+out_free_rng:
+ platform_set_drvdata(pdev, NULL);
+ kfree(rng);
+out_free_priv:
+ kfree(priv);
+out:
+ return ret;
+}
+
+static int __devexit bcm63xx_rng_remove(struct platform_device *pdev)
+{
+ struct hwrng *rng = platform_get_drvdata(pdev);
+ struct bcm63xx_rng_priv *priv = to_rng_priv(rng);
+
+ hwrng_unregister(rng);
+ clk_disable(priv->clk);
+ kfree(priv);
+ kfree(rng);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver bcm63xx_rng_driver = {
+ .probe = bcm63xx_rng_probe,
+ .remove = __devexit_p(bcm63xx_rng_remove),
+ .driver = {
+ .name = "bcm63xx-rng",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(bcm63xx_rng_driver);
+
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_DESCRIPTION("Broadcom BCM63xx RNG driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index d706bd0e9e80..4fbdceb6f773 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -160,7 +160,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int omap_rng_suspend(struct device *dev)
{
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 723725bbb96b..5708299507d0 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -55,6 +55,7 @@ static void register_buffer(u8 *buf, size_t size)
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
+ int ret;
if (!busy) {
busy = true;
@@ -65,7 +66,9 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
if (!wait)
return 0;
- wait_for_completion(&have_data);
+ ret = wait_for_completion_killable(&have_data);
+ if (ret < 0)
+ return ret;
busy = false;
@@ -85,7 +88,7 @@ static struct hwrng virtio_hwrng = {
.read = virtio_read,
};
-static int virtrng_probe(struct virtio_device *vdev)
+static int probe_common(struct virtio_device *vdev)
{
int err;
@@ -103,13 +106,37 @@ static int virtrng_probe(struct virtio_device *vdev)
return 0;
}
-static void __devexit virtrng_remove(struct virtio_device *vdev)
+static void remove_common(struct virtio_device *vdev)
{
vdev->config->reset(vdev);
+ busy = false;
hwrng_unregister(&virtio_hwrng);
vdev->config->del_vqs(vdev);
}
+static int virtrng_probe(struct virtio_device *vdev)
+{
+ return probe_common(vdev);
+}
+
+static void __devexit virtrng_remove(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+}
+
+#ifdef CONFIG_PM
+static int virtrng_freeze(struct virtio_device *vdev)
+{
+ remove_common(vdev);
+ return 0;
+}
+
+static int virtrng_restore(struct virtio_device *vdev)
+{
+ return probe_common(vdev);
+}
+#endif
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_RNG, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -121,6 +148,10 @@ static struct virtio_driver virtio_rng_driver = {
.id_table = id_table,
.probe = virtrng_probe,
.remove = __devexit_p(virtrng_remove),
+#ifdef CONFIG_PM
+ .freeze = virtrng_freeze,
+ .restore = virtrng_restore,
+#endif
};
static int __init init(void)
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 8b78750f1efe..845f97fd1832 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -283,7 +283,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
vdata->flags = flags;
vdata->type = type;
spin_lock_init(&vdata->lock);
- vdata->refcnt = ATOMIC_INIT(1);
+ atomic_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a754733..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
* The current exported interfaces for gathering environmental noise
* from the devices are:
*
+ * void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq);
+ * void add_interrupt_randomness(int irq, int irq_flags);
* void add_disk_randomness(struct gendisk *disk);
*
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
* add_input_randomness() uses the input layer interrupt timing, as well as
* the event type information from the hardware.
*
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool. Note that not all interrupts are good
- * sources of randomness! For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
*
* add_disk_randomness() uses what amounts to the seek time of block
* layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +253,8 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/kmemcheck.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -256,8 +263,12 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
+#include <asm/irq_regs.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
/*
* Configuration information
*/
@@ -266,6 +277,8 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
/*
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
@@ -420,8 +433,10 @@ struct entropy_store {
/* read-write data: */
spinlock_t lock;
unsigned add_ptr;
+ unsigned input_rotate;
int entropy_count;
- int input_rotate;
+ int entropy_total;
+ unsigned int initialized:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
.pool = nonblocking_pool_data
};
+static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
/*
* This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
- int nbytes, __u8 out[64])
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
{
- static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
const char *bytes = in;
__u32 w;
- unsigned long flags;
- /* Taps are constant, so we can load them without holding r->lock. */
tap1 = r->poolinfo->tap1;
tap2 = r->poolinfo->tap2;
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- spin_lock_irqsave(&r->lock, flags);
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ smp_rmb();
+ input_rotate = ACCESS_ONCE(r->input_rotate);
+ i = ACCESS_ONCE(r->add_ptr);
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
input_rotate += i ? 7 : 14;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ ACCESS_ONCE(r->input_rotate) = input_rotate;
+ ACCESS_ONCE(r->add_ptr) = i;
+ smp_wmb();
if (out)
for (j = 0; j < 16; j++)
((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+ _mix_pool_bytes(r, in, nbytes, out);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ unsigned long flags;
+ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+ spin_lock_irqsave(&r->lock, flags);
+ _mix_pool_bytes(r, in, nbytes, out);
spin_unlock_irqrestore(&r->lock, flags);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+ __u32 pool[4];
+ unsigned long last;
+ unsigned short count;
+ unsigned char rotate;
+ unsigned char last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for a 128-bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
- mix_pool_bytes_extract(r, in, bytes, NULL);
+ const char *bytes = in;
+ __u32 w;
+ unsigned i = f->count;
+ unsigned input_rotate = f->rotate;
+
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+ f->pool[(i + 1) & 3];
+ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate += (i++ & 3) ? 7 : 14;
+ }
+ f->count = i;
+ f->rotate = input_rotate;
}
/*
@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- unsigned long flags;
- int entropy_count;
+ int entropy_count, orig;
if (!nbits)
return;
- spin_lock_irqsave(&r->lock, flags);
-
DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
- entropy_count = r->entropy_count;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
entropy_count += nbits;
+
if (entropy_count < 0) {
DEBUG_ENT("negative entropy/overflow\n");
entropy_count = 0;
} else if (entropy_count > r->poolinfo->POOLBITS)
entropy_count = r->poolinfo->POOLBITS;
- r->entropy_count = entropy_count;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+
+ if (!r->initialized && nbits > 0) {
+ r->entropy_total += nbits;
+ if (r->entropy_total > 128)
+ r->initialized = 1;
+ }
+
+ trace_credit_entropy_bits(r->name, nbits, entropy_count,
+ r->entropy_total, _RET_IP_);
/* should we wake readers? */
if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
- spin_unlock_irqrestore(&r->lock, flags);
}
/*********************************************************************
@@ -572,42 +636,24 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
-{
- irq_timer_state[irq] = state;
-}
-
-#else
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
- return desc->timer_rand_state;
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy; it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
{
- struct irq_desc *desc;
+ unsigned long time = get_cycles() ^ jiffies;
- desc = irq_to_desc(irq);
-
- desc->timer_rand_state = state;
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
}
-#endif
+EXPORT_SYMBOL(add_device_randomness);
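An illustrative caller of the new interface (the call site is not part of this hunk; feeding a MAC address is an assumption consistent with the comment above):

/*
 * Sketch only: a driver seeding the pools with device-unique but
 * low-entropy identifiers at probe time.  No entropy is credited;
 * this merely perturbs the initial pool state.
 */
static void example_seed_pools(struct net_device *dev)
{
	add_device_randomness(dev->dev_addr, dev->addr_len);
	add_device_randomness(dev->name, strlen(dev->name));
}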
static struct timer_rand_state input_timer_state;
@@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
goto out;
sample.jiffies = jiffies;
-
- /* Use arch random value, fall back to cycles */
- if (!arch_get_random_int(&sample.cycles))
- sample.cycles = get_cycles();
-
+ sample.cycles = get_cycles();
sample.num = num;
- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
/*
* Calculate number of bits of randomness we probably added.
@@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
{
- struct timer_rand_state *state;
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ __u32 input[4], cycles = get_cycles();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+ if (regs) {
+ __u64 ip = instruction_pointer(regs);
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
- state = get_timer_rand_state(irq);
+ fast_mix(fast_pool, input, sizeof(input));
- if (state == NULL)
+ if ((fast_pool->count & 1023) &&
+ !time_after(now, fast_pool->last + HZ))
return;
- DEBUG_ENT("irq event %d\n", irq);
- add_timer_randomness(state, 0x100 + irq);
+ fast_pool->last = now;
+
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+ /*
+ * If we don't have a valid cycle counter, and we see
+ * back-to-back timer interrupts, then skip giving credit for
+ * any entropy.
+ */
+ if (cycles == 0) {
+ if (irq_flags & __IRQF_TIMER) {
+ if (fast_pool->last_timer_intr)
+ return;
+ fast_pool->last_timer_intr = 1;
+ } else
+ fast_pool->last_timer_intr = 0;
+ }
+ credit_entropy_bits(r, 1);
}
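The extra irq_flags argument implies a matching change at the call site, which lies outside this hunk; the assumed caller is the generic IRQ core, roughly:

	/*
	 * Assumed call site (sketch, not part of this diff), at the end
	 * of handle_irq_event_percpu() in kernel/irq/handle.c, so the
	 * timer-interrupt heuristic above sees the action flags:
	 */
	add_interrupt_randomness(irq, flags);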
#ifdef CONFIG_BLOCK
@@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
*/
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- __u32 tmp[OUTPUT_POOL_WORDS];
+ __u32 tmp[OUTPUT_POOL_WORDS];
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
@@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes);
+ mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
}
@@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ union {
+ __u32 w[5];
+ unsigned long l[LONGS(EXTRACT_SIZE)];
+ } hash;
+ __u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
+ unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */
- sha_init(hash);
+ sha_init(hash.w);
+ spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
@@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+ spin_unlock_irqrestore(&r->lock, flags);
/*
* To avoid duplicates, we atomically extract a portion of the
* pool while mixing, and hash one final time.
*/
- sha_transform(hash, extract, workspace);
+ sha_transform(hash.w, extract, workspace);
memset(extract, 0, sizeof(extract));
memset(workspace, 0, sizeof(workspace));
@@ -852,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* pattern, we fold it in half. Thus, we always feed back
* twice as much data as we output.
*/
- hash[0] ^= hash[3];
- hash[1] ^= hash[4];
- hash[2] ^= rol32(hash[2], 16);
- memcpy(out, hash, EXTRACT_SIZE);
- memset(hash, 0, sizeof(hash));
+ hash.w[0] ^= hash.w[3];
+ hash.w[1] ^= hash.w[4];
+ hash.w[2] ^= rol32(hash.w[2], 16);
+
+ /*
+ * If we have an architectural hardware random number
+ * generator, mix that in, too.
+ */
+ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+ unsigned long v;
+ if (!arch_get_random_long(&v))
+ break;
+ hash.l[i] ^= v;
+ }
+
+ memcpy(out, &hash, EXTRACT_SIZE);
+ memset(&hash, 0, sizeof(hash));
}
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+ size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
extract_buf(r, tmp);
if (fips_enabled) {
+ unsigned long flags;
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
/*
* This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
*/
void get_random_bytes(void *buf, int nbytes)
{
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
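A minimal example of the exported interface described in the comment above (illustrative only, not part of this patch):

/* Sketch only: drawing 128 bits of key material from the nonblocking pool. */
static void example_generate_key(void)
{
	u8 key[16];

	get_random_bytes(key, sizeof(key));
	/* key[] now holds data suitable for key generation or seeding */
}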
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA). So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
+{
char *p = buf;
+ trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes) {
unsigned long v;
int chunk = min(nbytes, (int)sizeof(unsigned long));
-
+
if (!arch_get_random_long(&v))
break;
@@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
nbytes -= chunk;
}
- extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+ if (nbytes)
+ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
}
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL(get_random_bytes_arch);
+
/*
* init_std_data - initialize pool with system data
@@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
static void init_std_data(struct entropy_store *r)
{
int i;
- ktime_t now;
- unsigned long flags;
+ ktime_t now = ktime_get_real();
+ unsigned long rv;
- spin_lock_irqsave(&r->lock, flags);
r->entropy_count = 0;
- spin_unlock_irqrestore(&r->lock, flags);
-
- now = ktime_get_real();
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
- if (!arch_get_random_long(&flags))
+ r->entropy_total = 0;
+ mix_pool_bytes(r, &now, sizeof(now), NULL);
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+ if (!arch_get_random_long(&rv))
break;
- mix_pool_bytes(r, &flags, sizeof(flags));
+ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
}
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
}
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
static int rand_initialize(void)
{
init_std_data(&input_pool);
@@ -992,24 +1114,6 @@ static int rand_initialize(void)
}
module_init(rand_initialize);
-void rand_initialize_irq(int irq)
-{
- struct timer_rand_state *state;
-
- state = get_timer_rand_state(irq);
-
- if (state)
- return;
-
- /*
- * If kzalloc returns null, we just won't use that entropy
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state)
- set_timer_rand_state(irq, state);
-}
-
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
@@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(r, buf, bytes, NULL);
cond_resched();
}
@@ -1279,6 +1383,7 @@ static int proc_do_uuid(ctl_table *table, int write,
}
static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+extern ctl_table random_table[];
ctl_table random_table[] = {
{
.procname = "poolsize",
@@ -1344,7 +1449,7 @@ late_initcall(random_int_secret_init);
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
__u32 *hash;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 89682fa8801e..c4be3519a587 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
+#ifdef CONFIG_PM_SLEEP
static int tpm_tis_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev)
return tpm_pm_resume(dev);
}
+#endif
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3f99b9099658..7f0b5ca78516 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -25,7 +25,6 @@ menu "Common Clock Framework"
config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
- depends on COMMON_CLK
select DEBUG_FS
---help---
Creates a directory hierarchy in debugfs for visualizing the clk
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c87fdd710560..efdfd009c270 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -465,6 +465,9 @@ static void __clk_disable(struct clk *clk)
if (!clk)
return;
+ if (WARN_ON(IS_ERR(clk)))
+ return;
+
if (WARN_ON(clk->enable_count == 0))
return;
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 540795cd0760..d9279385304d 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock;
#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
/*
- * The MFPGT timers on the CS5536 provide us with suitable timers to use
+ * The MFGPT timers on the CS5536 provide us with suitable timers to use
* as clock event sources - not as good as a HPET or APIC, but certainly
* better than the PIT. This isn't a general purpose MFGPT driver, but
* a simplified one designed specifically to act as a clock event source.
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void)
timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
if (!timer) {
- printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n");
+ printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n");
return -ENODEV;
}
cs5535_event_clock = timer;
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
index a88331644ebf..e64c253cb169 100644
--- a/drivers/cpufreq/exynos5250-cpufreq.c
+++ b/drivers/cpufreq/exynos5250-cpufreq.c
@@ -65,20 +65,20 @@ static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = {
* Clock divider value for following
* { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 }
*/
- { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1700 MHz - N/A */
- { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1600 MHz - N/A */
- { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1500 MHz - N/A */
- { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1400 MHz */
- { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
- { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
- { 0, 2, 7, 7, 5, 1, 2, 0 }, /* 1100 MHz */
- { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
- { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
- { 0, 2, 7, 7, 3, 1, 1, 0 }, /* 800 MHz */
+ { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */
+ { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */
+ { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */
+ { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 1400 MHz */
+ { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */
+ { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */
+ { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */
+ { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */
+ { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */
+ { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */
{ 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */
- { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 600 MHz */
+ { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */
{ 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */
- { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 400 MHz */
+ { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */
{ 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */
{ 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */
};
@@ -87,9 +87,9 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
/* Clock divider value for following
* { COPY, HPM }
*/
- { 0, 2 }, /* 1700 MHz - N/A */
- { 0, 2 }, /* 1600 MHz - N/A */
- { 0, 2 }, /* 1500 MHz - N/A */
+ { 0, 2 }, /* 1700 MHz */
+ { 0, 2 }, /* 1600 MHz */
+ { 0, 2 }, /* 1500 MHz */
{ 0, 2 }, /* 1400 MHz */
{ 0, 2 }, /* 1300 MHz */
{ 0, 2 }, /* 1200 MHz */
@@ -106,10 +106,10 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = {
};
static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
- (0), /* 1700 MHz - N/A */
- (0), /* 1600 MHz - N/A */
- (0), /* 1500 MHz - N/A */
- (0), /* 1400 MHz */
+ ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */
+ ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */
+ ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */
+ ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */
((325 << 16) | (6 << 8) | 0), /* 1300 MHz */
((200 << 16) | (4 << 8) | 0), /* 1200 MHz */
((275 << 16) | (6 << 8) | 0), /* 1100 MHz */
@@ -126,9 +126,10 @@ static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = {
/* ASV group voltage table */
static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = {
- 0, 0, 0, 0, 0, 0, 0, /* 1700 MHz ~ 1100 MHz Not supported */
- 1175000, 1125000, 1075000, 1050000, 1000000,
- 950000, 925000, 925000, 900000
+ 1300000, 1250000, 1225000, 1200000, 1150000,
+ 1125000, 1100000, 1075000, 1050000, 1025000,
+ 1012500, 1000000, 975000, 950000, 937500,
+ 925000
};
static void set_clkdiv(unsigned int div_index)
@@ -248,15 +249,7 @@ static void __init set_volt_table(void)
{
unsigned int i;
- exynos5250_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L1].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L2].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L3].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L4].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L5].frequency = CPUFREQ_ENTRY_INVALID;
- exynos5250_freq_table[L6].frequency = CPUFREQ_ENTRY_INVALID;
-
- max_support_idx = L7;
+ max_support_idx = L0;
for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++)
exynos5250_volt_table[i] = asv_voltage_5250[i];
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 17fa04d08be9..b47034e650a5 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
- if (atomic_inc_return(&freq_table_users) == 1)
+ if (!freq_table)
result = opp_init_cpufreq_table(mpu_dev, &freq_table);
if (result) {
@@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
goto fail_ck;
}
+ atomic_inc_return(&freq_table_users);
+
result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (result)
goto fail_table;
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index cdc02ac8f41a..503996a94a6a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void)
mem_resource->address_length);
if (pcch_virt_addr == NULL) {
pr_debug("probe: could not map shared mem region\n");
+ ret = -ENOMEM;
goto out_free;
}
pcch_hdr = pcch_virt_addr;
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 78a666d1e5f5..a76b689e553b 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -18,3 +18,6 @@ config CPU_IDLE_GOV_MENU
bool
depends on CPU_IDLE && NO_HZ
default y
+
+config ARCH_NEEDS_CPU_IDLE_COUPLED
+ def_bool n
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 5634f88379df..38c8f69f30cf 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -3,3 +3,4 @@
#
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
+obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
new file mode 100644
index 000000000000..3265844839bf
--- /dev/null
+++ b/drivers/cpuidle/coupled.c
@@ -0,0 +1,727 @@
+/*
+ * coupled.c - helper functions to enter the same idle state on multiple cpus
+ *
+ * Copyright (c) 2011 Google, Inc.
+ *
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu.h>
+#include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "cpuidle.h"
+
+/**
+ * DOC: Coupled cpuidle states
+ *
+ * On some ARM SMP SoCs (OMAP4460, Tegra 2, and probably more), the
+ * cpus cannot be independently powered down, either due to
+ * sequencing restrictions (on Tegra 2, cpu 0 must be the last to
+ * power down), or due to HW bugs (on OMAP4460, a cpu powering up
+ * will corrupt the gic state unless the other cpu runs a
+ * workaround). Each cpu has a power state that it can enter without
+ * coordinating with the other cpu (usually Wait For Interrupt, or
+ * WFI), and one or more "coupled" power states that affect blocks
+ * shared between the cpus (L2 cache, interrupt controller, and
+ * sometimes the whole SoC). Entering a coupled power state must
+ * be tightly controlled on both cpus.
+ *
+ * This file implements a solution where each cpu will wait in the
+ * WFI state until all cpus are ready to enter a coupled state, at
+ * which point the coupled state function will be called on all
+ * cpus at approximately the same time.
+ *
+ * Once all cpus are ready to enter idle, they are woken by an smp
+ * cross call. At this point, there is a chance that one of the
+ * cpus will find work to do, and choose not to enter idle. A
+ * final pass is needed to guarantee that all cpus will call the
+ * power state enter function at the same time. During this pass,
+ * each cpu will increment the ready counter, and continue once the
+ * ready counter matches the number of online coupled cpus. If any
+ * cpu exits idle, the other cpus will decrement their counter and
+ * retry.
+ *
+ * requested_state stores the deepest coupled idle state each cpu
+ * is ready for. It is assumed that the states are indexed from
+ * shallowest (highest power, lowest exit latency) to deepest
+ * (lowest power, highest exit latency). The requested_state
+ * variable is not locked. It is only written from the cpu that
+ * it stores (or by the on/offlining cpu if that cpu is offline),
+ * and only read after all the cpus are ready for the coupled idle
+ * state and are no longer updating it.
+ *
+ * Two counters are packed into a single atomic variable,
+ * ready_waiting_counts. The waiting count, in the low bits, tracks
+ * the number of cpus that are in the waiting loop, in the ready
+ * loop, or in the coupled idle state. The ready count, in the high
+ * bits, tracks the number of cpus that are in the ready loop or in
+ * the coupled idle state. online_count tracks the online cpus in the set.
+ *
+ * To use coupled cpuidle states, a cpuidle driver must:
+ *
+ * Set struct cpuidle_device.coupled_cpus to the mask of all
+ * coupled cpus, usually the same as cpu_possible_mask if all cpus
+ * are part of the same cluster. The coupled_cpus mask must be
+ * set in the struct cpuidle_device for each cpu.
+ *
+ * Set struct cpuidle_device.safe_state to a state that is not a
+ * coupled state. This is usually WFI.
+ *
+ * Set CPUIDLE_FLAG_COUPLED in struct cpuidle_state.flags for each
+ * state that affects multiple cpus.
+ *
+ * Provide a struct cpuidle_state.enter function for each state
+ * that affects multiple cpus. This function is guaranteed to be
+ * called on all cpus at approximately the same time. The driver
+ * should ensure that the cpus all abort together if any cpu tries
+ * to abort once the function is called. The function should return
+ * with interrupts still disabled.
+ */
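[Editor's illustration, not part of the patch: a minimal sketch of a driver following the checklist above, for a hypothetical two-state SoC. The mysoc_* names, the latencies, and the per-cpu device variable are invented for illustration; only the coupled_cpus, safe_state_index and CPUIDLE_FLAG_COUPLED usage mirrors the rules documented in this comment.]

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpuidle_device, mysoc_idle_dev);

/* Coupled state: called on all coupled cpus at approximately the same time. */
static int mysoc_enter_coupled(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	/* power down the shared blocks here; return with irqs still disabled */
	return index;
}

/* Safe state: per-cpu only, no coordination needed. */
static int mysoc_enter_wfi(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	/* the architecture-specific wait-for-interrupt would go here */
	return index;
}

static struct cpuidle_driver mysoc_idle_driver = {
	.name		= "mysoc_idle",
	.state_count	= 2,
	.states = {
		[0] = {
			.name		= "WFI",
			.exit_latency	= 1,
			.flags		= CPUIDLE_FLAG_TIME_VALID,
			.enter		= mysoc_enter_wfi,
		},
		[1] = {
			.name		= "C2",
			.exit_latency	= 500,
			.flags		= CPUIDLE_FLAG_TIME_VALID |
					  CPUIDLE_FLAG_COUPLED,
			.enter		= mysoc_enter_coupled,
		},
	},
};

static int __init mysoc_cpuidle_init(void)
{
	int cpu, ret;

	ret = cpuidle_register_driver(&mysoc_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(mysoc_idle_dev, cpu);

		dev->cpu = cpu;
		dev->safe_state_index = 0;	/* plain WFI, not coupled */
		cpumask_copy(&dev->coupled_cpus, cpu_possible_mask);
		ret = cpuidle_register_device(dev);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(mysoc_cpuidle_init);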
+
+/**
+ * struct cpuidle_coupled - data for set of cpus that share a coupled idle state
+ * @coupled_cpus: mask of cpus that are part of the coupled set
+ * @requested_state: array of requested states for cpus in the coupled set
+ * @ready_waiting_counts: combined count of cpus in ready or waiting loops
+ * @online_count: count of cpus that are online
+ * @refcnt: reference count of cpuidle devices that are using this struct
+ * @prevent: flag to prevent coupled idle while a cpu is hotplugging
+ */
+struct cpuidle_coupled {
+ cpumask_t coupled_cpus;
+ int requested_state[NR_CPUS];
+ atomic_t ready_waiting_counts;
+ int online_count;
+ int refcnt;
+ int prevent;
+};
+
+#define WAITING_BITS 16
+#define MAX_WAITING_CPUS (1 << WAITING_BITS)
+#define WAITING_MASK (MAX_WAITING_CPUS - 1)
+#define READY_MASK (~WAITING_MASK)
+
+#define CPUIDLE_COUPLED_NOT_IDLE (-1)
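[Editor's note: the single atomic ready_waiting_counts packs both counters, the waiting count in the low WAITING_BITS bits and the ready count in the bits above them; the helpers further down read the two fields essentially like this sketch, which assumes the defines above.]

static inline int example_waiting_count(atomic_t *rwc)
{
	return atomic_read(rwc) & WAITING_MASK;		/* low 16 bits */
}

static inline int example_ready_count(atomic_t *rwc)
{
	return atomic_read(rwc) >> WAITING_BITS;	/* remaining high bits */
}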
+
+static DEFINE_MUTEX(cpuidle_coupled_lock);
+static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
+
+/*
+ * The cpuidle_coupled_poked_mask is used to avoid calling
+ * __smp_call_function_single with the per cpu call_single_data struct already
+ * in use. This prevents a deadlock where two cpus are waiting for each
+ * other's call_single_data struct to be available.
+ */
+static cpumask_t cpuidle_coupled_poked_mask;
+
+/**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function. Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, and it will be reset to 0 before any cpu returns from it.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+ int n = dev->coupled->online_count;
+
+ smp_mb__before_atomic_inc();
+ atomic_inc(a);
+
+ while (atomic_read(a) < n)
+ cpu_relax();
+
+ if (atomic_inc_return(a) == n * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > n)
+ cpu_relax();
+}
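[Editor's illustration, hypothetical: how a coupled-state enter handler might use the barrier above so that exactly one cpu programs shared hardware only after every online coupled cpu has finished its per-cpu work. example_barrier and the choice of cpu 0 are invented for the sketch.]

static atomic_t example_barrier = ATOMIC_INIT(0);

static int example_enter_coupled(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	/* per-cpu save/prepare work runs concurrently on every coupled cpu */

	/* wait until every online coupled cpu has reached this point */
	cpuidle_coupled_parallel_barrier(dev, &example_barrier);

	/* after the barrier, one cpu can safely touch the shared block */
	if (dev->cpu == 0)
		; /* e.g. cut power to the shared L2/interrupt controller */

	return index;
}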
+
+/**
+ * cpuidle_state_is_coupled - check if a state is part of a coupled set
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @state: index of the target state in drv->states
+ *
+ * Returns true if the target state is coupled with cpus besides this one
+ */
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+}
+
+/**
+ * cpuidle_coupled_set_ready - mark a cpu as ready
+ * @coupled: the struct coupled that contains the current cpu
+ */
+static inline void cpuidle_coupled_set_ready(struct cpuidle_coupled *coupled)
+{
+ atomic_add(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_set_not_ready - mark a cpu as not ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Decrements the ready counter, unless the ready (and thus the waiting) counter
+ * is equal to the number of online cpus. Prevents a race where one cpu
+ * decrements the waiting counter and then re-increments it just before another
+ * cpu has decremented its ready counter, leading to the ready counter going
+ * down from the number of online cpus without going through the coupled idle
+ * state.
+ *
+ * Returns 0 if the counter was decremented successfully, -EINVAL if the ready
+ * counter was equal to the number of online cpus.
+ */
+static
+inline int cpuidle_coupled_set_not_ready(struct cpuidle_coupled *coupled)
+{
+ int all;
+ int ret;
+
+ all = coupled->online_count | (coupled->online_count << WAITING_BITS);
+ ret = atomic_add_unless(&coupled->ready_waiting_counts,
+ -MAX_WAITING_CPUS, all);
+
+ return ret ? 0 : -EINVAL;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_ready - check if no cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the ready loop.
+ */
+static inline int cpuidle_coupled_no_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == 0;
+}
+
+/**
+ * cpuidle_coupled_cpus_ready - check if all cpus in a coupled set are ready
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the ready loop
+ */
+static inline bool cpuidle_coupled_cpus_ready(struct cpuidle_coupled *coupled)
+{
+ int r = atomic_read(&coupled->ready_waiting_counts) >> WAITING_BITS;
+ return r == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_cpus_waiting - check if all cpus in a coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all cpus coupled to this target state are in the wait loop
+ */
+static inline bool cpuidle_coupled_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == coupled->online_count;
+}
+
+/**
+ * cpuidle_coupled_no_cpus_waiting - check if no cpus in coupled set are waiting
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns true if all of the cpus in a coupled set are out of the waiting loop.
+ */
+static inline int cpuidle_coupled_no_cpus_waiting(struct cpuidle_coupled *coupled)
+{
+ int w = atomic_read(&coupled->ready_waiting_counts) & WAITING_MASK;
+ return w == 0;
+}
+
+/**
+ * cpuidle_coupled_get_state - determine the deepest idle state
+ * @dev: struct cpuidle_device for this cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Returns the deepest idle state that all coupled cpus can enter
+ */
+static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
+ struct cpuidle_coupled *coupled)
+{
+ int i;
+ int state = INT_MAX;
+
+ /*
+ * Read barrier ensures that read of requested_state is ordered after
+ * reads of ready_count. Matches the write barrier in
+ * cpuidle_coupled_set_waiting().
+ */
+ smp_rmb();
+
+ for_each_cpu_mask(i, coupled->coupled_cpus)
+ if (cpu_online(i) && coupled->requested_state[i] < state)
+ state = coupled->requested_state[i];
+
+ return state;
+}
+
+static void cpuidle_coupled_poked(void *info)
+{
+ int cpu = (unsigned long)info;
+ cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+}
+
+/**
+ * cpuidle_coupled_poke - wake up a cpu that may be waiting
+ * @cpu: target cpu
+ *
+ * Ensures that the target cpu exits its waiting idle state (if it is in it)
+ * and will see updates to waiting_count before it re-enters its waiting idle
+ * state.
+ *
+ * If cpuidle_coupled_poked_mask is already set for the target cpu, that cpu
+ * either has or will soon have a pending IPI that will wake it out of idle,
+ * or it is currently processing the IPI and is not in idle.
+ */
+static void cpuidle_coupled_poke(int cpu)
+{
+ struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
+
+ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+ __smp_call_function_single(cpu, csd, 0);
+}
+
+/**
+ * cpuidle_coupled_poke_others - wake up all other cpus that may be waiting
+ * @this_cpu: the cpu calling this function
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Calls cpuidle_coupled_poke on all other online cpus.
+ */
+static void cpuidle_coupled_poke_others(int this_cpu,
+ struct cpuidle_coupled *coupled)
+{
+ int cpu;
+
+ for_each_cpu_mask(cpu, coupled->coupled_cpus)
+ if (cpu != this_cpu && cpu_online(cpu))
+ cpuidle_coupled_poke(cpu);
+}
+
+/**
+ * cpuidle_coupled_set_waiting - mark this cpu as in the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ * @next_state: the index in drv->states of the requested state for this cpu
+ *
+ * Updates the requested idle state for the specified cpuidle device,
+ * poking all coupled cpus out of idle if necessary to let them see the new
+ * state.
+ */
+static void cpuidle_coupled_set_waiting(int cpu,
+ struct cpuidle_coupled *coupled, int next_state)
+{
+ int w;
+
+ coupled->requested_state[cpu] = next_state;
+
+ /*
+ * If this is the last cpu to enter the waiting state, poke
+ * all the other cpus out of their waiting state so they can
+ * enter a deeper state. This can race with one of the cpus
+ * exiting the waiting state due to an interrupt and
+ * decrementing waiting_count, see comment below.
+ *
+ * The atomic_inc_return provides a write barrier to order the write
+ * to requested_state with the later write that increments ready_count.
+ */
+ w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
+ if (w == coupled->online_count)
+ cpuidle_coupled_poke_others(cpu, coupled);
+}
+
+/**
+ * cpuidle_coupled_set_not_waiting - mark this cpu as leaving the wait loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Removes the requested idle state for the specified cpuidle device.
+ */
+static void cpuidle_coupled_set_not_waiting(int cpu,
+ struct cpuidle_coupled *coupled)
+{
+ /*
+ * Decrementing waiting count can race with incrementing it in
+ * cpuidle_coupled_set_waiting, but that's OK. Worst case, some
+ * cpus will increment ready_count and then spin until they
+ * notice that this cpu has cleared its requested_state.
+ */
+ atomic_dec(&coupled->ready_waiting_counts);
+
+ coupled->requested_state[cpu] = CPUIDLE_COUPLED_NOT_IDLE;
+}
+
+/**
+ * cpuidle_coupled_set_done - mark this cpu as leaving the ready loop
+ * @cpu: the current cpu
+ * @coupled: the struct coupled that contains the current cpu
+ *
+ * Marks this cpu as no longer in the ready and waiting loops. Decrements
+ * the waiting count first to prevent another cpu looping back in and seeing
+ * this cpu as waiting just before it exits idle.
+ */
+static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
+{
+ cpuidle_coupled_set_not_waiting(cpu, coupled);
+ atomic_sub(MAX_WAITING_CPUS, &coupled->ready_waiting_counts);
+}
+
+/**
+ * cpuidle_coupled_clear_pokes - spin until the poke interrupt is processed
+ * @cpu: this cpu
+ *
+ * Turns on interrupts and spins until any outstanding poke interrupts have
+ * been processed and the poke bit has been cleared.
+ *
+ * Other interrupts may also be processed while interrupts are enabled, so
+ * need_resched() must be tested after turning interrupts off again to make sure
+ * the interrupt didn't schedule work that should take the cpu out of idle.
+ *
+ * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ */
+static int cpuidle_coupled_clear_pokes(int cpu)
+{
+ local_irq_enable();
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+ cpu_relax();
+ local_irq_disable();
+
+ return need_resched() ? -EINTR : 0;
+}
+
+/**
+ * cpuidle_enter_state_coupled - attempt to enter a state with coupled cpus
+ * @dev: struct cpuidle_device for the current cpu
+ * @drv: struct cpuidle_driver for the platform
+ * @next_state: index of the requested state in drv->states
+ *
+ * Coordinate with coupled cpus to enter the target state. This is a two
+ * stage process. In the first stage, the cpus are operating independently,
+ * and may call into cpuidle_enter_state_coupled at completely different times.
+ * To save as much power as possible, the first cpus to call this function will
+ * go to an intermediate state (the cpuidle_device's safe state), and wait for
+ * all the other cpus to call this function. Once all coupled cpus are idle,
+ * the second stage will start. Each coupled cpu will spin until all cpus have
+ * guaranteed that they will enter the target state.
+ *
+ * This function must be called with interrupts disabled. It may enable
+ * interrupts while preparing for idle, and it will always return with
+ * interrupts enabled.
+ */
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ int entered_state = -1;
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (!coupled)
+ return -EINVAL;
+
+ while (coupled->prevent) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ local_irq_enable();
+ return entered_state;
+ }
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ /* Read barrier ensures online_count is read after prevent is cleared */
+ smp_rmb();
+
+ cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+
+retry:
+ /*
+ * Wait for all coupled cpus to be idle, using the deepest state
+ * allowed for a single cpu.
+ */
+ while (!cpuidle_coupled_cpus_waiting(coupled)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ if (coupled->prevent) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ entered_state = cpuidle_enter_state(dev, drv,
+ dev->safe_state_index);
+ }
+
+ if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
+ goto out;
+ }
+
+ /*
+ * All coupled cpus are probably idle. There is a small chance that
+ * one of the other cpus just became active. Increment the ready count,
+ * and spin until all coupled cpus have incremented the counter. Once a
+ * cpu has incremented the ready counter, it cannot abort idle and must
+ * spin until either all cpus have incremented the ready counter, or
+ * another cpu leaves idle and decrements the waiting counter.
+ */
+
+ cpuidle_coupled_set_ready(coupled);
+ while (!cpuidle_coupled_cpus_ready(coupled)) {
+ /* Check if any other cpus bailed out of idle. */
+ if (!cpuidle_coupled_cpus_waiting(coupled))
+ if (!cpuidle_coupled_set_not_ready(coupled))
+ goto retry;
+
+ cpu_relax();
+ }
+
+ /* all cpus have acked the coupled state */
+ next_state = cpuidle_coupled_get_state(dev, coupled);
+
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
+
+ cpuidle_coupled_set_done(dev->cpu, coupled);
+
+out:
+ /*
+ * Normal cpuidle states are expected to return with irqs enabled.
+ * That leads to an inefficiency where a cpu receiving an interrupt
+ * that brings it out of idle will process that interrupt before
+ * exiting the idle enter function and decrementing ready_count. All
+ * other cpus will need to spin waiting for the cpu that is processing
+ * the interrupt. If the driver returns with interrupts disabled,
+ * all other cpus will loop back into the safe idle state instead of
+ * spinning, saving power.
+ *
+ * Calling local_irq_enable here allows coupled states to return with
+ * interrupts disabled, but won't cause problems for drivers that
+ * exit with interrupts enabled.
+ */
+ local_irq_enable();
+
+ /*
+ * Wait until all coupled cpus have exited idle. There is no risk that
+ * a cpu exits and re-enters the ready state because this cpu has
+ * already decremented its waiting_count.
+ */
+ while (!cpuidle_coupled_no_cpus_ready(coupled))
+ cpu_relax();
+
+ return entered_state;
+}
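[Editor's worked example, derived from the helpers above: how the packed waiting/ready counts evolve for a coupled set of two online cpus that both idle, enter the coupled state together, and then wake up.]

/*
 * event                                       waiting  ready
 * cpu0 cpuidle_coupled_set_waiting               1       0
 * cpu1 cpuidle_coupled_set_waiting               2       0   (cpu1 pokes cpu0)
 * cpu0 cpuidle_coupled_set_ready                 2       1
 * cpu1 cpuidle_coupled_set_ready                 2       2   (both call the coupled enter)
 * cpu0 cpuidle_coupled_set_done                  1       1
 * cpu1 cpuidle_coupled_set_done                  0       0   (ready == 0, both return)
 */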
+
+static void cpuidle_coupled_update_online_cpus(struct cpuidle_coupled *coupled)
+{
+ cpumask_t cpus;
+ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+ coupled->online_count = cpumask_weight(&cpus);
+}
+
+/**
+ * cpuidle_coupled_register_device - register a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_register_device to handle coupled idle init. Finds the
+ * cpuidle_coupled struct for this set of coupled cpus, or creates one if none
+ * exists yet.
+ */
+int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ int cpu;
+ struct cpuidle_device *other_dev;
+ struct call_single_data *csd;
+ struct cpuidle_coupled *coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return 0;
+
+ for_each_cpu_mask(cpu, dev->coupled_cpus) {
+ other_dev = per_cpu(cpuidle_devices, cpu);
+ if (other_dev && other_dev->coupled) {
+ coupled = other_dev->coupled;
+ goto have_coupled;
+ }
+ }
+
+ /* No existing coupled info found, create a new one */
+ coupled = kzalloc(sizeof(struct cpuidle_coupled), GFP_KERNEL);
+ if (!coupled)
+ return -ENOMEM;
+
+ coupled->coupled_cpus = dev->coupled_cpus;
+
+have_coupled:
+ dev->coupled = coupled;
+ if (WARN_ON(!cpumask_equal(&dev->coupled_cpus, &coupled->coupled_cpus)))
+ coupled->prevent++;
+
+ cpuidle_coupled_update_online_cpus(coupled);
+
+ coupled->refcnt++;
+
+ csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
+ csd->func = cpuidle_coupled_poked;
+ csd->info = (void *)(unsigned long)dev->cpu;
+
+ return 0;
+}
+
+/**
+ * cpuidle_coupled_unregister_device - unregister a coupled cpuidle device
+ * @dev: struct cpuidle_device for the current cpu
+ *
+ * Called from cpuidle_unregister_device to tear down coupled idle. Removes the
+ * cpu from the coupled idle set, and frees the cpuidle_coupled struct if
+ * this was the last cpu in the set.
+ */
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+ struct cpuidle_coupled *coupled = dev->coupled;
+
+ if (cpumask_empty(&dev->coupled_cpus))
+ return;
+
+ if (!--coupled->refcnt)
+ kfree(coupled);
+ dev->coupled = NULL;
+}
+
+/**
+ * cpuidle_coupled_prevent_idle - prevent cpus from entering a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Disables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_prevent_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /* Force all cpus out of the waiting loop. */
+ coupled->prevent++;
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+ while (!cpuidle_coupled_no_cpus_waiting(coupled))
+ cpu_relax();
+}
+
+/**
+ * cpuidle_coupled_allow_idle - allows cpus to enter a coupled state
+ * @coupled: the struct coupled that contains the cpu that is changing state
+ *
+ * Enables coupled cpuidle on a coupled set of cpus. Used to ensure that
+ * cpu_online_mask doesn't change while cpus are coordinating coupled idle.
+ */
+static void cpuidle_coupled_allow_idle(struct cpuidle_coupled *coupled)
+{
+ int cpu = get_cpu();
+
+ /*
+ * Write barrier ensures readers see the new online_count when they
+ * see prevent == 0.
+ */
+ smp_wmb();
+ coupled->prevent--;
+ /* Force cpus out of the prevent loop. */
+ cpuidle_coupled_poke_others(cpu, coupled);
+ put_cpu();
+}
+
+/**
+ * cpuidle_coupled_cpu_notify - notifier called during hotplug transitions
+ * @nb: notifier block
+ * @action: hotplug transition
+ * @hcpu: target cpu number
+ *
+ * Called when a cpu is brought online or taken offline using hotplug.
+ * Updates the coupled cpu set appropriately.
+ */
+static int cpuidle_coupled_cpu_notify(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ mutex_lock(&cpuidle_lock);
+
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (!dev || !dev->coupled)
+ goto out;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_DOWN_PREPARE:
+ cpuidle_coupled_prevent_idle(dev->coupled);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ cpuidle_coupled_update_online_cpus(dev->coupled);
+ /* Fall through */
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ cpuidle_coupled_allow_idle(dev->coupled);
+ break;
+ }
+
+out:
+ mutex_unlock(&cpuidle_lock);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpuidle_coupled_cpu_notifier = {
+ .notifier_call = cpuidle_coupled_cpu_notify,
+};
+
+static int __init cpuidle_coupled_init(void)
+{
+ return register_cpu_notifier(&cpuidle_coupled_cpu_notifier);
+}
+core_initcall(cpuidle_coupled_init);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d6a533e68e0f..e28f6ea46f1a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -92,6 +92,34 @@ int cpuidle_play_dead(void)
}
/**
+ * cpuidle_enter_state - enter the state and update stats
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ * @next_state: index into drv->states of the state to enter
+ */
+int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ int next_state)
+{
+ int entered_state;
+
+ entered_state = cpuidle_enter_ops(dev, drv, next_state);
+
+ if (entered_state >= 0) {
+ /* Update cpuidle counters */
+ /* This can be moved to within driver enter routine
+ * but that results in multiple copies of same code.
+ */
+ dev->states_usage[entered_state].time +=
+ (unsigned long long)dev->last_residency;
+ dev->states_usage[entered_state].usage++;
+ } else {
+ dev->last_residency = 0;
+ }
+
+ return entered_state;
+}
+
+/**
* cpuidle_idle_call - the main idle loop
*
* NOTE: no locks or semaphores should be used here
@@ -113,15 +141,6 @@ int cpuidle_idle_call(void)
if (!dev || !dev->enabled)
return -EBUSY;
-#if 0
- /* shows regressions, re-enable for 2.6.29 */
- /*
- * run any timers that can be run now, at this point
- * before calculating the idle duration etc.
- */
- hrtimer_peek_ahead_timers();
-#endif
-
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
@@ -132,23 +151,15 @@ int cpuidle_idle_call(void)
trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle_rcuidle(next_state, dev->cpu);
- entered_state = cpuidle_enter_ops(dev, drv, next_state);
+ if (cpuidle_state_is_coupled(dev, drv, next_state))
+ entered_state = cpuidle_enter_state_coupled(dev, drv,
+ next_state);
+ else
+ entered_state = cpuidle_enter_state(dev, drv, next_state);
trace_power_end_rcuidle(dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
- if (entered_state >= 0) {
- /* Update cpuidle counters */
- /* This can be moved to within driver enter routine
- * but that results in multiple copies of same code.
- */
- dev->states_usage[entered_state].time +=
- (unsigned long long)dev->last_residency;
- dev->states_usage[entered_state].usage++;
- } else {
- dev->last_residency = 0;
- }
-
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev, entered_state);
@@ -299,6 +310,9 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
int ret, i;
struct cpuidle_driver *drv = cpuidle_get_driver();
+ if (!dev)
+ return -EINVAL;
+
if (dev->enabled)
return 0;
if (!drv || !cpuidle_curr_governor)
@@ -383,8 +397,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
- if (!dev)
- return -EINVAL;
if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
@@ -392,13 +404,25 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(cpu_dev))) {
- module_put(cpuidle_driver->owner);
- return ret;
- }
+ ret = cpuidle_add_sysfs(cpu_dev);
+ if (ret)
+ goto err_sysfs;
+
+ ret = cpuidle_coupled_register_device(dev);
+ if (ret)
+ goto err_coupled;
dev->registered = 1;
return 0;
+
+err_coupled:
+ cpuidle_remove_sysfs(cpu_dev);
+ wait_for_completion(&dev->kobj_unregister);
+err_sysfs:
+ list_del(&dev->device_list);
+ per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ module_put(cpuidle_driver->owner);
+ return ret;
}
/**
@@ -409,6 +433,9 @@ int cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
+ if (!dev)
+ return -EINVAL;
+
mutex_lock(&cpuidle_lock);
if ((ret = __cpuidle_register_device(dev))) {
@@ -448,6 +475,8 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ cpuidle_coupled_unregister_device(dev);
+
cpuidle_resume_and_unlock();
module_put(cpuidle_driver->owner);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 7db186685c27..76e7f696ad8c 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -14,6 +14,8 @@ extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;
extern int cpuidle_disabled(void);
+extern int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
/* idle loop */
extern void cpuidle_install_idle_handler(void);
@@ -30,4 +32,34 @@ extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
extern int cpuidle_add_sysfs(struct device *dev);
extern void cpuidle_remove_sysfs(struct device *dev);
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state);
+int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state);
+int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+#else
+static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int state)
+{
+ return false;
+}
+
+static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int next_state)
+{
+ return -1;
+}
+
+static inline int cpuidle_coupled_register_device(struct cpuidle_device *dev)
+{
+ return 0;
+}
+
+static inline void cpuidle_coupled_unregister_device(struct cpuidle_device *dev)
+{
+}
+#endif
+
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 53c8c51d5881..93d14070141a 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg)
head = ACCESS_ONCE(jrp->head);
- spin_lock_bh(&jrp->outlock);
+ spin_lock(&jrp->outlock);
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
@@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg)
jrp->tail = tail;
}
- spin_unlock_bh(&jrp->outlock);
+ spin_unlock(&jrp->outlock);
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
@@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
return -EIO;
}
- spin_lock(&jrp->inplock);
+ spin_lock_bh(&jrp->inplock);
head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
- spin_unlock(&jrp->inplock);
+ spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
}
@@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
- spin_unlock(&jrp->inplock);
+ spin_unlock_bh(&jrp->inplock);
return 0;
}
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index c9c4befb5a8d..df14358d7fa1 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev)
/*
* We must wait at least 256 Pk_clk cycles between two reads of the rng.
*/
- dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) *
- 256;
+ dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC,
+ dev->pk_clk_freq) * 256;
dev->rng.name = dev->name;
dev->rng.data_present = hifn_rng_data_present,
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 0d4071754352..21c1a87032b7 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -1127,6 +1127,10 @@ err_unreg_ecb:
crypto_unregister_alg(&mv_aes_alg_ecb);
err_irq:
free_irq(irq, cp);
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
err_thread:
kthread_stop(cp->queue_th);
err_unmap_sram:
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 67b97c5fd859..a8bd0310f8fe 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1610,8 +1610,7 @@ static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
sprintf(p->irq_name, "%s-%d", irq_name, index);
- return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
- p->irq_name, p);
+ return request_irq(p->irq, handler, 0, p->irq_name, p);
}
static struct kmem_cache *queue_cache[2];
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d45cf1bcbde5..d06ea2950dd9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
bool "ARM PrimeCell PL080 or PL081 support"
depends on ARM_AMBA && EXPERIMENTAL
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Platform has a PL08x DMAC device
which can provide DMA engine support
@@ -269,6 +270,7 @@ config DMA_SA11X0
tristate "SA-11x0 DMA support"
depends on ARCH_SA1100
select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
help
Support the DMA engine found on Intel StrongARM SA-1100 and
SA-1110 SoCs. This DMA engine can only be used with on-chip
@@ -284,9 +286,18 @@ config MMP_TDMA
Say Y here if you enabled MMP ADMA, otherwise say N.
+config DMA_OMAP
+ tristate "OMAP DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+
config DMA_ENGINE
bool
+config DMA_VIRTUAL_CHANNELS
+ tristate
+
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 640356add0a3..4cf6b128ab9a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932d..6fbeebb9486f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
#include <asm/hardware/pl080.h>
#include "dmaengine.h"
+#include "virt-dma.h"
#define DRIVER_NAME "pl08xdmac"
static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
};
/**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+ dma_addr_t addr;
+ u8 maxwidth;
+ u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+ unsigned int id;
+ void __iomem *base;
+ spinlock_t lock;
+ struct pl08x_dma_chan *serving;
+ bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ size_t len;
+ struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+ struct virt_dma_desc vd;
+ struct list_head dsg_list;
+ dma_addr_t llis_bus;
+ struct pl08x_lli *llis_va;
+ /* Default cctl value for LLIs */
+ u32 cctl;
+ /*
+ * Settings to be put into the physical channel when we
+ * trigger this txd. Other registers are in llis_va[0].
+ */
+ u32 ccfg;
+ bool done;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+ PL08X_CHAN_IDLE,
+ PL08X_CHAN_RUNNING,
+ PL08X_CHAN_PAUSED,
+ PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrapped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @cfg: the runtime slave DMA configuration for this channel
+ * @at: active transaction on this channel (protected, like the rest of
+ * the channel data, by the vc.lock of the wrapped virtual channel)
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+ struct virt_dma_chan vc;
+ struct pl08x_phy_chan *phychan;
+ const char *name;
+ const struct pl08x_channel_data *cd;
+ struct dma_slave_config cfg;
+ struct pl08x_txd *at;
+ struct pl08x_driver_data *host;
+ enum pl08x_dma_chan_state state;
+ bool slave;
+ int signal;
+ unsigned mux_use;
+};
+
+/**
* struct pl08x_driver_data - the local state holder for the PL08x
* @slave: slave engine for this instance
* @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
* @pd: platform data passed in from the platform/machine
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
struct pl08x_platform_data *pd;
struct pl08x_phy_chan *phy_chans;
struct dma_pool *pool;
- int pool_ctr;
u8 lli_buses;
u8 mem_buses;
- spinlock_t lock;
};
/*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
- return container_of(chan, struct pl08x_dma_chan, chan);
+ return container_of(chan, struct pl08x_dma_chan, vc.chan);
}
static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
- return container_of(tx, struct pl08x_txd, tx);
+ return container_of(tx, struct pl08x_txd, vd.tx);
+}
+
+/*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX. One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+ int ret;
+
+ if (plchan->mux_use++ == 0 && pd->get_signal) {
+ ret = pd->get_signal(plchan->cd);
+ if (ret < 0) {
+ plchan->mux_use = 0;
+ return ret;
+ }
+
+ plchan->signal = ret;
+ }
+ return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+ const struct pl08x_platform_data *pd = plchan->host->pd;
+
+ if (plchan->signal >= 0) {
+ WARN_ON(plchan->mux_use == 0);
+
+ if (--plchan->mux_use == 0 && pd->put_signal) {
+ pd->put_signal(plchan->cd, plchan->signal);
+ plchan->signal = -1;
+ }
+ }
}
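[Editor's sketch, hypothetical board code: the get_signal/put_signal callbacks that the mux helpers above reach through pl08x_platform_data. The board_* names and the use of min_signal as a fixed routing are assumptions, not part of the patch.]

static int board_get_signal(const struct pl08x_channel_data *cd)
{
	/* route the peripheral described by cd onto a free DMA request line */
	return cd->min_signal;		/* fixed one-to-one routing */
}

static void board_put_signal(const struct pl08x_channel_data *cd, int signal)
{
	/* nothing to tear down for fixed routing */
}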
/*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
* been set when the LLIs were constructed. Poke them into the hardware
* and start the transfer.
*/
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_phy_chan *phychan = plchan->phychan;
- struct pl08x_lli *lli = &txd->llis_va[0];
+ struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_lli *lli;
u32 val;
+ list_del(&txd->vd.node);
+
plchan->at = txd;
/* Wait for channel inactive */
while (pl08x_phy_channel_busy(phychan))
cpu_relax();
+ lli = &txd->llis_va[0];
+
dev_vdbg(&pl08x->adev->dev,
"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
struct pl08x_phy_chan *ch;
struct pl08x_txd *txd;
- unsigned long flags;
size_t bytes = 0;
- spin_lock_irqsave(&plchan->lock, flags);
ch = plchan->phychan;
txd = plchan->at;
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
}
}
- /* Sum up all queued transactions */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *txdi;
- list_for_each_entry(txdi, &plchan->pend_list, node) {
- struct pl08x_sg *dsg;
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- }
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
return bytes;
}
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
- ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
break;
}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL;
}
- pm_runtime_get_sync(&pl08x->adev->dev);
return ch;
}
+/* Mark the physical channel as free. Note, this write is atomic. */
static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- unsigned long flags;
+ ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel. When successful, assign it to
+ * this virtual channel, and initiate the next descriptor. The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_phy_chan *ch;
- spin_lock_irqsave(&ch->lock, flags);
+ ch = pl08x_get_phy_channel(pl08x, plchan);
+ if (!ch) {
+ dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+ plchan->state = PL08X_CHAN_WAITING;
+ return;
+ }
- /* Stop the channel and clear its interrupts */
- pl08x_terminate_phy_chan(pl08x, ch);
+ dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
- pm_runtime_put(&pl08x->adev->dev);
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
- /* Mark it as free */
- ch->serving = NULL;
- spin_unlock_irqrestore(&ch->lock, flags);
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+ struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+
+ dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+ ch->id, plchan->name);
+
+ /*
+ * We do this without taking the lock; we're really only concerned
+ * about whether this pointer is NULL or not, and we're guaranteed
+ * that this will only be called when it _already_ is non-NULL.
+ */
+ ch->serving = plchan;
+ plchan->phychan = ch;
+ plchan->state = PL08X_CHAN_RUNNING;
+ pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+ struct pl08x_driver_data *pl08x = plchan->host;
+ struct pl08x_dma_chan *p, *next;
+
+ retry:
+ next = NULL;
+
+ /* Find a waiting virtual channel for the next transfer. */
+ list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+
+ if (!next) {
+ list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+ if (p->state == PL08X_CHAN_WAITING) {
+ next = p;
+ break;
+ }
+ }
+
+ /* Ensure that the physical channel is stopped */
+ pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+ if (next) {
+ bool success;
+
+ /*
+ * Eww. We know this isn't going to deadlock
+ * but lockdep probably doesn't.
+ */
+ spin_lock(&next->vc.lock);
+ /* Re-check the state now that we have the lock */
+ success = next->state == PL08X_CHAN_WAITING;
+ if (success)
+ pl08x_phy_reassign_start(plchan->phychan, next);
+ spin_unlock(&next->vc.lock);
+
+ /* If the state changed, try to find another channel */
+ if (!success)
+ goto retry;
+ } else {
+ /* No more jobs, so free up the physical channel */
+ pl08x_put_phy_channel(pl08x, plchan->phychan);
+ }
+
+ plchan->phychan = NULL;
+ plchan->state = PL08X_CHAN_IDLE;
}
/*
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- pl08x->pool_ctr++;
-
bd.txd = txd;
bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return num_llis;
}
-/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
struct pl08x_sg *dsg, *_dsg;
- /* Free the LLI */
if (txd->llis_va)
dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
- pl08x->pool_ctr--;
-
list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
list_del(&dsg->node);
kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
kfree(txd);
}
-static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
- struct pl08x_dma_chan *plchan)
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
- struct pl08x_txd *txdi = NULL;
- struct pl08x_txd *next;
-
- if (!list_empty(&plchan->pend_list)) {
- list_for_each_entry_safe(txdi,
- next, &plchan->pend_list, node) {
- list_del(&txdi->node);
- pl08x_free_txd(pl08x, txdi);
+ struct device *dev = txd->vd.tx.chan->device->dev;
+ struct pl08x_sg *dsg;
+
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
}
}
+ if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ else
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
+ }
}
-/*
- * The DMA ENGINE API
- */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+static void pl08x_desc_free(struct virt_dma_desc *vd)
{
- return 0;
-}
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
-static void pl08x_free_chan_resources(struct dma_chan *chan)
-{
+ if (!plchan->slave)
+ pl08x_unmap_buffers(txd);
+
+ if (!txd->done)
+ pl08x_release_mux(plchan);
+
+ pl08x_free_txd(plchan->host, txd);
}
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+ struct pl08x_dma_chan *plchan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_phy_chan *ch;
- int ret;
-
- /* Check if we already have a channel */
- if (plchan->phychan) {
- ch = plchan->phychan;
- goto got_channel;
- }
+ LIST_HEAD(head);
+ struct pl08x_txd *txd;
- ch = pl08x_get_phy_channel(pl08x, plchan);
- if (!ch) {
- /* No physical channel available, cope with it */
- dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
- return -EBUSY;
- }
+ vchan_get_all_descriptors(&plchan->vc, &head);
- /*
- * OK we have a physical channel: for memcpy() this is all we
- * need, but for slaves the physical signals may be muxed!
- * Can the platform allow us to use this channel?
- */
- if (plchan->slave && pl08x->pd->get_signal) {
- ret = pl08x->pd->get_signal(plchan);
- if (ret < 0) {
- dev_dbg(&pl08x->adev->dev,
- "unable to use physical channel %d for transfer on %s due to platform restrictions\n",
- ch->id, plchan->name);
- /* Release physical channel & return */
- pl08x_put_phy_channel(pl08x, ch);
- return -EBUSY;
- }
- ch->signal = ret;
+ while (!list_empty(&head)) {
+ txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+ list_del(&txd->vd.node);
+ pl08x_desc_free(&txd->vd);
}
-
- plchan->phychan = ch;
- dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
- ch->id,
- ch->signal,
- plchan->name);
-
-got_channel:
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_MEM_TO_DEV)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_DEV_TO_MEM)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
- plchan->phychan_hold++;
-
- return 0;
}
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
- struct pl08x_driver_data *pl08x = plchan->host;
-
- if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
- pl08x->pd->put_signal(plchan);
- plchan->phychan->signal = -1;
- }
- pl08x_put_phy_channel(pl08x, plchan->phychan);
- plchan->phychan = NULL;
+ return 0;
}
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+static void pl08x_free_chan_resources(struct dma_chan *chan)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
- struct pl08x_txd *txd = to_pl08x_txd(tx);
- unsigned long flags;
- dma_cookie_t cookie;
-
- spin_lock_irqsave(&plchan->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- /* Put this onto the pending list */
- list_add_tail(&txd->node, &plchan->pend_list);
-
- /*
- * If there was no physical channel available for this memcpy,
- * stack the request up and indicate that the channel is waiting
- * for a free physical channel.
- */
- if (!plchan->slave && !plchan->phychan) {
- /* Do this memcpy whenever there is a channel ready */
- plchan->state = PL08X_CHAN_WAITING;
- plchan->waiting = txd;
- } else {
- plchan->phychan_hold--;
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return cookie;
+ /* Ensure all queued descriptors are freed */
+ vchan_free_chan_resources(to_virt_chan(chan));
}
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct virt_dma_desc *vd;
+ unsigned long flags;
enum dma_status ret;
+ size_t bytes = 0;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_SUCCESS)
return ret;
/*
+ * There's no point calculating the residue if there's
+ * no txstate to store the value.
+ */
+ if (!txstate) {
+ if (plchan->state == PL08X_CHAN_PAUSED)
+ ret = DMA_PAUSED;
+ return ret;
+ }
+
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret != DMA_SUCCESS) {
+ vd = vchan_find_desc(&plchan->vc, cookie);
+ if (vd) {
+ /* On the issued list, so hasn't been processed yet */
+ struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+ struct pl08x_sg *dsg;
+
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
+ } else {
+ bytes = pl08x_getbytes_chan(plchan);
+ }
+ }
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+ /*
* This cookie is not complete yet.
* Get number of bytes left in the active transactions and queue
*/
- dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+ dma_set_residue(txstate, bytes);
- if (plchan->state == PL08X_CHAN_PAUSED)
- return DMA_PAUSED;
+ if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+ ret = DMA_PAUSED;
/* Whether waiting or running, we're in progress */
- return DMA_IN_PROGRESS;
+ return ret;
}
/* PrimeCell DMA extension */
@@ -1080,38 +1280,14 @@ static u32 pl08x_burst(u32 maxburst)
return burst_sizes[i].reg;
}
-static int dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
+ enum dma_slave_buswidth addr_width, u32 maxburst)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- struct pl08x_driver_data *pl08x = plchan->host;
- enum dma_slave_buswidth addr_width;
- u32 width, burst, maxburst;
- u32 cctl = 0;
-
- if (!plchan->slave)
- return -EINVAL;
-
- /* Transfer direction */
- plchan->runtime_direction = config->direction;
- if (config->direction == DMA_MEM_TO_DEV) {
- addr_width = config->dst_addr_width;
- maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_DEV_TO_MEM) {
- addr_width = config->src_addr_width;
- maxburst = config->src_maxburst;
- } else {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien transfer direction\n");
- return -EINVAL;
- }
+ u32 width, burst, cctl = 0;
width = pl08x_width(addr_width);
- if (width == ~0) {
- dev_err(&pl08x->adev->dev,
- "bad runtime_config: alien address width\n");
- return -EINVAL;
- }
+ if (width == ~0)
+ return ~0;
cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
@@ -1128,28 +1304,23 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- plchan->device_fc = config->device_fc;
+ return pl08x_cctl(cctl);
+}
- if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
- plchan->src_addr = config->src_addr;
- plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
- pl08x_select_bus(plchan->cd->periph_buses,
- pl08x->mem_buses);
- } else {
- plchan->dst_addr = config->dst_addr;
- plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
- pl08x_select_bus(pl08x->mem_buses,
- plchan->cd->periph_buses);
- }
+static int dma_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
- dev_dbg(&pl08x->adev->dev,
- "configured channel %s (%s) for %s, data width %d, "
- "maxburst %d words, LE, CCTL=0x%08x\n",
- dma_chan_name(chan), plchan->name,
- (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
- addr_width,
- maxburst,
- cctl);
+ if (!plchan->slave)
+ return -EINVAL;
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ plchan->cfg = *config;
return 0;
}
@@ -1163,95 +1334,19 @@ static void pl08x_issue_pending(struct dma_chan *chan)
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&plchan->lock, flags);
- /* Something is already active, or we're waiting for a channel... */
- if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
- spin_unlock_irqrestore(&plchan->lock, flags);
- return;
- }
-
- /* Take the first element in the queue and execute it */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *next;
-
- next = list_first_entry(&plchan->pend_list,
- struct pl08x_txd,
- node);
- list_del(&next->node);
- plchan->state = PL08X_CHAN_RUNNING;
-
- pl08x_start_txd(plchan, next);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
+ if (vchan_issue_pending(&plchan->vc)) {
+ if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
+ pl08x_phy_alloc_and_start(plchan);
}
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-}
-
-static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
- struct pl08x_txd *txd)
-{
- struct pl08x_driver_data *pl08x = plchan->host;
- unsigned long flags;
- int num_llis, ret;
-
- num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
- if (!num_llis) {
- spin_lock_irqsave(&plchan->lock, flags);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&plchan->lock, flags);
-
- /*
- * See if we already have a physical channel allocated,
- * else this is the time to try to get one.
- */
- ret = prep_phy_channel(plchan, txd);
- if (ret) {
- /*
- * No physical channel was available.
- *
- * memcpy transfers can be sorted out at submission time.
- *
- * Slave transfers may have been denied due to platform
- * channel muxing restrictions. Since there is no guarantee
- * that this will ever be resolved, and the signal must be
- * acquired AFTER acquiring the physical channel, we will let
- * them be NACK:ed with -EBUSY here. The drivers can retry
- * the prep() call if they are eager on doing this using DMA.
- */
- if (plchan->slave) {
- pl08x_free_txd_list(pl08x, plchan);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
- return -EBUSY;
- }
- } else
- /*
- * Else we're all set, paused and ready to roll, status
- * will switch to PL08X_CHAN_RUNNING when we call
- * issue_pending(). If there is something running on the
- * channel already we don't change its state.
- */
- if (plchan->state == PL08X_CHAN_IDLE)
- plchan->state = PL08X_CHAN_PAUSED;
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- return 0;
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
-static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
- unsigned long flags)
+static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
- dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
- txd->tx.flags = flags;
- txd->tx.tx_submit = pl08x_tx_submit;
- INIT_LIST_HEAD(&txd->node);
INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
@@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_sg *dsg;
int ret;
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev,
"%s no memory for descriptor\n", __func__);
@@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
}
list_add_tail(&dsg->node, &txd->dsg_list);
- txd->direction = DMA_NONE;
dsg->src_addr = src;
dsg->dst_addr = dest;
dsg->len = len;
/* Set platform data for m2m */
txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl = pl08x->pd->memcpy_channel.cctl &
+ txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
/* Both to be incremented or the code will break */
@@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
pl08x->mem_buses);
- ret = pl08x_prep_channel_resources(plchan, txd);
- if (ret)
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_free_txd(pl08x, txd);
return NULL;
+ }
- return &txd->tx;
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
@@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_txd *txd;
struct pl08x_sg *dsg;
struct scatterlist *sg;
+ enum dma_slave_buswidth addr_width;
dma_addr_t slave_addr;
int ret, tmp;
+ u8 src_buses, dst_buses;
+ u32 maxburst, cctl;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
__func__, sg_dma_len(sgl), plchan->name);
- txd = pl08x_get_txd(plchan, flags);
+ txd = pl08x_get_txd(plchan);
if (!txd) {
dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
return NULL;
}
- if (direction != plchan->runtime_direction)
- dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
- "the direction configured for the PrimeCell\n",
- __func__);
-
/*
* Set up addresses; the PrimeCell-configured address
* will take precedence, since this may configure the
* channel target address dynamically at runtime.
*/
- txd->direction = direction;
-
if (direction == DMA_MEM_TO_DEV) {
- txd->cctl = plchan->dst_cctl;
- slave_addr = plchan->dst_addr;
+ cctl = PL080_CONTROL_SRC_INCR;
+ slave_addr = plchan->cfg.dst_addr;
+ addr_width = plchan->cfg.dst_addr_width;
+ maxburst = plchan->cfg.dst_maxburst;
+ src_buses = pl08x->mem_buses;
+ dst_buses = plchan->cd->periph_buses;
} else if (direction == DMA_DEV_TO_MEM) {
- txd->cctl = plchan->src_cctl;
- slave_addr = plchan->src_addr;
+ cctl = PL080_CONTROL_DST_INCR;
+ slave_addr = plchan->cfg.src_addr;
+ addr_width = plchan->cfg.src_addr_width;
+ maxburst = plchan->cfg.src_maxburst;
+ src_buses = plchan->cd->periph_buses;
+ dst_buses = pl08x->mem_buses;
} else {
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev,
@@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
return NULL;
}
- if (plchan->device_fc)
+ cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
+ if (cctl == ~0) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev,
+ "DMA slave configuration botched?\n");
+ return NULL;
+ }
+
+ txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+
+ if (plchan->cfg.device_fc)
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
@@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ ret = pl08x_request_mux(plchan);
+ if (ret < 0) {
+ pl08x_free_txd(pl08x, txd);
+ dev_dbg(&pl08x->adev->dev,
+ "unable to mux for transfer on %s due to platform restrictions\n",
+ plchan->name);
+ return NULL;
+ }
+
+ dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n",
+ plchan->signal, plchan->name);
+
+ /* Assign the flow control signal to this channel */
+ if (direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else
+ txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
for_each_sg(sgl, sg, sg_len, tmp) {
dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
if (!dsg) {
+ pl08x_release_mux(plchan);
pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
__func__);
@@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
}
- ret = pl08x_prep_channel_resources(plchan, txd);
- if (ret)
+ ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+ if (!ret) {
+ pl08x_release_mux(plchan);
+ pl08x_free_txd(pl08x, txd);
return NULL;
+ }
- return &txd->tx;
+ return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
}
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* Anything succeeds on channels with no physical allocation and
* no queued transfers.
*/
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
if (!plchan->phychan && !plchan->at) {
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return 0;
}
@@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
plchan->state = PL08X_CHAN_IDLE;
if (plchan->phychan) {
- pl08x_terminate_phy_chan(pl08x, plchan->phychan);
-
/*
* Mark physical channel as free and free any slave
* signal
*/
- release_phy_channel(plchan);
- plchan->phychan_hold = 0;
+ pl08x_phy_free(plchan);
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
- pl08x_free_txd(pl08x, plchan->at);
+ pl08x_desc_free(&plchan->at->vd);
plchan->at = NULL;
}
/* Dequeue jobs not yet fired as well */
@@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
break;
}
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return ret;
}
@@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
- struct device *dev = txd->tx.chan->device->dev;
- struct pl08x_sg *dsg;
-
- if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
- if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
- struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
- struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_txd *txd;
- unsigned long flags;
-
- spin_lock_irqsave(&plchan->lock, flags);
-
- txd = plchan->at;
- plchan->at = NULL;
-
- if (txd) {
- /* Update last completed */
- dma_cookie_complete(&txd->tx);
- }
-
- /* If a new descriptor is queued, set it up plchan->at is NULL here */
- if (!list_empty(&plchan->pend_list)) {
- struct pl08x_txd *next;
-
- next = list_first_entry(&plchan->pend_list,
- struct pl08x_txd,
- node);
- list_del(&next->node);
-
- pl08x_start_txd(plchan, next);
- } else if (plchan->phychan_hold) {
- /*
- * This channel is still in use - we have a new txd being
- * prepared and will soon be queued. Don't give up the
- * physical channel.
- */
- } else {
- struct pl08x_dma_chan *waiting = NULL;
-
- /*
- * No more jobs, so free up the physical channel
- * Free any allocated signal on slave transfers too
- */
- release_phy_channel(plchan);
- plchan->state = PL08X_CHAN_IDLE;
-
- /*
- * And NOW before anyone else can grab that free:d up
- * physical channel, see if there is some memcpy pending
- * that seriously needs to start because of being stacked
- * up while we were choking the physical channels with data.
- */
- list_for_each_entry(waiting, &pl08x->memcpy.channels,
- chan.device_node) {
- if (waiting->state == PL08X_CHAN_WAITING &&
- waiting->waiting != NULL) {
- int ret;
-
- /* This should REALLY not fail now */
- ret = prep_phy_channel(waiting,
- waiting->waiting);
- BUG_ON(ret);
- waiting->phychan_hold--;
- waiting->state = PL08X_CHAN_RUNNING;
- waiting->waiting = NULL;
- pl08x_issue_pending(&waiting->chan);
- break;
- }
- }
- }
-
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- if (txd) {
- dma_async_tx_callback callback = txd->tx.callback;
- void *callback_param = txd->tx.callback_param;
-
- /* Don't try to unmap buffers on slave channels */
- if (!plchan->slave)
- pl08x_unmap_buffers(txd);
-
- /* Free the descriptor */
- spin_lock_irqsave(&plchan->lock, flags);
- pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
-
- /* Callback to signal completion */
- if (callback)
- callback(callback_param);
- }
-}
-
static irqreturn_t pl08x_irq(int irq, void *dev)
{
struct pl08x_driver_data *pl08x = dev;
@@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
+ struct pl08x_txd *tx;
if (!plchan) {
dev_err(&pl08x->adev->dev,
@@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
continue;
}
- /* Schedule tasklet on this channel */
- tasklet_schedule(&plchan->tasklet);
+ spin_lock(&plchan->vc.lock);
+ tx = plchan->at;
+ if (tx) {
+ plchan->at = NULL;
+ /*
+ * This descriptor is done, release its mux
+ * reservation.
+ */
+ pl08x_release_mux(plchan);
+ tx->done = true;
+ vchan_cookie_complete(&tx->vd);
+
+ /*
+ * And start the next descriptor (if any),
+ * otherwise free this channel.
+ */
+ if (vchan_next_desc(&plchan->vc))
+ pl08x_start_next_txd(plchan);
+ else
+ pl08x_phy_free(plchan);
+ }
+ spin_unlock(&plchan->vc.lock);
+
mask |= (1 << i);
}
}
@@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
- u32 cctl = pl08x_cctl(chan->cd->cctl);
-
chan->slave = true;
chan->name = chan->cd->bus_id;
- chan->src_addr = chan->cd->addr;
- chan->dst_addr = chan->cd->addr;
- chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
- pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
- chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
- pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+ chan->cfg.src_addr = chan->cd->addr;
+ chan->cfg.dst_addr = chan->cd->addr;
}
/*
@@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->host = pl08x;
chan->state = PL08X_CHAN_IDLE;
+ chan->signal = -1;
if (slave) {
chan->cd = &pl08x->pd->slave_channels[i];
@@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
return -ENOMEM;
}
}
- if (chan->cd->circular_buffer) {
- dev_err(&pl08x->adev->dev,
- "channel %s: circular buffers not supported\n",
- chan->name);
- kfree(chan);
- continue;
- }
dev_dbg(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
- chan->chan.device = dmadev;
- dma_cookie_init(&chan->chan);
-
- spin_lock_init(&chan->lock);
- INIT_LIST_HEAD(&chan->pend_list);
- tasklet_init(&chan->tasklet, pl08x_tasklet,
- (unsigned long) chan);
-
- list_add_tail(&chan->chan.device_node, &dmadev->channels);
+ chan->vc.desc_free = pl08x_desc_free;
+ vchan_init(&chan->vc, dmadev);
}
dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
i, slave ? "slave" : "memcpy");
@@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev)
struct pl08x_dma_chan *next;
list_for_each_entry_safe(chan,
- next, &dmadev->channels, chan.device_node) {
- list_del(&chan->chan.device_node);
+ next, &dmadev->channels, vc.chan.device_node) {
+ list_del(&chan->vc.chan.device_node);
kfree(chan);
}
}
@@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "\nPL08x virtual memcpy channels:\n");
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
- list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
+ list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) {
seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "\nPL08x virtual slave channels:\n");
seq_printf(s, "CHANNEL:\tSTATE:\n");
seq_printf(s, "--------\t------\n");
- list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
+ list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
seq_printf(s, "%s\t\t%s\n", chan->name,
pl08x_state_str(chan->state));
}
@@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_pl08x;
}
- pm_runtime_set_active(&adev->dev);
- pm_runtime_enable(&adev->dev);
-
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
@@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_lli_pool;
}
- spin_lock_init(&pl08x->lock);
-
pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
if (!pl08x->base) {
ret = -ENOMEM;
@@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
spin_lock_init(&ch->lock);
- ch->signal = -1;
/*
* Nomadik variants can have channels that are locked
@@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
amba_part(adev), amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
- pm_runtime_put(&adev->dev);
return 0;
out_no_slave_reg:
@@ -2026,9 +2034,6 @@ out_no_ioremap:
dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-
kfree(pl08x);
out_no_pl08x:
amba_release_regions(adev);
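
The pl08x changes above follow the virt-dma conversion pattern: the driver descriptor embeds a struct virt_dma_desc, the channel embeds a struct virt_dma_chan, descriptors are handed over with vchan_tx_prep() instead of a private tx_submit, and finished descriptors are reclaimed through the vc.desc_free callback. Below is a minimal sketch of that embedding; the foo_* names are placeholders and not taken from the patch, while the vchan_* helpers are the ones the converted drivers actually call.

/*
 * Minimal sketch of the virt-dma embedding pattern used above.
 * The foo_* names are illustrative only.
 */
#include <linux/dmaengine.h>
#include <linux/slab.h>

#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;	/* wraps the dma_async_tx_descriptor */
	size_t len;
};

struct foo_chan {
	struct virt_dma_chan vc;	/* embeds struct dma_chan */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	/* vc.desc_free callback: called once the core is done with a descriptor */
	kfree(container_of(vd, struct foo_desc, vd));
}

static void foo_chan_init(struct foo_chan *c, struct dma_device *ddev)
{
	c->vc.desc_free = foo_desc_free;
	vchan_init(&c->vc, ddev);	/* sets up cookies, lists and the tasklet */
}

static struct dma_async_tx_descriptor *foo_prep(struct foo_chan *c,
						unsigned long flags)
{
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;
	/* virt-dma supplies tx_submit() and queues d->vd on the submitted list */
	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

Because cookie assignment, the submitted/issued lists and the completion handling all live in the virt-dma core, the patch can delete pl08x_tx_submit(), the per-channel pend_list and pl08x_tasklet() outright.
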
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index fcfeb3cd8d31..5084975d793c 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -172,7 +172,8 @@ struct imxdma_engine {
struct device_dma_parameters dma_parms;
struct dma_device dma_device;
void __iomem *base;
- struct clk *dma_clk;
+ struct clk *dma_ahb;
+ struct clk *dma_ipg;
spinlock_t lock;
struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
struct imxdma_channel channel[IMX_DMA_CHANNELS];
@@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev)
return 0;
}
- imxdma->dma_clk = clk_get(NULL, "dma");
- if (IS_ERR(imxdma->dma_clk))
- return PTR_ERR(imxdma->dma_clk);
- clk_enable(imxdma->dma_clk);
+ imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imxdma->dma_ipg)) {
+ ret = PTR_ERR(imxdma->dma_ipg);
+ goto err_clk;
+ }
+
+ imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imxdma->dma_ahb)) {
+ ret = PTR_ERR(imxdma->dma_ahb);
+ goto err_clk;
+ }
+
+ clk_prepare_enable(imxdma->dma_ipg);
+ clk_prepare_enable(imxdma->dma_ahb);
/* reset DMA module */
imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev)
ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
- kfree(imxdma);
- return ret;
+ goto err_enable;
}
ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
if (ret) {
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
free_irq(MX1_DMA_INT, NULL);
- kfree(imxdma);
- return ret;
+ goto err_enable;
}
}
@@ -1094,7 +1103,10 @@ err_init:
free_irq(MX1_DMA_INT, NULL);
free_irq(MX1_DMA_ERR, NULL);
}
-
+err_enable:
+ clk_disable_unprepare(imxdma->dma_ipg);
+ clk_disable_unprepare(imxdma->dma_ahb);
+err_clk:
kfree(imxdma);
return ret;
}
@@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev)
free_irq(MX1_DMA_ERR, NULL);
}
- kfree(imxdma);
+ clk_disable_unprepare(imxdma->dma_ipg);
+ clk_disable_unprepare(imxdma->dma_ahb);
+ kfree(imxdma);
return 0;
}
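
The imx-dma hunk replaces the single unnamed "dma" clock with named "ipg" and "ahb" clocks obtained through devm_clk_get(), switches to clk_prepare_enable(), and unwinds with clk_disable_unprepare() on both the error and remove paths. A hedged sketch of the same pattern follows; bar_probe() is a placeholder, not the driver's probe, and unlike the patch it also checks the clk_prepare_enable() return values.

/*
 * Sketch of the clock handling pattern the imx-dma hunk switches to:
 * named clocks via devm_clk_get() plus paired prepare/unprepare.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int bar_probe(struct platform_device *pdev)
{
	struct clk *ipg, *ahb;
	int ret;

	ipg = devm_clk_get(&pdev->dev, "ipg");	/* released automatically on detach */
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(ahb))
		return PTR_ERR(ahb);

	ret = clk_prepare_enable(ipg);		/* prepare + enable in one call */
	if (ret)
		return ret;

	ret = clk_prepare_enable(ahb);
	if (ret)
		goto err_ipg;

	/* ... register the DMA device here ... */
	return 0;

err_ipg:
	clk_disable_unprepare(ipg);		/* unwind in reverse order */
	return ret;
}
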
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
new file mode 100644
index 000000000000..ae0561826137
--- /dev/null
+++ b/drivers/dma/omap-dma.c
@@ -0,0 +1,669 @@
+/*
+ * OMAP DMAengine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+#include <plat/dma.h>
+
+struct omap_dmadev {
+ struct dma_device ddev;
+ spinlock_t lock;
+ struct tasklet_struct task;
+ struct list_head pending;
+};
+
+struct omap_chan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+
+ struct dma_slave_config cfg;
+ unsigned dma_sig;
+ bool cyclic;
+
+ int dma_ch;
+ struct omap_desc *desc;
+ unsigned sgidx;
+};
+
+struct omap_sg {
+ dma_addr_t addr;
+ uint32_t en; /* number of elements (24-bit) */
+ uint32_t fn; /* number of frames (16-bit) */
+};
+
+struct omap_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+
+ int16_t fi; /* for OMAP_DMA_SYNC_PACKET */
+ uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */
+ uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */
+ uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */
+ uint8_t periph_port; /* Peripheral port */
+
+ unsigned sglen;
+ struct omap_sg sg[0];
+};
+
+static const unsigned es_bytes[] = {
+ [OMAP_DMA_DATA_TYPE_S8] = 1,
+ [OMAP_DMA_DATA_TYPE_S16] = 2,
+ [OMAP_DMA_DATA_TYPE_S32] = 4,
+};
+
+static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct omap_dmadev, ddev);
+}
+
+static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct omap_chan, vc.chan);
+}
+
+static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct omap_desc, vd.tx);
+}
+
+static void omap_dma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct omap_desc, vd));
+}
+
+static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
+ unsigned idx)
+{
+ struct omap_sg *sg = d->sg + idx;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+ else
+ omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
+ OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
+
+ omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
+ d->sync_mode, c->dma_sig, d->sync_type);
+
+ omap_start_dma(c->dma_ch);
+}
+
+static void omap_dma_start_desc(struct omap_chan *c)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+ struct omap_desc *d;
+
+ if (!vd) {
+ c->desc = NULL;
+ return;
+ }
+
+ list_del(&vd->node);
+
+ c->desc = d = to_omap_dma_desc(&vd->tx);
+ c->sgidx = 0;
+
+ if (d->dir == DMA_DEV_TO_MEM)
+ omap_set_dma_src_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+ else
+ omap_set_dma_dest_params(c->dma_ch, d->periph_port,
+ OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
+
+ omap_dma_start_sg(c, d, 0);
+}
+
+static void omap_dma_callback(int ch, u16 status, void *data)
+{
+ struct omap_chan *c = data;
+ struct omap_desc *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ d = c->desc;
+ if (d) {
+ if (!c->cyclic) {
+ if (++c->sgidx < d->sglen) {
+ omap_dma_start_sg(c, d, c->sgidx);
+ } else {
+ omap_dma_start_desc(c);
+ vchan_cookie_complete(&d->vd);
+ }
+ } else {
+ vchan_cyclic_callback(&d->vd);
+ }
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+/*
+ * This callback schedules all pending channels. We could be more
+ * clever here by postponing allocation of the real DMA channels to
+ * this point, and freeing them when our virtual channel becomes idle.
+ *
+ * We would then need to deal with the 'all channels in use' case.
+ */
+static void omap_dma_sched(unsigned long data)
+{
+ struct omap_dmadev *d = (struct omap_dmadev *)data;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&d->lock);
+ list_splice_tail_init(&d->pending, &head);
+ spin_unlock_irq(&d->lock);
+
+ while (!list_empty(&head)) {
+ struct omap_chan *c = list_first_entry(&head,
+ struct omap_chan, node);
+
+ spin_lock_irq(&c->vc.lock);
+ list_del_init(&c->node);
+ omap_dma_start_desc(c);
+ spin_unlock_irq(&c->vc.lock);
+ }
+}
+
+static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+
+ return omap_request_dma(c->dma_sig, "DMA engine",
+ omap_dma_callback, c, &c->dma_ch);
+}
+
+static void omap_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+
+ vchan_free_chan_resources(&c->vc);
+ omap_free_dma(c->dma_ch);
+
+ dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+}
+
+static size_t omap_dma_sg_size(struct omap_sg *sg)
+{
+ return sg->en * sg->fn;
+}
+
+static size_t omap_dma_desc_size(struct omap_desc *d)
+{
+ unsigned i;
+ size_t size;
+
+ for (size = i = 0; i < d->sglen; i++)
+ size += omap_dma_sg_size(&d->sg[i]);
+
+ return size * es_bytes[d->es];
+}
+
+static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
+{
+ unsigned i;
+ size_t size, es_size = es_bytes[d->es];
+
+ for (size = i = 0; i < d->sglen; i++) {
+ size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;
+
+ if (size)
+ size += this_size;
+ else if (addr >= d->sg[i].addr &&
+ addr < d->sg[i].addr + this_size)
+ size += d->sg[i].addr + this_size - addr;
+ }
+ return size;
+}
+
+static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_SUCCESS || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
+ } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+ struct omap_desc *d = c->desc;
+ dma_addr_t pos;
+
+ if (d->dir == DMA_MEM_TO_DEV)
+ pos = omap_get_dma_src_pos(c->dma_ch);
+ else if (d->dir == DMA_DEV_TO_MEM)
+ pos = omap_get_dma_dst_pos(c->dma_ch);
+ else
+ pos = 0;
+
+ txstate->residue = omap_dma_desc_size_pos(d, pos);
+ } else {
+ txstate->residue = 0;
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+
+ return ret;
+}
+
+static void omap_dma_issue_pending(struct dma_chan *chan)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
+ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct scatterlist *sgent;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned i, j = 0, es, en, frame_bytes, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_FRAME;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_TIPB;
+
+ /*
+ * Build our scatterlist entries: each contains the address,
+ * the number of elements (EN) in each frame, and the number of
+ * frames (FN). Number of bytes for this entry = ES * EN * FN.
+ *
+ * Burst size translates to number of elements with frame sync.
+ * Note: DMA engine defines burst to be the number of dev-width
+ * transfers.
+ */
+ en = burst;
+ frame_bytes = es_bytes[es] * en;
+ for_each_sg(sgl, sgent, sglen, i) {
+ d->sg[j].addr = sg_dma_address(sgent);
+ d->sg[j].en = en;
+ d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
+ j++;
+ }
+
+ d->sglen = j;
+
+ return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
+}
+
+static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir, void *context)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ enum dma_slave_buswidth dev_width;
+ struct omap_desc *d;
+ dma_addr_t dev_addr;
+ unsigned es, sync_type;
+ u32 burst;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = c->cfg.src_addr;
+ dev_width = c->cfg.src_addr_width;
+ burst = c->cfg.src_maxburst;
+ sync_type = OMAP_DMA_SRC_SYNC;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = c->cfg.dst_addr;
+ dev_width = c->cfg.dst_addr_width;
+ burst = c->cfg.dst_maxburst;
+ sync_type = OMAP_DMA_DST_SYNC;
+ } else {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ /* Bus width translates to the element size (ES) */
+ switch (dev_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ es = OMAP_DMA_DATA_TYPE_S8;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ es = OMAP_DMA_DATA_TYPE_S32;
+ break;
+ default: /* not reached */
+ return NULL;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
+ if (!d)
+ return NULL;
+
+ d->dir = dir;
+ d->dev_addr = dev_addr;
+ d->fi = burst;
+ d->es = es;
+ d->sync_mode = OMAP_DMA_SYNC_PACKET;
+ d->sync_type = sync_type;
+ d->periph_port = OMAP_DMA_PORT_MPUI;
+ d->sg[0].addr = buf_addr;
+ d->sg[0].en = period_len / es_bytes[es];
+ d->sg[0].fn = buf_len / period_len;
+ d->sglen = 1;
+
+ if (!c->cyclic) {
+ c->cyclic = true;
+ omap_dma_link_lch(c->dma_ch, c->dma_ch);
+ omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
+ omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
+ }
+
+ if (!cpu_class_is_omap1()) {
+ omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
+ }
+
+ return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+}
+
+static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
+{
+ if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&c->cfg, cfg, sizeof(c->cfg));
+
+ return 0;
+}
+
+static int omap_dma_terminate_all(struct omap_chan *c)
+{
+ struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&c->vc.lock, flags);
+
+ /* Prevent this channel being scheduled */
+ spin_lock(&d->lock);
+ list_del_init(&c->node);
+ spin_unlock(&d->lock);
+
+ /*
+ * Stop DMA activity: we assume the callback will not be called
+ * after omap_stop_dma() returns (even if it does, it will see
+ * c->desc is NULL and exit.)
+ */
+ if (c->desc) {
+ c->desc = NULL;
+ omap_stop_dma(c->dma_ch);
+ }
+
+ if (c->cyclic) {
+ c->cyclic = false;
+ omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
+ }
+
+ vchan_get_all_descriptors(&c->vc, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
+
+ return 0;
+}
+
+static int omap_dma_pause(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_resume(struct omap_chan *c)
+{
+ /* FIXME: not supported by platform private API */
+ return -EINVAL;
+}
+
+static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = omap_dma_terminate_all(c);
+ break;
+
+ case DMA_PAUSE:
+ ret = omap_dma_pause(c);
+ break;
+
+ case DMA_RESUME:
+ ret = omap_dma_resume(c);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
+{
+ struct omap_chan *c;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ c->dma_sig = dma_sig;
+ c->vc.desc_free = omap_dma_desc_free;
+ vchan_init(&c->vc, &od->ddev);
+ INIT_LIST_HEAD(&c->node);
+
+ od->ddev.chancnt++;
+
+ return 0;
+}
+
+static void omap_dma_free(struct omap_dmadev *od)
+{
+ tasklet_kill(&od->task);
+ while (!list_empty(&od->ddev.channels)) {
+ struct omap_chan *c = list_first_entry(&od->ddev.channels,
+ struct omap_chan, vc.chan.device_node);
+
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
+ kfree(c);
+ }
+ kfree(od);
+}
+
+static int omap_dma_probe(struct platform_device *pdev)
+{
+ struct omap_dmadev *od;
+ int rc, i;
+
+ od = kzalloc(sizeof(*od), GFP_KERNEL);
+ if (!od)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
+ od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
+ od->ddev.device_tx_status = omap_dma_tx_status;
+ od->ddev.device_issue_pending = omap_dma_issue_pending;
+ od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
+ od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
+ od->ddev.device_control = omap_dma_control;
+ od->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&od->ddev.channels);
+ INIT_LIST_HEAD(&od->pending);
+ spin_lock_init(&od->lock);
+
+ tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);
+
+ for (i = 0; i < 127; i++) {
+ rc = omap_dma_chan_init(od, i);
+ if (rc) {
+ omap_dma_free(od);
+ return rc;
+ }
+ }
+
+ rc = dma_async_device_register(&od->ddev);
+ if (rc) {
+ pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
+ rc);
+ omap_dma_free(od);
+ } else {
+ platform_set_drvdata(pdev, od);
+ }
+
+ dev_info(&pdev->dev, "OMAP DMA engine driver\n");
+
+ return rc;
+}
+
+static int omap_dma_remove(struct platform_device *pdev)
+{
+ struct omap_dmadev *od = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&od->ddev);
+ omap_dma_free(od);
+
+ return 0;
+}
+
+static struct platform_driver omap_dma_driver = {
+ .probe = omap_dma_probe,
+ .remove = omap_dma_remove,
+ .driver = {
+ .name = "omap-dma-engine",
+ .owner = THIS_MODULE,
+ },
+};
+
+bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &omap_dma_driver.driver) {
+ struct omap_chan *c = to_omap_dma_chan(chan);
+ unsigned req = *(unsigned *)param;
+
+ return req == c->dma_sig;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(omap_dma_filter_fn);
+
+static struct platform_device *pdev;
+
+static const struct platform_device_info omap_dma_dev_info = {
+ .name = "omap-dma-engine",
+ .id = -1,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int omap_dma_init(void)
+{
+ int rc = platform_driver_register(&omap_dma_driver);
+
+ if (rc == 0) {
+ pdev = platform_device_register_full(&omap_dma_dev_info);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&omap_dma_driver);
+ rc = PTR_ERR(pdev);
+ }
+ }
+ return rc;
+}
+subsys_initcall(omap_dma_init);
+
+static void __exit omap_dma_exit(void)
+{
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&omap_dma_driver);
+}
+module_exit(omap_dma_exit);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
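
The new omap-dma driver exports omap_dma_filter_fn() so that clients can claim a virtual channel by its DMA request line. The sketch below shows one plausible way a client might use it; claim_tx_channel() and the request line number are made up for illustration, and the prototype is assumed to be available from <linux/omap-dma.h>, matching the driver's own includes.

/*
 * Illustrative client-side use of the exported filter function.
 */
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>

static struct dma_chan *claim_tx_channel(void)
{
	dma_cap_mask_t mask;
	unsigned int sig = 17;		/* hypothetical DMA request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the driver's filter matches sig against the channel's dma_sig */
	return dma_request_channel(mask, omap_dma_filter_fn, &sig);
}

dma_request_channel() walks the registered DMA devices and calls the filter for each free channel; as the code above shows, the driver's filter simply compares the passed request line with the channel's dma_sig.
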
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index ec78ccef9132..f5a73606217e 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include "virt-dma.h"
+
#define NR_PHY_CHAN 6
#define DMA_ALIGN 3
#define DMA_MAX_SIZE 0x1fff
@@ -72,12 +74,13 @@ struct sa11x0_dma_sg {
};
struct sa11x0_dma_desc {
- struct dma_async_tx_descriptor tx;
+ struct virt_dma_desc vd;
+
u32 ddar;
size_t size;
+ unsigned period;
+ bool cyclic;
- /* maybe protected by c->lock */
- struct list_head node;
unsigned sglen;
struct sa11x0_dma_sg sg[0];
};
@@ -85,15 +88,11 @@ struct sa11x0_dma_desc {
struct sa11x0_dma_phy;
struct sa11x0_dma_chan {
- struct dma_chan chan;
- spinlock_t lock;
- dma_cookie_t lc;
+ struct virt_dma_chan vc;
- /* protected by c->lock */
+ /* protected by c->vc.lock */
struct sa11x0_dma_phy *phy;
enum dma_status status;
- struct list_head desc_submitted;
- struct list_head desc_issued;
/* protected by d->lock */
struct list_head node;
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_chan *vchan;
- /* Protected by c->lock */
+ /* Protected by c->vc.lock */
unsigned sg_load;
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
@@ -127,13 +126,12 @@ struct sa11x0_dma_dev {
spinlock_t lock;
struct tasklet_struct task;
struct list_head chan_pending;
- struct list_head desc_complete;
struct sa11x0_dma_phy phy[NR_PHY_CHAN];
};
static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
- return container_of(chan, struct sa11x0_dma_chan, chan);
+ return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}
static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
return container_of(dmadev, struct sa11x0_dma_dev, slave);
}
-static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
+static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
- return container_of(tx, struct sa11x0_dma_desc, tx);
+ struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+ return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}
-static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
- if (list_empty(&c->desc_issued))
- return NULL;
-
- return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+ kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}
static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
- list_del(&txd->node);
+ list_del(&txd->vd.node);
p->txd_load = txd;
p->sg_load = 0;
dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
- p->num, txd, txd->tx.cookie, txd->ddar);
+ p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
return;
if (p->sg_load == txd->sglen) {
- struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+ if (!txd->cyclic) {
+ struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
- /*
- * We have reached the end of the current descriptor.
- * Peek at the next descriptor, and if compatible with
- * the current, start processing it.
- */
- if (txn && txn->ddar == txd->ddar) {
- txd = txn;
- sa11x0_dma_start_desc(p, txn);
+ /*
+ * We have reached the end of the current descriptor.
+ * Peek at the next descriptor, and if compatible with
+ * the current, start processing it.
+ */
+ if (txn && txn->ddar == txd->ddar) {
+ txd = txn;
+ sa11x0_dma_start_desc(p, txn);
+ } else {
+ p->txd_load = NULL;
+ return;
+ }
} else {
- p->txd_load = NULL;
- return;
+ /* Cyclic: reset back to beginning */
+ p->sg_load = 0;
}
}
@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
struct sa11x0_dma_desc *txd = p->txd_done;
if (++p->sg_done == txd->sglen) {
- struct sa11x0_dma_dev *d = p->dev;
-
- dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
- p->num, p->txd_done, p->txd_done->tx.cookie);
-
- c->lc = txd->tx.cookie;
+ if (!txd->cyclic) {
+ vchan_cookie_complete(&txd->vd);
- spin_lock(&d->lock);
- list_add_tail(&txd->node, &d->desc_complete);
- spin_unlock(&d->lock);
+ p->sg_done = 0;
+ p->txd_done = p->txd_load;
- p->sg_done = 0;
- p->txd_done = p->txd_load;
+ if (!p->txd_done)
+ tasklet_schedule(&p->dev->task);
+ } else {
+ if ((p->sg_done % txd->period) == 0)
+ vchan_cyclic_callback(&txd->vd);
- tasklet_schedule(&d->task);
+ /* Cyclic: reset back to beginning */
+ p->sg_done = 0;
+ }
}
sa11x0_dma_start_sg(p, c);
@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
if (c) {
unsigned long flags;
- spin_lock_irqsave(&c->lock, flags);
+ spin_lock_irqsave(&c->vc.lock, flags);
/*
* Now that we're holding the lock, check that the vchan
* really is associated with this pchan before touching the
@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
if (dcsr & DCSR_DONEB)
sa11x0_dma_complete(p, c);
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
}
return IRQ_HANDLED;
@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
struct sa11x0_dma_phy *p;
struct sa11x0_dma_chan *c;
- struct sa11x0_dma_desc *txd, *txn;
- LIST_HEAD(head);
unsigned pch, pch_alloc = 0;
dev_dbg(d->slave.dev, "tasklet enter\n");
- /* Get the completed tx descriptors */
- spin_lock_irq(&d->lock);
- list_splice_init(&d->desc_complete, &head);
- spin_unlock_irq(&d->lock);
-
- list_for_each_entry(txd, &head, node) {
- c = to_sa11x0_dma_chan(txd->tx.chan);
-
- dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
- c, txd, txd->tx.cookie);
-
- spin_lock_irq(&c->lock);
+ list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&c->vc.lock);
p = c->phy;
- if (p) {
- if (!p->txd_done)
- sa11x0_dma_start_txd(c);
+ if (p && !p->txd_done) {
+ sa11x0_dma_start_txd(c);
if (!p->txd_done) {
/* No current txd associated with this channel */
dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);
@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
p->vchan = NULL;
}
}
- spin_unlock_irq(&c->lock);
+ spin_unlock_irq(&c->vc.lock);
}
spin_lock_irq(&d->lock);
@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
/* Mark this channel allocated */
p->vchan = c;
- dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+ dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
}
}
spin_unlock_irq(&d->lock);
@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
p = &d->phy[pch];
c = p->vchan;
- spin_lock_irq(&c->lock);
+ spin_lock_irq(&c->vc.lock);
c->phy = p;
sa11x0_dma_start_txd(c);
- spin_unlock_irq(&c->lock);
+ spin_unlock_irq(&c->vc.lock);
}
}
- /* Now free the completed tx descriptor, and call their callbacks */
- list_for_each_entry_safe(txd, txn, &head, node) {
- dma_async_tx_callback callback = txd->tx.callback;
- void *callback_param = txd->tx.callback_param;
-
- dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
- txd, txd->tx.cookie);
-
- kfree(txd);
-
- if (callback)
- callback(callback_param);
- }
-
dev_dbg(d->slave.dev, "tasklet exit\n");
}
-static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
-{
- struct sa11x0_dma_desc *txd, *txn;
-
- list_for_each_entry_safe(txd, txn, head, node) {
- dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
- kfree(txd);
- }
-}
-
static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
return 0;
@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
unsigned long flags;
- LIST_HEAD(head);
- spin_lock_irqsave(&c->lock, flags);
- spin_lock(&d->lock);
+ spin_lock_irqsave(&d->lock, flags);
list_del_init(&c->node);
- spin_unlock(&d->lock);
-
- list_splice_tail_init(&c->desc_submitted, &head);
- list_splice_tail_init(&c->desc_issued, &head);
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&d->lock, flags);
- sa11x0_dma_desc_free(d, &head);
+ vchan_free_chan_resources(&c->vc);
}
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
struct sa11x0_dma_phy *p;
- struct sa11x0_dma_desc *txd;
- dma_cookie_t last_used, last_complete;
+ struct virt_dma_desc *vd;
unsigned long flags;
enum dma_status ret;
- size_t bytes = 0;
-
- last_used = c->chan.cookie;
- last_complete = c->lc;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret == DMA_SUCCESS) {
- dma_set_tx_state(state, last_complete, last_used, 0);
+ ret = dma_cookie_status(&c->vc.chan, cookie, state);
+ if (ret == DMA_SUCCESS)
return ret;
- }
- spin_lock_irqsave(&c->lock, flags);
+ if (!state)
+ return c->status;
+
+ spin_lock_irqsave(&c->vc.lock, flags);
p = c->phy;
- ret = c->status;
- if (p) {
- dma_addr_t addr = sa11x0_dma_pos(p);
- dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+ /*
+ * If the cookie is on our issue queue, then the residue is
+ * its total size.
+ */
+ vd = vchan_find_desc(&c->vc, cookie);
+ if (vd) {
+ state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
+ } else if (!p) {
+ state->residue = 0;
+ } else {
+ struct sa11x0_dma_desc *txd;
+ size_t bytes = 0;
- txd = p->txd_done;
+ if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
+ txd = p->txd_done;
+ else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
+ txd = p->txd_load;
+ else
+ txd = NULL;
+
+ ret = c->status;
if (txd) {
+ dma_addr_t addr = sa11x0_dma_pos(p);
unsigned i;
+ dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
+
for (i = 0; i < txd->sglen; i++) {
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
i, txd->sg[i].addr, txd->sg[i].len);
@@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
bytes += txd->sg[i].len;
}
}
- if (txd != p->txd_load && p->txd_load)
- bytes += p->txd_load->size;
- }
- list_for_each_entry(txd, &c->desc_issued, node) {
- bytes += txd->size;
+ state->residue = bytes;
}
- spin_unlock_irqrestore(&c->lock, flags);
-
- dma_set_tx_state(state, last_complete, last_used, bytes);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
- dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
+ dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
return ret;
}
@@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
unsigned long flags;
- spin_lock_irqsave(&c->lock, flags);
- list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
- if (!list_empty(&c->desc_issued)) {
- spin_lock(&d->lock);
- if (!c->phy && list_empty(&c->node)) {
- list_add_tail(&c->node, &d->chan_pending);
- tasklet_schedule(&d->task);
- dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ if (vchan_issue_pending(&c->vc)) {
+ if (!c->phy) {
+ spin_lock(&d->lock);
+ if (list_empty(&c->node)) {
+ list_add_tail(&c->node, &d->chan_pending);
+ tasklet_schedule(&d->task);
+ dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+ }
+ spin_unlock(&d->lock);
}
- spin_unlock(&d->lock);
} else
- dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
- spin_unlock_irqrestore(&c->lock, flags);
-}
-
-static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
- struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
- unsigned long flags;
-
- spin_lock_irqsave(&c->lock, flags);
- c->chan.cookie += 1;
- if (c->chan.cookie < 0)
- c->chan.cookie = 1;
- txd->tx.cookie = c->chan.cookie;
-
- list_add_tail(&txd->node, &c->desc_submitted);
- spin_unlock_irqrestore(&c->lock, flags);
-
- dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
- c, txd, txd->tx.cookie);
-
- return txd->tx.cookie;
+ dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
}
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
@@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
/* SA11x0 channels can only operate in their native direction */
if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
- c, c->ddar, dir);
+ &c->vc, c->ddar, dir);
return NULL;
}
@@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
if (addr & DMA_ALIGN) {
dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
- c, addr);
+ &c->vc, addr);
return NULL;
}
}
txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
if (!txd) {
- dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
return NULL;
}
@@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
} while (len);
}
- dma_async_tx_descriptor_init(&txd->tx, &c->chan);
- txd->tx.flags = flags;
- txd->tx.tx_submit = sa11x0_dma_tx_submit;
txd->ddar = c->ddar;
txd->size = size;
txd->sglen = j;
dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
- c, txd, txd->size, txd->sglen);
+ &c->vc, &txd->vd, txd->size, txd->sglen);
- return &txd->tx;
+ return vchan_tx_prep(&c->vc, &txd->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
+ enum dma_transfer_direction dir, void *context)
+{
+ struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
+ struct sa11x0_dma_desc *txd;
+ unsigned i, j, k, sglen, sgperiod;
+
+ /* SA11x0 channels can only operate in their native direction */
+ if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
+ dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
+ &c->vc, c->ddar, dir);
+ return NULL;
+ }
+
+ sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
+ sglen = size * sgperiod / period;
+
+ /* Do not allow zero-sized txds */
+ if (sglen == 0)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
+ if (!txd) {
+ dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
+ return NULL;
+ }
+
+ for (i = k = 0; i < size / period; i++) {
+ size_t tlen, len = period;
+
+ for (j = 0; j < sgperiod; j++, k++) {
+ tlen = len;
+
+ if (tlen > DMA_MAX_SIZE) {
+ unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
+ tlen = (tlen / mult) & ~DMA_ALIGN;
+ }
+
+ txd->sg[k].addr = addr;
+ txd->sg[k].len = tlen;
+ addr += tlen;
+ len -= tlen;
+ }
+
+ WARN_ON(len != 0);
+ }
+
+ WARN_ON(k != sglen);
+
+ txd->ddar = c->ddar;
+ txd->size = size;
+ txd->sglen = sglen;
+ txd->cyclic = 1;
+ txd->period = sgperiod;
+
+ return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
@@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
if (maxburst == 8)
ddar |= DDAR_BS;
- dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
- c, addr, width, maxburst);
+ dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
+ &c->vc, addr, width, maxburst);
c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
@@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
case DMA_TERMINATE_ALL:
- dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
+ dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
/* Clear the tx descriptor lists */
- spin_lock_irqsave(&c->lock, flags);
- list_splice_tail_init(&c->desc_submitted, &head);
- list_splice_tail_init(&c->desc_issued, &head);
+ spin_lock_irqsave(&c->vc.lock, flags);
+ vchan_get_all_descriptors(&c->vc, &head);
p = c->phy;
if (p) {
- struct sa11x0_dma_desc *txd, *txn;
-
dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
/* vchan is assigned to a pchan - stop the channel */
writel(DCSR_RUN | DCSR_IE |
@@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
DCSR_STRTB | DCSR_DONEB,
p->base + DMA_DCSR_C);
- list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
- if (txd->tx.chan == &c->chan)
- list_move(&txd->node, &head);
-
if (p->txd_load) {
if (p->txd_load != p->txd_done)
- list_add_tail(&p->txd_load->node, &head);
+ list_add_tail(&p->txd_load->vd.node, &head);
p->txd_load = NULL;
}
if (p->txd_done) {
- list_add_tail(&p->txd_done->node, &head);
+ list_add_tail(&p->txd_done->vd.node, &head);
p->txd_done = NULL;
}
c->phy = NULL;
@@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock(&d->lock);
tasklet_schedule(&d->task);
}
- spin_unlock_irqrestore(&c->lock, flags);
- sa11x0_dma_desc_free(d, &head);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ vchan_dma_desc_free_list(&c->vc, &head);
ret = 0;
break;
case DMA_PAUSE:
- dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
- spin_lock_irqsave(&c->lock, flags);
+ dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
if (c->status == DMA_IN_PROGRESS) {
c->status = DMA_PAUSED;
@@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
spin_unlock(&d->lock);
}
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
ret = 0;
break;
case DMA_RESUME:
- dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
- spin_lock_irqsave(&c->lock, flags);
+ dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
+ spin_lock_irqsave(&c->vc.lock, flags);
if (c->status == DMA_PAUSED) {
c->status = DMA_IN_PROGRESS;
p = c->phy;
if (p) {
writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
- } else if (!list_empty(&c->desc_issued)) {
+ } else if (!list_empty(&c->vc.desc_issued)) {
spin_lock(&d->lock);
list_add_tail(&c->node, &d->chan_pending);
spin_unlock(&d->lock);
}
}
- spin_unlock_irqrestore(&c->lock, flags);
+ spin_unlock_irqrestore(&c->vc.lock, flags);
ret = 0;
break;
@@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
return -ENOMEM;
}
- c->chan.device = dmadev;
c->status = DMA_IN_PROGRESS;
c->ddar = chan_desc[i].ddar;
c->name = chan_desc[i].name;
- spin_lock_init(&c->lock);
- INIT_LIST_HEAD(&c->desc_submitted);
- INIT_LIST_HEAD(&c->desc_issued);
INIT_LIST_HEAD(&c->node);
- list_add_tail(&c->chan.device_node, &dmadev->channels);
+
+ c->vc.desc_free = sa11x0_dma_free_desc;
+ vchan_init(&c->vc, dmadev);
}
return dma_async_device_register(dmadev);
@@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
struct sa11x0_dma_chan *c, *cn;
- list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
- list_del(&c->chan.device_node);
+ list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
+ list_del(&c->vc.chan.device_node);
+ tasklet_kill(&c->vc.task);
kfree(c);
}
}
@@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
spin_lock_init(&d->lock);
INIT_LIST_HEAD(&d->chan_pending);
- INIT_LIST_HEAD(&d->desc_complete);
d->base = ioremap(res->start, resource_size(res));
if (!d->base) {
@@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
}
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
+ d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
if (ret) {
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
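
The new sa11x0_dma_prep_dma_cyclic() above splits the ring buffer into size/period periods, and each period into sgperiod chunks of at most DMA_MAX_SIZE bytes, trimmed down to the controller's alignment. A standalone sketch of that arithmetic follows; the DMA_MAX_SIZE and DMA_ALIGN values are illustrative stand-ins, not quoted from the driver.

	/* Build-and-run sketch of the period splitting done above. */
	#include <stdio.h>

	#define DMA_MAX_SIZE	0x1fff		/* assumed value, for illustration */
	#define DMA_ALIGN	0x3		/* assumed value, for illustration */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int size = 64 * 1024, period = 16 * 1024;
		unsigned int sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
		unsigned int i, j, addr = 0;

		for (i = 0; i < size / period; i++) {
			unsigned int len = period;

			for (j = 0; j < sgperiod; j++) {
				unsigned int tlen = len;

				if (tlen > DMA_MAX_SIZE) {
					unsigned int mult =
						DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
					tlen = (tlen / mult) & ~DMA_ALIGN;
				}
				printf("period %u chunk %u: addr 0x%x len 0x%x\n",
				       i, j, addr, tlen);
				addr += tlen;
				len -= tlen;
			}
		}
		return 0;
	}

Each period ends up fully covered (len reaches 0), which is exactly what the WARN_ON() checks in the driver assert.
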
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 27f5c781fd73..f4cd946d259d 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
new->mark = DESC_PREPARED;
new->async_tx.flags = flags;
new->direction = direction;
+ new->partial = 0;
*len -= copy_size;
if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&schan->chan_lock, flags);
ops->halt_channel(schan);
+
+ if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+ /* Record partial transfer */
+ struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+ struct shdma_desc, node);
+ desc->partial = ops->get_partial(schan, desc);
+ }
+
spin_unlock_irqrestore(&schan->chan_lock, flags);
shdma_chan_ld_cleanup(schan, true);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 027c9be97654..f41bcc5267fd 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
return true;
}
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+ sh_chan->xmit_shift;
+}
+
/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
.start_xfer = sh_dmae_start_xfer,
.embedded_desc = sh_dmae_embedded_desc,
.chan_irq = sh_dmae_chan_irq,
+ .get_partial = sh_dmae_get_partial,
};
static int __devinit sh_dmae_probe(struct platform_device *pdev)
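
The shdma hunks above let DMA_TERMINATE_ALL record how much of the head descriptor actually completed: the core calls the new get_partial() hook, and sh_dmae computes the residue as the programmed transfer count minus the live TCR value, scaled by the channel's transfer width. A tiny standalone sketch of that formula, with made-up sample register values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tcr_programmed = 0x100;	/* count originally written to TCR */
		unsigned int tcr_now = 0x40;		/* TCR read back when halting */
		unsigned int xmit_shift = 2;		/* log2(transfer width in bytes) */

		/* remaining bytes = (programmed - completed) << xmit_shift */
		unsigned long partial =
			(unsigned long)(tcr_programmed - tcr_now) << xmit_shift;

		printf("untransferred bytes: %lu\n", partial);	/* 0xc0 * 4 = 768 */
		return 0;
	}
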
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d52dbc6c54ab..24acd711e032 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma *tdma = tdc->tdma;
+ int ret;
dma_cookie_init(&tdc->dma_chan);
tdc->config_init = false;
- return 0;
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0)
+ dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
+ return ret;
}
static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma *tdma = tdc->tdma;
struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req;
@@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node);
kfree(sg_req);
}
+ clk_disable_unprepare(tdma->dma_clk);
}
/* Tegra20 specific DMA controller information */
@@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
}
}
+ /* Enable clock before accessing registers */
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ goto err_pm_disable;
+ }
+
/* Reset DMA controller */
tegra_periph_reset_assert(tdma->dma_clk);
udelay(2);
@@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev)
tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+ clk_disable_unprepare(tdma->dma_clk);
+
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < cdata->nr_channels; i++) {
struct tegra_dma_channel *tdc = &tdma->channels[i];
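
The Tegra APB DMA changes above move clock management to channel allocation and free (plus a temporary enable around the controller reset in probe), so the clock only runs while a channel is in use. A minimal sketch of that pattern, with hypothetical my_chan names, showing the clk_prepare_enable()/clk_disable_unprepare() pairing the driver now relies on:

	#include <linux/clk.h>
	#include <linux/device.h>

	struct my_chan {
		struct device *dev;
		struct clk *clk;
	};

	static int my_chan_alloc(struct my_chan *c)
	{
		int ret = clk_prepare_enable(c->clk);

		if (ret < 0)
			dev_err(c->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	static void my_chan_free(struct my_chan *c)
	{
		/* balances the clk_prepare_enable() in my_chan_alloc() */
		clk_disable_unprepare(c->clk);
	}
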
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
new file mode 100644
index 000000000000..6f80432a3f0a
--- /dev/null
+++ b/drivers/dma/virt-dma.c
@@ -0,0 +1,123 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct virt_dma_desc, tx);
+}
+
+dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+ struct virt_dma_desc *vd = to_virt_desc(tx);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&vc->lock, flags);
+ cookie = dma_cookie_assign(tx);
+
+ list_add_tail(&vd->node, &vc->desc_submitted);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
+ vc, vd, cookie);
+
+ return cookie;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_submit);
+
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
+ dma_cookie_t cookie)
+{
+ struct virt_dma_desc *vd;
+
+ list_for_each_entry(vd, &vc->desc_issued, node)
+ if (vd->tx.cookie == cookie)
+ return vd;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vchan_find_desc);
+
+/*
+ * This tasklet handles the completion of a DMA descriptor by
+ * calling its callback and freeing it.
+ */
+static void vchan_complete(unsigned long arg)
+{
+ struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_desc *vd;
+ dma_async_tx_callback cb = NULL;
+ void *cb_data = NULL;
+ LIST_HEAD(head);
+
+ spin_lock_irq(&vc->lock);
+ list_splice_tail_init(&vc->desc_completed, &head);
+ vd = vc->cyclic;
+ if (vd) {
+ vc->cyclic = NULL;
+ cb = vd->tx.callback;
+ cb_data = vd->tx.callback_param;
+ }
+ spin_unlock_irq(&vc->lock);
+
+ if (cb)
+ cb(cb_data);
+
+ while (!list_empty(&head)) {
+ vd = list_first_entry(&head, struct virt_dma_desc, node);
+ cb = vd->tx.callback;
+ cb_data = vd->tx.callback_param;
+
+ list_del(&vd->node);
+
+ vc->desc_free(vd);
+
+ if (cb)
+ cb(cb_data);
+ }
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct virt_dma_desc *vd = list_first_entry(head,
+ struct virt_dma_desc, node);
+ list_del(&vd->node);
+ dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+ vc->desc_free(vd);
+ }
+}
+EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
+
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
+{
+ dma_cookie_init(&vc->chan);
+
+ spin_lock_init(&vc->lock);
+ INIT_LIST_HEAD(&vc->desc_submitted);
+ INIT_LIST_HEAD(&vc->desc_issued);
+ INIT_LIST_HEAD(&vc->desc_completed);
+
+ tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);
+
+ vc->chan.device = dmadev;
+ list_add_tail(&vc->chan.device_node, &dmadev->channels);
+}
+EXPORT_SYMBOL_GPL(vchan_init);
+
+MODULE_AUTHOR("Russell King");
+MODULE_LICENSE("GPL");
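
virt-dma.c centralises cookie assignment at submit time and defers callbacks to a per-channel tasklet; cyclic transfers are reported through vc->cyclic rather than the completed list. A hypothetical interrupt handler for a driver built on this core might look like the sketch below; foo_chan, foo_desc, c->cur and d->cyclic are invented for illustration, only the vchan_* helpers come from this file.

	#include <linux/interrupt.h>
	#include "virt-dma.h"

	struct foo_desc {
		struct virt_dma_desc vd;
		bool cyclic;
	};

	struct foo_chan {
		struct virt_dma_chan vc;
		struct foo_desc *cur;
	};

	static irqreturn_t foo_dma_irq(int irq, void *data)
	{
		struct foo_chan *c = data;
		struct foo_desc *d = c->cur;
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		if (d->cyclic) {
			/* a period completed: the tasklet will run the callback */
			vchan_cyclic_callback(&d->vd);
		} else {
			/* whole descriptor done: complete its cookie */
			vchan_cookie_complete(&d->vd);
			c->cur = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);

		return IRQ_HANDLED;
	}
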
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
new file mode 100644
index 000000000000..85c19d63f9fb
--- /dev/null
+++ b/drivers/dma/virt-dma.h
@@ -0,0 +1,152 @@
+/*
+ * Virtual DMA channel support for DMAengine
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VIRT_DMA_H
+#define VIRT_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#include "dmaengine.h"
+
+struct virt_dma_desc {
+ struct dma_async_tx_descriptor tx;
+ /* protected by vc.lock */
+ struct list_head node;
+};
+
+struct virt_dma_chan {
+ struct dma_chan chan;
+ struct tasklet_struct task;
+ void (*desc_free)(struct virt_dma_desc *);
+
+ spinlock_t lock;
+
+ /* protected by vc.lock */
+ struct list_head desc_submitted;
+ struct list_head desc_issued;
+ struct list_head desc_completed;
+
+ struct virt_dma_desc *cyclic;
+};
+
+static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct virt_dma_chan, chan);
+}
+
+void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
+void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
+struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
+
+/**
+ * vchan_tx_prep - prepare a descriptor
+ * vc: virtual channel allocating this descriptor
+ * vd: virtual descriptor to prepare
+ * tx_flags: flags argument passed in to prepare function
+ */
+static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
+ struct virt_dma_desc *vd, unsigned long tx_flags)
+{
+ extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+
+ dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
+ vd->tx.flags = tx_flags;
+ vd->tx.tx_submit = vchan_tx_submit;
+
+ return &vd->tx;
+}
+
+/**
+ * vchan_issue_pending - move submitted descriptors to issued list
+ * vc: virtual channel to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
+{
+ list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
+ return !list_empty(&vc->desc_issued);
+}
+
+/**
+ * vchan_cookie_complete - report completion of a descriptor
+ * vd: virtual descriptor to update
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ dma_cookie_complete(&vd->tx);
+ dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
+ vd, vd->tx.cookie);
+ list_add_tail(&vd->node, &vc->desc_completed);
+
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_cyclic_callback - report the completion of a period
+ * vd: virtual descriptor
+ */
+static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
+{
+ struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+ vc->cyclic = vd;
+ tasklet_schedule(&vc->task);
+}
+
+/**
+ * vchan_next_desc - peek at the next descriptor to be processed
+ * vc: virtual channel to obtain descriptor from
+ *
+ * vc.lock must be held by caller
+ */
+static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
+{
+ if (list_empty(&vc->desc_issued))
+ return NULL;
+
+ return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
+}
+
+/**
+ * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * vc: virtual channel to get descriptors from
+ * head: list of descriptors found
+ *
+ * vc.lock must be held by caller
+ *
+ * Removes all submitted and issued descriptors from internal lists, and
+ * provides a list of all descriptors found
+ */
+static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
+ struct list_head *head)
+{
+ list_splice_tail_init(&vc->desc_submitted, head);
+ list_splice_tail_init(&vc->desc_issued, head);
+ list_splice_tail_init(&vc->desc_completed, head);
+}
+
+static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vc->lock, flags);
+ vchan_get_all_descriptors(vc, &head);
+ spin_unlock_irqrestore(&vc->lock, flags);
+
+ vchan_dma_desc_free_list(vc, &head);
+}
+
+#endif
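
The helpers in this header are meant to be called from a driver's own dmaengine callbacks, with vc.lock held where the comments say so. A hypothetical device_issue_pending() implementation (bar_chan, bar_start_desc() and c->cur are invented names) showing the intended vchan_issue_pending()/vchan_next_desc() pairing:

	#include "virt-dma.h"

	struct bar_chan {
		struct virt_dma_chan vc;
		struct virt_dma_desc *cur;
	};

	static void bar_start_desc(struct bar_chan *c, struct virt_dma_desc *vd)
	{
		c->cur = vd;	/* hardware programming would happen here */
	}

	static void bar_dma_issue_pending(struct dma_chan *chan)
	{
		struct bar_chan *c = container_of(chan, struct bar_chan, vc.chan);
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		if (vchan_issue_pending(&c->vc) && !c->cur)
			bar_start_desc(c, vchan_next_desc(&c->vc));
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}
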
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index fdffa1beca17..409b92b8d346 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,7 +7,7 @@
menuconfig EDAC
bool "EDAC (Error Detection And Correction) reporting"
depends on HAS_IOMEM
- depends on X86 || PPC || TILE
+ depends on X86 || PPC || TILE || ARM
help
EDAC is designed to report errors in the core system.
These are low-level errors that are reported in the CPU or
@@ -31,6 +31,14 @@ if EDAC
comment "Reporting subsystems"
+config EDAC_LEGACY_SYSFS
+ bool "EDAC legacy sysfs"
+ default y
+ help
+ Enable the compatibility sysfs nodes.
+ Use 'Y' if your edac utilities aren't ported to work with the newer
+ structures.
+
config EDAC_DEBUG
bool "Debugging"
help
@@ -294,4 +302,18 @@ config EDAC_TILE
Support for error detection and correction on the
Tilera memory controller.
+config EDAC_HIGHBANK_MC
+ tristate "Highbank Memory Controller"
+ depends on EDAC_MM_EDAC && ARCH_HIGHBANK
+ help
+ Support for error detection and correction on the
+ Calxeda Highbank memory controller.
+
+config EDAC_HIGHBANK_L2
+ tristate "Highbank L2 Cache"
+ depends on EDAC_MM_EDAC && ARCH_HIGHBANK
+ help
+ Support for error detection and correction on the
+	  Calxeda Highbank L2 cache.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 196a63dd37c5..7e5129a733f8 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -55,3 +55,6 @@ obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o
obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o
obj-$(CONFIG_EDAC_TILE) += tile_edac.o
+
+obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
+obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7be9b7288e90..5a297a26211d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -321,8 +321,8 @@ found:
return edac_mc_find((int)node_id);
err_no_match:
- debugf2("sys_addr 0x%lx doesn't match any node\n",
- (unsigned long)sys_addr);
+ edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
+ (unsigned long)sys_addr);
return NULL;
}
@@ -393,15 +393,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
- debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
- (unsigned long)input_addr, csrow,
- pvt->mc_node_id);
+ edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
+ (unsigned long)input_addr, csrow,
+ pvt->mc_node_id);
return csrow;
}
}
- debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
- (unsigned long)input_addr, pvt->mc_node_id);
+ edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+ (unsigned long)input_addr, pvt->mc_node_id);
return -1;
}
@@ -430,20 +430,20 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
/* only revE and later have the DRAM Hole Address Register */
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
- debugf1(" revision %d for node %d does not support DHAR\n",
- pvt->ext_model, pvt->mc_node_id);
+ edac_dbg(1, " revision %d for node %d does not support DHAR\n",
+ pvt->ext_model, pvt->mc_node_id);
return 1;
}
/* valid for Fam10h and above */
if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
if (!dhar_valid(pvt)) {
- debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
- pvt->mc_node_id);
+ edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
+ pvt->mc_node_id);
return 1;
}
@@ -475,9 +475,9 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
else
*hole_offset = k8_dhar_offset(pvt);
- debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
- pvt->mc_node_id, (unsigned long)*hole_base,
- (unsigned long)*hole_offset, (unsigned long)*hole_size);
+ edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)*hole_base,
+ (unsigned long)*hole_offset, (unsigned long)*hole_size);
return 0;
}
@@ -528,10 +528,9 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
/* use DHAR to translate SysAddr to DramAddr */
dram_addr = sys_addr - hole_offset;
- debugf2("using DHAR to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n",
- (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr,
+ (unsigned long)dram_addr);
return dram_addr;
}
@@ -548,9 +547,8 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
*/
dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
- debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
- "DramAddr 0x%lx\n", (unsigned long)sys_addr,
- (unsigned long)dram_addr);
+ edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)dram_addr);
return dram_addr;
}
@@ -586,9 +584,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
(dram_addr & 0xfff);
- debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
- intlv_shift, (unsigned long)dram_addr,
- (unsigned long)input_addr);
+ edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+ intlv_shift, (unsigned long)dram_addr,
+ (unsigned long)input_addr);
return input_addr;
}
@@ -604,8 +602,8 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
- debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
- (unsigned long)sys_addr, (unsigned long)input_addr);
+	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
+ (unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;
}
@@ -637,8 +635,8 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
- debugf1(" InputAddr 0x%lx translates to DramAddr of "
- "same value\n", (unsigned long)input_addr);
+ edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
+ (unsigned long)input_addr);
return input_addr;
}
@@ -649,9 +647,9 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
- debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
- "(%d node interleave bits)\n", (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
+ edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
+ (unsigned long)input_addr,
+ (unsigned long)dram_addr, intlv_shift);
return dram_addr;
}
@@ -673,9 +671,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
(dram_addr < (hole_base + hole_size))) {
sys_addr = dram_addr + hole_offset;
- debugf1("using DHAR to translate DramAddr 0x%lx to "
- "SysAddr 0x%lx\n", (unsigned long)dram_addr,
- (unsigned long)sys_addr);
+ edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
+ (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
return sys_addr;
}
@@ -697,9 +695,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
*/
sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
- debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
+ edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+ pvt->mc_node_id, (unsigned long)dram_addr,
+ (unsigned long)sys_addr);
return sys_addr;
}
@@ -768,49 +766,48 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
- debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+ edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
- debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
- (dclr & BIT(16)) ? "un" : "",
- (dclr & BIT(19)) ? "yes" : "no");
+ edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+ (dclr & BIT(16)) ? "un" : "",
+ (dclr & BIT(19)) ? "yes" : "no");
- debugf1(" PAR/ERR parity: %s\n",
- (dclr & BIT(8)) ? "enabled" : "disabled");
+ edac_dbg(1, " PAR/ERR parity: %s\n",
+ (dclr & BIT(8)) ? "enabled" : "disabled");
if (boot_cpu_data.x86 == 0x10)
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ edac_dbg(1, " DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
- debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
- (dclr & BIT(12)) ? "yes" : "no",
- (dclr & BIT(13)) ? "yes" : "no",
- (dclr & BIT(14)) ? "yes" : "no",
- (dclr & BIT(15)) ? "yes" : "no");
+ edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+ (dclr & BIT(12)) ? "yes" : "no",
+ (dclr & BIT(13)) ? "yes" : "no",
+ (dclr & BIT(14)) ? "yes" : "no",
+ (dclr & BIT(15)) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
- debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
+ edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
- debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
+ edac_dbg(1, " NB two channel DRAM capable: %s\n",
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
- debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
+ edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
- debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
+ edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
- debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
- "offset: 0x%08x\n",
- pvt->dhar, dhar_base(pvt),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
- : f10_dhar_offset(pvt));
+ edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_debug_display_dimm_sizes(pvt, 0);
@@ -857,15 +854,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *base1 = &pvt->csels[1].csbases[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
- debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, *base0, reg0);
+ edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
+ cs, *base0, reg0);
if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, *base1, reg1);
+ edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
for_each_chip_select_mask(cs, 0, pvt) {
@@ -875,15 +872,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
u32 *mask1 = &pvt->csels[1].csmasks[cs];
if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
- debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, *mask0, reg0);
+ edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask0, reg0);
if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, *mask1, reg1);
+ edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -1049,24 +1046,22 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (!src_mci) {
amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
(unsigned long)sys_addr);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset, syndrome,
-1, -1, -1,
- EDAC_MOD_STR,
"failed to map error addr to a node",
- NULL);
+ "");
return;
}
/* Now map the sys_addr to a CSROW */
csrow = sys_addr_to_csrow(src_mci, sys_addr);
if (csrow < 0) {
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset, syndrome,
-1, -1, -1,
- EDAC_MOD_STR,
"failed to map error addr to a csrow",
- NULL);
+ "");
return;
}
@@ -1082,12 +1077,11 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
"possible error reporting race\n",
syndrome);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset, syndrome,
csrow, -1, -1,
- EDAC_MOD_STR,
"unknown syndrome - possible error reporting race",
- NULL);
+ "");
return;
}
} else {
@@ -1102,10 +1096,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
channel = ((sys_addr & BIT(3)) != 0);
}
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1,
page, offset, syndrome,
csrow, channel, -1,
- EDAC_MOD_STR, "", NULL);
+ "", "");
}
static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1193,7 +1187,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
* Need to check DCT0[0] and DCT1[0] to see if only one of them has
* their CSEnable bit on. If so, then SINGLE DIMM case.
*/
- debugf0("Data width is not 128 bits - need more decoding\n");
+ edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
/*
* Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1272,25 +1266,24 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
return;
if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
- debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
- pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
+ edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCTs operate in %s mode.\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
+ edac_dbg(0, " DCTs operate in %s mode\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
- debugf0(" Address range split per DCT: %s\n",
- (dct_high_range_enabled(pvt) ? "yes" : "no"));
+ edac_dbg(0, " Address range split per DCT: %s\n",
+ (dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" data interleave for ECC: %s, "
- "DRAM cleared since last warm reset: %s\n",
- (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
- (dct_memory_cleared(pvt) ? "yes" : "no"));
+ edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
+ (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
+ (dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" channel interleave: %s, "
- "interleave bits selector: 0x%x\n",
- (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
- dct_sel_interleave_addr(pvt));
+ edac_dbg(0, " channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
+ (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
+ dct_sel_interleave_addr(pvt));
}
amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
@@ -1428,7 +1421,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
pvt = mci->pvt_info;
- debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
+ edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
for_each_chip_select(csrow, dct, pvt) {
if (!csrow_enabled(csrow, dct, pvt))
@@ -1436,19 +1429,18 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
- csrow, cs_base, cs_mask);
+ edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
cs_mask = ~cs_mask;
- debugf1(" (InputAddr & ~CSMask)=0x%llx "
- "(CSBase & ~CSMask)=0x%llx\n",
- (in_addr & cs_mask), (cs_base & cs_mask));
+ edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
cs_found = f10_process_possible_spare(pvt, dct, csrow);
- debugf1(" MATCH csrow=%d\n", cs_found);
+ edac_dbg(1, " MATCH csrow=%d\n", cs_found);
break;
}
}
@@ -1505,8 +1497,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
u8 intlv_en = dram_intlv_en(pvt, range);
u32 intlv_sel = dram_intlv_sel(pvt, range);
- debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
- range, sys_addr, get_dram_limit(pvt, range));
+ edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
if (dhar_valid(pvt) &&
dhar_base(pvt) <= sys_addr &&
@@ -1562,7 +1554,7 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
(chan_addr & 0xfff);
}
- debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
+ edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
@@ -1616,12 +1608,11 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset, syndrome,
-1, -1, -1,
- EDAC_MOD_STR,
"failed to map error addr to a csrow",
- NULL);
+ "");
return;
}
@@ -1633,10 +1624,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset, syndrome,
csrow, chan, -1,
- EDAC_MOD_STR, "", NULL);
+ "", "");
}
/*
@@ -1664,7 +1655,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
: pvt->csels[0].csbases;
- debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
+ edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
+ ctrl, dbam);
edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
@@ -1840,7 +1832,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
}
}
- debugf0("syndrome(%x) not found\n", syndrome);
+ edac_dbg(0, "syndrome(%x) not found\n", syndrome);
return -1;
}
@@ -1917,12 +1909,11 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
/* Ensure that the Error Address is VALID */
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, 0,
-1, -1, -1,
- EDAC_MOD_STR,
"HW has no ERROR_ADDRESS available",
- NULL);
+ "");
return;
}
@@ -1946,12 +1937,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
-1, -1, -1,
- EDAC_MOD_STR,
"HW has no ERROR_ADDRESS available",
- NULL);
+ "");
return;
}
@@ -1966,11 +1956,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!src_mci) {
amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
(unsigned long)sys_addr);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offset, 0,
-1, -1, -1,
- EDAC_MOD_STR,
- "ERROR ADDRESS NOT mapped to a MC", NULL);
+ "ERROR ADDRESS NOT mapped to a MC",
+ "");
return;
}
@@ -1980,17 +1970,16 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (csrow < 0) {
amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
(unsigned long)sys_addr);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offset, 0,
-1, -1, -1,
- EDAC_MOD_STR,
"ERROR ADDRESS NOT mapped to CS",
- NULL);
+ "");
} else {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offset, 0,
csrow, -1, -1,
- EDAC_MOD_STR, "", NULL);
+ "", "");
}
}
@@ -2047,9 +2036,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
return -ENODEV;
}
- debugf1("F1: %s\n", pci_name(pvt->F1));
- debugf1("F2: %s\n", pci_name(pvt->F2));
- debugf1("F3: %s\n", pci_name(pvt->F3));
+ edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
+ edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
+ edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
return 0;
}
@@ -2076,15 +2065,15 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* those are Read-As-Zero
*/
rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
- debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
+ edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* check first whether TOP_MEM2 is enabled */
rdmsrl(MSR_K8_SYSCFG, msr_val);
if (msr_val & (1U << 21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
- debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
+ edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
} else
- debugf0(" TOP_MEM2 disabled.\n");
+ edac_dbg(0, " TOP_MEM2 disabled\n");
amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
@@ -2100,17 +2089,17 @@ static void read_mc_regs(struct amd64_pvt *pvt)
if (!rw)
continue;
- debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
- range,
- get_dram_base(pvt, range),
- get_dram_limit(pvt, range));
+ edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
- debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
- dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
- (rw & 0x1) ? "R" : "-",
- (rw & 0x2) ? "W" : "-",
- dram_intlv_sel(pvt, range),
- dram_dst_node(pvt, range));
+ edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
read_dct_base_mask(pvt);
@@ -2191,9 +2180,9 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
- debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages/channel= %u channel-count = %d\n",
- nr_pages, pvt->channel_count);
+ edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
+ edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
+ nr_pages, pvt->channel_count);
return nr_pages;
}
@@ -2205,6 +2194,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
struct amd64_pvt *pvt = mci->pvt_info;
u64 base, mask;
u32 val;
@@ -2217,22 +2207,19 @@ static int init_csrows(struct mem_ctl_info *mci)
pvt->nbcfg = val;
- debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
for_each_chip_select(i, 0, pvt) {
- csrow = &mci->csrows[i];
+ csrow = mci->csrows[i];
if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
- debugf1("----CSROW %d EMPTY for node %d\n", i,
- pvt->mc_node_id);
+			edac_dbg(1, "----CSROW %d EMPTY for node %d\n",
+ i, pvt->mc_node_id);
continue;
}
- debugf1("----CSROW %d VALID for MC node %d\n",
- i, pvt->mc_node_id);
-
empty = 0;
if (csrow_enabled(i, 0, pvt))
nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
@@ -2244,8 +2231,9 @@ static int init_csrows(struct mem_ctl_info *mci)
mtype = amd64_determine_memory_type(pvt, i);
- debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
+ edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+ edac_dbg(1, " nr_pages: %u\n",
+ nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
@@ -2257,9 +2245,10 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_mode = EDAC_NONE;
for (j = 0; j < pvt->channel_count; j++) {
- csrow->channels[j].dimm->mtype = mtype;
- csrow->channels[j].dimm->edac_mode = edac_mode;
- csrow->channels[j].dimm->nr_pages = nr_pages;
+ dimm = csrow->channels[j]->dimm;
+ dimm->mtype = mtype;
+ dimm->edac_mode = edac_mode;
+ dimm->nr_pages = nr_pages;
}
}
@@ -2296,9 +2285,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
struct msr *reg = per_cpu_ptr(msrs, cpu);
nbe = reg->l & MSR_MCGCTL_NBE;
- debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
- cpu, reg->q,
- (nbe ? "enabled" : "disabled"));
+ edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+ cpu, reg->q,
+ (nbe ? "enabled" : "disabled"));
if (!nbe)
goto out;
@@ -2369,8 +2358,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
@@ -2394,8 +2383,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
- nid, value, !!(value & NBCFG_ECC_ENABLE));
+ edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2463,26 +2452,29 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
return true;
}
-struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
- ARRAY_SIZE(amd64_inj_attrs) +
- 1];
-
-struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
-
-static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
+static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
- unsigned int i = 0, j = 0;
+ int rc;
- for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
- sysfs_attrs[i] = amd64_dbg_attrs[i];
+ rc = amd64_create_sysfs_dbg_files(mci);
+ if (rc < 0)
+ return rc;
- if (boot_cpu_data.x86 >= 0x10)
- for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
- sysfs_attrs[i] = amd64_inj_attrs[j];
+ if (boot_cpu_data.x86 >= 0x10) {
+ rc = amd64_create_sysfs_inject_files(mci);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
- sysfs_attrs[i] = terminator;
+static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
+{
+ amd64_remove_sysfs_dbg_files(mci);
- mci->mc_driver_sysfs_attributes = sysfs_attrs;
+ if (boot_cpu_data.x86 >= 0x10)
+ amd64_remove_sysfs_inject_files(mci);
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
@@ -2601,20 +2593,22 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
mci->pvt_info = pvt;
- mci->dev = &pvt->F2->dev;
+ mci->pdev = &pvt->F2->dev;
setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
- set_mc_sysfs_attrs(mci);
-
ret = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf1("failed edac_mc_add_mc()\n");
+ edac_dbg(1, "failed edac_mc_add_mc()\n");
goto err_add_mc;
}
+ if (set_mc_sysfs_attrs(mci)) {
+		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
+ goto err_add_sysfs;
+ }
/* register stuff with EDAC MCE */
if (report_gart_errors)
@@ -2628,6 +2622,8 @@ static int amd64_init_one_instance(struct pci_dev *F2)
return 0;
+err_add_sysfs:
+ edac_mc_del_mc(mci->pdev);
err_add_mc:
edac_mc_free(mci);
@@ -2651,7 +2647,7 @@ static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret < 0) {
- debugf0("ret=%d\n", ret);
+ edac_dbg(0, "ret=%d\n", ret);
return -EIO;
}
@@ -2698,6 +2694,8 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
+ mci = find_mci_by_dev(&pdev->dev);
+ del_mc_sysfs_attrs(mci);
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
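
Most of the churn in this file is the switch to the new edac_mc_handle_error() prototype, which takes an explicit error count and three layer indices and drops the old EDAC_MOD_STR/NULL trailing arguments. A sketch of the new call shape, wrapped in a hypothetical report_ce() helper purely for illustration:

	#include "amd64_edac.h"

	static void report_ce(struct mem_ctl_info *mci, unsigned long page,
			      unsigned long offset, unsigned long syndrome,
			      int csrow, int channel, const char *msg)
	{
		/* error count of 1; layers are csrow/channel/-1; other_detail is "" */
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     page, offset, syndrome,
				     csrow, channel, -1,
				     msg, "");
	}
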
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 9a666cb985b2..8d4804732bac 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -413,20 +413,33 @@ struct ecc_settings {
};
#ifdef CONFIG_EDAC_DEBUG
-#define NUM_DBG_ATTRS 5
+int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci);
+void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci);
+
#else
-#define NUM_DBG_ATTRS 0
+static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+ return 0;
+}
+static inline void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+}
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
-#define NUM_INJ_ATTRS 5
+int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci);
+void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci);
+
#else
-#define NUM_INJ_ATTRS 0
+static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+ return 0;
+}
+static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+}
#endif
-extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
- amd64_inj_attrs[NUM_INJ_ATTRS];
-
/*
* Each of the PCI Device IDs types have their own set of hardware accessor
* functions and per device encoding/decoding logic.
@@ -460,3 +473,5 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c
index e3562288f4ce..2c1bbf740605 100644
--- a/drivers/edac/amd64_edac_dbg.c
+++ b/drivers/edac/amd64_edac_dbg.c
@@ -1,8 +1,11 @@
#include "amd64_edac.h"
#define EDAC_DCT_ATTR_SHOW(reg) \
-static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \
+static ssize_t amd64_##reg##_show(struct device *dev, \
+ struct device_attribute *mattr, \
+ char *data) \
{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
struct amd64_pvt *pvt = mci->pvt_info; \
return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \
}
@@ -12,8 +15,12 @@ EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);
-static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
+static ssize_t amd64_hole_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
u64 hole_base = 0;
u64 hole_offset = 0;
u64 hole_size = 0;
@@ -27,46 +34,40 @@ static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
/*
* update NUM_DBG_ATTRS in case you add new members
*/
-struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
+static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL);
+static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL);
+static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL);
+static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL);
+static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL);
+
+int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_dhar);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_dbam);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_topmem);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_topmem2);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_dram_hole);
+ if (rc < 0)
+ return rc;
- {
- .attr = {
- .name = "dhar",
- .mode = (S_IRUGO)
- },
- .show = amd64_dhar_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "dbam",
- .mode = (S_IRUGO)
- },
- .show = amd64_dbam0_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "topmem",
- .mode = (S_IRUGO)
- },
- .show = amd64_top_mem_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "topmem2",
- .mode = (S_IRUGO)
- },
- .show = amd64_top_mem2_show,
- .store = NULL,
- },
- {
- .attr = {
- .name = "dram_hole",
- .mode = (S_IRUGO)
- },
- .show = amd64_hole_show,
- .store = NULL,
- },
-};
+ return 0;
+}
+
+void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci)
+{
+ device_remove_file(&mci->dev, &dev_attr_dhar);
+ device_remove_file(&mci->dev, &dev_attr_dbam);
+ device_remove_file(&mci->dev, &dev_attr_topmem);
+ device_remove_file(&mci->dev, &dev_attr_topmem2);
+ device_remove_file(&mci->dev, &dev_attr_dram_hole);
+}
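
With the attributes now plain DEVICE_ATTR()s hanging off mci->dev, the five files are created and removed one by one. The same result could also be had with a single attribute group; a hypothetical equivalent (not what this patch does), placed in the same file so the dev_attr_* symbols above are in scope:

	static struct attribute *amd64_dbg_dev_attrs[] = {
		&dev_attr_dhar.attr,
		&dev_attr_dbam.attr,
		&dev_attr_topmem.attr,
		&dev_attr_topmem2.attr,
		&dev_attr_dram_hole.attr,
		NULL
	};

	static const struct attribute_group amd64_dbg_group = {
		.attrs = amd64_dbg_dev_attrs,
	};

	/*
	 * sysfs_create_group(&mci->dev.kobj, &amd64_dbg_group) would then stand
	 * in for the five device_create_file() calls, and sysfs_remove_group()
	 * for the matching device_remove_file() calls.
	 */
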
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 303f10e03dda..53d972e00dfb 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -1,7 +1,10 @@
#include "amd64_edac.h"
-static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
+static ssize_t amd64_inject_section_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.section);
}
@@ -12,9 +15,11 @@ static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf)
*
* range: 0..3
*/
-static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
+static ssize_t amd64_inject_section_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
@@ -33,8 +38,11 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
return ret;
}
-static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
+static ssize_t amd64_inject_word_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.word);
}
@@ -45,9 +53,11 @@ static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf)
*
* range: 0..8
*/
-static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t amd64_inject_word_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
@@ -66,8 +76,11 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
return ret;
}
-static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
+static ssize_t amd64_inject_ecc_vector_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *buf)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}
@@ -77,9 +90,11 @@ static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf)
* corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
*/
-static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t amd64_inject_ecc_vector_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
@@ -103,9 +118,11 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
* Do a DRAM ECC read. Assemble staged values in the pvt area, format into
* fields needed by the injection registers and read the NB Array Data Port.
*/
-static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t amd64_inject_read_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
u32 section, word_bits;
@@ -125,7 +142,8 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n",
+ section, word_bits);
return count;
}
@@ -136,9 +154,11 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
* Do a DRAM ECC write. Assemble staged values in the pvt area and format into
* fields needed by the injection registers.
*/
-static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
+static ssize_t amd64_inject_write_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
u32 section, word_bits;
@@ -158,7 +178,8 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Issue 'word' and 'bit' along with the READ request */
amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
- debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
+ edac_dbg(0, "section=0x%x word_bits=0x%x\n",
+ section, word_bits);
return count;
}
@@ -168,46 +189,47 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/*
* update NUM_INJ_ATTRS in case you add new members
*/
-struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
-
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_section_show,
- .store = amd64_inject_section_store,
- },
- {
- .attr = {
- .name = "inject_word",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_word_show,
- .store = amd64_inject_word_store,
- },
- {
- .attr = {
- .name = "inject_ecc_vector",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = amd64_inject_ecc_vector_show,
- .store = amd64_inject_ecc_vector_store,
- },
- {
- .attr = {
- .name = "inject_write",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = NULL,
- .store = amd64_inject_write_store,
- },
- {
- .attr = {
- .name = "inject_read",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = NULL,
- .store = amd64_inject_read_store,
- },
-};
+
+static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
+ amd64_inject_section_show, amd64_inject_section_store);
+static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
+ amd64_inject_word_show, amd64_inject_word_store);
+static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
+ amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
+static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR,
+ NULL, amd64_inject_write_store);
+static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR,
+ NULL, amd64_inject_read_store);
+
+
+int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject_section);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_word);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_ecc_vector);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_write);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_read);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci)
+{
+ device_remove_file(&mci->dev, &dev_attr_inject_section);
+ device_remove_file(&mci->dev, &dev_attr_inject_word);
+ device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector);
+ device_remove_file(&mci->dev, &dev_attr_inject_write);
+ device_remove_file(&mci->dev, &dev_attr_inject_read);
+}
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 9774d443fa57..29eeb68a200c 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -105,7 +105,7 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS,
&info->ecc_mode_status);
@@ -145,10 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- mci->csrows[row].first_page, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ mci->csrows[row]->first_page, 0, 0,
row, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
}
@@ -160,10 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
- mci->csrows[row].first_page, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ mci->csrows[row]->first_page, 0, 0,
row, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
}
@@ -180,7 +180,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
static void amd76x_check(struct mem_ctl_info *mci)
{
struct amd76x_error_info info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
amd76x_get_error_info(mci, &info);
amd76x_process_error_info(mci, &info, 1);
}
@@ -194,8 +194,8 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int index;
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
@@ -241,7 +241,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
u32 ems_mode;
struct amd76x_error_info discard;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
@@ -256,8 +256,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("%s(): mci = %p\n", __func__, mci);
- mci->dev = &pdev->dev;
+ edac_dbg(0, "mci = %p\n", mci);
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = ems_mode ?
@@ -276,7 +276,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
@@ -292,7 +292,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail:
@@ -304,7 +304,7 @@ fail:
static int __devinit amd76x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return amd76x_probe1(pdev, ent->driver_data);
@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (amd76x_pci)
edac_pci_release_generic_ctl(amd76x_pci);
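
The edac_mc_handle_error() conversions in this file follow the pattern used by every driver in the series: an error-count argument is inserted right after mci and the trailing mcelog pointer is dropped (see the prototype hunk in edac_core.h further down). An annotated call, with placeholder values, reads:

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED,	/* or HW_EVENT_ERR_UNCORRECTED */
			     mci,
			     1,				/* error_count folded into this report */
			     page, offset, syndrome,	/* address info, 0 when unknown */
			     row, chan, -1,		/* top/mid/low layer, -1 = not known */
			     "driver CE",		/* msg */
			     "");			/* other_detail; the mcelog arg is gone */

Drivers that still think in chip-select rows, such as amd76x and cpc925, pass the row as the top layer and the channel as the middle one, with -1 for any layer they cannot resolve.
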
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 69ee6aab5c71..a1bbd8edd257 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -33,10 +33,10 @@ struct cell_edac_priv
static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
{
struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = &mci->csrows[0];
+ struct csrow_info *csrow = mci->csrows[0];
unsigned long address, pfn, offset, syndrome;
- dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
+ dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n",
priv->node, chan, ar);
/* Address decoding is likely a bit bogus, to dbl check */
@@ -48,18 +48,18 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
csrow->first_page + pfn, offset, syndrome,
- 0, chan, -1, "", "", NULL);
+ 0, chan, -1, "", "");
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
{
struct cell_edac_priv *priv = mci->pvt_info;
- struct csrow_info *csrow = &mci->csrows[0];
+ struct csrow_info *csrow = mci->csrows[0];
unsigned long address, pfn, offset;
- dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
+ dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n",
priv->node, chan, ar);
/* Address decoding is likely a bit bogus, to dbl check */
@@ -70,9 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
csrow->first_page + pfn, offset, 0,
- 0, chan, -1, "", "", NULL);
+ 0, chan, -1, "", "");
}
static void cell_edac_check(struct mem_ctl_info *mci)
@@ -83,7 +83,7 @@ static void cell_edac_check(struct mem_ctl_info *mci)
fir = in_be64(&priv->regs->mic_fir);
#ifdef DEBUG
if (fir != priv->prev_fir) {
- dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
+ dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir);
priv->prev_fir = fir;
}
#endif
@@ -119,14 +119,14 @@ static void cell_edac_check(struct mem_ctl_info *mci)
mb(); /* sync up */
#ifdef DEBUG
fir = in_be64(&priv->regs->mic_fir);
- dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir);
+ dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir);
#endif
}
}
static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
{
- struct csrow_info *csrow = &mci->csrows[0];
+ struct csrow_info *csrow = mci->csrows[0];
struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
@@ -150,12 +150,12 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
csrow->last_page = csrow->first_page + nr_pages - 1;
for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j].dimm;
+ dimm = csrow->channels[j]->dimm;
dimm->mtype = MEM_XDR;
dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = nr_pages / csrow->nr_channels;
}
- dev_dbg(mci->dev,
+ dev_dbg(mci->pdev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
@@ -212,7 +212,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
priv->regs = regs;
priv->node = pdev->id;
priv->chanmask = chanmask;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_XDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index e22030a9de66..c2ef13495873 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -316,13 +316,12 @@ static void get_total_mem(struct cpc925_mc_pdata *pdata)
reg += aw;
size = of_read_number(reg, sw);
reg += sw;
- debugf1("%s: start 0x%lx, size 0x%lx\n", __func__,
- start, size);
+ edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
pdata->total_mem += size;
} while (reg < reg_end);
of_node_put(np);
- debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem);
+ edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
}
static void cpc925_init_csrows(struct mem_ctl_info *mci)
@@ -330,8 +329,9 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
struct dimm_info *dimm;
+ enum dev_type dtype;
int index, j;
- u32 mbmr, mbbar, bba;
+ u32 mbmr, mbbar, bba, grain;
unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
@@ -347,7 +347,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
if (bba == 0)
continue; /* not populated */
- csrow = &mci->csrows[index];
+ csrow = mci->csrows[index];
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
@@ -355,37 +355,36 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ dtype = DEV_X8;
+ break;
+ default:
+ dtype = DEV_UNKNOWN;
+ break;
+ }
for (j = 0; j < csrow->nr_channels; j++) {
- dimm = csrow->channels[j].dimm;
-
+ dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / csrow->nr_channels;
dimm->mtype = MEM_RDDR;
dimm->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- dimm->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- dimm->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- dimm->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- dimm->dtype = DEV_X8;
- break;
- default:
- dimm->dtype = DEV_UNKNOWN;
- break;
- }
+ dimm->grain = grain;
+ dimm->dtype = dtype;
}
}
}
@@ -463,7 +462,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
*csrow = rank;
#ifdef CONFIG_EDAC_DEBUG
- if (mci->csrows[rank].first_page == 0) {
+ if (mci->csrows[rank]->first_page == 0) {
cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
"non-populated csrow, broken hardware?\n");
return;
@@ -471,7 +470,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
#endif
/* Revert csrow number */
- pa = mci->csrows[rank].first_page << PAGE_SHIFT;
+ pa = mci->csrows[rank]->first_page << PAGE_SHIFT;
/* Revert column address */
col += bcnt;
@@ -512,7 +511,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
*offset = pa & (PAGE_SIZE - 1);
*pfn = pa >> PAGE_SHIFT;
- debugf0("%s: ECC physical address 0x%lx\n", __func__, pa);
+ edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
@@ -555,18 +554,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, offset, syndrome,
csrow, channel, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, offset, 0,
csrow, -1, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -852,8 +851,8 @@ static void cpc925_add_edac_devices(void __iomem *vbase)
goto err2;
}
- debugf0("%s: Successfully added edac device for %s\n",
- __func__, dev_info->ctl_name);
+ edac_dbg(0, "Successfully added edac device for %s\n",
+ dev_info->ctl_name);
continue;
@@ -884,8 +883,8 @@ static void cpc925_del_edac_devices(void)
if (dev_info->exit)
dev_info->exit(dev_info);
- debugf0("%s: Successfully deleted edac device for %s\n",
- __func__, dev_info->ctl_name);
+ edac_dbg(0, "Successfully deleted edac device for %s\n",
+ dev_info->ctl_name);
}
}
@@ -900,7 +899,7 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
- debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr);
+ edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
(si == 0)) {
@@ -928,8 +927,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase)
((mbcr & MBCR_64BITBUS_MASK) == 0))
dual = 1;
- debugf0("%s: %s channel\n", __func__,
- (dual > 0) ? "Dual" : "Single");
+ edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single");
return dual;
}
@@ -944,7 +942,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
struct resource *r;
int res = 0, nr_channels;
- debugf0("%s: %s platform device found!\n", __func__, pdev->name);
+ edac_dbg(0, "%s platform device found!\n", pdev->name);
if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
res = -ENOMEM;
@@ -995,7 +993,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
pdata->edac_idx = edac_mc_idx++;
pdata->name = pdev->name;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
mci->dev_name = dev_name(&pdev->dev);
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
@@ -1026,7 +1024,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
cpc925_add_edac_devices(vbase);
/* get this far and it's successful */
- debugf0("%s: success\n", __func__);
+ edac_dbg(0, "success\n");
res = 0;
goto out;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 3186512c9739..a5ed6b795fd4 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -309,7 +309,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
u32 remap;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
if (page < pvt->tolm)
return page;
@@ -335,7 +335,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
int i;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* convert the addr to 4k page */
page = sec1_add >> (PAGE_SHIFT - 4);
@@ -371,10 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offset_in_page(sec1_add << 4), sec1_syndrome,
row, channel, -1,
- "e752x CE", "", NULL);
+ "e752x CE", "");
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -394,7 +394,7 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
int row;
struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
if (error_one & 0x0202) {
error_2b = ded_add;
@@ -408,11 +408,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
block_page,
offset_in_page(error_2b << 4), 0,
row, -1, -1,
- "e752x UE from Read", "", NULL);
+ "e752x UE from Read", "");
}
if (error_one & 0x0404) {
@@ -427,11 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
block_page,
offset_in_page(error_2b << 4), 0,
row, -1, -1,
- "e752x UE from Scruber", "", NULL);
+ "e752x UE from Scruber", "");
}
}
@@ -453,10 +453,10 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
if (!handle_error)
return;
- debugf3("%s()\n", __func__);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ edac_dbg(3, "\n");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
- "e752x UE log memory write", "", NULL);
+ "e752x UE log memory write", "");
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -982,7 +982,7 @@ static void e752x_check(struct mem_ctl_info *mci)
{
struct e752x_error_info info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
e752x_get_error_info(mci, &info);
e752x_process_error_info(mci, &info, 1);
}
@@ -1069,6 +1069,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u16 ddrcsr)
{
struct csrow_info *csrow;
+ enum edac_type edac_mode;
unsigned long last_cumul_size;
int index, mem_dev, drc_chan;
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
@@ -1095,14 +1096,13 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
mem_dev = (dra >> (index * 4 + 2)) & 0x3;
- csrow = &mci->csrows[remap_csrow_index(mci, index)];
+ csrow = mci->csrows[remap_csrow_index(mci, index)];
mem_dev = (mem_dev == 2);
pci_read_config_byte(pdev, E752X_DRB + index, &value);
/* convert a 128 or 64 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -1111,29 +1111,29 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ edac_mode = EDAC_NONE;
for (i = 0; i < csrow->nr_channels; i++) {
- struct dimm_info *dimm = csrow->channels[i].dimm;
+ struct dimm_info *dimm = csrow->channels[i]->dimm;
- debugf3("Initializing rank at (%i,%i)\n", index, i);
+ edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
dimm->nr_pages = nr_pages / csrow->nr_channels;
dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
dimm->mtype = MEM_RDDR; /* only one type supported */
dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- dimm->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- dimm->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- dimm->edac_mode = EDAC_NONE;
+ dimm->edac_mode = edac_mode;
}
}
}
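
Hoisting the EDAC-mode selection out of the per-channel loop is safe because it depends only on drc_ddim, drc_chan and mem_dev, all of which are fixed for the whole csrow. The decision the new block implements can be restated as a small helper; the function below is a sketch for illustration only, since the driver open-codes it and additionally ORs the matching EDAC_FLAG_* bit into mci->edac_cap:

	static enum edac_type pick_edac_mode(int drc_ddim, int drc_chan, int mem_dev)
	{
		if (!drc_ddim)
			return EDAC_NONE;	/* ECC reporting disabled */
		/* dual channel with x4 devices -> S4ECD4ED, otherwise SECDED */
		return (drc_chan && mem_dev) ? EDAC_S4ECD4ED : EDAC_SECDED;
	}

The e7xxx hunk further down performs the identical transformation.
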
@@ -1269,8 +1269,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
int drc_chan; /* Number of channels 0=1chan,1=2chan */
struct e752x_error_info discard;
- debugf0("%s(): mci\n", __func__);
- debugf0("Starting Probe1\n");
+ edac_dbg(0, "mci\n");
+ edac_dbg(0, "Starting Probe1\n");
/* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
@@ -1300,7 +1300,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf3("%s(): init mci\n", __func__);
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
/* 3100 IMCH supports SECDEC only */
mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
@@ -1308,9 +1308,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E752X_REVISION;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
- debugf3("%s(): init pvt\n", __func__);
+ edac_dbg(3, "init pvt\n");
pvt = (struct e752x_pvt *)mci->pvt_info;
pvt->dev_info = &e752x_devs[dev_idx];
pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
@@ -1320,7 +1320,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
}
- debugf3("%s(): more mci init\n", __func__);
+ edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e752x_check;
@@ -1342,7 +1342,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
else
mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+ edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E752X_TOLM, &pci_data);
@@ -1359,7 +1359,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
@@ -1377,7 +1377,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail:
@@ -1393,7 +1393,7 @@ fail:
static int __devinit e752x_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* wake up and enable device */
if (pci_enable_device(pdev) < 0)
@@ -1407,7 +1407,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
struct e752x_pvt *pvt;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (e752x_pci)
edac_pci_release_generic_ctl(e752x_pci);
@@ -1453,7 +1453,7 @@ static int __init e752x_init(void)
{
int pci_rc;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1464,7 +1464,7 @@ static int __init e752x_init(void)
static void __exit e752x_exit(void)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
pci_unregister_driver(&e752x_driver);
}
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 9a9c1a546797..9ff57f361a43 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -166,7 +166,7 @@ static const struct e7xxx_dev_info e7xxx_devs[] = {
/* FIXME - is this valid for both SECDED and S4ECD4ED? */
static inline int e7xxx_find_channel(u16 syndrome)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
if ((syndrome & 0xff00) == 0)
return 0;
@@ -186,7 +186,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
u32 remap;
struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
if ((page < pvt->tolm) ||
((page >= 0x100000) && (page < pvt->remapbase)))
@@ -208,7 +208,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
int row;
int channel;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* read the error address */
error_1b = info->dram_celog_add;
/* FIXME - should use PAGE_SHIFT */
@@ -219,15 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
- row, channel, -1, "e7xxx CE", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome,
+ row, channel, -1, "e7xxx CE", "");
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
- debugf3("%s()\n", __func__);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
- "e7xxx CE log register overflow", "", NULL);
+ edac_dbg(3, "\n");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "");
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -235,23 +235,23 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
u32 error_2b, block_page;
int row;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* read the error address */
error_2b = info->dram_uelog_add;
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
- row, -1, -1, "e7xxx UE", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0,
+ row, -1, -1, "e7xxx UE", "");
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
- "e7xxx UE log register overflow", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
+ "e7xxx UE log register overflow", "");
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -334,7 +334,7 @@ static void e7xxx_check(struct mem_ctl_info *mci)
{
struct e7xxx_error_info info;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
e7xxx_get_error_info(mci, &info);
e7xxx_process_error_info(mci, &info, 1);
}
@@ -362,6 +362,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
struct dimm_info *dimm;
+ enum edac_type edac_mode;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
@@ -377,13 +378,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (index = 0; index < mci->nr_csrows; index++) {
/* mem_dev 0=x8, 1=x4 */
mem_dev = (dra >> (index * 4 + 3)) & 0x1;
- csrow = &mci->csrows[index];
+ csrow = mci->csrows[index];
pci_read_config_byte(pdev, E7XXX_DRB + index, &value);
/* convert a 64 or 32 MiB DRB to a page size. */
cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -392,28 +392,29 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ edac_mode = EDAC_NONE;
+
for (j = 0; j < drc_chan + 1; j++) {
- dimm = csrow->channels[j].dimm;
+ dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / (drc_chan + 1);
dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
dimm->mtype = MEM_RDDR; /* only one type supported */
dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- dimm->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- dimm->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- dimm->edac_mode = EDAC_NONE;
+ dimm->edac_mode = edac_mode;
}
}
}
@@ -428,7 +429,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
int drc_chan;
struct e7xxx_error_info discard;
- debugf0("%s(): mci\n", __func__);
+ edac_dbg(0, "mci\n");
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
@@ -451,15 +452,15 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf3("%s(): init mci\n", __func__);
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
EDAC_FLAG_S4ECD4ED;
/* FIXME - what if different memory types are in different csrows? */
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = E7XXX_REVISION;
- mci->dev = &pdev->dev;
- debugf3("%s(): init pvt\n", __func__);
+ mci->pdev = &pdev->dev;
+ edac_dbg(3, "init pvt\n");
pvt = (struct e7xxx_pvt *)mci->pvt_info;
pvt->dev_info = &e7xxx_devs[dev_idx];
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -472,14 +473,14 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
goto fail0;
}
- debugf3("%s(): more mci init\n", __func__);
+ edac_dbg(3, "more mci init\n");
mci->ctl_name = pvt->dev_info->ctl_name;
mci->dev_name = pci_name(pdev);
mci->edac_check = e7xxx_check;
mci->ctl_page_to_phys = ctl_page_to_phys;
e7xxx_init_csrows(mci, pdev, dev_idx, drc);
mci->edac_cap |= EDAC_FLAG_NONE;
- debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
+ edac_dbg(3, "tolm, remapbase, remaplimit\n");
/* load the top of low memory, remap base, and remap limit vars */
pci_read_config_word(pdev, E7XXX_TOLM, &pci_data);
pvt->tolm = ((u32) pci_data) << 4;
@@ -498,7 +499,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
@@ -514,7 +515,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail1:
@@ -530,7 +531,7 @@ fail0:
static int __devinit e7xxx_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* wake up and enable device */
return pci_enable_device(pdev) ?
@@ -542,7 +543,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
struct e7xxx_pvt *pvt;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (e7xxx_pci)
edac_pci_release_generic_ctl(e7xxx_pci);
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 117490d4f835..23bb99fa44f1 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -71,26 +71,21 @@ extern const char *edac_mem_types[];
#ifdef CONFIG_EDAC_DEBUG
extern int edac_debug_level;
-#define edac_debug_printk(level, fmt, arg...) \
- do { \
- if (level <= edac_debug_level) \
- edac_printk(KERN_DEBUG, EDAC_DEBUG, \
- "%s: " fmt, __func__, ##arg); \
- } while (0)
-
-#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
-#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
-#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
-#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
-#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (level <= edac_debug_level) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
#else /* !CONFIG_EDAC_DEBUG */
-#define debugf0( ... )
-#define debugf1( ... )
-#define debugf2( ... )
-#define debugf3( ... )
-#define debugf4( ... )
+#define edac_dbg(level, fmt, ...) \
+do { \
+ if (0) \
+ edac_printk(KERN_DEBUG, EDAC_DEBUG, \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
#endif /* !CONFIG_EDAC_DEBUG */
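
All debugfN() call sites in the tree are rewritten in terms of the single edac_dbg(level, fmt, ...) macro above. Note that the !CONFIG_EDAC_DEBUG branch keeps the call inside if (0) rather than expanding to nothing, so the format string and its arguments are still type-checked by the compiler while the whole statement is discarded as dead code. Conversions look like this (taken from the hunks in this series):

	/* before */
	debugf0("%s()\n", __func__);
	debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size);

	/* after: the level becomes the first argument and the "%s: "
	 * __func__ prefix is supplied by the macro itself */
	edac_dbg(0, "\n");
	edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
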
@@ -460,15 +455,15 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
+ const u16 error_count,
const unsigned long page_frame_number,
const unsigned long offset_in_page,
const unsigned long syndrome,
- const int layer0,
- const int layer1,
- const int layer2,
+ const int top_layer,
+ const int mid_layer,
+ const int low_layer,
const char *msg,
- const char *other_detail,
- const void *mcelog);
+ const char *other_detail);
/*
* edac_device APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index ee3f1f810c1e..211021dfec73 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -40,12 +40,13 @@ static LIST_HEAD(edac_device_list);
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
- debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx);
- debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
- debugf3("\tdev = %p\n", edac_dev->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n",
- edac_dev->mod_name, edac_dev->ctl_name);
- debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
+ edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n",
+ edac_dev, edac_dev->dev_idx);
+ edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
+ edac_dbg(3, "\tdev = %p\n", edac_dev->dev);
+ edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
+ edac_dev->mod_name, edac_dev->ctl_name);
+ edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
@@ -82,8 +83,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
void *pvt, *p;
int err;
- debugf4("%s() instances=%d blocks=%d\n",
- __func__, nr_instances, nr_blocks);
+ edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks);
/* Calculate the size of memory we need to allocate AND
* determine the offsets of the various item arrays
@@ -156,8 +156,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
/* Name of this edac device */
snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
- debugf4("%s() edac_dev=%p next after end=%p\n",
- __func__, dev_ctl, pvt + sz_private );
+ edac_dbg(4, "edac_dev=%p next after end=%p\n",
+ dev_ctl, pvt + sz_private);
/* Initialize every Instance */
for (instance = 0; instance < nr_instances; instance++) {
@@ -178,10 +178,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
snprintf(blk->name, sizeof(blk->name),
"%s%d", edac_block_name, block+offset_value);
- debugf4("%s() instance=%d inst_p=%p block=#%d "
- "block_p=%p name='%s'\n",
- __func__, instance, inst, block,
- blk, blk->name);
+ edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n",
+ instance, inst, block, blk, blk->name);
/* if there are NO attributes OR no attribute pointer
* then continue on to next block iteration
@@ -194,8 +192,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
blk->block_attributes = attrib_p;
- debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
- __func__, blk->block_attributes);
+ edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n",
+ blk->block_attributes);
/* Initialize every user specified attribute in this
* block with the data the caller passed in
@@ -214,11 +212,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
attrib->block = blk; /* up link */
- debugf4("%s() alloc-attrib=%p attrib_name='%s' "
- "attrib-spec=%p spec-name=%s\n",
- __func__, attrib, attrib->attr.name,
- &attrib_spec[attr],
- attrib_spec[attr].attr.name
+ edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n",
+ attrib, attrib->attr.name,
+ &attrib_spec[attr],
+ attrib_spec[attr].attr.name
);
}
}
@@ -273,7 +270,7 @@ static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
struct edac_device_ctl_info *edac_dev;
struct list_head *item;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
list_for_each(item, &edac_device_list) {
edac_dev = list_entry(item, struct edac_device_ctl_info, link);
@@ -408,7 +405,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
unsigned msec)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* take the arg 'msec' and set it into the control structure
* to used in the time period calculation
@@ -496,7 +493,7 @@ EXPORT_SYMBOL_GPL(edac_device_alloc_index);
*/
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
@@ -570,7 +567,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
struct edac_device_ctl_info *edac_dev;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mutex_lock(&device_ctls_mutex);
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index b4ea185ccebf..fb68a06ad683 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -202,7 +202,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj)
{
struct edac_device_ctl_info *edac_dev = to_edacdev(kobj);
- debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx);
+ edac_dbg(4, "control index=%d\n", edac_dev->dev_idx);
/* decrement the EDAC CORE module ref count */
module_put(edac_dev->owner);
@@ -233,12 +233,12 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
struct bus_type *edac_subsys;
int err;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
/* get the /sys/devices/system/edac reference */
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys error\n", __func__);
+ edac_dbg(1, "no edac_subsys error\n");
err = -ENODEV;
goto err_out;
}
@@ -264,8 +264,8 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
&edac_subsys->dev_root->kobj,
"%s", edac_dev->name);
if (err) {
- debugf1("%s()Failed to register '.../edac/%s'\n",
- __func__, edac_dev->name);
+ edac_dbg(1, "Failed to register '.../edac/%s'\n",
+ edac_dev->name);
goto err_kobj_reg;
}
kobject_uevent(&edac_dev->kobj, KOBJ_ADD);
@@ -274,8 +274,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
* edac_device_unregister_sysfs_main_kobj() must be used
*/
- debugf4("%s() Registered '.../edac/%s' kobject\n",
- __func__, edac_dev->name);
+ edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name);
return 0;
@@ -296,9 +295,8 @@ err_out:
*/
void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
{
- debugf0("%s()\n", __func__);
- debugf4("%s() name of kobject is: %s\n",
- __func__, kobject_name(&dev->kobj));
+ edac_dbg(0, "\n");
+ edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj));
/*
* Unregister the edac device's kobject and
@@ -336,7 +334,7 @@ static void edac_device_ctrl_instance_release(struct kobject *kobj)
{
struct edac_device_instance *instance;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
/* map from this kobj to the main control struct
* and then dec the main kobj count
@@ -442,7 +440,7 @@ static void edac_device_ctrl_block_release(struct kobject *kobj)
{
struct edac_device_block *block;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
/* get the container of the kobj */
block = to_block(kobj);
@@ -524,10 +522,10 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
struct edac_dev_sysfs_block_attribute *sysfs_attrib;
struct kobject *main_kobj;
- debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n",
- __func__, instance->name, instance, block->name, block);
- debugf4("%s() block kobj=%p block kobj->parent=%p\n",
- __func__, &block->kobj, &block->kobj.parent);
+ edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n",
+ instance->name, instance, block->name, block);
+ edac_dbg(4, "block kobj=%p block kobj->parent=%p\n",
+ &block->kobj, &block->kobj.parent);
/* init this block's kobject */
memset(&block->kobj, 0, sizeof(struct kobject));
@@ -546,8 +544,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
&instance->kobj,
"%s", block->name);
if (err) {
- debugf1("%s() Failed to register instance '%s'\n",
- __func__, block->name);
+ edac_dbg(1, "Failed to register instance '%s'\n", block->name);
kobject_put(main_kobj);
err = -ENODEV;
goto err_out;
@@ -560,11 +557,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev,
if (sysfs_attrib && block->nr_attribs) {
for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) {
- debugf4("%s() creating block attrib='%s' "
- "attrib->%p to kobj=%p\n",
- __func__,
- sysfs_attrib->attr.name,
- sysfs_attrib, &block->kobj);
+ edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n",
+ sysfs_attrib->attr.name,
+ sysfs_attrib, &block->kobj);
/* Create each block_attribute file */
err = sysfs_create_file(&block->kobj,
@@ -647,14 +642,14 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl,
&edac_dev->kobj, "%s", instance->name);
if (err != 0) {
- debugf2("%s() Failed to register instance '%s'\n",
- __func__, instance->name);
+ edac_dbg(2, "Failed to register instance '%s'\n",
+ instance->name);
kobject_put(main_kobj);
goto err_out;
}
- debugf4("%s() now register '%d' blocks for instance %d\n",
- __func__, instance->nr_blocks, idx);
+ edac_dbg(4, "now register '%d' blocks for instance %d\n",
+ instance->nr_blocks, idx);
/* register all blocks of this instance */
for (i = 0; i < instance->nr_blocks; i++) {
@@ -670,8 +665,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev,
}
kobject_uevent(&instance->kobj, KOBJ_ADD);
- debugf4("%s() Registered instance %d '%s' kobject\n",
- __func__, idx, instance->name);
+ edac_dbg(4, "Registered instance %d '%s' kobject\n",
+ idx, instance->name);
return 0;
@@ -715,7 +710,7 @@ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev)
int i, j;
int err;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* iterate over creation of the instances */
for (i = 0; i < edac_dev->nr_instances; i++) {
@@ -817,12 +812,12 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
int err;
struct kobject *edac_kobj = &edac_dev->kobj;
- debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx);
+ edac_dbg(0, "idx=%d\n", edac_dev->dev_idx);
/* go create any main attributes callers wants */
err = edac_device_add_main_sysfs_attributes(edac_dev);
if (err) {
- debugf0("%s() failed to add sysfs attribs\n", __func__);
+ edac_dbg(0, "failed to add sysfs attribs\n");
goto err_out;
}
@@ -832,8 +827,7 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
err = sysfs_create_link(edac_kobj,
&edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK);
if (err) {
- debugf0("%s() sysfs_create_link() returned err= %d\n",
- __func__, err);
+ edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto err_remove_main_attribs;
}
@@ -843,14 +837,13 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev)
*/
err = edac_device_create_instances(edac_dev);
if (err) {
- debugf0("%s() edac_device_create_instances() "
- "returned err= %d\n", __func__, err);
+ edac_dbg(0, "edac_device_create_instances() returned err= %d\n",
+ err);
goto err_remove_link;
}
- debugf4("%s() create-instances done, idx=%d\n",
- __func__, edac_dev->dev_idx);
+ edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx);
return 0;
@@ -873,7 +866,7 @@ err_out:
*/
void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* remove any main attributes for this device */
edac_device_remove_main_sysfs_attributes(edac_dev);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index de5ba86e8b89..616d90bcb3a4 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -27,70 +27,95 @@
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"
+#define CREATE_TRACE_POINTS
+#define TRACE_INCLUDE_PATH ../../include/ras
+#include <ras/ras_event.h>
+
/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
+unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned len)
+{
+ struct mem_ctl_info *mci = dimm->mci;
+ int i, n, count = 0;
+ char *p = buf;
+
+ for (i = 0; i < mci->n_layers; i++) {
+ n = snprintf(p, len, "%s %d ",
+ edac_layer_name[mci->layers[i].type],
+ dimm->location[i]);
+ p += n;
+ len -= n;
+ count += n;
+ if (!len)
+ break;
+ }
+
+ return count;
+}
+
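
edac_dimm_info_location() is a new helper that renders a DIMM's per-layer position (layer name plus index, e.g. "csrow 0 channel 1 ") into a caller-supplied buffer and returns the number of characters written. A usage sketch, mirroring how edac_mc_dump_dimm() below consumes it:

	char location[80];

	/* dimm is assumed to have been taken from mci->dimms[i] */
	edac_dimm_info_location(dimm, location, sizeof(location));
	edac_dbg(4, "%s%d is at %s\n",
		 mci->mem_is_per_rank ? "rank" : "dimm", i, location);
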
#ifdef CONFIG_EDAC_DEBUG
static void edac_mc_dump_channel(struct rank_info *chan)
{
- debugf4("\tchannel = %p\n", chan);
- debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
- debugf4("\tchannel->dimm = %p\n", chan->dimm);
+ edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
+ edac_dbg(4, " channel = %p\n", chan);
+ edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
+ edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}
-static void edac_mc_dump_dimm(struct dimm_info *dimm)
+static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
{
- int i;
-
- debugf4("\tdimm = %p\n", dimm);
- debugf4("\tdimm->label = '%s'\n", dimm->label);
- debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
- debugf4("\tdimm location ");
- for (i = 0; i < dimm->mci->n_layers; i++) {
- printk(KERN_CONT "%d", dimm->location[i]);
- if (i < dimm->mci->n_layers - 1)
- printk(KERN_CONT ".");
- }
- printk(KERN_CONT "\n");
- debugf4("\tdimm->grain = %d\n", dimm->grain);
- debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ char location[80];
+
+ edac_dimm_info_location(dimm, location, sizeof(location));
+
+ edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
+ dimm->mci->mem_is_per_rank ? "rank" : "dimm",
+ number, location, dimm->csrow, dimm->cschannel);
+ edac_dbg(4, " dimm = %p\n", dimm);
+ edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
+ edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
+ edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
- debugf4("\tcsrow = %p\n", csrow);
- debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
- debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
- debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
- debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
- debugf4("\tcsrow->channels = %p\n", csrow->channels);
- debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
+ edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
+ edac_dbg(4, " csrow = %p\n", csrow);
+ edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
+ edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
+ edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
+ edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
+ edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
+ edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
- debugf3("\tmci = %p\n", mci);
- debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
- debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
- debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
- debugf4("\tmci->edac_check = %p\n", mci->edac_check);
- debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
- mci->nr_csrows, mci->csrows);
- debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
- mci->tot_dimms, mci->dimms);
- debugf3("\tdev = %p\n", mci->dev);
- debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
- debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
+ edac_dbg(3, "\tmci = %p\n", mci);
+ edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
+ edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
+ edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
+ edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
+ edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
+ mci->nr_csrows, mci->csrows);
+ edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
+ edac_dbg(3, "\tdev = %p\n", mci->pdev);
+ edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
+ mci->mod_name, mci->ctl_name);
+ edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}
#endif /* CONFIG_EDAC_DEBUG */
@@ -205,15 +230,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
{
struct mem_ctl_info *mci;
struct edac_mc_layer *layer;
- struct csrow_info *csi, *csr;
- struct rank_info *chi, *chp, *chan;
+ struct csrow_info *csr;
+ struct rank_info *chan;
struct dimm_info *dimm;
u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
unsigned pos[EDAC_MAX_LAYERS];
unsigned size, tot_dimms = 1, count = 1;
unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
void *pvt, *p, *ptr = NULL;
- int i, j, err, row, chn, n, len;
+ int i, j, row, chn, n, len, off;
bool per_rank = false;
BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
@@ -239,26 +264,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
*/
mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
- csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
- chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
- dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
for (i = 0; i < n_layers; i++) {
count *= layers[i].size;
- debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+ edac_dbg(4, "errcount layer %d size %d\n", i, count);
ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
tot_errcount += 2 * count;
}
- debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+ edac_dbg(4, "allocating %d error counters\n", tot_errcount);
pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
- debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
- __func__, size,
- tot_dimms,
- per_rank ? "ranks" : "dimms",
- tot_csrows * tot_channels);
+ edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
+
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
return NULL;
@@ -267,9 +290,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
* rather than an imaginary chunk of memory located at address 0.
*/
layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
- csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
- chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
- dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
for (i = 0; i < n_layers; i++) {
mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
@@ -278,8 +298,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/* setup index and various internal pointers */
mci->mc_idx = mc_num;
- mci->csrows = csi;
- mci->dimms = dimm;
mci->tot_dimms = tot_dimms;
mci->pvt_info = pvt;
mci->n_layers = n_layers;
@@ -290,40 +308,57 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
mci->mem_is_per_rank = per_rank;
/*
- * Fill the csrow struct
+ * Allocate and fill the csrow/channels structs
*/
+ mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+ if (!mci->csrows)
+ goto error;
for (row = 0; row < tot_csrows; row++) {
- csr = &csi[row];
+ csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
+ if (!csr)
+ goto error;
+ mci->csrows[row] = csr;
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- chp = &chi[row * tot_channels];
- csr->channels = chp;
+ csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+ GFP_KERNEL);
+ if (!csr->channels)
+ goto error;
for (chn = 0; chn < tot_channels; chn++) {
- chan = &chp[chn];
+ chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
+ if (!chan)
+ goto error;
+ csr->channels[chn] = chan;
chan->chan_idx = chn;
chan->csrow = csr;
}
}
/*
- * Fill the dimm struct
+ * Allocate and fill the dimm structs
*/
+ mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+ if (!mci->dimms)
+ goto error;
+
memset(&pos, 0, sizeof(pos));
row = 0;
chn = 0;
- debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
- per_rank ? "ranks" : "dimms");
for (i = 0; i < tot_dimms; i++) {
- chan = &csi[row].channels[chn];
- dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
- pos[0], pos[1], pos[2]);
- dimm->mci = mci;
+ chan = mci->csrows[row]->channels[chn];
+ off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
+ if (off < 0 || off >= tot_dimms) {
+ edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
+ goto error;
+ }
- debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
- i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
- pos[0], pos[1], pos[2], row, chn);
+ dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
+ if (!dimm)
+ goto error;
+ mci->dimms[off] = dimm;
+ dimm->mci = mci;
/*
* Copy DIMM location and initialize it.
@@ -367,16 +402,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
}
mci->op_state = OP_ALLOC;
- INIT_LIST_HEAD(&mci->grp_kobj_list);
-
- /*
- * Initialize the 'root' kobj for the edac_mc controller
- */
- err = edac_mc_register_sysfs_main_kobj(mci);
- if (err) {
- kfree(mci);
- return NULL;
- }
/* at this point, the root kobj is valid, and in order to
* 'free' the object, then the function:
@@ -384,7 +409,30 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
* which will perform kobj unregistration and the actual free
* will occur during the kobject callback operation
*/
+
return mci;
+
+error:
+ if (mci->dimms) {
+ for (i = 0; i < tot_dimms; i++)
+ kfree(mci->dimms[i]);
+ kfree(mci->dimms);
+ }
+ if (mci->csrows) {
+ for (chn = 0; chn < tot_channels; chn++) {
+ csr = mci->csrows[chn];
+ if (csr) {
+ for (chn = 0; chn < tot_channels; chn++)
+ kfree(csr->channels[chn]);
+ kfree(csr);
+ }
+ kfree(mci->csrows[i]);
+ }
+ kfree(mci->csrows);
+ }
+ kfree(mci);
+
+ return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
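
One caveat about the error unwind above: the csrows branch reuses chn for both the outer and the inner loop and frees mci->csrows[i] with an index left over from the DIMM loop, so it does not walk the structure it just built one-for-one. A cleanup that does mirror the allocation scheme exactly would look like the sketch below (the helper name is illustrative; this is not the code in the hunk):

	static void edac_mc_free_partial(struct mem_ctl_info *mci,
					 unsigned tot_dimms,
					 unsigned tot_csrows,
					 unsigned tot_channels)
	{
		unsigned i, row, chn;

		if (mci->dimms) {
			for (i = 0; i < tot_dimms; i++)
				kfree(mci->dimms[i]);	/* kfree(NULL) is a no-op */
			kfree(mci->dimms);
		}
		if (mci->csrows) {
			for (row = 0; row < tot_csrows; row++) {
				struct csrow_info *csr = mci->csrows[row];

				if (!csr)
					continue;
				if (csr->channels) {
					for (chn = 0; chn < tot_channels; chn++)
						kfree(csr->channels[chn]);
					kfree(csr->channels);
				}
				kfree(csr);
			}
			kfree(mci->csrows);
		}
		kfree(mci);
	}
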
@@ -395,12 +443,10 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc);
*/
void edac_mc_free(struct mem_ctl_info *mci)
{
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
- edac_mc_unregister_sysfs_main_kobj(mci);
-
- /* free the mci instance memory here */
- kfree(mci);
+ /* the mci instance is freed here, when the sysfs object is dropped */
+ edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
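
With this change edac_mc_free() no longer kfree()s the mci itself; the memory goes away when the sysfs object handed over to edac_unregister_sysfs() drops its last reference. The device-model idiom being relied on is the release callback. A generic sketch follows; the callback name is illustrative and the real one lives in the edac_mc_sysfs code, which is not part of this hunk:

	/* Called by the driver core once the last reference to &mci->dev
	 * is put; only then is it safe to free the mci. */
	static void mci_release(struct device *dev)
	{
		struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);

		kfree(mci);
	}

	/* hooked up before the device is registered, roughly:
	 *	mci->dev.release = mci_release;
	 *	device_register(&mci->dev);
	 */
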
@@ -417,12 +463,12 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev)
struct mem_ctl_info *mci;
struct list_head *item;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- if (mci->dev == dev)
+ if (mci->pdev == dev)
return mci;
}
@@ -485,7 +531,7 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*/
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* if this instance is not in the POLL state, then simply return */
if (mci->op_state != OP_RUNNING_POLL)
@@ -512,8 +558,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
status = cancel_delayed_work(&mci->work);
if (status == 0) {
- debugf0("%s() not canceled, flush the queue\n",
- __func__);
+ edac_dbg(0, "not canceled, flush the queue\n");
/* workq instance might be running, wait for it */
flush_workqueue(edac_workqueue);
@@ -574,7 +619,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
insert_before = &mc_devices;
- p = find_mci_by_dev(mci->dev);
+ p = find_mci_by_dev(mci->pdev);
if (unlikely(p != NULL))
goto fail0;
@@ -596,7 +641,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci)
fail0:
edac_printk(KERN_WARNING, EDAC_MC,
- "%s (%s) %s %s already assigned %d\n", dev_name(p->dev),
+ "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
return 1;
@@ -660,7 +705,7 @@ EXPORT_SYMBOL(edac_mc_find);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
@@ -670,15 +715,22 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
int i;
for (i = 0; i < mci->nr_csrows; i++) {
+ struct csrow_info *csrow = mci->csrows[i];
+ u32 nr_pages = 0;
int j;
- edac_mc_dump_csrow(&mci->csrows[i]);
- for (j = 0; j < mci->csrows[i].nr_channels; j++)
- edac_mc_dump_channel(&mci->csrows[i].
- channels[j]);
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j]->dimm->nr_pages;
+ if (!nr_pages)
+ continue;
+ edac_mc_dump_csrow(csrow);
+ for (j = 0; j < csrow->nr_channels; j++)
+ if (csrow->channels[j]->dimm->nr_pages)
+ edac_mc_dump_channel(csrow->channels[j]);
}
for (i = 0; i < mci->tot_dimms; i++)
- edac_mc_dump_dimm(&mci->dimms[i]);
+ if (mci->dimms[i]->nr_pages)
+ edac_mc_dump_dimm(mci->dimms[i], i);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -732,7 +784,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mutex_lock(&mem_ctls_mutex);
@@ -770,7 +822,7 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
void *virt_addr;
unsigned long flags = 0;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* ECC error page was not in our memory. Ignore it. */
if (!pfn_valid(page))
@@ -797,26 +849,26 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
- struct csrow_info *csrows = mci->csrows;
+ struct csrow_info **csrows = mci->csrows;
int row, i, j, n;
- debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
+ edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
- struct csrow_info *csrow = &csrows[i];
+ struct csrow_info *csrow = csrows[i];
n = 0;
for (j = 0; j < csrow->nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j].dimm;
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
n += dimm->nr_pages;
}
if (n == 0)
continue;
- debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
- "mask(0x%lx)\n", mci->mc_idx, __func__,
- csrow->first_page, page, csrow->last_page,
- csrow->page_mask);
+ edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
+ mci->mc_idx,
+ csrow->first_page, page, csrow->last_page,
+ csrow->page_mask);
if ((page >= csrow->first_page) &&
(page <= csrow->last_page) &&
@@ -845,15 +897,16 @@ const char *edac_layer_name[] = {
EXPORT_SYMBOL_GPL(edac_layer_name);
static void edac_inc_ce_error(struct mem_ctl_info *mci,
- bool enable_per_layer_report,
- const int pos[EDAC_MAX_LAYERS])
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS],
+ const u16 count)
{
int i, index = 0;
- mci->ce_mc++;
+ mci->ce_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count++;
+ mci->ce_noinfo_count += count;
return;
}
@@ -861,7 +914,7 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
if (pos[i] < 0)
break;
index += pos[i];
- mci->ce_per_layer[i][index]++;
+ mci->ce_per_layer[i][index] += count;
if (i < mci->n_layers - 1)
index *= mci->layers[i + 1].size;
@@ -870,14 +923,15 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci,
static void edac_inc_ue_error(struct mem_ctl_info *mci,
bool enable_per_layer_report,
- const int pos[EDAC_MAX_LAYERS])
+ const int pos[EDAC_MAX_LAYERS],
+ const u16 count)
{
int i, index = 0;
- mci->ue_mc++;
+ mci->ue_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count++;
+ mci->ue_noinfo_count += count;
return;
}
@@ -885,7 +939,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
if (pos[i] < 0)
break;
index += pos[i];
- mci->ue_per_layer[i][index]++;
+ mci->ue_per_layer[i][index] += count;
if (i < mci->n_layers - 1)
index *= mci->layers[i + 1].size;
@@ -893,6 +947,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
}
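To make the per-layer accounting concrete, a small worked example (layer names and sizes assumed, not taken from this patch):

	/*
	 * With n_layers = 2, layers[1].size = 4 and pos = {1, 2}, the loop in
	 * edac_inc_ce_error() does:
	 *
	 *   i = 0: index = 1;      ce_per_layer[0][1] += count; index *= 4;
	 *   i = 1: index = 4 + 2;  ce_per_layer[1][6] += count;
	 *
	 * so both the layer-0 counter and the combined layer-0/layer-1
	 * counter grow by 'count'; edac_inc_ue_error() indexes the same way.
	 */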
static void edac_ce_error(struct mem_ctl_info *mci,
+ const u16 error_count,
const int pos[EDAC_MAX_LAYERS],
const char *msg,
const char *location,
@@ -902,23 +957,25 @@ static void edac_ce_error(struct mem_ctl_info *mci,
const bool enable_per_layer_report,
const unsigned long page_frame_number,
const unsigned long offset_in_page,
- u32 grain)
+ long grain)
{
unsigned long remapped_page;
if (edac_mc_get_log_ce()) {
if (other_detail && *other_detail)
edac_mc_printk(mci, KERN_WARNING,
- "CE %s on %s (%s%s - %s)\n",
+ "%d CE %s on %s (%s %s - %s)\n",
+ error_count,
msg, label, location,
detail, other_detail);
else
edac_mc_printk(mci, KERN_WARNING,
- "CE %s on %s (%s%s)\n",
+ "%d CE %s on %s (%s %s)\n",
+ error_count,
msg, label, location,
detail);
}
- edac_inc_ce_error(mci, enable_per_layer_report, pos);
+ edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
if (mci->scrub_mode & SCRUB_SW_SRC) {
/*
@@ -942,6 +999,7 @@ static void edac_ce_error(struct mem_ctl_info *mci,
}
static void edac_ue_error(struct mem_ctl_info *mci,
+ const u16 error_count,
const int pos[EDAC_MAX_LAYERS],
const char *msg,
const char *location,
@@ -953,12 +1011,14 @@ static void edac_ue_error(struct mem_ctl_info *mci,
if (edac_mc_get_log_ue()) {
if (other_detail && *other_detail)
edac_mc_printk(mci, KERN_WARNING,
- "UE %s on %s (%s%s - %s)\n",
+ "%d UE %s on %s (%s %s - %s)\n",
+ error_count,
msg, label, location, detail,
other_detail);
else
edac_mc_printk(mci, KERN_WARNING,
- "UE %s on %s (%s%s)\n",
+ "%d UE %s on %s (%s %s)\n",
+ error_count,
msg, label, location, detail);
}
@@ -971,33 +1031,53 @@ static void edac_ue_error(struct mem_ctl_info *mci,
msg, label, location, detail);
}
- edac_inc_ue_error(mci, enable_per_layer_report, pos);
+ edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
#define OTHER_LABEL " or "
+
+/**
+ * edac_mc_handle_error - reports a memory event to userspace
+ *
+ * @type: severity of the error (CE/UE/Fatal)
+ * @mci: a struct mem_ctl_info pointer
+ * @error_count: Number of errors of the same type
+ * @page_frame_number: mem page where the error occurred
+ * @offset_in_page: offset of the error inside the page
+ * @syndrome: ECC syndrome
+ * @top_layer: Memory layer[0] position
+ * @mid_layer: Memory layer[1] position
+ * @low_layer: Memory layer[2] position
+ * @msg: Message, meaningful to the end users,
+ * that explains the event
+ * @other_detail: Technical details about the event that
+ * may help hardware manufacturers and
+ * EDAC developers to analyse the event
+ */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
struct mem_ctl_info *mci,
+ const u16 error_count,
const unsigned long page_frame_number,
const unsigned long offset_in_page,
const unsigned long syndrome,
- const int layer0,
- const int layer1,
- const int layer2,
+ const int top_layer,
+ const int mid_layer,
+ const int low_layer,
const char *msg,
- const char *other_detail,
- const void *mcelog)
+ const char *other_detail)
{
/* FIXME: too much for stack: move it to some pre-allocated area */
char detail[80], location[80];
char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
char *p;
int row = -1, chan = -1;
- int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+ int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i;
- u32 grain;
+ long grain;
bool enable_per_layer_report = false;
+ u8 grain_bits;
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(3, "MC%d\n", mci->mc_idx);
/*
* Check if the event report is consistent and if the memory
@@ -1043,13 +1123,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = label;
*p = '\0';
for (i = 0; i < mci->tot_dimms; i++) {
- struct dimm_info *dimm = &mci->dimms[i];
+ struct dimm_info *dimm = mci->dimms[i];
- if (layer0 >= 0 && layer0 != dimm->location[0])
+ if (top_layer >= 0 && top_layer != dimm->location[0])
continue;
- if (layer1 >= 0 && layer1 != dimm->location[1])
+ if (mid_layer >= 0 && mid_layer != dimm->location[1])
continue;
- if (layer2 >= 0 && layer2 != dimm->location[2])
+ if (low_layer >= 0 && low_layer != dimm->location[2])
continue;
/* get the max grain, over the error match range */
@@ -1075,11 +1155,9 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
* get csrow/channel of the DIMM, in order to allow
* incrementing the compat API counters
*/
- debugf4("%s: %s csrows map: (%d,%d)\n",
- __func__,
- mci->mem_is_per_rank ? "rank" : "dimm",
- dimm->csrow, dimm->cschannel);
-
+ edac_dbg(4, "%s csrows map: (%d,%d)\n",
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
if (row == -1)
row = dimm->csrow;
else if (row >= 0 && row != dimm->csrow)
@@ -1095,19 +1173,18 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
if (!enable_per_layer_report) {
strcpy(label, "any memory");
} else {
- debugf4("%s: csrow/channel to increment: (%d,%d)\n",
- __func__, row, chan);
+ edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
if (p == label)
strcpy(label, "unknown memory");
if (type == HW_EVENT_ERR_CORRECTED) {
if (row >= 0) {
- mci->csrows[row].ce_count++;
+ mci->csrows[row]->ce_count += error_count;
if (chan >= 0)
- mci->csrows[row].channels[chan].ce_count++;
+ mci->csrows[row]->channels[chan]->ce_count += error_count;
}
} else
if (row >= 0)
- mci->csrows[row].ue_count++;
+ mci->csrows[row]->ue_count += error_count;
}
/* Fill the RAM location data */
@@ -1120,23 +1197,33 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
edac_layer_name[mci->layers[i].type],
pos[i]);
}
+ if (p > location)
+ *(p - 1) = '\0';
+
+ /* Report the error via the trace interface */
+
+ grain_bits = fls_long(grain) + 1;
+ trace_mc_event(type, msg, label, error_count,
+ mci->mc_idx, top_layer, mid_layer, low_layer,
+ PAGES_TO_MiB(page_frame_number) | offset_in_page,
+ grain_bits, syndrome, other_detail);
/* Memory type dependent details about the error */
if (type == HW_EVENT_ERR_CORRECTED) {
snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+ "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
page_frame_number, offset_in_page,
grain, syndrome);
- edac_ce_error(mci, pos, msg, location, label, detail,
- other_detail, enable_per_layer_report,
+ edac_ce_error(mci, error_count, pos, msg, location, label,
+ detail, other_detail, enable_per_layer_report,
page_frame_number, offset_in_page, grain);
} else {
snprintf(detail, sizeof(detail),
- "page:0x%lx offset:0x%lx grain:%d",
+ "page:0x%lx offset:0x%lx grain:%ld",
page_frame_number, offset_in_page, grain);
- edac_ue_error(mci, pos, msg, location, label, detail,
- other_detail, enable_per_layer_report);
+ edac_ue_error(mci, error_count, pos, msg, location, label,
+ detail, other_detail, enable_per_layer_report);
}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
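A minimal caller-side sketch of the reworked interface, modelled on the fake-injection code later in this patch; 'page', 'offset', 'syndrome', 'chan' and 'rank' are placeholder variables, not symbols from the patch:

	/* hypothetical driver reporting one corrected error */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,			/* error_count */
			     page, offset, syndrome,
			     chan,		/* top_layer */
			     rank,		/* mid_layer */
			     -1,		/* low_layer not known */
			     "read error",	/* msg */
			     "");		/* other_detail */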
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index f6a29b0eedc8..ed0bc07b8503 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -7,17 +7,21 @@
*
* Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
*
+ * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com>
+ * The entire API was rewritten and ported to use struct device
+ *
*/
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/bug.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
#include "edac_core.h"
#include "edac_module.h"
-
/* MC EDAC Controls, setable by module parameter, and sysfs */
static int edac_mc_log_ue = 1;
static int edac_mc_log_ce = 1;
@@ -78,6 +82,8 @@ module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
&edac_mc_poll_msec, 0644);
MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
+static struct device *mci_pdev;
+
/*
* various constants for Memory Controllers
*/
@@ -125,317 +131,526 @@ static const char *edac_caps[] = {
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
-/* EDAC sysfs CSROW data structures and methods
+#ifdef CONFIG_EDAC_LEGACY_SYSFS
+/*
+ * EDAC sysfs CSROW data structures and methods
+ */
+
+#define to_csrow(k) container_of(k, struct csrow_info, dev)
+
+/*
+ * We need it to avoid namespace conflicts between the legacy API
+ * and the per-dimm/per-rank one
*/
+#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
+
+struct dev_ch_attribute {
+ struct device_attribute attr;
+ int channel;
+};
+
+#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
+ struct dev_ch_attribute dev_attr_legacy_##_name = \
+ { __ATTR(_name, _mode, _show, _store), (_var) }
+
+#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
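For reference, a DEVICE_CHANNEL() instance such as ch0_dimm_label below expands roughly to the following (illustrative expansion, not extra code in the patch); the channel index is what to_channel() recovers inside the show/store callbacks:

	struct dev_ch_attribute dev_attr_legacy_ch0_dimm_label = {
		.attr    = __ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
				  channel_dimm_label_show,
				  channel_dimm_label_store),
		.channel = 0,
	};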
/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_ue_count_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
+ struct csrow_info *csrow = to_csrow(dev);
+
return sprintf(data, "%u\n", csrow->ue_count);
}
-static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_ce_count_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
+ struct csrow_info *csrow = to_csrow(dev);
+
return sprintf(data, "%u\n", csrow->ce_count);
}
-static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_size_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
+ struct csrow_info *csrow = to_csrow(dev);
int i;
u32 nr_pages = 0;
for (i = 0; i < csrow->nr_channels; i++)
- nr_pages += csrow->channels[i].dimm->nr_pages;
-
+ nr_pages += csrow->channels[i]->dimm->nr_pages;
return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
-static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_mem_type_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
- return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
+ struct csrow_info *csrow = to_csrow(dev);
+
+ return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
}
-static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_dev_type_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
- return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
+ struct csrow_info *csrow = to_csrow(dev);
+
+ return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
}
-static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
- int private)
+static ssize_t csrow_edac_mode_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
- return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
+ struct csrow_info *csrow = to_csrow(dev);
+
+ return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
- char *data, int channel)
+static ssize_t channel_dimm_label_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct csrow_info *csrow = to_csrow(dev);
+ unsigned chan = to_channel(mattr);
+ struct rank_info *rank = csrow->channels[chan];
+
/* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].dimm->label[0])
+ if (!rank->dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].dimm->label);
+ rank->dimm->label);
}
-static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
- const char *data,
- size_t count, int channel)
+static ssize_t channel_dimm_label_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct csrow_info *csrow = to_csrow(dev);
+ unsigned chan = to_channel(mattr);
+ struct rank_info *rank = csrow->channels[chan];
+
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].dimm->label, data, max_size);
- csrow->channels[channel].dimm->label[max_size] = '\0';
+ strncpy(rank->dimm->label, data, max_size);
+ rank->dimm->label[max_size] = '\0';
return max_size;
}
/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct csrow_info *csrow,
- char *data, int channel)
+static ssize_t channel_ce_count_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
{
- return sprintf(data, "%u\n", csrow->channels[channel].ce_count);
+ struct csrow_info *csrow = to_csrow(dev);
+ unsigned chan = to_channel(mattr);
+ struct rank_info *rank = csrow->channels[chan];
+
+ return sprintf(data, "%u\n", rank->ce_count);
}
-/* csrow specific attribute structure */
-struct csrowdev_attribute {
- struct attribute attr;
- ssize_t(*show) (struct csrow_info *, char *, int);
- ssize_t(*store) (struct csrow_info *, const char *, size_t, int);
- int private;
-};
+/* csrow<id> attribute files */
+DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
+DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
+DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
+DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
+DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
+DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
-#define to_csrow(k) container_of(k, struct csrow_info, kobj)
-#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
+/* default attributes of the CSROW<id> object */
+static struct attribute *csrow_attrs[] = {
+ &dev_attr_legacy_dev_type.attr,
+ &dev_attr_legacy_mem_type.attr,
+ &dev_attr_legacy_edac_mode.attr,
+ &dev_attr_legacy_size_mb.attr,
+ &dev_attr_legacy_ue_count.attr,
+ &dev_attr_legacy_ce_count.attr,
+ NULL,
+};
-/* Set of show/store higher level functions for default csrow attributes */
-static ssize_t csrowdev_show(struct kobject *kobj,
- struct attribute *attr, char *buffer)
-{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
+static struct attribute_group csrow_attr_grp = {
+ .attrs = csrow_attrs,
+};
- if (csrowdev_attr->show)
- return csrowdev_attr->show(csrow,
- buffer, csrowdev_attr->private);
- return -EIO;
-}
+static const struct attribute_group *csrow_attr_groups[] = {
+ &csrow_attr_grp,
+ NULL
+};
-static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+static void csrow_attr_release(struct device *dev)
{
- struct csrow_info *csrow = to_csrow(kobj);
- struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
-
- if (csrowdev_attr->store)
- return csrowdev_attr->store(csrow,
- buffer,
- count, csrowdev_attr->private);
- return -EIO;
-}
+ struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
-static const struct sysfs_ops csrowfs_ops = {
- .show = csrowdev_show,
- .store = csrowdev_store
-};
+ edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ kfree(csrow);
+}
-#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \
-static struct csrowdev_attribute attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
- .private = _private, \
+static struct device_type csrow_attr_type = {
+ .groups = csrow_attr_groups,
+ .release = csrow_attr_release,
};
-/* default cwrow<id>/attribute files */
-CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0);
-CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0);
-CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0);
-CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0);
-CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0);
-CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0);
+/*
+ * possible dynamic channel DIMM Label attribute files
+ *
+ */
-/* default attributes of the CSROW<id> object */
-static struct csrowdev_attribute *default_csrow_attr[] = {
- &attr_dev_type,
- &attr_mem_type,
- &attr_edac_mode,
- &attr_size_mb,
- &attr_ue_count,
- &attr_ce_count,
- NULL,
-};
+#define EDAC_NR_CHANNELS 6
-/* possible dynamic channel DIMM Label attribute files */
-CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 0);
-CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 1);
-CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 2);
-CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 3);
-CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 4);
-CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 5);
/* Total possible dynamic DIMM Label attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = {
- &attr_ch0_dimm_label,
- &attr_ch1_dimm_label,
- &attr_ch2_dimm_label,
- &attr_ch3_dimm_label,
- &attr_ch4_dimm_label,
- &attr_ch5_dimm_label
+static struct device_attribute *dynamic_csrow_dimm_attr[] = {
+ &dev_attr_legacy_ch0_dimm_label.attr,
+ &dev_attr_legacy_ch1_dimm_label.attr,
+ &dev_attr_legacy_ch2_dimm_label.attr,
+ &dev_attr_legacy_ch3_dimm_label.attr,
+ &dev_attr_legacy_ch4_dimm_label.attr,
+ &dev_attr_legacy_ch5_dimm_label.attr
};
/* possible dynamic channel ce_count attribute files */
-CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0);
-CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1);
-CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2);
-CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3);
-CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4);
-CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5);
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 0);
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 1);
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 2);
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 3);
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 4);
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+ channel_ce_count_show, NULL, 5);
/* Total possible dynamic ce_count attribute file table */
-static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = {
- &attr_ch0_ce_count,
- &attr_ch1_ce_count,
- &attr_ch2_ce_count,
- &attr_ch3_ce_count,
- &attr_ch4_ce_count,
- &attr_ch5_ce_count
+static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
+ &dev_attr_legacy_ch0_ce_count.attr,
+ &dev_attr_legacy_ch1_ce_count.attr,
+ &dev_attr_legacy_ch2_ce_count.attr,
+ &dev_attr_legacy_ch3_ce_count.attr,
+ &dev_attr_legacy_ch4_ce_count.attr,
+ &dev_attr_legacy_ch5_ce_count.attr
};
-#define EDAC_NR_CHANNELS 6
+static inline int nr_pages_per_csrow(struct csrow_info *csrow)
+{
+ int chan, nr_pages = 0;
+
+ for (chan = 0; chan < csrow->nr_channels; chan++)
+ nr_pages += csrow->channels[chan]->dimm->nr_pages;
+
+ return nr_pages;
+}
-/* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */
-static int edac_create_channel_files(struct kobject *kobj, int chan)
+/* Create a CSROW object under the specified edac_mc device */
+static int edac_create_csrow_object(struct mem_ctl_info *mci,
+ struct csrow_info *csrow, int index)
{
- int err = -ENODEV;
+ int err, chan;
+
+ if (csrow->nr_channels >= EDAC_NR_CHANNELS)
+ return -ENODEV;
+
+ csrow->dev.type = &csrow_attr_type;
+ csrow->dev.bus = &mci->bus;
+ device_initialize(&csrow->dev);
+ csrow->dev.parent = &mci->dev;
+ dev_set_name(&csrow->dev, "csrow%d", index);
+ dev_set_drvdata(&csrow->dev, csrow);
- if (chan >= EDAC_NR_CHANNELS)
+ edac_dbg(0, "creating (virtual) csrow node %s\n",
+ dev_name(&csrow->dev));
+
+ err = device_add(&csrow->dev);
+ if (err < 0)
return err;
- /* create the DIMM label attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *)
- dynamic_csrow_dimm_attr[chan]);
-
- if (!err) {
- /* create the CE Count attribute file */
- err = sysfs_create_file(kobj,
- (struct attribute *)
- dynamic_csrow_ce_count_attr[chan]);
- } else {
- debugf1("%s() dimm labels and ce_count files created",
- __func__);
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ /* Only expose populated DIMMs */
+ if (!csrow->channels[chan]->dimm->nr_pages)
+ continue;
+ err = device_create_file(&csrow->dev,
+ dynamic_csrow_dimm_attr[chan]);
+ if (err < 0)
+ goto error;
+ err = device_create_file(&csrow->dev,
+ dynamic_csrow_ce_count_attr[chan]);
+ if (err < 0) {
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_dimm_attr[chan]);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (--chan; chan >= 0; chan--) {
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_dimm_attr[chan]);
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_ce_count_attr[chan]);
}
+ put_device(&csrow->dev);
return err;
}
-/* No memory to release for this kobj */
-static void edac_csrow_instance_release(struct kobject *kobj)
+/* Create the CSROW objects under the specified edac_mc device */
+static int edac_create_csrow_objects(struct mem_ctl_info *mci)
{
- struct mem_ctl_info *mci;
- struct csrow_info *cs;
+ int err, i, chan;
+ struct csrow_info *csrow;
+
+ for (i = 0; i < mci->nr_csrows; i++) {
+ csrow = mci->csrows[i];
+ if (!nr_pages_per_csrow(csrow))
+ continue;
+ err = edac_create_csrow_object(mci, mci->csrows[i], i);
+ if (err < 0)
+ goto error;
+ }
+ return 0;
- debugf1("%s()\n", __func__);
+error:
+ for (--i; i >= 0; i--) {
+ csrow = mci->csrows[i];
+ if (!nr_pages_per_csrow(csrow))
+ continue;
+ for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
+ if (!csrow->channels[chan]->dimm->nr_pages)
+ continue;
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_dimm_attr[chan]);
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_ce_count_attr[chan]);
+ }
+ put_device(&mci->csrows[i]->dev);
+ }
- cs = container_of(kobj, struct csrow_info, kobj);
- mci = cs->mci;
+ return err;
+}
- kobject_put(&mci->edac_mci_kobj);
+static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
+{
+ int i, chan;
+ struct csrow_info *csrow;
+
+ for (i = mci->nr_csrows - 1; i >= 0; i--) {
+ csrow = mci->csrows[i];
+ if (!nr_pages_per_csrow(csrow))
+ continue;
+ for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
+ if (!csrow->channels[chan]->dimm->nr_pages)
+ continue;
+ edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
+ i, chan);
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_dimm_attr[chan]);
+ device_remove_file(&csrow->dev,
+ dynamic_csrow_ce_count_attr[chan]);
+ }
+ put_device(&mci->csrows[i]->dev);
+ device_del(&mci->csrows[i]->dev);
+ }
}
+#endif
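Taken together, the legacy tree for a populated csrow ends up looking roughly like this (attribute names from the code above; 'mc0' is an example controller):

	mc0/csrow0/{size_mb, mem_type, dev_type, edac_mode, ue_count, ce_count}
	mc0/csrow0/chN_dimm_label and mc0/csrow0/chN_ce_count,
	           one pair per populated channel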
-/* the kobj_type instance for a CSROW */
-static struct kobj_type ktype_csrow = {
- .release = edac_csrow_instance_release,
- .sysfs_ops = &csrowfs_ops,
- .default_attrs = (struct attribute **)default_csrow_attr,
+/*
+ * Per-dimm (or per-rank) devices
+ */
+
+#define to_dimm(k) container_of(k, struct dimm_info, dev)
+
+/* show/store functions for the dimm/rank attributes */
+static ssize_t dimmdev_location_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ return edac_dimm_info_location(dimm, data, PAGE_SIZE);
+}
+
+static ssize_t dimmdev_label_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ /* if field has not been initialized, there is nothing to send */
+ if (!dimm->label[0])
+ return 0;
+
+ return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
+}
+
+static ssize_t dimmdev_label_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data,
+ size_t count)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ ssize_t max_size = 0;
+
+ max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
+ strncpy(dimm->label, data, max_size);
+ dimm->label[max_size] = '\0';
+
+ return max_size;
+}
+
+static ssize_t dimmdev_size_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
+}
+
+static ssize_t dimmdev_mem_type_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ return sprintf(data, "%s\n", mem_types[dimm->mtype]);
+}
+
+static ssize_t dimmdev_dev_type_show(struct device *dev,
+ struct device_attribute *mattr, char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ return sprintf(data, "%s\n", dev_types[dimm->dtype]);
+}
+
+static ssize_t dimmdev_edac_mode_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct dimm_info *dimm = to_dimm(dev);
+
+ return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
+}
+
+/* dimm/rank attribute files */
+static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
+ dimmdev_label_show, dimmdev_label_store);
+static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
+static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
+static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
+static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
+
+/* attributes of the dimm<id>/rank<id> object */
+static struct attribute *dimm_attrs[] = {
+ &dev_attr_dimm_label.attr,
+ &dev_attr_dimm_location.attr,
+ &dev_attr_size.attr,
+ &dev_attr_dimm_mem_type.attr,
+ &dev_attr_dimm_dev_type.attr,
+ &dev_attr_dimm_edac_mode.attr,
+ NULL,
};
-/* Create a CSROW object under specifed edac_mc_device */
-static int edac_create_csrow_object(struct mem_ctl_info *mci,
- struct csrow_info *csrow, int index)
+static struct attribute_group dimm_attr_grp = {
+ .attrs = dimm_attrs,
+};
+
+static const struct attribute_group *dimm_attr_groups[] = {
+ &dimm_attr_grp,
+ NULL
+};
+
+static void dimm_attr_release(struct device *dev)
{
- struct kobject *kobj_mci = &mci->edac_mci_kobj;
- struct kobject *kobj;
- int chan;
- int err;
+ struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
- /* generate ..../edac/mc/mc<id>/csrow<index> */
- memset(&csrow->kobj, 0, sizeof(csrow->kobj));
- csrow->mci = mci; /* include container up link */
+ edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
+ kfree(dimm);
+}
- /* bump the mci instance's kobject's ref count */
- kobj = kobject_get(&mci->edac_mci_kobj);
- if (!kobj) {
- err = -ENODEV;
- goto err_out;
- }
+static struct device_type dimm_attr_type = {
+ .groups = dimm_attr_groups,
+ .release = dimm_attr_release,
+};
+
+/* Create a DIMM object under the specified memory controller device */
+static int edac_create_dimm_object(struct mem_ctl_info *mci,
+ struct dimm_info *dimm,
+ int index)
+{
+ int err;
+ dimm->mci = mci;
- /* Instanstiate the csrow object */
- err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci,
- "csrow%d", index);
- if (err)
- goto err_release_top_kobj;
+ dimm->dev.type = &dimm_attr_type;
+ dimm->dev.bus = &mci->bus;
+ device_initialize(&dimm->dev);
- /* At this point, to release a csrow kobj, one must
- * call the kobject_put and allow that tear down
- * to work the releasing
- */
+ dimm->dev.parent = &mci->dev;
+ if (mci->mem_is_per_rank)
+ dev_set_name(&dimm->dev, "rank%d", index);
+ else
+ dev_set_name(&dimm->dev, "dimm%d", index);
+ dev_set_drvdata(&dimm->dev, dimm);
+ pm_runtime_forbid(&mci->dev);
- /* Create the dyanmic attribute files on this csrow,
- * namely, the DIMM labels and the channel ce_count
- */
- for (chan = 0; chan < csrow->nr_channels; chan++) {
- err = edac_create_channel_files(&csrow->kobj, chan);
- if (err) {
- /* special case the unregister here */
- kobject_put(&csrow->kobj);
- goto err_out;
- }
- }
- kobject_uevent(&csrow->kobj, KOBJ_ADD);
- return 0;
+ err = device_add(&dimm->dev);
- /* error unwind stack */
-err_release_top_kobj:
- kobject_put(&mci->edac_mci_kobj);
+ edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
-err_out:
return err;
}
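Assuming a per-rank controller named mc0, the devices created here show up next to the legacy csrow nodes as roughly:

	mc0/rank0/{dimm_label, dimm_location, size, dimm_mem_type,
	           dimm_dev_type, dimm_edac_mode}

(or mc0/dimm0/... when mem_is_per_rank is not set).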
-/* default sysfs methods and data structures for the main MCI kobject */
+/*
+ * Memory controller device
+ */
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
-static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
+static ssize_t mci_reset_counters_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
- int row, chan;
-
- mci->ue_noinfo_count = 0;
- mci->ce_noinfo_count = 0;
+ struct mem_ctl_info *mci = to_mci(dev);
+ int cnt, row, chan, i;
mci->ue_mc = 0;
mci->ce_mc = 0;
+ mci->ue_noinfo_count = 0;
+ mci->ce_noinfo_count = 0;
for (row = 0; row < mci->nr_csrows; row++) {
- struct csrow_info *ri = &mci->csrows[row];
+ struct csrow_info *ri = mci->csrows[row];
ri->ue_count = 0;
ri->ce_count = 0;
for (chan = 0; chan < ri->nr_channels; chan++)
- ri->channels[chan].ce_count = 0;
+ ri->channels[chan]->ce_count = 0;
+ }
+
+ cnt = 1;
+ for (i = 0; i < mci->n_layers; i++) {
+ cnt *= mci->layers[i].size;
+ memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
+ memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
}
mci->start_time = jiffies;
@@ -451,9 +666,11 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
* Negative value still means that an error has occurred while setting
* the scrub rate.
*/
-static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
+static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
unsigned long bandwidth = 0;
int new_bw = 0;
@@ -476,8 +693,11 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci,
/*
* ->get_sdram_scrub_rate() return value semantics same as above.
*/
-static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
int bandwidth = 0;
if (!mci->get_sdram_scrub_rate)
@@ -493,45 +713,72 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
}
/* default attribute files for the MCI object */
-static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_ue_count_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%d\n", mci->ue_mc);
}
-static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_ce_count_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%d\n", mci->ce_mc);
}
-static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_ce_noinfo_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%d\n", mci->ce_noinfo_count);
}
-static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_ue_noinfo_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%d\n", mci->ue_noinfo_count);
}
-static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_seconds_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
}
-static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_ctl_name_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
+
return sprintf(data, "%s\n", mci->ctl_name);
}
-static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mci_size_mb_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
int total_pages = 0, csrow_idx, j;
for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
- struct csrow_info *csrow = &mci->csrows[csrow_idx];
+ struct csrow_info *csrow = mci->csrows[csrow_idx];
for (j = 0; j < csrow->nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j].dimm;
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
total_pages += dimm->nr_pages;
}
@@ -540,361 +787,187 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
}
-#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
-#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr)
-
-/* MCI show/store functions for top most object */
-static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
+static ssize_t mci_max_location_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+ struct mem_ctl_info *mci = to_mci(dev);
+ int i;
+ char *p = data;
- if (mcidev_attr->show)
- return mcidev_attr->show(mem_ctl_info, buffer);
+ for (i = 0; i < mci->n_layers; i++) {
+ p += sprintf(p, "%s %d ",
+ edac_layer_name[mci->layers[i].type],
+ mci->layers[i].size - 1);
+ }
- return -EIO;
+ return p - data;
}
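As an example of the string produced above (layer setup assumed, not from the patch), a controller declared with layers {channel: 4, slot: 3} reports:

	channel 3 slot 2

i.e. the highest valid position in each layer, which userspace can use to size its own per-location tables.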
-static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t edac_fake_inject_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
{
- struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->store)
- return mcidev_attr->store(mem_ctl_info, buffer, count);
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ static enum hw_event_mc_err_type type;
+ u16 errcount = mci->fake_inject_count;
+
+ if (!errcount)
+ errcount = 1;
+
+ type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
+ : HW_EVENT_ERR_CORRECTED;
+
+ printk(KERN_DEBUG
+ "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
+ errcount,
+ (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
+ errcount > 1 ? "s" : "",
+ mci->fake_inject_layer[0],
+ mci->fake_inject_layer[1],
+ mci->fake_inject_layer[2]
+ );
+ edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
+ mci->fake_inject_layer[0],
+ mci->fake_inject_layer[1],
+ mci->fake_inject_layer[2],
+ "FAKE ERROR", "for EDAC testing only");
- return -EIO;
+ return count;
}
-/* Intermediate show/store table */
-static const struct sysfs_ops mci_ops = {
- .show = mcidev_show,
- .store = mcidev_store
-};
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
-#define MCIDEV_ATTR(_name,_mode,_show,_store) \
-static struct mcidev_sysfs_attribute mci_attr_##_name = { \
- .attr = {.name = __stringify(_name), .mode = _mode }, \
- .show = _show, \
- .store = _store, \
+static const struct file_operations debug_fake_inject_fops = {
+ .open = debugfs_open,
+ .write = edac_fake_inject_write,
+ .llseek = generic_file_llseek,
};
+#endif
/* default Control file */
-MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
+DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
/* default Attribute files */
-MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
-MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
-MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
-MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
-MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
-MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
-MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
+DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
+DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
+DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
+DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
+DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
+DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
+DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
/* memory scrubber attribute file */
-MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
+DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show,
mci_sdram_scrub_rate_store);
-static struct mcidev_sysfs_attribute *mci_attr[] = {
- &mci_attr_reset_counters,
- &mci_attr_mc_name,
- &mci_attr_size_mb,
- &mci_attr_seconds_since_reset,
- &mci_attr_ue_noinfo_count,
- &mci_attr_ce_noinfo_count,
- &mci_attr_ue_count,
- &mci_attr_ce_count,
- &mci_attr_sdram_scrub_rate,
+static struct attribute *mci_attrs[] = {
+ &dev_attr_reset_counters.attr,
+ &dev_attr_mc_name.attr,
+ &dev_attr_size_mb.attr,
+ &dev_attr_seconds_since_reset.attr,
+ &dev_attr_ue_noinfo_count.attr,
+ &dev_attr_ce_noinfo_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ce_count.attr,
+ &dev_attr_sdram_scrub_rate.attr,
+ &dev_attr_max_location.attr,
NULL
};
+static struct attribute_group mci_attr_grp = {
+ .attrs = mci_attrs,
+};
-/*
- * Release of a MC controlling instance
- *
- * each MC control instance has the following resources upon entry:
- * a) a ref count on the top memctl kobj
- * b) a ref count on this module
- *
- * this function must decrement those ref counts and then
- * issue a free on the instance's memory
- */
-static void edac_mci_control_release(struct kobject *kobj)
-{
- struct mem_ctl_info *mci;
-
- mci = to_mci(kobj);
+static const struct attribute_group *mci_attr_groups[] = {
+ &mci_attr_grp,
+ NULL
+};
- debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx);
+static void mci_attr_release(struct device *dev)
+{
+ struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
- /* decrement the module ref count */
- module_put(mci->owner);
+ edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
+ kfree(mci);
}
-static struct kobj_type ktype_mci = {
- .release = edac_mci_control_release,
- .sysfs_ops = &mci_ops,
- .default_attrs = (struct attribute **)mci_attr,
+static struct device_type mci_attr_type = {
+ .groups = mci_attr_groups,
+ .release = mci_attr_release,
};
-/* EDAC memory controller sysfs kset:
- * /sys/devices/system/edac/mc
- */
-static struct kset *mc_kset;
+#ifdef CONFIG_EDAC_DEBUG
+static struct dentry *edac_debugfs;
-/*
- * edac_mc_register_sysfs_main_kobj
- *
- * setups and registers the main kobject for each mci
- */
-int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci)
+int __init edac_debugfs_init(void)
{
- struct kobject *kobj_mci;
- int err;
-
- debugf1("%s()\n", __func__);
-
- kobj_mci = &mci->edac_mci_kobj;
-
- /* Init the mci's kobject */
- memset(kobj_mci, 0, sizeof(*kobj_mci));
-
- /* Record which module 'owns' this control structure
- * and bump the ref count of the module
- */
- mci->owner = THIS_MODULE;
-
- /* bump ref count on this module */
- if (!try_module_get(mci->owner)) {
- err = -ENODEV;
- goto fail_out;
- }
-
- /* this instance become part of the mc_kset */
- kobj_mci->kset = mc_kset;
-
- /* register the mc<id> kobject to the mc_kset */
- err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL,
- "mc%d", mci->mc_idx);
- if (err) {
- debugf1("%s()Failed to register '.../edac/mc%d'\n",
- __func__, mci->mc_idx);
- goto kobj_reg_fail;
+ edac_debugfs = debugfs_create_dir("edac", NULL);
+ if (IS_ERR(edac_debugfs)) {
+ edac_debugfs = NULL;
+ return -ENOMEM;
}
- kobject_uevent(kobj_mci, KOBJ_ADD);
-
- /* At this point, to 'free' the control struct,
- * edac_mc_unregister_sysfs_main_kobj() must be used
- */
-
- debugf1("%s() Registered '.../edac/mc%d' kobject\n",
- __func__, mci->mc_idx);
-
return 0;
-
- /* Error exit stack */
-
-kobj_reg_fail:
- module_put(mci->owner);
-
-fail_out:
- return err;
-}
-
-/*
- * edac_mc_register_sysfs_main_kobj
- *
- * tears down and the main mci kobject from the mc_kset
- */
-void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
-{
- debugf1("%s()\n", __func__);
-
- /* delete the kobj from the mc_kset */
- kobject_put(&mci->edac_mci_kobj);
-}
-
-#define EDAC_DEVICE_SYMLINK "device"
-
-#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
-
-/* MCI show/store functions for top most object */
-static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->show)
- return mcidev_attr->show(mem_ctl_info, buffer);
-
- return -EIO;
}
-static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
+void __exit edac_debugfs_exit(void)
{
- struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
- struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
-
- debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
-
- if (mcidev_attr->store)
- return mcidev_attr->store(mem_ctl_info, buffer, count);
-
- return -EIO;
+ debugfs_remove(edac_debugfs);
}
-/* No memory to release for this kobj */
-static void edac_inst_grp_release(struct kobject *kobj)
+int edac_create_debug_nodes(struct mem_ctl_info *mci)
{
- struct mcidev_sysfs_group_kobj *grp;
- struct mem_ctl_info *mci;
-
- debugf1("%s()\n", __func__);
-
- grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
- mci = grp->mci;
-}
-
-/* Intermediate show/store table */
-static struct sysfs_ops inst_grp_ops = {
- .show = inst_grp_show,
- .store = inst_grp_store
-};
-
-/* the kobj_type instance for a instance group */
-static struct kobj_type ktype_inst_grp = {
- .release = edac_inst_grp_release,
- .sysfs_ops = &inst_grp_ops,
-};
-
+ struct dentry *d, *parent;
+ char name[80];
+ int i;
-/*
- * edac_create_mci_instance_attributes
- * create MC driver specific attributes bellow an specified kobj
- * This routine calls itself recursively, in order to create an entire
- * object tree.
- */
-static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
- const struct mcidev_sysfs_attribute *sysfs_attrib,
- struct kobject *kobj)
-{
- int err;
+ if (!edac_debugfs)
+ return -ENODEV;
- debugf4("%s()\n", __func__);
-
- while (sysfs_attrib) {
- debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
- if (sysfs_attrib->grp) {
- struct mcidev_sysfs_group_kobj *grp_kobj;
-
- grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
- if (!grp_kobj)
- return -ENOMEM;
-
- grp_kobj->grp = sysfs_attrib->grp;
- grp_kobj->mci = mci;
- list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
-
- debugf0("%s() grp %s, mci %p\n", __func__,
- sysfs_attrib->grp->name, mci);
-
- err = kobject_init_and_add(&grp_kobj->kobj,
- &ktype_inst_grp,
- &mci->edac_mci_kobj,
- sysfs_attrib->grp->name);
- if (err < 0) {
- printk(KERN_ERR "kobject_init_and_add failed: %d\n", err);
- return err;
- }
- err = edac_create_mci_instance_attributes(mci,
- grp_kobj->grp->mcidev_attr,
- &grp_kobj->kobj);
-
- if (err < 0)
- return err;
- } else if (sysfs_attrib->attr.name) {
- debugf4("%s() file %s\n", __func__,
- sysfs_attrib->attr.name);
-
- err = sysfs_create_file(kobj, &sysfs_attrib->attr);
- if (err < 0) {
- printk(KERN_ERR "sysfs_create_file failed: %d\n", err);
- return err;
- }
- } else
- break;
-
- sysfs_attrib++;
+ d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
+ if (!d)
+ return -ENOMEM;
+ parent = d;
+
+ for (i = 0; i < mci->n_layers; i++) {
+ sprintf(name, "fake_inject_%s",
+ edac_layer_name[mci->layers[i].type]);
+ d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
+ &mci->fake_inject_layer[i]);
+ if (!d)
+ goto nomem;
}
- return 0;
-}
+ d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
+ &mci->fake_inject_ue);
+ if (!d)
+ goto nomem;
-/*
- * edac_remove_mci_instance_attributes
- * remove MC driver specific attributes at the topmost level
- * directory of this mci instance.
- */
-static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
- const struct mcidev_sysfs_attribute *sysfs_attrib,
- struct kobject *kobj, int count)
-{
- struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
+ d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
+ &mci->fake_inject_count);
+ if (!d)
+ goto nomem;
- debugf1("%s()\n", __func__);
-
- /*
- * loop if there are attributes and until we hit a NULL entry
- * Remove first all the attributes
- */
- while (sysfs_attrib) {
- debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
- if (sysfs_attrib->grp) {
- debugf4("%s() seeking for group %s\n",
- __func__, sysfs_attrib->grp->name);
- list_for_each_entry(grp_kobj,
- &mci->grp_kobj_list, list) {
- debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
- if (grp_kobj->grp == sysfs_attrib->grp) {
- edac_remove_mci_instance_attributes(mci,
- grp_kobj->grp->mcidev_attr,
- &grp_kobj->kobj, count + 1);
- debugf4("%s() group %s\n", __func__,
- sysfs_attrib->grp->name);
- kobject_put(&grp_kobj->kobj);
- }
- }
- debugf4("%s() end of seeking for group %s\n",
- __func__, sysfs_attrib->grp->name);
- } else if (sysfs_attrib->attr.name) {
- debugf4("%s() file %s\n", __func__,
- sysfs_attrib->attr.name);
- sysfs_remove_file(kobj, &sysfs_attrib->attr);
- } else
- break;
- sysfs_attrib++;
- }
+ d = debugfs_create_file("fake_inject", S_IWUSR, parent,
+ &mci->dev,
+ &debug_fake_inject_fops);
+ if (!d)
+ goto nomem;
- /* Remove the group objects */
- if (count)
- return;
- list_for_each_entry_safe(grp_kobj, tmp,
- &mci->grp_kobj_list, list) {
- list_del(&grp_kobj->list);
- kfree(grp_kobj);
- }
+ mci->debugfs = parent;
+ return 0;
+nomem:
+ debugfs_remove(mci->debugfs);
+ return -ENOMEM;
}
-
+#endif
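For a controller mc0, the debug nodes created above land under debugfs roughly as (exact mount point left aside):

	<debugfs>/edac/mc0/fake_inject_<layer-name>   one u8 per layer
	<debugfs>/edac/mc0/fake_inject_ue             inject a UE instead of a CE
	<debugfs>/edac/mc0/fake_inject_count          error count, treated as 1 when zero
	<debugfs>/edac/mc0/fake_inject                write anything here to trigger the fake error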
/*
* Create a new Memory Controller kobject instance,
@@ -906,77 +979,87 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i, j;
- int err;
- struct csrow_info *csrow;
- struct kobject *kobj_mci = &mci->edac_mci_kobj;
+ int i, err;
- debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
-
- INIT_LIST_HEAD(&mci->grp_kobj_list);
+ /*
+ * The memory controller needs its own bus, in order to avoid
+ * namespace conflicts at /sys/bus/edac.
+ */
+ mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+ if (!mci->bus.name)
+ return -ENOMEM;
+ edac_dbg(0, "creating bus %s\n", mci->bus.name);
+ err = bus_register(&mci->bus);
+ if (err < 0)
+ return err;
- /* create a symlink for the device */
- err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
- EDAC_DEVICE_SYMLINK);
- if (err) {
- debugf1("%s() failure to create symlink\n", __func__);
- goto fail0;
+ /* get the /sys/devices/system/edac subsys reference */
+ mci->dev.type = &mci_attr_type;
+ device_initialize(&mci->dev);
+
+ mci->dev.parent = mci_pdev;
+ mci->dev.bus = &mci->bus;
+ dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
+ dev_set_drvdata(&mci->dev, mci);
+ pm_runtime_forbid(&mci->dev);
+
+ edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
+ err = device_add(&mci->dev);
+ if (err < 0) {
+ bus_unregister(&mci->bus);
+ kfree(mci->bus.name);
+ return err;
}
- /* If the low level driver desires some attributes,
- * then create them now for the driver.
+ /*
+ * Create the dimm/rank devices
*/
- if (mci->mc_driver_sysfs_attributes) {
- err = edac_create_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj);
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = mci->dimms[i];
+ /* Only expose populated DIMMs */
+ if (dimm->nr_pages == 0)
+ continue;
+#ifdef CONFIG_EDAC_DEBUG
+ edac_dbg(1, "creating dimm%d, located at ", i);
+ if (edac_debug_level >= 1) {
+ int lay;
+ for (lay = 0; lay < mci->n_layers; lay++)
+ printk(KERN_CONT "%s %d ",
+ edac_layer_name[mci->layers[lay].type],
+ dimm->location[lay]);
+ printk(KERN_CONT "\n");
+ }
+#endif
+ err = edac_create_dimm_object(mci, dimm, i);
if (err) {
- debugf1("%s() failure to create mci attributes\n",
- __func__);
- goto fail0;
+ edac_dbg(1, "failure: create dimm %d obj\n", i);
+ goto fail;
}
}
- /* Make directories for each CSROW object under the mc<id> kobject
- */
- for (i = 0; i < mci->nr_csrows; i++) {
- int nr_pages = 0;
-
- csrow = &mci->csrows[i];
- for (j = 0; j < csrow->nr_channels; j++)
- nr_pages += csrow->channels[j].dimm->nr_pages;
-
- if (nr_pages > 0) {
- err = edac_create_csrow_object(mci, csrow, i);
- if (err) {
- debugf1("%s() failure: create csrow %d obj\n",
- __func__, i);
- goto fail1;
- }
- }
- }
+#ifdef CONFIG_EDAC_LEGACY_SYSFS
+ err = edac_create_csrow_objects(mci);
+ if (err < 0)
+ goto fail;
+#endif
+#ifdef CONFIG_EDAC_DEBUG
+ edac_create_debug_nodes(mci);
+#endif
return 0;
-fail1:
+fail:
for (i--; i >= 0; i--) {
- int nr_pages = 0;
-
- csrow = &mci->csrows[i];
- for (j = 0; j < csrow->nr_channels; j++)
- nr_pages += csrow->channels[j].dimm->nr_pages;
- if (nr_pages > 0)
- kobject_put(&mci->csrows[i].kobj);
+ struct dimm_info *dimm = mci->dimms[i];
+ if (dimm->nr_pages == 0)
+ continue;
+ put_device(&dimm->dev);
+ device_del(&dimm->dev);
}
-
- /* remove the mci instance's attributes, if any */
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
-
- /* remove the symlink */
- sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
-
-fail0:
+ put_device(&mci->dev);
+ device_del(&mci->dev);
+ bus_unregister(&mci->bus);
+ kfree(mci->bus.name);
return err;
}
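Putting the pieces together, the hierarchy built by this function is roughly (names from the code; where the parent 'mc' device sits in sysfs depends on how the edac subsystem registered it):

	mc                        mci_pdev, created in edac_mc_sysfs_init()
	    mc0                   mci->dev, hanging off the per-controller bus "mc0"
	        rank0 / dimm0 ... one device per populated DIMM or rank
	        csrow0 ...        legacy nodes, only with CONFIG_EDAC_LEGACY_SYSFS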
@@ -985,98 +1068,84 @@ fail0:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- struct csrow_info *csrow;
- int i, j;
-
- debugf0("%s()\n", __func__);
-
- /* remove all csrow kobjects */
- debugf4("%s() unregister this mci kobj\n", __func__);
- for (i = 0; i < mci->nr_csrows; i++) {
- int nr_pages = 0;
-
- csrow = &mci->csrows[i];
- for (j = 0; j < csrow->nr_channels; j++)
- nr_pages += csrow->channels[j].dimm->nr_pages;
- if (nr_pages > 0) {
- debugf0("%s() unreg csrow-%d\n", __func__, i);
- kobject_put(&mci->csrows[i].kobj);
- }
- }
+ int i;
- /* remove this mci instance's attribtes */
- if (mci->mc_driver_sysfs_attributes) {
- debugf4("%s() unregister mci private attributes\n", __func__);
- edac_remove_mci_instance_attributes(mci,
- mci->mc_driver_sysfs_attributes,
- &mci->edac_mci_kobj, 0);
+ edac_dbg(0, "\n");
+
+#ifdef CONFIG_EDAC_DEBUG
+ debugfs_remove(mci->debugfs);
+#endif
+#ifdef CONFIG_EDAC_LEGACY_SYSFS
+ edac_delete_csrow_objects(mci);
+#endif
+
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = mci->dimms[i];
+ if (dimm->nr_pages == 0)
+ continue;
+ edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
+ put_device(&dimm->dev);
+ device_del(&dimm->dev);
}
-
- /* remove the symlink */
- debugf4("%s() remove_link\n", __func__);
- sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
-
- /* unregister this instance's kobject */
- debugf4("%s() remove_mci_instance\n", __func__);
- kobject_put(&mci->edac_mci_kobj);
}
+void edac_unregister_sysfs(struct mem_ctl_info *mci)
+{
+ edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ put_device(&mci->dev);
+ device_del(&mci->dev);
+ bus_unregister(&mci->bus);
+ kfree(mci->bus.name);
+}
+static void mc_attr_release(struct device *dev)
+{
+ /*
+ * There's no container structure here, as this is just the mci
+ * parent device, used to create the /sys/devices/mc sysfs node.
+ * So, there are no attributes on it.
+ */
+ edac_dbg(1, "Releasing device %s\n", dev_name(dev));
+ kfree(dev);
+}
-
+static struct device_type mc_attr_type = {
+ .release = mc_attr_release,
+};
/*
- * edac_setup_sysfs_mc_kset(void)
- *
- * Initialize the mc_kset for the 'mc' entry
- * This requires creating the top 'mc' directory with a kset
- * and its controls/attributes.
- *
- * To this 'mc' kset, instance 'mci' will be grouped as children.
- *
- * Return: 0 SUCCESS
- * !0 FAILURE error code
+ * Init/exit code for the module. Basically, creates/removes the parent 'mc' device that the mc<N> instances hang from
*/
-int edac_sysfs_setup_mc_kset(void)
+int __init edac_mc_sysfs_init(void)
{
- int err = -EINVAL;
struct bus_type *edac_subsys;
-
- debugf1("%s()\n", __func__);
+ int err;
/* get the /sys/devices/system/edac subsys reference */
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys error=%d\n", __func__, err);
- goto fail_out;
+ edac_dbg(1, "no edac_subsys\n");
+ return -EINVAL;
}
- /* Init the MC's kobject */
- mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
- if (!mc_kset) {
- err = -ENOMEM;
- debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
- goto fail_kset;
- }
+ mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
+ if (!mci_pdev)
+ return -ENOMEM;
- debugf1("%s() Registered '.../edac/mc' kobject\n", __func__);
+ mci_pdev->bus = edac_subsys;
+ mci_pdev->type = &mc_attr_type;
+ device_initialize(mci_pdev);
+ dev_set_name(mci_pdev, "mc");
- return 0;
+ err = device_add(mci_pdev);
+ if (err < 0)
+ return err;
-fail_kset:
- edac_put_sysfs_subsys();
+ edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
-fail_out:
- return err;
+ return 0;
}
-/*
- * edac_sysfs_teardown_mc_kset
- *
- * deconstruct the mc_ket for memory controllers
- */
-void edac_sysfs_teardown_mc_kset(void)
+void __exit edac_mc_sysfs_exit(void)
{
- kset_unregister(mc_kset);
+ put_device(mci_pdev);
+ device_del(mci_pdev);
edac_put_sysfs_subsys();
}
-
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c
index 5ddaa86d6a6e..58a28d838f37 100644
--- a/drivers/edac/edac_module.c
+++ b/drivers/edac/edac_module.c
@@ -15,7 +15,7 @@
#include "edac_core.h"
#include "edac_module.h"
-#define EDAC_VERSION "Ver: 2.1.0"
+#define EDAC_VERSION "Ver: 3.0.0"
#ifdef CONFIG_EDAC_DEBUG
/* Values of 0 to 4 will generate output */
@@ -90,26 +90,21 @@ static int __init edac_init(void)
*/
edac_pci_clear_parity_errors();
- /*
- * now set up the mc_kset under the edac class object
- */
- err = edac_sysfs_setup_mc_kset();
+ err = edac_mc_sysfs_init();
if (err)
goto error;
+ edac_debugfs_init();
+
/* Setup/Initialize the workq for this core */
err = edac_workqueue_setup();
if (err) {
edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n");
- goto workq_fail;
+ goto error;
}
return 0;
- /* Error teardown stack */
-workq_fail:
- edac_sysfs_teardown_mc_kset();
-
error:
return err;
}
@@ -120,11 +115,12 @@ error:
*/
static void __exit edac_exit(void)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* tear down the various subsystems */
edac_workqueue_teardown();
- edac_sysfs_teardown_mc_kset();
+ edac_mc_sysfs_exit();
+ edac_debugfs_exit();
}
/*
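Almost all of the per-driver churn in this series is the mechanical conversion of debugfX("%s()", __func__, ...) calls to edac_dbg(level, ...); the explicit __func__ argument disappears because the new macro adds the function name itself. A minimal sketch of such a level-gated macro, assuming a definition along these lines (the real one lives in edac_core.h; the name demo_edac_dbg is illustrative):

#include <linux/printk.h>

extern int edac_debug_level;		/* debug verbosity, 0..4 (see above) */

#define demo_edac_dbg(level, fmt, ...)					\
do {									\
	if ((level) <= edac_debug_level)				\
		printk(KERN_DEBUG "EDAC DEBUG: %s: " fmt,		\
		       __func__, ##__VA_ARGS__);			\
} while (0)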
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 0ea7d14cb930..3d139c6e7fe3 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -19,12 +19,12 @@
*
* edac_mc objects
*/
-extern int edac_sysfs_setup_mc_kset(void);
-extern void edac_sysfs_teardown_mc_kset(void);
-extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
-extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
+ /* on edac_mc_sysfs.c */
+int edac_mc_sysfs_init(void);
+void edac_mc_sysfs_exit(void);
extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
+void edac_unregister_sysfs(struct mem_ctl_info *mci);
extern int edac_get_log_ue(void);
extern int edac_get_log_ce(void);
extern int edac_get_panic_on_ue(void);
@@ -34,6 +34,10 @@ extern int edac_mc_get_panic_on_ue(void);
extern int edac_get_poll_msec(void);
extern int edac_mc_get_poll_msec(void);
+unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
+ unsigned len);
+
+ /* on edac_device.c */
extern int edac_device_register_sysfs_main_kobj(
struct edac_device_ctl_info *edac_dev);
extern void edac_device_unregister_sysfs_main_kobj(
@@ -53,6 +57,20 @@ extern void edac_mc_reset_delay_period(int value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
/*
+ * EDAC debugfs functions
+ */
+#ifdef CONFIG_EDAC_DEBUG
+int edac_debugfs_init(void);
+void edac_debugfs_exit(void);
+#else
+static inline int edac_debugfs_init(void)
+{
+ return -ENODEV;
+}
+static inline void edac_debugfs_exit(void) {}
+#endif
+
+/*
* EDAC PCI functions
*/
#ifdef CONFIG_PCI
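The edac_debugfs_init()/edac_debugfs_exit() pair declared above collapses to stubs when CONFIG_EDAC_DEBUG is off. As a sketch only, one plausible implementation on top of the standard debugfs API (the directory name "edac" and the static dentry are assumptions, not taken from this patch):

#include <linux/debugfs.h>
#include <linux/errno.h>

static struct dentry *edac_debugfs;

int edac_debugfs_init(void)
{
	edac_debugfs = debugfs_create_dir("edac", NULL);
	return edac_debugfs ? 0 : -ENOMEM;
}

void edac_debugfs_exit(void)
{
	debugfs_remove_recursive(edac_debugfs);
}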
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index f1ac86649886..ee87ef972ead 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -45,7 +45,7 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
void *p = NULL, *pvt;
unsigned int size;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
pci = edac_align_ptr(&p, sizeof(*pci), 1);
pvt = edac_align_ptr(&p, 1, sz_pvt);
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info);
*/
void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci)
{
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
edac_pci_remove_sysfs(pci);
}
@@ -97,7 +97,7 @@ static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev)
struct edac_pci_ctl_info *pci;
struct list_head *item;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
list_for_each(item, &edac_pci_list) {
pci = list_entry(item, struct edac_pci_ctl_info, link);
@@ -122,7 +122,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci)
struct list_head *item, *insert_before;
struct edac_pci_ctl_info *rover;
- debugf1("%s()\n", __func__);
+ edac_dbg(1, "\n");
insert_before = &edac_pci_list;
@@ -226,7 +226,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
int msec;
unsigned long delay;
- debugf3("%s() checking\n", __func__);
+ edac_dbg(3, "checking\n");
mutex_lock(&edac_pci_ctls_mutex);
@@ -261,7 +261,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
unsigned int msec)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
queue_delayed_work(edac_workqueue, &pci->work,
@@ -276,7 +276,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
{
int status;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
status = cancel_delayed_work(&pci->work);
if (status == 0)
@@ -293,7 +293,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci,
unsigned long value)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
edac_pci_workq_teardown(pci);
@@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
*/
int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
pci->pci_idx = edac_idx;
pci->start_time = jiffies;
@@ -393,7 +393,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
{
struct edac_pci_ctl_info *pci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mutex_lock(&edac_pci_ctls_mutex);
@@ -430,7 +430,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device);
*/
static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
{
- debugf4("%s()\n", __func__);
+ edac_dbg(4, "\n");
edac_pci_do_parity_check();
}
@@ -475,7 +475,7 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev,
pdata->edac_idx = edac_pci_idx++;
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_pci_add_device()\n");
edac_pci_free_ctl_info(pci);
return NULL;
}
@@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl);
*/
void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci)
{
- debugf0("%s() pci mod=%s\n", __func__, pci->mod_name);
+ edac_dbg(0, "pci mod=%s\n", pci->mod_name);
edac_pci_del_device(pci->dev);
edac_pci_free_ctl_info(pci);
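edac_pci_workq_setup() and edac_pci_workq_teardown(), touched above, implement the usual self-rearming delayed-work poll (the real code queues onto the EDAC core's private edac_workqueue). Stripped to its core, with illustrative names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work poll_work;

static void poll_fn(struct work_struct *work)
{
	/* ... check PCI parity/status here ... */
	schedule_delayed_work(&poll_work, msecs_to_jiffies(1000));	/* re-arm */
}

static void poll_start(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, msecs_to_jiffies(1000));
}

static void poll_stop(void)
{
	cancel_delayed_work_sync(&poll_work);
}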
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 97f5064e3992..e164c555a337 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -78,7 +78,7 @@ static void edac_pci_instance_release(struct kobject *kobj)
{
struct edac_pci_ctl_info *pci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* Form pointer to containing struct, the pci control struct */
pci = to_instance(kobj);
@@ -161,7 +161,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
struct kobject *main_kobj;
int err;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* First bump the ref count on the top main kobj, which will
* track the number of PCI instances we have, and thus nest
@@ -177,14 +177,13 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx)
err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance,
edac_pci_top_main_kobj, "pci%d", idx);
if (err != 0) {
- debugf2("%s() failed to register instance pci%d\n",
- __func__, idx);
+ edac_dbg(2, "failed to register instance pci%d\n", idx);
kobject_put(edac_pci_top_main_kobj);
goto error_out;
}
kobject_uevent(&pci->kobj, KOBJ_ADD);
- debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx);
+ edac_dbg(1, "Register instance 'pci%d' kobject\n", idx);
return 0;
@@ -201,7 +200,7 @@ error_out:
static void edac_pci_unregister_sysfs_instance_kobj(
struct edac_pci_ctl_info *pci)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* Unregister the instance kobject and allow its release
* function release the main reference count and then
@@ -317,7 +316,7 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
*/
static void edac_pci_release_main_kobj(struct kobject *kobj)
{
- debugf0("%s() here to module_put(THIS_MODULE)\n", __func__);
+ edac_dbg(0, "here to module_put(THIS_MODULE)\n");
kfree(kobj);
@@ -345,7 +344,7 @@ static int edac_pci_main_kobj_setup(void)
int err;
struct bus_type *edac_subsys;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* check and count if we have already created the main kobject */
if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1)
@@ -356,7 +355,7 @@ static int edac_pci_main_kobj_setup(void)
*/
edac_subsys = edac_get_sysfs_subsys();
if (edac_subsys == NULL) {
- debugf1("%s() no edac_subsys\n", __func__);
+ edac_dbg(1, "no edac_subsys\n");
err = -ENODEV;
goto decrement_count_fail;
}
@@ -366,14 +365,14 @@ static int edac_pci_main_kobj_setup(void)
* level main kobj for EDAC PCI
*/
if (!try_module_get(THIS_MODULE)) {
- debugf1("%s() try_module_get() failed\n", __func__);
+ edac_dbg(1, "try_module_get() failed\n");
err = -ENODEV;
goto mod_get_fail;
}
edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
if (!edac_pci_top_main_kobj) {
- debugf1("Failed to allocate\n");
+ edac_dbg(1, "Failed to allocate\n");
err = -ENOMEM;
goto kzalloc_fail;
}
@@ -383,7 +382,7 @@ static int edac_pci_main_kobj_setup(void)
&ktype_edac_pci_main_kobj,
&edac_subsys->dev_root->kobj, "pci");
if (err) {
- debugf1("Failed to register '.../edac/pci'\n");
+ edac_dbg(1, "Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
}
@@ -392,7 +391,7 @@ static int edac_pci_main_kobj_setup(void)
* must be used, for resources to be cleaned up properly
*/
kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD);
- debugf1("Registered '.../edac/pci' kobject\n");
+ edac_dbg(1, "Registered '.../edac/pci' kobject\n");
return 0;
@@ -421,15 +420,14 @@ decrement_count_fail:
*/
static void edac_pci_main_kobj_teardown(void)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* Decrement the count and only if no more controller instances
 * are connected perform the unregistration of the top level
* main kobj
*/
if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) {
- debugf0("%s() called kobject_put on main kobj\n",
- __func__);
+ edac_dbg(0, "called kobject_put on main kobj\n");
kobject_put(edac_pci_top_main_kobj);
}
edac_put_sysfs_subsys();
@@ -446,7 +444,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
int err;
struct kobject *edac_kobj = &pci->kobj;
- debugf0("%s() idx=%d\n", __func__, pci->pci_idx);
+ edac_dbg(0, "idx=%d\n", pci->pci_idx);
/* create the top main EDAC PCI kobject, IF needed */
err = edac_pci_main_kobj_setup();
@@ -460,8 +458,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci)
err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK);
if (err) {
- debugf0("%s() sysfs_create_link() returned err= %d\n",
- __func__, err);
+ edac_dbg(0, "sysfs_create_link() returned err= %d\n", err);
goto symlink_fail;
}
@@ -484,7 +481,7 @@ unregister_cleanup:
*/
void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
{
- debugf0("%s() index=%d\n", __func__, pci->pci_idx);
+ edac_dbg(0, "index=%d\n", pci->pci_idx);
/* Remove the symlink */
sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK);
@@ -496,7 +493,7 @@ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci)
* if this 'pci' is the last instance.
* If it is, the main kobject will be unregistered as a result
*/
- debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__);
+ edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n");
edac_pci_main_kobj_teardown();
}
@@ -572,7 +569,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
local_irq_restore(flags);
- debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+ edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
/* check the status reg for errors on boards NOT marked as broken
* if broken, we cannot trust any of the status bits
@@ -603,13 +600,15 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
}
- debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev));
+ edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n",
+ header_type, dev_name(&dev->dev));
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* On bridges, need to examine secondary status register */
status = get_pci_parity_status(dev, 1);
- debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev));
+ edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n",
+ status, dev_name(&dev->dev));
/* check the secondary status reg for errors,
* on NOT broken boards
@@ -671,7 +670,7 @@ void edac_pci_do_parity_check(void)
{
int before_count;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* if policy has PCI check off, leave now */
if (!check_pci_errors)
diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c
new file mode 100644
index 000000000000..e599b00c05a8
--- /dev/null
+++ b/drivers/edac/highbank_l2_edac.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+#define SR_CLR_SB_ECC_INTR 0x0
+#define SR_CLR_DB_ECC_INTR 0x4
+
+struct hb_l2_drvdata {
+ void __iomem *base;
+ int sb_irq;
+ int db_irq;
+};
+
+static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *dci = dev_id;
+ struct hb_l2_drvdata *drvdata = dci->pvt_info;
+
+ if (irq == drvdata->sb_irq) {
+ writel(1, drvdata->base + SR_CLR_SB_ECC_INTR);
+ edac_device_handle_ce(dci, 0, 0, dci->ctl_name);
+ }
+ if (irq == drvdata->db_irq) {
+ writel(1, drvdata->base + SR_CLR_DB_ECC_INTR);
+ edac_device_handle_ue(dci, 0, 0, dci->ctl_name);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit highbank_l2_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct hb_l2_drvdata *drvdata;
+ struct resource *r;
+ int res = 0;
+
+ dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu",
+ 1, "L", 1, 2, NULL, 0, 0);
+ if (!dci)
+ return -ENOMEM;
+
+ drvdata = dci->pvt_info;
+ dci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dci);
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "Unable to get mem resource\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ resource_size(r), dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "Error while requesting mem region\n");
+ res = -EBUSY;
+ goto err;
+ }
+
+ drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!drvdata->base) {
+ dev_err(&pdev->dev, "Unable to map regs\n");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ drvdata->db_irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+ goto err;
+
+ drvdata->sb_irq = platform_get_irq(pdev, 1);
+ res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+ goto err;
+
+ dci->mod_name = dev_name(&pdev->dev);
+ dci->dev_name = dev_name(&pdev->dev);
+
+ if (edac_device_add_device(dci))
+ goto err;
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
+err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_device_free_ctl_info(dci);
+ return res;
+}
+
+static int highbank_l2_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+ return 0;
+}
+
+static const struct of_device_id hb_l2_err_of_match[] = {
+ { .compatible = "calxeda,hb-sregs-l2-ecc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_l2_err_of_match);
+
+static struct platform_driver highbank_l2_edac_driver = {
+ .probe = highbank_l2_err_probe,
+ .remove = highbank_l2_err_remove,
+ .driver = {
+ .name = "hb_l2_edac",
+ .of_match_table = hb_l2_err_of_match,
+ },
+};
+
+module_platform_driver(highbank_l2_edac_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache");
diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c
new file mode 100644
index 000000000000..c769f477fd22
--- /dev/null
+++ b/drivers/edac/highbank_mc_edac.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+
+/* DDR Ctrlr Error Registers */
+#define HB_DDR_ECC_OPT 0x128
+#define HB_DDR_ECC_U_ERR_ADDR 0x130
+#define HB_DDR_ECC_U_ERR_STAT 0x134
+#define HB_DDR_ECC_U_ERR_DATAL 0x138
+#define HB_DDR_ECC_U_ERR_DATAH 0x13c
+#define HB_DDR_ECC_C_ERR_ADDR 0x140
+#define HB_DDR_ECC_C_ERR_STAT 0x144
+#define HB_DDR_ECC_C_ERR_DATAL 0x148
+#define HB_DDR_ECC_C_ERR_DATAH 0x14c
+#define HB_DDR_ECC_INT_STATUS 0x180
+#define HB_DDR_ECC_INT_ACK 0x184
+#define HB_DDR_ECC_U_ERR_ID 0x424
+#define HB_DDR_ECC_C_ERR_ID 0x428
+
+#define HB_DDR_ECC_INT_STAT_CE 0x8
+#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10
+#define HB_DDR_ECC_INT_STAT_UE 0x20
+#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40
+
+#define HB_DDR_ECC_OPT_MODE_MASK 0x3
+#define HB_DDR_ECC_OPT_FWC 0x100
+#define HB_DDR_ECC_OPT_XOR_SHIFT 16
+
+struct hb_mc_drvdata {
+ void __iomem *mc_vbase;
+};
+
+static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct hb_mc_drvdata *drvdata = mci->pvt_info;
+ u32 status, err_addr;
+
+ /* Read the interrupt status register */
+ status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS);
+
+ if (status & HB_DDR_ECC_INT_STAT_UE) {
+ err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, 0,
+ 0, 0, -1,
+ mci->ctl_name, "");
+ }
+ if (status & HB_DDR_ECC_INT_STAT_CE) {
+ u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT);
+ syndrome = (syndrome >> 8) & 0xff;
+ err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ err_addr >> PAGE_SHIFT,
+ err_addr & ~PAGE_MASK, syndrome,
+ 0, 0, -1,
+ mci->ctl_name, "");
+ }
+
+ /* clear the error, clears the interrupt */
+ writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+static ssize_t highbank_mc_err_inject_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct mem_ctl_info *mci = file->private_data;
+ struct hb_mc_drvdata *pdata = mci->pvt_info;
+ char buf[32];
+ size_t buf_size;
+ u32 reg;
+ u8 synd;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, data, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ if (!kstrtou8(buf, 16, &synd)) {
+ reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT);
+ reg &= HB_DDR_ECC_OPT_MODE_MASK;
+ reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
+ writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT);
+ }
+
+ return count;
+}
+
+static int debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations highbank_mc_debug_inject_fops = {
+ .open = debugfs_open,
+ .write = highbank_mc_err_inject_write,
+ .llseek = generic_file_llseek,
+};
+
+static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+{
+ if (mci->debugfs)
+ debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci,
+ &highbank_mc_debug_inject_fops);
+}
+#else
+static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
+{}
+#endif
+
+static int __devinit highbank_mc_probe(struct platform_device *pdev)
+{
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct hb_mc_drvdata *drvdata;
+ struct dimm_info *dimm;
+ struct resource *r;
+ u32 control;
+ int irq;
+ int res = 0;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct hb_mc_drvdata));
+ if (!mci)
+ return -ENOMEM;
+
+ mci->pdev = &pdev->dev;
+ drvdata = mci->pvt_info;
+ platform_set_drvdata(pdev, mci);
+
+ if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "Unable to get mem resource\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ resource_size(r), dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "Error while requesting mem region\n");
+ res = -EBUSY;
+ goto err;
+ }
+
+ drvdata->mc_vbase = devm_ioremap(&pdev->dev,
+ r->start, resource_size(r));
+ if (!drvdata->mc_vbase) {
+ dev_err(&pdev->dev, "Unable to map regs\n");
+ res = -ENOMEM;
+ goto err;
+ }
+
+ control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3;
+ if (!control || (control == 0x2)) {
+ dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
+ res = -ENODEV;
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
+ 0, dev_name(&pdev->dev), mci);
+ if (res < 0) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ goto err;
+ }
+
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = dev_name(&pdev->dev);
+ mci->mod_ver = "1";
+ mci->ctl_name = dev_name(&pdev->dev);
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ /* Only a single 4GB DIMM is supported */
+ dimm = *mci->dimms;
+ dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
+ dimm->grain = 8;
+ dimm->dtype = DEV_X8;
+ dimm->mtype = MEM_DDR3;
+ dimm->edac_mode = EDAC_SECDED;
+
+ res = edac_mc_add_mc(mci);
+ if (res < 0)
+ goto err;
+
+ highbank_mc_create_debugfs_nodes(mci);
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
+err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int highbank_mc_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static const struct of_device_id hb_ddr_ctrl_of_match[] = {
+ { .compatible = "calxeda,hb-ddr-ctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
+
+static struct platform_driver highbank_mc_edac_driver = {
+ .probe = highbank_mc_probe,
+ .remove = highbank_mc_remove,
+ .driver = {
+ .name = "hb_mc_edac",
+ .of_match_table = hb_ddr_ctrl_of_match,
+ },
+};
+
+module_platform_driver(highbank_mc_edac_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 8ad1744faacd..d3d19cc4e9a1 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -194,7 +194,7 @@ static void i3000_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
int row, multi_chan, channel;
unsigned long pfn, offset;
- multi_chan = mci->csrows[0].nr_channels - 1;
+ multi_chan = mci->csrows[0]->nr_channels - 1;
if (!(info->errsts & I3000_ERRSTS_BITS))
return 0;
@@ -245,9 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
- "UE overwrote CE", "", NULL);
+ "UE overwrote CE", "");
info->errsts = info->errsts2;
}
@@ -258,15 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
pfn, offset, 0,
row, -1, -1,
- "i3000 UE", "", NULL);
+ "i3000 UE", "");
else
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, offset, info->derrsyn,
row, multi_chan ? channel : 0, -1,
- "i3000 CE", "", NULL);
+ "i3000 CE", "");
return 1;
}
@@ -275,7 +275,7 @@ static void i3000_check(struct mem_ctl_info *mci)
{
struct i3000_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i3000_get_error_info(mci, &info);
i3000_process_error_info(mci, &info, 1);
}
@@ -322,7 +322,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
unsigned long mchbar;
void __iomem *window;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar);
mchbar &= I3000_MCHBAR_MASK;
@@ -366,9 +366,9 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
if (!mci)
return -ENOMEM;
- debugf3("MC: %s(): init mci\n", __func__);
+ edac_dbg(3, "MC: init mci\n");
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -393,14 +393,13 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
u8 value;
u32 cumul_size;
- struct csrow_info *csrow = &mci->csrows[i];
+ struct csrow_info *csrow = mci->csrows[i];
value = drb[i];
cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
if (interleaved)
cumul_size <<= 1;
- debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
- __func__, i, cumul_size);
+ edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
if (cumul_size == last_cumul_size)
continue;
@@ -410,7 +409,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
last_cumul_size = cumul_size;
for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j].dimm;
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / nr_channels;
dimm->grain = I3000_DEAP_GRAIN;
@@ -429,7 +428,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
@@ -445,7 +444,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("MC: %s(): success\n", __func__);
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
@@ -461,7 +460,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
@@ -477,7 +476,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
@@ -511,7 +510,7 @@ static int __init i3000_init(void)
{
int pci_rc;
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -525,14 +524,14 @@ static int __init i3000_init(void)
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
if (!mci_pdev) {
- debugf0("i3000 pci_get_device fail\n");
+ edac_dbg(0, "i3000 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
if (pci_rc < 0) {
- debugf0("i3000 init fail\n");
+ edac_dbg(0, "i3000 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -552,7 +551,7 @@ fail0:
static void __exit i3000_exit(void)
{
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3000_driver);
if (!i3000_registered) {
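The i3000 hunks above show the call-site change repeated across every driver in this series: a new error-count argument (always 1 here) is inserted right after mci, and the trailing NULL (the old arch-specific log pointer) is dropped. The UE call above, annotated argument by argument (comments are descriptive, not from the source):

	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
			     1,				/* error count (new parameter) */
			     pfn, offset, 0,		/* page frame, offset in page, syndrome */
			     row, -1, -1,		/* top/mid/low layer: csrow, channel, rank */
			     "i3000 UE", "");		/* message, other detail; the old trailing
							   NULL arch-log argument is gone */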
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index bbe43ef71823..47180a08edad 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -110,10 +110,10 @@ static int how_many_channels(struct pci_dev *pdev)
pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
+ edac_dbg(0, "In single channel mode\n");
return 1;
} else {
- debugf0("In dual channel mode.\n");
+ edac_dbg(0, "In dual channel mode\n");
return 2;
}
}
@@ -159,7 +159,7 @@ static void i3200_clear_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* Clear any error bits.
@@ -176,7 +176,7 @@ static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
struct i3200_priv *priv = mci->pvt_info;
void __iomem *window = priv->window;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -218,25 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
eccerrlog_row(channel, log),
-1, -1,
- "i3000 UE", "", NULL);
+ "i3000 UE", "");
} else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, eccerrlog_syndrome(log),
eccerrlog_row(channel, log),
-1, -1,
- "i3000 UE", "", NULL);
+ "i3000 UE", "");
}
}
}
@@ -245,7 +245,7 @@ static void i3200_check(struct mem_ctl_info *mci)
{
struct i3200_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i3200_get_and_clear_error_info(mci, &info);
i3200_process_error_info(mci, &info);
}
@@ -332,7 +332,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
void __iomem *window;
struct i3200_priv *priv;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
window = i3200_map_mchbar(pdev);
if (!window)
@@ -352,9 +352,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
if (!mci)
return -ENOMEM;
- debugf3("MC: %s(): init mci\n", __func__);
+ edac_dbg(3, "MC: init mci\n");
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -379,7 +379,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
*/
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
- struct csrow_info *csrow = &mci->csrows[i];
+ struct csrow_info *csrow = mci->csrows[i];
nr_pages = drb_to_nr_pages(drbs, stacked,
i / I3200_RANKS_PER_CHANNEL,
@@ -389,7 +389,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
continue;
for (j = 0; j < nr_channels; j++) {
- struct dimm_info *dimm = csrow->channels[j].dimm;
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / nr_channels;
dimm->grain = nr_pages << PAGE_SHIFT;
@@ -403,12 +403,12 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: %s(): success\n", __func__);
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
@@ -424,7 +424,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
@@ -441,7 +441,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
struct i3200_priv *priv;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
@@ -475,7 +475,7 @@ static int __init i3200_init(void)
{
int pci_rc;
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -489,14 +489,14 @@ static int __init i3200_init(void)
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3200_HB, NULL);
if (!mci_pdev) {
- debugf0("i3200 pci_get_device fail\n");
+ edac_dbg(0, "i3200 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
if (pci_rc < 0) {
- debugf0("i3200 init fail\n");
+ edac_dbg(0, "i3200 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -516,7 +516,7 @@ fail0:
static void __exit i3200_exit(void)
{
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&i3200_driver);
if (!i3200_registered) {
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 11ea835f155a..39c63757c2a1 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -273,7 +273,7 @@
#define CHANNELS_PER_BRANCH 2
#define MAX_BRANCHES 2
-/* Defines to extract the vaious fields from the
+/* Defines to extract the various fields from the
* MTRx - Memory Technology Registers
*/
#define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8))
@@ -287,22 +287,6 @@
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
-#ifdef CONFIG_EDAC_DEBUG
-static char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "reserved"
-};
-
-static char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
/* enables the report of miscellaneous messages as CE errors - default off */
static int misc_messages;
@@ -344,7 +328,13 @@ struct i5000_pvt {
struct pci_dev *branch_1; /* 22.0 */
u16 tolm; /* top of low memory */
- u64 ambase; /* AMB BAR */
+ union {
+ u64 ambase; /* AMB BAR */
+ struct {
+ u32 ambase_bottom;
+ u32 ambase_top;
+ } u __packed;
+ };
u16 mir0, mir1, mir2;
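The anonymous union added above lets the two 32-bit halves of the AMB BAR be filled by separate pci_read_config_dword() calls (changed further down in this file) without casting. The old code added sizeof(u32) to a u32 *, which steps by four elements rather than four bytes. A stand-alone illustration of that pointer-arithmetic pitfall:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ambase = 0;
	uint32_t *p = (uint32_t *)&ambase;

	/* intended: advance 4 bytes to reach the upper half */
	printf("intended offset: %zu bytes\n", sizeof(uint32_t));
	/* actual: pointer arithmetic scales by the pointed-to size */
	printf("actual offset:   %td bytes\n",
	       (char *)(p + sizeof(uint32_t)) - (char *)p);	/* prints 16 */
	return 0;
}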
@@ -494,10 +484,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channel= %d "
- "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
+ rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
switch (allErrors) {
@@ -536,10 +525,10 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
- edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
channel >> 1, channel & 1, rank,
rdwr ? "Write error" : "Read error",
- msg, NULL);
+ msg);
}
/*
@@ -574,7 +563,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* ONLY ONE of the possible error bits will be set, as per the docs */
ue_errors = allErrors & FERR_NF_UNCORRECTABLE;
if (ue_errors) {
- debugf0("\tUncorrected bits= 0x%x\n", ue_errors);
+ edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
@@ -590,11 +579,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0
- ("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
switch (ue_errors) {
case FERR_NF_M12ERR:
@@ -637,16 +624,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
channel >> 1, -1, rank,
rdwr ? "Write error" : "Read error",
- msg, NULL);
+ msg);
}
/* Check correctable errors */
ce_errors = allErrors & FERR_NF_CORRECTABLE;
if (ce_errors) {
- debugf0("\tCorrected bits= 0x%x\n", ce_errors);
+ edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors);
branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd);
@@ -664,10 +651,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
ras = REC_RAS(info->recmemb);
cas = REC_CAS(info->recmemb);
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr ? "Write" : "Read", ras, cas);
+ edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr ? "Write" : "Read", ras, cas);
switch (ce_errors) {
case FERR_NF_M17ERR:
@@ -692,10 +678,10 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
specific);
/* Call the helper to output message */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
channel >> 1, channel % 2, rank,
rdwr ? "Write error" : "Read error",
- msg, NULL);
+ msg);
}
if (!misc_messages)
@@ -738,9 +724,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
"Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
branch >> 1, -1, -1,
- "Misc error", msg, NULL);
+ "Misc error", msg);
}
}
@@ -779,7 +765,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci)
static void i5000_check_error(struct mem_ctl_info *mci)
{
struct i5000_error_info info;
- debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
+ edac_dbg(4, "MC%d\n", mci->mc_idx);
i5000_get_error_info(mci, &info);
i5000_process_error_info(mci, &info, 1);
}
@@ -850,15 +836,16 @@ static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx)
pvt->fsb_error_regs = pdev;
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor,
+ pvt->branchmap_werrors->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pdev = NULL;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -981,16 +968,25 @@ static void decode_mtr(int slot_row, u16 mtr)
ans = MTR_DIMMS_PRESENT(mtr);
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
+ slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANK(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "reserved");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
}
static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
@@ -1061,7 +1057,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
"--------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
@@ -1082,7 +1078,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
}
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
@@ -1092,7 +1088,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
"--------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
@@ -1105,7 +1101,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p += n;
space -= n;
}
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
@@ -1118,7 +1114,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
}
/* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
@@ -1141,24 +1137,25 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
pvt = mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
- (u32 *) & pvt->ambase);
+ &pvt->u.ambase_bottom);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
- ((u32 *) & pvt->ambase) + sizeof(u32));
+ &pvt->u.ambase_top);
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+ edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = pvt->tolm << 28;
- debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm);
+ edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n",
+ actual_tolm, actual_tolm);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
@@ -1168,15 +1165,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
limit = (pvt->mir0 >> 4) & 0x0FFF;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0x0FFF;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir2 >> 4) & 0x0FFF;
way0 = pvt->mir2 & 0x1;
way1 = pvt->mir2 & 0x2;
- debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
/* Get the MTR[0-3] regs */
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
@@ -1185,31 +1185,31 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
+ slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch >= CHANNELS_PER_BRANCH) {
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row,
- where, pvt->b1_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
+ slot_row, where, pvt->b1_mtr[slot_row]);
} else {
pvt->b1_mtr[slot_row] = 0;
}
}
/* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
+ edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_0, AMB_PRESENT_0,
&pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMB_PRESENT_1,
&pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branchs (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1217,18 +1217,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
+ edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) {
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
}
pci_read_config_word(pvt->branch_1, AMB_PRESENT_0,
&pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMB_PRESENT_1,
&pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
@@ -1363,10 +1363,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
int num_channels;
int num_dimms_per_channel;
- debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
@@ -1388,8 +1387,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
- __func__, num_channels, num_dimms_per_channel);
+ edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n",
+ num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
@@ -1406,10 +1405,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- kobject_get(&mci->edac_mci_kobj);
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
- mci->dev = &pdev->dev; /* record ptr to the generic device */
+ mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
@@ -1439,19 +1437,16 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i5000_init_csrows(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5000_init_csrows() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i5000_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
- __FILE__, __func__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1479,7 +1474,6 @@ fail1:
i5000_put_devices(mci);
fail0:
- kobject_put(&mci->edac_mci_kobj);
edac_mc_free(mci);
return -ENODEV;
}
@@ -1496,7 +1490,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1515,7 +1509,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "\n");
if (i5000_pci)
edac_pci_release_generic_ctl(i5000_pci);
@@ -1525,7 +1519,6 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
/* retrieve references to resources, and free those resources */
i5000_put_devices(mci);
- kobject_put(&mci->edac_mci_kobj);
edac_mc_free(mci);
}
@@ -1562,7 +1555,7 @@ static int __init i5000_init(void)
{
int pci_rc;
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1578,7 +1571,7 @@ static int __init i5000_init(void)
*/
static void __exit i5000_exit(void)
{
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5000_driver);
}
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index e9e7c2a29dc3..c4b5e5f868e8 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -431,10 +431,10 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
"bank %u, cas %u, ras %u\n",
bank, cas, ras);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, syndrome,
chan, rank, -1,
- msg, detail, NULL);
+ msg, detail);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -453,10 +453,10 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
"bank %u, cas %u, ras %u\n",
bank, cas, ras);
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, syndrome,
chan, rank, -1,
- msg, detail, NULL);
+ msg, detail);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -859,8 +859,8 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
i5100_rank_to_slot(mci, chan, rank));
}
- debugf2("dimm channel %d, rank %d, size %ld\n",
- chan, rank, (long)PAGES_TO_MiB(npages));
+ edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
@@ -943,7 +943,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
goto bail_disable_ch1;
}
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
priv = mci->pvt_info;
priv->ranksperchan = ranksperch;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 6640c29e1885..277246998b80 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -300,24 +300,6 @@ static inline int extract_fbdchan_indx(u32 x)
return (x>>28) & 0x3;
}
-#ifdef CONFIG_EDAC_DEBUG
-/* MTR NUMROW */
-static const char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "65,536 - 16 rows"
-};
-
-/* MTR NUMCOL */
-static const char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
/* Device name and register DID (Device ID) */
struct i5400_dev_info {
const char *ctl_name; /* name for this device */
@@ -345,7 +327,13 @@ struct i5400_pvt {
struct pci_dev *branch_1; /* 22.0 */
u16 tolm; /* top of low memory */
- u64 ambase; /* AMB BAR */
+ union {
+ u64 ambase; /* AMB BAR */
+ struct {
+ u32 ambase_bottom;
+ u32 ambase_top;
+ } u __packed;
+ };
u16 mir0, mir1;
@@ -560,10 +548,9 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
- buf_id, rdwr_str(rdwr), ras, cas);
+ edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, channel + 1, branch >> 1, bank,
+ buf_id, rdwr_str(rdwr), ras, cas);
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
@@ -573,10 +560,10 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
"Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
bank, buf_id, ras, cas, allErrors, error_name[errnum]);
- edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0,
branch >> 1, -1, rank,
rdwr ? "Write error" : "Read error",
- msg, NULL);
+ msg);
}
/*
@@ -613,7 +600,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Correctable errors */
if (allErrors & ERROR_NF_CORRECTABLE) {
- debugf0("\tCorrected bits= 0x%lx\n", allErrors);
+ edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors);
branch = extract_fbdchan_indx(info->ferr_nf_fbd);
@@ -634,10 +621,9 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, branch >> 1, bank,
- rdwr_str(rdwr), ras, cas);
+ edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, branch >> 1, bank,
+ rdwr_str(rdwr), ras, cas);
/* Form out message */
snprintf(msg, sizeof(msg),
@@ -646,10 +632,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0,
branch >> 1, channel % 2, rank,
rdwr ? "Write error" : "Read error",
- msg, NULL);
+ msg);
return;
}
@@ -700,7 +686,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci)
static void i5400_check_error(struct mem_ctl_info *mci)
{
struct i5400_error_info info;
- debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
+ edac_dbg(4, "MC%d\n", mci->mc_idx);
i5400_get_error_info(mci, &info);
i5400_process_error_info(mci, &info);
}
@@ -786,15 +772,16 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
}
pvt->fsb_error_regs = pdev;
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->system_address),
- pvt->system_address->vendor, pvt->system_address->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->branchmap_werrors),
- pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->fsb_error_regs),
- pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->system_address),
+ pvt->system_address->vendor, pvt->system_address->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->branchmap_werrors),
+ pvt->branchmap_werrors->vendor,
+ pvt->branchmap_werrors->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->fsb_error_regs),
+ pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device);
pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_5400_FBD0, NULL);
@@ -882,8 +869,8 @@ static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
n = dimm;
if (n >= DIMMS_PER_CHANNEL) {
- debugf0("ERROR: trying to access an invalid dimm: %d\n",
- dimm);
+ edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
@@ -903,20 +890,29 @@ static void decode_mtr(int slot_row, u16 mtr)
ans = MTR_DIMMS_PRESENT(mtr);
- debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n",
+ slot_row, mtr, ans ? "" : "NOT ");
if (!ans)
return;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+
+ edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
+ MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANK(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "65,536 - 16 rows");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
}
static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
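[Editor's note] The chained ternaries added to decode_mtr() above spell out a simple mapping from the 2-bit MTR row/column fields to address-bit counts (the equivalent numrow_toString/numcol_toString tables are deleted from i7300_edac.c later in this diff, and that file's MTR_DIMM_COLS_ADDR_BITS macro adds 10 the same way). A small stand-alone sketch of the same mapping, assuming the 2-bit field has already been extracted:

	#include <stdio.h>

	/* rows: field 0..3 maps to 13..16 row address bits (8,192 ... 65,536 rows) */
	static int mtr_rows_addr_bits(int v)
	{
		return 13 + (v & 3);
	}

	/* columns: 0..2 map to 10..12 column address bits; 3 is reserved */
	static int mtr_cols_addr_bits(int v)
	{
		return (v & 3) == 3 ? -1 : 10 + (v & 3);
	}

	int main(void)
	{
		for (int v = 0; v < 4; v++)
			printf("field=%d rows_bits=%d cols_bits=%d\n",
			       v, mtr_rows_addr_bits(v), mtr_cols_addr_bits(v));
		return 0;
	}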
@@ -989,7 +985,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
"-------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
@@ -1004,7 +1000,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
p += n;
space -= n;
}
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
@@ -1014,7 +1010,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
"-------------------------------");
p += n;
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
@@ -1029,7 +1025,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
}
space -= n;
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
@@ -1042,7 +1038,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
}
/* output the last message and free buffer */
- debugf2("%s\n", mem_buffer);
+ edac_dbg(2, "%s\n", mem_buffer);
kfree(mem_buffer);
}
@@ -1065,25 +1061,25 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
pvt = mci->pvt_info;
pci_read_config_dword(pvt->system_address, AMBASE,
- (u32 *) &pvt->ambase);
+ &pvt->u.ambase_bottom);
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
- ((u32 *) &pvt->ambase) + sizeof(u32));
+ &pvt->u.ambase_top);
maxdimmperch = pvt->maxdimmperch;
maxch = pvt->maxch;
- debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
- (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
+ edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
+ (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
/* Get the Branch Map regs */
pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
+ edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
+ actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0);
pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1);
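[Editor's note] The TOLM arithmetic touched above is easy to misread: after the >>= 12, pvt->tolm counts 256MB regions (as the debug string says), so (1000l * tolm) >> 2 yields 250 per region, i.e. 0.250 GiB, printed with three decimals, while tolm << 28 is the byte address. A stand-alone rendering of that arithmetic with an assumed value of 8 regions, purely for illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned short tolm = 8;	/* assumed: 8 x 256MB regions = 2 GiB */
		unsigned int actual_tolm;

		/* same expression as the driver: (1000 * tolm) >> (30 - 28) == 250 * tolm */
		actual_tolm = (unsigned int)((1000l * tolm) >> (30 - 28));
		printf("TOLM=%u regions -> %u.%03u GB (0x%x)\n",
		       tolm, actual_tolm / 1000, actual_tolm % 1000,
		       (unsigned int)tolm << 28);	/* prints 2.000 GB (0x80000000) */
		return 0;
	}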
@@ -1092,11 +1088,13 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
limit = (pvt->mir0 >> 4) & 0x0fff;
way0 = pvt->mir0 & 0x1;
way1 = pvt->mir0 & 0x2;
- debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
limit = (pvt->mir1 >> 4) & 0xfff;
way0 = pvt->mir1 & 0x1;
way1 = pvt->mir1 & 0x2;
- debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
+ edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n",
+ limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
@@ -1106,8 +1104,8 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_word(pvt->branch_0, where,
&pvt->b0_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where,
- pvt->b0_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n",
+ slot_row, where, pvt->b0_mtr[slot_row]);
if (pvt->maxch < CHANNELS_PER_BRANCH) {
pvt->b1_mtr[slot_row] = 0;
@@ -1117,22 +1115,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Branch 1 set of MTR registers */
pci_read_config_word(pvt->branch_1, where,
&pvt->b1_mtr[slot_row]);
- debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where,
- pvt->b1_mtr[slot_row]);
+ edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n",
+ slot_row, where, pvt->b1_mtr[slot_row]);
}
/* Read and dump branch 0's MTRs */
- debugf2("\nMemory Technology Registers:\n");
- debugf2(" Branch 0:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
+ edac_dbg(2, " Branch 0:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
&pvt->b0_ambpresent0);
- debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0);
pci_read_config_word(pvt->branch_0, AMBPRESENT_1,
&pvt->b0_ambpresent1);
- debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1);
/* Only if we have 2 branchs (4 channels) */
if (pvt->maxch < CHANNELS_PER_BRANCH) {
@@ -1140,18 +1138,18 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
pvt->b1_ambpresent1 = 0;
} else {
/* Read and dump branch 1's MTRs */
- debugf2(" Branch 1:\n");
+ edac_dbg(2, " Branch 1:\n");
for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
&pvt->b1_ambpresent0);
- debugf2("\t\tAMB-Branch 1-present0 0x%x:\n",
- pvt->b1_ambpresent0);
+ edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n",
+ pvt->b1_ambpresent0);
pci_read_config_word(pvt->branch_1, AMBPRESENT_1,
&pvt->b1_ambpresent1);
- debugf2("\t\tAMB-Branch 1-present1 0x%x:\n",
- pvt->b1_ambpresent1);
+ edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n",
+ pvt->b1_ambpresent1);
}
/* Go and determine the size of each DIMM and place in an
@@ -1203,10 +1201,9 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
size_mb = pvt->dimm_info[slot][channel].megabytes;
- debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
- __func__, dimm - mci->dimms,
- channel / 2, channel % 2, slot,
- size_mb / 1000, size_mb % 1000);
+ edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n",
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
dimm->nr_pages = size_mb << 8;
dimm->grain = 8;
@@ -1227,7 +1224,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
* With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+.
*/
if (ndimms == 1)
- mci->dimms[0].edac_mode = EDAC_SECDED;
+ mci->dimms[0]->edac_mode = EDAC_SECDED;
return (ndimms == 0);
}
@@ -1270,10 +1267,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
- debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __FILE__, __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
@@ -1297,9 +1293,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
- mci->dev = &pdev->dev; /* record ptr to the generic device */
+ mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
@@ -1329,19 +1325,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* initialize the MC control structure 'dimms' table
* with the mapping and control information */
if (i5400_init_dimms(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_dimms() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n",
- __FILE__, __func__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1385,7 +1378,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "MC:\n");
/* wake up device */
rc = pci_enable_device(pdev);
@@ -1404,7 +1397,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "\n");
if (i5400_pci)
edac_pci_release_generic_ctl(i5400_pci);
@@ -1450,7 +1443,7 @@ static int __init i5400_init(void)
{
int pci_rc;
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(2, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1466,7 +1459,7 @@ static int __init i5400_init(void)
*/
static void __exit i5400_exit(void)
{
- debugf2("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(2, "MC:\n");
pci_unregister_driver(&i5400_driver);
}
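[Editor's note] All of the i5400 conversions above replace the driver-local debugfN() wrappers with edac_dbg(level, fmt, ...), which takes the debug level as its first argument and presumably folds the function name in itself -- that is why the __FILE__/__func__ arguments and the "%s()" prefixes disappear from the call sites. The macro is not part of this diff; a rough printf-based stand-in that mimics the observable behaviour might look like the sketch below (the name edac_debug_level and the "EDAC DEBUG" prefix are assumptions, not quoted from the EDAC core):

	#include <stdio.h>

	static int edac_debug_level = 2;	/* assumed runtime knob, illustration only */

	#define edac_dbg(level, fmt, ...)					\
	do {									\
		if ((level) <= edac_debug_level)				\
			printf("EDAC DEBUG: %s: " fmt, __func__,		\
			       ##__VA_ARGS__);					\
	} while (0)

	int main(void)
	{
		edac_dbg(0, "MC: mci = %p\n", (void *)0);	/* printed */
		edac_dbg(4, "suppressed at level %d\n", edac_debug_level);
		return 0;
	}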
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 97c22fd650ee..a09d0667f72a 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -182,24 +182,6 @@ static const u16 mtr_regs[MAX_SLOTS] = {
#define MTR_DIMM_COLS(mtr) ((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10)
-#ifdef CONFIG_EDAC_DEBUG
-/* MTR NUMROW */
-static const char *numrow_toString[] = {
- "8,192 - 13 rows",
- "16,384 - 14 rows",
- "32,768 - 15 rows",
- "65,536 - 16 rows"
-};
-
-/* MTR NUMCOL */
-static const char *numcol_toString[] = {
- "1,024 - 10 columns",
- "2,048 - 11 columns",
- "4,096 - 12 columns",
- "reserved"
-};
-#endif
-
/************************************************
* i7300 Register definitions for error detection
************************************************/
@@ -467,10 +449,10 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
"Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
bank, ras, cas, errors, specific);
- edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
branch, -1, rank,
is_wr ? "Write error" : "Read error",
- pvt->tmp_prt_buffer, NULL);
+ pvt->tmp_prt_buffer);
}
@@ -513,11 +495,11 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
"DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
bank, ras, cas, errors, specific);
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
syndrome,
branch >> 1, channel % 2, rank,
is_wr ? "Write error" : "Read error",
- pvt->tmp_prt_buffer, NULL);
+ pvt->tmp_prt_buffer);
}
return;
}
@@ -614,9 +596,8 @@ static int decode_mtr(struct i7300_pvt *pvt,
mtr = pvt->mtr[slot][branch];
ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;
- debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n",
- slot, channel,
- ans ? "Present" : "NOT Present");
+ edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
+ slot, channel, ans ? "" : "NOT ");
/* Determine if there is a DIMM present in this DIMM slot */
if (!ans)
@@ -638,16 +619,25 @@ static int decode_mtr(struct i7300_pvt *pvt,
dinfo->megabytes = 1 << addrBits;
- debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
-
- debugf2("\t\tELECTRICAL THROTTLING is %s\n",
- MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
-
- debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
- debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single");
- debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]);
- debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
- debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
+ edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
+
+ edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
+ MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");
+
+ edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
+ edac_dbg(2, "\t\tNUMRANK: %s\n",
+ MTR_DIMM_RANKS(mtr) ? "double" : "single");
+ edac_dbg(2, "\t\tNUMROW: %s\n",
+ MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
+ MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
+ MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
+ "65,536 - 16 rows");
+ edac_dbg(2, "\t\tNUMCOL: %s\n",
+ MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
+ MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
+ MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
+ "reserved");
+ edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);
/*
* The type of error detection actually depends of the
@@ -663,9 +653,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
dimm->edac_mode = EDAC_SECDED;
- debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
+ edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
- debugf2("\t\tECC code is on Lockstep mode\n");
+ edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
dimm->edac_mode = EDAC_S8ECD8ED;
else
@@ -674,9 +664,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
/* ask what device type on this row */
if (MTR_DRAM_WIDTH(mtr) == 8) {
- debugf2("\t\tScrub algorithm for x8 is on %s mode\n",
- IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
- "enhanced" : "normal");
+ edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
+ IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
+ "enhanced" : "normal");
dimm->dtype = DEV_X8;
} else
@@ -710,14 +700,14 @@ static void print_dimm_size(struct i7300_pvt *pvt)
p += n;
space -= n;
}
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
n = snprintf(p, space, "-------------------------------"
"------------------------------");
p += n;
space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
@@ -733,7 +723,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
space -= n;
}
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
}
@@ -742,7 +732,7 @@ static void print_dimm_size(struct i7300_pvt *pvt)
"------------------------------");
p += n;
space -= n;
- debugf2("%s\n", pvt->tmp_prt_buffer);
+ edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
p = pvt->tmp_prt_buffer;
space = PAGE_SIZE;
#endif
@@ -765,7 +755,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pvt = mci->pvt_info;
- debugf2("Memory Technology Registers:\n");
+ edac_dbg(2, "Memory Technology Registers:\n");
/* Get the AMB present registers for the four channels */
for (branch = 0; branch < MAX_BRANCHES; branch++) {
@@ -774,15 +764,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_0,
&pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
+ edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
+ channel, pvt->ambpresent[channel]);
channel = to_channel(1, branch);
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
AMBPRESENT_1,
&pvt->ambpresent[channel]);
- debugf2("\t\tAMB-present CH%d = 0x%x:\n",
- channel, pvt->ambpresent[channel]);
+ edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
+ channel, pvt->ambpresent[channel]);
}
/* Get the set of MTR[0-7] regs by each branch */
@@ -824,12 +814,11 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
if (mir[mir_no] & 3)
- debugf2("MIR%d: limit= 0x%x Branch(es) that participate:"
- " %s %s\n",
- mir_no,
- (mir[mir_no] >> 4) & 0xfff,
- (mir[mir_no] & 1) ? "B0" : "",
- (mir[mir_no] & 2) ? "B1" : "");
+ edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
+ mir_no,
+ (mir[mir_no] >> 4) & 0xfff,
+ (mir[mir_no] & 1) ? "B0" : "",
+ (mir[mir_no] & 2) ? "B1" : "");
}
/**
@@ -849,17 +838,17 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
(u32 *) &pvt->ambase);
- debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
+ edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);
/* Get the Branch Map regs */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
pvt->tolm >>= 12;
- debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm,
- pvt->tolm);
+ edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
+ pvt->tolm, pvt->tolm);
actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
- debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
- actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
+ edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
+ actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
/* Get memory controller settings */
pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
@@ -868,15 +857,15 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci)
&pvt->mc_settings_a);
if (IS_SINGLE_MODE(pvt->mc_settings_a))
- debugf0("Memory controller operating on single mode\n");
+ edac_dbg(0, "Memory controller operating on single mode\n");
else
- debugf0("Memory controller operating on %s mode\n",
- IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored");
+ edac_dbg(0, "Memory controller operating on %smirrored mode\n",
+ IS_MIRRORED(pvt->mc_settings) ? "" : "non-");
- debugf0("Error detection is %s\n",
- IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
- debugf0("Retry is %s\n",
- IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ edac_dbg(0, "Error detection is %s\n",
+ IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
+ edac_dbg(0, "Retry is %s\n",
+ IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
/* Get Memory Interleave Range registers */
pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
@@ -970,18 +959,18 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci)
}
}
- debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_0_fsb_ctlr),
- pvt->pci_dev_16_0_fsb_ctlr->vendor,
- pvt->pci_dev_16_0_fsb_ctlr->device);
- debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_1_fsb_addr_map),
- pvt->pci_dev_16_1_fsb_addr_map->vendor,
- pvt->pci_dev_16_1_fsb_addr_map->device);
- debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n",
- pci_name(pvt->pci_dev_16_2_fsb_err_regs),
- pvt->pci_dev_16_2_fsb_err_regs->vendor,
- pvt->pci_dev_16_2_fsb_err_regs->device);
+ edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_0_fsb_ctlr),
+ pvt->pci_dev_16_0_fsb_ctlr->vendor,
+ pvt->pci_dev_16_0_fsb_ctlr->device);
+ edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_1_fsb_addr_map),
+ pvt->pci_dev_16_1_fsb_addr_map->vendor,
+ pvt->pci_dev_16_1_fsb_addr_map->device);
+ edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n",
+ pci_name(pvt->pci_dev_16_2_fsb_err_regs),
+ pvt->pci_dev_16_2_fsb_err_regs->vendor,
+ pvt->pci_dev_16_2_fsb_err_regs->device);
pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
@@ -1032,10 +1021,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (rc == -EIO)
return rc;
- debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n",
- __func__,
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* We only are looking for func 0 of the set */
if (PCI_FUNC(pdev->devfn) != 0)
@@ -1055,9 +1043,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+ edac_dbg(0, "MC: mci = %p\n", mci);
- mci->dev = &pdev->dev; /* record ptr to the generic device */
+ mci->pdev = &pdev->dev; /* record ptr to the generic device */
pvt = mci->pvt_info;
pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */
@@ -1088,19 +1076,16 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
/* initialize the MC control structure 'csrows' table
* with the mapping and control information */
if (i7300_get_mc_regs(mci)) {
- debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i7300_init_csrows() returned nonzero "
- "value\n");
+ edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
} else {
- debugf1("MC: Enable error reporting now\n");
+ edac_dbg(1, "MC: Enable error reporting now\n");
i7300_enable_error_reporting(mci);
}
/* add this new MC control structure to EDAC's list of MCs */
if (edac_mc_add_mc(mci)) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -1142,7 +1127,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
char *tmp;
- debugf0(__FILE__ ": %s()\n", __func__);
+ edac_dbg(0, "\n");
if (i7300_pci)
edac_pci_release_generic_ctl(i7300_pci);
@@ -1189,7 +1174,7 @@ static int __init i7300_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1204,7 +1189,7 @@ static int __init i7300_init(void)
*/
static void __exit i7300_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
pci_unregister_driver(&i7300_driver);
}
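[Editor's note] Both calculate_dimm_size() in i5400_edac.c and print_dimm_size() here build each table row with repeated snprintf() calls into a PAGE_SIZE scratch buffer and then flush the finished line through a single edac_dbg(2, "%s\n", ...); the hunks above only touch those flush calls. A stand-alone sketch of that accumulate-then-flush pattern (buffer size and cell contents are illustrative):

	#include <stdio.h>

	#define BUF_SIZE 4096	/* stands in for PAGE_SIZE */

	int main(void)
	{
		char mem_buffer[BUF_SIZE];
		char *p = mem_buffer;
		int space = BUF_SIZE;
		int n, channel;

		/* accumulate one table row, cell by cell */
		n = snprintf(p, space, "%-8s", "dimm");
		p += n;
		space -= n;
		for (channel = 0; channel < 4; channel++) {
			n = snprintf(p, space, " ch%d", channel);
			p += n;
			space -= n;
		}

		/* flush the finished line in one go, then reset the cursor */
		printf("%s\n", mem_buffer);
		p = mem_buffer;
		space = BUF_SIZE;
		return 0;
	}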
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index a499c7ed820a..3672101023bd 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -248,6 +248,8 @@ struct i7core_dev {
};
struct i7core_pvt {
+ struct device *addrmatch_dev, *chancounts_dev;
+
struct pci_dev *pci_noncore;
struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
@@ -514,29 +516,28 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
- debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
- pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
- pvt->info.max_dod, pvt->info.ch_map);
+ edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
+ pvt->i7core_dev->socket, pvt->info.mc_control,
+ pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map);
if (ECC_ENABLED(pvt)) {
- debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
+ edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
if (ECCx8(pvt))
mode = EDAC_S8ECD8ED;
else
mode = EDAC_S4ECD4ED;
} else {
- debugf0("ECC disabled\n");
+ edac_dbg(0, "ECC disabled\n");
mode = EDAC_NONE;
}
/* FIXME: need to handle the error codes */
- debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
- "x%x x 0x%x\n",
- numdimms(pvt->info.max_dod),
- numrank(pvt->info.max_dod >> 2),
- numbank(pvt->info.max_dod >> 4),
- numrow(pvt->info.max_dod >> 6),
- numcol(pvt->info.max_dod >> 9));
+ edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n",
+ numdimms(pvt->info.max_dod),
+ numrank(pvt->info.max_dod >> 2),
+ numbank(pvt->info.max_dod >> 4),
+ numrow(pvt->info.max_dod >> 6),
+ numcol(pvt->info.max_dod >> 9));
for (i = 0; i < NUM_CHANS; i++) {
u32 data, dimm_dod[3], value[8];
@@ -545,11 +546,11 @@ static int get_dimm_config(struct mem_ctl_info *mci)
continue;
if (!CH_ACTIVE(pvt, i)) {
- debugf0("Channel %i is not active\n", i);
+ edac_dbg(0, "Channel %i is not active\n", i);
continue;
}
if (CH_DISABLED(pvt, i)) {
- debugf0("Channel %i is disabled\n", i);
+ edac_dbg(0, "Channel %i is disabled\n", i);
continue;
}
@@ -580,15 +581,14 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_ch[i][1],
MC_DOD_CH_DIMM2, &dimm_dod[2]);
- debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%s%s%s%cDIMMs\n",
- i,
- RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
- data,
- pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
- pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
- pvt->channel[i].has_4rank ? "HAS_4R " : "",
- (data & REGISTERED_DIMM) ? 'R' : 'U');
+ edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n",
+ i,
+ RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
+ data,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
+ (data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
u32 banks, ranks, rows, cols;
@@ -607,11 +607,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- debugf0("\tdimm %d %d Mb offset: %x, "
- "bank: %d, rank: %d, row: %#x, col: %#x\n",
- j, size,
- RANKOFFSET(dimm_dod[j]),
- banks, ranks, rows, cols);
+ edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
+ j, size,
+ RANKOFFSET(dimm_dod[j]),
+ banks, ranks, rows, cols);
npages = MiB_TO_PAGES(size);
@@ -647,12 +646,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
- debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
+ edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
for (j = 0; j < 8; j++)
- debugf1("\t\t%#x\t%#x\t%#x\n",
- (value[j] >> 27) & 0x1,
- (value[j] >> 24) & 0x7,
- (value[j] & ((1 << 24) - 1)));
+ edac_dbg(1, "\t\t%#x\t%#x\t%#x\n",
+ (value[j] >> 27) & 0x1,
+ (value[j] >> 24) & 0x7,
+ (value[j] & ((1 << 24) - 1)));
}
return 0;
@@ -662,6 +661,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
Error insertion routines
****************************************************************************/
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
/* The i7core has independent error injection features per channel.
However, to have a simpler code, we don't allow enabling error injection
on more than one channel.
@@ -691,9 +692,11 @@ static int disable_inject(const struct mem_ctl_info *mci)
* bit 0 - refers to the lower 32-byte half cacheline
* bit 1 - refers to the upper 32-byte half cacheline
*/
-static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
+static ssize_t i7core_inject_section_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
@@ -709,9 +712,11 @@ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
return count;
}
-static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
- char *data)
+static ssize_t i7core_inject_section_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
return sprintf(data, "0x%08x\n", pvt->inject.section);
}
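[Editor's note] Because the sysfs callbacks now receive a struct device instead of the mem_ctl_info, the diff adds a to_mci() helper (the container_of() define above) to climb back from the embedded mci->dev to the controller structure. A stand-alone illustration of that idiom, with the structures stripped to the bare minimum (nothing here mirrors the real EDAC layout beyond the embedded device member):

	#include <stdio.h>
	#include <stddef.h>

	/* same idea as the kernel's container_of(): recover the outer struct
	 * from a pointer to one of its members */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device { int dummy; };

	struct mem_ctl_info {
		int mc_idx;
		struct device dev;	/* embedded, as assumed by to_mci() */
	};

	#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

	static void show(struct device *dev)
	{
		struct mem_ctl_info *mci = to_mci(dev);

		printf("callback reached MC%d\n", mci->mc_idx);
	}

	int main(void)
	{
		struct mem_ctl_info mci = { .mc_idx = 0 };

		show(&mci.dev);		/* sysfs hands the callback only the device */
		return 0;
	}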
@@ -724,10 +729,12 @@ static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
* bit 1 - inject ECC error
* bit 2 - inject parity error
*/
-static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
+static ssize_t i7core_inject_type_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
- struct i7core_pvt *pvt = mci->pvt_info;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
@@ -742,10 +749,13 @@ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
return count;
}
-static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
- char *data)
+static ssize_t i7core_inject_type_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
+
return sprintf(data, "0x%08x\n", pvt->inject.type);
}
@@ -759,9 +769,11 @@ static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
* 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
* uncorrectable error to be injected.
*/
-static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t i7core_inject_eccmask_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
@@ -777,10 +789,13 @@ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
return count;
}
-static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
- char *data)
+static ssize_t i7core_inject_eccmask_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
+
return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}
@@ -797,14 +812,16 @@ static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
#define DECLARE_ADDR_MATCH(param, limit) \
static ssize_t i7core_inject_store_##param( \
- struct mem_ctl_info *mci, \
- const char *data, size_t count) \
+ struct device *dev, \
+ struct device_attribute *mattr, \
+ const char *data, size_t count) \
{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
struct i7core_pvt *pvt; \
long value; \
int rc; \
\
- debugf1("%s()\n", __func__); \
+ edac_dbg(1, "\n"); \
pvt = mci->pvt_info; \
\
if (pvt->inject.enable) \
@@ -824,13 +841,15 @@ static ssize_t i7core_inject_store_##param( \
} \
\
static ssize_t i7core_inject_show_##param( \
- struct mem_ctl_info *mci, \
- char *data) \
+ struct device *dev, \
+ struct device_attribute *mattr, \
+ char *data) \
{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
struct i7core_pvt *pvt; \
\
pvt = mci->pvt_info; \
- debugf1("%s() pvt=%p\n", __func__, pvt); \
+ edac_dbg(1, "pvt=%p\n", pvt); \
if (pvt->inject.param < 0) \
return sprintf(data, "any\n"); \
else \
@@ -838,14 +857,9 @@ static ssize_t i7core_inject_show_##param( \
}
#define ATTR_ADDR_MATCH(param) \
- { \
- .attr = { \
- .name = #param, \
- .mode = (S_IRUGO | S_IWUSR) \
- }, \
- .show = i7core_inject_show_##param, \
- .store = i7core_inject_store_##param, \
- }
+ static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \
+ i7core_inject_show_##param, \
+ i7core_inject_store_##param)
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
@@ -854,14 +868,21 @@ DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
+ATTR_ADDR_MATCH(channel);
+ATTR_ADDR_MATCH(dimm);
+ATTR_ADDR_MATCH(rank);
+ATTR_ADDR_MATCH(bank);
+ATTR_ADDR_MATCH(page);
+ATTR_ADDR_MATCH(col);
+
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
u32 read;
int count;
- debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
- dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
- where, val);
+ edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val);
for (count = 0; count < 10; count++) {
if (count)
@@ -899,9 +920,11 @@ static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
* is reliable enough to check if the MC is using the
* three channels. However, this is not clear at the datasheet.
*/
-static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t i7core_inject_enable_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
u32 injectmask;
u64 mask = 0;
@@ -994,17 +1017,18 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
pci_write_config_dword(pvt->pci_noncore,
MC_CFG_CONTROL, 8);
- debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
- " inject 0x%08x\n",
- mask, pvt->inject.eccmask, injectmask);
+ edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n",
+ mask, pvt->inject.eccmask, injectmask);
return count;
}
-static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
- char *data)
+static ssize_t i7core_inject_enable_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct i7core_pvt *pvt = mci->pvt_info;
u32 injectmask;
@@ -1014,7 +1038,7 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
MC_CHANNEL_ERROR_INJECT, &injectmask);
- debugf0("Inject error read: 0x%018x\n", injectmask);
+ edac_dbg(0, "Inject error read: 0x%018x\n", injectmask);
if (injectmask & 0x0c)
pvt->inject.enable = 1;
@@ -1024,12 +1048,14 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
#define DECLARE_COUNTER(param) \
static ssize_t i7core_show_counter_##param( \
- struct mem_ctl_info *mci, \
- char *data) \
+ struct device *dev, \
+ struct device_attribute *mattr, \
+ char *data) \
{ \
+ struct mem_ctl_info *mci = to_mci(dev); \
struct i7core_pvt *pvt = mci->pvt_info; \
\
- debugf1("%s() \n", __func__); \
+ edac_dbg(1, "\n"); \
if (!pvt->ce_count_available || (pvt->is_registered)) \
return sprintf(data, "data unavailable\n"); \
return sprintf(data, "%lu\n", \
@@ -1037,121 +1063,179 @@ static ssize_t i7core_show_counter_##param( \
}
#define ATTR_COUNTER(param) \
- { \
- .attr = { \
- .name = __stringify(udimm##param), \
- .mode = (S_IRUGO | S_IWUSR) \
- }, \
- .show = i7core_show_counter_##param \
- }
+ static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \
+ i7core_show_counter_##param, \
+ NULL)
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
+ATTR_COUNTER(0);
+ATTR_COUNTER(1);
+ATTR_COUNTER(2);
+
/*
- * Sysfs struct
+ * inject_addrmatch device sysfs struct
*/
-static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
- ATTR_ADDR_MATCH(channel),
- ATTR_ADDR_MATCH(dimm),
- ATTR_ADDR_MATCH(rank),
- ATTR_ADDR_MATCH(bank),
- ATTR_ADDR_MATCH(page),
- ATTR_ADDR_MATCH(col),
- { } /* End of list */
+static struct attribute *i7core_addrmatch_attrs[] = {
+ &dev_attr_channel.attr,
+ &dev_attr_dimm.attr,
+ &dev_attr_rank.attr,
+ &dev_attr_bank.attr,
+ &dev_attr_page.attr,
+ &dev_attr_col.attr,
+ NULL
};
-static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
- .name = "inject_addrmatch",
- .mcidev_attr = i7core_addrmatch_attrs,
+static struct attribute_group addrmatch_grp = {
+ .attrs = i7core_addrmatch_attrs,
};
-static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
- ATTR_COUNTER(0),
- ATTR_COUNTER(1),
- ATTR_COUNTER(2),
- { .attr = { .name = NULL } }
+static const struct attribute_group *addrmatch_groups[] = {
+ &addrmatch_grp,
+ NULL
};
-static const struct mcidev_sysfs_group i7core_udimm_counters = {
- .name = "all_channel_counts",
- .mcidev_attr = i7core_udimm_counters_attrs,
+static void addrmatch_release(struct device *device)
+{
+ edac_dbg(1, "Releasing device %s\n", dev_name(device));
+ kfree(device);
+}
+
+static struct device_type addrmatch_type = {
+ .groups = addrmatch_groups,
+ .release = addrmatch_release,
};
-static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_section_show,
- .store = i7core_inject_section_store,
- }, {
- .attr = {
- .name = "inject_type",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_type_show,
- .store = i7core_inject_type_store,
- }, {
- .attr = {
- .name = "inject_eccmask",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_eccmask_show,
- .store = i7core_inject_eccmask_store,
- }, {
- .grp = &i7core_inject_addrmatch,
- }, {
- .attr = {
- .name = "inject_enable",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_enable_show,
- .store = i7core_inject_enable_store,
- },
- { } /* End of list */
+/*
+ * all_channel_counts sysfs struct
+ */
+
+static struct attribute *i7core_udimm_counters_attrs[] = {
+ &dev_attr_udimm0.attr,
+ &dev_attr_udimm1.attr,
+ &dev_attr_udimm2.attr,
+ NULL
};
-static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
- {
- .attr = {
- .name = "inject_section",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_section_show,
- .store = i7core_inject_section_store,
- }, {
- .attr = {
- .name = "inject_type",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_type_show,
- .store = i7core_inject_type_store,
- }, {
- .attr = {
- .name = "inject_eccmask",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_eccmask_show,
- .store = i7core_inject_eccmask_store,
- }, {
- .grp = &i7core_inject_addrmatch,
- }, {
- .attr = {
- .name = "inject_enable",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = i7core_inject_enable_show,
- .store = i7core_inject_enable_store,
- }, {
- .grp = &i7core_udimm_counters,
- },
- { } /* End of list */
+static struct attribute_group all_channel_counts_grp = {
+ .attrs = i7core_udimm_counters_attrs,
};
+static const struct attribute_group *all_channel_counts_groups[] = {
+ &all_channel_counts_grp,
+ NULL
+};
+
+static void all_channel_counts_release(struct device *device)
+{
+ edac_dbg(1, "Releasing device %s\n", dev_name(device));
+ kfree(device);
+}
+
+static struct device_type all_channel_counts_type = {
+ .groups = all_channel_counts_groups,
+ .release = all_channel_counts_release,
+};
+
+/*
+ * inject sysfs attributes
+ */
+
+static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR,
+ i7core_inject_section_show, i7core_inject_section_store);
+
+static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR,
+ i7core_inject_type_show, i7core_inject_type_store);
+
+
+static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR,
+ i7core_inject_eccmask_show, i7core_inject_eccmask_store);
+
+static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR,
+ i7core_inject_enable_show, i7core_inject_enable_store);
+
+static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject_section);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_type);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_eccmask);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_enable);
+ if (rc < 0)
+ return rc;
+
+ pvt->addrmatch_dev = kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL);
+ if (!pvt->addrmatch_dev)
+ return -ENOMEM;
+
+ pvt->addrmatch_dev->type = &addrmatch_type;
+ pvt->addrmatch_dev->bus = mci->dev.bus;
+ device_initialize(pvt->addrmatch_dev);
+ pvt->addrmatch_dev->parent = &mci->dev;
+ dev_set_name(pvt->addrmatch_dev, "inject_addrmatch");
+ dev_set_drvdata(pvt->addrmatch_dev, mci);
+
+ edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev));
+
+ rc = device_add(pvt->addrmatch_dev);
+ if (rc < 0)
+ return rc;
+
+ if (!pvt->is_registered) {
+ pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
+ GFP_KERNEL);
+ if (!pvt->chancounts_dev) {
+ put_device(pvt->addrmatch_dev);
+ device_del(pvt->addrmatch_dev);
+ return -ENOMEM;
+ }
+
+ pvt->chancounts_dev->type = &all_channel_counts_type;
+ pvt->chancounts_dev->bus = mci->dev.bus;
+ device_initialize(pvt->chancounts_dev);
+ pvt->chancounts_dev->parent = &mci->dev;
+ dev_set_name(pvt->chancounts_dev, "all_channel_counts");
+ dev_set_drvdata(pvt->chancounts_dev, mci);
+
+ edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev));
+
+ rc = device_add(pvt->chancounts_dev);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ edac_dbg(1, "\n");
+
+ device_remove_file(&mci->dev, &dev_attr_inject_section);
+ device_remove_file(&mci->dev, &dev_attr_inject_type);
+ device_remove_file(&mci->dev, &dev_attr_inject_eccmask);
+ device_remove_file(&mci->dev, &dev_attr_inject_enable);
+
+ if (!pvt->is_registered) {
+ put_device(pvt->chancounts_dev);
+ device_del(pvt->chancounts_dev);
+ }
+ put_device(pvt->addrmatch_dev);
+ device_del(pvt->addrmatch_dev);
+}
+
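[Editor's note] The large hunk above replaces the old mcidev_sysfs_attribute tables with plain driver-model objects: each injection knob becomes a DEVICE_ATTR() created directly on mci->dev, while the inject_addrmatch and all_channel_counts directories become separately allocated child devices whose attributes arrive via a device_type's attribute groups and which free themselves from a release() callback. Condensed to its skeleton below -- a sketch of the pattern only, not a drop-in replacement; error handling, the bus assignment and the second child device are omitted, and dev_attr_channel is the attribute generated by ATTR_ADDR_MATCH(channel) above:

	static struct attribute *example_attrs[] = {
		&dev_attr_channel.attr,
		NULL
	};

	static struct attribute_group example_grp = {
		.attrs = example_attrs,
	};

	static const struct attribute_group *example_groups[] = {
		&example_grp,
		NULL
	};

	static void example_release(struct device *dev)
	{
		kfree(dev);			/* the child device owns its own memory */
	}

	static struct device_type example_type = {
		.groups  = example_groups,
		.release = example_release,
	};

	static int example_create(struct mem_ctl_info *mci, struct device **out)
	{
		struct device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (!dev)
			return -ENOMEM;
		dev->type = &example_type;
		device_initialize(dev);
		dev->parent = &mci->dev;	/* appears under the mci sysfs node */
		dev_set_name(dev, "inject_addrmatch");
		*out = dev;
		return device_add(dev);		/* group attributes appear automatically */
	}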
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
@@ -1164,14 +1248,14 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
int i;
- debugf0(__FILE__ ": %s()\n", __func__);
+ edac_dbg(0, "\n");
for (i = 0; i < i7core_dev->n_devs; i++) {
struct pci_dev *pdev = i7core_dev->pdev[i];
if (!pdev)
continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
@@ -1214,12 +1298,12 @@ static unsigned i7core_pci_lastbus(void)
while ((b = pci_find_next_bus(b)) != NULL) {
bus = b->number;
- debugf0("Found bus %d\n", bus);
+ edac_dbg(0, "Found bus %d\n", bus);
if (bus > last_bus)
last_bus = bus;
}
- debugf0("Last bus %d\n", last_bus);
+ edac_dbg(0, "Last bus %d\n", last_bus);
return last_bus;
}
@@ -1326,10 +1410,10 @@ static int i7core_get_onedevice(struct pci_dev **prev,
return -ENODEV;
}
- debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
- socket, bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ socket, bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
* As stated on drivers/pci/search.c, the reference count for
@@ -1427,13 +1511,13 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
family = "unknown";
pvt->enable_scrub = false;
}
- debugf0("Detected a processor type %s\n", family);
+ edac_dbg(0, "Detected a processor type %s\n", family);
} else
goto error;
- debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev, i7core_dev->socket);
+ edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n",
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev, i7core_dev->socket);
if (PCI_SLOT(pdev->devfn) == 3 &&
PCI_FUNC(pdev->devfn) == 2)
@@ -1452,18 +1536,6 @@ error:
/****************************************************************************
Error check routines
****************************************************************************/
-static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
- const int chan,
- const int dimm,
- const int add)
-{
- int i;
-
- for (i = 0; i < add; i++) {
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
- chan, dimm, -1, "error", "", NULL);
- }
-}
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
const int chan,
@@ -1502,12 +1574,17 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
/*updated the edac core */
if (add0 != 0)
- i7core_rdimm_update_errcount(mci, chan, 0, add0);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0,
+ 0, 0, 0,
+ chan, 0, -1, "error", "");
if (add1 != 0)
- i7core_rdimm_update_errcount(mci, chan, 1, add1);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1,
+ 0, 0, 0,
+ chan, 1, -1, "error", "");
if (add2 != 0)
- i7core_rdimm_update_errcount(mci, chan, 2, add2);
-
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2,
+ 0, 0, 0,
+ chan, 2, -1, "error", "");
}
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
@@ -1530,8 +1607,8 @@ static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
&rcv[2][1]);
for (i = 0 ; i < 3; i++) {
- debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
- (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
+ edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
+ (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
/*if the channel has 3 dimms*/
if (pvt->channel[i].dimms > 2) {
new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
@@ -1562,7 +1639,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
int new0, new1, new2;
if (!pvt->pci_mcr[4]) {
- debugf0("%s MCR registers not found\n", __func__);
+ edac_dbg(0, "MCR registers not found\n");
return;
}
@@ -1626,7 +1703,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, msg[80];
+ char *type, *optype, *err;
enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
bool uncorrected_error = m->mcgstatus & 1ll << 61;
@@ -1704,20 +1781,18 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
err = "unknown";
}
- snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
-
/*
* Call the helper to output message
* FIXME: what to do if core_err_cnt > 1? Currently, it generates
* only one event
*/
if (uncorrected_error || !pvt->is_registered)
- edac_mc_handle_error(tp_event, mci,
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT,
m->addr & ~PAGE_MASK,
syndrome,
channel, dimm, -1,
- err, msg, m);
+ err, optype);
}
/*
@@ -2094,8 +2169,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
struct i7core_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
- __func__, &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev);
i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
@@ -2103,8 +2177,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
pvt = mci->pvt_info;
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
/* Disable scrubrate setting */
if (pvt->enable_scrub)
@@ -2114,9 +2187,10 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
i7core_pci_ctl_release(pvt);
/* Remove MC sysfs nodes */
- edac_mc_del_mc(mci->dev);
+ i7core_delete_sysfs_devices(mci);
+ edac_mc_del_mc(mci->pdev);
- debugf1("%s: free mci struct\n", mci->ctl_name);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
i7core_dev->mci = NULL;
@@ -2142,8 +2216,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &i7core_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
@@ -2172,15 +2245,11 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
if (unlikely(rc < 0))
goto fail0;
- if (pvt->is_registered)
- mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
- else
- mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
/* Get dimm basic config */
get_dimm_config(mci);
/* record ptr to the generic device */
- mci->dev = &i7core_dev->pdev[0]->dev;
+ mci->pdev = &i7core_dev->pdev[0]->dev;
/* Set the function pointer to an actual operation function */
mci->edac_check = i7core_check_error;
@@ -2190,8 +2259,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
/* FIXME: perhaps some code should go here that disables error
* reporting if we just enabled it
*/
@@ -2199,6 +2267,12 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
rc = -EINVAL;
goto fail0;
}
+ if (i7core_create_sysfs_devices(mci)) {
+ edac_dbg(0, "MC: failed to create sysfs nodes\n");
+ edac_mc_del_mc(mci->pdev);
+ rc = -EINVAL;
+ goto fail0;
+ }
/* Default error mask is any memory */
pvt->inject.channel = 0;
@@ -2298,7 +2372,7 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
{
struct i7core_dev *i7core_dev;
- debugf0(__FILE__ ": %s()\n", __func__);
+ edac_dbg(0, "\n");
/*
* we have a trouble here: pdev value for removal will be wrong, since
@@ -2347,7 +2421,7 @@ static int __init i7core_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -2374,7 +2448,7 @@ static int __init i7core_init(void)
*/
static void __exit i7core_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
pci_unregister_driver(&i7core_driver);
mce_unregister_decode_chain(&i7_mce_dec);
}
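[Editor's note] The i7core_mce_output_error() hunk above shows why the new count argument matters beyond a hardcoded 1: the hardware-reported core_err_cnt used to be folded into the message string and is now forwarded as a real argument, while the raw mce pointer is dropped. Reduced to the before/after call shape from the lines above (comments added; nothing beyond the diff is implied about the prototype):

	/* old: count only appears inside the text, mce pointer passed last */
	snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
	edac_mc_handle_error(tp_event, mci,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, syndrome,
			     channel, dimm, -1, err, msg, m);

	/* new: count is the third argument, optype is the message, no trailing pointer */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, syndrome,
			     channel, dimm, -1, err, optype);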
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 52072c28a8a6..90f303db5d1d 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -124,7 +124,7 @@ static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci,
*info)
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap);
if (info->eap & I82443BXGX_EAP_OFFSET_SBE)
/* Clear error to allow next error to be reported [p.61] */
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, pageoffset, 0,
edac_mc_find_csrow_by_page(mci, page),
- 0, -1, mci->ctl_name, "", NULL);
+ 0, -1, mci->ctl_name, "");
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, pageoffset, 0,
edac_mc_find_csrow_by_page(mci, page),
- 0, -1, mci->ctl_name, "", NULL);
+ 0, -1, mci->ctl_name, "");
}
return error_found;
@@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci)
{
struct i82443bxgx_edacmc_error_info info;
- debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82443bxgx_edacmc_get_error_info(mci, &info);
i82443bxgx_edacmc_process_error_info(mci, &info, 1);
}
@@ -197,18 +197,17 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
- debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
- mci->mc_idx, __FILE__, __func__, index, drbar);
+ edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n",
+ mci->mc_idx, index, drbar);
row_high_limit = ((u32) drbar << 23);
/* find the DRAM Chip Select Base address and mask */
- debugf1("MC%d: %s: %s() Row=%d, "
- "Boundary Address=%#0x, Last = %#0x\n",
- mci->mc_idx, __FILE__, __func__, index, row_high_limit,
- row_high_limit_last);
+ edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n",
+ mci->mc_idx, index, row_high_limit,
+ row_high_limit_last);
/* 440GX goes to 2GB, represented with a DRB of 0. */
if (row_high_limit_last && !row_high_limit)
@@ -241,7 +240,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
enum mem_type mtype;
enum edac_type edac_mode;
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "MC:\n");
/* Something is really hosed if PCI config space reads from
* the MC aren't working.
@@ -259,8 +258,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci);
- mci->dev = &pdev->dev;
+ edac_dbg(0, "MC: mci = %p\n", mci);
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc);
@@ -275,8 +274,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
mtype = MEM_RDR;
break;
default:
- debugf0("Unknown/reserved DRAM type value "
- "in DRAMC register!\n");
+ edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n");
mtype = -MEM_UNKNOWN;
}
@@ -305,8 +303,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
edac_mode = EDAC_SECDED;
break;
default:
- debugf0("%s(): Unknown/reserved ECC state "
- "in NBXCFG register!\n", __func__);
+ edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n");
edac_mode = EDAC_UNKNOWN;
break;
}
@@ -330,7 +327,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
mci->ctl_page_to_phys = NULL;
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
@@ -345,7 +342,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("MC: %s: %s(): success\n", __FILE__, __func__);
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
@@ -361,7 +358,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "MC:\n");
/* don't need to call pci_enable_device() */
rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data);
@@ -376,7 +373,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s: %s()\n", __FILE__, __func__);
+ edac_dbg(0, "\n");
if (i82443bxgx_pci)
edac_pci_release_generic_ctl(i82443bxgx_pci);
@@ -428,7 +425,7 @@ static int __init i82443bxgx_edacmc_init(void)
id = &i82443bxgx_pci_tbl[i];
}
if (!mci_pdev) {
- debugf0("i82443bxgx pci_get_device fail\n");
+ edac_dbg(0, "i82443bxgx pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -436,7 +433,7 @@ static int __init i82443bxgx_edacmc_init(void)
pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl);
if (pci_rc < 0) {
- debugf0("i82443bxgx init fail\n");
+ edac_dbg(0, "i82443bxgx init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
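[Editor's note] The csrow/channel/dimm accesses change shape here and in the neighbouring drivers because mci->csrows, csrow->channels and mci->dimms are now arrays of pointers rather than arrays of structs, so each element takes one more dereference. Reduced to the access pattern alone, mirroring the call sites above rather than the EDAC core definitions:

	/* old layout: arrays of structs */
	csrow = &mci->csrows[index];
	dimm  = csrow->channels[0].dimm;

	/* new layout: arrays of pointers, as used throughout this diff */
	csrow = mci->csrows[index];
	dimm  = csrow->channels[0]->dimm;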
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 08045059d10b..1faa74971513 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -67,7 +67,7 @@ static void i82860_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -109,25 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
- dimm = mci->csrows[row].channels[0].dimm;
+ dimm = mci->csrows[row]->channels[0]->dimm;
if (info->errsts & 0x0002)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
info->eap, 0, 0,
dimm->location[0], dimm->location[1], -1,
- "i82860 UE", "", NULL);
+ "i82860 UE", "");
else
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
info->eap, 0, info->derrsyn,
dimm->location[0], dimm->location[1], -1,
- "i82860 CE", "", NULL);
+ "i82860 CE", "");
return 1;
}
@@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci)
{
struct i82860_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82860_get_error_info(mci, &info);
i82860_process_error_info(mci, &info, 1);
}
@@ -161,14 +161,13 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
* in all eight rows.
*/
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -210,8 +209,8 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
if (!mci)
return -ENOMEM;
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
+ edac_dbg(3, "init mci\n");
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
/* I"m not sure about this but I think that all RDRAM is SECDED */
@@ -229,7 +228,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
@@ -245,7 +244,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
@@ -260,7 +259,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
i82860_printk(KERN_INFO, "i82860 init one\n");
if (pci_enable_device(pdev) < 0)
@@ -278,7 +277,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (i82860_pci)
edac_pci_release_generic_ctl(i82860_pci);
@@ -311,7 +310,7 @@ static int __init i82860_init(void)
{
int pci_rc;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -324,7 +323,7 @@ static int __init i82860_init(void)
PCI_DEVICE_ID_INTEL_82860_0, NULL);
if (mci_pdev == NULL) {
- debugf0("860 pci_get_device fail\n");
+ edac_dbg(0, "860 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -332,7 +331,7 @@ static int __init i82860_init(void)
pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
if (pci_rc < 0) {
- debugf0("860 init fail\n");
+ edac_dbg(0, "860 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -352,7 +351,7 @@ fail0:
static void __exit i82860_exit(void)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
pci_unregister_driver(&i82860_driver);
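Another pattern repeated in every driver here: edac_mc_handle_error() gains a new third argument, the number of errors being reported (1 in these polling drivers), and drops the trailing pointer argument. A standalone sketch of why a count parameter is useful follows; report_error() and its counters are hypothetical illustrations, not the EDAC API:

#include <stdio.h>

/* Hypothetical error sink: accumulates per-type totals in one call
 * instead of being invoked once per detected error. */
static unsigned long ce_total, ue_total;

enum err_type { ERR_CORRECTED, ERR_UNCORRECTED };

static void report_error(enum err_type type, unsigned int count,
			 unsigned long page, const char *msg)
{
	if (type == ERR_CORRECTED)
		ce_total += count;
	else
		ue_total += count;
	printf("%s: %u error(s) at page 0x%lx\n", msg, count, page);
}

int main(void)
{
	/* A status register that latched 3 corrected errors can be
	 * reported with a single call, keeping the counters exact. */
	report_error(ERR_CORRECTED, 3, 0x1234, "example CE");
	report_error(ERR_UNCORRECTED, 1, 0x1240, "example UE");
	printf("totals: CE=%lu UE=%lu\n", ce_total, ue_total);
	return 0;
}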
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index b613e31c16e5..3e416b1a6b53 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -189,7 +189,7 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -227,7 +227,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
{
int row, multi_chan;
- multi_chan = mci->csrows[0].nr_channels - 1;
+ multi_chan = mci->csrows[0]->nr_channels - 1;
if (!(info->errsts & 0x0081))
return 0;
@@ -236,9 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
- "UE overwrote CE", "", NULL);
+ "UE overwrote CE", "");
info->errsts = info->errsts2;
}
@@ -246,15 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
info->eap, 0, 0,
row, -1, -1,
- "i82875p UE", "", NULL);
+ "i82875p UE", "");
else
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
info->eap, 0, info->derrsyn,
row, multi_chan ? (info->des & 0x1) : 0,
- -1, "i82875p CE", "", NULL);
+ -1, "i82875p CE", "");
return 1;
}
@@ -263,7 +263,7 @@ static void i82875p_check(struct mem_ctl_info *mci)
{
struct i82875p_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82875p_get_error_info(mci, &info);
i82875p_process_error_info(mci, &info, 1);
}
@@ -367,12 +367,11 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
*/
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
+ csrow = mci->csrows[index];
value = readb(ovrfl_window + I82875P_DRB + index);
cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
if (cumul_size == last_cumul_size)
continue; /* not populated */
@@ -382,7 +381,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
last_cumul_size = cumul_size;
for (j = 0; j < nr_chans; j++) {
- dimm = csrow->channels[j].dimm;
+ dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / nr_chans;
dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
@@ -405,7 +404,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
u32 nr_chans;
struct i82875p_error_info discard;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
@@ -426,11 +425,8 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
goto fail0;
}
- /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */
- kobject_get(&mci->edac_mci_kobj);
-
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
+ edac_dbg(3, "init mci\n");
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_UNKNOWN;
@@ -440,7 +436,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
mci->dev_name = pci_name(pdev);
mci->edac_check = i82875p_check;
mci->ctl_page_to_phys = NULL;
- debugf3("%s(): init pvt\n", __func__);
+ edac_dbg(3, "init pvt\n");
pvt = (struct i82875p_pvt *)mci->pvt_info;
pvt->ovrfl_pdev = ovrfl_pdev;
pvt->ovrfl_window = ovrfl_window;
@@ -451,7 +447,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail1;
}
@@ -467,11 +463,10 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail1:
- kobject_put(&mci->edac_mci_kobj);
edac_mc_free(mci);
fail0:
@@ -489,7 +484,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
i82875p_printk(KERN_INFO, "i82875p init one\n");
if (pci_enable_device(pdev) < 0)
@@ -508,7 +503,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
struct i82875p_pvt *pvt = NULL;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (i82875p_pci)
edac_pci_release_generic_ctl(i82875p_pci);
@@ -554,7 +549,7 @@ static int __init i82875p_init(void)
{
int pci_rc;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -569,7 +564,7 @@ static int __init i82875p_init(void)
PCI_DEVICE_ID_INTEL_82875_0, NULL);
if (!mci_pdev) {
- debugf0("875p pci_get_device fail\n");
+ edac_dbg(0, "875p pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -577,7 +572,7 @@ static int __init i82875p_init(void)
pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
if (pci_rc < 0) {
- debugf0("875p init fail\n");
+ edac_dbg(0, "875p init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -597,7 +592,7 @@ fail0:
static void __exit i82875p_exit(void)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
i82875p_remove_one(mci_pdev);
pci_dev_put(mci_pdev);
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 433332c7cdba..069e26c11c4f 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -241,7 +241,7 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -288,8 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
- -1, -1, -1, "UE overwrote CE", "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "");
info->errsts = info->errsts2;
}
@@ -308,21 +308,21 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
(info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
return 0;
}
- chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+ chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].channels[chan].dimm->grain));
+ (1 << mci->csrows[row]->channels[chan]->dimm->grain));
if (info->errsts & 0x0002)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offst, 0,
row, -1, -1,
- "i82975x UE", "", NULL);
+ "i82975x UE", "");
else
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, offst, info->derrsyn,
row, chan ? chan : 0, -1,
- "i82975x CE", "", NULL);
+ "i82975x CE", "");
return 1;
}
@@ -331,7 +331,7 @@ static void i82975x_check(struct mem_ctl_info *mci)
{
struct i82975x_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
i82975x_get_error_info(mci, &info);
i82975x_process_error_info(mci, &info, 1);
}
@@ -394,7 +394,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
*/
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
+ csrow = mci->csrows[index];
value = readb(mch_window + I82975X_DRB + index +
((index >= 4) ? 0x80 : 0));
@@ -406,8 +406,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
*/
if (csrow->nr_channels > 1)
cumul_size <<= 1;
- debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
- cumul_size);
+ edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
nr_pages = cumul_size - last_cumul_size;
if (!nr_pages)
@@ -421,10 +420,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
*/
dtype = i82975x_dram_type(mch_window, index);
for (chan = 0; chan < csrow->nr_channels; chan++) {
- dimm = mci->csrows[index].channels[chan].dimm;
+ dimm = mci->csrows[index]->channels[chan]->dimm;
dimm->nr_pages = nr_pages / csrow->nr_channels;
- strncpy(csrow->channels[chan].dimm->label,
+ strncpy(csrow->channels[chan]->dimm->label,
labels[(index >> 1) + (chan * 2)],
EDAC_MC_LABEL_LEN);
dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
@@ -489,11 +488,11 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
u8 c1drb[4];
#endif
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar);
if (!(mchbar & 1)) {
- debugf3("%s(): failed, MCHBAR disabled!\n", __func__);
+ edac_dbg(3, "failed, MCHBAR disabled!\n");
goto fail0;
}
mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */
@@ -558,8 +557,8 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
goto fail1;
}
- debugf3("%s(): init mci\n", __func__);
- mci->dev = &pdev->dev;
+ edac_dbg(3, "init mci\n");
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -569,7 +568,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
mci->dev_name = pci_name(pdev);
mci->edac_check = i82975x_check;
mci->ctl_page_to_phys = NULL;
- debugf3("%s(): init pvt\n", __func__);
+ edac_dbg(3, "init pvt\n");
pvt = (struct i82975x_pvt *) mci->pvt_info;
pvt->mch_window = mch_window;
i82975x_init_csrows(mci, pdev, mch_window);
@@ -578,12 +577,12 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
/* finalize this instance of memory controller with edac core */
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail2;
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail2:
@@ -601,7 +600,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
@@ -619,7 +618,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
struct mem_ctl_info *mci;
struct i82975x_pvt *pvt;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (mci == NULL)
@@ -655,7 +654,7 @@ static int __init i82975x_init(void)
{
int pci_rc;
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -669,7 +668,7 @@ static int __init i82975x_init(void)
PCI_DEVICE_ID_INTEL_82975_0, NULL);
if (!mci_pdev) {
- debugf0("i82975x pci_get_device fail\n");
+ edac_dbg(0, "i82975x pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -677,7 +676,7 @@ static int __init i82975x_init(void)
pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl);
if (pci_rc < 0) {
- debugf0("i82975x init fail\n");
+ edac_dbg(0, "i82975x init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -697,7 +696,7 @@ fail0:
static void __exit i82975x_exit(void)
{
- debugf3("%s()\n", __func__);
+ edac_dbg(3, "\n");
pci_unregister_driver(&i82975x_driver);
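A third recurring change: accesses such as &mci->csrows[i] and csrow->channels[j].dimm become mci->csrows[i] and csrow->channels[j]->dimm, because the EDAC core now hands drivers arrays of pointers rather than embedded struct arrays. A small runnable sketch contrasting the two access styles, using simplified stand-in structs rather than the real mem_ctl_info:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for dimm_info/csrow_info/mem_ctl_info. */
struct dimm { unsigned int nr_pages; };
struct chan { struct dimm *dimm; };
struct csrow { int nr_channels; struct chan **channels; };
struct mc    { int nr_csrows; struct csrow **csrows; };

int main(void)
{
	struct mc *mci = calloc(1, sizeof(*mci));
	int i;

	mci->nr_csrows = 2;
	mci->csrows = calloc(mci->nr_csrows, sizeof(*mci->csrows));
	for (i = 0; i < mci->nr_csrows; i++) {
		mci->csrows[i] = calloc(1, sizeof(**mci->csrows));
		mci->csrows[i]->nr_channels = 1;
		mci->csrows[i]->channels = calloc(1, sizeof(struct chan *));
		mci->csrows[i]->channels[0] = calloc(1, sizeof(struct chan));
		mci->csrows[i]->channels[0]->dimm = calloc(1, sizeof(struct dimm));
	}

	/* Old style (embedded arrays):  csrow = &mci->csrows[i];
	 *                               dimm  = csrow->channels[0].dimm;
	 * New style (pointer arrays):   csrow = mci->csrows[i];
	 *                               dimm  = csrow->channels[0]->dimm;  */
	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow *csrow = mci->csrows[i];
		struct dimm *dimm = csrow->channels[0]->dimm;

		dimm->nr_pages = 256;
		printf("csrow %d: %u pages\n", i, dimm->nr_pages);
	}
	return 0;
}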
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 0e374625f6f8..a1e791ec25d3 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -49,34 +49,45 @@ static u32 orig_hid1[2];
/************************ MC SYSFS parts ***********************************/
-static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev,
+ struct device_attribute *mattr,
char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->mc_vbase +
MPC85XX_MC_DATA_ERR_INJECT_HI));
}
-static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
+static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev,
+ struct device_attribute *mattr,
char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->mc_vbase +
MPC85XX_MC_DATA_ERR_INJECT_LO));
}
-static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
+static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
return sprintf(data, "0x%08x",
in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
}
-static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
+static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
@@ -86,9 +97,11 @@ static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
return 0;
}
-static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
+static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev,
+ struct device_attribute *mattr,
const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
@@ -98,9 +111,11 @@ static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
return 0;
}
-static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
- const char *data, size_t count)
+static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
+ struct mem_ctl_info *mci = to_mci(dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
if (isdigit(*data)) {
out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
@@ -110,38 +125,35 @@ static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
return 0;
}
-static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
- {
- .attr = {
- .name = "inject_data_hi",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_data_hi_show,
- .store = mpc85xx_mc_inject_data_hi_store},
- {
- .attr = {
- .name = "inject_data_lo",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_data_lo_show,
- .store = mpc85xx_mc_inject_data_lo_store},
- {
- .attr = {
- .name = "inject_ctrl",
- .mode = (S_IRUGO | S_IWUSR)
- },
- .show = mpc85xx_mc_inject_ctrl_show,
- .store = mpc85xx_mc_inject_ctrl_store},
+DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR,
+ mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store);
+DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
+ mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store);
+DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
+ mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store);
- /* End of list */
- {
- .attr = {.name = NULL}
- }
-};
+static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl);
+ if (rc < 0)
+ return rc;
-static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+ return 0;
+}
+
+static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci)
{
- mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
+ device_remove_file(&mci->dev, &dev_attr_inject_data_hi);
+ device_remove_file(&mci->dev, &dev_attr_inject_data_lo);
+ device_remove_file(&mci->dev, &dev_attr_inject_ctrl);
}
/**************************** PCI Err device ***************************/
@@ -268,7 +280,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
@@ -291,7 +303,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op)
}
devres_remove_group(&op->dev, mpc85xx_pci_err_probe);
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
return 0;
@@ -309,7 +321,7 @@ static int mpc85xx_pci_err_remove(struct platform_device *op)
struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev);
struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
orig_pci_err_cap_dr);
@@ -570,7 +582,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
@@ -598,7 +610,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op)
devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
return 0;
@@ -616,7 +628,7 @@ static int mpc85xx_l2_err_remove(struct platform_device *op)
struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
@@ -813,7 +825,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
pfn = err_addr >> PAGE_SHIFT;
for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
- csrow = &mci->csrows[row_index];
+ csrow = mci->csrows[row_index];
if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
break;
}
@@ -854,16 +866,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
pfn, err_addr & ~PAGE_MASK, syndrome,
row_index, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
pfn, err_addr & ~PAGE_MASK, syndrome,
row_index, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
@@ -933,8 +945,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
u32 start;
u32 end;
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
@@ -990,9 +1002,9 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
pdata = mci->pvt_info;
pdata->name = "mpc85xx_mc_err";
pdata->irq = NO_IRQ;
- mci->dev = &op->dev;
+ mci->pdev = &op->dev;
pdata->edac_idx = edac_mc_idx++;
- dev_set_drvdata(mci->dev, mci);
+ dev_set_drvdata(mci->pdev, mci);
mci->ctl_name = pdata->name;
mci->dev_name = pdata->name;
@@ -1026,7 +1038,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
goto err;
}
- debugf3("%s(): init mci\n", __func__);
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
MEM_FLAG_DDR | MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
@@ -1041,8 +1053,6 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
mci->scrub_mode = SCRUB_SW_SRC;
- mpc85xx_set_mc_sysfs_attributes(mci);
-
mpc85xx_init_csrows(mci);
/* store the original error disable bits */
@@ -1054,7 +1064,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
+ goto err;
+ }
+
+ if (mpc85xx_create_sysfs_attributes(mci)) {
+ edac_mc_del_mc(mci->pdev);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
@@ -1088,7 +1104,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
}
devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
return 0;
@@ -1106,7 +1122,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (edac_op_state == EDAC_OPSTATE_INT) {
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
@@ -1117,6 +1133,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op)
orig_ddr_err_disable);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
+ mpc85xx_remove_sysfs_attributes(mci);
edac_mc_del_mc(&op->dev);
edac_mc_free(mci);
return 0;
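In the mpc85xx hunks, the reworked sysfs show/store callbacks receive a struct device, and the new to_mci() macro recovers the owning mem_ctl_info with container_of(). A user-space sketch of that recovery follows; the structs are simplified and container_of() is written out via offsetof(), which matches the kernel macro's effect though the kernel version adds type checking:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of(): step back from a member pointer to the
 * structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };

struct mem_ctl_info {
	int mc_idx;
	struct device dev;	/* embedded device, as in the patch */
};

#define to_mci(d) container_of(d, struct mem_ctl_info, dev)

/* Callback in the new style: gets the struct device, derives the mci. */
static void show(struct device *dev)
{
	struct mem_ctl_info *mci = to_mci(dev);

	printf("callback for %s: mc_idx=%d\n", dev->name, mci->mc_idx);
}

int main(void)
{
	struct mem_ctl_info mci = { .mc_idx = 0, .dev = { .name = "mc0" } };

	show(&mci.dev);
	return 0;
}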
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index b0bb5a3d2527..2b315c2edc3c 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -169,7 +169,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
MV64X60_PCIx_ERR_MASK_VAL);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
- debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_pci_add_device()\n");
goto err;
}
@@ -194,7 +194,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
@@ -210,7 +210,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
edac_pci_del_device(&pdev->dev);
@@ -336,7 +336,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
@@ -363,7 +363,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
@@ -379,7 +379,7 @@ static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
@@ -531,7 +531,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
pdata->edac_idx = edac_dev_idx++;
if (edac_device_add_device(edac_dev) > 0) {
- debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ edac_dbg(3, "failed edac_device_add_device()\n");
goto err;
}
@@ -558,7 +558,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
@@ -574,7 +574,7 @@ static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(edac_dev);
@@ -611,17 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & PAGE_MASK, syndrome,
0, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
else /* 2 bit error, UE */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
err_addr >> PAGE_SHIFT,
err_addr & PAGE_MASK, 0,
0, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -670,8 +670,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
- csrow = &mci->csrows[0];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[0];
+ dimm = csrow->channels[0]->dimm;
dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
dimm->grain = 8;
@@ -724,7 +724,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
}
pdata = mci->pvt_info;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
platform_set_drvdata(pdev, mci);
pdata->name = "mv64x60_mc_err";
pdata->irq = NO_IRQ;
@@ -766,7 +766,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
goto err2;
}
- debugf3("%s(): init mci\n", __func__);
+ edac_dbg(3, "init mci\n");
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
@@ -790,7 +790,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto err;
}
@@ -815,7 +815,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
}
/* get this far and it's successful */
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
@@ -831,7 +831,7 @@ static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index b095a906a994..2d35b78ada3c 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -74,7 +74,7 @@ static int system_mmc_id;
static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
{
- struct pci_dev *pdev = to_pci_dev(mci->dev);
+ struct pci_dev *pdev = to_pci_dev(mci->pdev);
u32 tmp;
pci_read_config_dword(pdev, MCDEBUG_ERRSTA,
@@ -95,7 +95,7 @@ static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci)
static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
{
- struct pci_dev *pdev = to_pci_dev(mci->dev);
+ struct pci_dev *pdev = to_pci_dev(mci->pdev);
u32 errlog1a;
u32 cs;
@@ -110,16 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
- mci->csrows[cs].first_page, 0, 0,
- cs, 0, -1, mci->ctl_name, "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+ mci->csrows[cs]->first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "");
}
/* correctable/single-bit errors */
if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
- mci->csrows[cs].first_page, 0, 0,
- cs, 0, -1, mci->ctl_name, "", NULL);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+ mci->csrows[cs]->first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "");
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -141,8 +141,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
int index;
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
@@ -225,7 +225,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
MCCFG_ERRCOR_ECC_GEN_EN |
MCCFG_ERRCOR_ECC_CRR_EN;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ?
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index f3f9fed06ad7..bf0957635991 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,10 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, 0,
row, 0, -1,
- message, "", NULL);
+ message, "");
}
/**
@@ -758,10 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, offset, 0,
row, 0, -1,
- message, "", NULL);
+ message, "");
}
/**
@@ -1027,9 +1027,9 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
/* Initial driver pointers and private data */
- mci->dev = &op->dev;
+ mci->pdev = &op->dev;
- dev_set_drvdata(mci->dev, mci);
+ dev_set_drvdata(mci->pdev, mci);
pdata = mci->pvt_info;
@@ -1334,7 +1334,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
return 0;
fail1:
- edac_mc_del_mc(mci->dev);
+ edac_mc_del_mc(mci->pdev);
fail:
edac_mc_free(mci);
@@ -1368,7 +1368,7 @@ ppc4xx_edac_remove(struct platform_device *op)
dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN);
- edac_mc_del_mc(mci->dev);
+ edac_mc_del_mc(mci->pdev);
edac_mc_free(mci);
return 0;
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e1cacd164f31..f854debd5533 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -140,7 +140,7 @@ static void r82600_get_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
pci_read_config_dword(pdev, R82600_EAP, &info->eapr);
if (info->eapr & BIT(0))
@@ -179,11 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
error_found = 1;
if (handle_errors)
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
page, 0, syndrome,
edac_mc_find_csrow_by_page(mci, page),
0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
if (info->eapr & BIT(1)) { /* UE? */
@@ -191,11 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
if (handle_errors)
/* 82600 doesn't give enough info */
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
page, 0, 0,
edac_mc_find_csrow_by_page(mci, page),
0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
return error_found;
@@ -205,7 +205,7 @@ static void r82600_check(struct mem_ctl_info *mci)
{
struct r82600_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
r82600_get_error_info(mci, &info);
r82600_process_error_info(mci, &info, 1);
}
@@ -230,19 +230,19 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
- csrow = &mci->csrows[index];
- dimm = csrow->channels[0].dimm;
+ csrow = mci->csrows[index];
+ dimm = csrow->channels[0]->dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
- debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar);
+ edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar);
row_high_limit = ((u32) drbar << 24);
/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
- debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n",
- __func__, index, row_high_limit, row_high_limit_last);
+ edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n",
+ index, row_high_limit, row_high_limit_last);
/* Empty row [p.57] */
if (row_high_limit == row_high_limit_last)
@@ -277,14 +277,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
u32 sdram_refresh_rate;
struct r82600_error_info discard;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
pci_read_config_dword(pdev, R82600_EAP, &eapr);
scrub_disabled = eapr & BIT(31);
sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
- debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
- sdram_refresh_rate);
- debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
+ edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate);
+ edac_dbg(2, "DRAMC register = %#0x\n", dramcr);
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = R82600_NR_CSROWS;
layers[0].is_virt_csrow = true;
@@ -295,8 +294,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
if (mci == NULL)
return -ENOMEM;
- debugf0("%s(): mci = %p\n", __func__, mci);
- mci->dev = &pdev->dev;
+ edac_dbg(0, "mci = %p\n", mci);
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
/* FIXME try to work out if the chip leads have been used for COM2
@@ -311,8 +310,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
if (ecc_enabled(dramcr)) {
if (scrub_disabled)
- debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
- "%#0x\n", __func__, mci, eapr);
+ edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n",
+ mci, eapr);
} else
mci->edac_cap = EDAC_FLAG_NONE;
@@ -329,15 +328,14 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
* type of memory controller. The ID is therefore hardcoded to 0.
*/
if (edac_mc_add_mc(mci)) {
- debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
if (disable_hardware_scrub) {
- debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
- __func__);
+ edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n");
pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31));
}
@@ -352,7 +350,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
__func__);
}
- debugf3("%s(): success\n", __func__);
+ edac_dbg(3, "success\n");
return 0;
fail:
@@ -364,7 +362,7 @@ fail:
static int __devinit r82600_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
/* don't need to call pci_enable_device() */
return r82600_probe1(pdev, ent->driver_data);
@@ -374,7 +372,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
if (r82600_pci)
edac_pci_release_generic_ctl(r82600_pci);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 36ad17e79d61..f3b1f9fafa4b 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -381,8 +381,8 @@ static inline int numrank(u32 mtr)
int ranks = (1 << RANK_CNT_BITS(mtr));
if (ranks > 4) {
- debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)",
- ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
+ ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
return -EINVAL;
}
@@ -394,8 +394,8 @@ static inline int numrow(u32 mtr)
int rows = (RANK_WIDTH_BITS(mtr) + 12);
if (rows < 13 || rows > 18) {
- debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)",
- rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
+ rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
@@ -407,8 +407,8 @@ static inline int numcol(u32 mtr)
int cols = (COL_WIDTH_BITS(mtr) + 10);
if (cols > 12) {
- debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)",
- cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
+ edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
+ cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
return -EINVAL;
}
@@ -475,8 +475,8 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
- debugf1("Associated %02x.%02x.%d with %p\n",
- bus, slot, func, sbridge_dev->pdev[i]);
+ edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
+ bus, slot, func, sbridge_dev->pdev[i]);
return sbridge_dev->pdev[i];
}
}
@@ -523,45 +523,45 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_br, SAD_CONTROL, &reg);
pvt->sbridge_dev->node_id = NODE_ID(reg);
- debugf0("mc#%d: Node ID: %d, source ID: %d\n",
- pvt->sbridge_dev->mc,
- pvt->sbridge_dev->node_id,
- pvt->sbridge_dev->source_id);
+ edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
+ pvt->sbridge_dev->mc,
+ pvt->sbridge_dev->node_id,
+ pvt->sbridge_dev->source_id);
pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
if (IS_MIRROR_ENABLED(reg)) {
- debugf0("Memory mirror is enabled\n");
+ edac_dbg(0, "Memory mirror is enabled\n");
pvt->is_mirrored = true;
} else {
- debugf0("Memory mirror is disabled\n");
+ edac_dbg(0, "Memory mirror is disabled\n");
pvt->is_mirrored = false;
}
pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
- debugf0("Lockstep is enabled\n");
+ edac_dbg(0, "Lockstep is enabled\n");
mode = EDAC_S8ECD8ED;
pvt->is_lockstep = true;
} else {
- debugf0("Lockstep is disabled\n");
+ edac_dbg(0, "Lockstep is disabled\n");
mode = EDAC_S4ECD4ED;
pvt->is_lockstep = false;
}
if (IS_CLOSE_PG(pvt->info.mcmtr)) {
- debugf0("address map is on closed page mode\n");
+ edac_dbg(0, "address map is on closed page mode\n");
pvt->is_close_pg = true;
} else {
- debugf0("address map is on open page mode\n");
+ edac_dbg(0, "address map is on open page mode\n");
pvt->is_close_pg = false;
}
pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
- debugf0("Memory is registered\n");
+ edac_dbg(0, "Memory is registered\n");
mtype = MEM_RDDR3;
} else {
- debugf0("Memory is unregistered\n");
+ edac_dbg(0, "Memory is unregistered\n");
mtype = MEM_DDR3;
}
@@ -576,7 +576,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
- debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
+ edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
if (IS_DIMM_PRESENT(mtr)) {
pvt->channel[i].dimms++;
@@ -588,10 +588,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
size = (rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
- pvt->sbridge_dev->mc, i, j,
- size, npages,
- banks, ranks, rows, cols);
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ pvt->sbridge_dev->mc, i, j,
+ size, npages,
+ banks, ranks, rows, cols);
dimm->nr_pages = npages;
dimm->grain = 32;
@@ -629,8 +629,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (1 + pvt->tolm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOLM: %u.%03u GB (0x%016Lx)\n",
- mb, kb, (u64)pvt->tolm);
+ edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
/* Address range is already 45:25 */
pci_read_config_dword(pvt->pci_sad1, TOHM,
@@ -639,8 +638,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (1 + pvt->tohm) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TOHM: %u.%03u GB (0x%016Lx)",
- mb, kb, (u64)pvt->tohm);
+ edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm);
/*
* Step 2) Get SAD range and SAD Interleave list
@@ -663,13 +661,13 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n",
- n_sads,
- get_dram_attr(reg),
- mb, kb,
- ((u64)tmp_mb) << 20L,
- INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]",
- reg);
+ edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
+ n_sads,
+ get_dram_attr(reg),
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
+ reg);
prv = limit;
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -679,8 +677,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
if (j > 0 && sad_interl == sad_pkg(reg, j))
break;
- debugf0("SAD#%d, interleave #%d: %d\n",
- n_sads, j, sad_pkg(reg, j));
+ edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
+ n_sads, j, sad_pkg(reg, j));
}
}
@@ -697,16 +695,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = (limit + 1) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
- n_tads, mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)TAD_SOCK(reg),
- (u32)TAD_CH(reg),
- (u32)TAD_TGT0(reg),
- (u32)TAD_TGT1(reg),
- (u32)TAD_TGT2(reg),
- (u32)TAD_TGT3(reg),
- reg);
+ edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ n_tads, mb, kb,
+ ((u64)tmp_mb) << 20L,
+ (u32)TAD_SOCK(reg),
+ (u32)TAD_CH(reg),
+ (u32)TAD_TGT0(reg),
+ (u32)TAD_TGT1(reg),
+ (u32)TAD_TGT2(reg),
+ (u32)TAD_TGT3(reg),
+ reg);
prv = limit;
}
@@ -722,11 +720,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
&reg);
tmp_mb = TAD_OFFSET(reg) >> 20;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- reg);
+ edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
+ i, j,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ reg);
}
}
@@ -747,12 +745,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = RIR_LIMIT(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
- i, j,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- rir_way,
- reg);
+ edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
+ i, j,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ rir_way,
+ reg);
for (k = 0; k < rir_way; k++) {
pci_read_config_dword(pvt->pci_tad[i],
@@ -761,12 +759,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
tmp_mb = RIR_OFFSET(reg) << 6;
mb = div_u64_rem(tmp_mb, 1000, &kb);
- debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
- i, j, k,
- mb, kb,
- ((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
- reg);
+ edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ i, j, k,
+ mb, kb,
+ ((u64)tmp_mb) << 20L,
+ (u32)RIR_RNK_TGT(reg),
+ reg);
}
}
}
@@ -853,16 +851,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way))
break;
sad_interleave[sad_way] = sad_pkg(reg, sad_way);
- debugf0("SAD interleave #%d: %d\n",
- sad_way, sad_interleave[sad_way]);
+ edac_dbg(0, "SAD interleave #%d: %d\n",
+ sad_way, sad_interleave[sad_way]);
}
- debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
- pvt->sbridge_dev->mc,
- n_sads,
- addr,
- limit,
- sad_way + 7,
- interleave_mode ? "" : "XOR[18:16]");
+ edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
+ pvt->sbridge_dev->mc,
+ n_sads,
+ addr,
+ limit,
+ sad_way + 7,
+ interleave_mode ? "" : "XOR[18:16]");
if (interleave_mode)
idx = ((addr >> 6) ^ (addr >> 16)) & 7;
else
@@ -884,8 +882,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
return -EINVAL;
}
*socket = sad_interleave[idx];
- debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n",
- idx, sad_way, *socket);
+ edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
+ idx, sad_way, *socket);
/*
* Move to the proper node structure, in order to access the
@@ -972,16 +970,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
offset = TAD_OFFSET(tad_offset);
- debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
- n_tads,
- addr,
- limit,
- (u32)TAD_SOCK(reg),
- ch_way,
- offset,
- idx,
- base_ch,
- *channel_mask);
+ edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
+ n_tads,
+ addr,
+ limit,
+ (u32)TAD_SOCK(reg),
+ ch_way,
+ offset,
+ idx,
+ base_ch,
+ *channel_mask);
/* Calculate channel address */
/* Remove the TAD offset */
@@ -1017,11 +1015,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = RIR_LIMIT(reg);
mb = div_u64_rem(limit >> 20, 1000, &kb);
- debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
- n_rir,
- mb, kb,
- limit,
- 1 << RIR_WAY(reg));
+ edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
+ n_rir,
+ mb, kb,
+ limit,
+ 1 << RIR_WAY(reg));
if (ch_addr <= limit)
break;
}
@@ -1042,12 +1040,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
&reg);
*rank = RIR_RNK_TGT(reg);
- debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
- n_rir,
- ch_addr,
- limit,
- rir_way,
- idx);
+ edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+ n_rir,
+ ch_addr,
+ limit,
+ rir_way,
+ idx);
return 0;
}
@@ -1064,14 +1062,14 @@ static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
{
int i;
- debugf0(__FILE__ ": %s()\n", __func__);
+ edac_dbg(0, "\n");
for (i = 0; i < sbridge_dev->n_devs; i++) {
struct pci_dev *pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
- debugf0("Removing dev %02x:%02x.%d\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ edac_dbg(0, "Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
pci_dev_put(pdev);
}
}
@@ -1177,10 +1175,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
return -ENODEV;
}
- debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev,
- dev_descr->func,
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
* As stated on drivers/pci/search.c, the reference count for
@@ -1297,10 +1294,10 @@ static int mci_bind_devs(struct mem_ctl_info *mci,
goto error;
}
- debugf0("Associated PCI %02x.%02d.%d with dev = %p\n",
- sbridge_dev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev);
+ edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
}
/* Check if everything were registered */
@@ -1435,8 +1432,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* to the group of dimm's where the error may be happening.
*/
snprintf(msg, sizeof(msg),
- "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
- core_err_cnt,
+ "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
area_type,
@@ -1445,20 +1441,20 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
channel_mask,
rank);
- debugf0("%s", msg);
+ edac_dbg(0, "%s\n", msg);
/* FIXME: need support for channel mask */
/* Call the helper to output message */
- edac_mc_handle_error(tp_event, mci,
+ edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
channel, dimm, -1,
- optype, msg, m);
+ optype, msg);
return;
err_parsing:
- edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
-1, -1, -1,
- msg, "", m);
+ msg, "");
}
@@ -1592,8 +1588,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
struct sbridge_pvt *pvt;
if (unlikely(!mci || !mci->pvt_info)) {
- debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
- __func__, &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
return;
@@ -1601,13 +1596,13 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
pvt = mci->pvt_info;
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
/* Remove MC sysfs nodes */
- edac_mc_del_mc(mci->dev);
+ edac_mc_del_mc(mci->pdev);
- debugf1("%s: free mci struct\n", mci->ctl_name);
+ edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
kfree(mci->ctl_name);
edac_mc_free(mci);
sbridge_dev->mci = NULL;
@@ -1638,8 +1633,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
if (unlikely(!mci))
return -ENOMEM;
- debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
- __func__, mci, &sbridge_dev->pdev[0]->dev);
+ edac_dbg(0, "MC: mci = %p, dev = %p\n",
+ mci, &sbridge_dev->pdev[0]->dev);
pvt = mci->pvt_info;
memset(pvt, 0, sizeof(*pvt));
@@ -1670,12 +1665,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
get_memory_layout(mci);
/* record ptr to the generic device */
- mci->dev = &sbridge_dev->pdev[0]->dev;
+ mci->pdev = &sbridge_dev->pdev[0]->dev;
/* add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
- debugf0("MC: " __FILE__
- ": %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
rc = -EINVAL;
goto fail0;
}
@@ -1722,7 +1716,8 @@ static int __devinit sbridge_probe(struct pci_dev *pdev,
mc = 0;
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc);
+ edac_dbg(0, "Registering MC#%d (%d of %d)\n",
+ mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
rc = sbridge_register_mci(sbridge_dev);
if (unlikely(rc < 0))
@@ -1752,7 +1747,7 @@ static void __devexit sbridge_remove(struct pci_dev *pdev)
{
struct sbridge_dev *sbridge_dev;
- debugf0(__FILE__ ": %s()\n", __func__);
+ edac_dbg(0, "\n");
/*
* we have a trouble here: pdev value for removal will be wrong, since
@@ -1801,7 +1796,7 @@ static int __init sbridge_init(void)
{
int pci_rc;
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -1825,7 +1820,7 @@ static int __init sbridge_init(void)
*/
static void __exit sbridge_exit(void)
{
- debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ edac_dbg(2, "\n");
pci_unregister_driver(&sbridge_driver);
mce_unregister_decode_chain(&sbridge_mce_dec);
}
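The sb_edac hunks print memory boundaries as "%u.%03u GB" by splitting a megabyte count into thousands and remainder with div_u64_rem(). A standalone equivalent using plain division and modulo (div_u64_rem() itself is a kernel helper):

#include <stdio.h>
#include <stdint.h>

/* Split a size in MB into "G.ggg GB" the way the driver does:
 * quotient = decimal GB, remainder = the fractional %03u part. */
static void print_gb(uint64_t tmp_mb)
{
	uint32_t kb;	/* named "kb" in the driver; really the MB remainder */
	uint64_t mb;

	mb = tmp_mb / 1000;		/* stands in for div_u64_rem(tmp_mb, 1000, &kb) */
	kb = (uint32_t)(tmp_mb % 1000);
	printf("%llu.%03u GB\n", (unsigned long long)mb, kb);
}

int main(void)
{
	print_gb(4096);		/* prints 4.096 GB */
	print_gb(12288);	/* prints 12.288 GB */
	return 0;
}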
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index 7bb4614730db..1e904b7b79a0 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -69,12 +69,12 @@ static void tile_edac_check(struct mem_ctl_info *mci)
/* Check if the current error count is different from the saved one. */
if (mem_error.sbe_count != priv->ce_count) {
- dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
+ dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, 0,
0, 0, -1,
- mci->ctl_name, "", NULL);
+ mci->ctl_name, "");
}
}
@@ -84,10 +84,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
*/
static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
{
- struct csrow_info *csrow = &mci->csrows[0];
+ struct csrow_info *csrow = mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
- struct dimm_info *dimm = csrow->channels[0].dimm;
+ struct dimm_info *dimm = csrow->channels[0]->dimm;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -149,7 +149,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
priv->node = pdev->id;
priv->hv_devhdl = hv_devhdl;
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index 1ac7962d63ea..08a992693e62 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -103,10 +103,10 @@ static int how_many_channel(struct pci_dev *pdev)
pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */
- debugf0("In single channel mode.\n");
+ edac_dbg(0, "In single channel mode\n");
x38_channel_num = 1;
} else {
- debugf0("In dual channel mode.\n");
+ edac_dbg(0, "In dual channel mode\n");
x38_channel_num = 2;
}
@@ -151,7 +151,7 @@ static void x38_clear_error_info(struct mem_ctl_info *mci)
{
struct pci_dev *pdev;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* Clear any error bits.
@@ -172,7 +172,7 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
struct pci_dev *pdev;
void __iomem *window = mci->pvt_info;
- pdev = to_pci_dev(mci->dev);
+ pdev = to_pci_dev(mci->pdev);
/*
* This is a mess because there is no atomic way to read all the
@@ -215,26 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
-1, -1, -1,
- "UE overwrote CE", "", NULL);
+ "UE overwrote CE", "");
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
eccerrlog_row(channel, log),
-1, -1,
- "x38 UE", "", NULL);
+ "x38 UE", "");
} else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0, eccerrlog_syndrome(log),
eccerrlog_row(channel, log),
-1, -1,
- "x38 CE", "", NULL);
+ "x38 CE", "");
}
}
}
@@ -243,7 +243,7 @@ static void x38_check(struct mem_ctl_info *mci)
{
struct x38_error_info info;
- debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
+ edac_dbg(1, "MC%d\n", mci->mc_idx);
x38_get_and_clear_error_info(mci, &info);
x38_process_error_info(mci, &info);
}
@@ -331,7 +331,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
bool stacked;
void __iomem *window;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
window = x38_map_mchbar(pdev);
if (!window)
@@ -352,9 +352,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
if (!mci)
return -ENOMEM;
- debugf3("MC: %s(): init mci\n", __func__);
+ edac_dbg(3, "MC: init mci\n");
- mci->dev = &pdev->dev;
+ mci->pdev = &pdev->dev;
mci->mtype_cap = MEM_FLAG_DDR2;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
@@ -378,7 +378,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
*/
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
- struct csrow_info *csrow = &mci->csrows[i];
+ struct csrow_info *csrow = mci->csrows[i];
nr_pages = drb_to_nr_pages(drbs, stacked,
i / X38_RANKS_PER_CHANNEL,
@@ -388,7 +388,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
continue;
for (j = 0; j < x38_channel_num; j++) {
- struct dimm_info *dimm = csrow->channels[j].dimm;
+ struct dimm_info *dimm = csrow->channels[j]->dimm;
dimm->nr_pages = nr_pages / x38_channel_num;
dimm->grain = nr_pages << PAGE_SHIFT;
@@ -402,12 +402,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
rc = -ENODEV;
if (edac_mc_add_mc(mci)) {
- debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__);
+ edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
goto fail;
}
/* get this far and it's successful */
- debugf3("MC: %s(): success\n", __func__);
+ edac_dbg(3, "MC: success\n");
return 0;
fail:
@@ -423,7 +423,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev,
{
int rc;
- debugf0("MC: %s()\n", __func__);
+ edac_dbg(0, "MC:\n");
if (pci_enable_device(pdev) < 0)
return -EIO;
@@ -439,7 +439,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
- debugf0("%s()\n", __func__);
+ edac_dbg(0, "\n");
mci = edac_mc_del_mc(&pdev->dev);
if (!mci)
@@ -472,7 +472,7 @@ static int __init x38_init(void)
{
int pci_rc;
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
@@ -486,14 +486,14 @@ static int __init x38_init(void)
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_X38_HB, NULL);
if (!mci_pdev) {
- debugf0("x38 pci_get_device fail\n");
+ edac_dbg(0, "x38 pci_get_device fail\n");
pci_rc = -ENODEV;
goto fail1;
}
pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
if (pci_rc < 0) {
- debugf0("x38 init fail\n");
+ edac_dbg(0, "x38 init fail\n");
pci_rc = -ENODEV;
goto fail1;
}
@@ -513,7 +513,7 @@ fail0:
static void __exit x38_exit(void)
{
- debugf3("MC: %s()\n", __func__);
+ edac_dbg(3, "MC:\n");
pci_unregister_driver(&x38_driver);
if (!x38_registered) {
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 16716356d1fe..e175c8ed4ec4 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -33,7 +33,7 @@ config EXTCON_MAX77693
config EXTCON_MAX8997
tristate "MAX8997 EXTCON Support"
- depends on MFD_MAX8997
+ depends on MFD_MAX8997 && IRQ_DOMAIN
help
If you say yes here you get support for the MUIC device of
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index a4ed30bd9a41..ef9090a4271d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -26,6 +26,7 @@
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#include <linux/extcon.h>
+#include <linux/irqdomain.h>
#define DEV_NAME "max8997-muic"
@@ -77,6 +78,7 @@
struct max8997_muic_irq {
unsigned int irq;
const char *name;
+ unsigned int virq;
};
static struct max8997_muic_irq muic_irqs[] = {
@@ -343,12 +345,10 @@ static void max8997_muic_irq_work(struct work_struct *work)
{
struct max8997_muic_info *info = container_of(work,
struct max8997_muic_info, irq_work);
- struct max8997_dev *max8997 = i2c_get_clientdata(info->muic);
u8 status[2];
u8 adc, chg_type;
-
- int irq_type = info->irq - max8997->irq_base;
- int ret;
+ int irq_type = 0;
+ int i, ret;
mutex_lock(&info->mutex);
@@ -363,6 +363,10 @@ static void max8997_muic_irq_work(struct work_struct *work)
dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
status[0], status[1]);
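+	/* Translate the handled virq back to the MUIC hardware interrupt number */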
+	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ if (info->irq == muic_irqs[i].virq)
+ irq_type = muic_irqs[i].irq;
+
switch (irq_type) {
case MAX8997_MUICIRQ_ADC:
adc = status[0] & STATUS1_ADC_MASK;
@@ -448,11 +452,15 @@ static int __devinit max8997_muic_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
struct max8997_muic_irq *muic_irq = &muic_irqs[i];
+ int virq = 0;
+
+ virq = irq_create_mapping(max8997->irq_domain, muic_irq->irq);
+ if (!virq)
+ goto err_irq;
+ muic_irq->virq = virq;
- ret = request_threaded_irq(pdata->irq_base + muic_irq->irq,
- NULL, max8997_muic_irq_handler,
- 0, muic_irq->name,
- info);
+		ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
+ 0, muic_irq->name, info);
if (ret) {
dev_err(&pdev->dev,
"failed: irq request (IRQ: %d,"
@@ -496,7 +504,7 @@ err_extcon:
kfree(info->edev);
err_irq:
while (--i >= 0)
- free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+ free_irq(muic_irqs[i].virq, info);
kfree(info);
err_kfree:
return ret;
@@ -505,11 +513,10 @@ err_kfree:
static int __devexit max8997_muic_remove(struct platform_device *pdev)
{
struct max8997_muic_info *info = platform_get_drvdata(pdev);
- struct max8997_dev *max8997 = i2c_get_clientdata(info->muic);
int i;
for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
- free_irq(max8997->irq_base + muic_irqs[i].irq, info);
+ free_irq(muic_irqs[i].virq, info);
cancel_work_sync(&info->irq_work);
extcon_dev_unregister(info->edev);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe3db45fa83c..3cc152e690b0 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -107,7 +107,8 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name);
+ ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
+ pdev->name);
if (ret < 0)
goto err;
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 4d460ef87161..7a05fd24d68b 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -398,6 +398,14 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
+static ssize_t is_local_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_device *device = fw_device(dev);
+
+ return sprintf(buf, "%u\n", device->is_local);
+}
+
static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
@@ -447,6 +455,7 @@ static ssize_t units_show(struct device *dev,
static struct device_attribute fw_device_attributes[] = {
__ATTR_RO(config_rom),
__ATTR_RO(guid),
+ __ATTR_RO(is_local),
__ATTR_RO(units),
__ATTR_NULL,
};
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 8382e27e9a27..38c0aa60b2cb 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(fw_iso_buffer_destroy);
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
- int i;
+ size_t i;
dma_addr_t address;
ssize_t offset;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 780708dc6e25..87d6f2d2f02d 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -525,9 +525,10 @@ const struct fw_address_region fw_high_memory_region =
{ .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
-#if 0
-const struct fw_address_region fw_low_memory_region =
+static const struct fw_address_region low_memory_region =
{ .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
+
+#if 0
const struct fw_address_region fw_private_region =
{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
@@ -1198,6 +1199,23 @@ static struct fw_address_handler registers = {
.address_callback = handle_registers,
};
+static void handle_low_memory(struct fw_card *card, struct fw_request *request,
+ int tcode, int destination, int source, int generation,
+ unsigned long long offset, void *payload, size_t length,
+ void *callback_data)
+{
+ /*
+ * This catches requests not handled by the physical DMA unit,
+ * i.e., wrong transaction types or unauthorized source nodes.
+ */
+ fw_send_response(card, request, RCODE_TYPE_ERROR);
+}
+
+static struct fw_address_handler low_memory = {
+ .length = 0x000100000000ULL,
+ .address_callback = handle_low_memory,
+};
+
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");
@@ -1259,6 +1277,7 @@ static int __init fw_core_init(void)
fw_core_add_address_handler(&topology_map, &topology_map_region);
fw_core_add_address_handler(&registers, &registers_region);
+ fw_core_add_address_handler(&low_memory, &low_memory_region);
fw_core_add_descriptor(&vendor_id_descriptor);
fw_core_add_descriptor(&model_id_descriptor);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index c1af05e834b6..c788dbdaf3bc 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -191,6 +191,7 @@ struct fw_ohci {
unsigned quirks;
unsigned int pri_req_max;
u32 bus_time;
+ bool bus_time_running;
bool is_root;
bool csr_state_setclear_abdicate;
int n_ir;
@@ -1726,6 +1727,13 @@ static u32 update_bus_time(struct fw_ohci *ohci)
{
u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
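+	/* First update: enable the cycle64Seconds interrupt and seed bus_time from the cycle timer */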
+ if (unlikely(!ohci->bus_time_running)) {
+ reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
+ ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
+ (cycle_time_seconds & 0x40);
+ ohci->bus_time_running = true;
+ }
+
if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
ohci->bus_time += 0x40;
@@ -2213,7 +2221,7 @@ static int ohci_enable(struct fw_card *card,
{
struct fw_ohci *ohci = fw_ohci(card);
struct pci_dev *dev = to_pci_dev(card->device);
- u32 lps, seconds, version, irqs;
+ u32 lps, version, irqs;
int i, ret;
if (software_reset(ohci)) {
@@ -2269,9 +2277,12 @@ static int ohci_enable(struct fw_card *card,
(OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
(200 << 16));
- seconds = lower_32_bits(get_seconds());
- reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
- ohci->bus_time = seconds & ~0x3f;
+ ohci->bus_time_running = false;
+
+ for (i = 0; i < 32; i++)
+ if (ohci->ir_context_support & (1 << i))
+ reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
+ IR_CONTEXT_MULTI_CHANNEL_MODE);
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
if (version >= OHCI_VERSION_1_1) {
@@ -2369,7 +2380,6 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_postedWriteErr |
OHCI1394_selfIDComplete |
OHCI1394_regAccessFail |
- OHCI1394_cycle64Seconds |
OHCI1394_cycleInconsistent |
OHCI1394_unrecoverableError |
OHCI1394_cycleTooLong |
@@ -2658,7 +2668,8 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
case CSR_BUS_TIME:
spin_lock_irqsave(&ohci->lock, flags);
- ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
+ ohci->bus_time = (update_bus_time(ohci) & 0x40) |
+ (value & ~0x7f);
spin_unlock_irqrestore(&ohci->lock, flags);
break;
@@ -3539,6 +3550,13 @@ static int __devinit pci_probe(struct pci_dev *dev,
INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
+ if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
+ pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
+ dev_err(&dev->dev, "invalid MMIO resource\n");
+ err = -ENXIO;
+ goto fail_disable;
+ }
+
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
dev_err(&dev->dev, "MMIO resource unavailable\n");
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 153980be4ee6..b298158cb922 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
+#include <linux/random.h>
#include <asm/dmi.h>
/*
@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+ add_device_randomness(buf, dmi_len);
+
dmi_iounmap(buf, dmi_len);
return 0;
}
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index adc07102a20d..c1cdc9236666 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -98,7 +98,7 @@ static LIST_HEAD(map_entries);
/**
* firmware_map_add_entry() - Does the real work to add a firmware memmap entry.
* @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
+ * @end: End of the memory range (exclusive).
* @type: Type of the memory range.
* @entry: Pre-allocated (either kmalloc() or bootmem allocator), uninitialised
* entry.
@@ -113,7 +113,7 @@ static int firmware_map_add_entry(u64 start, u64 end,
BUG_ON(start > end);
entry->start = start;
- entry->end = end;
+ entry->end = end - 1;
entry->type = type;
INIT_LIST_HEAD(&entry->list);
kobject_init(&entry->kobj, &memmap_ktype);
@@ -148,7 +148,7 @@ static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
* firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
* memory hotplug.
* @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
+ * @end: End of the memory range (exclusive).
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function is for memory hotplug, it is
@@ -175,7 +175,7 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
/**
* firmware_map_add_early() - Adds a firmware mapping entry.
* @start: Start of the memory range.
- * @end: End of the memory range (inclusive).
+ * @end: End of the memory range (exclusive).
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function uses the bootmem allocator
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 51e0e2d8fac6..a330492e06f9 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- pcdp = ioremap(efi.hcdp, 4096);
+ pcdp = early_ioremap(efi.hcdp, 4096);
printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
}
out:
- iounmap(pcdp);
+ early_iounmap(pcdp, 4096);
return rc;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 542f0c04b695..ba7926f5c099 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -253,6 +253,12 @@ config GPIO_GE_FPGA
comment "I2C GPIO expanders:"
+config GPIO_ARIZONA
+ tristate "Wolfson Microelectronics Arizona class devices"
+ depends on MFD_ARIZONA
+ help
+ Support for GPIOs on Wolfson Arizona class devices.
+
config GPIO_MAX7300
tristate "Maxim MAX7300 GPIO expander"
depends on I2C
@@ -288,7 +294,7 @@ config GPIO_MAX732X_IRQ
config GPIO_MC9S08DZ60
bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
- depends on I2C && MACH_MX35_3DS
+ depends on I2C=y && MACH_MX35_3DS
help
Select this to enable the MC9S08DZ60 GPIO driver
@@ -466,6 +472,18 @@ config GPIO_BT8XX
If unsure, say N.
+config GPIO_AMD8111
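+	/* Register accessors, chosen at probe time for the 8- or 16-bit variant */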
+ tristate "AMD 8111 GPIO driver"
+ depends on PCI
+ help
+ The AMD 8111 south bridge contains 32 GPIO pins which can be used.
+
+	  Note that system firmware/ACPI usually handles these GPIO pins
+	  itself, and careless use of this driver can easily break your
+	  system!
+
+	  If unsure, say N.
+
config GPIO_LANGWELL
bool "Intel Langwell/Penwell GPIO support"
depends on PCI && X86
@@ -579,6 +597,13 @@ config GPIO_AB8500
help
Select this to enable the AB8500 IC GPIO driver
+config GPIO_TPS6586X
+ bool "TPS6586X GPIO"
+ depends on MFD_TPS6586X
+ help
+ Select this option to enable GPIO driver for the TPS6586X
+ chip family.
+
config GPIO_TPS65910
bool "TPS65910 GPIO"
depends on MFD_TPS65910
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 0f55662002c3..153caceeb053 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,8 @@ obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o
+obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
@@ -61,6 +63,7 @@ obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
+obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
new file mode 100644
index 000000000000..710fafcdd1b1
--- /dev/null
+++ b/drivers/gpio/gpio-amd8111.c
@@ -0,0 +1,246 @@
+/*
+ * GPIO driver for AMD 8111 south bridges
+ *
+ * Copyright (c) 2012 Dmitry Eremin-Solenikov
+ *
+ * Based on the AMD RNG driver:
+ * Copyright 2005 (c) MontaVista Software, Inc.
+ * with the majority of the code coming from:
+ *
+ * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
+ * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>
+ *
+ * derived from
+ *
+ * Hardware driver for the AMD 768 Random Number Generator (RNG)
+ * (c) Copyright 2001 Red Hat Inc
+ *
+ * derived from
+ *
+ * Hardware driver for Intel i810 Random Number Generator (RNG)
+ * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+#define PMBASE_OFFSET 0xb0
+#define PMBASE_SIZE 0x30
+
+#define AMD_REG_GPIO(i) (0x10 + (i))
+
+#define AMD_GPIO_LTCH_STS 0x40 /* Latch status, w1 */
+#define AMD_GPIO_RTIN 0x20 /* Real Time in, ro */
+#define AMD_GPIO_DEBOUNCE 0x10 /* Debounce, rw */
+#define AMD_GPIO_MODE_MASK 0x0c /* Pin Mode Select, rw */
+#define AMD_GPIO_MODE_IN 0x00
+#define AMD_GPIO_MODE_OUT 0x04
+/* Enable alternative (e.g. clkout, IRQ, etc) function of the pin */
+#define AMD_GPIO_MODE_ALTFN 0x08 /* Or 0x09 */
+#define AMD_GPIO_X_MASK 0x03 /* In/Out specific, rw */
+#define AMD_GPIO_X_IN_ACTIVEHI 0x01 /* Active High */
+#define AMD_GPIO_X_IN_LATCH 0x02 /* Latched version is selected */
+#define AMD_GPIO_X_OUT_LOW 0x00
+#define AMD_GPIO_X_OUT_HI 0x01
+#define AMD_GPIO_X_OUT_CLK0 0x02
+#define AMD_GPIO_X_OUT_CLK1 0x03
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE. We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS), 0 },
+ { 0, }, /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, pci_tbl);
+
+struct amd_gpio {
+ struct gpio_chip chip;
+ u32 pmbase;
+ void __iomem *pm;
+ struct pci_dev *pdev;
+ spinlock_t lock; /* guards hw registers and orig table */
+ u8 orig[32];
+};
+
+#define to_agp(chip) container_of(chip, struct amd_gpio, chip)
+
+static int amd_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct amd_gpio *agp = to_agp(chip);
+
+ agp->orig[offset] = ioread8(agp->pm + AMD_REG_GPIO(offset)) &
+ (AMD_GPIO_DEBOUNCE | AMD_GPIO_MODE_MASK | AMD_GPIO_X_MASK);
+
+ dev_dbg(&agp->pdev->dev, "Requested gpio %d, data %x\n", offset, agp->orig[offset]);
+
+ return 0;
+}
+
+static void amd_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct amd_gpio *agp = to_agp(chip);
+
+ dev_dbg(&agp->pdev->dev, "Freed gpio %d, data %x\n", offset, agp->orig[offset]);
+
+ iowrite8(agp->orig[offset], agp->pm + AMD_REG_GPIO(offset));
+}
+
+static void amd_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct amd_gpio *agp = to_agp(chip);
+ u8 temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&agp->lock, flags);
+ temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
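+	/* Keep the debounce setting, force output mode and drive the requested level */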
+ temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_OUT | (value ? AMD_GPIO_X_OUT_HI : AMD_GPIO_X_OUT_LOW);
+ iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
+ spin_unlock_irqrestore(&agp->lock, flags);
+
+ dev_dbg(&agp->pdev->dev, "Setting gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
+}
+
+static int amd_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct amd_gpio *agp = to_agp(chip);
+ u8 temp;
+
+ temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
+
+ dev_dbg(&agp->pdev->dev, "Getting gpio %d, reg=%02x\n", offset, temp);
+
+ return (temp & AMD_GPIO_RTIN) ? 1 : 0;
+}
+
+static int amd_gpio_dirout(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct amd_gpio *agp = to_agp(chip);
+ u8 temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&agp->lock, flags);
+ temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
+ temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_OUT | (value ? AMD_GPIO_X_OUT_HI : AMD_GPIO_X_OUT_LOW);
+ iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
+ spin_unlock_irqrestore(&agp->lock, flags);
+
+ dev_dbg(&agp->pdev->dev, "Dirout gpio %d, value %d, reg=%02x\n", offset, !!value, temp);
+
+ return 0;
+}
+
+static int amd_gpio_dirin(struct gpio_chip *chip, unsigned offset)
+{
+ struct amd_gpio *agp = to_agp(chip);
+ u8 temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&agp->lock, flags);
+ temp = ioread8(agp->pm + AMD_REG_GPIO(offset));
+ temp = (temp & AMD_GPIO_DEBOUNCE) | AMD_GPIO_MODE_IN;
+ iowrite8(temp, agp->pm + AMD_REG_GPIO(offset));
+ spin_unlock_irqrestore(&agp->lock, flags);
+
+ dev_dbg(&agp->pdev->dev, "Dirin gpio %d, reg=%02x\n", offset, temp);
+
+ return 0;
+}
+
+static struct amd_gpio gp = {
+ .chip = {
+ .label = "AMD GPIO",
+ .owner = THIS_MODULE,
+ .base = -1,
+ .ngpio = 32,
+ .request = amd_gpio_request,
+ .free = amd_gpio_free,
+ .set = amd_gpio_set,
+ .get = amd_gpio_get,
+ .direction_output = amd_gpio_dirout,
+ .direction_input = amd_gpio_dirin,
+ },
+};
+
+static int __init amd_gpio_init(void)
+{
+ int err = -ENODEV;
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *ent;
+
+	/* Look for our device - the AMD south bridge.
+	 * We are not aware of a system with two such bridges,
+	 * so we can assume there is at most one device.
+	 *
+	 * We can't use the plain pci_driver mechanism:
+	 * the device is really a multi-function device, and the
+	 * main driver that binds to the pci_dev is an SMBus driver,
+	 * so we have to find and bind to the device this way.
+	 */
+ for_each_pci_dev(pdev) {
+ ent = pci_match_id(pci_tbl, pdev);
+ if (ent)
+ goto found;
+ }
+ /* Device not found. */
+ goto out;
+
+found:
+ err = pci_read_config_dword(pdev, 0x58, &gp.pmbase);
+ if (err)
+ goto out;
+ err = -EIO;
+ gp.pmbase &= 0x0000FF00;
+ if (gp.pmbase == 0)
+ goto out;
+ if (!request_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE, "AMD GPIO")) {
+ dev_err(&pdev->dev, "AMD GPIO region 0x%x already in use!\n",
+ gp.pmbase + PMBASE_OFFSET);
+ err = -EBUSY;
+ goto out;
+ }
+ gp.pm = ioport_map(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ gp.pdev = pdev;
+ gp.chip.dev = &pdev->dev;
+
+ spin_lock_init(&gp.lock);
+
+ printk(KERN_INFO "AMD-8111 GPIO detected\n");
+ err = gpiochip_add(&gp.chip);
+ if (err) {
+ printk(KERN_ERR "GPIO registering failed (%d)\n",
+ err);
+ release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void __exit amd_gpio_exit(void)
+{
+ int err = gpiochip_remove(&gp.chip);
+ WARN_ON(err);
+ ioport_unmap(gp.pm);
+ release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
+}
+
+module_init(amd_gpio_init);
+module_exit(amd_gpio_exit);
+
+MODULE_AUTHOR("The Linux Kernel team");
+MODULE_DESCRIPTION("GPIO driver for AMD chipsets");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
new file mode 100644
index 000000000000..8740d2eb06f8
--- /dev/null
+++ b/drivers/gpio/gpio-arizona.c
@@ -0,0 +1,163 @@
+/*
+ * gpiolib support for Wolfson Arizona class devices
+ *
+ * Copyright 2012 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+struct arizona_gpio {
+ struct arizona *arizona;
+ struct gpio_chip gpio_chip;
+};
+
+static inline struct arizona_gpio *to_arizona_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct arizona_gpio, gpio_chip);
+}
+
+static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
+ struct arizona *arizona = arizona_gpio->arizona;
+
+ return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_DIR, ARIZONA_GPN_DIR);
+}
+
+static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
+ struct arizona *arizona = arizona_gpio->arizona;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_GPIO1_CTRL + offset, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val & ARIZONA_GPN_LVL)
+ return 1;
+ else
+ return 0;
+}
+
+static int arizona_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
+ struct arizona *arizona = arizona_gpio->arizona;
+
+ if (value)
+ value = ARIZONA_GPN_LVL;
+
+ return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_DIR | ARIZONA_GPN_LVL, value);
+}
+
+static void arizona_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct arizona_gpio *arizona_gpio = to_arizona_gpio(chip);
+ struct arizona *arizona = arizona_gpio->arizona;
+
+ if (value)
+ value = ARIZONA_GPN_LVL;
+
+ regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_LVL, value);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "arizona",
+ .owner = THIS_MODULE,
+ .direction_input = arizona_gpio_direction_in,
+ .get = arizona_gpio_get,
+ .direction_output = arizona_gpio_direction_out,
+ .set = arizona_gpio_set,
+ .can_sleep = 1,
+};
+
+static int __devinit arizona_gpio_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct arizona_pdata *pdata = arizona->dev->platform_data;
+ struct arizona_gpio *arizona_gpio;
+ int ret;
+
+ arizona_gpio = devm_kzalloc(&pdev->dev, sizeof(*arizona_gpio),
+ GFP_KERNEL);
+ if (arizona_gpio == NULL)
+ return -ENOMEM;
+
+ arizona_gpio->arizona = arizona;
+ arizona_gpio->gpio_chip = template_chip;
+ arizona_gpio->gpio_chip.dev = &pdev->dev;
+
+ switch (arizona->type) {
+ case WM5102:
+ case WM5110:
+ arizona_gpio->gpio_chip.ngpio = 5;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown chip variant %d\n",
+ arizona->type);
+ return -EINVAL;
+ }
+
+ if (pdata && pdata->gpio_base)
+ arizona_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ arizona_gpio->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&arizona_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
+ ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, arizona_gpio);
+
+ return ret;
+
+err:
+ return ret;
+}
+
+static int __devexit arizona_gpio_remove(struct platform_device *pdev)
+{
+ struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&arizona_gpio->gpio_chip);
+}
+
+static struct platform_driver arizona_gpio_driver = {
+ .driver.name = "arizona-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = arizona_gpio_probe,
+ .remove = __devexit_p(arizona_gpio_remove),
+};
+
+module_platform_driver(arizona_gpio_driver);
+
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("GPIO interface for Arizona devices");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:arizona-gpio");
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index 150d9768811d..ec48ed512628 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
pdata->number_of_pins, numa_node_id());
- if (IS_ERR_VALUE(p->irq_base)) {
+ if (p->irq_base < 0) {
dev_err(&pdev->dev, "cannot get irq_desc\n");
- return -ENXIO;
+ return p->irq_base;
}
pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
pdata->gpio_base, pdata->number_of_pins, p->irq_base);
@@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
return 0;
}
-static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p)
+static void em_gio_irq_domain_cleanup(struct em_gio_priv *p)
{
struct gpio_em_config *pdata = p->pdev->dev.platform_data;
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index a1c8754f52cf..202a99207b7d 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
resource_size_t start, len;
struct lnw_gpio *lnw;
u32 gpio_base;
- int retval = 0;
+ int retval;
int ngpio = id->driver_data;
retval = pci_enable_device(pdev);
@@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
base = ioremap_nocache(start, len);
if (!base) {
dev_err(&pdev->dev, "error mapping bar1\n");
+ retval = -EFAULT;
goto err3;
}
gpio_base = *((u32 *)base + 1);
@@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
&lnw_gpio_irq_ops, lnw);
- if (!lnw->domain)
+ if (!lnw->domain) {
+ retval = -ENOMEM;
goto err3;
+ }
lnw->reg_base = base;
lnw->chip.label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index c2199beca98a..8a420f13905e 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-lpc32xx/gpiolib.c
+ * GPIO driver for LPC32xx SoC
*
* Author: Kevin Wells <kevin.wells@nxp.com>
*
@@ -28,6 +28,7 @@
#include <mach/hardware.h>
#include <mach/platform.h>
#include <mach/gpio-lpc32xx.h>
+#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -367,6 +368,66 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
return -EINVAL;
}
+static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
+{
+ return IRQ_LPC32XX_P0_P1_IRQ;
+}
+
+static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
+ IRQ_LPC32XX_GPIO_00,
+ IRQ_LPC32XX_GPIO_01,
+ IRQ_LPC32XX_GPIO_02,
+ IRQ_LPC32XX_GPIO_03,
+ IRQ_LPC32XX_GPIO_04,
+ IRQ_LPC32XX_GPIO_05,
+};
+
+static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
+ return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
+ return -ENXIO;
+}
+
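+/* GPI pins without a dedicated interrupt line are marked -ENXIO */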
+static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
+ IRQ_LPC32XX_GPI_00,
+ IRQ_LPC32XX_GPI_01,
+ IRQ_LPC32XX_GPI_02,
+ IRQ_LPC32XX_GPI_03,
+ IRQ_LPC32XX_GPI_04,
+ IRQ_LPC32XX_GPI_05,
+ IRQ_LPC32XX_GPI_06,
+ IRQ_LPC32XX_GPI_07,
+ IRQ_LPC32XX_GPI_08,
+ IRQ_LPC32XX_GPI_09,
+ -ENXIO, /* 10 */
+ -ENXIO, /* 11 */
+ -ENXIO, /* 12 */
+ -ENXIO, /* 13 */
+ -ENXIO, /* 14 */
+ -ENXIO, /* 15 */
+ -ENXIO, /* 16 */
+ -ENXIO, /* 17 */
+ -ENXIO, /* 18 */
+ IRQ_LPC32XX_GPI_19,
+ -ENXIO, /* 20 */
+ -ENXIO, /* 21 */
+ -ENXIO, /* 22 */
+ -ENXIO, /* 23 */
+ -ENXIO, /* 24 */
+ -ENXIO, /* 25 */
+ -ENXIO, /* 26 */
+ -ENXIO, /* 27 */
+ IRQ_LPC32XX_GPI_28,
+};
+
+static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
+ return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
+ return -ENXIO;
+}
+
static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
{
.chip = {
@@ -376,6 +437,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_output = lpc32xx_gpio_dir_output_p012,
.set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
+ .to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P0_GRP,
.ngpio = LPC32XX_GPIO_P0_MAX,
.names = gpio_p0_names,
@@ -391,6 +453,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_output = lpc32xx_gpio_dir_output_p012,
.set = lpc32xx_gpio_set_value_p012,
.request = lpc32xx_gpio_request,
+ .to_irq = lpc32xx_gpio_to_irq_p01,
.base = LPC32XX_GPIO_P1_GRP,
.ngpio = LPC32XX_GPIO_P1_MAX,
.names = gpio_p1_names,
@@ -421,6 +484,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_output = lpc32xx_gpio_dir_output_p3,
.set = lpc32xx_gpio_set_value_p3,
.request = lpc32xx_gpio_request,
+ .to_irq = lpc32xx_gpio_to_irq_gpio_p3,
.base = LPC32XX_GPIO_P3_GRP,
.ngpio = LPC32XX_GPIO_P3_MAX,
.names = gpio_p3_names,
@@ -434,6 +498,7 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
.direction_input = lpc32xx_gpio_dir_in_always,
.get = lpc32xx_gpi_get_value,
.request = lpc32xx_gpio_request,
+ .to_irq = lpc32xx_gpio_to_irq_gpi_p3,
.base = LPC32XX_GPI_P3_GRP,
.ngpio = LPC32XX_GPI_P3_MAX,
.names = gpi_p3_names,
@@ -457,13 +522,6 @@ static struct lpc32xx_gpio_chip lpc32xx_gpiochip[] = {
},
};
-/* Empty now, can be removed later when mach-lpc32xx is finally switched over
- * to DT support
- */
-void __init lpc32xx_gpio_init(void)
-{
-}
-
static int lpc32xx_of_xlate(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags)
{
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 71a838f44501..b38986285868 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset)
if (offset < 20)
return INTEL_MSIC_GPIO0HV0CTLO - offset + 16;
- return INTEL_MSIC_GPIO1HV0CTLO + offset + 20;
+ return INTEL_MSIC_GPIO1HV0CTLO - offset + 20;
}
static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 04691d3abe60..80f44bb64a87 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -37,7 +37,8 @@
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
- IMX31_GPIO, /* runs on all other i.mx */
+ IMX31_GPIO, /* runs on i.mx31 */
+ IMX35_GPIO, /* runs on all other i.mx */
};
/* device type dependent stuff */
@@ -49,6 +50,7 @@ struct mxc_gpio_hwdata {
unsigned icr2_reg;
unsigned imr_reg;
unsigned isr_reg;
+ int edge_sel_reg;
unsigned low_level;
unsigned high_level;
unsigned rise_edge;
@@ -73,6 +75,7 @@ static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
.icr2_reg = 0x2c,
.imr_reg = 0x30,
.isr_reg = 0x34,
+ .edge_sel_reg = -EINVAL,
.low_level = 0x03,
.high_level = 0x02,
.rise_edge = 0x00,
@@ -87,6 +90,22 @@ static struct mxc_gpio_hwdata imx31_gpio_hwdata = {
.icr2_reg = 0x10,
.imr_reg = 0x14,
.isr_reg = 0x18,
+ .edge_sel_reg = -EINVAL,
+ .low_level = 0x00,
+ .high_level = 0x01,
+ .rise_edge = 0x02,
+ .fall_edge = 0x03,
+};
+
+static struct mxc_gpio_hwdata imx35_gpio_hwdata = {
+ .dr_reg = 0x00,
+ .gdir_reg = 0x04,
+ .psr_reg = 0x08,
+ .icr1_reg = 0x0c,
+ .icr2_reg = 0x10,
+ .imr_reg = 0x14,
+ .isr_reg = 0x18,
+ .edge_sel_reg = 0x1c,
.low_level = 0x00,
.high_level = 0x01,
.rise_edge = 0x02,
@@ -103,12 +122,13 @@ static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
+#define GPIO_EDGE_SEL (mxc_gpio_hwdata->edge_sel_reg)
#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
-#define GPIO_INT_NONE 0x4
+#define GPIO_INT_BOTH_EDGES 0x4
static struct platform_device_id mxc_gpio_devtype[] = {
{
@@ -121,6 +141,9 @@ static struct platform_device_id mxc_gpio_devtype[] = {
.name = "imx31-gpio",
.driver_data = IMX31_GPIO,
}, {
+ .name = "imx35-gpio",
+ .driver_data = IMX35_GPIO,
+ }, {
/* sentinel */
}
};
@@ -129,6 +152,7 @@ static const struct of_device_id mxc_gpio_dt_ids[] = {
{ .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
{ .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
{ .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
+ { .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
{ /* sentinel */ }
};
@@ -160,15 +184,19 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
edge = GPIO_INT_FALL_EDGE;
break;
case IRQ_TYPE_EDGE_BOTH:
- val = gpio_get_value(gpio);
- if (val) {
- edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
+ if (GPIO_EDGE_SEL >= 0) {
+ edge = GPIO_INT_BOTH_EDGES;
} else {
- edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
+ val = gpio_get_value(gpio);
+ if (val) {
+ edge = GPIO_INT_LOW_LEV;
+ pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
+ } else {
+ edge = GPIO_INT_HIGH_LEV;
+ pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
+ }
+ port->both_edges |= 1 << gpio_idx;
}
- port->both_edges |= 1 << gpio_idx;
break;
case IRQ_TYPE_LEVEL_LOW:
edge = GPIO_INT_LOW_LEV;
@@ -180,10 +208,23 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* ICR1 or ICR2 */
- bit = gpio_idx & 0xf;
- val = readl(reg) & ~(0x3 << (bit << 1));
- writel(val | (edge << (bit << 1)), reg);
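+	/* Controllers with a dedicated EDGE_SEL register handle both-edge triggering in hardware */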
+ if (GPIO_EDGE_SEL >= 0) {
+ val = readl(port->base + GPIO_EDGE_SEL);
+ if (edge == GPIO_INT_BOTH_EDGES)
+ writel(val | (1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ else
+ writel(val & ~(1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ }
+
+ if (edge != GPIO_INT_BOTH_EDGES) {
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
+ bit = gpio_idx & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ }
+
writel(1 << gpio_idx, port->base + GPIO_ISR);
return 0;
@@ -335,7 +376,9 @@ static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
return;
}
- if (hwtype == IMX31_GPIO)
+ if (hwtype == IMX35_GPIO)
+ mxc_gpio_hwdata = &imx35_gpio_hwdata;
+ else if (hwtype == IMX31_GPIO)
mxc_gpio_hwdata = &imx31_gpio_hwdata;
else
mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
@@ -422,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
port->bgc.gc.to_irq = mxc_gpio_to_irq;
- port->bgc.gc.base = pdev->id * 32;
- port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
- port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+ port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 :
+ pdev->id * 32;
err = gpiochip_add(&port->bgc.gc);
if (err)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 4fbc208c32cf..e6efd77668f0 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -899,12 +899,6 @@ static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
bank = container_of(chip, struct gpio_bank, chip);
- if (!bank->dbck) {
- bank->dbck = clk_get(bank->dev, "dbclk");
- if (IS_ERR(bank->dbck))
- dev_err(bank->dev, "Could not get gpio dbck\n");
- }
-
spin_lock_irqsave(&bank->lock, flags);
_set_gpio_debounce(bank, offset, debounce);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -976,6 +970,10 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
/* Initialize interface clk ungated, module enabled */
if (bank->regs->ctrl)
__raw_writel(0, base + bank->regs->ctrl);
+
+ bank->dbck = clk_get(bank->dev, "dbclk");
+ if (IS_ERR(bank->dbck))
+ dev_err(bank->dev, "Could not get gpio dbck\n");
}
static __devinit void
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 1c313c710be3..9c693ae17956 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -78,10 +78,10 @@ struct pca953x_chip {
#ifdef CONFIG_GPIO_PCA953X_IRQ
struct mutex irq_lock;
- uint16_t irq_mask;
- uint16_t irq_stat;
- uint16_t irq_trig_raise;
- uint16_t irq_trig_fall;
+ u32 irq_mask;
+ u32 irq_stat;
+ u32 irq_trig_raise;
+ u32 irq_trig_fall;
int irq_base;
#endif
@@ -98,12 +98,11 @@ static int pca953x_write_reg(struct pca953x_chip *chip, int reg, u32 val)
if (chip->gpio_chip.ngpio <= 8)
ret = i2c_smbus_write_byte_data(chip->client, reg, val);
else if (chip->gpio_chip.ngpio == 24) {
- ret = i2c_smbus_write_word_data(chip->client,
+ cpu_to_le32s(&val);
+ ret = i2c_smbus_write_i2c_block_data(chip->client,
(reg << 2) | REG_ADDR_AI,
- val & 0xffff);
- ret = i2c_smbus_write_byte_data(chip->client,
- (reg << 2) + 2,
- (val & 0xff0000) >> 16);
+ 3,
+ (u8 *) &val);
}
else {
switch (chip->chip_type) {
@@ -135,22 +134,27 @@ static int pca953x_read_reg(struct pca953x_chip *chip, int reg, u32 *val)
{
int ret;
- if (chip->gpio_chip.ngpio <= 8)
+ if (chip->gpio_chip.ngpio <= 8) {
ret = i2c_smbus_read_byte_data(chip->client, reg);
- else if (chip->gpio_chip.ngpio == 24) {
- ret = i2c_smbus_read_word_data(chip->client, reg << 2);
- ret |= (i2c_smbus_read_byte_data(chip->client,
- (reg << 2) + 2)<<16);
+ *val = ret;
}
- else
+ else if (chip->gpio_chip.ngpio == 24) {
+ *val = 0;
+ ret = i2c_smbus_read_i2c_block_data(chip->client,
+ (reg << 2) | REG_ADDR_AI,
+ 3,
+ (u8 *) val);
+ le32_to_cpus(val);
+ } else {
ret = i2c_smbus_read_word_data(chip->client, reg << 1);
+ *val = ret;
+ }
if (ret < 0) {
dev_err(&chip->client->dev, "failed reading register\n");
return ret;
}
- *val = (u32)ret;
return 0;
}
@@ -349,8 +353,8 @@ static void pca953x_irq_bus_lock(struct irq_data *d)
static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- uint16_t new_irqs;
- uint16_t level;
+ u32 new_irqs;
+ u32 level;
/* Look for any newly setup interrupt */
new_irqs = chip->irq_trig_fall | chip->irq_trig_raise;
@@ -368,8 +372,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
{
struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
- uint16_t level = d->irq - chip->irq_base;
- uint16_t mask = 1 << level;
+ u32 level = d->irq - chip->irq_base;
+ u32 mask = 1 << level;
if (!(type & IRQ_TYPE_EDGE_BOTH)) {
dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
@@ -399,12 +403,12 @@ static struct irq_chip pca953x_irq_chip = {
.irq_set_type = pca953x_irq_set_type,
};
-static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
+static u32 pca953x_irq_pending(struct pca953x_chip *chip)
{
u32 cur_stat;
- uint16_t old_stat;
- uint16_t pending;
- uint16_t trigger;
+ u32 old_stat;
+ u32 pending;
+ u32 trigger;
int ret, offset = 0;
switch (chip->chip_type) {
@@ -440,8 +444,8 @@ static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
static irqreturn_t pca953x_irq_handler(int irq, void *devid)
{
struct pca953x_chip *chip = devid;
- uint16_t pending;
- uint16_t level;
+ u32 pending;
+ u32 level;
pending = pca953x_irq_pending(chip);
@@ -564,7 +568,7 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
* WARNING: This is DEPRECATED and will be removed eventually!
*/
static void
-pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
{
struct device_node *node;
const __be32 *val;
@@ -592,13 +596,13 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
}
#else
static void
-pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, u32 *invert)
{
*gpio_base = -1;
}
#endif
-static int __devinit device_pca953x_init(struct pca953x_chip *chip, int invert)
+static int __devinit device_pca953x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
@@ -617,7 +621,7 @@ out:
return ret;
}
-static int __devinit device_pca957x_init(struct pca953x_chip *chip, int invert)
+static int __devinit device_pca957x_init(struct pca953x_chip *chip, u32 invert)
{
int ret;
u32 val = 0;
@@ -653,8 +657,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
- int irq_base=0, invert=0;
+ int irq_base = 0;
int ret;
+ u32 invert = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
if (chip == NULL)
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 2d1de9e7e9bd..076e236d0da7 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -61,61 +61,28 @@ struct pcf857x {
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
unsigned out; /* software latch */
+
+ int (*write)(struct i2c_client *client, unsigned data);
+ int (*read)(struct i2c_client *client);
};
/*-------------------------------------------------------------------------*/
/* Talk to 8-bit I/O expander */
-static int pcf857x_input8(struct gpio_chip *chip, unsigned offset)
-{
- struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
- int status;
-
- mutex_lock(&gpio->lock);
- gpio->out |= (1 << offset);
- status = i2c_smbus_write_byte(gpio->client, gpio->out);
- mutex_unlock(&gpio->lock);
-
- return status;
-}
-
-static int pcf857x_get8(struct gpio_chip *chip, unsigned offset)
-{
- struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
- s32 value;
-
- value = i2c_smbus_read_byte(gpio->client);
- return (value < 0) ? 0 : (value & (1 << offset));
-}
-
-static int pcf857x_output8(struct gpio_chip *chip, unsigned offset, int value)
+static int i2c_write_le8(struct i2c_client *client, unsigned data)
{
- struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
- unsigned bit = 1 << offset;
- int status;
-
- mutex_lock(&gpio->lock);
- if (value)
- gpio->out |= bit;
- else
- gpio->out &= ~bit;
- status = i2c_smbus_write_byte(gpio->client, gpio->out);
- mutex_unlock(&gpio->lock);
-
- return status;
+ return i2c_smbus_write_byte(client, data);
}
-static void pcf857x_set8(struct gpio_chip *chip, unsigned offset, int value)
+static int i2c_read_le8(struct i2c_client *client)
{
- pcf857x_output8(chip, offset, value);
+ return (int)i2c_smbus_read_byte(client);
}
-/*-------------------------------------------------------------------------*/
-
/* Talk to 16-bit I/O expander */
-static int i2c_write_le16(struct i2c_client *client, u16 word)
+static int i2c_write_le16(struct i2c_client *client, unsigned word)
{
u8 buf[2] = { word & 0xff, word >> 8, };
int status;
@@ -135,29 +102,31 @@ static int i2c_read_le16(struct i2c_client *client)
return (buf[1] << 8) | buf[0];
}
-static int pcf857x_input16(struct gpio_chip *chip, unsigned offset)
+/*-------------------------------------------------------------------------*/
+
+static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
{
struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
int status;
mutex_lock(&gpio->lock);
gpio->out |= (1 << offset);
- status = i2c_write_le16(gpio->client, gpio->out);
+ status = gpio->write(gpio->client, gpio->out);
mutex_unlock(&gpio->lock);
return status;
}
-static int pcf857x_get16(struct gpio_chip *chip, unsigned offset)
+static int pcf857x_get(struct gpio_chip *chip, unsigned offset)
{
struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
int value;
- value = i2c_read_le16(gpio->client);
+ value = gpio->read(gpio->client);
return (value < 0) ? 0 : (value & (1 << offset));
}
-static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value)
+static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
unsigned bit = 1 << offset;
@@ -168,15 +137,15 @@ static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value)
gpio->out |= bit;
else
gpio->out &= ~bit;
- status = i2c_write_le16(gpio->client, gpio->out);
+ status = gpio->write(gpio->client, gpio->out);
mutex_unlock(&gpio->lock);
return status;
}
-static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value)
+static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
{
- pcf857x_output16(chip, offset, value);
+ pcf857x_output(chip, offset, value);
}
/*-------------------------------------------------------------------------*/
@@ -200,10 +169,15 @@ static int pcf857x_probe(struct i2c_client *client,
mutex_init(&gpio->lock);
- gpio->chip.base = pdata ? pdata->gpio_base : -1;
- gpio->chip.can_sleep = 1;
- gpio->chip.dev = &client->dev;
- gpio->chip.owner = THIS_MODULE;
+ gpio->chip.base = pdata ? pdata->gpio_base : -1;
+ gpio->chip.can_sleep = 1;
+ gpio->chip.dev = &client->dev;
+ gpio->chip.owner = THIS_MODULE;
+ gpio->chip.get = pcf857x_get;
+ gpio->chip.set = pcf857x_set;
+ gpio->chip.direction_input = pcf857x_input;
+ gpio->chip.direction_output = pcf857x_output;
+ gpio->chip.ngpio = id->driver_data;
/* NOTE: the OnSemi jlc1562b is also largely compatible with
* these parts, notably for output. It has a low-resolution
@@ -216,12 +190,9 @@ static int pcf857x_probe(struct i2c_client *client,
*
* NOTE: we don't distinguish here between *4 and *4a parts.
*/
- gpio->chip.ngpio = id->driver_data;
if (gpio->chip.ngpio == 8) {
- gpio->chip.direction_input = pcf857x_input8;
- gpio->chip.get = pcf857x_get8;
- gpio->chip.direction_output = pcf857x_output8;
- gpio->chip.set = pcf857x_set8;
+ gpio->write = i2c_write_le8;
+ gpio->read = i2c_read_le8;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE))
@@ -238,10 +209,8 @@ static int pcf857x_probe(struct i2c_client *client,
* NOTE: we don't distinguish here between '75 and '75c parts.
*/
} else if (gpio->chip.ngpio == 16) {
- gpio->chip.direction_input = pcf857x_input16;
- gpio->chip.get = pcf857x_get16;
- gpio->chip.direction_output = pcf857x_output16;
- gpio->chip.set = pcf857x_set16;
+ gpio->write = i2c_write_le16;
+ gpio->read = i2c_read_le16;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
status = -EIO;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 58a6a63a6ece..9cac88a65f78 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -62,6 +62,7 @@ int pxa_last_gpio;
#ifdef CONFIG_OF
static struct irq_domain *domain;
+static struct device_node *pxa_gpio_of_node;
#endif
struct pxa_gpio_chip {
@@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
+#ifdef CONFIG_OF_GPIO
+static int pxa_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (gpiospec->args[0] > pxa_last_gpio)
+ return -EINVAL;
+
+ if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0] % 32;
+}
+#endif
+
static int __devinit pxa_init_gpio_chip(int gpio_end,
int (*set_wake)(unsigned int, unsigned int))
{
@@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end,
c->get = pxa_gpio_get;
c->set = pxa_gpio_set;
c->to_irq = pxa_gpio_to_irq;
+#ifdef CONFIG_OF_GPIO
+ c->of_node = pxa_gpio_of_node;
+ c->of_xlate = pxa_gpio_of_xlate;
+ c->of_gpio_n_cells = 2;
+#endif
/* number of GPIOs on last bank may be less than 32 */
c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -488,6 +512,7 @@ static int pxa_gpio_nums(void)
return count;
}
+#ifdef CONFIG_OF
static struct of_device_id pxa_gpio_dt_ids[] = {
{ .compatible = "mrvl,pxa-gpio" },
{ .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO },
@@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq,
const struct irq_domain_ops pxa_irq_domain_ops = {
.map = pxa_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
-#ifdef CONFIG_OF
static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
{
int ret, nr_banks, nr_gpios, irq_base;
@@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev)
}
domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0,
&pxa_irq_domain_ops, NULL);
+ pxa_gpio_of_node = np;
return 0;
err:
iounmap(gpio_reg_base);
@@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = {
.probe = pxa_gpio_probe,
.driver = {
.name = "pxa-gpio",
- .of_match_table = pxa_gpio_dt_ids,
+ .of_match_table = of_match_ptr(pxa_gpio_dt_ids),
},
};
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index e97016af6443..b62d443e9a59 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.owner = THIS_MODULE;
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index b6453d0e44ad..ba126cc04073 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
},
}, {
.chip = {
- .base = EXYNOS5_GPC4(0),
- .ngpio = EXYNOS5_GPIO_C4_NR,
- .label = "GPC4",
- },
- }, {
- .chip = {
.base = EXYNOS5_GPD0(0),
.ngpio = EXYNOS5_GPIO_D0_NR,
.label = "GPD0",
@@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
.label = "GPY6",
},
}, {
+ .chip = {
+ .base = EXYNOS5_GPC4(0),
+ .ngpio = EXYNOS5_GPIO_C4_NR,
+ .label = "GPC4",
+ },
+ }, {
.config = &samsung_gpio_cfgs[9],
.irq_base = IRQ_EINT(0),
.chip = {
@@ -2681,11 +2681,14 @@ static int exynos_gpio_xlate(struct gpio_chip *gc,
if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
pr_warn("gpio_xlate: failed to set pin function\n");
- if (s3c_gpio_setpull(pin, gpiospec->args[2]))
+ if (s3c_gpio_setpull(pin, gpiospec->args[2] & 0xffff))
pr_warn("gpio_xlate: failed to set pin pull up/down\n");
if (s5p_gpio_set_drvstr(pin, gpiospec->args[3]))
pr_warn("gpio_xlate: failed to set pin drive strength\n");
+ if (flags)
+ *flags = gpiospec->args[2] >> 16;
+
return gpiospec->args[0];
}
@@ -2833,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void)
}
/* need to set base address for gpc4 */
- exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+ exynos5_gpios_1[20].base = gpio_base1 + 0x2E0;
/* need to set base address for gpx */
chip = &exynos5_gpios_1[21];
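The exynos_gpio_xlate change above splits the third specifier cell: the low 16 bits keep the pull-up/down configuration and the high 16 bits carry generic gpiolib flags. A hedged sketch of packing and unpacking such a cell value (macro and helper names are illustrative, not taken from the bindings):

#include <linux/types.h>

/* Illustrative helpers for the packed third GPIO cell. */
#define EXYNOS_GPIO_PULL(cell)	((cell) & 0xffff)	/* low 16 bits: pull config */
#define EXYNOS_GPIO_FLAGS(cell)	((cell) >> 16)		/* high 16 bits: gpiolib flags */

static inline u32 exynos_pack_gpio_cell(u16 pull, u16 flags)
{
	return ((u32)flags << 16) | pull;
}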
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 424dce8e3f30..8707d4572a06 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
break;
default:
- return -ENODEV;
+ err = -ENODEV;
+ goto err_sch_gpio_core;
}
sch_gpio_core.dev = &pdev->dev;
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
new file mode 100644
index 000000000000..2526b3bb0fae
--- /dev/null
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -0,0 +1,158 @@
+/*
+ * TI TPS6586x GPIO driver
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * Based on tps6586x.c
+ * Copyright (c) 2010 CompuLab Ltd.
+ * Mike Rapoport <mike@compulab.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/tps6586x.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/* GPIO control registers */
+#define TPS6586X_GPIOSET1 0x5d
+#define TPS6586X_GPIOSET2 0x5e
+
+struct tps6586x_gpio {
+ struct gpio_chip gpio_chip;
+ struct device *parent;
+};
+
+static inline struct tps6586x_gpio *to_tps6586x_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tps6586x_gpio, gpio_chip);
+}
+
+static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+ uint8_t val;
+ int ret;
+
+ ret = tps6586x_read(tps6586x_gpio->parent, TPS6586X_GPIOSET2, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & (1 << offset));
+}
+
+static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+
+ tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
+ value << offset, 1 << offset);
+}
+
+static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ struct tps6586x_gpio *tps6586x_gpio = to_tps6586x_gpio(gc);
+ uint8_t val, mask;
+
+ tps6586x_gpio_set(gc, offset, value);
+
+ val = 0x1 << (offset * 2);
+ mask = 0x3 << (offset * 2);
+
+ return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET1,
+ val, mask);
+}
+
+static int __devinit tps6586x_gpio_probe(struct platform_device *pdev)
+{
+ struct tps6586x_platform_data *pdata;
+ struct tps6586x_gpio *tps6586x_gpio;
+ int ret;
+
+ pdata = dev_get_platdata(pdev->dev.parent);
+ tps6586x_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*tps6586x_gpio), GFP_KERNEL);
+ if (!tps6586x_gpio) {
+ dev_err(&pdev->dev, "Could not allocate tps6586x_gpio\n");
+ return -ENOMEM;
+ }
+
+ tps6586x_gpio->parent = pdev->dev.parent;
+
+ tps6586x_gpio->gpio_chip.owner = THIS_MODULE;
+ tps6586x_gpio->gpio_chip.label = pdev->name;
+ tps6586x_gpio->gpio_chip.dev = &pdev->dev;
+ tps6586x_gpio->gpio_chip.ngpio = 4;
+ tps6586x_gpio->gpio_chip.can_sleep = 1;
+
+ /* FIXME: add handling of GPIOs as dedicated inputs */
+ tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
+ tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
+ tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
+
+#ifdef CONFIG_OF_GPIO
+ tps6586x_gpio->gpio_chip.of_node = pdev->dev.parent->of_node;
+#endif
+ if (pdata && pdata->gpio_base)
+ tps6586x_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ tps6586x_gpio->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&tps6586x_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, tps6586x_gpio);
+
+ return ret;
+}
+
+static int __devexit tps6586x_gpio_remove(struct platform_device *pdev)
+{
+ struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
+
+ return gpiochip_remove(&tps6586x_gpio->gpio_chip);
+}
+
+static struct platform_driver tps6586x_gpio_driver = {
+ .driver.name = "tps6586x-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tps6586x_gpio_probe,
+ .remove = __devexit_p(tps6586x_gpio_remove),
+};
+
+static int __init tps6586x_gpio_init(void)
+{
+ return platform_driver_register(&tps6586x_gpio_driver);
+}
+subsys_initcall(tps6586x_gpio_init);
+
+static void __exit tps6586x_gpio_exit(void)
+{
+ platform_driver_unregister(&tps6586x_gpio_driver);
+}
+module_exit(tps6586x_gpio_exit);
+
+MODULE_ALIAS("platform:tps6586x-gpio");
+MODULE_DESCRIPTION("GPIO interface for TPS6586X PMIC");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
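The new driver above encodes the TPS6586x register layout: GPIOSET2 carries one level bit per GPIO, while GPIOSET1 holds a two-bit mode field per GPIO, with 0x1 selecting output. A small sketch of the bit arithmetic, assuming that layout (helper names are illustrative):

#include <linux/types.h>

/* Illustrative only: the masks used by the get/set/output callbacks above. */
static inline u8 tps6586x_level_mask(unsigned offset)
{
	return 1 << offset;			/* GPIOSET2: one level bit per GPIO */
}

static inline u8 tps6586x_mode_output(unsigned offset, u8 *mask)
{
	*mask = 0x3 << (offset * 2);		/* GPIOSET1: two mode bits per GPIO */
	return 0x1 << (offset * 2);		/* mode value 0x1 == output */
}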
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index aa61ad2fcaaa..1c764e779d80 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -19,6 +19,7 @@
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/pdata.h>
@@ -112,10 +113,7 @@ static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
- if (!wm8994->irq_base)
- return -EINVAL;
-
- return wm8994->irq_base + offset;
+ return regmap_irq_get_virq(wm8994->irq_data, offset);
}
@@ -254,7 +252,8 @@ static int __devinit wm8994_gpio_probe(struct platform_device *pdev)
struct wm8994_gpio *wm8994_gpio;
int ret;
- wm8994_gpio = kzalloc(sizeof(*wm8994_gpio), GFP_KERNEL);
+ wm8994_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm8994_gpio),
+ GFP_KERNEL);
if (wm8994_gpio == NULL)
return -ENOMEM;
@@ -279,20 +278,14 @@ static int __devinit wm8994_gpio_probe(struct platform_device *pdev)
return ret;
err:
- kfree(wm8994_gpio);
return ret;
}
static int __devexit wm8994_gpio_remove(struct platform_device *pdev)
{
struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
- int ret;
- ret = gpiochip_remove(&wm8994_gpio->gpio_chip);
- if (ret == 0)
- kfree(wm8994_gpio);
-
- return ret;
+ return gpiochip_remove(&wm8994_gpio->gpio_chip);
}
static struct platform_driver wm8994_gpio_driver = {
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index d18068a9f3ec..f1a45997aea8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -21,7 +21,7 @@
#include <linux/of_gpio.h>
#include <linux/slab.h>
-/* Private data structure for of_gpiochip_is_match */
+/* Private data structure for of_gpiochip_find_and_xlate */
struct gg_data {
enum of_gpio_flags *flags;
struct of_phandle_args gpiospec;
@@ -62,7 +62,10 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
- struct gg_data gg_data = { .flags = flags, .out_gpio = -ENODEV };
+ /* Return -EPROBE_DEFER so that probe() functions can be retried
+ * later, once the GPIO actually becomes available
+ */
+ struct gg_data gg_data = { .flags = flags, .out_gpio = -EPROBE_DEFER };
int ret;
/* .of_xlate might decide to not fill in the flags, so clear it. */
@@ -73,13 +76,13 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
&gg_data.gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
- return -EINVAL;
+ return ret;
}
gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
of_node_put(gg_data.gpiospec.np);
- pr_debug("%s exited with status %d\n", __func__, ret);
+ pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
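With the change above, of_get_named_gpio_flags() can return -EPROBE_DEFER when the providing gpiochip has not registered yet, so callers should propagate that code rather than treat it as a hard failure. A hedged consumer-side sketch (the property name and label are hypothetical):

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>

/* Illustrative consumer handling of deferred GPIO lookup. */
static int example_request_reset_gpio(struct device *dev)
{
	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, &flags);
	if (gpio == -EPROBE_DEFER)
		return gpio;		/* provider not registered yet; retry probe later */
	if (!gpio_is_valid(gpio))
		return gpio;

	return gpio_request_one(gpio,
				(flags & OF_GPIO_ACTIVE_LOW) ?
					GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
				"example-reset");
}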
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 120b2a0e3167..de0213c9d11c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1186,7 +1186,7 @@ int gpio_request(unsigned gpio, const char *label)
{
struct gpio_desc *desc;
struct gpio_chip *chip;
- int status = -EINVAL;
+ int status = -EPROBE_DEFER;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 23120c00a881..90e28081712d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -22,6 +22,7 @@ menuconfig DRM
config DRM_USB
tristate
depends on DRM
+ depends on USB_ARCH_HAS_HCD
select USB
config DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 65f9d231af14..7282c081fb53 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -460,8 +460,8 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool ast_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
@@ -680,7 +680,7 @@ static void ast_encoder_dpms(struct drm_encoder *encoder, int mode)
}
static bool ast_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 100f6308c509..a44d31aa4e3c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -97,7 +97,7 @@ static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
* to just pass that straight through, so this does nothing
*/
static bool cirrus_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -429,8 +429,8 @@ void cirrus_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
static bool cirrus_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 348b367debeb..b356c719f2f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -641,8 +641,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
@@ -704,7 +702,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -796,13 +793,11 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
order = drm_order(request->size);
size = 1 << order;
- DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
- request->count, request->size, size, order, dev->queue_count);
+ DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
+ request->count, request->size, size, order);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
@@ -904,7 +899,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1019,8 +1013,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
@@ -1071,7 +1063,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
@@ -1177,8 +1168,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
- if (dev->queue_count)
- return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
@@ -1228,7 +1217,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
- init_waitqueue_head(&buf->dma_wait);
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 08a7aa722d6b..6fbfc244748f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (!req->flags)
+ if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 1c7a1c0d3edd..70b13fc19396 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,7 +46,6 @@ static struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index cfb4e333ec0f..08f5e5309b22 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -120,11 +120,6 @@ void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
buf->pending = 0;
buf->file_priv = NULL;
buf->used = 0;
-
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
- && waitqueue_active(&buf->dma_wait)) {
- wake_up_interruptible(&buf->dma_wait);
- }
}
/**
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8a9d0792e4ec..9238de4009fa 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -182,7 +182,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
int drm_lastclose(struct drm_device * dev)
{
struct drm_vma_entry *vma, *vma_temp;
- int i;
DRM_DEBUG("\n");
@@ -228,16 +227,6 @@ int drm_lastclose(struct drm_device * dev)
kfree(vma);
}
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
- for (i = 0; i < dev->queue_count; i++) {
- kfree(dev->queuelist[i]);
- dev->queuelist[i] = NULL;
- }
- kfree(dev->queuelist);
- dev->queuelist = NULL;
- }
- dev->queue_count = 0;
-
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!drm_core_check_feature(dev, DRIVER_MODESET))
drm_dma_takedown(dev);
@@ -486,7 +475,7 @@ long drm_ioctl(struct file *filp,
kfree(kdata);
atomic_dec(&dev->ioctl_count);
if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
+ DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a8743c399e83..b7ee230572b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -87,6 +87,9 @@ static struct edid_quirk {
int product_id;
u32 quirks;
} edid_quirk_list[] = {
+ /* ASUS VW222S */
+ { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 66d4a28ad5a2..0303935d10e2 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name,
{
const struct firmware *fw;
struct platform_device *pdev;
- u8 *fwdata = NULL, *edid;
+ u8 *fwdata = NULL, *edid, *new_edid;
int fwsize, expected;
int builtin = 0, err = 0;
int i, valid_extensions = 0;
@@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name,
"\"%s\" for connector \"%s\"\n", valid_extensions,
edid[0x7e], name, connector_name);
edid[0x7e] = valid_extensions;
- edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
+ new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
GFP_KERNEL);
- if (edid == NULL) {
+ if (new_edid == NULL) {
err = -ENOMEM;
+ kfree(edid);
goto relfw_out;
}
+ edid = new_edid;
}
connector->display_info.raw_edid = edid;
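The hunk above fixes the classic krealloc pitfall: assigning the result back to the only pointer leaks the original buffer when reallocation fails. The safe pattern, in generic form (function name is illustrative):

#include <linux/slab.h>
#include <linux/types.h>

/* Generic sketch of the krealloc pattern applied by the fix above. */
static void *grow_buffer(void *buf, size_t new_size)
{
	void *tmp = krealloc(buf, new_size, GFP_KERNEL);

	if (!tmp) {
		kfree(buf);	/* old buffer is still valid and must be freed */
		return NULL;
	}

	return tmp;		/* may or may not be the same address as buf */
}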
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5683b7fdd746..f546d1e8af82 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -228,7 +228,7 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
int i, ret;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
- ret = drm_crtc_helper_set_config(mode_set);
+ ret = mode_set->crtc->funcs->set_config(mode_set);
if (ret)
error = true;
}
@@ -1353,7 +1353,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
- bool bound = false, crtcs_bound = false;
+ int bound = 0, crtcs_bound = 0;
struct drm_crtc *crtc;
if (!fb_helper->fb)
@@ -1362,12 +1362,12 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
- crtcs_bound = true;
+ crtcs_bound++;
if (crtc->fb == fb_helper->fb)
- bound = true;
+ bound++;
}
- if (!bound && crtcs_bound) {
+ if (bound < crtcs_bound) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&dev->mode_config.mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 123de28f94ef..5062eec673f1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -75,10 +75,6 @@ static int drm_setup(struct drm_device * dev)
dev->sigdata.lock = NULL;
- dev->queue_count = 0;
- dev->queue_reserved = 0;
- dev->queue_slots = 0;
- dev->queuelist = NULL;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
@@ -144,12 +140,12 @@ int drm_open(struct inode *inode, struct file *filp)
}
if (!retcode) {
mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
- else if (dev->dev_mapping != inode->i_mapping)
- retcode = -ENODEV;
- }
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = &inode->i_data;
+ /* ihold ensures nobody can remove inode with our i_data */
+ ihold(container_of(dev->dev_mapping, struct inode, i_data));
+ inode->i_mapping = dev->dev_mapping;
+ filp->f_mapping = dev->dev_mapping;
mutex_unlock(&dev->struct_mutex);
}
@@ -370,72 +366,16 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
-/*
- * Reclaim locked buffers; note that this may be a bad idea if the current
- * context doesn't have the hw lock...
- */
-static void drm_reclaim_locked_buffers(struct drm_device *dev, struct file *f)
-{
- struct drm_file *file_priv = f->private_data;
-
- if (drm_i_have_hw_lock(dev, file_priv)) {
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- } else {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int locked = 0;
-
- drm_idlelock_take(&file_priv->master->lock);
-
- /*
- * Wait for a while.
- */
- do {
- spin_lock_bh(&file_priv->master->lock.spinlock);
- locked = file_priv->master->lock.idle_has_lock;
- spin_unlock_bh(&file_priv->master->lock.spinlock);
- if (locked)
- break;
- schedule();
- } while (!time_after_eq(jiffies, _end));
-
- if (!locked) {
- DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
- "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
- "\tI will go on reclaiming the buffers anyway.\n");
- }
-
- dev->driver->reclaim_buffers_locked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-}
-
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
- if (dev->driver->reclaim_buffers_locked &&
- file_priv->master->lock.hw_lock)
- drm_reclaim_locked_buffers(dev, filp);
-
- if (dev->driver->reclaim_buffers_idlelocked &&
- file_priv->master->lock.hw_lock) {
- drm_idlelock_take(&file_priv->master->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
- drm_idlelock_release(&file_priv->master->lock);
- }
-
-
if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
drm_lock_free(&file_priv->master->lock,
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, file_priv);
- }
}
static void drm_events_release(struct drm_file *file_priv)
@@ -505,6 +445,9 @@ int drm_release(struct inode *inode, struct file *filp)
if (file_priv->minor->master)
drm_master_release(dev, filp);
+ if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+ drm_core_reclaim_buffers(dev, file_priv);
+
drm_events_release(file_priv);
if (dev->driver->driver_features & DRIVER_MODESET)
@@ -566,6 +509,9 @@ int drm_release(struct inode *inode, struct file *filp)
}
}
+ BUG_ON(dev->dev_mapping == NULL);
+ iput(container_of(dev->dev_mapping, struct inode, i_data));
+
/* drop the reference held by the file priv */
drm_master_put(&file_priv->master);
file_priv->is_master = 0;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d58e69da1fb5..fbe0842038b5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -354,7 +354,7 @@ drm_gem_create_mmap_offset(struct drm_gem_object *obj)
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->size / PAGE_SIZE, 0, false);
if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index ab1162da70f8..8928edbb94c7 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -110,42 +110,6 @@ int drm_vm_info(struct seq_file *m, void *data)
}
/**
- * Called when "/proc/dri/.../queues" is read.
- */
-int drm_queues_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int i;
- struct drm_queue *q;
-
- mutex_lock(&dev->struct_mutex);
- seq_printf(m, " ctx/flags use fin"
- " blk/rw/rwf wait flushed queued"
- " locks\n\n");
- for (i = 0; i < dev->queue_count; i++) {
- q = dev->queuelist[i];
- atomic_inc(&q->use_count);
- seq_printf(m, "%5d/0x%03x %5d %5d"
- " %5d/%c%c/%c%c%c %5Zd\n",
- i,
- q->flags,
- atomic_read(&q->use_count),
- atomic_read(&q->finalization),
- atomic_read(&q->block_count),
- atomic_read(&q->block_read) ? 'r' : '-',
- atomic_read(&q->block_write) ? 'w' : '-',
- waitqueue_active(&q->read_queue) ? 'r' : '-',
- waitqueue_active(&q->write_queue) ? 'w' : '-',
- waitqueue_active(&q->flush_queue) ? 'f' : '-',
- DRM_BUFCOUNT(&q->waitlist));
- atomic_dec(&q->use_count);
- }
- mutex_unlock(&dev->struct_mutex);
- return 0;
-}
-
-/**
* Called when "/proc/dri/.../bufs" is read.
*/
int drm_bufs_info(struct seq_file *m, void *data)
@@ -235,7 +199,7 @@ int drm_clients_info(struct seq_file *m, void *data)
}
-int drm_gem_one_name_info(int id, void *ptr, void *data)
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c798eeae0a03..03f16f352fe2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -974,7 +974,6 @@ EXPORT_SYMBOL(drm_vblank_off);
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
- * @post: post or pre mode set?
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
@@ -1037,6 +1036,10 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
if (!dev->num_crtcs)
return 0;
+ /* KMS drivers handle this internally */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 521152041691..32039553e172 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -70,10 +70,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
lock->context, task_pid_nr(current),
master->lock.hw_lock->lock, lock->flags);
- if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock->context < 0)
- return -EINVAL;
-
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 961fb54f4266..9bb82f7f0061 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -118,45 +118,53 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
+ unsigned long size, unsigned alignment,
+ unsigned long color)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (alignment)
- tmp = hole_start % alignment;
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
- if (!tmp) {
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
+
+ if (adj_start == hole_start) {
hole_node->hole_follows = 0;
- list_del_init(&hole_node->hole_stack);
- } else
- wasted = alignment - tmp;
+ list_del(&hole_node->hole_stack);
+ }
- node->start = hole_start + wasted;
+ node->start = adj_start;
node->size = size;
node->mm = mm;
+ node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
- BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > adj_end);
+ node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
- } else {
- node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
int atomic)
{
struct drm_mm_node *node;
@@ -165,7 +173,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
if (unlikely(node == NULL))
return NULL;
- drm_mm_insert_helper(hole_node, node, size, alignment);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color);
return node;
}
@@ -181,11 +189,11 @@ int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
{
struct drm_mm_node *hole_node;
- hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ hole_node = drm_mm_search_free(mm, size, alignment, false);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment);
+ drm_mm_insert_helper(hole_node, node, size, alignment, 0);
return 0;
}
@@ -194,50 +202,57 @@ EXPORT_SYMBOL(drm_mm_insert_node);
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
+ unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
- unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+ unsigned long adj_start = hole_start;
+ unsigned long adj_end = hole_end;
BUG_ON(!hole_node->hole_follows || node->allocated);
- if (hole_start < start)
- wasted += start - hole_start;
- if (alignment)
- tmp = (hole_start + wasted) % alignment;
+ if (mm->color_adjust)
+ mm->color_adjust(hole_node, color, &adj_start, &adj_end);
- if (tmp)
- wasted += alignment - tmp;
+ if (adj_start < start)
+ adj_start = start;
+
+ if (alignment) {
+ unsigned tmp = adj_start % alignment;
+ if (tmp)
+ adj_start += alignment - tmp;
+ }
- if (!wasted) {
+ if (adj_start == hole_start) {
hole_node->hole_follows = 0;
- list_del_init(&hole_node->hole_stack);
+ list_del(&hole_node->hole_stack);
}
- node->start = hole_start + wasted;
+ node->start = adj_start;
node->size = size;
node->mm = mm;
+ node->color = color;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
- BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
+ node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
- } else {
- node->hole_follows = 0;
}
}
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
@@ -248,7 +263,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
if (unlikely(node == NULL))
return NULL;
- drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
return node;
@@ -267,11 +282,11 @@ int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
- start, end, 0);
+ start, end, false);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
start, end);
return 0;
@@ -336,27 +351,23 @@ EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
- unsigned wasted = 0;
-
if (end - start < size)
return 0;
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
- wasted = alignment - tmp;
- }
-
- if (end >= start + size + wasted) {
- return 1;
+ start += alignment - tmp;
}
- return 0;
+ return end >= start + size;
}
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment, int best_match)
+struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -368,10 +379,17 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best_size = ~0UL;
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry);
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
+
BUG_ON(!entry->hole_follows);
- if (!check_free_hole(drm_mm_hole_node_start(entry),
- drm_mm_hole_node_end(entry),
- size, alignment))
+ if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
if (!best_match)
@@ -385,14 +403,15 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free);
-
-struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int best_match)
+EXPORT_SYMBOL(drm_mm_search_free_generic);
+
+struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -410,6 +429,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
end : drm_mm_hole_node_end(entry);
BUG_ON(!entry->hole_follows);
+
+ if (mm->color_adjust) {
+ mm->color_adjust(entry, color, &adj_start, &adj_end);
+ if (adj_end <= adj_start)
+ continue;
+ }
+
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -424,7 +450,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range);
+EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -437,6 +463,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
+ new->color = old->color;
old->allocated = 0;
new->allocated = 1;
@@ -452,9 +479,12 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
- unsigned alignment)
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color)
{
+ mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
@@ -474,11 +504,14 @@ EXPORT_SYMBOL(drm_mm_init_scan);
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end)
{
+ mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
@@ -522,17 +555,21 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
+
+ adj_start = hole_start;
+ adj_end = hole_end;
+
+ if (mm->color_adjust)
+ mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
+
if (mm->scan_check_range) {
- adj_start = hole_start < mm->scan_start ?
- mm->scan_start : hole_start;
- adj_end = hole_end > mm->scan_end ?
- mm->scan_end : hole_end;
- } else {
- adj_start = hole_start;
- adj_end = hole_end;
+ if (adj_start < mm->scan_start)
+ adj_start = mm->scan_start;
+ if (adj_end > mm->scan_end)
+ adj_end = mm->scan_end;
}
- if (check_free_hole(adj_start , adj_end,
+ if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
@@ -616,6 +653,8 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->color_adjust = NULL;
+
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
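The new color parameter lets callers keep differently "colored" allocations apart: drm_mm itself only invokes the optional mm->color_adjust hook to shrink the usable part of a hole before the size/alignment check. A hedged sketch of such a hook, where node is the node preceding the hole and the one-page guard size is purely illustrative:

#include <drm/drm_mm.h>

/*
 * Illustrative color_adjust hook: leave a one-page guard gap after a node
 * whose color differs from the requested one (e.g. a different cache domain).
 */
static void example_color_adjust(struct drm_mm_node *node, unsigned long color,
				 unsigned long *start, unsigned long *end)
{
	if (node->color != color)
		*start += 4096;
}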
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b7adb4a967fd..28637c181b15 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
-
- p->crtc_hadjusted = false;
- p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 13f3d936472f..5320364582ce 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -465,3 +465,52 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
+{
+ struct pci_dev *root;
+ int pos;
+ u32 lnkcap, lnkcap2;
+
+ *mask = 0;
+ if (!dev->pdev)
+ return -EINVAL;
+
+ if (!pci_is_pcie(dev->pdev))
+ return -EINVAL;
+
+ root = dev->pdev->bus->self;
+
+ pos = pci_pcie_cap(root);
+ if (!pos)
+ return -EINVAL;
+
+ /* we've been informed that VIA and ServerWorks root ports don't make the cut */
+ if (root->vendor == PCI_VENDOR_ID_VIA ||
+ root->vendor == PCI_VENDOR_ID_SERVERWORKS)
+ return -EINVAL;
+
+ pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
+ pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
+
+ lnkcap &= PCI_EXP_LNKCAP_SLS;
+ lnkcap2 &= 0xfe;
+
+ if (lnkcap2) { /* PCIE GEN 3.0 */
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
+ *mask |= DRM_PCIE_SPEED_50;
+ if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
+ *mask |= DRM_PCIE_SPEED_80;
+ } else {
+ if (lnkcap & 1)
+ *mask |= DRM_PCIE_SPEED_25;
+ if (lnkcap & 2)
+ *mask |= DRM_PCIE_SPEED_50;
+ }
+
+ DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
+ return 0;
+}
+EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
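A hedged sketch of how a driver might consume the new helper to decide which PCIe generation to train the link to (the fallback policy shown here is illustrative, not from the patch):

#include <drm/drmP.h>

/* Illustrative: pick the highest PCIe gen reported in the speed cap mask. */
static int example_pick_pcie_gen(struct drm_device *dev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask))
		return 1;			/* query failed: stay on gen1 */

	if (mask & DRM_PCIE_SPEED_80)
		return 3;
	if (mask & DRM_PCIE_SPEED_50)
		return 2;

	return 1;
}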
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index fff87221f9e9..da457b18eaaf 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -53,7 +53,6 @@ static struct drm_info_list drm_proc_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
- {"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
@@ -90,7 +89,7 @@ static const struct file_operations drm_proc_fops = {
* Create a given set of proc files represented by an array of
* gdm_proc_lists in the given root directory.
*/
-int drm_proc_create_files(struct drm_info_list *files, int count,
+static int drm_proc_create_files(struct drm_info_list *files, int count,
struct proc_dir_entry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
@@ -173,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
return 0;
}
-int drm_proc_remove_files(struct drm_info_list *files, int count,
+static int drm_proc_remove_files(struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 45cf1dd3eb9c..45ac8d6c92b7 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -134,6 +134,7 @@ void drm_sysfs_destroy(void)
return;
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
+ drm_class = NULL;
}
/**
@@ -554,6 +555,9 @@ void drm_sysfs_device_remove(struct drm_minor *minor)
int drm_class_device_register(struct device *dev)
{
+ if (!drm_class || IS_ERR(drm_class))
+ return -ENOENT;
+
dev->class = drm_class;
return device_register(dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index bf791fa0e50d..d9568198c300 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -196,7 +196,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
return ret;
}
-struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+static struct drm_encoder *exynos_drm_best_encoder(
+ struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index eaf630dc5dba..84dd099eae3b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -33,7 +33,6 @@
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
-static struct drm_device *drm_dev;
static int exynos_drm_subdrv_probe(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
@@ -120,8 +119,6 @@ int exynos_drm_device_register(struct drm_device *dev)
if (!dev)
return -EINVAL;
- drm_dev = dev;
-
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
subdrv->drm_dev = dev;
err = exynos_drm_subdrv_probe(dev, subdrv);
@@ -149,8 +146,6 @@ int exynos_drm_device_unregister(struct drm_device *dev)
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
exynos_drm_subdrv_remove(dev, subdrv);
- drm_dev = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4afb625128d7..abb1e2f8227f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,21 +29,23 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
+enum exynos_crtc_mode {
+ CRTC_MODE_NORMAL, /* normal mode */
+ CRTC_MODE_BLANK, /* The private plane of crtc is blank */
+};
+
/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
- * @overlay: contain information common to display controller and hdmi and
- * contents of this overlay object would be copied to sub driver size.
+ * @drm_plane: pointer to the private plane object for this crtc
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
@@ -52,115 +54,16 @@
* we can look up the crtc on which the current hardware interrupt occurred
* through this pipe value.
* @dpms: store the crtc dpms value
+ * @mode: store the crtc mode value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
- struct exynos_drm_overlay overlay;
+ struct drm_plane *plane;
unsigned int pipe;
unsigned int dpms;
+ enum exynos_crtc_mode mode;
};
-static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
-
- exynos_drm_fn_encoder(crtc, overlay,
- exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
- exynos_drm_encoder_crtc_commit);
-}
-
-int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
-{
- struct exynos_drm_gem_buf *buffer;
- unsigned int actual_w;
- unsigned int actual_h;
- int nr = exynos_drm_format_num_buffers(fb->pixel_format);
- int i;
-
- for (i = 0; i < nr; i++) {
- buffer = exynos_drm_fb_buffer(fb, i);
- if (!buffer) {
- DRM_LOG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
- overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
-
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
- }
-
- actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
- actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
-
- /* set drm framebuffer data. */
- overlay->fb_x = pos->fb_x;
- overlay->fb_y = pos->fb_y;
- overlay->fb_width = fb->width;
- overlay->fb_height = fb->height;
- overlay->src_width = pos->src_w;
- overlay->src_height = pos->src_h;
- overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitches[0];
- overlay->pixel_format = fb->pixel_format;
-
- /* set overlay range to be displayed. */
- overlay->crtc_x = pos->crtc_x;
- overlay->crtc_y = pos->crtc_y;
- overlay->crtc_width = actual_w;
- overlay->crtc_height = actual_h;
-
- /* set drm mode data. */
- overlay->mode_width = mode->hdisplay;
- overlay->mode_height = mode->vdisplay;
- overlay->refresh = mode->vrefresh;
- overlay->scan_flag = mode->flags;
-
- DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
- overlay->crtc_x, overlay->crtc_y,
- overlay->crtc_width, overlay->crtc_height);
-
- return 0;
-}
-
-static int exynos_drm_crtc_update(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc;
- struct exynos_drm_overlay *overlay;
- struct exynos_drm_crtc_pos pos;
- struct drm_display_mode *mode = &crtc->mode;
- struct drm_framebuffer *fb = crtc->fb;
-
- if (!mode || !fb)
- return -EINVAL;
-
- exynos_crtc = to_exynos_crtc(crtc);
- overlay = &exynos_crtc->overlay;
-
- memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
-
- /* it means the offset of framebuffer to be displayed. */
- pos.fb_x = crtc->x;
- pos.fb_y = crtc->y;
-
- /* OSD position to be displayed. */
- pos.crtc_x = 0;
- pos.crtc_y = 0;
- pos.crtc_w = fb->width - crtc->x;
- pos.crtc_h = fb->height - crtc->y;
- pos.src_w = pos.crtc_w;
- pos.src_h = pos.crtc_h;
-
- return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
-}
-
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -175,23 +78,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
mutex_lock(&dev->struct_mutex);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_crtc_dpms);
- exynos_crtc->dpms = mode;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_crtc_dpms);
- exynos_crtc->dpms = mode;
- break;
- default:
- DRM_ERROR("unspecified mode %d\n", mode);
- break;
- }
+ exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
mutex_unlock(&dev->struct_mutex);
}
@@ -209,35 +97,13 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%s\n", __FILE__);
- /*
- * when set_crtc is requested from user or at booting time,
- * crtc->commit would be called without dpms call so if dpms is
- * no power on then crtc->dpms should be called
- * with DRM_MODE_DPMS_ON for the hardware power to be on.
- */
- if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
- int mode = DRM_MODE_DPMS_ON;
-
- /*
- * enable hardware(power on) to all encoders hdmi connected
- * to current crtc.
- */
- exynos_drm_crtc_dpms(crtc, mode);
- /*
- * enable dma to all encoders connected to current crtc and
- * lcd panel.
- */
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_dpms_from_crtc);
- }
-
- exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
- exynos_drm_encoder_crtc_commit);
+ exynos_plane_commit(exynos_crtc->plane);
+ exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
}
static bool
exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -251,31 +117,61 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ int pipe = exynos_crtc->pipe;
+ int ret;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
* so that hardware can be set to the proper mode.
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
- return exynos_drm_crtc_update(crtc);
+ crtc_w = crtc->fb->width - x;
+ crtc_h = crtc->fb->height - y;
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
+ if (ret)
+ return ret;
+
+ plane->crtc = crtc;
+ plane->fb = crtc->fb;
+
+ exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
+
+ return 0;
}
static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- ret = exynos_drm_crtc_update(crtc);
+ crtc_w = crtc->fb->width - x;
+ crtc_h = crtc->fb->height - y;
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
if (ret)
return ret;
- exynos_drm_crtc_apply(crtc);
+ exynos_drm_crtc_commit(crtc);
- return ret;
+ return 0;
}
static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
@@ -284,6 +180,16 @@ static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
/* drm framework doesn't check NULL */
}
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.dpms = exynos_drm_crtc_dpms,
.prepare = exynos_drm_crtc_prepare,
@@ -292,6 +198,7 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.mode_set = exynos_drm_crtc_mode_set,
.mode_set_base = exynos_drm_crtc_mode_set_base,
.load_lut = exynos_drm_crtc_load_lut,
+ .disable = exynos_drm_crtc_disable,
};
static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
@@ -327,7 +234,8 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
&dev_priv->pageflip_event_list);
crtc->fb = fb;
- ret = exynos_drm_crtc_update(crtc);
+ ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
+ NULL);
if (ret) {
crtc->fb = old_fb;
drm_vblank_put(dev, exynos_crtc->pipe);
@@ -335,14 +243,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
-
- /*
- * the values related to a buffer of the drm framebuffer
- * to be applied should be set at here. because these values
- * first, are set to shadow registers and then to
- * real registers at vsync front porch period.
- */
- exynos_drm_crtc_apply(crtc);
}
out:
mutex_unlock(&dev->struct_mutex);
@@ -362,18 +262,73 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(exynos_crtc);
}
+static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (property == dev_priv->crtc_mode_property) {
+ enum exynos_crtc_mode mode = val;
+
+ if (mode == exynos_crtc->mode)
+ return 0;
+
+ exynos_crtc->mode = mode;
+
+ switch (mode) {
+ case CRTC_MODE_NORMAL:
+ exynos_drm_crtc_commit(crtc);
+ break;
+ case CRTC_MODE_BLANK:
+ exynos_plane_dpms(exynos_crtc->plane,
+ DRM_MODE_DPMS_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = exynos_drm_crtc_page_flip,
.destroy = exynos_drm_crtc_destroy,
+ .set_property = exynos_drm_crtc_set_property,
+};
+
+static const struct drm_prop_enum_list mode_names[] = {
+ { CRTC_MODE_NORMAL, "normal" },
+ { CRTC_MODE_BLANK, "blank" },
};
-struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
- struct drm_crtc *crtc)
+static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
- return &exynos_crtc->overlay;
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop = dev_priv->crtc_mode_property;
+ if (!prop) {
+ prop = drm_property_create_enum(dev, 0, "mode", mode_names,
+ ARRAY_SIZE(mode_names));
+ if (!prop)
+ return;
+
+ dev_priv->crtc_mode_property = prop;
+ }
+
+ drm_object_attach_property(&crtc->base, prop, 0);
}
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
@@ -392,7 +347,12 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
exynos_crtc->pipe = nr;
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
- exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
+ exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
+ if (!exynos_crtc->plane) {
+ kfree(exynos_crtc);
+ return -ENOMEM;
+ }
+
crtc = &exynos_crtc->drm_crtc;
private->crtc[nr] = crtc;
@@ -400,6 +360,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+ exynos_drm_crtc_attach_mode_property(crtc);
+
return 0;
}
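The new "mode" enum property (normal/blank) added above is driven from userspace through the standard DRM object-property interface. A hedged libdrm-side sketch of setting it on a CRTC (the lookup-by-name helper is illustrative):

#include <stdint.h>
#include <string.h>
#include <xf86drmMode.h>

/* Illustrative: set a CRTC's "mode" property to the given enum value. */
static int example_set_crtc_mode(int fd, uint32_t crtc_id, uint64_t value)
{
	drmModeObjectPropertiesPtr props;
	uint32_t i;
	int ret = -1;

	props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "mode"))
			ret = drmModeObjectSetProperty(fd, crtc_id,
						       DRM_MODE_OBJECT_CRTC,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return ret;
}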
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 16b8e2195a0d..6bae8d8c250e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -29,39 +29,8 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
-struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
- struct drm_crtc *crtc);
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @src_w: width of source area to be displayed from a framebuffer.
- * @src_h: height of source area to be displayed from a framebuffer.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int src_w;
- unsigned int src_h;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 274909271c36..613bf8a5d9b2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -25,6 +25,7 @@
#include "drmP.h"
#include "drm.h"
+#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
@@ -86,6 +87,10 @@ static struct sg_table *
npages = buf->size / buf->page_size;
sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
+ if (!sgt) {
+ DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ goto err_unlock;
+ }
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
@@ -186,7 +191,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
struct page *page;
- int ret, i = 0;
+ int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -210,7 +215,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
+ if (IS_ERR_OR_NULL(sgt)) {
ret = PTR_ERR(sgt);
goto err_buf_detach;
}
@@ -236,13 +241,25 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
}
sgl = sgt->sgl;
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
+ if (sgt->nents == 1) {
+ buffer->dma_addr = sg_dma_address(sgt->sgl);
+ buffer->size = sg_dma_len(sgt->sgl);
+
+ /* always physically contiguous memory if sgt->nents is 1. */
+ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
+ } else {
+ unsigned int i = 0;
+
+ buffer->dma_addr = sg_dma_address(sgl);
+ while (i < sgt->nents) {
+ buffer->pages[i] = sg_page(sgl);
+ buffer->size += sg_dma_len(sgl);
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
exynos_gem_obj->buffer = buffer;
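The import path above distinguishes a single-entry sg_table (one contiguous DMA region) from a multi-entry one, which it walks by hand with sg_next(). The same walk can be written with the standard for_each_sg() helper from <linux/scatterlist.h>; a sketch only, not part of the patch:

    struct scatterlist *sg;
    unsigned int i;

    buffer->dma_addr = sg_dma_address(sgt->sgl);
    buffer->size = 0;
    for_each_sg(sgt->sgl, sg, sgt->nents, i) {
        buffer->pages[i] = sg_page(sg);
        buffer->size += sg_dma_len(sg);
    }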
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d6de2e07fa03..ebacec6f1e48 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -85,8 +85,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
}
for (nr = 0; nr < MAX_PLANE; nr++) {
- ret = exynos_plane_init(dev, nr);
- if (ret)
+ struct drm_plane *plane;
+ unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
+
+ plane = exynos_plane_init(dev, possible_crtcs, false);
+ if (!plane)
goto err_crtc;
}
@@ -221,8 +224,6 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c82c90c443e7..e22704b249d7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -59,12 +59,14 @@ enum exynos_drm_output_type {
*
* @mode_set: copy drm overlay info to hw specific overlay info.
* @commit: apply hardware specific overlay data to registers.
+ * @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay);
void (*commit)(struct device *subdrv_dev, int zpos);
+ void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
};
@@ -174,7 +176,7 @@ struct exynos_drm_manager_ops {
void (*apply)(struct device *subdrv_dev);
void (*mode_fixup)(struct device *subdrv_dev,
struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*get_max_resol)(struct device *subdrv_dev, unsigned int *width,
@@ -235,6 +237,8 @@ struct exynos_drm_private {
* this array is used to be aware of which crtc did it request vblank.
*/
struct drm_crtc *crtc[MAX_CRTC];
+ struct drm_property *plane_zpos_property;
+ struct drm_property *crtc_mode_property;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 23d5ad379f86..2c037cd7d2d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -30,7 +30,6 @@
#include "drm_crtc_helper.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
@@ -108,7 +107,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
static bool
exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@@ -136,21 +135,16 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
- encoder->crtc);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
+ if (connector->encoder == encoder)
if (manager_ops && manager_ops->mode_set)
manager_ops->mode_set(manager->dev,
adjusted_mode);
-
- if (overlay_ops && overlay_ops->mode_set)
- overlay_ops->mode_set(manager->dev, overlay);
- }
}
}
@@ -310,8 +304,8 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager_ops *manager_ops = manager->ops;
int crtc = *(int *)data;
- if (manager->pipe == -1)
- manager->pipe = crtc;
+ if (manager->pipe != crtc)
+ return;
if (manager_ops->enable_vblank)
manager_ops->enable_vblank(manager->dev);
@@ -324,34 +318,41 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager_ops *manager_ops = manager->ops;
int crtc = *(int *)data;
- if (manager->pipe == -1)
- manager->pipe = crtc;
+ if (manager->pipe != crtc)
+ return;
if (manager_ops->disable_vblank)
manager_ops->disable_vblank(manager->dev);
}
-void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
- void *data)
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- int zpos = DEFAULT_ZPOS;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ int mode = *(int *)data;
- if (data)
- zpos = *(int *)data;
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev, zpos);
+ if (manager_ops && manager_ops->dpms)
+ manager_ops->dpms(manager->dev, mode);
+
+ /*
+ * if this condition holds, the crtc has already been detached from
+ * the encoder and the final detach step has completed, so clear the
+ * pipe in the manager to prevent repeated calls.
+ */
+ if (mode > DRM_MODE_DPMS_ON) {
+ if (!encoder->crtc)
+ manager->pipe = -1;
+ }
}
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
- int crtc = *(int *)data;
- int zpos = DEFAULT_ZPOS;
+ int pipe = *(int *)data;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -359,76 +360,62 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
* when crtc is detached from encoder, this pipe is used
* to select manager operation
*/
- manager->pipe = crtc;
-
- exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+ manager->pipe = pipe;
}
-void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- int mode = *(int *)data;
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ struct exynos_drm_overlay *overlay = data;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_drm_encoder_dpms(encoder, mode);
-
- exynos_encoder->dpms = mode;
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
}
-void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
{
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_manager *manager = exynos_encoder->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct drm_connector *connector;
- int mode = *(int *)data;
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (manager_ops && manager_ops->dpms)
- manager_ops->dpms(manager->dev, mode);
-
- /*
- * set current dpms mode to the connector connected to
- * current encoder. connector->dpms would be checked
- * at drm_helper_connector_dpms()
- */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- connector->dpms = mode;
+ if (data)
+ zpos = *(int *)data;
- /*
- * if this condition is ok then it means that the crtc is already
- * detached from encoder and last function for detaching is properly
- * done, so clear pipe from manager to prevent repeated call.
- */
- if (mode > DRM_MODE_DPMS_ON) {
- if (!encoder->crtc)
- manager->pipe = -1;
- }
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev, zpos);
}
-void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- struct exynos_drm_overlay *overlay = data;
+ int zpos = DEFAULT_ZPOS;
- if (overlay_ops && overlay_ops->mode_set)
- overlay_ops->mode_set(manager->dev, overlay);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->enable)
+ overlay_ops->enable(manager->dev, zpos);
}
-void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
int zpos = DEFAULT_ZPOS;
- DRM_DEBUG_KMS("\n");
+ DRM_DEBUG_KMS("%s\n", __FILE__);
if (data)
zpos = *(int *)data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index eb7d2316847e..6470d9ddf5a1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -40,13 +40,11 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
void (*fn)(struct drm_encoder *, void *));
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
- void *data);
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
- void *data);
void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
#endif
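All of the renamed helpers above are invoked through exynos_drm_fn_encoder(), declared a few lines earlier: it takes a crtc, an opaque data pointer and a callback, and applies the callback to the encoder(s) attached to that crtc. Typical call sites, as they appear in exynos_drm_plane.c later in this patch:

    /* push the current overlay configuration, then commit it at its zpos */
    exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
    exynos_drm_fn_encoder(crtc, &overlay->zpos, exynos_drm_encoder_plane_commit);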
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 29fdbfeb43cb..a68d2b313f03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -78,7 +78,6 @@ struct fimd_context {
struct drm_crtc *crtc;
struct clk *bus_clk;
struct clk *lcd_clk;
- struct resource *regs_res;
void __iomem *regs;
struct fimd_win_data win_data[WINDOWS_NR];
unsigned int clkdiv;
@@ -813,7 +812,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
return -EINVAL;
}
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -838,33 +837,26 @@ static int __devinit fimd_probe(struct platform_device *pdev)
goto err_clk;
}
- ctx->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!ctx->regs_res) {
- dev_err(dev, "failed to claim register region\n");
- ret = -ENOENT;
- goto err_clk;
- }
-
- ctx->regs = ioremap(res->start, resource_size(res));
+ ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!ctx->regs) {
dev_err(dev, "failed to map registers\n");
ret = -ENXIO;
- goto err_req_region_io;
+ goto err_clk;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "irq request failed.\n");
- goto err_req_region_irq;
+ goto err_clk;
}
ctx->irq = res->start;
- ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
- if (ret < 0) {
+ ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler,
+ 0, "drm_fimd", ctx);
+ if (ret) {
dev_err(dev, "irq request failed.\n");
- goto err_req_irq;
+ goto err_clk;
}
ctx->vidcon0 = pdata->vidcon0;
@@ -899,14 +891,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
return 0;
-err_req_irq:
-err_req_region_irq:
- iounmap(ctx->regs);
-
-err_req_region_io:
- release_resource(ctx->regs_res);
- kfree(ctx->regs_res);
-
err_clk:
clk_disable(ctx->lcd_clk);
clk_put(ctx->lcd_clk);
@@ -916,7 +900,6 @@ err_bus_clk:
clk_put(ctx->bus_clk);
err_clk_get:
- kfree(ctx);
return ret;
}
@@ -944,13 +927,6 @@ out:
clk_put(ctx->lcd_clk);
clk_put(ctx->bus_clk);
- iounmap(ctx->regs);
- release_resource(ctx->regs_res);
- kfree(ctx->regs_res);
- free_irq(ctx->irq, ctx);
-
- kfree(ctx);
-
return 0;
}
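The probe conversion above is the usual devm_* pattern: allocations and mappings tied to &pdev->dev are released automatically on probe failure and on driver detach, which is why the iounmap()/release_resource()/free_irq()/kfree() calls drop out of the error paths and out of fimd_remove(). A condensed sketch of the resulting shape (clock handling, which stays manual in the patch, is left out):

    ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
    if (!ctx->regs)
        return -ENXIO;

    res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    if (!res)
        return -ENXIO;

    ret = devm_request_irq(&pdev->dev, res->start, fimd_irq_handler,
                           0, "drm_fimd", ctx);
    if (ret)
        return ret;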
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 5c8b683029ea..f9efde40c097 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -99,25 +99,17 @@ out:
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct inode *inode;
- struct address_space *mapping;
struct page *p, **pages;
int i, npages;
- /* This is the shared memory object that backs the GEM resource */
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
-
npages = obj->size >> PAGE_SHIFT;
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
- gfpmask |= mapping_gfp_mask(mapping);
-
for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ p = alloc_page(gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
@@ -126,31 +118,22 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
return pages;
fail:
- while (i--)
- page_cache_release(pages[i]);
+ while (i--)
+ __free_page(pages[i]);
drm_free_large(pages);
return ERR_PTR(PTR_ERR(p));
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages,
- bool dirty, bool accessed)
+ struct page **pages)
{
- int i, npages;
+ int npages;
npages = obj->size >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- if (accessed)
- mark_page_accessed(pages[i]);
-
- /* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
- }
+ while (--npages >= 0)
+ __free_page(pages[npages]);
drm_free_large(pages);
}
@@ -189,7 +172,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_KERNEL);
+ pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
if (IS_ERR(pages)) {
DRM_ERROR("failed to get pages.\n");
return PTR_ERR(pages);
@@ -230,7 +213,7 @@ err1:
kfree(buf->sgt);
buf->sgt = NULL;
err:
- exynos_gem_put_pages(obj, pages, true, false);
+ exynos_gem_put_pages(obj, pages);
return ret;
}
@@ -248,7 +231,7 @@ static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
kfree(buf->sgt);
buf->sgt = NULL;
- exynos_gem_put_pages(obj, buf->pages, true, false);
+ exynos_gem_put_pages(obj, buf->pages);
buf->pages = NULL;
/* add some codes for UNCACHED type here. TODO */
@@ -291,11 +274,21 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (!buf->pages)
return;
+ /*
+ * do not release the memory region obtained from the exporter.
+ *
+ * the exporter will release the region once the dmabuf's
+ * refcount drops to 0.
+ */
+ if (obj->import_attach)
+ goto out;
+
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
@@ -668,7 +661,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
- args->pitch = args->width * args->bpp >> 3;
+ args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = PAGE_ALIGN(args->pitch * args->height);
exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
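The dumb-buffer pitch now rounds bits-per-pixel up to whole bytes before multiplying, so formats whose bpp is not a multiple of eight no longer truncate. For example, with width = 640 and bpp = 12:

    /* (12 + 7) / 8 = 2 bytes per pixel, so pitch = 640 * 2 = 1280;
     * the old form, width * bpp >> 3, would have given 960.
     */
    pitch = width * ((bpp + 7) / 8);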
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 14d038b6cb02..085b2a5d5f70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -63,7 +63,8 @@ struct exynos_drm_gem_buf {
* by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
- * @size: total memory size to physically non-continuous memory region.
+ * @size: size requested by user, in bytes; this size is page-aligned.
* @flags: indicate memory type of the allocated buffer and its cache attribute.
*
* P.S. this object would be transfered to user as kms_bo.handle so
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 5d9d2c2f8f3f..8ffcdf8b9e22 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -142,7 +142,7 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_hdmi_context *ctx = to_context(subdrv_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index bd8126996e52..a91c42088e42 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -51,7 +51,7 @@ struct exynos_hdmi_ops {
/* manager */
void (*mode_fixup)(void *ctx, struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(void *ctx, void *mode);
void (*get_max_resol)(void *ctx, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c4c6525d4653..b89829e5043a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -12,9 +12,12 @@
#include "drmP.h"
#include "exynos_drm.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+
+#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
struct exynos_plane {
struct drm_plane base;
@@ -30,6 +33,108 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV12MT,
};
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+ unsigned int actual_w;
+ unsigned int actual_h;
+ int nr;
+ int i;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ for (i = 0; i < nr; i++) {
+ struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
+
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
+
+ overlay->dma_addr[i] = buffer->dma_addr;
+ overlay->vaddr[i] = buffer->kvaddr;
+
+ DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->vaddr[i],
+ (unsigned long)overlay->dma_addr[i]);
+ }
+
+ actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w);
+ actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h);
+
+ /* set drm framebuffer data. */
+ overlay->fb_x = src_x;
+ overlay->fb_y = src_y;
+ overlay->fb_width = fb->width;
+ overlay->fb_height = fb->height;
+ overlay->src_width = src_w;
+ overlay->src_height = src_h;
+ overlay->bpp = fb->bits_per_pixel;
+ overlay->pitch = fb->pitches[0];
+ overlay->pixel_format = fb->pixel_format;
+
+ /* set overlay range to be displayed. */
+ overlay->crtc_x = crtc_x;
+ overlay->crtc_y = crtc_y;
+ overlay->crtc_width = actual_w;
+ overlay->crtc_height = actual_h;
+
+ /* set drm mode data. */
+ overlay->mode_width = crtc->mode.hdisplay;
+ overlay->mode_height = crtc->mode.vdisplay;
+ overlay->refresh = crtc->mode.vrefresh;
+ overlay->scan_flag = crtc->mode.flags;
+
+ DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
+ overlay->crtc_x, overlay->crtc_y,
+ overlay->crtc_width, overlay->crtc_height);
+
+ exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
+
+ return 0;
+}
+
+void exynos_plane_commit(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_plane_commit);
+}
+
+void exynos_plane_dpms(struct drm_plane *plane, int mode)
+{
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ if (exynos_plane->enabled)
+ return;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_plane_enable);
+
+ exynos_plane->enabled = true;
+ } else {
+ if (!exynos_plane->enabled)
+ return;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_plane_disable);
+
+ exynos_plane->enabled = false;
+ }
+}
+
static int
exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -37,64 +142,37 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
- struct exynos_drm_crtc_pos pos;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
- pos.crtc_x = crtc_x;
- pos.crtc_y = crtc_y;
- pos.crtc_w = crtc_w;
- pos.crtc_h = crtc_h;
-
- /* considering 16.16 fixed point of source values */
- pos.fb_x = src_x >> 16;
- pos.fb_y = src_y >> 16;
- pos.src_w = src_w >> 16;
- pos.src_h = src_h >> 16;
-
- ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+ ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
+ crtc_w, crtc_h, src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
if (ret < 0)
return ret;
- exynos_drm_fn_encoder(crtc, overlay,
- exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, &overlay->zpos,
- exynos_drm_encoder_crtc_plane_commit);
+ plane->crtc = crtc;
+ plane->fb = crtc->fb;
- exynos_plane->enabled = true;
+ exynos_plane_commit(plane);
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
return 0;
}
static int exynos_disable_plane(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
-
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!exynos_plane->enabled)
- return 0;
-
- exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
- exynos_drm_encoder_crtc_disable);
-
- exynos_plane->enabled = false;
- exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
return 0;
}
static void exynos_plane_destroy(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -103,69 +181,79 @@ static void exynos_plane_destroy(struct drm_plane *plane)
kfree(exynos_plane);
}
+static int exynos_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (property == dev_priv->plane_zpos_property) {
+ exynos_plane->overlay.zpos = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = exynos_update_plane,
.disable_plane = exynos_disable_plane,
.destroy = exynos_plane_destroy,
+ .set_property = exynos_plane_set_property,
};
-int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane;
- uint32_t possible_crtcs;
+ struct drm_device *dev = plane->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
- exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
- if (!exynos_plane)
- return -ENOMEM;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- /* all CRTCs are available */
- possible_crtcs = (1 << MAX_CRTC) - 1;
+ prop = dev_priv->plane_zpos_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ MAX_PLANE - 1);
+ if (!prop)
+ return;
- exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ dev_priv->plane_zpos_property = prop;
+ }
- return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
- &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
- false);
+ drm_object_attach_property(&plane->base, prop, 0);
}
-int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned int possible_crtcs, bool priv)
{
- struct drm_exynos_plane_set_zpos *zpos_req = data;
- struct drm_mode_object *obj;
- struct drm_plane *plane;
struct exynos_plane *exynos_plane;
- int ret = 0;
+ int err;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
- if (zpos_req->zpos != DEFAULT_ZPOS) {
- DRM_ERROR("zpos not within limits\n");
- return -EINVAL;
- }
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane) {
+ DRM_ERROR("failed to allocate plane\n");
+ return NULL;
}
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, zpos_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- zpos_req->plane_id);
- ret = -EINVAL;
- goto out;
+ err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
+ priv);
+ if (err) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(exynos_plane);
+ return NULL;
}
- plane = obj_to_plane(obj);
- exynos_plane = container_of(plane, struct exynos_plane, base);
-
- exynos_plane->overlay.zpos = zpos_req->zpos;
+ if (priv)
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ else
+ exynos_plane_attach_zpos_property(&exynos_plane->base);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return &exynos_plane->base;
}
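With the EXYNOS_PLANE_SET_ZPOS ioctl gone, userspace sets zpos through the generic DRM object-property interface instead. A hypothetical libdrm snippet; the property id is assumed to have been discovered beforehand (for example via drmModeObjectGetProperties()), and none of these names come from the patch:

    #include <stdint.h>
    #include <xf86drmMode.h>

    static int set_plane_zpos(int fd, uint32_t plane_id,
                              uint32_t zpos_prop_id, uint64_t zpos)
    {
        return drmModeObjectSetProperty(fd, plane_id, DRM_MODE_OBJECT_PLANE,
                                        zpos_prop_id, zpos);
    }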
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 16b71f8217e7..88312458580d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,6 +9,12 @@
*
*/
-int exynos_plane_init(struct drm_device *dev, unsigned int nr);
-int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void exynos_plane_commit(struct drm_plane *plane);
+void exynos_plane_dpms(struct drm_plane *plane, int mode);
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned int possible_crtcs, bool priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7b9c153dceb6..bb1550c4dd57 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -85,8 +85,6 @@ static const char fake_edid_info[] = {
0x00, 0x00, 0x00, 0x06
};
-static void vidi_fake_vblank_handler(struct work_struct *work);
-
static bool vidi_display_is_connected(struct device *dev)
{
struct vidi_context *ctx = get_vidi_context(dev);
@@ -531,6 +529,16 @@ static int vidi_store_connection(struct device *dev,
if (ctx->connected > 1)
return -EINVAL;
+ /* use fake edid data for testing. */
+ if (!ctx->raw_edid)
+ ctx->raw_edid = (struct edid *)fake_edid_info;
+
+ /* if raw_edid isn't the same as the fake data, it can't be tested. */
+ if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+ DRM_DEBUG_KMS("edid data is not fake data.\n");
+ return -EINVAL;
+ }
+
DRM_DEBUG_KMS("requested connection.\n");
drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
@@ -549,6 +557,8 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
+ struct edid *raw_edid;
+ int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -557,11 +567,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- if (!vidi->edid) {
- DRM_DEBUG_KMS("edid data is null.\n");
- return -EINVAL;
- }
-
if (vidi->connection > 1) {
DRM_DEBUG_KMS("connection should be 0 or 1.\n");
return -EINVAL;
@@ -588,8 +593,30 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- if (vidi->connection)
- ctx->raw_edid = (struct edid *)vidi->edid;
+ if (vidi->connection) {
+ if (!vidi->edid) {
+ DRM_DEBUG_KMS("edid data is null.\n");
+ return -EINVAL;
+ }
+ raw_edid = (struct edid *)(uint32_t)vidi->edid;
+ edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
+ ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ return -ENOMEM;
+ }
+ memcpy(ctx->raw_edid, raw_edid, edid_len);
+ } else {
+ /*
+ * with connection = 0, free raw_edid only if the raw edid
+ * data isn't the same as the fake data.
+ */
+ if (ctx->raw_edid && ctx->raw_edid !=
+ (struct edid *)fake_edid_info) {
+ kfree(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+ }
+ }
ctx->connected = vidi->connection;
drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
@@ -614,9 +641,6 @@ static int __devinit vidi_probe(struct platform_device *pdev)
INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
- /* for test */
- ctx->raw_edid = (struct edid *)fake_edid_info;
-
subdrv = &ctx->subdrv;
subdrv->dev = dev;
subdrv->manager = &vidi_manager;
@@ -644,6 +668,11 @@ static int __devexit vidi_remove(struct platform_device *pdev)
exynos_drm_subdrv_unregister(&ctx->subdrv);
+ if (ctx->raw_edid != (struct edid *)fake_edid_info) {
+ kfree(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+ }
+
kfree(ctx);
return 0;
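The EDID copy sizes its allocation from the block count in the header: the base block is EDID_LENGTH (128) bytes and the extensions field counts the additional 128-byte blocks. For instance, an EDID reporting one CEA extension block works out as follows; the snippet mirrors the patch:

    edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;  /* (1 + 1) * 128 = 256 */
    ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
    if (!ctx->raw_edid)
        return -ENOMEM;
    memcpy(ctx->raw_edid, raw_edid, edid_len);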
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a137e9e39a33..409e2ec1207c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -63,7 +63,6 @@ struct hdmi_context {
bool dvi_mode;
struct mutex hdmi_mutex;
- struct resource *regs_res;
void __iomem *regs;
unsigned int external_irq;
unsigned int internal_irq;
@@ -1940,7 +1939,7 @@ static void hdmi_conf_apply(struct hdmi_context *hdata)
}
static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_display_mode *m;
@@ -2280,16 +2279,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
return -EINVAL;
}
- drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+ GFP_KERNEL);
if (!drm_hdmi_ctx) {
DRM_ERROR("failed to allocate common hdmi context.\n");
return -ENOMEM;
}
- hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+ hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context),
+ GFP_KERNEL);
if (!hdata) {
DRM_ERROR("out of memory\n");
- kfree(drm_hdmi_ctx);
return -ENOMEM;
}
@@ -2318,26 +2318,18 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
goto err_resource;
}
- hdata->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!hdata->regs_res) {
- DRM_ERROR("failed to claim register region\n");
- ret = -ENOENT;
- goto err_resource;
- }
-
- hdata->regs = ioremap(res->start, resource_size(res));
+ hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
if (!hdata->regs) {
DRM_ERROR("failed to map registers\n");
ret = -ENXIO;
- goto err_req_region;
+ goto err_resource;
}
/* DDC i2c driver */
if (i2c_add_driver(&ddc_driver)) {
DRM_ERROR("failed to register ddc i2c driver\n");
ret = -ENOENT;
- goto err_iomap;
+ goto err_resource;
}
hdata->ddc_port = hdmi_ddc;
@@ -2398,16 +2390,9 @@ err_hdmiphy:
i2c_del_driver(&hdmiphy_driver);
err_ddc:
i2c_del_driver(&ddc_driver);
-err_iomap:
- iounmap(hdata->regs);
-err_req_region:
- release_mem_region(hdata->regs_res->start,
- resource_size(hdata->regs_res));
err_resource:
hdmi_resources_cleanup(hdata);
err_data:
- kfree(hdata);
- kfree(drm_hdmi_ctx);
return ret;
}
@@ -2425,18 +2410,11 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
hdmi_resources_cleanup(hdata);
- iounmap(hdata->regs);
-
- release_mem_region(hdata->regs_res->start,
- resource_size(hdata->regs_res));
-
/* hdmiphy i2c driver */
i2c_del_driver(&hdmiphy_driver);
/* DDC i2c driver */
i2c_del_driver(&ddc_driver);
- kfree(hdata);
-
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e2147a2ddcec..30fcc12f81dd 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -956,7 +956,8 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
- mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+ mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (mixer_res->mixer_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
@@ -967,38 +968,34 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
if (res == NULL) {
dev_err(dev, "get memory resource failed.\n");
ret = -ENXIO;
- goto fail_mixer_regs;
+ goto fail;
}
- mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+ mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (mixer_res->vp_regs == NULL) {
dev_err(dev, "register mapping failed.\n");
ret = -ENXIO;
- goto fail_mixer_regs;
+ goto fail;
}
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
if (res == NULL) {
dev_err(dev, "get interrupt resource failed.\n");
ret = -ENXIO;
- goto fail_vp_regs;
+ goto fail;
}
- ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+ ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
+ 0, "drm_mixer", ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
- goto fail_vp_regs;
+ goto fail;
}
mixer_res->irq = res->start;
return 0;
-fail_vp_regs:
- iounmap(mixer_res->vp_regs);
-
-fail_mixer_regs:
- iounmap(mixer_res->mixer_regs);
-
fail:
if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
clk_put(mixer_res->sclk_dac);
@@ -1013,16 +1010,6 @@ fail:
return ret;
}
-static void mixer_resources_cleanup(struct mixer_context *ctx)
-{
- struct mixer_resources *res = &ctx->mixer_res;
-
- free_irq(res->irq, ctx);
-
- iounmap(res->vp_regs);
- iounmap(res->mixer_regs);
-}
-
static int __devinit mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1032,16 +1019,16 @@ static int __devinit mixer_probe(struct platform_device *pdev)
dev_info(dev, "probe start\n");
- drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx),
+ GFP_KERNEL);
if (!drm_hdmi_ctx) {
DRM_ERROR("failed to allocate common hdmi context.\n");
return -ENOMEM;
}
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
DRM_ERROR("failed to alloc mixer context.\n");
- kfree(drm_hdmi_ctx);
return -ENOMEM;
}
@@ -1072,17 +1059,10 @@ fail:
static int mixer_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct exynos_drm_hdmi_context *drm_hdmi_ctx =
- platform_get_drvdata(pdev);
- struct mixer_context *ctx = drm_hdmi_ctx->ctx;
-
- dev_info(dev, "remove successful\n");
+ dev_info(&pdev->dev, "remove successful\n");
pm_runtime_disable(&pdev->dev);
- mixer_resources_cleanup(ctx);
-
return 0;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 187422018601..8c175345d85c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -82,7 +82,7 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
}
static bool cdv_intel_crt_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3e9a0f701df..a68509ba22a8 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -913,7 +913,7 @@ static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
}
static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 88b59d4a7b7f..a86f87b9ddde 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -90,7 +90,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
}
static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ff5b58eb878c..c7f9468b74ba 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -270,7 +270,7 @@ static int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
}
static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 973d7f6d66b7..8d7caf0f363e 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -427,7 +427,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
*
* Returns 0 on success, nonzero on failure.
*/
-bool psb_intel_init_bios(struct drm_device *dev)
+int psb_intel_init_bios(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev->pdev;
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 0a738663eb5a..2e95523b84b1 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -431,7 +431,7 @@ struct bdb_driver_features {
u8 custom_vbt_version;
} __attribute__((packed));
-extern bool psb_intel_init_bios(struct drm_device *dev);
+extern int psb_intel_init_bios(struct drm_device *dev);
extern void psb_intel_destroy_bios(struct drm_device *dev);
/*
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index b34ff097b979..d4813e03f5ee 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -684,7 +684,7 @@ void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
}
bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct mdfld_dsi_encoder *dsi_encoder = mdfld_dsi_encoder(encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
index 6f762478b959..2b40663e1696 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.h
@@ -65,7 +65,7 @@ extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/* MDFLD DPI helper functions */
extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 3f3cd619c79f..dec6a9aea3c6 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -117,7 +117,7 @@ static void psb_intel_crtc_commit(struct drm_crtc *crtc)
}
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index f821c835ca90..cdafd2acc72f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -487,7 +487,7 @@ oaktrail_crtc_mode_set_exit:
}
static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index c10899c953b9..2eb3dc4e9c9b 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -191,7 +191,7 @@ static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
}
static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index a8858a907f47..0c4737438530 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -633,7 +633,6 @@ static struct drm_driver driver = {
.open = psb_driver_open,
.preclose = psb_driver_preclose,
.postclose = psb_driver_close,
- .reclaim_buffers = drm_core_reclaim_buffers,
.gem_init_object = psb_gem_init_object,
.gem_free_object = psb_gem_free_object,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 36c3c99612f6..8033526bb53b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -543,7 +543,7 @@ void psb_intel_encoder_destroy(struct drm_encoder *encoder)
}
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
(struct drm_connector **) (psb_intel_crtc + 1);
psb_intel_crtc->mode_set.num_connectors = 0;
psb_intel_cursor_init(dev, psb_intel_crtc);
+
+ /* Set to true so that the pipe is forced off on initial config. */
+ psb_intel_crtc->active = true;
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 2515f83248cb..ebe1a28f60e1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -268,7 +268,7 @@ extern struct drm_framebuffer *psb_intel_framebuffer_create(struct drm_device
*mode_cmd,
void *mm_private);
extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index c83f5b5d1057..37adc9edf974 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -375,7 +375,7 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
}
bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index d39b15be7649..0466c7b985f8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -901,7 +901,7 @@ static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
static bool
psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct psb_intel_sdvo_dtd output_dtd;
@@ -918,7 +918,7 @@ psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdv
static bool
psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* Reset the input timing to the screen. Assume always input 0. */
@@ -942,7 +942,7 @@ psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
}
static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index d3f2e8785010..36d952280c50 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -88,7 +88,7 @@ static void ch7006_encoder_restore(struct drm_encoder *encoder)
}
static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index c860f24a5afc..9b83574141a6 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -172,7 +172,7 @@ struct ch7006_mode ch7006_modes[] = {
};
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode)
+ const struct drm_display_mode *drm_mode)
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
struct ch7006_mode *mode;
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index 17667b7d57e7..09599f4c0c9a 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -111,7 +111,7 @@ extern struct ch7006_tv_norm_info ch7006_tv_norms[];
extern struct ch7006_mode ch7006_modes[];
struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
- struct drm_display_mode *drm_mode);
+ const struct drm_display_mode *drm_mode);
void ch7006_setup_levels(struct drm_encoder *encoder);
void ch7006_setup_subcarrier(struct drm_encoder *encoder);
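The recurring change of the mode argument to const struct drm_display_mode * in the exynos, gma500, i2c and i915 hunks has one purpose: a mode_fixup implementation may read the requested mode but must write only the adjusted_mode copy. A conforming sketch; the clamp value is purely illustrative:

    static bool example_mode_fixup(struct drm_encoder *encoder,
                                   const struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
    {
        /* read the requested mode, modify only the adjusted copy */
        if (mode->clock > 165000)
            adjusted_mode->clock = 165000;

        return true;
    }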
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index b7d45ab4ba69..30b8ae5e5c4a 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -254,7 +254,7 @@ sil164_encoder_restore(struct drm_encoder *encoder)
static bool
sil164_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index fa9439159ebd..57d892eaaa6e 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -881,7 +881,7 @@ static int i810_flush_queue(struct drm_device *dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device *dev,
+void i810_driver_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -1220,12 +1220,17 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
}
-}
-void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- i810_reclaim_buffers(dev, file_priv);
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ i810_driver_reclaim_buffers(dev, file_priv);
+ drm_idlelock_release(&file_priv->master->lock);
+ } else {
+ /* master disappeared; clean up anyway and hope nothing
+ * goes wrong */
+ i810_driver_reclaim_buffers(dev, file_priv);
+ }
+
}
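Folding the reclaim into preclose means the core no longer holds the heavyweight lock for the driver, so the function guards itself: when the master and its hardware lock still exist, the idlelock is taken around the reclaim and released afterwards; otherwise the buffers are reclaimed anyway. The guard in isolation, identifiers exactly as in the patch:

    if (file_priv->master && file_priv->master->lock.hw_lock) {
        drm_idlelock_take(&file_priv->master->lock);
        i810_driver_reclaim_buffers(dev, file_priv);
        drm_idlelock_release(&file_priv->master->lock);
    } else {
        /* no master left; reclaim without the idlelock */
        i810_driver_reclaim_buffers(dev, file_priv);
    }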
int i810_driver_dma_quiescent(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index ec12f7dc717a..f9924ad04d09 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,13 +57,12 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
- DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+ DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
.lastclose = i810_driver_lastclose,
.preclose = i810_driver_preclose,
.device_is_agp = i810_driver_device_is_agp,
- .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
.fops = &i810_driver_fops,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c9339f481795..6e0acad9e0f5 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -116,14 +116,12 @@ typedef struct drm_i810_private {
/* i810_dma.c */
extern int i810_driver_dma_quiescent(struct drm_device *dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
+void i810_driver_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
extern void i810_driver_lastclose(struct drm_device *dev);
extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 2e9268da58d8..b0bacdba6d7e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -7,6 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
i915_suspend.o \
i915_gem.o \
+ i915_gem_context.o \
i915_gem_debug.o \
i915_gem_evict.o \
i915_gem_execbuffer.o \
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 8c2ad014c47f..58914691a77b 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -86,7 +86,7 @@ struct intel_dvo_dev_ops {
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/*
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5363e9c66c27..359f6e8b9b00 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -676,6 +676,7 @@ static void i915_ring_error_state(struct seq_file *m,
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
seq_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
seq_printf(m, " SYNC_0: 0x%08x\n",
error->semaphore_mboxes[ring][0]);
@@ -713,6 +714,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, "IER: 0x%08x\n", error->ier);
seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ seq_printf(m, "CCID: 0x%08x\n", error->ccid);
for (i = 0; i < dev_priv->num_fence_regs; i++)
seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
@@ -1765,6 +1767,64 @@ static const struct file_operations i915_max_freq_fops = {
};
static ssize_t
+i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ char buf[80];
+ int len;
+
+ len = snprintf(buf, sizeof(buf),
+ "min freq: %d\n", dev_priv->min_delay * 50);
+
+ if (len > sizeof(buf))
+ len = sizeof(buf);
+
+ return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct drm_device *dev = filp->private_data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ char buf[20];
+ int val = 1;
+
+ if (cnt > 0) {
+ if (cnt > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = 0;
+
+ val = simple_strtoul(buf, NULL, 0);
+ }
+
+ DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
+
+ /*
+ * Turbo will still be enabled, but won't go below the set value.
+ */
+ dev_priv->min_delay = val / 50;
+
+ gen6_set_rps(dev, val / 50);
+
+ return cnt;
+}
+
+static const struct file_operations i915_min_freq_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i915_min_freq_read,
+ .write = i915_min_freq_write,
+ .llseek = default_llseek,
+};
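These hooks follow the common debugfs idiom: the read side formats into a small stack buffer and defers to simple_read_from_buffer(), while the write side copies the user string and parses it with simple_strtoul(). Stripped to a skeleton; example_value is a stand-in, not something from the patch:

    static int example_value;

    static ssize_t example_read(struct file *filp, char __user *ubuf,
                                size_t max, loff_t *ppos)
    {
        char buf[80];
        int len;

        len = snprintf(buf, sizeof(buf), "value: %d\n", example_value);
        if (len > sizeof(buf))
            len = sizeof(buf);

        return simple_read_from_buffer(ubuf, max, ppos, buf, len);
    }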
+
+static ssize_t
i915_cache_sharing_read(struct file *filp,
char __user *ubuf,
size_t max,
@@ -1997,6 +2057,12 @@ int i915_debugfs_init(struct drm_minor *minor)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
+ "i915_min_freq",
+ &i915_min_freq_fops);
+ if (ret)
+ return ret;
+
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_cache_sharing",
&i915_cache_sharing_fops);
if (ret)
@@ -2028,6 +2094,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
+ 1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 36822b924eb1..9cf7dfe022b9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1006,6 +1006,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
+ case I915_PARAM_HAS_WAIT_TIMEOUT:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1082,8 +1085,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->dri1.gfx_hws_cpu_addr = ioremap_wc(dev->agp->base + hws->addr,
- 4096);
+ dev_priv->dri1.gfx_hws_cpu_addr =
+ ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@@ -1411,7 +1414,7 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
- ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
ap->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
primary =
@@ -1467,11 +1470,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+ if (!ret) {
+ DRM_ERROR("failed to set up gmch\n");
+ ret = -EIO;
+ goto put_bridge;
+ }
+
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
- goto put_bridge;
+ goto put_gmch;
}
i915_kick_out_firmware_fb(dev_priv);
@@ -1498,19 +1508,22 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
- goto put_bridge;
+ goto put_gmch;
}
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base, aperture_size);
+ io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
+ aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
}
- i915_mtrr_setup(dev_priv, dev->agp->base, aperture_size);
+ i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
+ aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1534,7 +1547,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
intel_irq_init(dev);
+ intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1567,7 +1584,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
@@ -1586,8 +1602,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Start out suspended */
dev_priv->mm.suspended = 1;
- intel_detect_pch(dev);
-
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = i915_load_modeset_init(dev);
if (ret < 0) {
@@ -1622,13 +1636,16 @@ out_gem_unload:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
- mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
+ mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
+put_gmch:
+ intel_gmch_remove();
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1660,8 +1677,9 @@ int i915_driver_unload(struct drm_device *dev)
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
- mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
- dev->agp->agp_info.aper_size * 1024 * 1024);
+ mtrr_del(dev_priv->mm.gtt_mtrr,
+ dev_priv->mm.gtt_base_addr,
+ dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->mm.gtt_mtrr = -1;
}
@@ -1702,6 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
@@ -1741,6 +1760,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
+ idr_init(&file_priv->context_idr);
+
return 0;
}
@@ -1760,7 +1781,13 @@ void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
+ /* On gen6+ we refuse to init without kms enabled, but then the drm core
+ * goes right around and calls lastclose. Check for this and don't clean
+ * up anything. */
+ if (!dev_priv)
+ return;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
@@ -1773,6 +1800,7 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
}
@@ -1826,6 +1854,9 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
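Userspace can key off the new getparam value before relying on the wait ioctl added above. A hedged sketch, assuming libdrm-style headers that already define I915_PARAM_HAS_WAIT_TIMEOUT:

#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Returns non-zero when the running kernel advertises I915_GEM_WAIT support. */
static int has_wait_timeout(int drm_fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
	gp.value = &value;

	/* Older kernels reject unknown parameters, so a failure means "no". */
	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return value;
}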
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9fe9ebe52a7a..a24ffbe97c01 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/console.h>
@@ -215,7 +216,6 @@ static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_ironlake_m_info = {
@@ -223,7 +223,6 @@ static const struct intel_device_info intel_ironlake_m_info = {
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 1,
.has_bsd_ring = 1,
- .has_pch_split = 1,
};
static const struct intel_device_info intel_sandybridge_d_info = {
@@ -232,7 +231,6 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -243,7 +241,6 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -253,7 +250,6 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -264,7 +260,6 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -292,7 +287,6 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -302,7 +296,6 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
- .has_pch_split = 1,
.has_force_wake = 1,
};
@@ -353,11 +346,43 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
+ INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
+ INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
- INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */
+ INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
+ INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
+ INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
+ INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
+ INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
+ INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
+ INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
+ INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
+ INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
+ INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
+ INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
+ INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
+ INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
+ INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
+ INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
+ INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
+ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
+ INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
{0, 0, 0}
};
@@ -429,135 +454,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return 1;
}
-void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
- udelay(10);
-
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(FORCEWAKE);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
- udelay(10);
-}
-
-void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
- udelay(10);
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
- POSTING_READ(FORCEWAKE_MT);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
- udelay(10);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->display.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->display.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
- }
- dev_priv->gt_fifo_count--;
-
- return ret;
-}
-
-void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- int count;
-
- count = 0;
-
- /* Already awake? */
- if ((I915_READ(0x130094) & 0xa1) == 0xa1)
- return;
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
- POSTING_READ(FORCEWAKE_VLV);
-
- count = 0;
- while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0)
- udelay(10);
-}
-
-void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
- /* FIXME: confirm VLV behavior with Punit folks */
- POSTING_READ(FORCEWAKE_VLV);
-}
-
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -637,7 +533,7 @@ static int i915_drm_thaw(struct drm_device *dev)
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
@@ -794,9 +690,9 @@ static int gen6_do_reset(struct drm_device *dev)
/* If reset with a user forcewake, try to restore, otherwise turn it off */
if (dev_priv->forcewake_count)
- dev_priv->display.force_wake_get(dev_priv);
+ dev_priv->gt.force_wake_get(dev_priv);
else
- dev_priv->display.force_wake_put(dev_priv);
+ dev_priv->gt.force_wake_put(dev_priv);
/* Restore fifo count */
dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
@@ -805,7 +701,7 @@ static int gen6_do_reset(struct drm_device *dev)
return ret;
}
-static int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = -ENODEV;
@@ -863,10 +759,7 @@ int i915_reset(struct drm_device *dev)
if (!i915_try_reset)
return 0;
- if (!mutex_trylock(&dev->struct_mutex))
- return -EBUSY;
-
- dev_priv->stop_rings = 0;
+ mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -909,12 +802,16 @@ int i915_reset(struct drm_device *dev)
for_each_ring(ring, dev_priv, i)
ring->init(ring);
+ i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * It would make sense to re-init all the other hw state, at
+ * least the rps/rc6/emon init done within modeset_init_hw. For
+ * some unknown reason, this blows up my ilk, so don't.
+ */
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- intel_modeset_init_hw(dev);
+ mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -925,10 +822,12 @@ int i915_reset(struct drm_device *dev)
return 0;
}
-
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct intel_device_info *intel_info =
+ (struct intel_device_info *) ent->driver_data;
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
@@ -937,6 +836,18 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
+ /* We've managed to ship a kms-enabled ddx that shipped with an XvMC
+ * implementation for gen3 (and only gen3) that used legacy drm maps
+ * (gasp!) to share buffers between X and the client. Hence we need to
+ * keep around the fake agp stuff for gen3, even when kms is enabled. */
+ if (intel_info->gen != 3) {
+ driver.driver_features &=
+ ~(DRIVER_USE_AGP | DRIVER_REQUIRE_AGP);
+ } else if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return -ENODEV;
+ }
+
return drm_get_pci_dev(pdev, ent, &driver);
}
@@ -1058,7 +969,6 @@ static struct drm_driver driver = {
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .reclaim_buffers = drm_core_reclaim_buffers,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
@@ -1097,11 +1007,6 @@ static struct pci_driver i915_pci_driver = {
static int __init i915_init(void)
{
- if (!intel_agp_enabled) {
- DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
- return -ENODEV;
- }
-
driver.num_ioctls = i915_max_ioctl;
/*
@@ -1149,6 +1054,84 @@ MODULE_LICENSE("GPL and additional rights");
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
+static bool IS_DISPLAYREG(u32 reg)
+{
+ /*
+ * This should make it easier to transition modules over to the
+ * new register block scheme, since we can do it incrementally.
+ */
+ if (reg >= 0x180000)
+ return false;
+
+ if (reg >= RENDER_RING_BASE &&
+ reg < RENDER_RING_BASE + 0xff)
+ return false;
+ if (reg >= GEN6_BSD_RING_BASE &&
+ reg < GEN6_BSD_RING_BASE + 0xff)
+ return false;
+ if (reg >= BLT_RING_BASE &&
+ reg < BLT_RING_BASE + 0xff)
+ return false;
+
+ if (reg == PGTBL_ER)
+ return false;
+
+ if (reg >= IPEIR_I965 &&
+ reg < HWSTAM)
+ return false;
+
+ if (reg == MI_MODE)
+ return false;
+
+ if (reg == GFX_MODE_GEN7)
+ return false;
+
+ if (reg == RENDER_HWS_PGA_GEN7 ||
+ reg == BSD_HWS_PGA_GEN7 ||
+ reg == BLT_HWS_PGA_GEN7)
+ return false;
+
+ if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
+ reg == GEN6_BSD_RNCID)
+ return false;
+
+ if (reg == GEN6_BLITTER_ECOSKPD)
+ return false;
+
+ if (reg >= 0x4000c &&
+ reg <= 0x4002c)
+ return false;
+
+ if (reg >= 0x4f000 &&
+ reg <= 0x4f08f)
+ return false;
+
+ if (reg >= 0x4f100 &&
+ reg <= 0x4f11f)
+ return false;
+
+ if (reg >= VLV_MASTER_IER &&
+ reg <= GEN6_PMIER)
+ return false;
+
+ if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
+ reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
+ return false;
+
+ if (reg >= VLV_IIR_RW &&
+ reg <= VLV_ISR)
+ return false;
+
+ if (reg == FORCEWAKE_VLV ||
+ reg == FORCEWAKE_ACK_VLV)
+ return false;
+
+ if (reg == GEN6_GDRST)
+ return false;
+
+ return true;
+}
+
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
@@ -1156,11 +1139,13 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_get(dev_priv); \
+ dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
- dev_priv->display.force_wake_put(dev_priv); \
+ dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
+ } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@@ -1181,7 +1166,11 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- write##y(val, dev_priv->regs + reg); \
+ if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
+ write##y(val, dev_priv->regs + reg + 0x180000); \
+ } else { \
+ write##y(val, dev_priv->regs + reg); \
+ } \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
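Written out as plain C, the 32-bit read path now looks roughly like the sketch below (illustration only, not part of the patch; tracing and the exact NEEDS_FORCE_WAKE() definition are as in the surrounding macros):

static u32 i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
		unsigned long irqflags;

		/* Forcewake is now routed through the new dev_priv->gt vtable. */
		spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		if (dev_priv->forcewake_count == 0)
			dev_priv->gt.force_wake_put(dev_priv);
		spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) {
		/* Valleyview keeps display registers at a 0x180000 offset. */
		val = readl(dev_priv->regs + reg + 0x180000);
	} else {
		val = readl(dev_priv->regs + reg);
	}

	return val;
}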
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0b676abde0d..627fe35781b4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,6 +79,10 @@ enum port {
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ if ((intel_encoder)->base.crtc == (__crtc))
+
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
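The new for_each_encoder_on_crtc() iterator is a thin filter over the mode_config encoder list; a usage sketch (the helper name here is illustrative, not from the patch):

/* Count the intel_encoders currently driven by a given CRTC. */
static int sketch_count_encoders_on_crtc(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct intel_encoder *intel_encoder;
	int count = 0;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		count++;	/* only encoders with base.crtc == crtc are visited */

	return count;
}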
@@ -176,6 +180,7 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
+ u32 ccid;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
@@ -185,6 +190,7 @@ struct drm_i915_error_state {
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+ u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
@@ -261,8 +267,6 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
- void (*force_wake_get)(struct drm_i915_private *dev_priv);
- void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -270,6 +274,11 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
+struct drm_i915_gt_funcs {
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
+};
+
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
@@ -284,7 +293,6 @@ struct intel_device_info {
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
- u8 has_pch_split:1;
u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
@@ -309,6 +317,17 @@ struct i915_hw_ppgtt {
dma_addr_t scratch_page_dma_addr;
};
+
+/* This must match up with the value previously used for execbuf2.rsvd1. */
+#define DEFAULT_CONTEXT_ID 0
+struct i915_hw_context {
+ int id;
+ bool is_initialized;
+ struct drm_i915_file_private *file_priv;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_object *obj;
+};
+
enum no_fbc_reason {
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
@@ -321,6 +340,7 @@ enum no_fbc_reason {
};
enum intel_pch {
+ PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
@@ -350,6 +370,8 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
+
+ struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
@@ -652,11 +674,14 @@ typedef struct drm_i915_private {
unsigned long gtt_end;
struct io_mapping *gtt_mapping;
+ phys_addr_t gtt_base_addr;
int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
+ u32 *l3_remap_info;
+
struct shrinker inactive_shrinker;
/**
@@ -817,6 +842,10 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
+
+ struct work_struct parity_error_work;
+ bool hw_contexts_disabled;
+ uint32_t hw_context_size;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1026,6 +1055,7 @@ struct drm_i915_file_private {
struct spinlock lock;
struct list_head request_list;
} mm;
+ struct idr context_idr;
};
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
@@ -1071,7 +1101,8 @@ struct drm_i915_file_private {
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6)
+#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
@@ -1094,13 +1125,13 @@ struct drm_i915_file_private {
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define HAS_PCH_SPLIT(dev) (INTEL_INFO(dev)->has_pch_split)
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
@@ -1166,6 +1197,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
+extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -1178,6 +1210,7 @@ void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_gt_init(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1237,6 +1270,8 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
@@ -1306,6 +1341,8 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible);
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1315,6 +1352,7 @@ int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
+void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
@@ -1323,8 +1361,8 @@ int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
-int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1358,6 +1396,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
+/* i915_gem_context.c */
+void i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file, int to_id);
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
@@ -1475,20 +1523,12 @@ extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
-extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
-extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
-
-extern void vlv_force_wake_get(struct drm_i915_private *dev_priv);
-extern void vlv_force_wake_put(struct drm_i915_private *dev_priv);
/* overlay */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 288d7b8f49ae..489e2b162b27 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -96,9 +96,18 @@ i915_gem_wait_for_error(struct drm_device *dev)
if (!atomic_read(&dev_priv->mm.wedged))
return 0;
- ret = wait_for_completion_interruptible(x);
- if (ret)
+ /*
+ * Only wait 10 seconds for the gpu reset to complete to avoid hanging
+ * userspace. If it takes that long something really bad is going on and
+ * we should simply try to bail out and fail as gracefully as possible.
+ */
+ ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
+ if (ret == 0) {
+ DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
+ return -EIO;
+ } else if (ret < 0) {
return ret;
+ }
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
@@ -1122,7 +1131,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1132,6 +1141,11 @@ unlock:
out:
switch (ret) {
case -EIO:
+ /* If this -EIO is due to a gpu hang, give the reset code a
+ * chance to clean up the mess. Otherwise return the proper
+ * SIGBUS. */
+ if (!atomic_read(&dev_priv->mm.wedged))
+ return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
* objects off the GPU active list. Next time we service the
@@ -1568,6 +1582,21 @@ i915_add_request(struct intel_ring_buffer *ring,
int was_empty;
int ret;
+ /*
+ * Emit any outstanding flushes - execbuf can fail to emit the flush
+ * after having emitted the batchbuffer command. Hence we need to fix
+ * things up similar to emitting the lazy request. The difference here
+ * is that the flush _must_ happen before the next request, no matter
+ * what.
+ */
+ if (ring->gpu_caches_dirty) {
+ ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
+ }
+
BUG_ON(request == NULL);
seqno = i915_gem_next_request_seqno(ring);
@@ -1613,6 +1642,9 @@ i915_add_request(struct intel_ring_buffer *ring,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
+
+ WARN_ON(!list_empty(&ring->gpu_write_list));
+
return 0;
}
@@ -1827,14 +1859,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
idle = true;
for_each_ring(ring, dev_priv, i) {
- if (!list_empty(&ring->gpu_write_list)) {
+ if (ring->gpu_caches_dirty) {
struct drm_i915_gem_request *request;
- int ret;
- ret = i915_gem_flush_ring(ring,
- 0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (ret || request == NULL ||
+ if (request == NULL ||
i915_add_request(ring, NULL, request))
kfree(request);
}
@@ -1848,11 +1877,10 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-static int
-i915_gem_check_wedge(struct drm_i915_private *dev_priv)
+int
+i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+ bool interruptible)
{
- BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
@@ -1863,7 +1891,16 @@ i915_gem_check_wedge(struct drm_i915_private *dev_priv)
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
- return recovery_complete ? -EIO : -EAGAIN;
+ /* Non-interruptible callers can't handle -EAGAIN, hence return
+ * -EIO unconditionally for these. */
+ if (!interruptible)
+ return -EIO;
+
+ /* Recovery complete, but still wedged means reset failure. */
+ if (recovery_complete)
+ return -EIO;
+
+ return -EAGAIN;
}
return 0;
@@ -1899,34 +1936,85 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
return ret;
}
+/**
+ * __wait_seqno - wait until execution of seqno has finished
+ * @ring: the ring expected to report seqno
+ * @seqno: the seqno to wait on
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ *
+ * Returns 0 if the seqno was found within the allotted time. Otherwise returns
+ * an errno, with the remaining time filled into the timeout argument.
+ */
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
- bool interruptible)
+ bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
- int ret = 0;
+ struct timespec before, now, wait_time={1,0};
+ unsigned long timeout_jiffies;
+ long end;
+ bool wait_forever = true;
+ int ret;
if (i915_seqno_passed(ring->get_seqno(ring), seqno))
return 0;
trace_i915_gem_request_wait_begin(ring, seqno);
+
+ if (timeout != NULL) {
+ wait_time = *timeout;
+ wait_forever = false;
+ }
+
+ timeout_jiffies = timespec_to_jiffies(&wait_time);
+
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
+ /* Record current time in case we are interrupted by a signal, or wedged */
+ getrawmonotonic(&before);
+
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring), seqno) || \
atomic_read(&dev_priv->mm.wedged))
+ do {
+ if (interruptible)
+ end = wait_event_interruptible_timeout(ring->irq_queue,
+ EXIT_COND,
+ timeout_jiffies);
+ else
+ end = wait_event_timeout(ring->irq_queue, EXIT_COND,
+ timeout_jiffies);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- EXIT_COND);
- else
- wait_event(ring->irq_queue, EXIT_COND);
+ ret = i915_gem_check_wedge(dev_priv, interruptible);
+ if (ret)
+ end = ret;
+ } while (end == 0 && wait_forever);
+
+ getrawmonotonic(&now);
ring->irq_put(ring);
trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND
- return ret;
+ if (timeout) {
+ struct timespec sleep_time = timespec_sub(now, before);
+ *timeout = timespec_sub(*timeout, sleep_time);
+ }
+
+ switch (end) {
+ case -EIO:
+ case -EAGAIN: /* Wedged */
+ case -ERESTARTSYS: /* Signal */
+ return (int)end;
+ case 0: /* Timeout */
+ if (timeout)
+ set_normalized_timespec(timeout, 0, 0);
+ return -ETIME;
+ default: /* Completed */
+ WARN_ON(end < 0); /* We're not aware of other errors */
+ return 0;
+ }
}
/**
@@ -1934,15 +2022,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
* request and object lists appropriately for that event.
*/
int
-i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno)
+i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret = 0;
BUG_ON(seqno == 0);
- ret = i915_gem_check_wedge(dev_priv);
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1950,9 +2037,7 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (ret)
return ret;
- ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible);
- if (atomic_read(&dev_priv->mm.wedged))
- ret = -EAGAIN;
+ ret = __wait_seqno(ring, seqno, dev_priv->mm.interruptible, NULL);
return ret;
}
@@ -1975,7 +2060,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_seqno(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
i915_gem_retire_requests_ring(obj->ring);
@@ -1985,6 +2070,115 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
}
/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring and
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->active) {
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_check_olr(obj->ring,
+ obj->last_rendering_seqno);
+ if (ret)
+ return ret;
+ i915_gem_retire_requests_ring(obj->ring);
+ }
+
+ return 0;
+}
+
+/**
+ * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Returns 0 if successful, else an error is returned with the remaining time in
+ * the timeout parameter.
+ * -ETIME: object is still busy after timeout
+ * -ERESTARTSYS: signal interrupted the wait
+ * -ENOENT: object doesn't exist
+ * Also possible, but rare:
+ * -EAGAIN: GPU wedged
+ * -ENOMEM: damn
+ * -ENODEV: Internal IRQ fail
+ * -E?: The add request failed
+ *
+ * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
+ * non-zero timeout parameter the wait ioctl will wait for the given number of
+ * nanoseconds on an object becoming unbusy. Since the wait itself does so
+ * without holding struct_mutex the object may become re-busied before this
+ * function completes. A similar but shorter * race condition exists in the busy
+ * ioctl
+ */
+int
+i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_i915_gem_wait *args = data;
+ struct drm_i915_gem_object *obj;
+ struct intel_ring_buffer *ring = NULL;
+ struct timespec timeout_stack, *timeout = NULL;
+ u32 seqno = 0;
+ int ret = 0;
+
+ if (args->timeout_ns >= 0) {
+ timeout_stack = ns_to_timespec(args->timeout_ns);
+ timeout = &timeout_stack;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ if (&obj->base == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ /* Need to make sure the object gets inactive eventually. */
+ ret = i915_gem_object_flush_active(obj);
+ if (ret)
+ goto out;
+
+ if (obj->active) {
+ seqno = obj->last_rendering_seqno;
+ ring = obj->ring;
+ }
+
+ if (seqno == 0)
+ goto out;
+
+ /* Do this after OLR check to make sure we make forward progress polling
+ * on this IOCTL with a 0 timeout (like busy ioctl)
+ */
+ if (!args->timeout_ns) {
+ ret = -ETIME;
+ goto out;
+ }
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = __wait_seqno(ring, seqno, true, timeout);
+ if (timeout) {
+ WARN_ON(!timespec_valid(timeout));
+ args->timeout_ns = timespec_to_ns(timeout);
+ }
+ return ret;
+
+out:
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
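From userspace the new ioctl behaves like a bounded busy ioctl. A sketch, assuming the matching i915_drm.h update that defines struct drm_i915_gem_wait and DRM_IOCTL_I915_GEM_WAIT:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Wait up to 100 ms for a BO to go idle; 0 on success, -errno otherwise. */
static int sketch_wait_bo_idle(int drm_fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait;

	memset(&wait, 0, sizeof(wait));
	wait.bo_handle = handle;
	wait.timeout_ns = 100 * 1000 * 1000;	/* 100 ms budget */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
		return -errno;	/* ETIME means still busy after the budget */

	/* When the wait path ran, timeout_ns is updated with the unused budget. */
	return 0;
}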
+/**
* i915_gem_object_sync - sync an object to a ring.
*
* @obj: object which may be in use on another ring.
@@ -2160,7 +2354,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+ return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
int i915_gpu_idle(struct drm_device *dev)
@@ -2171,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
+ ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
+ if (ret)
+ return ret;
+
ret = i915_ring_idle(ring);
if (ret)
return ret;
@@ -2364,7 +2562,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
}
if (obj->last_fenced_seqno) {
- ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
+ ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
@@ -2551,8 +2749,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
- size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
+ size, alignment,
+ 0, dev_priv->mm.gtt_mappable_end,
0);
else
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
@@ -2563,7 +2761,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, 0,
- dev_priv->mm.gtt_mappable_end,
+ 0, dev_priv->mm.gtt_mappable_end,
0);
else
obj->gtt_space =
@@ -3030,7 +3228,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (seqno == 0)
return 0;
- ret = __wait_seqno(ring, seqno, true);
+ ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -3199,30 +3397,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj->active;
- if (args->busy) {
- /* Unconditionally flush objects, even when the gpu still uses this
- * object. Userspace calling this function indicates that it wants to
- * use this buffer rather sooner than later, so issuing the required
- * flush earlier is beneficial.
- */
- if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->ring,
- 0, obj->base.write_domain);
- } else {
- ret = i915_gem_check_olr(obj->ring,
- obj->last_rendering_seqno);
- }
+ ret = i915_gem_object_flush_active(obj);
- /* Update the active list for the hardware's current position.
- * Otherwise this only updates on a delayed timer or when irqs
- * are actually unmasked, and our working set ends up being
- * larger than required.
- */
- i915_gem_retire_requests_ring(obj->ring);
-
- args->busy = obj->active;
- }
+ args->busy = obj->active;
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3435,6 +3612,38 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+void i915_gem_l3_remap(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 misccpctl;
+ int i;
+
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ if (!dev_priv->mm.l3_remap_info)
+ return;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
+ u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
+ if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+ DRM_DEBUG("0x%x was already programmed to %x\n",
+ GEN7_L3LOG_BASE + i, remap);
+ if (remap && !dev_priv->mm.l3_remap_info[i/4])
+ DRM_DEBUG_DRIVER("Clearing remapped register\n");
+ I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+ }
+
+ /* Make sure all the writes land before disabling dop clock gating */
+ POSTING_READ(GEN7_L3LOG_BASE);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+}
+
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3518,12 +3727,33 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
+static bool
+intel_enable_blt(struct drm_device *dev)
+{
+ if (!HAS_BLT(dev))
+ return false;
+
+ /* The blitter was dysfunctional on early prototypes */
+ if (IS_GEN6(dev) && dev->pdev->revision < 8) {
+ DRM_INFO("BLT not supported on this pre-production hardware;"
+ " graphics performance will be degraded.\n");
+ return false;
+ }
+
+ return true;
+}
+
int
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
+ if (!intel_enable_gtt())
+ return -EIO;
+
+ i915_gem_l3_remap(dev);
+
i915_gem_init_swizzling(dev);
ret = intel_init_render_ring_buffer(dev);
@@ -3536,7 +3766,7 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_render_ring;
}
- if (HAS_BLT(dev)) {
+ if (intel_enable_blt(dev)) {
ret = intel_init_blt_ring_buffer(dev);
if (ret)
goto cleanup_bsd_ring;
@@ -3544,6 +3774,11 @@ i915_gem_init_hw(struct drm_device *dev)
dev_priv->next_seqno = 1;
+ /*
+ * XXX: There was some w/a described somewhere suggesting loading
+ * contexts before PPGTT.
+ */
+ i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
new file mode 100644
index 000000000000..a9d58d72bb4d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -0,0 +1,535 @@
+/*
+ * Copyright © 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+/*
+ * This file implements HW context support. On gen5+ a HW context consists of an
+ * opaque GPU object which is referenced at times of context saves and restores.
+ * With RC6 enabled, the context is also referenced as the GPU enters and exits
+ * from RC6 (the GPU has its own internal power context, except on gen5). Though
+ * something like a context does exist for the media ring, the code only
+ * supports contexts for the render ring.
+ *
+ * In software, there is a distinction between contexts created by the user,
+ * and the default HW context. The default HW context is used by GPU clients
+ * that do not request setup of their own hardware context. The default
+ * context's state is never restored to help prevent programming errors. This
+ * would happen if a client ran and piggy-backed off another client's GPU state.
+ * The default context only exists to give the GPU some offset to load as the
+ * current to invoke a save of the context we actually care about. In fact, the
+ * code could likely be constructed, albeit in a more complicated fashion, to
+ * never use the default context, though that limits the driver's ability to
+ * swap out, and/or destroy other contexts.
+ *
+ * All other contexts are created as a request by the GPU client. These contexts
+ * store GPU state, and thus allow GPU clients to not re-emit state (and
+ * potentially query certain state) at any time. The kernel driver makes
+ * certain that the appropriate commands are inserted.
+ *
+ * The context life cycle is semi-complicated in that context BOs may live
+ * longer than the context itself because of the way the hardware and object
+ * tracking work. Below is a very crude representation of the state machine
+ * describing the context life.
+ * refcount pincount active
+ * S0: initial state 0 0 0
+ * S1: context created 1 0 0
+ * S2: context is currently running 2 1 X
+ * S3: GPU referenced, but not current 2 0 1
+ * S4: context is current, but destroyed 1 1 0
+ * S5: like S3, but destroyed 1 0 1
+ *
+ * The most common (but not all) transitions:
+ * S0->S1: client creates a context
+ * S1->S2: client submits execbuf with context
+ * S2->S3: another client submits an execbuf with the context
+ * S3->S1: context object was retired
+ * S3->S2: client submits another execbuf
+ * S2->S4: context destroy called with current context
+ * S3->S5->S0: destroy path
+ * S4->S5->S0: destroy path on current context
+ *
+ * There are two confusing terms used above:
+ * The "current context" means the context which is currently running on the
+ * GPU. The GPU has already loaded its state and has stored away the gtt
+ * offset of the BO. The GPU is not actively referencing the data at this
+ * offset, but it will on the next context switch. The only way to avoid this
+ * is to do a GPU reset.
+ *
+ * An "active context' is one which was previously the "current context" and is
+ * on the active list waiting for the next context switch to occur. Until this
+ * happens, the object must remain at the same gtt offset. It is therefore
+ * possible to destroy a context, but it is still active.
+ *
+ */
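For reference, the matching userspace flow is roughly the sketch below. It is only an illustration: the create/destroy struct and field names (ctx_id) come from the i915_drm.h update that accompanies this series and are assumed here, and passing the id via execbuffer2.rsvd1 follows the DEFAULT_CONTEXT_ID comment in i915_drv.h.

#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void sketch_context_round_trip(int drm_fd,
				      struct drm_i915_gem_execbuffer2 *execbuf)
{
	struct drm_i915_gem_context_create create;	/* assumed uapi struct */
	struct drm_i915_gem_context_destroy destroy;	/* assumed uapi struct */

	/* S0 -> S1: ask the kernel for a new HW context. */
	memset(&create, 0, sizeof(create));
	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
		return;

	/* S1 -> S2: submit work against it; the id travels in rsvd1. */
	execbuf->rsvd1 = create.ctx_id;			/* field name assumed */
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);

	/* Drop the software reference; the BO may stay active (S4/S5). */
	memset(&destroy, 0, sizeof(destroy));
	destroy.ctx_id = create.ctx_id;
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}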
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This is a HW constraint. The value below is the largest known requirement
+ * I've seen in a spec to date, and that was a workaround for a non-shipping
+ * part. It should be safe to decrease this, but it's more future proof as is.
+ */
+#define CONTEXT_ALIGN (64<<10)
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
+static int do_switch(struct drm_i915_gem_object *from_obj,
+ struct i915_hw_context *to, u32 seqno);
+
+static int get_context_size(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ u32 reg;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ reg = I915_READ(CXT_SIZE);
+ ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ case 7:
+ reg = I915_READ(GEN7_CXT_SIZE);
+ ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
+ break;
+ default:
+ BUG();
+ }
+
+ return ret;
+}
+
+static void do_destroy(struct i915_hw_context *ctx)
+{
+ struct drm_device *dev = ctx->obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (ctx->file_priv)
+ idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ else
+ BUG_ON(ctx != dev_priv->ring[RCS].default_context);
+
+ drm_gem_object_unreference(&ctx->obj->base);
+ kfree(ctx);
+}
+
+static struct i915_hw_context *
+create_hw_context(struct drm_device *dev,
+ struct drm_i915_file_private *file_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_context *ctx;
+ int ret, id;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+ if (ctx->obj == NULL) {
+ kfree(ctx);
+ DRM_DEBUG_DRIVER("Context object allocated failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* The ring associated with the context object is handled by the normal
+ * object tracking code. We give an initial ring value simply to pass an
+ * assertion in the context switch code.
+ */
+ ctx->ring = &dev_priv->ring[RCS];
+
+ /* Default context will never have a file_priv */
+ if (file_priv == NULL)
+ return ctx;
+
+ ctx->file_priv = file_priv;
+
+again:
+ if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ DRM_DEBUG_DRIVER("idr allocation failed\n");
+ goto err_out;
+ }
+
+ ret = idr_get_new_above(&file_priv->context_idr, ctx,
+ DEFAULT_CONTEXT_ID + 1, &id);
+ if (ret == 0)
+ ctx->id = id;
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ goto err_out;
+
+ return ctx;
+
+err_out:
+ do_destroy(ctx);
+ return ERR_PTR(ret);
+}
+
+static inline bool is_default_context(struct i915_hw_context *ctx)
+{
+ return (ctx == ctx->ring->default_context);
+}
+
+/**
+ * The default context needs to exist per ring that uses contexts. It stores the
+ * context state of the GPU for applications that don't utilize HW contexts, as
+ * well as for the idle case.
+ */
+static int create_default_context(struct drm_i915_private *dev_priv)
+{
+ struct i915_hw_context *ctx;
+ int ret;
+
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+
+ ctx = create_hw_context(dev_priv->dev, NULL);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ /* We may need to do things with the shrinker which require us to
+ * immediately switch back to the default context. This can cause a
+ * problem as pinning the default context also requires GTT space which
+ * may not be available. To avoid this we always pin the
+ * default context.
+ */
+ dev_priv->ring[RCS].default_context = ctx;
+ ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false);
+ if (ret) {
+ do_destroy(ctx);
+ return ret;
+ }
+
+ ret = do_switch(NULL, ctx, 0);
+ if (ret) {
+ i915_gem_object_unpin(ctx->obj);
+ do_destroy(ctx);
+ } else {
+ DRM_DEBUG_DRIVER("Default HW context loaded\n");
+ }
+
+ return ret;
+}
+
+void i915_gem_context_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ctx_size;
+
+ if (!HAS_HW_CONTEXTS(dev)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ /* If called from reset, or thaw... we've been here already */
+ if (dev_priv->hw_contexts_disabled ||
+ dev_priv->ring[RCS].default_context)
+ return;
+
+ ctx_size = get_context_size(dev);
+ dev_priv->hw_context_size = get_context_size(dev);
+ dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
+
+ if (ctx_size <= 0 || ctx_size > (1<<20)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ if (create_default_context(dev_priv)) {
+ dev_priv->hw_contexts_disabled = true;
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("HW context support initialized\n");
+}
+
+void i915_gem_context_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->hw_contexts_disabled)
+ return;
+
+ /* The only known way to stop the gpu from accessing the hw context is
+ * to reset it. Do this as the very last operation to avoid confusing
+ * other code, leading to spurious errors. */
+ intel_gpu_reset(dev);
+
+ i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
+
+ do_destroy(dev_priv->ring[RCS].default_context);
+}
+
+static int context_idr_cleanup(int id, void *p, void *data)
+{
+ struct i915_hw_context *ctx = p;
+
+ BUG_ON(id == DEFAULT_CONTEXT_ID);
+
+ do_destroy(ctx);
+
+ return 0;
+}
+
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ mutex_lock(&dev->struct_mutex);
+ idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
+ idr_destroy(&file_priv->context_idr);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static struct i915_hw_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
+{
+ return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
+}
+
+static inline int
+mi_set_context(struct intel_ring_buffer *ring,
+ struct i915_hw_context *new_context,
+ u32 hw_flags)
+{
+ int ret;
+
+ /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
+ * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
+ * explicitly, so we rely on the value at ring init, stored in
+ * itlb_before_ctx_switch.
+ */
+ if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
+ ret = ring->flush(ring, 0, 0);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, new_context->obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ hw_flags);
+ /* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
+ intel_ring_emit(ring, MI_NOOP);
+
+ if (IS_GEN7(ring->dev))
+ intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return ret;
+}
+
+static int do_switch(struct drm_i915_gem_object *from_obj,
+ struct i915_hw_context *to,
+ u32 seqno)
+{
+ struct intel_ring_buffer *ring = NULL;
+ u32 hw_flags = 0;
+ int ret;
+
+ BUG_ON(to == NULL);
+ BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
+
+ ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false);
+ if (ret)
+ return ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ * that thanks to write = false in this call and us not setting any gpu
+ * write domains when putting a context object onto the active list
+ * (when switching away from it), this won't block.
+ * XXX: We need a real interface to do this instead of trickery. */
+ ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ if (!to->obj->has_global_gtt_mapping)
+ i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+
+ if (!to->is_initialized || is_default_context(to))
+ hw_flags |= MI_RESTORE_INHIBIT;
+ else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
+ hw_flags |= MI_FORCE_RESTORE;
+
+ ring = to->ring;
+ ret = mi_set_context(ring, to, hw_flags);
+ if (ret) {
+ i915_gem_object_unpin(to->obj);
+ return ret;
+ }
+
+ /* The hardware is only done with a context's backing object after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. In fact, the below code
+ * is a bit suboptimal because the retiring can occur simply after the
+ * MI_SET_CONTEXT instead of when the next seqno has completed.
+ */
+ if (from_obj != NULL) {
+ from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_gem_object_move_to_active(from_obj, ring, seqno);
+ /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
+ * whole damn pipeline, we don't need to explicitly mark the
+ * object dirty. The only exception is that the context must be
+ * correct in case the object gets swapped out. Ideally we'd be
+ * able to defer doing this until we know the object would be
+ * swapped, but there is no way to do that yet.
+ */
+ from_obj->dirty = 1;
+ BUG_ON(from_obj->ring != to->ring);
+ i915_gem_object_unpin(from_obj);
+
+ drm_gem_object_unreference(&from_obj->base);
+ }
+
+ drm_gem_object_reference(&to->obj->base);
+ ring->last_context_obj = to->obj;
+ to->is_initialized = true;
+
+ return 0;
+}
+
+/**
+ * i915_switch_context() - perform a GPU context switch.
+ * @ring: ring for which we'll execute the context switch
+ * @file: drm file associated with the context, may be NULL
+ * @to_id: id of the context to switch to
+ *
+ * The context life cycle is simple. The context refcount is incremented and
+ * decremented by 1 on create and destroy. If the context is in use by the GPU,
+ * it will have a refcount > 1. This allows us to destroy the context abstract
+ * object while letting the normal object tracking destroy the backing BO.
+ */
+int i915_switch_context(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ int to_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_file_private *file_priv = NULL;
+ struct i915_hw_context *to;
+ struct drm_i915_gem_object *from_obj = ring->last_context_obj;
+
+ if (dev_priv->hw_contexts_disabled)
+ return 0;
+
+ if (ring != &dev_priv->ring[RCS])
+ return 0;
+
+ if (file)
+ file_priv = file->driver_priv;
+
+ if (to_id == DEFAULT_CONTEXT_ID) {
+ to = ring->default_context;
+ } else {
+ to = i915_gem_context_get(file_priv, to_id);
+ if (to == NULL)
+ return -ENOENT;
+ }
+
+ if (from_obj == to->obj)
+ return 0;
+
+ return do_switch(from_obj, to, i915_gem_next_request_seqno(to->ring));
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_context_create *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ if (dev_priv->hw_contexts_disabled)
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = create_hw_context(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ args->ctx_id = ctx->id;
+ DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+
+ return 0;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_context *ctx;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ctx = i915_gem_context_get(file_priv, args->ctx_id);
+ if (!ctx) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOENT;
+ }
+
+ do_destroy(ctx);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ return 0;
+}
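For illustration, a minimal userspace sketch of driving the two ioctls above, assuming the libdrm drmIoctl() helper and the DRM_IOCTL_I915_GEM_CONTEXT_CREATE/DESTROY request numbers that this series adds to i915_drm.h (not visible in this hunk):

#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Sketch: create a HW context, run some work in it, then destroy it. */
static int example_use_hw_context(int fd)
{
        struct drm_i915_gem_context_create create;
        struct drm_i915_gem_context_destroy destroy;

        memset(&create, 0, sizeof(create));
        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
                return -1; /* e.g. ENODEV when hw_contexts_disabled */

        /* Execbuffers aimed at this context carry create.ctx_id via
         * i915_execbuffer2_set_context_id(); per the execbuffer checks
         * later in this patch, only the render ring accepts it. */

        memset(&destroy, 0, sizeof(destroy));
        destroy.ctx_id = create.ctx_id;
        return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}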
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index a4f6aaabca99..bddf7bed183f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -132,7 +132,8 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
__func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
+ gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ae7c24e12e52..eba0308f10e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -78,11 +78,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
- alignment, 0,
- dev_priv->mm.gtt_mappable_end);
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ 0, dev_priv->mm.gtt_mappable_end);
else
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ drm_mm_init_scan(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 974a9f1068a3..ff2819ea0813 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_i915_obj = to_intel_bo(target_obj);
target_offset = target_i915_obj->gtt_offset;
+ /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
+ * pipe_control writes because the gpu doesn't properly redirect them
+ * through the ppgtt for non_secure batchbuffers. */
+ if (unlikely(IS_GEN6(dev) &&
+ reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
+ !target_i915_obj->has_global_gtt_mapping)) {
+ i915_gem_gtt_bind_object(target_i915_obj,
+ target_i915_obj->cache_level);
+ }
+
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
io_mapping_unmap_atomic(reloc_page);
}
- /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
- * pipe_control writes because the gpu doesn't properly redirect them
- * through the ppgtt for non_secure batchbuffers. */
- if (unlikely(IS_GEN6(dev) &&
- reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- !target_i915_obj->has_global_gtt_mapping)) {
- i915_gem_gtt_bind_object(target_i915_obj,
- target_i915_obj->cache_level);
- }
-
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -810,33 +810,16 @@ err:
return ret;
}
-static int
+static void
i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
+ uint32_t flush_domains)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i, ret;
-
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- for (i = 0; i < I915_NUM_RINGS; i++)
- if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(&dev_priv->ring[i],
- invalidate_domains,
- flush_domains);
- if (ret)
- return ret;
- }
- }
-
- return 0;
}
static int
@@ -885,12 +868,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
- ret = i915_gem_execbuffer_flush(ring->dev,
- cd.invalidate_domains,
- cd.flush_domains,
- cd.flush_rings);
- if (ret)
- return ret;
+ i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains);
}
if (cd.flips) {
@@ -905,6 +885,16 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
+ /* Unconditionally invalidate gpu caches and ensure that we do flush
+ * any residual writes from the previous batch.
+ */
+ ret = i915_gem_flush_ring(ring,
+ I915_GEM_GPU_DOMAINS,
+ ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
+ if (ret)
+ return ret;
+
+ ring->gpu_caches_dirty = false;
return 0;
}
@@ -983,26 +973,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_request *request;
- u32 invalidate;
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires.
- *
- * The sampler always gets flushed on i965 (sigh).
- */
- invalidate = I915_GEM_DOMAIN_COMMAND;
- if (INTEL_INFO(dev)->gen >= 4)
- invalidate |= I915_GEM_DOMAIN_SAMPLER;
- if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(ring);
- return;
- }
+ /* Unconditionally force add_request to emit a full flush. */
+ ring->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL || i915_add_request(ring, file, request)) {
- i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@@ -1044,6 +1021,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
+ u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
@@ -1065,9 +1043,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
+ if (ctx_id != 0) {
+ DRM_DEBUG("Ring %s doesn't support contexts\n",
+ ring->name);
+ return -EPERM;
+ }
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1240,6 +1228,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ ret = i915_switch_context(ring, file, ctx_id);
+ if (ret)
+ goto err;
+
if (ring == &dev_priv->ring[RCS] &&
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
@@ -1367,6 +1359,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.num_cliprects = args->num_cliprects;
exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER;
+ i915_execbuffer2_set_context_id(exec2, 0);
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9fd25a435536..60815b861ec2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
- first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
@@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
- pte_flags |= GEN6_PTE_UNCACHED;
+ if (IS_HASWELL(dev))
+ pte_flags |= HSW_PTE_UNCACHED;
+ else
+ pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
@@ -361,7 +364,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->mm.gtt->needs_dmar)
+ /* don't map imported dma buf objects */
+ if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
return intel_gtt_map_memory(obj->pages,
obj->base.size >> PAGE_SHIFT,
&obj->sg_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ed3224c37423..8a3828528b9d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -375,6 +375,86 @@ static void gen6_pm_rps_work(struct work_struct *work)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+
+/**
+ * ivybridge_parity_work - Workqueue called when a parity error interrupt
+ * occurred.
+ * @work: workqueue struct
+ *
+ * Doesn't actually do anything except notify userspace. As a consequence of
+ * this event, userspace should try to remap the bad rows since statistically
+ * the same row is likely to go bad again.
+ */
+static void ivybridge_parity_work(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
+ parity_error_work);
+ u32 error_status, row, bank, subbank;
+ char *parity_event[5];
+ uint32_t misccpctl;
+ unsigned long flags;
+
+ /* We must turn off DOP level clock gating to access the L3 registers.
+ * In order to prevent a get/put style interface, acquire struct mutex
+ * any time we access those registers.
+ */
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+ POSTING_READ(GEN7_MISCCPCTL);
+
+ error_status = I915_READ(GEN7_L3CDERRST1);
+ row = GEN7_PARITY_ERROR_ROW(error_status);
+ bank = GEN7_PARITY_ERROR_BANK(error_status);
+ subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+ I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
+ GEN7_L3CDERRST1_ENABLE);
+ POSTING_READ(GEN7_L3CDERRST1);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+
+ parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+ parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+ parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+ parity_event[4] = NULL;
+
+ kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
+ KOBJ_CHANGE, parity_event);
+
+ DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
+ row, bank, subbank);
+
+ kfree(parity_event[3]);
+ kfree(parity_event[2]);
+ kfree(parity_event[1]);
+}
+
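As a rough sketch of the userspace side this worker notifies, a libudev listener that could pick up the KOBJ_CHANGE event emitted above; the "drm" subsystem filter and the root-only "kernel" netlink group are assumptions, while the property names mirror the parity_event[] strings:

#include <poll.h>
#include <stdio.h>
#include <libudev.h>

/* Sketch: wait for one L3 parity uevent and print the bad row location. */
static void example_wait_for_l3_parity_event(void)
{
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "kernel");
        struct udev_device *dev;
        struct pollfd pfd;

        udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
                dev = udev_monitor_receive_device(mon);
                if (!dev)
                        continue;
                if (udev_device_get_property_value(dev, "L3_PARITY_ERROR")) {
                        printf("L3 parity error: row=%s bank=%s subbank=%s\n",
                               udev_device_get_property_value(dev, "ROW"),
                               udev_device_get_property_value(dev, "BANK"),
                               udev_device_get_property_value(dev, "SUBBANK"));
                        udev_device_unref(dev);
                        break;
                }
                udev_device_unref(dev);
        }
        udev_monitor_unref(mon);
        udev_unref(udev);
}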
+static void ivybridge_handle_parity_error(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ unsigned long flags;
+
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+ queue_work(dev_priv->wq, &dev_priv->parity_error_work);
+}
+
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
@@ -394,6 +474,9 @@ static void snb_gt_irq_handler(struct drm_device *dev,
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
}
+
+ if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
+ ivybridge_handle_parity_error(dev);
}
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
@@ -429,15 +512,10 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
- u32 vblank_status;
- int vblank = 0;
bool blc_event;
atomic_inc(&dev_priv->irq_received);
- vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS |
- PIPE_VBLANK_INTERRUPT_STATUS;
-
while (true) {
iir = I915_READ(VLV_IIR);
gt_iir = I915_READ(GTIIR);
@@ -467,6 +545,16 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
+ drm_handle_vblank(dev, pipe);
+
+ if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
+ }
+
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
@@ -481,19 +569,6 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
I915_READ(PORT_HOTPLUG_STAT);
}
-
- if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 0);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
- if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) {
- drm_handle_vblank(dev, 1);
- vblank++;
- intel_finish_page_flip(dev, 0);
- }
-
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -991,6 +1066,7 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
@@ -1104,6 +1180,7 @@ static void i915_capture_error_state(struct drm_device *dev)
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
+ error->ccid = I915_READ(CCID);
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
@@ -1426,23 +1503,20 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 dpfl, imr;
+ u32 imr;
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl |= PIPEA_VBLANK_INT_EN;
+ else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
I915_WRITE(VLV_IMR, imr);
+ i915_enable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -1492,20 +1566,17 @@ static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- u32 dpfl, imr;
+ u32 imr;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- dpfl = I915_READ(VLV_DPFLIPSTAT);
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
- if (pipe == 0) {
- dpfl &= ~PIPEA_VBLANK_INT_EN;
+ if (pipe == 0)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
- } else {
- dpfl &= ~PIPEB_VBLANK_INT_EN;
+ else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- }
I915_WRITE(VLV_IMR, imr);
- I915_WRITE(VLV_DPFLIPSTAT, dpfl);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -1648,7 +1719,6 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
atomic_set(&dev_priv->irq_received, 0);
-
I915_WRITE(HWSTAM, 0xeffe);
/* XXX hotplug from PCH */
@@ -1811,13 +1881,13 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~0;
+ dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
- GEN6_BLITTER_USER_INTERRUPT;
+ GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
@@ -1840,16 +1910,24 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 render_irqs;
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
+ u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
- dev_priv->irq_mask = ~enable_mask;
+ /*
+ * Leave vblank interrupts masked initially. Enable/disable will
+ * toggle them based on usage.
+ */
+ dev_priv->irq_mask = (~enable_mask) |
+ I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+ I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1868,26 +1946,27 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ i915_enable_pipestat(dev_priv, 0, pipestat_enable);
+ i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- render_irqs = GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
- GT_GEN6_BLT_CS_ERROR_INTERRUPT |
- GT_GEN6_BLT_USER_INTERRUPT |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_GEN6_BSD_CS_ERROR_INTERRUPT |
- GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
- GT_PIPE_NOTIFY |
- GT_RENDER_CS_ERROR_INTERRUPT |
- GT_SYNC_STATUS |
- GT_USER_INTERRUPT;
-
- dev_priv->gt_irq_mask = ~render_irqs;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0);
- I915_WRITE(GTIER, render_irqs);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
+ GT_GEN6_BLT_CS_ERROR_INTERRUPT |
+ GT_GEN6_BLT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_GEN6_BSD_CS_ERROR_INTERRUPT |
+ GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_RENDER_CS_ERROR_INTERRUPT |
+ GT_SYNC_STATUS |
+ GT_USER_INTERRUPT);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@@ -2166,9 +2245,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
@@ -2328,10 +2407,8 @@ static void i965_irq_preinstall(struct drm_device * dev)
atomic_set(&dev_priv->irq_received, 0);
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
@@ -2344,11 +2421,13 @@ static void i965_irq_preinstall(struct drm_device * dev)
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
+ I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -2364,13 +2443,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
- if (I915_HAS_HOTPLUG(dev)) {
- /* Enable in IER... */
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- /* and unmask in IMR */
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- }
-
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
@@ -2390,36 +2462,40 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
- if (I915_HAS_HOTPLUG(dev)) {
- u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
-
- /* Note HDMI and DP share bits */
- if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMIC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
- hotplug_en |= HDMID_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ /* Note HDMI and DP share hotplug bits */
+ hotplug_en = 0;
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (IS_G4X(dev)) {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
- if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
- hotplug_en |= CRT_HOTPLUG_INT_EN;
+ } else {
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ }
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
- /* Programming the CRT detection parameters tends
- to generate a spurious hotplug event about three
- seconds later. So just do it once.
- */
- if (IS_G4X(dev))
- hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
- hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
- }
+ /* Programming the CRT detection parameters tends
+ to generate a spurious hotplug event about three
+ seconds later. So just do it once.
+ */
+ if (IS_G4X(dev))
+ hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
+ hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ }
- /* Ignore TV since it's buggy */
+ /* Ignore TV since it's buggy */
- I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- }
+ I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
intel_opregion_enable_asle(dev);
@@ -2477,8 +2553,7 @@ static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
- if ((I915_HAS_HOTPLUG(dev)) &&
- (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
@@ -2551,10 +2626,8 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv)
return;
- if (I915_HAS_HOTPLUG(dev)) {
- I915_WRITE(PORT_HOTPLUG_EN, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
- }
+ I915_WRITE(PORT_HOTPLUG_EN, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
@@ -2575,6 +2648,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+ INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 48d5e8e051cf..28725ce5b82c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -115,6 +115,7 @@
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
+#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)
@@ -217,6 +218,9 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
+#define MI_ARB_ENABLE (1<<0)
+#define MI_ARB_DISABLE (0<<0)
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
@@ -299,6 +303,7 @@
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_CS_STALL (1<<20)
+#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
@@ -686,10 +691,10 @@
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
-#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
+#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
+#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
+#define GEN6_BSD_GO_INDICATOR (1 << 4)
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
@@ -908,6 +913,7 @@
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
+#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
#define SRX_INDEX 0x3c4
@@ -1453,6 +1459,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_THREAD_STATUS_REG 0x13805c
+#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
+#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
+
#define GEN6_GT_PERF_STATUS 0x145948
#define GEN6_RP_STATE_LIMITS 0x145994
#define GEN6_RP_STATE_CAP 0x145998
@@ -1462,6 +1472,31 @@
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
+#define CXT_SIZE 0x21a0
+#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
+#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
+#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
+#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
+#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
+ GEN6_CXT_RING_SIZE(cxt_reg) + \
+ GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
+ GEN6_CXT_PIPELINE_SIZE(cxt_reg))
+#define GEN7_CXT_SIZE 0x21a8
+#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
+#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
+#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
+#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
+#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
+#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
+ GEN7_CXT_RING_SIZE(ctx_reg) + \
+ GEN7_CXT_RENDER_SIZE(ctx_reg) + \
+ GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
+ GEN7_CXT_GT1_SIZE(ctx_reg) + \
+ GEN7_CXT_VFSTATE_SIZE(ctx_reg))
+
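As a worked reference for the macros above, the sum that GEN6_CXT_TOTAL_SIZE() evaluates to, open-coded; the units of the result and the multiplier that the driver's context-size helper applies to it are not shown in this hunk and are left out here:

#include <stdint.h>

/* Open-coded form of GEN6_CXT_TOTAL_SIZE(): the sum of the five 6-bit
 * fields packed into the CXT_SIZE register at bits 24, 18, 12, 6 and 0. */
static unsigned int gen6_cxt_total_units(uint32_t cxt_reg)
{
        return ((cxt_reg >> 24) & 0x3f) +       /* power context */
               ((cxt_reg >> 18) & 0x3f) +       /* ring context */
               ((cxt_reg >> 12) & 0x3f) +       /* render context */
               ((cxt_reg >>  6) & 0x3f) +       /* extended state */
               ((cxt_reg >>  0) & 0x3f);        /* pipeline state */
}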
/*
* Overlay regs
*/
@@ -1566,20 +1601,34 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
-#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
-#define DPB_HOTPLUG_INT_STATUS (1 << 29)
-#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
-#define DPC_HOTPLUG_INT_STATUS (1 << 28)
-#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
-#define DPD_HOTPLUG_INT_STATUS (1 << 27)
+/* HDMI/DP bits are gen4+ */
+#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define DPD_HOTPLUG_INT_STATUS (3 << 21)
+#define DPC_HOTPLUG_INT_STATUS (3 << 19)
+#define DPB_HOTPLUG_INT_STATUS (3 << 17)
+/* HDMI bits are shared with the DP bits */
+#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
+#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
+#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
+#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
+#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
+#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
+/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
-#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
-#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+/* SDVO is different across gen3/4 */
+#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
+#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
+#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
+#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
+#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
/* SDVO port control */
#define SDVOB 0x61140
@@ -1711,8 +1760,10 @@
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
@@ -1723,7 +1774,11 @@
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
+#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
+#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
+#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
+#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
/* Panel power sequencing */
@@ -1795,18 +1850,35 @@
#define PFIT_AUTO_RATIOS 0x61238
/* Backlight control */
-#define BLC_PWM_CTL 0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
-#define BLM_COMBINATION_MODE (1 << 30)
+#define BLM_PWM_ENABLE (1 << 31)
+#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
+#define BLM_PIPE_SELECT (1 << 29)
+#define BLM_PIPE_SELECT_IVB (3 << 29)
+#define BLM_PIPE_A (0 << 29)
+#define BLM_PIPE_B (1 << 29)
+#define BLM_PIPE_C (2 << 29) /* ivb + */
+#define BLM_PIPE(pipe) ((pipe) << 29)
+#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
+#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
+#define BLM_PHASE_IN_ENABLE (1 << 25)
+#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
+#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
+#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
+#define BLM_PHASE_IN_COUNT_SHIFT (8)
+#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
+#define BLM_PHASE_IN_INCR_SHIFT (0)
+#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
+#define BLC_PWM_CTL 0x61254
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
*
* The actual value is this field multiplied by two.
*/
-#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
-#define BLM_LEGACY_MODE (1 << 16)
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
@@ -1816,9 +1888,24 @@
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
+#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
#define BLC_HIST_CTL 0x61260
+/* New registers for PCH-split platforms. Apart from where new bits show up,
+ * the register layout matches the gen4 BLC_PWM_CTL[12]. */
+#define BLC_PWM_CPU_CTL2 0x48250
+#define BLC_PWM_CPU_CTL 0x48254
+
+/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
+ * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
+#define BLC_PWM_PCH_CTL1 0xc8250
+#define BLM_PCH_PWM_ENABLE (1 << 31)
+#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
+#define BLM_PCH_POLARITY (1 << 29)
+#define BLC_PWM_PCH_CTL2 0xc8254
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -2583,13 +2670,13 @@
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define VLV_DPFLIPSTAT 0x70028
-#define PIPEB_LINE_COMPARE_STATUS (1<<29)
+#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
#define SPRITED_FLIPDONE_INT_EN (1<<26)
#define SPRITEC_FLIPDONE_INT_EN (1<<25)
#define PLANEB_FLIPDONE_INT_EN (1<<24)
-#define PIPEA_LINE_COMPARE_STATUS (1<<21)
+#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIPDONE_INT_EN (1<<18)
@@ -2897,13 +2984,14 @@
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
+#define DSPLINOFF(plane) DSPADDR(plane)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
- (I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+ (I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 0x71410
@@ -3771,6 +3859,9 @@
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_TP1_TO_TP2_48 (2<<20)
+#define FDI_RX_TP1_TO_TP2_64 (3<<20)
+#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@@ -3824,7 +3915,6 @@
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
/* or SDVOB */
-#define VLV_HDMIB 0x61140
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER(pipe) ((pipe) << 30)
@@ -3855,20 +3945,18 @@
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
-#define BLC_PWM_CPU_CTL2 0x48250
-#define PWM_ENABLE (1 << 31)
-#define PWM_PIPE_A (0 << 29)
-#define PWM_PIPE_B (1 << 29)
-#define BLC_PWM_CPU_CTL 0x48254
+/* vlv has 2 sets of panel control regs. */
+#define PIPEA_PP_STATUS 0x61200
+#define PIPEA_PP_CONTROL 0x61204
+#define PIPEA_PP_ON_DELAYS 0x61208
+#define PIPEA_PP_OFF_DELAYS 0x6120c
+#define PIPEA_PP_DIVISOR 0x61210
-#define BLC_PWM_PCH_CTL1 0xc8250
-#define PWM_PCH_ENABLE (1 << 31)
-#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
-#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
-#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
-#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
-
-#define BLC_PWM_PCH_CTL2 0xc8254
+#define PIPEB_PP_STATUS 0x61300
+#define PIPEB_PP_CONTROL 0x61304
+#define PIPEB_PP_ON_DELAYS 0x61308
+#define PIPEB_PP_OFF_DELAYS 0x6130c
+#define PIPEB_PP_DIVISOR 0x61310
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@@ -3992,6 +4080,7 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
+#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
@@ -4012,10 +4101,15 @@
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
#define GEN6_UCGCTL2 0x9404
+# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
+# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
+#define GEN7_UCGCTL4 0x940c
+#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
@@ -4047,6 +4141,7 @@
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
@@ -4111,6 +4206,26 @@
#define GEN6_RC6 3
#define GEN6_RC7 4
+#define GEN7_MISCCPCTL (0x9424)
+#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
+
+/* IVYBRIDGE DPF */
+#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
+#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
+#define GEN7_PARITY_ERROR_VALID (1<<13)
+#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
+#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
+#define GEN7_PARITY_ERROR_ROW(reg) \
+ ((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
+#define GEN7_PARITY_ERROR_BANK(reg) \
+ ((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
+#define GEN7_PARITY_ERROR_SUBBANK(reg) \
+ ((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
+#define GEN7_L3CDERRST1_ENABLE (1<<7)
+
+#define GEN7_L3LOG_BASE 0xB070
+#define GEN7_L3LOG_SIZE 0x80
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4177,7 +4292,7 @@
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define PIPE_DDI_PORT_MASK (0xf<<28)
+#define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
@@ -4335,7 +4450,7 @@
#define PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, \
PIPE_WM_LINETIME_A, \
- PIPE_WM_LINETIME_A)
+ PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
@@ -4347,4 +4462,9 @@
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
+#define WM_DBG 0x45280
+#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
+#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
+#define WM_DBG_DISALLOW_SPRITE (1<<2)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a748e5cabe14..4776ccf1b3cd 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -828,10 +828,7 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if (INTEL_INFO(dev)->gen >= 6)
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 79f83445afa0..7631807a2788 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -29,8 +29,10 @@
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include "intel_drv.h"
#include "i915_drv.h"
+#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -92,20 +94,145 @@ static struct attribute_group rc6_attr_group = {
.attrs = rc6_attrs
};
-void i915_setup_sysfs(struct drm_device *dev)
+static int l3_access_valid(struct drm_device *dev, loff_t offset)
+{
+ if (!IS_IVYBRIDGE(dev))
+ return -EPERM;
+
+ if (offset % 4 != 0)
+ return -EINVAL;
+
+ if (offset >= GEN7_L3LOG_SIZE)
+ return -ENXIO;
+
+ return 0;
+}
+
+static ssize_t
+i915_l3_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ uint32_t misccpctl;
+ int i, ret;
+
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
+
+ ret = i915_mutex_lock_interruptible(drm_dev);
+ if (ret)
+ return ret;
+
+ misccpctl = I915_READ(GEN7_MISCCPCTL);
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
+
+ for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
+ *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
+
+ I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return i - offset;
+}
+
+static ssize_t
+i915_l3_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+ struct drm_device *drm_dev = dminor->dev;
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+ u32 *temp = NULL; /* Just here to make handling failures easy */
int ret;
- /* ILK doesn't have any residency information */
- if (INTEL_INFO(dev)->gen < 6)
- return;
+ ret = l3_access_valid(drm_dev, offset);
+ if (ret)
+ return ret;
- ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+ ret = i915_mutex_lock_interruptible(drm_dev);
if (ret)
- DRM_ERROR("sysfs setup failed\n");
+ return ret;
+
+ if (!dev_priv->mm.l3_remap_info) {
+ temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i915_gpu_idle(drm_dev);
+ if (ret) {
+ kfree(temp);
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+ }
+
+ /* TODO: Ideally we really want a GPU reset here to make sure errors
+ * aren't propagated. Since I cannot find a stable way to reset the GPU
+ * at this point it is left as a TODO.
+ */
+ if (temp)
+ dev_priv->mm.l3_remap_info = temp;
+
+ memcpy(dev_priv->mm.l3_remap_info + (offset/4),
+ buf + (offset/4),
+ count);
+
+ i915_gem_l3_remap(drm_dev);
+
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute dpf_attrs = {
+ .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
+ .size = GEN7_L3LOG_SIZE,
+ .read = i915_l3_read,
+ .write = i915_l3_write,
+ .mmap = NULL
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ int ret;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+ &rc6_attr_group);
+ if (ret)
+ DRM_ERROR("RC6 residency sysfs setup failed\n");
+ }
+
+ if (IS_IVYBRIDGE(dev)) {
+ ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+ if (ret)
+ DRM_ERROR("l3 parity sysfs setup failed\n");
+ }
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
}
+#else
+void i915_setup_sysfs(struct drm_device *dev)
+{
+ return;
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+ return;
+}
+#endif /* CONFIG_PM */
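A minimal sketch of consuming the new attribute from userspace, assuming it shows up under the DRM card's sysfs device directory (the path below is a guess); reads are served by i915_l3_read() above, so offsets must be 4-byte aligned and at most GEN7_L3LOG_SIZE (0x80) bytes come back:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define L3LOG_SIZE 0x80 /* mirrors GEN7_L3LOG_SIZE */

/* Sketch: dump any non-zero L3 remap slots; returns 0 on success. */
static int example_dump_l3_remap(const char *path)
{
        uint32_t log[L3LOG_SIZE / 4];
        ssize_t n;
        int i, fd;

        fd = open(path, O_RDONLY); /* e.g. /sys/class/drm/card0/l3_parity */
        if (fd < 0)
                return -1;
        n = pread(fd, log, sizeof(log), 0); /* offset must be 4-byte aligned */
        close(fd);
        if (n < 0)
                return -1;
        for (i = 0; i < n / 4; i++)
                if (log[i])
                        printf("slot %d: 0x%08x\n", i, log[i]);
        return 0;
}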
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dac7bba4d9da..fe90b3a84a6d 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -311,9 +311,33 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_ARGS(ring, seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
+TRACE_EVENT(i915_gem_request_wait_begin,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
- TP_ARGS(ring, seqno)
+ TP_ARGS(ring, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, seqno)
+ __field(bool, blocking)
+ ),
+
+ /* NB: the blocking information is racy since mutex_is_locked
+ * doesn't check that the current thread holds the lock. The only
+ * other option would be to pass the boolean information of whether
+ * or not the class was blocking down through the stack which is
+ * less desirable.
+ */
+ TP_fast_assign(
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->seqno = seqno;
+ __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+ ),
+
+ TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
+ __entry->dev, __entry->ring, __entry->seqno,
+ __entry->blocking ? "yes (NB)" : "no")
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 353459362f6f..8c6074154bf6 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -692,7 +692,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
*
* Returns 0 on success, nonzero on failure.
*/
-bool
+int
intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index dbda6e3bdf07..31c2107e7825 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -476,7 +476,7 @@ struct bdb_edp {
} __attribute__ ((packed));
void intel_setup_bios(struct drm_device *dev);
-bool intel_parse_bios(struct drm_device *dev);
+int intel_parse_bios(struct drm_device *dev);
/*
* Driver<->VBIOS interaction occurs through scratch bits in
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 75a70c46ef1b..23bdc8cd1458 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -88,6 +88,9 @@ static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
+ if (IS_VALLEYVIEW(dev) && mode != DRM_MODE_DPMS_ON)
+ mode = DRM_MODE_DPMS_OFF;
+
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
@@ -129,7 +132,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
}
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -230,6 +233,42 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 adpa;
+ bool ret;
+ u32 save_adpa;
+
+ save_adpa = adpa = I915_READ(ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+
+ I915_WRITE(ADPA, adpa);
+
+ if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000)) {
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ I915_WRITE(ADPA, save_adpa);
+ }
+
+ /* Check the status to see if both blue and green are on now */
+ adpa = I915_READ(ADPA);
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
+ ret = true;
+ else
+ ret = false;
+
+ DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+
+ /* FIXME: debug force function and remove */
+ ret = true;
+
+ return ret;
+}
+
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
@@ -249,6 +288,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
+ if (IS_VALLEYVIEW(dev))
+ return valleyview_crt_detect_hotplug(connector);
+
/*
* On 4 series desktop, CRT detect sequence need to be done twice
* to get a reliable result.
@@ -284,43 +326,68 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
return ret;
}
+static struct edid *intel_crt_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *i2c)
+{
+ struct edid *edid;
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
+
+ return edid;
+}
+
+/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
+static int intel_crt_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+
+ edid = intel_crt_get_edid(connector, adapter);
+ if (!edid)
+ return 0;
+
+ return intel_connector_update_modes(connector, edid);
+}
+
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ struct edid *edid;
+ struct i2c_adapter *i2c;
- /* CRT should always be at 0, but check anyway */
- if (crt->base.type != INTEL_OUTPUT_ANALOG)
- return false;
+ BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
- if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
- struct edid *edid;
- bool is_digital = false;
- struct i2c_adapter *i2c;
+ i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
+ edid = intel_crt_get_edid(connector, i2c);
+
+ if (edid) {
+ bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
- edid = drm_get_edid(connector, i2c);
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
- *
- * On the other hand, what should we do if it is a broken EDID?
*/
- if (edid != NULL) {
- is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- }
-
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
- } else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
+
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
+ kfree(edid);
+
return false;
}
@@ -453,18 +520,27 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
if (I915_HAS_HOTPLUG(dev)) {
+ /* We cannot rely on the HPD pin always being correctly wired
+ * up, for example many KVMs do not pass it through, and so
+ * only trust an assertion that the monitor is connected.
+ */
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- } else {
+ } else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
- return connector_status_disconnected;
- }
}
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
+ /* Load detection is broken on HPD capable machines. Whoever wants a
+ * broken monitor (without edid) to work behind a broken kvm (that fails
+ * to have the right resistors for HP detection) needs to fix this up.
+ * For now just bail out. */
+ if (I915_HAS_HOTPLUG(dev))
+ return connector_status_disconnected;
+
if (!force)
return connector->status;
@@ -498,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct i2c_adapter *i2c;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
- ret = intel_ddc_get_modes(connector, i2c);
+ ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
/* Try to probe digital port for output in DVI-I -> VGA mode. */
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
- return intel_ddc_get_modes(connector, i2c);
+ return intel_crt_ddc_get_modes(connector, i2c);
}
static int intel_crt_set_property(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 46d1e886c692..933c74859172 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -170,6 +170,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
udelay(600);
+ /* We need to program FDI_RX_MISC with the default TP1 to TP2
+ * values before enabling the receiver, and configure the delay
+ * for the FDI timing generator to 90h. Luckily, all the other
+ * bits are supposed to be zeroed, so we can write those values
+ * directly.
+ */
+ I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
+ FDI_RX_FDI_DELAY_90);
+
/* Enable CPU FDI Receiver with auto-training */
reg = FDI_RX_CTL(pipe);
I915_WRITE(reg,
@@ -726,8 +735,7 @@ void intel_ddi_mode_set(struct drm_encoder *encoder,
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
void intel_ddi_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a8538ac0299d..2dfa6cf4886b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -98,6 +98,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock);
+
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
@@ -359,6 +364,48 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
+static const intel_limit_t intel_limits_vlv_dac = {
+ .dot = { .min = 25000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 6000000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 22, .max = 450 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_hdmi = {
+ .dot = { .min = 20000, .max = 165000 },
+ .vco = { .min = 4000000, .max = 5994000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
+static const intel_limit_t intel_limits_vlv_dp = {
+ .dot = { .min = 162000, .max = 270000 },
+ .vco = { .min = 4000000, .max = 5994000 },
+ .n = { .min = 1, .max = 7 },
+ .m = { .min = 60, .max = 300 }, /* guess */
+ .m1 = { .min = 2, .max = 3 },
+ .m2 = { .min = 11, .max = 156 },
+ .p = { .min = 10, .max = 30 },
+ .p1 = { .min = 2, .max = 3 },
+ .p2 = { .dot_limit = 270000,
+ .p2_slow = 2, .p2_fast = 20 },
+ .find_pll = intel_vlv_find_best_pll,
+};
+
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
unsigned long flags;
@@ -384,6 +431,28 @@ out_unlock:
return val;
}
+static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
+ u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->dpio_lock, flags);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
+ DRM_ERROR("DPIO idle wait timed out\n");
+ goto out_unlock;
+ }
+
+ I915_WRITE(DPIO_DATA, val);
+ I915_WRITE(DPIO_REG, reg);
+ I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
+ DPIO_BYTE);
+ if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
+ DRM_ERROR("DPIO write wait timed out\n");
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
+}
+
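
intel_dpio_write() mirrors the existing read helper: wait for the packet register to go idle, load the data and address registers, kick off the transaction, and wait for completion, all under the dpio spinlock. A rough userspace sketch of that mailbox handshake against simulated registers; the bit names and layout are invented for the example:

/* Mailbox-style sideband write sketch; PKT_BUSY and the fake registers
 * are illustrative, not the real DPIO layout. */
#include <stdio.h>
#include <stdint.h>

#define PKT_BUSY (1u << 0)

static uint32_t pkt_reg, data_reg, addr_reg;

static int wait_idle(int spins)
{
        while (spins--) {
                if (!(pkt_reg & PKT_BUSY))
                        return 0;
        }
        return -1;                      /* -ETIMEDOUT in the driver */
}

static void sideband_write(uint32_t addr, uint32_t val)
{
        if (wait_idle(100)) {
                fprintf(stderr, "sideband idle wait timed out\n");
                return;
        }
        data_reg = val;
        addr_reg = addr;
        pkt_reg |= PKT_BUSY;            /* start the transaction */
        pkt_reg &= ~PKT_BUSY;           /* hardware clears BUSY when done */
        if (wait_idle(100))
                fprintf(stderr, "sideband write wait timed out\n");
}

int main(void)
{
        sideband_write(0x0c, 0x01000000);
        printf("addr=0x%02x data=0x%08x\n", (unsigned)addr_reg, (unsigned)data_reg);
        return 0;
}
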
static void vlv_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -434,7 +503,7 @@ static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
* register is uninitialized.
*/
val = I915_READ(reg);
- if (!(val & ~LVDS_DETECTED))
+ if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
dev_priv->lvds_val = val;
}
@@ -510,6 +579,13 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
+ } else if (IS_VALLEYVIEW(dev)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
+ limit = &intel_limits_vlv_dac;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+ limit = &intel_limits_vlv_hdmi;
+ else
+ limit = &intel_limits_vlv_dp;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
@@ -551,11 +627,10 @@ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
- if (encoder->base.crtc == crtc && encoder->type == type)
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->type == type)
return true;
return false;
@@ -783,6 +858,74 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
+static bool
+intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
+ int target, int refclk, intel_clock_t *match_clock,
+ intel_clock_t *best_clock)
+{
+ u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
+ u32 m, n, fastclk;
+ u32 updrate, minupdate, fracbits, p;
+ unsigned long bestppm, ppm, absppm;
+ int dotclk, flag;
+
+ flag = 0;
+ dotclk = target * 1000;
+ bestppm = 1000000;
+ ppm = absppm = 0;
+ fastclk = dotclk / (2*100);
+ updrate = 0;
+ minupdate = 19200;
+ fracbits = 1;
+ n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
+ bestm1 = bestm2 = bestp1 = bestp2 = 0;
+
+ /* based on hardware requirement, prefer smaller n to precision */
+ for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
+ updrate = refclk / n;
+ for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
+ for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
+ if (p2 > 10)
+ p2 = p2 - 1;
+ p = p1 * p2;
+ /* based on hardware requirement, prefer bigger m1,m2 values */
+ for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
+ m2 = (((2*(fastclk * p * n / m1 )) +
+ refclk) / (2*refclk));
+ m = m1 * m2;
+ vco = updrate * m;
+ if (vco >= limit->vco.min && vco < limit->vco.max) {
+ ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
+ absppm = (ppm > 0) ? ppm : (-ppm);
+ if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
+ bestppm = 0;
+ flag = 1;
+ }
+ if (absppm < bestppm - 10) {
+ bestppm = absppm;
+ flag = 1;
+ }
+ if (flag) {
+ bestn = n;
+ bestm1 = m1;
+ bestm2 = m2;
+ bestp1 = p1;
+ bestp2 = p2;
+ flag = 0;
+ }
+ }
+ }
+ }
+ }
+ }
+ best_clock->n = bestn;
+ best_clock->m1 = bestm1;
+ best_clock->m2 = bestm2;
+ best_clock->p1 = bestp1;
+ best_clock->p2 = bestp2;
+
+ return true;
+}
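
The search keeps whichever divider set lands its VCO in range with the smallest parts-per-million deviation from the requested fast clock, preferring small n and large p1*p2. The scoring idea in isolation, as a standalone sketch with made-up limits rather than the real VLV tables:

/* PPM-scoring sketch for PLL divider selection; refclk, target and the
 * divider ranges are illustrative values only. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const long refclk = 100000;             /* kHz */
        const long target = 74250;              /* desired output, kHz */
        long n, p, m, out, ppm;
        long best_n = 0, best_m = 0, best_p = 0, best_ppm = 1000000;

        for (n = 1; n <= 7; n++) {
                for (p = 10; p <= 30; p++) {
                        /* choose m so that refclk * m / (n * p) ~= target */
                        m = (target * n * p + refclk / 2) / refclk;
                        if (m <= 0)
                                continue;
                        out = refclk * m / (n * p);
                        ppm = labs(out - target) * 1000000 / target;
                        if (ppm < best_ppm) {
                                best_ppm = ppm;
                                best_n = n;
                                best_m = m;
                                best_p = p;
                        }
                }
        }
        printf("n=%ld m=%ld p=%ld ppm=%ld\n", best_n, best_m, best_p, best_ppm);
        return 0;
}
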
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
@@ -1232,15 +1375,21 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH dp port still using transcoder B\n");
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
+ WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
+
+ WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
+ "IBX PCH hdmi port still using transcoder B\n");
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
@@ -1255,13 +1404,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
reg = PCH_ADPA;
val = I915_READ(reg);
- WARN(adpa_pipe_enabled(dev_priv, val, pipe),
+ WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
- WARN(lvds_pipe_enabled(dev_priv, val, pipe),
+ WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
@@ -1287,7 +1436,7 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* No really, not for ILK+ */
- BUG_ON(dev_priv->info->gen >= 5);
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
@@ -1344,7 +1493,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
unsigned long flags;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
@@ -1358,7 +1507,7 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
SBI_BUSY |
SBI_CTL_OP_CRWR);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
goto out_unlock;
@@ -1372,10 +1521,10 @@ static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
unsigned long flags;
- u32 value;
+ u32 value = 0;
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
- if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
@@ -1387,7 +1536,7 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
SBI_BUSY |
SBI_CTL_OP_CRRD);
- if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
+ if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
goto out_unlock;
@@ -1723,7 +1872,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
- if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
+ if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
reg, pipe);
I915_WRITE(reg, val & ~PORT_ENABLE);
@@ -1745,12 +1894,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
reg = PCH_ADPA;
val = I915_READ(reg);
- if (adpa_pipe_enabled(dev_priv, val, pipe))
+ if (adpa_pipe_enabled(dev_priv, pipe, val))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
reg = PCH_LVDS;
val = I915_READ(reg);
- if (lvds_pipe_enabled(dev_priv, val, pipe)) {
+ if (lvds_pipe_enabled(dev_priv, pipe, val)) {
DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
@@ -1824,6 +1973,22 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
i915_gem_object_unpin(obj);
}
+/* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
+ * is assumed to be a power of two. */
+static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
+ unsigned int bpp,
+ unsigned int pitch)
+{
+ int tile_rows, tiles;
+
+ tile_rows = *y / 8;
+ *y %= 8;
+ tiles = *x / (512/bpp);
+ *x %= 512/bpp;
+
+ return tile_rows * pitch * 8 + tiles * 4096;
+}
+
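
The helper splits an (x, y) framebuffer position into a tile-aligned byte offset plus the residual position inside the tile: X-tiles are 512 bytes wide and 8 rows tall, so full tile rows contribute pitch * 8 bytes and full tiles 4096 bytes each. A standalone copy of that arithmetic with one worked example (the pitch and coordinates are made up):

/* Standalone copy of the X-tile offset math; bpp is bytes per pixel and
 * is assumed to be a power of two, as in the driver. */
#include <stdio.h>

static unsigned long xtiled_offset(int *x, int *y, unsigned bpp, unsigned pitch)
{
        int tile_rows = *y / 8;
        int tiles;

        *y %= 8;
        tiles = *x / (512 / bpp);
        *x %= 512 / bpp;

        return (unsigned long)tile_rows * pitch * 8 + (unsigned long)tiles * 4096;
}

int main(void)
{
        int x = 300, y = 25;
        unsigned long off = xtiled_offset(&x, &y, 4, 8192); /* 4 bytes/px, 8 KiB pitch */

        /* y = 25 -> 3 tile rows + row 1; x = 300 px -> 2 tiles + 44 px: 204800 bytes */
        printf("offset=%lu residual x=%d y=%d\n", off, x, y);
        return 0;
}
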
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
@@ -1833,7 +1998,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1880,18 +2045,28 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_crtc->dspaddr_offset =
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
+ } else {
+ intel_crtc->dspaddr_offset = linear_offset;
+ }
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), Start + Offset);
+ I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
POSTING_READ(reg);
return 0;
@@ -1906,7 +2081,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
- unsigned long Start, Offset;
+ unsigned long linear_offset;
u32 dspcntr;
u32 reg;
@@ -1961,15 +2136,20 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
- Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ intel_crtc->dspaddr_offset =
+ gen4_compute_dspaddr_offset_xtiled(&x, &y,
+ fb->bits_per_pixel / 8,
+ fb->pitches[0]);
+ linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
+ obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
+ I915_MODIFY_DISPBASE(DSPSURF(plane),
+ obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPADDR(plane), Offset);
+ I915_WRITE(DSPLINOFF(plane), linear_offset);
POSTING_READ(reg);
return 0;
@@ -2656,16 +2836,13 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
@@ -3397,7 +3574,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
@@ -3554,16 +3731,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct intel_encoder *intel_encoder;
unsigned int display_bpc = UINT_MAX, bpc;
/* Walk the encoders & connectors on this crtc, get min bpc */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
unsigned int lvds_bpc;
@@ -3581,21 +3754,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
continue;
}
- if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- /* Use VBT settings if we have an eDP panel */
- unsigned int edp_bpc = dev_priv->edp.bpp / 3;
-
- if (edp_bpc < display_bpc) {
- DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
- display_bpc = edp_bpc;
- }
- continue;
- }
-
/* Not one of the known troublemakers, check the EDID */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- if (connector->encoder != encoder)
+ if (connector->encoder != &intel_encoder->base)
continue;
/* Don't use an invalid EDID bpc value */
@@ -3666,13 +3828,37 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
return display_bpc != bpc;
}
+static int vlv_get_refclk(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int refclk = 27000; /* for DP & HDMI */
+
+ return 100000; /* only one validated so far */
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv))
+ refclk = 100000;
+ else
+ refclk = 96000;
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ refclk = 100000;
+ }
+
+ return refclk;
+}
+
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ if (IS_VALLEYVIEW(dev)) {
+ refclk = vlv_get_refclk(crtc);
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
@@ -3787,6 +3973,72 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
I915_WRITE(LVDS, temp);
}
+static void vlv_update_pll(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ intel_clock_t *clock, intel_clock_t *reduced_clock,
+ int refclk, int num_connectors)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 dpll, mdiv, pdiv;
+ u32 bestn, bestm1, bestm2, bestp1, bestp2;
+ bool is_hdmi;
+
+ is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+ bestn = clock->n;
+ bestm1 = clock->m1;
+ bestm2 = clock->m2;
+ bestp1 = clock->p1;
+ bestp2 = clock->p2;
+
+ /* Enable DPIO clock input */
+ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
+ DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+
+ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
+ mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
+ mdiv |= ((bestn << DPIO_N_SHIFT));
+ mdiv |= (1 << DPIO_POST_DIV_SHIFT);
+ mdiv |= (1 << DPIO_K_SHIFT);
+ mdiv |= DPIO_ENABLE_CALIBRATION;
+ intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+
+ intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
+
+ pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+ (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
+ (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+ intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
+
+ intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+
+ dpll |= DPLL_VCO_ENABLE;
+ I915_WRITE(DPLL(pipe), dpll);
+ POSTING_READ(DPLL(pipe));
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+
+ if (is_hdmi) {
+ u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+
+ if (temp > 1)
+ temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ else
+ temp = 0;
+
+ I915_WRITE(DPLL_MD(pipe), temp);
+ POSTING_READ(DPLL_MD(pipe));
+ }
+
+ intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+}
+
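
vlv_update_pll() packs the selected dividers into a single DPIO word with shift-and-OR field encoding before handing it to intel_dpio_write(). A minimal sketch of that packing style; the field positions below are invented for the example and do not match the real DPIO_DIV layout:

/* Illustrative field packing only; the shifts and widths are made up
 * for the example. */
#include <stdio.h>
#include <stdint.h>

#define F_M2_SHIFT   0      /* 8 bits */
#define F_M1_SHIFT   8      /* 2 bits */
#define F_N_SHIFT   10      /* 3 bits */
#define F_P2_SHIFT  13      /* 5 bits */
#define F_P1_SHIFT  18      /* 3 bits */

static uint32_t pack_dividers(unsigned n, unsigned m1, unsigned m2,
                              unsigned p1, unsigned p2)
{
        return (m2 << F_M2_SHIFT) | (m1 << F_M1_SHIFT) | (n << F_N_SHIFT) |
               (p2 << F_P2_SHIFT) | (p1 << F_P1_SHIFT);
}

int main(void)
{
        uint32_t mdiv = pack_dividers(3, 2, 81, 3, 10);

        printf("mdiv = 0x%08x\n", (unsigned)mdiv);
        return 0;
}
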
static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -3974,15 +4226,11 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
u32 dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4044,6 +4292,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
if (IS_GEN2(dev))
i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+ else if (IS_VALLEYVIEW(dev))
+ vlv_update_pll(crtc, mode, adjusted_mode, &clock, NULL,
+ refclk, num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
@@ -4282,15 +4533,11 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4327,7 +4574,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder, *edp_encoder = NULL;
const intel_limit_t *limit;
int ret;
@@ -4338,10 +4584,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
bool dither;
bool is_cpu_edp = false, is_pch_edp = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- if (encoder->base.crtc != crtc)
- continue;
-
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
@@ -4405,25 +4648,10 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
&clock,
&reduced_clock);
}
- /* SDVO TV has fixed PLL values depend on its clock range,
- this mirrors vbios setting. */
- if (is_sdvo && is_tv) {
- if (adjusted_mode->clock >= 100000
- && adjusted_mode->clock < 140500) {
- clock.p1 = 2;
- clock.p2 = 10;
- clock.n = 3;
- clock.m1 = 16;
- clock.m2 = 8;
- } else if (adjusted_mode->clock >= 140500
- && adjusted_mode->clock <= 200000) {
- clock.p1 = 1;
- clock.p2 = 10;
- clock.n = 6;
- clock.m1 = 12;
- clock.m2 = 8;
- }
- }
+
+ if (is_sdvo && is_tv)
+ i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
+
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
@@ -4431,16 +4659,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (is_cpu_edp) {
- target_clock = mode->clock;
intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
- /* [e]DP over FDI requires target mode clock
- instead of link clock */
- if (is_dp)
- target_clock = mode->clock;
- else
- target_clock = adjusted_mode->clock;
-
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
@@ -4451,6 +4671,14 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
+ /* [e]DP over FDI requires target mode clock instead of link clock. */
+ if (edp_encoder)
+ target_clock = intel_edp_target_clock(edp_encoder, mode);
+ else if (is_dp)
+ target_clock = mode->clock;
+ else
+ target_clock = adjusted_mode->clock;
+
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
@@ -4662,16 +4890,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("enabling CxSR downclocking\n");
- pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
- }
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
- if (HAS_PIPE_CXSR(dev)) {
- DRM_DEBUG_KMS("disabling CxSR downclocking\n");
- pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
- }
}
}
@@ -5975,7 +6195,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -5984,9 +6203,6 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6003,7 +6219,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_advance(ring);
return 0;
@@ -6021,7 +6237,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- unsigned long offset;
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
@@ -6030,9 +6245,6 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
if (ret)
goto err;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
-
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
@@ -6046,7 +6258,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@@ -6084,7 +6296,9 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
+ intel_ring_emit(ring,
+ (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -6124,7 +6338,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset);
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -6187,7 +6401,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
return 0;
@@ -6219,6 +6433,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int ret;
+ /* Can't change pixel format via MI display flips. */
+ if (fb->pixel_format != crtc->fb->pixel_format)
+ return -EINVAL;
+
+ /*
+ * TILEOFF/LINOFF registers can't be changed via MI display flips.
+ * Note that pitch changes could also affect these registers.
+ */
+ if (INTEL_INFO(dev)->gen > 3 &&
+ (fb->offsets[0] != crtc->fb->offsets[0] ||
+ fb->pitches[0] != crtc->fb->pitches[0]))
+ return -EINVAL;
+
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
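
The new checks reject flips that would need anything beyond a surface base address update: the MI display flip command cannot change the pixel format, and on gen4+ it also cannot reprogram TILEOFF/LINOFF, so pitch and offset must match as well. A small sketch of that compatibility test; struct fb_desc and the sample values are invented for the example:

/* Sketch of the flip compatibility check: an MI display flip can only
 * swap the surface base address, so format and layout must match. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct fb_desc {
        uint32_t pixel_format;
        uint32_t pitch;
        uint32_t offset;
};

static bool flip_compatible(const struct fb_desc *cur,
                            const struct fb_desc *new, int gen)
{
        if (new->pixel_format != cur->pixel_format)
                return false;
        /* TILEOFF/LINOFF (gen4+) can't be updated by the flip command. */
        if (gen > 3 && (new->pitch != cur->pitch || new->offset != cur->offset))
                return false;
        return true;
}

int main(void)
{
        struct fb_desc cur = { 0x34325258, 8192, 0 };   /* example: 'XR24', 8 KiB pitch */
        struct fb_desc next = { 0x34325258, 4096, 0 };

        printf("flip allowed: %s\n", flip_compatible(&cur, &next, 4) ? "yes" : "no");
        return 0;
}
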
@@ -6249,7 +6476,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto cleanup;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6284,6 +6513,7 @@ cleanup_pending:
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
+cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -6566,7 +6796,24 @@ static void intel_setup_outputs(struct drm_device *dev)
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
+ } else if (IS_VALLEYVIEW(dev)) {
+ int found;
+ if (I915_READ(SDVOB) & PORT_DETECTED) {
+ /* SDVOB multiplex with HDMIB */
+ found = intel_sdvo_init(dev, SDVOB, true);
+ if (!found)
+ intel_hdmi_init(dev, SDVOB);
+ if (!found && (I915_READ(DP_B) & DP_DETECTED))
+ intel_dp_init(dev, DP_B);
+ }
+
+ if (I915_READ(SDVOC) & PORT_DETECTED)
+ intel_hdmi_init(dev, SDVOC);
+
+ /* Shares lanes with HDMI on SDVOC */
+ if (I915_READ(DP_C) & DP_DETECTED)
+ intel_dp_init(dev, DP_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -6623,7 +6870,7 @@ static void intel_setup_outputs(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
@@ -6777,9 +7024,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
@@ -6923,20 +7167,18 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ /* We attempt to initialize the necessary power wells early, so that the
+ * subsystems that expect power to be enabled can work.
+ */
+ intel_init_power_wells(dev);
- intel_init_clock_gating(dev);
+ intel_prepare_ddi(dev);
- if (IS_IRONLAKE_M(dev)) {
- ironlake_enable_drps(dev);
- ironlake_enable_rc6(dev);
- intel_init_emon(dev);
- }
+ intel_init_clock_gating(dev);
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
- gen6_enable_rps(dev_priv);
- gen6_update_ring_freq(dev_priv);
- }
+ mutex_lock(&dev->struct_mutex);
+ intel_enable_gt_powersave(dev);
+ mutex_unlock(&dev->struct_mutex);
}
void intel_modeset_init(struct drm_device *dev)
@@ -6958,8 +7200,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_pm(dev);
- intel_prepare_ddi(dev);
-
intel_init_display(dev);
if (IS_GEN2(dev)) {
@@ -6972,7 +7212,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
- dev->mode_config.fb_base = dev->agp->base;
+ dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -7025,13 +7265,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_disable_fbc(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_drps(dev);
- if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
- gen6_disable_rps(dev);
+ intel_disable_gt_powersave(dev);
- if (IS_IRONLAKE_M(dev))
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c0449324143c..a6c426afaa7a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -155,6 +155,18 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
*link_bw = 270000;
}
+int
+intel_edp_target_clock(struct intel_encoder *intel_encoder,
+ struct drm_display_mode *mode)
+{
+ struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+
+ if (intel_dp->panel_fixed_mode)
+ return intel_dp->panel_fixed_mode->clock;
+ else
+ return mode->clock;
+}
+
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
@@ -225,7 +237,7 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -239,8 +251,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
if (mode_rate > max_rate)
return false;
- if (adjusted_mode)
- adjusted_mode->private_flags
+ if (adjust_mode)
+ mode->private_flags
|= INTEL_MODE_DP_FORCE_6BPC;
return true;
@@ -263,7 +275,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+ if (!intel_dp_adjust_dithering(intel_dp, mode, false))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@@ -691,7 +703,8 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
}
static bool
-intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_dp_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
@@ -706,28 +719,23 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
- /*
- * the mode->clock is used to calculate the Data&Link M/N
- * of the pipe. For the eDP the fixed clock should be used.
- */
- mode->clock = intel_dp->panel_fixed_mode->clock;
}
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
- max_lane_count, bws[max_clock], mode->clock);
+ max_lane_count, bws[max_clock], adjusted_mode->clock);
- if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+ if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
- mode_rate = intel_dp_link_required(mode->clock, bpp);
+ mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
- for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
- for (clock = 0; clock <= max_clock; clock++) {
+ for (clock = 0; clock <= max_clock; clock++) {
+ for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (mode_rate <= link_avail) {
@@ -786,8 +794,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
+ struct intel_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
@@ -797,13 +804,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
/*
* Find the lane count in the intel_encoder private
*/
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- if (encoder->crtc != crtc)
- continue;
-
- intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
{
@@ -1171,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ /* We need to switch off panel power _and_ force vdd; otherwise some
+ * panels get very unhappy and cease to work. */
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
}
@@ -1284,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
-
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ ironlake_edp_panel_off(intel_dp);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1323,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
/* Switching the panel off requires vdd. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
- ironlake_edp_panel_off(intel_dp);
-
intel_dp_sink_dpms(intel_dp, mode);
+ ironlake_edp_panel_off(intel_dp);
intel_dp_link_down(intel_dp);
- ironlake_edp_panel_vdd_off(intel_dp, false);
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_off(encoder);
@@ -1768,7 +1771,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
- if (i == intel_dp->lane_count) {
+ if (i == intel_dp->lane_count && voltage_tries == 5) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
@@ -1922,7 +1925,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
DP |= DP_LINK_TRAIN_OFF;
}
- if (!HAS_PCH_CPT(dev) &&
+ if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
@@ -2099,25 +2102,23 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t temp, bit;
+ uint32_t bit;
switch (intel_dp->output_reg) {
case DP_B:
- bit = DPB_HOTPLUG_INT_STATUS;
+ bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
- bit = DPC_HOTPLUG_INT_STATUS;
+ bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
- bit = DPD_HOTPLUG_INT_STATUS;
+ bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
}
- temp = I915_READ(PORT_HOTPLUG_STAT);
-
- if ((temp & bit) == 0)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
return intel_dp_detect_dpcd(intel_dp);
@@ -2399,16 +2400,11 @@ int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_encoder *encoder;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct intel_dp *intel_dp;
+ struct intel_encoder *encoder;
- if (encoder->crtc != crtc)
- continue;
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
@@ -2520,19 +2516,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS;
+ DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
- HDMIC_HOTPLUG_INT_STATUS;
+ DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
- HDMID_HOTPLUG_INT_STATUS;
+ DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3e0918834e7e..cd54cf88a28f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -46,15 +46,16 @@
})
#define wait_for_atomic_us(COND, US) ({ \
- int i, ret__ = -ETIMEDOUT; \
- for (i = 0; i < (US); i++) { \
- if ((COND)) { \
- ret__ = 0; \
- break; \
- } \
- udelay(1); \
- } \
- ret__; \
+ unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
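
The rewritten wait_for_atomic_us() polls against a jiffies deadline instead of counting udelay(1) iterations, so the timeout reflects real elapsed time even when each poll takes longer than a microsecond. The same deadline-polling shape in a userspace sketch, with CLOCK_MONOTONIC standing in for jiffies:

/* Deadline-based polling sketch; clock_gettime() stands in for jiffies. */
#include <stdio.h>
#include <stdbool.h>
#include <time.h>

static long long now_us(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static int wait_for_us(bool (*cond)(void), long long timeout_us)
{
        long long deadline = now_us() + timeout_us;

        while (!cond()) {
                if (now_us() > deadline)
                        return -1;      /* -ETIMEDOUT in the kernel macro */
        }
        return 0;
}

static bool never_ready(void) { return false; }

int main(void)
{
        printf("wait returned %d\n", wait_for_us(never_ready, 100));
        return 0;
}
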
@@ -169,6 +170,7 @@ struct intel_crtc {
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
bool active; /* is the crtc on? independent of the dpms mode */
+ bool primary_disabled; /* is the crtc obscured by a plane? */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
bool lowfreq_avail;
@@ -176,6 +178,11 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+ /* Display surface base address adjustment for pageflips. Note that on
+ * gen4+ this only adjusts up to a tile; offsets within a tile are
+ * handled in the hw itself (with the TILEOFF register). */
+ unsigned long dspaddr_offset;
+
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
@@ -191,7 +198,6 @@ struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
- bool primary_disabled;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@@ -301,6 +307,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
+ void (*set_infoframes)(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode);
};
static inline struct drm_crtc *
@@ -334,8 +342,9 @@ struct intel_fbc_work {
int interval;
};
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
@@ -343,9 +352,6 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode);
-extern void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@@ -360,6 +366,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
+extern int intel_edp_target_clock(struct intel_encoder *,
+ struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
@@ -372,13 +380,13 @@ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
-extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
-extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
@@ -423,9 +431,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void ironlake_disable_rc6(struct drm_device *dev);
-extern void ironlake_enable_drps(struct drm_device *dev);
-extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -492,10 +497,11 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
-extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
-extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
-extern void gen6_disable_rps(struct drm_device *dev);
-extern void intel_init_emon(struct drm_device *dev);
+extern void intel_init_power_wells(struct drm_device *dev);
+extern void intel_enable_gt_powersave(struct drm_device *dev);
+extern void intel_disable_gt_powersave(struct drm_device *dev);
+extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
+extern void ironlake_teardown_rc6(struct drm_device *dev);
extern void intel_ddi_dpms(struct drm_encoder *encoder, int mode);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 60ba50b956f2..36c542e5036b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -136,7 +136,7 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
}
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index bf8690720a0c..97f673523b97 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -140,7 +140,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+ info->screen_base =
+ ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
+ size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ead3bf7c21d..98f602427eb8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -37,6 +37,19 @@
#include "i915_drm.h"
#include "i915_drv.h"
+static void
+assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t enabled_bits;
+
+ enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+
+ WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
+}
+
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
@@ -121,36 +134,31 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
- val &= ~VIDEO_DIP_PORT_MASK;
- if (intel_hdmi->sdvox_reg == SDVOB)
- val |= VIDEO_DIP_PORT_B;
- else if (intel_hdmi->sdvox_reg == SDVOC)
- val |= VIDEO_DIP_PORT_C;
- else
- return;
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(VIDEO_DIP_CTL, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(VIDEO_DIP_CTL, val);
+ POSTING_READ(VIDEO_DIP_CTL);
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
@@ -160,46 +168,32 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- val &= ~VIDEO_DIP_PORT_MASK;
- switch (intel_hdmi->sdvox_reg) {
- case HDMIB:
- val |= VIDEO_DIP_PORT_B;
- break;
- case HDMIC:
- val |= VIDEO_DIP_PORT_C;
- break;
- case HDMID:
- val |= VIDEO_DIP_PORT_D;
- break;
- default:
- return;
- }
-
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
@@ -213,32 +207,31 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
- if (frame->type == DIP_TYPE_AVI)
- val |= VIDEO_DIP_ENABLE_AVI;
- else
+ if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
-
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
@@ -252,26 +245,28 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
+ WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
- val |= VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
+ mmiowb();
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
I915_WRITE(reg, val);
+ POSTING_READ(reg);
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
@@ -289,18 +284,19 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
if (data_reg == 0)
return;
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
+ mmiowb();
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
+ POSTING_READ(ctl_reg);
}
static void intel_set_infoframe(struct drm_encoder *encoder,
@@ -308,14 +304,11 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- if (!intel_hdmi->has_hdmi_sink)
- return;
-
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
}
-void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
@@ -330,7 +323,7 @@ void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
intel_set_infoframe(encoder, &avi_if);
}
-void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
struct dip_infoframe spd_if;
@@ -345,6 +338,223 @@ void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
intel_set_infoframe(encoder, &spd_if);
}
+static void g4x_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VIDEO_DIP_CTL;
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* If the registers were not initialized yet, they might be zeroes,
+ * which means we're selecting the AVI DIP and setting its frequency
+ * to "once". This seems to really confuse the HW and make things stop
+ * working (the register spec says the AVI always needs to be sent
+ * every VSync). So here we avoid writing to the register more than we
+ * need to, and explicitly select the AVI DIP and explicitly set its
+ * frequency to every VSync. Avoiding the double write seems to be
+ * enough to solve the problem, but being defensive shouldn't hurt us
+ * either. */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case SDVOC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ default:
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~VIDEO_DIP_ENABLE_VENDOR;
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void ibx_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+ u32 port;
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ switch (intel_hdmi->sdvox_reg) {
+ case HDMIB:
+ port = VIDEO_DIP_PORT_B;
+ break;
+ case HDMIC:
+ port = VIDEO_DIP_PORT_C;
+ break;
+ case HDMID:
+ port = VIDEO_DIP_PORT_D;
+ break;
+ default:
+ return;
+ }
+
+ if (port != (val & VIDEO_DIP_PORT_MASK)) {
+ if (val & VIDEO_DIP_ENABLE) {
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ }
+ val &= ~VIDEO_DIP_PORT_MASK;
+ val |= port;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void cpt_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ /* Set both together, unset both together: see the spec. */
+ val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void vlv_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ /* See the big comment in g4x_set_infoframes() */
+ val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ if (!(val & VIDEO_DIP_ENABLE))
+ return;
+ val &= ~VIDEO_DIP_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val |= VIDEO_DIP_ENABLE;
+ val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
+ VIDEO_DIP_ENABLE_GCP);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
+static void hsw_set_infoframes(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
+ u32 val = I915_READ(reg);
+
+ assert_hdmi_port_disabled(intel_hdmi);
+
+ if (!intel_hdmi->has_hdmi_sink) {
+ I915_WRITE(reg, 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
+ VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
+
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+
+ intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_spd_infoframe(encoder);
+}
+
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -355,7 +565,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
- sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -382,14 +592,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 1)
+ else if (intel_crtc->pipe == PIPE_B)
sdvox |= SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -405,6 +614,36 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
temp = I915_READ(intel_hdmi->sdvox_reg);
+ /* HW workaround for IBX: we need to move the port to transcoder A
+ * before disabling it. */
+ if (HAS_PCH_IBX(dev)) {
+ struct drm_crtc *crtc = encoder->crtc;
+ int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ if (temp & SDVO_PIPE_B_SELECT) {
+ temp &= ~SDVO_PIPE_B_SELECT;
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Again we need to write this twice. */
+ I915_WRITE(intel_hdmi->sdvox_reg, temp);
+ POSTING_READ(intel_hdmi->sdvox_reg);
+
+ /* Transcoder selection bits only take effect
+ * on vblank. */
+ if (crtc)
+ intel_wait_for_vblank(dev, pipe);
+ else
+ msleep(50);
+ }
+ } else {
+ /* Restore the transcoder select bit. */
+ if (pipe == PIPE_B)
+ enable_bits |= SDVO_PIPE_B_SELECT;
+ }
+ }
+
/* HW workaround: we need to toggle the enable bit off and on for 12bpc, but
 * we do this anyway, as it has proved more stable in testing.
*/
@@ -446,12 +685,33 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
}
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
+static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
+{
+ struct drm_device *dev = intel_hdmi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t bit;
+
+ switch (intel_hdmi->sdvox_reg) {
+ case SDVOB:
+ bit = HDMIB_HOTPLUG_LIVE_STATUS;
+ break;
+ case SDVOC:
+ bit = HDMIC_HOTPLUG_LIVE_STATUS;
+ break;
+ default:
+ bit = 0;
+ break;
+ }
+
+ return I915_READ(PORT_HOTPLUG_STAT) & bit;
+}
+
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
@@ -460,6 +720,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
+ return status;
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
@@ -633,7 +896,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
- int i;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
@@ -710,26 +972,19 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
- I915_WRITE(VIDEO_DIP_CTL, 0);
+ intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(VLV_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
- /* FIXME: Haswell has a new set of DIP frame registers, but we are
- * just doing the minimal required for HDMI to work at this stage.
- */
intel_hdmi->write_infoframe = hsw_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(HSW_TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = ibx_set_infoframes;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
- for_each_pipe(i)
- I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+ intel_hdmi->set_infoframes = cpt_set_infoframes;
}
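For readers following the refactor: infoframe handling is now dispatched through two per-platform hooks on struct intel_hdmi, installed above and invoked from intel_hdmi_mode_set(). A minimal sketch of the plumbing, assuming the real declaration lives in intel_drv.h (hook names taken from this patch, everything else elided):

/* Sketch only -- not the actual intel_drv.h declaration. */
struct intel_hdmi {
	/* ... existing members elided ... */
	void (*write_infoframe)(struct drm_encoder *encoder,
				struct dip_infoframe *frame);
	void (*set_infoframes)(struct drm_encoder *encoder,
			       struct drm_display_mode *adjusted_mode);
};

/* mode_set then simply calls the hook selected at init time: */
intel_hdmi->set_infoframes(encoder, adjusted_mode);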
if (IS_HASWELL(dev))
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 1991a4408cf9..b9755f6378d8 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->dev_priv = dev_priv;
bus->adapter.algo = &gmbus_algorithm;
- ret = i2c_add_adapter(&bus->adapter);
- if (ret)
- goto err;
/* By default use a conservative clock rate */
bus->reg0 = port | GMBUS_RATE_100KHZ;
@@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->force_bit = true;
intel_gpio_setup(bus, port);
+
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
}
intel_i2c_reset(dev_priv->dev);
@@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- if (dev_priv->gmbus == NULL)
- return;
-
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 08eb04c787e8..e9a6f6aaed85 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -71,6 +71,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_lvds->base.base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@@ -107,7 +108,7 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds)
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(dev);
+ intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -228,14 +229,14 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
}
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- struct drm_encoder *tmp_encoder;
+ struct intel_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@@ -246,8 +247,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* Should never happen!! */
- list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
- if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
+ for_each_encoder_on_crtc(dev, encoder->crtc, tmp_encoder) {
+ if (&tmp_encoder->base != encoder) {
DRM_ERROR("Can't enable LVDS and another "
"encoder on the same pipe\n");
return false;
@@ -408,13 +409,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
- /*
- * Prior to Ironlake, we must disable the pipe if we want to adjust
- * the panel fitter. However at all other times we can just reset
- * the registers regardless.
- */
- if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
- intel_lvds_disable(intel_lvds);
+ intel_lvds_disable(intel_lvds);
}
static void intel_lvds_commit(struct drm_encoder *encoder)
@@ -777,6 +772,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "ZOTAC ZBOXSD-ID12/ID13",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
+ DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Gigabyte GA-D525TUD",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
+ },
+ },
{ } /* terminating entry */
};
@@ -967,6 +978,8 @@ bool intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ else if (IS_GEN4(dev))
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
@@ -1074,35 +1087,14 @@ bool intel_lvds_init(struct drm_device *dev)
goto failed;
out:
+ /*
+ * Unlock registers and just
+ * leave them unlocked
+ */
if (HAS_PCH_SPLIT(dev)) {
- u32 pwm;
-
- pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
-
- /* make sure PWM is enabled and locked to the LVDS pipe */
- pwm = I915_READ(BLC_PWM_CPU_CTL2);
- if (pipe == 0 && (pwm & PWM_PIPE_B))
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
- if (pipe)
- pwm |= PWM_PIPE_B;
- else
- pwm &= ~PWM_PIPE_B;
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
-
- pwm = I915_READ(BLC_PWM_PCH_CTL1);
- pwm |= PWM_PCH_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
- /*
- * Unlock registers and just
- * leave them unlocked
- */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index d67ec3a51e42..29b72593fbb2 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -33,31 +33,22 @@
#include "i915_drv.h"
/**
- * intel_ddc_probe
- *
+ * intel_connector_update_modes - update connector from edid
+ * @connector: DRM connector device to use
+ * @edid: previously read EDID information
*/
-bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
{
- struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
- u8 out_buf[] = { 0x0, 0x0};
- u8 buf[2];
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- return i2c_transfer(intel_gmbus_get_adapter(dev_priv, ddc_bus),
- msgs, 2) == 2;
+ int ret;
+
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+
+ return ret;
}
/**
@@ -71,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
- int ret = 0;
edid = drm_get_edid(connector, adapter);
- if (edid) {
- drm_mode_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- drm_edid_to_eld(connector, edid);
- connector->display_info.raw_edid = NULL;
- kfree(edid);
- }
+ if (!edid)
+ return 0;
- return ret;
+ return intel_connector_update_modes(connector, edid);
}
static const struct drm_prop_enum_list force_audio_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 458743da3774..830d0dd610e1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -226,7 +226,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
@@ -452,7 +452,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(ring, overlay->last_flip_req);
+ ret = i915_wait_seqno(ring, overlay->last_flip_req);
if (ret)
return ret;
i915_gem_retire_requests(dev);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 2a1625d84a69..3df4f5fa892a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
return val;
}
-u32 intel_panel_get_backlight(struct drm_device *dev)
+static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
@@ -287,15 +287,69 @@ void intel_panel_disable_backlight(struct drm_device *dev)
dev_priv->backlight_enabled = false;
intel_panel_actually_set_backlight(dev, 0);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+ I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp &= ~BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
}
-void intel_panel_enable_backlight(struct drm_device *dev)
+void intel_panel_enable_backlight(struct drm_device *dev,
+ enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ uint32_t reg, tmp;
+
+ reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
+
+
+ tmp = I915_READ(reg);
+
+ /* Note that this can also get called through dpms changes. Since
+ * we don't track the backlight dpms state, check whether we have
+ * to do anything at all first. */
+ if (tmp & BLM_PWM_ENABLE)
+ goto set_level;
+
+ if (dev_priv->num_pipe == 3)
+ tmp &= ~BLM_PIPE_SELECT_IVB;
+ else
+ tmp &= ~BLM_PIPE_SELECT;
+
+ tmp |= BLM_PIPE(pipe);
+ tmp &= ~BLM_PWM_ENABLE;
+
+ I915_WRITE(reg, tmp);
+ POSTING_READ(reg);
+ I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_PWM_ENABLE;
+ tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ }
+ }
+
+set_level:
+ /* The call below must happen after BLC_PWM_CPU_CTL2 and
+ * BLC_PWM_PCH_CTL1 have been set, since BLC_PWM_CPU_CTL may be
+ * cleared to zero automatically when those registers are written.
+ */
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
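intel_panel_enable_backlight() now takes the pipe that drives the panel, as seen in the LVDS enable path earlier in this patch. A hypothetical call site (names assumed for illustration):

/* Sketch of a caller -- 'crtc' is whatever CRTC currently drives the panel. */
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_panel_enable_backlight(dev, intel_crtc->pipe);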
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d0ce2a5b1d3f..1881c8c83f0e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -387,8 +387,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_i915_gem_object *obj;
int enable_fbc;
- DRM_DEBUG_KMS("\n");
-
if (!i915_powersave)
return;
@@ -405,7 +403,9 @@ void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled && tmp_crtc->fb) {
+ if (tmp_crtc->enabled &&
+ !to_intel_crtc(tmp_crtc)->primary_disabled &&
+ tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -2182,7 +2182,7 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
@@ -2246,7 +2246,7 @@ void ironlake_enable_drps(struct drm_device *dev)
getrawmonotonic(&dev_priv->last_time2);
}
-void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl = I915_READ16(MEMSWCTL);
@@ -2299,10 +2299,11 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
dev_priv->cur_delay = val;
}
-void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
@@ -2332,9 +2333,11 @@ int intel_enable_rc6(const struct drm_device *dev)
if (INTEL_INFO(dev)->gen == 5)
return 0;
- /* Sorry Haswell, no RC6 for you for now. */
+ /* On Haswell, only RC6 is available. So let's enable it by default to
+ * provide better testing and coverage from the start.
+ */
if (IS_HASWELL(dev))
- return 0;
+ return INTEL_RC6_ENABLE;
/*
* Disable rc6 on Sandybridge
@@ -2347,8 +2350,9 @@ int intel_enable_rc6(const struct drm_device *dev)
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
-void gen6_enable_rps(struct drm_i915_private *dev_priv)
+static void gen6_enable_rps(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
@@ -2357,6 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
int rc6_mode;
int i;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
@@ -2364,7 +2370,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- mutex_lock(&dev_priv->dev->struct_mutex);
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
@@ -2400,20 +2405,24 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+ /* Check if we are enabling RC6 */
rc6_mode = intel_enable_rc6(dev_priv->dev);
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
- if (rc6_mode & INTEL_RC6p_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
+ /* We don't use those on Haswell */
+ if (!IS_HASWELL(dev)) {
+ if (rc6_mode & INTEL_RC6p_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
- if (rc6_mode & INTEL_RC6pp_ENABLE)
- rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ if (rc6_mode & INTEL_RC6pp_ENABLE)
+ rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
+ }
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
- (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
+ (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -2431,10 +2440,12 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->max_delay << 24 |
dev_priv->min_delay << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
- I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
+
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -2442,7 +2453,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
- GEN6_RP_DOWN_IDLE_CONT);
+ (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -2473,14 +2484,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
- I915_WRITE(GEN6_PMIER,
- GEN6_PM_MBOX_EVENT |
- GEN6_PM_THERMAL_EVENT |
- GEN6_PM_RP_DOWN_TIMEOUT |
- GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_UP_EI_EXPIRED |
- GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
spin_lock_irq(&dev_priv->rps_lock);
WARN_ON(dev_priv->pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
@@ -2489,15 +2493,17 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_gt_force_wake_put(dev_priv);
- mutex_unlock(&dev_priv->dev->struct_mutex);
}
-void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+static void gen6_update_ring_freq(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
@@ -2509,8 +2515,6 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
- mutex_lock(&dev_priv->dev->struct_mutex);
-
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
@@ -2541,11 +2545,9 @@ void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
continue;
}
}
-
- mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ironlake_teardown_rc6(struct drm_device *dev)
+void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2562,7 +2564,7 @@ static void ironlake_teardown_rc6(struct drm_device *dev)
}
}
-void ironlake_disable_rc6(struct drm_device *dev)
+static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2578,8 +2580,6 @@ void ironlake_disable_rc6(struct drm_device *dev)
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
-
- ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
@@ -2601,7 +2601,7 @@ static int ironlake_setup_rc6(struct drm_device *dev)
return 0;
}
-void ironlake_enable_rc6(struct drm_device *dev)
+static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
@@ -2613,12 +2613,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
if (!intel_enable_rc6(dev))
return;
- mutex_lock(&dev->struct_mutex);
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
ret = ironlake_setup_rc6(dev);
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
+ if (ret)
return;
- }
/*
* GPU can automatically power down the render unit if given a page
@@ -2627,7 +2626,6 @@ void ironlake_enable_rc6(struct drm_device *dev)
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
return;
}
@@ -2652,13 +2650,11 @@ void ironlake_enable_rc6(struct drm_device *dev)
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
- mutex_unlock(&dev->struct_mutex);
return;
}
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
- mutex_unlock(&dev->struct_mutex);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -3154,8 +3150,7 @@ void intel_gpu_ips_teardown(void)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
}
-
-void intel_init_emon(struct drm_device *dev)
+static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
@@ -3226,6 +3221,28 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+void intel_disable_gt_powersave(struct drm_device *dev)
+{
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_drps(dev);
+ ironlake_disable_rc6(dev);
+ } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
+ gen6_disable_rps(dev);
+ }
+}
+
+void intel_enable_gt_powersave(struct drm_device *dev)
+{
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_enable_drps(dev);
+ ironlake_enable_rc6(dev);
+ intel_init_emon(dev);
+ } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ gen6_enable_rps(dev);
+ gen6_update_ring_freq(dev);
+ }
+}
+
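Since gen6_enable_rps(), gen6_update_ring_freq() and ironlake_enable_rc6() now assert that struct_mutex is held instead of taking it themselves, callers of these new wrappers are expected to hold the lock. A minimal, hypothetical call site:

/* Sketch only -- not part of this patch. */
mutex_lock(&dev->struct_mutex);
intel_enable_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);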
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3328,8 +3345,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
*/
I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
@@ -3357,6 +3378,9 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -3377,7 +3401,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
-static void ivybridge_init_clock_gating(struct drm_device *dev)
+static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
@@ -3427,13 +3451,24 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
/* WaDisable4x2SubspanOptimization */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /* XXX: This is a workaround for early silicon revisions and should be
+ * removed later.
+ */
+ I915_WRITE(WM_DBG,
+ I915_READ(WM_DBG) |
+ WM_DBG_DISALLOW_MULTIPLE_LP |
+ WM_DBG_DISALLOW_SPRITE |
+ WM_DBG_DISALLOW_MAXFIFO);
+
}
-static void valleyview_init_clock_gating(struct drm_device *dev)
+static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+ uint32_t snpcr;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
@@ -3441,10 +3476,77 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
*/
- I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
+ for_each_pipe(pipe) {
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ intel_flush_display_plane(dev_priv, pipe);
+ }
+
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+ gen7_setup_fixed_func_scheduler(dev_priv);
+
+ /* WaDisable4x2SubspanOptimization */
+ I915_WRITE(CACHE_MODE_1,
+ _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= GEN6_MBC_SNPCR_MED;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+}
+
+static void valleyview_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+ uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
+
+ I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
+
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
@@ -3465,6 +3567,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
+ GEN6_MBCTL_ENABLE_BOOT_FETCH);
+
+
+ /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+ * gating disable must be set. Failure to set it results in
+ * flickering pixels due to Z write ordering failures after
+ * some amount of runtime in the Mesa "fire" demo, and Unigine
+ * Sanctuary and Tropics, and apparently anything else with
+ * alpha test or pixel discard.
+ *
+ * According to the spec, bit 11 (RCCUNIT) must also be set,
+ * but we didn't debug actual testcases to find it out.
+ *
+ * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ *
+ * Also apply WaDisableVDSUnitClockGating and
+ * WaDisableRCPBUnitClockGating.
+ */
+ I915_WRITE(GEN6_UCGCTL2,
+ GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
+ GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -3474,6 +3605,19 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
+
+ /*
+ * On ValleyView, the GUnit needs to signal the GT
+ * when flip and other events complete. So enable
+ * all the GUnit->GT interrupts here
+ */
+ I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
+ PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
+ SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
+ PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
+ PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
+ SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
+ PLANEA_FLIPDONE_INT_EN);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3681,34 +3825,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
-
- /* IVB configs may use multi-threaded forcewake */
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- u32 ecobus;
-
- /* A small trick here - if the bios hasn't configured MT forcewake,
- * and if the device is in RC6, then force_wake_mt_get will not wake
- * the device and the ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT forcewake being
- * disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- DRM_DEBUG_KMS("Using MT version of forcewake\n");
- dev_priv->display.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->display.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- }
- }
-
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
@@ -3756,7 +3872,7 @@ void intel_init_pm(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
dev_priv->display.sanitize_pm = gen6_sanitize_pm;
} else
dev_priv->display.update_wm = NULL;
@@ -3764,8 +3880,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
- dev_priv->display.force_wake_get = vlv_force_wake_get;
- dev_priv->display.force_wake_put = vlv_force_wake_put;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@@ -3811,10 +3925,198 @@ void intel_init_pm(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+}
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
- /* We attempt to init the necessary power wells early in the initialization
- * time, so the subsystems that expect power to be enabled can work.
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
*/
- intel_init_power_wells(dev);
+ if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_ACK;
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1) == 0, 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
+ POSTING_READ(FORCEWAKE_MT);
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count++ == 0)
+ dev_priv->gt.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
+void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+ gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
+ POSTING_READ(FORCEWAKE_MT);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (--dev_priv->forcewake_count == 0)
+ dev_priv->gt.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+}
+
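As the comment above gen6_gt_force_wake_get() explains, a register sequence that must not race with GT power-down brackets its MMIO accesses with the get/put pair. Roughly (SOME_GT_REG and val are placeholders):

/* Illustrative sequence only. */
gen6_gt_force_wake_get(dev_priv);
I915_WRITE(SOME_GT_REG, val);
/* ... further GT register accesses ... */
gen6_gt_force_wake_put(dev_priv);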
+int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->gt_fifo_count = fifo;
+ }
+ dev_priv->gt_fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ /* Already awake? */
+ if ((I915_READ(0x130094) & 0xa1) == 0xa1)
+ return;
+
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffffffff);
+ POSTING_READ(FORCEWAKE_VLV);
+
+ if (wait_for_atomic_us((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 500))
+ DRM_ERROR("Force wake wait timed out\n");
+
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_VLV, 0xffff0000);
+ /* FIXME: confirm VLV behavior with Punit folks */
+ POSTING_READ(FORCEWAKE_VLV);
+}
+
+void intel_gt_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_init(&dev_priv->gt_lock);
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->gt.force_wake_get = vlv_force_wake_get;
+ dev_priv->gt.force_wake_put = vlv_force_wake_put;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
+ u32 ecobus;
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->gt.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->gt.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e5b84ff89ca5..e2a73b38abe9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -219,30 +219,44 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- intel_emit_post_sync_nonzero_flush(ring);
+ ret = intel_emit_post_sync_nonzero_flush(ring);
+ if (ret)
+ return ret;
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
- flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
- flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
- flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
- flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ if (flush_domains) {
+ flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+ /*
+ * Ensure that any following seqno writes only happen
+ * when the render cache is indeed flushed.
+ */
+ flags |= PIPE_CONTROL_CS_STALL;
+ }
+ if (invalidate_domains) {
+ flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ /*
+ * TLB invalidate requires a post-sync write.
+ */
+ flags |= PIPE_CONTROL_QW_WRITE;
+ }
- ret = intel_ring_begin(ring, 6);
+ ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* lower dword */
- intel_ring_emit(ring, 0); /* uppwer dword */
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, 0);
intel_ring_advance(ring);
return 0;
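With the flush now split by domain, callers only request the invalidations or flushes they actually need. Assuming the usual ring->flush(ring, invalidate_domains, flush_domains) vfunc signature and the standard domain flags, usage looks roughly like:

/* Sketch -- argument order and flags assumed, not shown in this hunk. */
ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);	/* invalidate before sampling */
ring->flush(ring, 0, I915_GEM_DOMAIN_RENDER);	/* flush render cache */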
@@ -280,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- /* Initialize the ring. */
- I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -307,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
@@ -433,11 +450,21 @@ static int init_render_ring(struct intel_ring_buffer *ring)
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
+
+ /* This is not explicitly set for GEN6, so read the register.
+ * see intel_ring_mi_set_context() for why we care.
+ * TODO: consider explicitly setting the bit for GEN5
+ */
+ ring->itlb_before_ctx_switch =
+ !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
+ if (IS_IVYBRIDGE(dev))
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+
return ret;
}
@@ -825,7 +852,11 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
+ GEN6_RENDER_L3_PARITY_ERROR));
+ else
+ I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -844,7 +875,10 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
+ if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+ I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
+ else
+ I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
@@ -946,6 +980,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
+ ret = -ENOMEM;
goto err_unpin;
}
ring->status_page.obj = obj;
@@ -969,6 +1004,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ring->dev = dev;
@@ -1002,8 +1038,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unpin;
- ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
- ring->size);
+ ring->virtual_start =
+ ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
+ ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
@@ -1089,20 +1126,9 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- bool was_interruptible;
int ret;
- /* XXX As we have not yet audited all the paths to check that
- * they are ready for ERESTARTSYS from intel_ring_begin, do not
- * allow us to be interruptible by a signal.
- */
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- ret = i915_wait_request(ring, seqno);
-
- dev_priv->mm.interruptible = was_interruptible;
+ ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
@@ -1200,8 +1226,10 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
}
msleep(1);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end(ring);
return -EBUSY;
@@ -1210,12 +1238,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
- if (unlikely(atomic_read(&dev_priv->mm.wedged)))
- return -EIO;
+ ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
@@ -1250,20 +1279,31 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
+
+ /* Disable notification that the ring is IDLE. The GT
+ * will then assume that it is busy and bring it out of rc6.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
- I915_WRITE(GEN6_BSD_RNCID, 0x0);
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+ /* Clear the context id. Here be magic! */
+ I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+ /* Wait for the ring not to be idle, i.e. for it to wake up. */
if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
- GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
- 50))
- DRM_ERROR("timed out waiting for IDLE Indicator\n");
+ GEN6_BSD_SLEEP_INDICATOR) == 0,
+ 50))
+ DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ /* Now that the ring is fully powered up, update the tail */
I915_WRITE_TAIL(ring, value);
+ POSTING_READ(RING_TAIL(ring->mmio_base));
+
+ /* Let the ring send IDLE messages to the GT again,
+ * and so let it sleep to conserve power when idle.
+ */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
- GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
static int gen6_ring_flush(struct intel_ring_buffer *ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 55d3da26bae7..1d3c81fdad92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -113,9 +113,17 @@ struct intel_ring_buffer {
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
+ bool gpu_caches_dirty;
wait_queue_head_t irq_queue;
+ /**
+ * Do an explicit TLB flush before MI_SET_CONTEXT
+ */
+ bool itlb_before_ctx_switch;
+ struct i915_hw_context *default_context;
+ struct drm_i915_gem_object *last_context_obj;
+
void *private;
};
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index b6a9d45fc3c6..d81bb0bf2885 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -140,9 +140,6 @@ struct intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
-
- /* Input timings for adjusted_mode */
- struct intel_sdvo_dtd input_dtd;
};
struct intel_sdvo_connector {
@@ -447,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
struct i2c_msg *msgs;
int i, ret = true;
+ /* Would be simpler to allocate both in one go? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;
msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
- if (!msgs)
+ if (!msgs) {
+ kfree(buf);
return false;
+ }
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
@@ -938,7 +938,7 @@ static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
static bool
intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct intel_sdvo_dtd output_dtd;
@@ -953,11 +953,15 @@ intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
return true;
}
+/* Asks the sdvo controller for the preferred input mode given the output mode.
+ * Unfortunately we have to set up the full output mode to do that. */
static bool
-intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
+ struct intel_sdvo_dtd input_dtd;
+
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
@@ -969,16 +973,16 @@ intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
return false;
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
- &intel_sdvo->input_dtd))
+ &input_dtd))
return false;
- intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
+ intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
return true;
}
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
@@ -993,17 +997,17 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
} else if (intel_sdvo->is_lvds) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
- (void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
- mode,
- adjusted_mode);
+ (void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
+ mode,
+ adjusted_mode);
}
/* Make the CRTC code factor in the SDVO pixel multiplier. The
@@ -1057,7 +1061,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo->sdvo_lvds_fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
- (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
+ if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
+ DRM_INFO("Setting output timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1079,7 +1085,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
+ if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
+ DRM_INFO("Setting input timings on %s failed\n",
+ SDVO_NAME(intel_sdvo));
switch (pixel_multiplier) {
default:
@@ -1376,7 +1384,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
- mdelay(30);
+ msleep(30);
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -1684,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
edid = intel_sdvo_get_edid(connector);
if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
+ kfree(edid);
return has_audio;
}
@@ -2521,6 +2530,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
+ u32 hotplug_mask;
int i;
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
@@ -2552,10 +2562,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
}
}
- if (intel_sdvo->is_sdvob)
- dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
- else
- dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+ hotplug_mask = 0;
+ if (IS_G4X(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
+ } else if (IS_GEN4(dev)) {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
+ } else {
+ hotplug_mask = intel_sdvo->is_sdvob ?
+ SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
+ }
+ dev_priv->hotplug_supported_mask |= hotplug_mask;
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
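
The intel_sdvo.c hunk just above replaces the single SDVOB/SDVOC hotplug bit with a per-generation lookup. As an illustrative aside (not part of the patch), the sketch below mirrors that decision structure in plain C; the mask values are hypothetical placeholders, not the real SDVO*_HOTPLUG_INT_STATUS_* register bits.

/* Illustrative sketch only -- mask values are placeholders, not i915 bits. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum gpu_gen { GEN_I915, GEN_I965, GEN_G4X };

static uint32_t sdvo_hotplug_mask(enum gpu_gen gen, bool is_sdvob)
{
	/* Same shape as the hunk above: exactly one bit per (generation, port). */
	switch (gen) {
	case GEN_G4X:
		return is_sdvob ? 1u << 0 : 1u << 1;
	case GEN_I965:
		return is_sdvob ? 1u << 2 : 1u << 3;
	default: /* pre-965 parts */
		return is_sdvob ? 1u << 4 : 1u << 5;
	}
}

int main(void)
{
	printf("g4x/sdvob mask: 0x%08x\n", sdvo_hotplug_mask(GEN_G4X, true));
	return 0;
}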
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 2a20fb0781d7..7644f31a3778 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -56,14 +56,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+ sprctl &= ~SPRITE_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
- sprctl |= SPRITE_FORMAT_RGBX888;
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
- sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
@@ -84,7 +85,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
- sprctl |= DVS_FORMAT_RGBX888;
+ sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
}
@@ -233,6 +234,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+ dvscntr &= ~DVS_TILED;
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
@@ -326,6 +328,12 @@ intel_enable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (!intel_crtc->primary_disabled)
+ return;
+
+ intel_crtc->primary_disabled = false;
+ intel_update_fbc(dev);
+
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
}
@@ -337,7 +345,13 @@ intel_disable_primary(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
+ if (intel_crtc->primary_disabled)
+ return;
+
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+
+ intel_crtc->primary_disabled = true;
+ intel_update_fbc(dev);
}
static int
@@ -485,18 +499,14 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
- if (!disable_primary && intel_plane->primary_disabled) {
+ if (!disable_primary)
intel_enable_primary(crtc);
- intel_plane->primary_disabled = false;
- }
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
- if (disable_primary) {
+ if (disable_primary)
intel_disable_primary(crtc);
- intel_plane->primary_disabled = true;
- }
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
@@ -527,11 +537,8 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
- if (intel_plane->primary_disabled) {
+ if (plane->crtc)
intel_enable_primary(plane->crtc);
- intel_plane->primary_disabled = false;
- }
-
intel_plane->disable_plane(plane);
if (!intel_plane->obj)
@@ -685,6 +692,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
default:
+ kfree(intel_plane);
return -ENODEV;
}
@@ -699,4 +707,3 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
return ret;
}
-
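
The intel_sprite.c hunks above move the "primary plane disabled" bookkeeping out of the callers and into intel_enable_primary()/intel_disable_primary(), which now early-return when there is nothing to do and refresh FBC state themselves. A minimal userspace sketch of that idempotent toggle pattern, assuming a hypothetical crtc_state struct and update_fbc() stand-in rather than the i915 types:

/* Minimal sketch of the idempotent enable/disable pattern used above.
 * crtc_state and update_fbc() are hypothetical stand-ins, not i915 API. */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state {
	bool primary_disabled;
};

static void update_fbc(void)
{
	puts("FBC state re-evaluated");
}

static void enable_primary(struct crtc_state *c)
{
	if (!c->primary_disabled)	/* already enabled: nothing to do */
		return;
	c->primary_disabled = false;
	update_fbc();
	puts("primary plane enabled");
}

static void disable_primary(struct crtc_state *c)
{
	if (c->primary_disabled)	/* already disabled: nothing to do */
		return;
	c->primary_disabled = true;
	update_fbc();
	puts("primary plane disabled");
}

int main(void)
{
	struct crtc_state c = { .primary_disabled = false };

	disable_primary(&c);	/* does work */
	disable_primary(&c);	/* no-op: callers need no extra flag */
	enable_primary(&c);	/* does work */
	return 0;
}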
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index a233a51fd7e6..befce6c49704 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -891,24 +891,21 @@ intel_tv_mode_valid(struct drm_connector *connector,
static bool
-intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+intel_tv_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_mode_config *drm_config = &dev->mode_config;
struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
- struct drm_encoder *other_encoder;
+ struct intel_encoder *other_encoder;
if (!tv_mode)
return false;
- /* FIXME: lock encoder list */
- list_for_each_entry(other_encoder, &drm_config->encoder_list, head) {
- if (other_encoder != encoder &&
- other_encoder->crtc == encoder->crtc)
+ for_each_encoder_on_crtc(dev, encoder->crtc, other_encoder)
+ if (&other_encoder->base != encoder)
return false;
- }
adjusted_mode->clock = tv_mode->clock;
return true;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index f9a925d58819..b1bb46de3f5a 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,7 +75,6 @@ static struct drm_driver driver = {
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
.irq_handler = mga_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = &mga_driver_fops,
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 93e832d6c328..ea1024d79974 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -47,6 +47,9 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
bool primary = false;
ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d303061b251e..b69642d5d850 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -78,8 +78,8 @@ static inline void mga_wait_busy(struct mga_device *mdev)
* to just pass that straight through, so this does nothing
*/
static bool mga_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
@@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
{
unsigned int vcomax, vcomin, pllreffreq;
unsigned int delta, tmpdelta;
- unsigned int testr, testn, testm, testo;
+ int testr, testn, testm, testo;
unsigned int p, m, n;
- unsigned int computed;
+ unsigned int computed, vco;
int tmp;
+ const unsigned int m_div_val[] = { 1, 2, 4, 8 };
m = n = p = 0;
vcomax = 1488000;
@@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
if (delta == 0)
break;
for (testo = 5; testo < 33; testo++) {
- computed = pllreffreq * (testn + 1) /
+ vco = pllreffreq * (testn + 1) /
(testr + 1);
- if (computed < vcomin)
+ if (vco < vcomin)
continue;
- if (computed > vcomax)
+ if (vco > vcomax)
continue;
+ computed = vco / (m_div_val[testm] * (testo + 1));
if (computed > clock)
tmpdelta = computed - clock;
else
@@ -1322,8 +1324,8 @@ void mga_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
* to handle any encoder-specific limitations
*/
static bool mga_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
return true;
}
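
The mga_g200er_set_plls() hunk above is a functional fix: the candidate frequency compared against the requested clock is now the VCO divided by the post dividers, m_div_val[testm] * (testo + 1), instead of the raw VCO. A standalone brute-force sketch of that search follows; the loop bounds, reference frequency, and VCO limits here are placeholders, not the G200ER values.

/* Illustrative brute-force PLL search mirroring the loop structure above.
 * All numeric bounds are placeholders, not G200ER hardware values. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	const unsigned int pllreffreq = 25000;			/* kHz, placeholder */
	const unsigned int vcomin = 1000000, vcomax = 1500000;	/* placeholders */
	const unsigned int m_div_val[] = { 1, 2, 4, 8 };
	const unsigned int clock = 148500;			/* target, placeholder */
	unsigned int best_delta = UINT_MAX, best = 0;

	for (int testr = 0; testr < 4; testr++)
	for (int testn = 0; testn < 128; testn++)
	for (int testm = 0; testm < 4; testm++)
	for (int testo = 5; testo < 33; testo++) {
		unsigned int vco = pllreffreq * (testn + 1) / (testr + 1);

		if (vco < vcomin || vco > vcomax)
			continue;
		/* The fix: compare the post-divided frequency, not the VCO. */
		unsigned int computed = vco / (m_div_val[testm] * (testo + 1));
		unsigned int delta = computed > clock ? computed - clock
						      : clock - computed;
		if (delta < best_delta) {
			best_delta = delta;
			best = computed;
		}
	}
	printf("closest achievable clock: %u kHz (delta %u)\n", best, best_delta);
	return 0;
}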
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index fe5267d06ab5..1cece6a78f39 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
- nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+ nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
@@ -12,6 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
+ nouveau_abi16.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
new file mode 100644
index 000000000000..ff23d88880e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_abi16.h"
+#include "nouveau_ramht.h"
+#include "nouveau_software.h"
+
+int
+nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
+
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam->value = dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam->value = dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_pci_device_is_agp(dev))
+ getparam->value = 0;
+ else
+ if (!pci_is_pcie(dev->pdev))
+ getparam->value = 1;
+ else
+ getparam->value = 2;
+ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value = dev_priv->fb_available_size;
+ break;
+ case NOUVEAU_GETPARAM_AGP_SIZE:
+ getparam->value = dev_priv->gart_info.aper_size;
+ break;
+ case NOUVEAU_GETPARAM_VM_VRAM_BASE:
+ getparam->value = 0; /* deprecated */
+ break;
+ case NOUVEAU_GETPARAM_PTIMER_TIME:
+ getparam->value = dev_priv->engine.timer.read(dev);
+ break;
+ case NOUVEAU_GETPARAM_HAS_BO_USAGE:
+ getparam->value = 1;
+ break;
+ case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+ getparam->value = 1;
+ break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
+{
+ return -EINVAL;
+}
+
+int
+nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ if (!dev_priv->eng[NVOBJ_ENGINE_GR])
+ return -ENODEV;
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ ret = nouveau_channel_alloc(dev, &chan, file_priv,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
+ if (ret)
+ return ret;
+ init->channel = chan->id;
+
+ if (nouveau_vram_pushbuf == 0) {
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+ } else {
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ }
+
+ if (dev_priv->card_type < NV_C0) {
+ init->subchan[0].handle = 0x00000000;
+ init->subchan[0].grclass = 0x0000;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
+ }
+
+ /* Named memory object area */
+ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+ &init->notifier_handle);
+
+ if (ret == 0)
+ atomic_inc(&chan->users); /* userspace reference */
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_channel_free *req = data;
+ struct nouveau_channel *chan;
+
+ chan = nouveau_channel_get(file_priv, req->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ list_del(&chan->list);
+ atomic_dec(&chan->users);
+ nouveau_channel_put(&chan);
+ return 0;
+}
+
+int
+nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ if (init->handle == ~0)
+ return -EINVAL;
+
+ /* compatibility with userspace that assumes 506e for all chipsets */
+ if (init->class == 0x506e) {
+ init->class = nouveau_software_class(dev);
+ if (init->class == 0x906e)
+ return 0;
+ } else
+ if (init->class == 0x906e) {
+ NV_ERROR(dev, "906e not supported yet\n");
+ return -EINVAL;
+ }
+
+ chan = nouveau_channel_get(file_priv, init->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ if (nouveau_ramht_find(chan, init->handle)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
+ if (ret) {
+ NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ }
+
+out:
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ /* completely unnecessary for these chipsets... */
+ if (unlikely(dev_priv->card_type >= NV_C0))
+ return -EINVAL;
+
+ chan = nouveau_channel_get(file_priv, na->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
+ nouveau_channel_put(&chan);
+ return ret;
+}
+
+int
+nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = nouveau_channel_get(file_priv, objfree->channel);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ /* Synchronize with the user channel */
+ nouveau_channel_idle(chan);
+
+ ret = nouveau_ramht_remove(chan, objfree->handle);
+ nouveau_channel_put(&chan);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
new file mode 100644
index 000000000000..e6328b008a8c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -0,0 +1,83 @@
+#ifndef __NOUVEAU_ABI16_H__
+#define __NOUVEAU_ABI16_H__
+
+#define ABI16_IOCTL_ARGS \
+ struct drm_device *dev, void *data, struct drm_file *file_priv
+int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
+int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+ uint32_t pushbuf_domains;
+
+ /* Notifier memory */
+ uint32_t notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ uint32_t handle;
+ uint32_t grclass;
+ } subchan[8];
+ uint32_t nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+struct drm_nouveau_notifierobj_alloc {
+ uint32_t channel;
+ uint32_t handle;
+ uint32_t size;
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+
+#endif
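
The new header above keeps every ABI16 entry point on one prototype by expanding a single parameter-list macro. A trivial, self-contained illustration of that convention; the struct types here are dummies standing in for the DRM ones.

/* Toy illustration of the "one macro, many handlers" prototype convention. */
#include <stdio.h>

struct toy_device { int id; };
struct toy_file { int owner; };

#define TOY_IOCTL_ARGS \
	struct toy_device *dev, void *data, struct toy_file *file_priv

static int toy_ioctl_getparam(TOY_IOCTL_ARGS)
{
	(void)data; (void)file_priv;
	printf("getparam on device %d\n", dev->id);
	return 0;
}

static int toy_ioctl_setparam(TOY_IOCTL_ARGS)
{
	(void)dev; (void)data; (void)file_priv;
	return -1;	/* unsupported, like the setparam stub above */
}

int main(void)
{
	struct toy_device dev = { .id = 16 };
	struct toy_file file = { .owner = 0 };

	toy_ioctl_getparam(&dev, NULL, &file);
	toy_ioctl_setparam(&dev, NULL, &file);
	return 0;
}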
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fc841e87b343..26ebffebe710 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
-static int nouveau_dsm_init(void)
-{
- return 0;
-}
-
static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
/* easy option one - intel vendor ID means Integrated */
@@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
static struct vga_switcheroo_handler nouveau_dsm_handler = {
.switchto = nouveau_dsm_switchto,
.power_state = nouveau_dsm_power_state,
- .init = nouveau_dsm_init,
.get_client_id = nouveau_dsm_get_client_id,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2f11e16a81a9..a0a3fe3c016b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6091,6 +6091,18 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
}
}
+ /* fdo#50830: connector indices for VGA and DVI-I are backwards */
+ if (nv_match_device(dev, 0x0421, 0x3842, 0xc793)) {
+ if (idx == 0 && *conn == 0x02000300)
+ *conn = 0x02011300;
+ else
+ if (idx == 1 && *conn == 0x04011310)
+ *conn = 0x04000310;
+ else
+ if (idx == 2 && *conn == 0x02011312)
+ *conn = 0x02000312;
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 629d8a2df5bd..debd90225a88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -395,98 +395,3 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
nouveau_channel_put(&chan);
}
}
-
-
-/***********************************
- * ioctls wrapping the functions
- ***********************************/
-
-static int
-nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_channel_alloc *init = data;
- struct nouveau_channel *chan;
- int ret;
-
- if (!dev_priv->eng[NVOBJ_ENGINE_GR])
- return -ENODEV;
-
- if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return -EINVAL;
-
- ret = nouveau_channel_alloc(dev, &chan, file_priv,
- init->fb_ctxdma_handle,
- init->tt_ctxdma_handle);
- if (ret)
- return ret;
- init->channel = chan->id;
-
- if (nouveau_vram_pushbuf == 0) {
- if (chan->dma.ib_max)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
- NOUVEAU_GEM_DOMAIN_GART;
- else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- else
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- } else {
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- }
-
- if (dev_priv->card_type < NV_C0) {
- init->subchan[0].handle = 0x00000000;
- init->subchan[0].grclass = 0x0000;
- init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
- init->nr_subchan = 2;
- }
-
- /* Named memory object area */
- ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
- &init->notifier_handle);
-
- if (ret == 0)
- atomic_inc(&chan->users); /* userspace reference */
- nouveau_channel_put(&chan);
- return ret;
-}
-
-static int
-nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_channel_free *req = data;
- struct nouveau_channel *chan;
-
- chan = nouveau_channel_get(file_priv, req->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- list_del(&chan->list);
- atomic_dec(&chan->users);
- nouveau_channel_put(&chan);
- return 0;
-}
-
-/***********************************
- * finally, the ioctl table
- ***********************************/
-
-struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
-};
-
-int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index cad254c8e387..9a36f5f39b06 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -29,6 +29,7 @@
#include "drm.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
+#include "nouveau_abi16.h"
#include "nouveau_hw.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
@@ -384,6 +385,21 @@ nouveau_pci_resume(struct pci_dev *pdev)
return 0;
}
+static struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
static const struct file_operations nouveau_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -422,7 +438,6 @@ static struct drm_driver driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = nouveau_vblank_enable,
.disable_vblank = nouveau_vblank_disable,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
.fops = &nouveau_driver_fops,
@@ -463,7 +478,7 @@ static struct pci_driver nouveau_pci_driver = {
static int __init nouveau_init(void)
{
- driver.num_ioctls = nouveau_max_ioctl;
+ driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8613cb23808c..4f2cc95ce264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -689,8 +689,6 @@ struct drm_nouveau_private {
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct list_head vbl_waiting;
-
struct {
struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
@@ -872,10 +870,6 @@ extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
extern int nouveau_unload(struct drm_device *);
-extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
- struct drm_file *);
extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
@@ -914,15 +908,8 @@ extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
int cout, uint32_t start, uint32_t end,
uint32_t *offset);
-extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
-extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_channel.c */
-extern struct drm_ioctl_desc nouveau_ioctls[];
-extern int nouveau_max_ioctl;
extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
extern int nouveau_channel_alloc(struct drm_device *dev,
struct nouveau_channel **chan,
@@ -938,7 +925,7 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan,
struct nouveau_channel **pchan);
extern int nouveau_channel_idle(struct nouveau_channel *chan);
-/* nouveau_object.c */
+/* nouveau_gpuobj.c */
#define NVOBJ_ENGINE_ADD(d, e, p) do { \
struct drm_nouveau_private *dev_priv = (d)->dev_private; \
dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
@@ -993,10 +980,6 @@ extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
int class, u64 base, u64 size, int target,
int access, u32 type, u32 comp);
-extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_irq.c */
extern int nouveau_irq_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 30f542316944..af7cfb825716 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -207,8 +207,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
- dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
+ dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
@@ -342,6 +341,7 @@ retry:
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_ERROR(dev, "multiple instances of buffer %d on "
"validation list\n", b->handle);
+ drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
return -EINVAL;
}
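
The nouveau_gem.c hunk above fixes a GEM object reference leak: when validation bails out with -EINVAL because the buffer is already on the list, the reference taken by the earlier handle lookup is now dropped first. A tiny refcount sketch of the same error-path rule, with a hypothetical object type rather than the GEM API:

/* Sketch of the "drop the lookup reference on every exit path" rule fixed
 * above. The object type and helpers are hypothetical, not GEM calls. */
#include <stdio.h>

struct toy_obj { int refcount; };

static struct toy_obj *toy_lookup(struct toy_obj *o)
{
	o->refcount++;		/* lookup hands back a new reference */
	return o;
}

static void toy_unreference(struct toy_obj *o)
{
	o->refcount--;
}

static int validate(struct toy_obj *o, int already_listed)
{
	struct toy_obj *ref = toy_lookup(o);

	if (already_listed) {
		toy_unreference(ref);	/* the fix: no leak on the error path */
		return -1;
	}
	/* ... a real version would park the reference on a validation list ... */
	toy_unreference(ref);
	return 0;
}

int main(void)
{
	struct toy_obj o = { .refcount = 1 };

	validate(&o, 1);
	printf("refcount after failed validate: %d\n", o.refcount);	/* 1 */
	return 0;
}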
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
index b190cc01c820..bd79fedb7054 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
@@ -758,66 +758,6 @@ nouveau_gpuobj_resume(struct drm_device *dev)
dev_priv->engine.instmem.flush(dev);
}
-int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_channel *chan;
- int ret;
-
- if (init->handle == ~0)
- return -EINVAL;
-
- /* compatibility with userspace that assumes 506e for all chipsets */
- if (init->class == 0x506e) {
- init->class = nouveau_software_class(dev);
- if (init->class == 0x906e)
- return 0;
- } else
- if (init->class == 0x906e) {
- NV_ERROR(dev, "906e not supported yet\n");
- return -EINVAL;
- }
-
- chan = nouveau_channel_get(file_priv, init->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- if (nouveau_ramht_find(chan, init->handle)) {
- ret = -EEXIST;
- goto out;
- }
-
- ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
- if (ret) {
- NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- }
-
-out:
- nouveau_channel_put(&chan);
- return ret;
-}
-
-int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_channel *chan;
- int ret;
-
- chan = nouveau_channel_get(file_priv, objfree->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- /* Synchronize with the user channel */
- nouveau_channel_idle(chan);
-
- ret = nouveau_ramht_remove(chan, objfree->handle);
- nouveau_channel_put(&chan);
- return ret;
-}
-
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 77e564667b5c..240cf962c999 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev)
}
break;
case 6: /* NV50- DP AUX */
- port->drive = entry[0];
+ port->drive = entry[0] & 0x0f;
port->sense = port->drive;
port->adapter.algo = &nouveau_dp_i2c_algo;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 868c7fd74854..b2c2937531a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -41,12 +41,8 @@
void
nouveau_irq_preinstall(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
-
- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2ef883c4bbc1..69c93b864519 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -161,44 +161,3 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
*b_offset = mem->start;
return 0;
}
-
-int
-nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
-{
- if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
- return -EINVAL;
-
- if (poffset) {
- struct drm_mm_node *mem = nobj->priv;
-
- if (*poffset >= mem->size)
- return false;
-
- *poffset += mem->start;
- }
-
- return 0;
-}
-
-int
-nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_notifierobj_alloc *na = data;
- struct nouveau_channel *chan;
- int ret;
-
- /* completely unnecessary for these chipsets... */
- if (unlikely(dev_priv->card_type >= NV_C0))
- return -EINVAL;
-
- chan = nouveau_channel_get(file_priv, na->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
- &na->offset);
- nouveau_channel_put(&chan);
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
index e60bc6ce9003..709e5ac680ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ b/drivers/gpu/drm/nouveau/nouveau_software.h
@@ -4,13 +4,15 @@
struct nouveau_software_priv {
struct nouveau_exec_engine base;
struct list_head vblank;
+ spinlock_t peephole_lock;
};
struct nouveau_software_chan {
struct list_head flip;
struct {
struct list_head list;
- struct nouveau_bo *bo;
+ u32 channel;
+ u32 ctxdma;
u32 offset;
u32 value;
u32 head;
@@ -18,32 +20,17 @@ struct nouveau_software_chan {
};
static inline void
-nouveau_software_vblank(struct drm_device *dev, int crtc)
-{
- struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
- struct nouveau_software_chan *pch, *tmp;
-
- list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
- if (pch->vblank.head != crtc)
- continue;
-
- nouveau_bo_wr32(pch->vblank.bo, pch->vblank.offset,
- pch->vblank.value);
- list_del(&pch->vblank.list);
- drm_vblank_put(dev, crtc);
- }
-}
-
-static inline void
nouveau_software_context_new(struct nouveau_software_chan *pch)
{
INIT_LIST_HEAD(&pch->flip);
+ INIT_LIST_HEAD(&pch->vblank.list);
}
static inline void
nouveau_software_create(struct nouveau_software_priv *psw)
{
INIT_LIST_HEAD(&psw->vblank);
+ spin_lock_init(&psw->peephole_lock);
}
static inline u16
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 19706f0532ea..c61014442aa9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -731,15 +731,16 @@ nouveau_card_init(struct drm_device *dev)
case 0xa3:
case 0xa5:
case 0xa8:
- case 0xaf:
nva3_copy_create(dev);
break;
}
break;
case NV_C0:
- nvc0_copy_create(dev, 1);
+ if (!(nv_rd32(dev, 0x022500) & 0x00000200))
+ nvc0_copy_create(dev, 1);
case NV_D0:
- nvc0_copy_create(dev, 0);
+ if (!(nv_rd32(dev, 0x022500) & 0x00000100))
+ nvc0_copy_create(dev, 0);
break;
default:
break;
@@ -1234,80 +1235,6 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_getparam *getparam = data;
-
- switch (getparam->param) {
- case NOUVEAU_GETPARAM_CHIPSET_ID:
- getparam->value = dev_priv->chipset;
- break;
- case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam->value = dev->pci_vendor;
- break;
- case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam->value = dev->pci_device;
- break;
- case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_pci_device_is_agp(dev))
- getparam->value = NV_AGP;
- else if (pci_is_pcie(dev->pdev))
- getparam->value = NV_PCIE;
- else
- getparam->value = NV_PCI;
- break;
- case NOUVEAU_GETPARAM_FB_SIZE:
- getparam->value = dev_priv->fb_available_size;
- break;
- case NOUVEAU_GETPARAM_AGP_SIZE:
- getparam->value = dev_priv->gart_info.aper_size;
- break;
- case NOUVEAU_GETPARAM_VM_VRAM_BASE:
- getparam->value = 0; /* deprecated */
- break;
- case NOUVEAU_GETPARAM_PTIMER_TIME:
- getparam->value = dev_priv->engine.timer.read(dev);
- break;
- case NOUVEAU_GETPARAM_HAS_BO_USAGE:
- getparam->value = 1;
- break;
- case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = 1;
- break;
- case NOUVEAU_GETPARAM_GRAPH_UNITS:
- /* NV40 and NV50 versions are quite different, but register
- * address is the same. User is supposed to know the card
- * family anyway... */
- if (dev_priv->chipset >= 0x40) {
- getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
- break;
- }
- /* FALLTHRU */
- default:
- NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int
-nouveau_ioctl_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_setparam *setparam = data;
-
- switch (setparam->param) {
- default:
- NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param);
- return -EINVAL;
- }
-
- return 0;
-}
-
/* Wait until (value(reg) & mask) == val, up until timeout has hit */
bool
nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 4c31c63e5528..43accc11102f 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -215,7 +215,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool
-nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nv_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 8300266ffaea..38f19479417c 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -332,7 +332,7 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (nv04_dac_in_use(encoder))
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 2258746016f8..c2675623b7cd 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -179,7 +179,7 @@ static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
}
static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 696d7e7dc2a0..67be5db021f5 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -338,7 +338,7 @@ static int nv17_tv_mode_valid(struct drm_encoder *encoder,
}
static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 97a477b3d52d..22cebd5dd694 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -527,7 +527,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
}
static bool
-nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index eb216a446b89..2c36a6b92c53 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -175,7 +175,8 @@ nv50_dac_restore(struct drm_encoder *encoder)
}
static bool
-nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nv50_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 5c41612723b4..b244d9968c5d 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -646,7 +646,30 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
- nouveau_software_vblank(dev, crtc);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
+ struct nouveau_software_chan *pch, *tmp;
+
+ list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
+ if (pch->vblank.head != crtc)
+ continue;
+
+ spin_lock(&psw->peephole_lock);
+ nv_wr32(dev, 0x001704, pch->vblank.channel);
+ nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
+ if (dev_priv->chipset == 0x50) {
+ nv_wr32(dev, 0x001570, pch->vblank.offset);
+ nv_wr32(dev, 0x001574, pch->vblank.value);
+ } else {
+ nv_wr32(dev, 0x060010, pch->vblank.offset);
+ nv_wr32(dev, 0x060014, pch->vblank.value);
+ }
+ spin_unlock(&psw->peephole_lock);
+
+ list_del(&pch->vblank.list);
+ drm_vblank_put(dev, crtc);
+ }
+
drm_handle_vblank(dev, crtc);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index d9cc2f2638d6..437608d1dfe7 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -299,7 +299,7 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
+ { 0x00000003, "INVALID_OPERATION", NULL },
{ 0x00000004, "INVALID_VALUE", NULL },
{ 0x00000005, "INVALID_ENUM", NULL },
{ 0x00000008, "INVALID_OBJECT", NULL },
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
index 114d2517d4a8..df554d9dacb8 100644
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ b/drivers/gpu/drm/nouveau/nv50_software.c
@@ -36,9 +36,6 @@ struct nv50_software_priv {
struct nv50_software_chan {
struct nouveau_software_chan base;
- struct {
- struct nouveau_gpuobj *object;
- } vblank;
};
static int
@@ -51,11 +48,7 @@ mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
if (!gpuobj)
return -ENOENT;
- if (nouveau_notifier_offset(gpuobj, NULL))
- return -EINVAL;
-
- pch->vblank.object = gpuobj;
- pch->base.vblank.offset = ~0;
+ pch->base.vblank.ctxdma = gpuobj->cinst >> 4;
return 0;
}
@@ -63,11 +56,7 @@ static int
mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
-
- if (nouveau_notifier_offset(pch->vblank.object, &data))
- return -ERANGE;
-
- pch->base.vblank.offset = data >> 2;
+ pch->base.vblank.offset = data;
return 0;
}
@@ -86,7 +75,7 @@ mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
struct drm_device *dev = chan->dev;
- if (!pch->vblank.object || pch->base.vblank.offset == ~0 || data > 1)
+ if (data > 1)
return -EINVAL;
drm_vblank_get(dev, data);
@@ -116,7 +105,7 @@ nv50_software_context_new(struct nouveau_channel *chan, int engine)
return -ENOMEM;
nouveau_software_context_new(&pch->base);
- pch->base.vblank.bo = chan->notifier_bo;
+ pch->base.vblank.channel = chan->ramin->vinst >> 12;
chan->engctx[engine] = pch;
/* dma objects for display sync channel semaphore blocks */
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index a9514eaa74c1..93240bde891b 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -327,7 +327,8 @@ nv50_sor_restore(struct drm_encoder *encoder)
}
static bool
-nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nv50_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index edece9c616eb..bbfcc73b6708 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -117,18 +117,30 @@ nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
nv50_vm_flush_engine(dev, 0x0a);
}
+static struct nouveau_bitfield nv84_crypt_intr[] = {
+ { 0x00000001, "INVALID_STATE" },
+ { 0x00000002, "ILLEGAL_MTHD" },
+ { 0x00000004, "ILLEGAL_CLASS" },
+ { 0x00000080, "QUERY" },
+ { 0x00000100, "FAULT" },
+ {}
+};
+
static void
nv84_crypt_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x102130);
u32 mthd = nv_rd32(dev, 0x102190);
u32 data = nv_rd32(dev, 0x102194);
- u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+ u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
int show = nouveau_ratelimit();
+ int chid = nv50_graph_isr_chid(dev, inst);
if (show) {
- NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- stat, mthd, data, inst);
+ NV_INFO(dev, "PCRYPT:");
+ nouveau_bitfield_print(nv84_crypt_intr, stat);
+ printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
+ chid, inst, mthd, data);
}
nv_wr32(dev, 0x102130, stat);
diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c
index cc82d799fc3b..c564c5e4c30a 100644
--- a/drivers/gpu/drm/nouveau/nv84_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv84_fifo.c
@@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
+ u32 save;
/* remove channel from playlist, will context switch if active */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
nv50_fifo_playlist_update(dev);
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
/* tell any engines on this channel to unload their contexts */
nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ nv_wr32(dev, 0x002520, save);
+
nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
@@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv84_fifo_priv *priv = nv_engine(dev, engine);
int i;
+ u32 save;
/* set playlist length to zero, fifo will unload context */
nv_wr32(dev, 0x0032ec, 0);
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
/* tell all connected engines to unload their contexts */
for (i = 0; i < priv->base.channels; i++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[i];
@@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
}
}
+ nv_wr32(dev, 0x002520, save);
nv_wr32(dev, 0x002140, 0);
return 0;
}
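
Both nv84_fifo hunks above wrap the context-unload sequence in a save/override/restore of register 0x002520 via nv_mask(). A minimal sketch of that read-modify-write-with-restore idiom over a plain variable; the "register" model below is made up, not MMIO access.

/* Save/override/restore idiom from the hunks above, modelled on a plain
 * variable instead of a real register. */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg = 0x2a;	/* pretend register backing store */

/* Like nv_mask(): clear 'mask' bits, set 'val' bits, return the old value. */
static uint32_t fake_mask(uint32_t mask, uint32_t val)
{
	uint32_t old = fake_reg;

	fake_reg = (old & ~mask) | val;
	return old;
}

int main(void)
{
	uint32_t save = fake_mask(0x0000003f, 0x15);	/* force a value */

	printf("during unload: 0x%02x\n", fake_reg);	/* 0x15 */
	fake_reg = save;				/* restore afterwards */
	printf("after restore: 0x%02x\n", fake_reg);	/* 0x2a */
	return 0;
}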
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index abc36626fef0..219850d53286 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -119,9 +119,9 @@ dispatch_dma:
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
.b32 #ctx_src_address_high ~0x000000ff
-.b32 #ctx_src_address_low ~0xfffffff0
+.b32 #ctx_src_address_low ~0xffffffff
.b32 #ctx_dst_address_high ~0x000000ff
-.b32 #ctx_dst_address_low ~0xfffffff0
+.b32 #ctx_dst_address_low ~0xffffffff
.b32 #ctx_src_pitch ~0x0007ffff
.b32 #ctx_dst_pitch ~0x0007ffff
.b32 #ctx_xcnt ~0x0000ffff
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 1f33fbdc00be..37d6de3c9d61 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -1,37 +1,72 @@
-uint32_t nva3_pcopy_data[] = {
+u32 nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
0x00000000,
+/* 0x0008: ctx_dma_src */
0x00000000,
+/* 0x000c: ctx_dma_dst */
0x00000000,
+/* 0x0010: ctx_query_address_high */
0x00000000,
+/* 0x0014: ctx_query_address_low */
0x00000000,
+/* 0x0018: ctx_query_counter */
0x00000000,
+/* 0x001c: ctx_src_address_high */
0x00000000,
+/* 0x0020: ctx_src_address_low */
0x00000000,
+/* 0x0024: ctx_src_pitch */
0x00000000,
+/* 0x0028: ctx_src_tile_mode */
0x00000000,
+/* 0x002c: ctx_src_xsize */
0x00000000,
+/* 0x0030: ctx_src_ysize */
0x00000000,
+/* 0x0034: ctx_src_zsize */
0x00000000,
+/* 0x0038: ctx_src_zoff */
0x00000000,
+/* 0x003c: ctx_src_xoff */
0x00000000,
+/* 0x0040: ctx_src_yoff */
0x00000000,
+/* 0x0044: ctx_src_cpp */
0x00000000,
+/* 0x0048: ctx_dst_address_high */
0x00000000,
+/* 0x004c: ctx_dst_address_low */
0x00000000,
+/* 0x0050: ctx_dst_pitch */
0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
0x00000000,
+/* 0x0058: ctx_dst_xsize */
0x00000000,
+/* 0x005c: ctx_dst_ysize */
0x00000000,
+/* 0x0060: ctx_dst_zsize */
0x00000000,
+/* 0x0064: ctx_dst_zoff */
0x00000000,
+/* 0x0068: ctx_dst_xoff */
0x00000000,
+/* 0x006c: ctx_dst_yoff */
0x00000000,
+/* 0x0070: ctx_dst_cpp */
0x00000000,
+/* 0x0074: ctx_format */
0x00000000,
+/* 0x0078: ctx_swz_const0 */
0x00000000,
+/* 0x007c: ctx_swz_const1 */
0x00000000,
+/* 0x0080: ctx_xcnt */
0x00000000,
+/* 0x0084: ctx_ycnt */
0x00000000,
0x00000000,
0x00000000,
@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0100: dispatch_table */
0x00010000,
0x00000000,
0x00000000,
@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
0x00010162,
0x00000000,
0x00030060,
+/* 0x0128: dispatch_dma */
0x00010170,
0x00000000,
0x00010170,
@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
0x0000001c,
0xffffff00,
0x00000020,
- 0x0000000f,
+ 0x00000000,
0x00000048,
0xffffff00,
0x0000004c,
- 0x0000000f,
+ 0x00000000,
0x00000024,
0xfff80000,
0x00000050,
@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
0x00000800,
};
-uint32_t nva3_pcopy_code[] = {
+u32 nva3_pcopy_code[] = {
+/* 0x0000: main */
0x04fe04bd,
0x3517f000,
0xf10010fe,
@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
0x17f11031,
0x27f01200,
0x0012d003,
+/* 0x002f: spin */
0xf40031f4,
0x0ef40028,
+/* 0x0035: ih */
0x8001cffd,
0xf40812c4,
0x21f4060b,
+/* 0x0041: ih_no_chsw */
0x0412c472,
0xf4060bf4,
+/* 0x004a: ih_no_cmd */
0x11c4c321,
0x4001d00c,
+/* 0x0052: swctx */
0x47f101f8,
0x4bfe7700,
0x0007fe00,
0xf00204b9,
0x01f40643,
0x0604fa09,
+/* 0x006b: swctx_load */
0xfa060ef4,
+/* 0x006e: swctx_done */
0x03f80504,
+/* 0x0072: chsw */
0x27f100f8,
0x23cf1400,
0x1e3fc800,
@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
0x1e3af052,
0xf00023d0,
0x24d00147,
+/* 0x0093: chsw_no_unload */
0xcf00f880,
0x3dc84023,
0x220bf41e,
0xf40131f4,
0x57f05221,
0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
0xa07856bc,
0xb6018068,
0x87d00884,
0x0162b600,
+/* 0x00bb: chsw_finish_load */
0xf0f018f4,
0x23d00237,
+/* 0x00c3: dispatch */
0xf100f880,
0xcf190037,
0x33cf4032,
@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
0x1024b607,
0x010057f1,
0x74bd64bd,
+/* 0x00dc: dispatch_loop */
0x58005658,
0x50b60157,
0x0446b804,
@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
0xb60276bb,
0x57bb0374,
0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
0xb60246bb,
0x45bb0344,
0x01459800,
@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
0xb0014658,
0x1bf40064,
0x00538009,
+/* 0x0127: dispatch_cmd */
0xf4300ef4,
0x55f90132,
0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
0x0125f002,
+/* 0x0138: dispatch_error */
0x100047f1,
0xd00042d0,
0x27f04043,
0x0002d040,
+/* 0x0148: hostirq_wait */
0xf08002cf,
0x24b04024,
0xf71bf400,
+/* 0x0154: dispatch_done */
0x1d0027f1,
0xd00137f0,
0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
0x27f100f8,
0x34bd2200,
0xd00233f0,
0x00f80023,
+/* 0x0170: cmd_dma */
0x012842b7,
0xf00145b6,
0x43801e39,
0x0040b701,
0x0644b606,
0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
0xf030f400,
0xb00001b0,
0x01b00101,
@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
0x70b63847,
0x0232f401,
0x94bd84bd,
+/* 0x01b4: ncomp_loop */
0xb60f4ac4,
0xb4bd0445,
+/* 0x01bc: bpc_loop */
0xf404a430,
0xa5ff0f18,
0x00cbbbc0,
0xf40231f4,
+/* 0x01ce: cmp_c0 */
0x1bf4220e,
0x10c7f00c,
0xf400cbbb,
+/* 0x01da: cmp_c1 */
0xa430160e,
0x0c18f406,
0xbb14c7f0,
0x0ef400cb,
+/* 0x01e9: cmp_zero */
0x80c7f107,
+/* 0x01ed: bpc_next */
0x01c83800,
0xb60180b6,
0xb5b801b0,
@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
0x98110680,
0x68fd2008,
0x0502f400,
+/* 0x0216: dst_xcnt */
0x75fd64bd,
0x1c078000,
0xf10078fd,
@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
0x980056d0,
0x56d01f06,
0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
0x579800f8,
0x6879c70a,
0xb66478c7,
@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
0x0e76b060,
0xf0091bf4,
0x0ef40477,
+/* 0x0291: xtile64 */
0x027cf00f,
0xfd1170b6,
0x77f00947,
+/* 0x029d: xtileok */
0x0f5a9806,
0xfd115b98,
0xb7f000ab,
@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
0x67d00600,
0x0060b700,
0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
0x6cf000f8,
0x0260b702,
0x0864b602,
@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
0xb70067d0,
0x98040060,
0x67d00957,
+/* 0x03ab: cmd_exec_wait */
0xf900f800,
0xf110f900,
0xb6080007,
+/* 0x03b6: loop */
0x01cf0604,
0x0114f000,
0xfcfa1bf4,
0xf800fc10,
+/* 0x03c5: cmd_exec_query */
0x0d34c800,
0xf5701bf4,
0xf103ab21,
@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
0x47f10153,
0x44b60800,
0x0045d006,
+/* 0x0438: query_counter */
0x03ab21f5,
0x080c47f1,
0x980644b6,
@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
0x47f10153,
0x44b60800,
0x0045d006,
+/* 0x0492: cmd_exec */
0x21f500f8,
0x3fc803ab,
0x0e0bf400,
0x018921f5,
0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
0xf11e0ef4,
0xb6081067,
0x77f00664,
@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
0x981c0780,
0x67d02007,
0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
0x32f444bd,
0xc854bd02,
0x0bf4043f,
0x8221f50a,
0x0a0ef403,
+/* 0x04d4: src_tiled */
0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
0xf40749f0,
0x57f00231,
0x083fc82c,
0xf50a0bf4,
0xf4038221,
+/* 0x04ee: dst_tiled */
0x21f50a0e,
0x49f00276,
+/* 0x04f5: cmd_exec_kick */
0x0057f108,
0x0654b608,
0xd0210698,
@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
0xc80054d0,
0x0bf40c3f,
0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
0xf100f803,
0xbd220027,
0x0133f034,
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index a8d17458ced1..cd879f31bb38 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -1,34 +1,65 @@
-uint32_t nvc0_pcopy_data[] = {
+u32 nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
0x00000000,
+/* 0x0004: ctx_query_address_high */
0x00000000,
+/* 0x0008: ctx_query_address_low */
0x00000000,
+/* 0x000c: ctx_query_counter */
0x00000000,
+/* 0x0010: ctx_src_address_high */
0x00000000,
+/* 0x0014: ctx_src_address_low */
0x00000000,
+/* 0x0018: ctx_src_pitch */
0x00000000,
+/* 0x001c: ctx_src_tile_mode */
0x00000000,
+/* 0x0020: ctx_src_xsize */
0x00000000,
+/* 0x0024: ctx_src_ysize */
0x00000000,
+/* 0x0028: ctx_src_zsize */
0x00000000,
+/* 0x002c: ctx_src_zoff */
0x00000000,
+/* 0x0030: ctx_src_xoff */
0x00000000,
+/* 0x0034: ctx_src_yoff */
0x00000000,
+/* 0x0038: ctx_src_cpp */
0x00000000,
+/* 0x003c: ctx_dst_address_high */
0x00000000,
+/* 0x0040: ctx_dst_address_low */
0x00000000,
+/* 0x0044: ctx_dst_pitch */
0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
0x00000000,
+/* 0x004c: ctx_dst_xsize */
0x00000000,
+/* 0x0050: ctx_dst_ysize */
0x00000000,
+/* 0x0054: ctx_dst_zsize */
0x00000000,
+/* 0x0058: ctx_dst_zoff */
0x00000000,
+/* 0x005c: ctx_dst_xoff */
0x00000000,
+/* 0x0060: ctx_dst_yoff */
0x00000000,
+/* 0x0064: ctx_dst_cpp */
0x00000000,
+/* 0x0068: ctx_format */
0x00000000,
+/* 0x006c: ctx_swz_const0 */
0x00000000,
+/* 0x0070: ctx_swz_const1 */
0x00000000,
+/* 0x0074: ctx_xcnt */
0x00000000,
+/* 0x0078: ctx_ycnt */
0x00000000,
0x00000000,
0x00000000,
@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
0x00000000,
0x00000000,
0x00000000,
+/* 0x0100: dispatch_table */
0x00010000,
0x00000000,
0x00000000,
@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
0x00000010,
0xffffff00,
0x00000014,
- 0x0000000f,
+ 0x00000000,
0x0000003c,
0xffffff00,
0x00000040,
- 0x0000000f,
+ 0x00000000,
0x00000018,
0xfff80000,
0x00000044,
@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
0x00000800,
};
-uint32_t nvc0_pcopy_code[] = {
+u32 nvc0_pcopy_code[] = {
+/* 0x0000: main */
0x04fe04bd,
0x3517f000,
0xf10010fe,
@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
0x17f11031,
0x27f01200,
0x0012d003,
+/* 0x002f: spin */
0xf40031f4,
0x0ef40028,
+/* 0x0035: ih */
0x8001cffd,
0xf40812c4,
0x21f4060b,
+/* 0x0041: ih_no_chsw */
0x0412c4ca,
0xf5070bf4,
+/* 0x004b: ih_no_cmd */
0xc4010221,
0x01d00c11,
+/* 0x0053: swctx */
0xf101f840,
0xfe770047,
0x47f1004b,
@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
0xf00204b9,
0x01f40643,
0x0604fa09,
+/* 0x00c3: swctx_load */
0xfa060ef4,
+/* 0x00c6: swctx_done */
0x03f80504,
+/* 0x00ca: chsw */
0x27f100f8,
0x23cf1400,
0x1e3fc800,
@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
0x1e3af053,
0xf00023d0,
0x24d00147,
+/* 0x00eb: chsw_no_unload */
0xcf00f880,
0x3dc84023,
0x090bf41e,
0xf40131f4,
+/* 0x00fa: chsw_finish_load */
0x37f05321,
0x8023d002,
+/* 0x0102: dispatch */
0x37f100f8,
0x32cf1900,
0x0033cf40,
0x07ff24e4,
0xf11024b6,
0xbd010057,
+/* 0x011b: dispatch_loop */
0x5874bd64,
0x57580056,
0x0450b601,
@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
0xbb0f08f4,
0x74b60276,
0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
0xbbdf0ef4,
0x44b60246,
0x0045bb03,
@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
0x64b00146,
0x091bf400,
0xf4005380,
+/* 0x0166: dispatch_cmd */
0x32f4300e,
0xf455f901,
0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
0xf10125f0,
0xd0100047,
0x43d00042,
0x4027f040,
+/* 0x0187: hostirq_wait */
0xcf0002d0,
0x24f08002,
0x0024b040,
+/* 0x0193: dispatch_done */
0xf1f71bf4,
0xf01d0027,
0x23d00137,
+/* 0x019f: cmd_nop */
0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
0x0027f100,
0xf034bd22,
0x23d00233,
+/* 0x01af: cmd_exec_set_format */
0xf400f800,
0x01b0f030,
0x0101b000,
@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
0x3847c701,
0xf40170b6,
0x84bd0232,
+/* 0x01da: ncomp_loop */
0x4ac494bd,
0x0445b60f,
+/* 0x01e2: bpc_loop */
0xa430b4bd,
0x0f18f404,
0xbbc0a5ff,
0x31f400cb,
0x220ef402,
+/* 0x01f4: cmp_c0 */
0xf00c1bf4,
0xcbbb10c7,
0x160ef400,
+/* 0x0200: cmp_c1 */
0xf406a430,
0xc7f00c18,
0x00cbbb14,
+/* 0x020f: cmp_zero */
0xf1070ef4,
+/* 0x0213: bpc_next */
0x380080c7,
0x80b601c8,
0x01b0b601,
@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
0x1d08980e,
0xf40068fd,
0x64bd0502,
+/* 0x023c: dst_xcnt */
0x800075fd,
0x78fd1907,
0x1057f100,
@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
0x1c069800,
0xf44056d0,
0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
0xc7075798,
0x78c76879,
0x0380b664,
0xb06077c7,
0x1bf40e76,
0x0477f009,
+/* 0x02b7: xtile64 */
0xf00f0ef4,
0x70b6027c,
0x0947fd11,
+/* 0x02c3: xtileok */
0x980677f0,
0x5b980c5a,
0x00abfd0e,
@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
0xb70067d0,
0xd0040060,
0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
0xb7026cf0,
0xb6020260,
0x57980864,
@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
0x0060b700,
0x06579804,
0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
0xf900f900,
0x0007f110,
0x0604b608,
+/* 0x03dc: loop */
0xf00001cf,
0x1bf40114,
0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
0xc800f800,
0x1bf40d34,
0xd121f570,
@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
0x0153f026,
0x080047f1,
0xd00644b6,
+/* 0x045e: query_counter */
0x21f50045,
0x47f103d1,
0x44b6080c,
@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
0x080047f1,
0xd00644b6,
0x00f80045,
+/* 0x04b8: cmd_exec */
0x03d121f5,
0xf4003fc8,
0x21f50e0b,
0x47f101af,
0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
0x1067f11e,
0x0664b608,
0x800177f0,
@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
0x1d079819,
0xd00067d0,
0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
0xbd0232f4,
0x043fc854,
0xf50a0bf4,
0xf403a821,
+/* 0x04fa: src_tiled */
0x21f50a0e,
0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
0x0231f407,
0xc82c57f0,
0x0bf4083f,
0xa821f50a,
0x0a0ef403,
+/* 0x0514: dst_tiled */
0x029c21f5,
+/* 0x051b: cmd_exec_kick */
0xf10849f0,
0xb6080057,
0x06980654,
@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
0x54d00546,
0x0c3fc800,
0xf5070bf4,
+/* 0x053f: cmd_exec_done */
0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
0x0027f100,
0xf034bd22,
0x23d00133,
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 7c95c44e2887..4e712b10ebdb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info)
nouveau_mem_exec(&exec, info->perflvl);
if (dev_priv->chipset < 0xd0)
- nv_wr32(dev, 0x611200, 0x00003300);
+ nv_wr32(dev, 0x611200, 0x00003330);
else
nv_wr32(dev, 0x62c000, 0x03030300);
}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index c486d3ce3c2c..dac525b2994e 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -607,7 +607,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
}
static bool
-nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
+nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
@@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ch = EVO_CURS(nv_crtc->index);
- evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+ evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
return 0;
}
@@ -938,7 +938,8 @@ nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
}
static bool
-nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nvd0_dac_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -1377,7 +1378,8 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
}
static bool
-nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+nvd0_sor_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index 1855ecbd843b..e98d144e6eb9 100644
--- a/drivers/gpu/drm/nouveau/nve0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nve0_fifo.c
@@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
+static int
+nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
+ chan = dev_priv->channels.ptr[chid];
+ if (likely(chan))
+ ret = nouveau_finish_page_flip(chan, NULL);
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
+
static void
nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
{
@@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
u32 subc = (addr & 0x00070000);
u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nve0_fifo_page_flip(dev, chid))
+ show &= ~0x00200000;
+ }
+ }
- NV_INFO(dev, "PSUBFIFO %d:", unit);
- nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
- NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ if (show) {
+ NV_INFO(dev, "PFIFO%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 88718fad5d6d..2666a5308ab9 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,7 +71,6 @@ static struct drm_driver driver = {
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
.irq_handler = r128_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = &r128_driver_fops,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 3904d7964a4b..2817101fb167 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -258,8 +258,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
- /* disable crtc pair power gating before programming */
- if (ASIC_IS_DCE6(rdev))
+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
@@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
- /* power gating is per-pair */
- if (ASIC_IS_DCE6(rdev)) {
- struct drm_crtc *other_crtc;
- struct radeon_crtc *other_radeon_crtc;
- list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) {
- other_radeon_crtc = to_radeon_crtc(other_crtc);
- if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) ||
- ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) ||
- ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) ||
- ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) ||
- ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) ||
- ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) {
- /* if both crtcs in the pair are off, enable power gating */
- if (other_radeon_crtc->enabled == false)
- atombios_powergate_crtc(crtc, ATOM_ENABLE);
- break;
- }
- }
- }
+ if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
+ atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
@@ -444,11 +426,28 @@ union atom_enable_ss {
static void atombios_crtc_program_ss(struct radeon_device *rdev,
int enable,
int pll_id,
+ int crtc_id,
struct radeon_atom_ss *ss)
{
+ unsigned i;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
+ if (!enable) {
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+ rdev->mode_info.crtcs[i]->enabled &&
+ i != crtc_id &&
+ pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+ /* one other crtc is using this pll, don't turn
+ * off spread spectrum as it might turn off
+ * the display on the active crtc
+ */
+ return;
+ }
+ }
+ }
+
memset(&args, 0, sizeof(args));
if (ASIC_IS_DCE5(rdev)) {
@@ -457,22 +456,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
- args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
+ args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
args.v3.ucEnable = ATOM_DISABLE;
@@ -482,22 +477,18 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
- args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
- args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
+ args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+ args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
@@ -1036,7 +1027,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
- atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1059,7 +1050,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
- atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss);
}
}
@@ -1539,8 +1530,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
* crtc virtual pixel clock.
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
- if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
+ if (rdev->clock.dp_extclk)
return ATOM_PPLL_INVALID;
+ else if (ASIC_IS_DCE6(rdev))
+ return ATOM_PPLL0;
+ else if (ASIC_IS_DCE5(rdev))
+ return ATOM_DCPLL;
}
}
}
@@ -1576,11 +1571,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
- atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
if (ss_enabled)
- atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
}
}
@@ -1628,7 +1623,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
@@ -1639,18 +1634,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ radeon_crtc->in_mode_set = true;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+ /* disable crtc pair power gating before programming */
+ if (ASIC_IS_DCE6(rdev))
+ atombios_powergate_crtc(crtc, ATOM_DISABLE);
+
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->in_mode_set = false;
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
@@ -1659,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_atom_ss ss;
+ int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (rdev->mode_info.crtcs[i] &&
+ rdev->mode_info.crtcs[i]->enabled &&
+ i != radeon_crtc->crtc_id &&
+ radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
+ /* one other crtc is using this pll, don't turn
+ * off the pll
+ */
+ goto done;
+ }
+ }
+
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
@@ -1678,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
default:
break;
}
+done:
radeon_crtc->pll_id = -1;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 5131b3b0f7d2..3623b98ed3fe 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -22,6 +22,7 @@
*
* Authors: Dave Airlie
* Alex Deucher
+ * Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
@@ -576,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
+ u8 tmp;
if (!ASIC_IS_DCE4(rdev))
return panel_mode;
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_NUTMEG)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
- else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_TRAVIS) {
- u8 id[6];
- int i;
- for (i = 0; i < 6; i++)
- id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
- if (id[0] == 0x73 &&
- id[1] == 0x69 &&
- id[2] == 0x76 &&
- id[3] == 0x61 &&
- id[4] == 0x72 &&
- id[5] == 0x54)
+ if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
+ /* DP bridge chips */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ if (tmp & 1)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
+ (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+ /* eDP */
+ tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
@@ -608,7 +604,7 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
}
void radeon_dp_set_link_config(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
@@ -654,7 +650,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
- DRM_ERROR("displayport link status failed\n");
return false;
}
@@ -833,8 +828,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
@@ -896,8 +893,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 486ccdf4aacd..6e8803a1170c 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -58,7 +58,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
}
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
@@ -1390,12 +1392,38 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- /* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ if (!connector)
+ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ else
+ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_encoder_setup(encoder,
+ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ dig->panel_mode);
+ if (ext_encoder) {
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+ }
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ /* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- else
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ } else {
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+ }
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
@@ -1412,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ /* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+ }
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1732,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
- struct radeon_encoder_atom_dig *dig;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
- /* DCE4/5 */
- if (ASIC_IS_DCE4(rdev)) {
- dig = radeon_encoder->enc_priv;
- if (ASIC_IS_DCE41(rdev)) {
+ if (ASIC_IS_DCE6(rdev)) {
+ /* DCE6 */
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* DCE4/5 */
+ if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
@@ -1840,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
radeon_encoder->pixel_clock = adjusted_mode->clock;
+ /* need to call this here rather than in prepare() since we need some crtc info */
+ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
@@ -1862,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- if (!connector)
- dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
- else
- dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
-
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
- atombios_dig_encoder_setup(encoder,
- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
- dig->panel_mode);
- } else if (ASIC_IS_DCE4(rdev)) {
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- /* setup and enable the encoder */
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- } else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- }
+ /* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -1914,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
break;
}
- if (ext_encoder) {
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
- atombios_external_encoder_setup(encoder, ext_encoder,
- EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
- else
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
- }
-
atombios_apply_encoder_quirks(encoder, adjusted_mode);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
@@ -2108,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
}
radeon_atom_output_lock(encoder, true);
- radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -2129,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
+ /* need to call this here as we need the crtc set up */
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
radeon_atom_output_lock(encoder, false);
}
@@ -2169,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev))
- /* disable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- else {
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
- }
+ /* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
@@ -2234,7 +2248,7 @@ radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
}
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fb3d2e0434c..e93b80a6d4e9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -99,6 +99,14 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
}
}
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
@@ -118,18 +126,49 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to do the pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; when it does, we release the lock and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -214,6 +253,15 @@ int sumo_get_temp(struct radeon_device *rdev)
return actual_temp * 1000;
}
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
void sumo_pm_init_profile(struct radeon_device *rdev)
{
int idx;
@@ -265,6 +313,14 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
rdev->pm.power_state[idx].num_clock_modes - 1;
}
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -292,6 +348,13 @@ void evergreen_pm_misc(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
void evergreen_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -310,6 +373,13 @@ void evergreen_pm_prepare(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
void evergreen_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -328,6 +398,15 @@ void evergreen_pm_finish(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
@@ -364,6 +443,14 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
return connected;
}
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
@@ -424,10 +511,19 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
}
}
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
void evergreen_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
@@ -436,73 +532,72 @@ void evergreen_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+ enabled |= 1 << radeon_connector->hpd.hpd;
}
- if (rdev->irq.installed)
- evergreen_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enabled);
}
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disabled = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
+ disabled |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disabled);
}
/* watermark setup */
@@ -933,6 +1028,14 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
}
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
@@ -956,6 +1059,15 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
}
}
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle.
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
@@ -1117,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
- save->vga_control[0] = RREG32(D1VGA_CONTROL);
- save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
- save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (rdev->num_crtc >= 4) {
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- }
- if (rdev->num_crtc >= 6) {
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
- }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
@@ -1245,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(D1VGA_CONTROL, save->vga_control[0]);
- WREG32(D2VGA_CONTROL, save->vga_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
- }
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
- }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -1371,12 +1426,28 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
- /* FIXME: implement */
+
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
+
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -1858,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
else {
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.evergreen.tile_config |= 1 << 4;
- else
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.evergreen.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.evergreen.tile_config |= 2 << 4;
+ break;
+ }
}
rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
@@ -2188,6 +2267,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -2225,6 +2312,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
@@ -2348,20 +2443,20 @@ int evergreen_irq_set(struct radeon_device *rdev)
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -2369,32 +2464,32 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
- rdev->irq.pflip[2]) {
+ atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
- rdev->irq.pflip[3]) {
+ atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
- rdev->irq.pflip[4]) {
+ atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
- rdev->irq.pflip[5]) {
+ atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -2676,7 +2771,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -2684,22 +2778,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
return IRQ_NONE;
wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
evergreen_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -2716,7 +2809,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -2742,7 +2835,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -2768,7 +2861,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[2])
+ if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -2794,7 +2887,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[3])
+ if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -2820,7 +2913,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[4])
+ if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -2846,7 +2939,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[5])
+ if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -2986,7 +3079,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -2998,17 +3090,19 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = evergreen_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
@@ -3096,13 +3190,11 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -3146,9 +3238,6 @@ int evergreen_suspend(struct radeon_device *rdev)
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_audio_fini(rdev);
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
r700_cp_stop(rdev);
ring->ready = false;
evergreen_irq_suspend(rdev);
@@ -3234,20 +3323,14 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -3274,7 +3357,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -3289,7 +3372,8 @@ void evergreen_fini(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
- u32 link_width_cntl, speed_cntl;
+ u32 link_width_cntl, speed_cntl, mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -3304,6 +3388,15 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 1e96bd458cfd..89cb9feb5653 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -622,7 +622,8 @@ int evergreen_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -633,10 +634,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
rdev->r600_blit.max_dim = 16384;
- /* pin copy shader into vram if already initialized */
- if (rdev->r600_blit.shader_obj)
- goto done;
-
rdev->r600_blit.state_offset = 0;
if (rdev->family < CHIP_CAYMAN)
@@ -667,11 +664,26 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += cayman_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("evergreen failed to allocate shader\n");
- return r;
+ /* pin copy shader into vram if not already initialized */
+ if (!rdev->r600_blit.shader_obj) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("evergreen failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
@@ -713,17 +725,6 @@ int evergreen_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-done:
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c16554122ccd..e44a62a07fe3 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
case V_030000_SQ_TEX_DIM_1D_ARRAY:
case V_030000_SQ_TEX_DIM_2D_ARRAY:
depth = 1;
+ break;
+ case V_030000_SQ_TEX_DIM_2D_MSAA:
+ case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ surf.nsamples = 1 << llevel;
+ llevel = 0;
+ depth = 1;
+ break;
case V_030000_SQ_TEX_DIM_3D:
break;
default:
@@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->db_dirty) {
/* Check stencil buffer */
- if (G_028800_STENCIL_ENABLE(track->db_depth_control)) {
+ if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
+ G_028800_STENCIL_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_stencil(p);
if (r)
return r;
}
/* Check depth buffer */
- if (G_028800_Z_ENABLE(track->db_depth_control)) {
+ if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
+ G_028800_Z_ENABLE(track->db_depth_control)) {
r = evergreen_cs_track_validate_depth(p);
if (r)
return r;
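The evergreen_cs change above gates depth/stencil validation on the surface format as well as on the enable bits. A condensed form of that check, assuming the p, track and r locals of evergreen_cs_track_check():

	if (track->db_dirty) {
		bool has_stencil = G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID;
		bool has_depth = G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID;

		/* only validate buffers that are both enabled and bound to a valid format */
		if (has_stencil && G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		if (has_depth && G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
	}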
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b50b15c70498..79347855d9bf 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -88,6 +88,10 @@
#define CONFIG_MEMSIZE 0x5428
#define CP_COHER_BASE 0x85F8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -1273,6 +1277,8 @@
#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
#define C_028044_FORMAT 0xFFFFFFFE
+#define V_028044_STENCIL_INVALID 0
+#define V_028044_STENCIL_8 1
#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b7bf18e40215..853800e8582f 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
else {
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.cayman.tile_config |= 1 << 4;
- else
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.cayman.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.cayman.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.cayman.tile_config |= 2 << 4;
+ break;
+ }
}
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
@@ -850,11 +858,20 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
+
+ if (ring->rptr_save_reg) {
+ uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ }
+
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -981,16 +998,41 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
+ static const int ridx[] = {
+ RADEON_RING_TYPE_GFX_INDEX,
+ CAYMAN_RING_TYPE_CP1_INDEX,
+ CAYMAN_RING_TYPE_CP2_INDEX
+ };
+ static const unsigned cp_rb_cntl[] = {
+ CP_RB0_CNTL,
+ CP_RB1_CNTL,
+ CP_RB2_CNTL,
+ };
+ static const unsigned cp_rb_rptr_addr[] = {
+ CP_RB0_RPTR_ADDR,
+ CP_RB1_RPTR_ADDR,
+ CP_RB2_RPTR_ADDR
+ };
+ static const unsigned cp_rb_rptr_addr_hi[] = {
+ CP_RB0_RPTR_ADDR_HI,
+ CP_RB1_RPTR_ADDR_HI,
+ CP_RB2_RPTR_ADDR_HI
+ };
+ static const unsigned cp_rb_base[] = {
+ CP_RB0_BASE,
+ CP_RB1_BASE,
+ CP_RB2_BASE
+ };
struct radeon_ring *ring;
- u32 tmp;
- u32 rb_bufsz;
- int r;
+ int i, r;
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
@@ -1012,91 +1054,47 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(CP_DEBUG, (1 << 27));
- /* ring 0 - compute and gfx */
- /* Set ring buffer size */
- ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
-#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
-#endif
- WREG32(CP_RB0_CNTL, tmp);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB0_WPTR, ring->wptr);
-
/* set the wb address wether it's enabled or not */
- WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+ WREG32(SCRATCH_UMSK, 0xff);
- if (rdev->wb.enabled)
- WREG32(SCRATCH_UMSK, 0xff);
- else {
- tmp |= RB_NO_UPDATE;
- WREG32(SCRATCH_UMSK, 0);
- }
-
- mdelay(1);
- WREG32(CP_RB0_CNTL, tmp);
-
- WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
-
- ring->rptr = RREG32(CP_RB0_RPTR);
-
- /* ring1 - compute only */
- /* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
-#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
-#endif
- WREG32(CP_RB1_CNTL, tmp);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB1_WPTR, ring->wptr);
-
- /* set the wb address wether it's enabled or not */
- WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
-
- mdelay(1);
- WREG32(CP_RB1_CNTL, tmp);
-
- WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ for (i = 0; i < 3; ++i) {
+ uint32_t rb_cntl;
+ uint64_t addr;
- ring->rptr = RREG32(CP_RB1_RPTR);
-
- /* ring2 - compute only */
- /* Set ring buffer size */
- ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ /* Set ring buffer size */
+ ring = &rdev->ring[ridx[i]];
+ rb_cntl = drm_order(ring->ring_size / 8);
+ rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
- tmp |= BUF_SWAP_32BIT;
+ rb_cntl |= BUF_SWAP_32BIT;
#endif
- WREG32(CP_RB2_CNTL, tmp);
+ WREG32(cp_rb_cntl[i], rb_cntl);
- /* Initialize the ring buffer's read and write pointers */
- WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(CP_RB2_WPTR, ring->wptr);
+ /* set the wb address whether it's enabled or not */
+ addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
+ WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
+ WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
+ }
- /* set the wb address wether it's enabled or not */
- WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
- WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+ /* set the rb base addr, this causes an internal reset of ALL rings */
+ for (i = 0; i < 3; ++i) {
+ ring = &rdev->ring[ridx[i]];
+ WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
+ }
- mdelay(1);
- WREG32(CP_RB2_CNTL, tmp);
+ for (i = 0; i < 3; ++i) {
+ /* Initialize the ring buffer's read and write pointers */
+ ring = &rdev->ring[ridx[i]];
+ WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
- WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ ring->rptr = ring->wptr = 0;
+ WREG32(ring->rptr_reg, ring->rptr);
+ WREG32(ring->wptr_reg, ring->wptr);
- ring->rptr = RREG32(CP_RB2_RPTR);
+ mdelay(1);
+ WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
+ }
/* start the rings */
cayman_cp_start(rdev);
@@ -1132,6 +1130,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1180,6 +1186,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
@@ -1291,17 +1305,17 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
- r = radeon_vm_manager_start(rdev);
- if (r)
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r)
@@ -1334,10 +1348,6 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- radeon_vm_manager_suspend(rdev);
- r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
@@ -1413,17 +1423,7 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
- r = radeon_vm_manager_init(rdev);
- if (r) {
- dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
- }
-
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
@@ -1432,7 +1432,7 @@ int cayman_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
@@ -1463,7 +1463,7 @@ void cayman_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
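The cayman_cp_resume() rework above replaces three copy-pasted register blocks with per-ring lookup tables and a three-phase loop; the phases stay separate because, per the comment in the hunk, writing a CP_RBx_BASE register internally resets all rings, so every base must be programmed before any read/write pointer is initialized. A trimmed sketch using only the tables and helpers defined in the hunk (the __BIG_ENDIAN swap flag is omitted for brevity):

	/* phase 1: ring size and writeback rptr address for each ring */
	for (i = 0; i < 3; ++i) {
		struct radeon_ring *ring = &rdev->ring[ridx[i]];
		u64 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		u32 rb_cntl = drm_order(ring->ring_size / 8) |
			      (drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8);

		WREG32(cp_rb_cntl[i], rb_cntl);
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* phase 2: base addresses (this write resets ALL rings internally) */
	for (i = 0; i < 3; ++i)
		WREG32(cp_rb_base[i], rdev->ring[ridx[i]].gpu_addr >> 8);

	/* phase 3: zero the read/write pointers while RB_RPTR_WR_ENA is set */
	for (i = 0; i < 3; ++i) {
		struct radeon_ring *ring = &rdev->ring[ridx[i]];

		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
		ring->rptr = ring->wptr = 0;
		WREG32(ring->rptr_reg, ring->rptr);
		WREG32(ring->wptr_reg, ring->wptr);
		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}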
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index a0b98066e207..870db340d377 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -236,6 +236,10 @@
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_COHER_CNTL2 0x85E8
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
+#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index fb44e7e49083..8acb34fd3fd5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,19 @@ MODULE_FIRMWARE(FIRMWARE_R520);
#include "r100_track.h"
+/* This file gathers functions specific to:
+ * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * and others in some cases.
+ */
+
+/**
+ * r100_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (r1xx-r4xx).
+ */
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
@@ -99,128 +112,49 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
-/* This files gather functions specifics to:
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
*/
-
-int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx,
- unsigned reg)
-{
- int r;
- u32 tile_flags = 0;
- u32 tmp;
- struct radeon_cs_reloc *reloc;
- u32 value;
-
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
-
- value = radeon_get_ib_value(p, idx);
- tmp = value & 0x003fffff;
- tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-
- if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- tile_flags |= RADEON_DST_TILE_MICRO;
- }
-
- tmp |= tile_flags;
- p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
- } else
- p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
- return 0;
-}
-
-int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- int idx)
-{
- unsigned c, i;
- struct radeon_cs_reloc *reloc;
- struct r100_cs_track *track;
- int r = 0;
- volatile uint32_t *ib;
- u32 idx_value;
-
- ib = p->ib.ptr;
- track = (struct r100_cs_track *)p->track;
- c = radeon_get_ib_value(p, idx++) & 0x1F;
- if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return -EINVAL;
- }
- track->num_arrays = c;
- for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
-
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize &= 0x7F;
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 1].robj = reloc->robj;
- track->arrays[i + 1].esize = idx_value >> 24;
- track->arrays[i + 1].esize &= 0x7F;
- }
- if (c & 1) {
- r = r100_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
- r100_cs_dump_packet(p, pkt);
- return r;
- }
- idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
- track->arrays[i + 0].robj = reloc->robj;
- track->arrays[i + 0].esize = idx_value >> 8;
- track->arrays[i + 0].esize &= 0x7F;
- }
- return r;
-}
-
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
void r100_post_page_flip(struct radeon_device *rdev, int crtc)
{
/* disable the pflip int */
radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to do the pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -247,6 +181,15 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
void r100_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -329,6 +272,15 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev)
pcie_lanes);
}
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
void r100_pm_init_profile(struct radeon_device *rdev)
{
/* default */
@@ -368,6 +320,14 @@ void r100_pm_init_profile(struct radeon_device *rdev)
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
void r100_pm_misc(struct radeon_device *rdev)
{
int requested_index = rdev->pm.requested_power_state_index;
@@ -459,6 +419,13 @@ void r100_pm_misc(struct radeon_device *rdev)
}
}
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
void r100_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -483,6 +450,13 @@ void r100_pm_prepare(struct radeon_device *rdev)
}
}
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
void r100_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -507,6 +481,14 @@ void r100_pm_finish(struct radeon_device *rdev)
}
}
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the GUI (2D/3D engines) are idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
bool r100_gui_idle(struct radeon_device *rdev)
{
if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
@@ -516,6 +498,15 @@ bool r100_gui_idle(struct radeon_device *rdev)
}
/* hpd for digital panel detect/disconnect */
+/**
+ * r100_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (r1xx-r4xx).
+ * Returns true if connected, false if not connected.
+ */
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
@@ -535,6 +526,14 @@ bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
return connected;
}
+/**
+ * r100_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (r1xx-r4xx).
+ */
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
@@ -563,47 +562,47 @@ void r100_hpd_set_polarity(struct radeon_device *rdev,
}
}
+/**
+ * r100_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (r1xx-r4xx).
+ * Set the polarity, and enable the hpd interrupts.
+ */
void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = true;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = true;
- break;
- default:
- break;
- }
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r100_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
+/**
+ * r100_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (r1xx-r4xx).
+ * Disable the hpd interrupts.
+ */
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- switch (radeon_connector->hpd.hpd) {
- case RADEON_HPD_1:
- rdev->irq.hpd[0] = false;
- break;
- case RADEON_HPD_2:
- rdev->irq.hpd[1] = false;
- break;
- default:
- break;
- }
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
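The HPD setup/teardown hunks above switch from flipping individual rdev->irq.hpd[] booleans to building a per-pin bitmask and handing it to the new radeon_irq_kms_enable_hpd()/radeon_irq_kms_disable_hpd() helpers. The condensed shape, with the same locals as r100_hpd_init() above (bit 0 corresponds to RADEON_HPD_1, matching the old hpd[0] flag):

	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		/* collect one bit per hpd pin instead of setting rdev->irq.hpd[i] */
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);

The fini path mirrors this with a disable mask passed to radeon_irq_kms_disable_hpd().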
/*
@@ -635,15 +634,6 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return radeon_gart_table_ram_alloc(rdev);
}
-/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
-void r100_enable_bm(struct radeon_device *rdev)
-{
- uint32_t tmp;
- /* Enable bus mastering */
- tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
- WREG32(RADEON_BUS_CNTL, tmp);
-}
-
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -705,18 +695,18 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
tmp |= RADEON_GUI_IDLE_MASK;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
@@ -782,7 +772,6 @@ int r100_irq_process(struct radeon_device *rdev)
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
@@ -792,7 +781,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
@@ -801,7 +790,7 @@ int r100_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
@@ -883,7 +872,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
@@ -947,7 +936,7 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
@@ -1192,6 +1181,14 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
return 0;
}
@@ -1202,6 +1199,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
+ radeon_scratch_free(rdev, rdev->ring[RADEON_RING_TYPE_GFX_INDEX].rptr_save_reg);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1223,6 +1221,112 @@ void r100_cp_disable(struct radeon_device *rdev)
/*
* CS functions
*/
+int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx,
+ unsigned reg)
+{
+ int r;
+ u32 tile_flags = 0;
+ u32 tmp;
+ struct radeon_cs_reloc *reloc;
+ u32 value;
+
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+
+ value = radeon_get_ib_value(p, idx);
+ tmp = value & 0x003fffff;
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ tile_flags |= RADEON_DST_TILE_MACRO;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ if (reg == RADEON_SRC_PITCH_OFFSET) {
+ DRM_ERROR("Cannot src blit from microtiled surface\n");
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ tile_flags |= RADEON_DST_TILE_MICRO;
+ }
+
+ tmp |= tile_flags;
+ p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
+ } else
+ p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
+ return 0;
+}
+
+int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ int idx)
+{
+ unsigned c, i;
+ struct radeon_cs_reloc *reloc;
+ struct r100_cs_track *track;
+ int r = 0;
+ volatile uint32_t *ib;
+ u32 idx_value;
+
+ ib = p->ib.ptr;
+ track = (struct r100_cs_track *)p->track;
+ c = radeon_get_ib_value(p, idx++) & 0x1F;
+ if (c > 16) {
+ DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return -EINVAL;
+ }
+ track->num_arrays = c;
+ for (i = 0; i < (c - 1); i+=2, idx+=3) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize &= 0x7F;
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 1].robj = reloc->robj;
+ track->arrays[i + 1].esize = idx_value >> 24;
+ track->arrays[i + 1].esize &= 0x7F;
+ }
+ if (c & 1) {
+ r = r100_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("No reloc for packet3 %d\n",
+ pkt->opcode);
+ r100_cs_dump_packet(p, pkt);
+ return r;
+ }
+ idx_value = radeon_get_ib_value(p, idx);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+ track->arrays[i + 0].robj = reloc->robj;
+ track->arrays[i + 0].esize = idx_value >> 8;
+ track->arrays[i + 0].esize &= 0x7F;
+ }
+ return r;
+}
+
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
@@ -2048,6 +2152,379 @@ int r100_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+{
+ DRM_ERROR("pitch %d\n", t->pitch);
+ DRM_ERROR("use_pitch %d\n", t->use_pitch);
+ DRM_ERROR("width %d\n", t->width);
+ DRM_ERROR("width_11 %d\n", t->width_11);
+ DRM_ERROR("height %d\n", t->height);
+ DRM_ERROR("height_11 %d\n", t->height_11);
+ DRM_ERROR("num levels %d\n", t->num_levels);
+ DRM_ERROR("depth %d\n", t->txdepth);
+ DRM_ERROR("bpp %d\n", t->cpp);
+ DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
+ DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
+ DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+ DRM_ERROR("compress format %d\n", t->compress_format);
+}
+
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+ int block_width, block_height, block_bytes;
+ int wblocks, hblocks;
+ int min_wblocks;
+ int sz;
+
+ block_width = 4;
+ block_height = 4;
+
+ switch (compress_format) {
+ case R100_TRACK_COMP_DXT1:
+ block_bytes = 8;
+ min_wblocks = 4;
+ break;
+ default:
+ case R100_TRACK_COMP_DXT35:
+ block_bytes = 16;
+ min_wblocks = 2;
+ break;
+ }
+
+ hblocks = (h + block_height - 1) / block_height;
+ wblocks = (w + block_width - 1) / block_width;
+ if (wblocks < min_wblocks)
+ wblocks = min_wblocks;
+ sz = wblocks * hblocks * block_bytes;
+ return sz;
+}
+
+static int r100_cs_track_cube(struct radeon_device *rdev,
+ struct r100_cs_track *track, unsigned idx)
+{
+ unsigned face, w, h;
+ struct radeon_bo *cube_robj;
+ unsigned long size;
+ unsigned compress_format = track->textures[idx].compress_format;
+
+ for (face = 0; face < 5; face++) {
+ cube_robj = track->textures[idx].cube_info[face].robj;
+ w = track->textures[idx].cube_info[face].width;
+ h = track->textures[idx].cube_info[face].height;
+
+ if (compress_format) {
+ size = r100_track_compress_size(compress_format, w, h);
+ } else
+ size = w * h;
+ size *= track->textures[idx].cpp;
+
+ size += track->textures[idx].cube_info[face].offset;
+
+ if (size > radeon_bo_size(cube_robj)) {
+ DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
+ r100_cs_track_texture_print(&track->textures[idx]);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ struct r100_cs_track *track)
+{
+ struct radeon_bo *robj;
+ unsigned long size;
+ unsigned u, i, w, h, d;
+ int ret;
+
+ for (u = 0; u < track->num_texture; u++) {
+ if (!track->textures[u].enabled)
+ continue;
+ if (track->textures[u].lookup_disable)
+ continue;
+ robj = track->textures[u].robj;
+ if (robj == NULL) {
+ DRM_ERROR("No texture bound to unit %u\n", u);
+ return -EINVAL;
+ }
+ size = 0;
+ for (i = 0; i <= track->textures[u].num_levels; i++) {
+ if (track->textures[u].use_pitch) {
+ if (rdev->family < CHIP_R300)
+ w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
+ else
+ w = track->textures[u].pitch / (1 << i);
+ } else {
+ w = track->textures[u].width;
+ if (rdev->family >= CHIP_RV515)
+ w |= track->textures[u].width_11;
+ w = w / (1 << i);
+ if (track->textures[u].roundup_w)
+ w = roundup_pow_of_two(w);
+ }
+ h = track->textures[u].height;
+ if (rdev->family >= CHIP_RV515)
+ h |= track->textures[u].height_11;
+ h = h / (1 << i);
+ if (track->textures[u].roundup_h)
+ h = roundup_pow_of_two(h);
+ if (track->textures[u].tex_coord_type == 1) {
+ d = (1 << track->textures[u].txdepth) / (1 << i);
+ if (!d)
+ d = 1;
+ } else {
+ d = 1;
+ }
+ if (track->textures[u].compress_format) {
+
+ size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
+ /* compressed textures are block based */
+ } else
+ size += w * h * d;
+ }
+ size *= track->textures[u].cpp;
+
+ switch (track->textures[u].tex_coord_type) {
+ case 0:
+ case 1:
+ break;
+ case 2:
+ if (track->separate_cube) {
+ ret = r100_cs_track_cube(rdev, track, u);
+ if (ret)
+ return ret;
+ } else
+ size *= 6;
+ break;
+ default:
+ DRM_ERROR("Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(robj)) {
+ DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
+ r100_cs_track_texture_print(&track->textures[u]);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i;
+ unsigned long size;
+ unsigned prim_walk;
+ unsigned nverts;
+ unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
+
+ if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
+ !track->blend_read_enable)
+ num_cb = 0;
+
+ for (i = 0; i < num_cb; i++) {
+ if (track->cb[i].robj == NULL) {
+ DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ return -EINVAL;
+ }
+ size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+ size += track->cb[i].offset;
+ if (size > radeon_bo_size(track->cb[i].robj)) {
+ DRM_ERROR("[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->cb_dirty = false;
+
+ if (track->zb_dirty && track->z_enabled) {
+ if (track->zb.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for z buffer !\n");
+ return -EINVAL;
+ }
+ size = track->zb.pitch * track->zb.cpp * track->maxy;
+ size += track->zb.offset;
+ if (size > radeon_bo_size(track->zb.robj)) {
+ DRM_ERROR("[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->zb_dirty = false;
+
+ if (track->aa_dirty && track->aaresolve) {
+ if (track->aa.robj == NULL) {
+ DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ return -EINVAL;
+ }
+ /* I believe the format comes from colorbuffer0. */
+ size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+ size += track->aa.offset;
+ if (size > radeon_bo_size(track->aa.robj)) {
+ DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
+ return -EINVAL;
+ }
+ }
+ track->aa_dirty = false;
+
+ prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
+ if (track->vap_vf_cntl & (1 << 14)) {
+ nverts = track->vap_alt_nverts;
+ } else {
+ nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
+ }
+ switch (prim_walk) {
+ case 1:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * track->max_indx * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ DRM_ERROR("Max indices %u\n", track->max_indx);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 2:
+ for (i = 0; i < track->num_arrays; i++) {
+ size = track->arrays[i].esize * (nverts - 1) * 4;
+ if (track->arrays[i].robj == NULL) {
+ DRM_ERROR("(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
+ return -EINVAL;
+ }
+ if (size > radeon_bo_size(track->arrays[i].robj)) {
+ dev_err(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ return -EINVAL;
+ }
+ }
+ break;
+ case 3:
+ size = track->vtx_size * nverts;
+ if (size != track->immd_dwords) {
+ DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
+ return -EINVAL;
+ }
+
+ if (track->tex_dirty) {
+ track->tex_dirty = false;
+ return r100_cs_track_texture_check(rdev, track);
+ }
+ return 0;
+}
+
+void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
+{
+ unsigned i, face;
+
+ track->cb_dirty = true;
+ track->zb_dirty = true;
+ track->tex_dirty = true;
+ track->aa_dirty = true;
+
+ if (rdev->family < CHIP_R300) {
+ track->num_cb = 1;
+ if (rdev->family <= CHIP_RS200)
+ track->num_texture = 3;
+ else
+ track->num_texture = 6;
+ track->maxy = 2048;
+ track->separate_cube = 1;
+ } else {
+ track->num_cb = 4;
+ track->num_texture = 16;
+ track->maxy = 4096;
+ track->separate_cube = 0;
+ track->aaresolve = false;
+ track->aa.robj = NULL;
+ }
+
+ for (i = 0; i < track->num_cb; i++) {
+ track->cb[i].robj = NULL;
+ track->cb[i].pitch = 8192;
+ track->cb[i].cpp = 16;
+ track->cb[i].offset = 0;
+ }
+ track->z_enabled = true;
+ track->zb.robj = NULL;
+ track->zb.pitch = 8192;
+ track->zb.cpp = 4;
+ track->zb.offset = 0;
+ track->vtx_size = 0x7F;
+ track->immd_dwords = 0xFFFFFFFFUL;
+ track->num_arrays = 11;
+ track->max_indx = 0x00FFFFFFUL;
+ for (i = 0; i < track->num_arrays; i++) {
+ track->arrays[i].robj = NULL;
+ track->arrays[i].esize = 0x7F;
+ }
+ for (i = 0; i < track->num_texture; i++) {
+ track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ track->textures[i].pitch = 16536;
+ track->textures[i].width = 16536;
+ track->textures[i].height = 16536;
+ track->textures[i].width_11 = 1 << 11;
+ track->textures[i].height_11 = 1 << 11;
+ track->textures[i].num_levels = 12;
+ if (rdev->family <= CHIP_RS200) {
+ track->textures[i].tex_coord_type = 0;
+ track->textures[i].txdepth = 0;
+ } else {
+ track->textures[i].txdepth = 16;
+ track->textures[i].tex_coord_type = 1;
+ }
+ track->textures[i].cpp = 64;
+ track->textures[i].robj = NULL;
+ /* CS IB emission code makes sure texture unit are disabled */
+ track->textures[i].enabled = false;
+ track->textures[i].lookup_disable = false;
+ track->textures[i].roundup_w = true;
+ track->textures[i].roundup_h = true;
+ if (track->separate_cube)
+ for (face = 0; face < 5; face++) {
+ track->textures[i].cube_info[face].robj = NULL;
+ track->textures[i].cube_info[face].width = 16536;
+ track->textures[i].cube_info[face].height = 16536;
+ track->textures[i].cube_info[face].offset = 0;
+ }
+ }
+}
/*
* Global GPU functions
@@ -2175,6 +2652,15 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+ uint32_t tmp;
+ /* Enable bus mastering */
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
+}
+
void r100_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
@@ -3261,380 +3747,6 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
}
-static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
-{
- DRM_ERROR("pitch %d\n", t->pitch);
- DRM_ERROR("use_pitch %d\n", t->use_pitch);
- DRM_ERROR("width %d\n", t->width);
- DRM_ERROR("width_11 %d\n", t->width_11);
- DRM_ERROR("height %d\n", t->height);
- DRM_ERROR("height_11 %d\n", t->height_11);
- DRM_ERROR("num levels %d\n", t->num_levels);
- DRM_ERROR("depth %d\n", t->txdepth);
- DRM_ERROR("bpp %d\n", t->cpp);
- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
-}
-
-static int r100_track_compress_size(int compress_format, int w, int h)
-{
- int block_width, block_height, block_bytes;
- int wblocks, hblocks;
- int min_wblocks;
- int sz;
-
- block_width = 4;
- block_height = 4;
-
- switch (compress_format) {
- case R100_TRACK_COMP_DXT1:
- block_bytes = 8;
- min_wblocks = 4;
- break;
- default:
- case R100_TRACK_COMP_DXT35:
- block_bytes = 16;
- min_wblocks = 2;
- break;
- }
-
- hblocks = (h + block_height - 1) / block_height;
- wblocks = (w + block_width - 1) / block_width;
- if (wblocks < min_wblocks)
- wblocks = min_wblocks;
- sz = wblocks * hblocks * block_bytes;
- return sz;
-}
-
-static int r100_cs_track_cube(struct radeon_device *rdev,
- struct r100_cs_track *track, unsigned idx)
-{
- unsigned face, w, h;
- struct radeon_bo *cube_robj;
- unsigned long size;
- unsigned compress_format = track->textures[idx].compress_format;
-
- for (face = 0; face < 5; face++) {
- cube_robj = track->textures[idx].cube_info[face].robj;
- w = track->textures[idx].cube_info[face].width;
- h = track->textures[idx].cube_info[face].height;
-
- if (compress_format) {
- size = r100_track_compress_size(compress_format, w, h);
- } else
- size = w * h;
- size *= track->textures[idx].cpp;
-
- size += track->textures[idx].cube_info[face].offset;
-
- if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
- r100_cs_track_texture_print(&track->textures[idx]);
- return -1;
- }
- }
- return 0;
-}
-
-static int r100_cs_track_texture_check(struct radeon_device *rdev,
- struct r100_cs_track *track)
-{
- struct radeon_bo *robj;
- unsigned long size;
- unsigned u, i, w, h, d;
- int ret;
-
- for (u = 0; u < track->num_texture; u++) {
- if (!track->textures[u].enabled)
- continue;
- if (track->textures[u].lookup_disable)
- continue;
- robj = track->textures[u].robj;
- if (robj == NULL) {
- DRM_ERROR("No texture bound to unit %u\n", u);
- return -EINVAL;
- }
- size = 0;
- for (i = 0; i <= track->textures[u].num_levels; i++) {
- if (track->textures[u].use_pitch) {
- if (rdev->family < CHIP_R300)
- w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
- else
- w = track->textures[u].pitch / (1 << i);
- } else {
- w = track->textures[u].width;
- if (rdev->family >= CHIP_RV515)
- w |= track->textures[u].width_11;
- w = w / (1 << i);
- if (track->textures[u].roundup_w)
- w = roundup_pow_of_two(w);
- }
- h = track->textures[u].height;
- if (rdev->family >= CHIP_RV515)
- h |= track->textures[u].height_11;
- h = h / (1 << i);
- if (track->textures[u].roundup_h)
- h = roundup_pow_of_two(h);
- if (track->textures[u].tex_coord_type == 1) {
- d = (1 << track->textures[u].txdepth) / (1 << i);
- if (!d)
- d = 1;
- } else {
- d = 1;
- }
- if (track->textures[u].compress_format) {
-
- size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
- /* compressed textures are block based */
- } else
- size += w * h * d;
- }
- size *= track->textures[u].cpp;
-
- switch (track->textures[u].tex_coord_type) {
- case 0:
- case 1:
- break;
- case 2:
- if (track->separate_cube) {
- ret = r100_cs_track_cube(rdev, track, u);
- if (ret)
- return ret;
- } else
- size *= 6;
- break;
- default:
- DRM_ERROR("Invalid texture coordinate type %u for unit "
- "%u\n", track->textures[u].tex_coord_type, u);
- return -EINVAL;
- }
- if (size > radeon_bo_size(robj)) {
- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_bo_size(robj));
- r100_cs_track_texture_print(&track->textures[u]);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
-{
- unsigned i;
- unsigned long size;
- unsigned prim_walk;
- unsigned nverts;
- unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
-
- if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
- !track->blend_read_enable)
- num_cb = 0;
-
- for (i = 0; i < num_cb; i++) {
- if (track->cb[i].robj == NULL) {
- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
- return -EINVAL;
- }
- size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
- size += track->cb[i].offset;
- if (size > radeon_bo_size(track->cb[i].robj)) {
- DRM_ERROR("[drm] Buffer too small for color buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->cb[i].robj));
- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
- i, track->cb[i].pitch, track->cb[i].cpp,
- track->cb[i].offset, track->maxy);
- return -EINVAL;
- }
- }
- track->cb_dirty = false;
-
- if (track->zb_dirty && track->z_enabled) {
- if (track->zb.robj == NULL) {
- DRM_ERROR("[drm] No buffer for z buffer !\n");
- return -EINVAL;
- }
- size = track->zb.pitch * track->zb.cpp * track->maxy;
- size += track->zb.offset;
- if (size > radeon_bo_size(track->zb.robj)) {
- DRM_ERROR("[drm] Buffer too small for z buffer "
- "(need %lu have %lu) !\n", size,
- radeon_bo_size(track->zb.robj));
- DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
- track->zb.pitch, track->zb.cpp,
- track->zb.offset, track->maxy);
- return -EINVAL;
- }
- }
- track->zb_dirty = false;
-
- if (track->aa_dirty && track->aaresolve) {
- if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
- return -EINVAL;
- }
- /* I believe the format comes from colorbuffer0. */
- size = track->aa.pitch * track->cb[0].cpp * track->maxy;
- size += track->aa.offset;
- if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
- return -EINVAL;
- }
- }
- track->aa_dirty = false;
-
- prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
- if (track->vap_vf_cntl & (1 << 14)) {
- nverts = track->vap_alt_nverts;
- } else {
- nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
- }
- switch (prim_walk) {
- case 1:
- for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * track->max_indx * 4;
- if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
- return -EINVAL;
- }
- if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- DRM_ERROR("Max indices %u\n", track->max_indx);
- return -EINVAL;
- }
- }
- break;
- case 2:
- for (i = 0; i < track->num_arrays; i++) {
- size = track->arrays[i].esize * (nverts - 1) * 4;
- if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
- return -EINVAL;
- }
- if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- return -EINVAL;
- }
- }
- break;
- case 3:
- size = track->vtx_size * nverts;
- if (size != track->immd_dwords) {
- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
- track->immd_dwords, size);
- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
- nverts, track->vtx_size);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
- prim_walk);
- return -EINVAL;
- }
-
- if (track->tex_dirty) {
- track->tex_dirty = false;
- return r100_cs_track_texture_check(rdev, track);
- }
- return 0;
-}
-
-void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
-{
- unsigned i, face;
-
- track->cb_dirty = true;
- track->zb_dirty = true;
- track->tex_dirty = true;
- track->aa_dirty = true;
-
- if (rdev->family < CHIP_R300) {
- track->num_cb = 1;
- if (rdev->family <= CHIP_RS200)
- track->num_texture = 3;
- else
- track->num_texture = 6;
- track->maxy = 2048;
- track->separate_cube = 1;
- } else {
- track->num_cb = 4;
- track->num_texture = 16;
- track->maxy = 4096;
- track->separate_cube = 0;
- track->aaresolve = false;
- track->aa.robj = NULL;
- }
-
- for (i = 0; i < track->num_cb; i++) {
- track->cb[i].robj = NULL;
- track->cb[i].pitch = 8192;
- track->cb[i].cpp = 16;
- track->cb[i].offset = 0;
- }
- track->z_enabled = true;
- track->zb.robj = NULL;
- track->zb.pitch = 8192;
- track->zb.cpp = 4;
- track->zb.offset = 0;
- track->vtx_size = 0x7F;
- track->immd_dwords = 0xFFFFFFFFUL;
- track->num_arrays = 11;
- track->max_indx = 0x00FFFFFFUL;
- for (i = 0; i < track->num_arrays; i++) {
- track->arrays[i].robj = NULL;
- track->arrays[i].esize = 0x7F;
- }
- for (i = 0; i < track->num_texture; i++) {
- track->textures[i].compress_format = R100_TRACK_COMP_NONE;
- track->textures[i].pitch = 16536;
- track->textures[i].width = 16536;
- track->textures[i].height = 16536;
- track->textures[i].width_11 = 1 << 11;
- track->textures[i].height_11 = 1 << 11;
- track->textures[i].num_levels = 12;
- if (rdev->family <= CHIP_RS200) {
- track->textures[i].tex_coord_type = 0;
- track->textures[i].txdepth = 0;
- } else {
- track->textures[i].txdepth = 16;
- track->textures[i].tex_coord_type = 1;
- }
- track->textures[i].cpp = 64;
- track->textures[i].robj = NULL;
- /* CS IB emission code makes sure texture unit are disabled */
- track->textures[i].enabled = false;
- track->textures[i].lookup_disable = false;
- track->textures[i].roundup_w = true;
- track->textures[i].roundup_h = true;
- if (track->separate_cube)
- for (face = 0; face < 5; face++) {
- track->textures[i].cube_info[face].robj = NULL;
- track->textures[i].cube_info[face].width = 16536;
- track->textures[i].cube_info[face].height = 16536;
- track->textures[i].cube_info[face].offset = 0;
- }
- }
-}
-
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
@@ -3679,6 +3791,12 @@ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ if (ring->rptr_save_reg) {
+ u32 next_rptr = ring->wptr + 2 + 3;
+ radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
+ radeon_ring_write(ring, next_rptr);
+ }
+
radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(ring, ib->gpu_addr);
radeon_ring_write(ring, ib->length_dw);
@@ -3711,7 +3829,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
- r = radeon_ib_schedule(rdev, &ib);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -3740,12 +3858,6 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-void r100_ib_fini(struct radeon_device *rdev)
-{
- radeon_ib_pool_suspend(rdev);
- radeon_ib_pool_fini(rdev);
-}
-
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shutdown CP we shouldn't need to do that but better be safe than
@@ -3905,13 +4017,11 @@ static int r100_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -3948,7 +4058,6 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3961,7 +4070,7 @@ void r100_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
@@ -4068,20 +4177,14 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r100_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
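How the scratch register acquired in r100_cp_init() is consumed: before dispatching an IB, r100_ring_ib_execute() records the read pointer the CP should reach once the queued packets drain, i.e. the current wptr plus the 2 dwords of the scratch write itself and the 3-dword IB packet, presumably so later debugging or lockup checks can see how far the CP actually got. The snippet mirrors the hunk above:

	if (ring->rptr_save_reg) {
		u32 next_rptr = ring->wptr + 2 + 3;	/* this write (2 dwords) + IB packet (3 dwords) */
		radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
	radeon_ring_write(ring, ib->gpu_addr);
	radeon_ring_write(ring, ib->length_dw);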
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a26144d01207..f0889259eb08 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -85,7 +85,7 @@ int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
@@ -120,7 +120,7 @@ int r200_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
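The copy entry points now take a struct radeon_fence ** and pass the ring index into radeon_fence_emit(), which suggests the fence object is allocated by the fence code rather than preallocated by the caller. A hypothetical caller under the new signature; radeon_fence_wait() and radeon_fence_unref() are the usual radeon helpers and are assumed unchanged here:

	struct radeon_fence *fence = NULL;
	int r;

	r = r200_copy_dma(rdev, src_offset, dst_offset, num_gpu_pages, &fence);
	if (!r)
		r = radeon_fence_wait(fence, false);	/* block until the DMA copy retires */
	radeon_fence_unref(&fence);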
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 97722a33e513..646a1927dda7 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1391,13 +1391,11 @@ static int r300_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -1436,7 +1434,6 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1451,7 +1448,7 @@ void r300_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -1538,20 +1535,14 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r300_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 99137be7a300..f2f5bf6d339f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -275,13 +275,11 @@ static int r420_startup(struct radeon_device *rdev)
}
r420_cp_errata_init(rdev);
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -324,7 +322,6 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -340,7 +337,7 @@ void r420_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
@@ -438,20 +435,14 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r420_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index b5cf8375cd25..079d3c52c08a 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -203,13 +203,11 @@ static int r520_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -311,20 +309,14 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r520_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bff627293812..d79c639ae739 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -709,6 +709,7 @@ void r600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -729,28 +730,22 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
- rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
- rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
- rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
- rdev->irq.hpd[5] = true;
break;
default:
break;
@@ -759,85 +754,73 @@ void r600_hpd_init(struct radeon_device *rdev)
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
- rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- r600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
- if (ASIC_IS_DCE3(rdev)) {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ if (ASIC_IS_DCE3(rdev)) {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
- rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
- rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
- rdev->irq.hpd[5] = false;
break;
default:
break;
}
- }
- } else {
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ } else {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
- rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
- rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
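The two hunks above replace the per-pin rdev->irq.hpd[i] flags with a mask that is accumulated while walking the connector list and then programmed once through radeon_irq_kms_enable_hpd()/radeon_irq_kms_disable_hpd(). A minimal user-space sketch of that accumulate-then-program pattern, using invented names (hpd_enable_mask, connectors[]) rather than the radeon structures:

#include <stdio.h>

enum hpd_pin { HPD_1, HPD_2, HPD_3, HPD_4, HPD_5, HPD_6 };

/* stand-in for radeon_irq_kms_enable_hpd(): takes one mask for all pins */
static void hpd_enable_mask(unsigned mask)
{
	printf("enabling HPD interrupts, mask=0x%02x\n", mask);
}

int main(void)
{
	enum hpd_pin connectors[] = { HPD_1, HPD_3, HPD_6 };
	unsigned enable = 0;
	size_t i;

	/* accumulate one bit per connector, then program the IRQs in one call */
	for (i = 0; i < sizeof(connectors) / sizeof(connectors[0]); i++)
		enable |= 1u << connectors[i];

	hpd_enable_mask(enable);	/* prints mask=0x25 (bits 0, 2 and 5) */
	return 0;
}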
/*
@@ -1306,6 +1289,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1349,6 +1340,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
rv515_mc_resume(rdev, &save);
return 0;
}
@@ -2172,18 +2171,29 @@ int r600_cp_resume(struct radeon_device *rdev)
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
+ int r;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
+
+ if (radeon_ring_supports_scratch_reg(rdev, ring)) {
+ r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
+ if (r) {
+ DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
+ ring->rptr_save_reg = 0;
+ }
+ }
}
void r600_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
@@ -2206,7 +2216,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i, ridx = radeon_ring_index(rdev, ring);
+ unsigned i;
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2217,7 +2227,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, ring, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
@@ -2232,10 +2242,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ridx, scratch, tmp);
+ ring->idx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2309,34 +2319,21 @@ int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence)
+ struct radeon_fence **fence)
{
+ struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, &vb);
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r) {
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb);
+ r600_blit_done_copy(rdev, fence, vb, sem);
return 0;
}
-void r600_blit_suspend(struct radeon_device *rdev)
-{
- int r;
-
- /* unpin shaders bo */
- if (rdev->r600_blit.shader_obj) {
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- }
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -2419,13 +2416,11 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -2475,9 +2470,6 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
- /* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
@@ -2559,20 +2551,14 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -2588,7 +2574,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2607,9 +2593,24 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 next_rptr;
+
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4;
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+ radeon_ring_write(ring, next_rptr);
+ radeon_ring_write(ring, 0);
+ }
- /* FIXME: implement */
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
@@ -2627,7 +2628,6 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
uint32_t tmp = 0;
unsigned i;
int r;
- int ring_index = radeon_ring_index(rdev, ring);
r = radeon_scratch_get(rdev, &scratch);
if (r) {
@@ -2635,7 +2635,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, ring_index, &ib, 256);
+ r = radeon_ib_get(rdev, ring->idx, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2644,7 +2644,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
@@ -2857,7 +2857,6 @@ void r600_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
- rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
@@ -3042,18 +3041,18 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
@@ -3309,7 +3308,6 @@ int r600_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
@@ -3321,24 +3319,21 @@ int r600_irq_process(struct radeon_device *rdev)
RREG32(IH_RB_WPTR);
wptr = r600_get_ih_wptr(rdev);
- rptr = rdev->ih.rptr;
- DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
-
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
- }
-restart_ih:
+ rptr = rdev->ih.rptr;
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
r600_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -3355,7 +3350,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -3381,7 +3376,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -3480,7 +3475,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -3492,17 +3486,19 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = r600_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
if (queue_hdmi)
schedule_work(&rdev->audio_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = r600_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
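The rework above drops the ih.lock spinlock and the ih.wptr shadow: an atomic_xchg on entry decides whether another thread is already draining the interrupt ring, and after the lock is released the write pointer is re-read so that any entries that arrived in the meantime restart processing. A single-threaded sketch of that guard-and-recheck pattern with C11 <stdatomic.h>; the ring contents and get_wptr() are stand-ins, not driver code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int ih_lock;		/* 0 = free, 1 = someone is processing */
static unsigned ih_rptr;
static unsigned fake_wptr = 4;		/* pretend the hardware produced 4 entries */

static unsigned get_wptr(void)		/* stand-in for r600_get_ih_wptr() */
{
	return fake_wptr;
}

static int process_irqs(void)
{
	unsigned wptr = get_wptr();
	unsigned rptr;

restart:
	/* is somebody else already processing irqs? */
	if (atomic_exchange(&ih_lock, 1))
		return 0;		/* yes: let them pick up the new entries */

	rptr = ih_rptr;
	while (rptr != wptr) {
		printf("handling IH entry at %u\n", rptr);
		rptr++;			/* the real ring advances in 16-byte steps */
	}

	ih_rptr = rptr;
	atomic_store(&ih_lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = get_wptr();
	if (wptr != rptr)
		goto restart;

	return 1;
}

int main(void)
{
	process_irqs();
	return 0;
}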
@@ -3685,6 +3681,8 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
+ u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -3703,6 +3701,15 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (rdev->family <= CHIP_R600)
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
@@ -3782,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
+
+/**
+ * r600_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (R6xx-cayman).
+ * Returns the 64-bit clock counter snapshot.
+ */
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
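r600_get_gpu_clock() above latches the counter via RLC_CAPTURE_GPU_CLOCK_COUNT and stitches the 64-bit value together from the LSB and MSB registers under gpu_clock_mutex. The widening itself is plain shift-and-or; a small stand-alone illustration with invented register values:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* combine two latched 32-bit halves into one 64-bit counter value */
static uint64_t combine_clock(uint32_t lsb, uint32_t msb)
{
	return (uint64_t)lsb | ((uint64_t)msb << 32);
}

int main(void)
{
	uint32_t lsb = 0x89ABCDEF;	/* hypothetical RLC_GPU_CLOCK_COUNT_LSB */
	uint32_t msb = 0x00000123;	/* hypothetical RLC_GPU_CLOCK_COUNT_MSB */

	printf("clock = 0x%016" PRIx64 "\n", combine_clock(lsb, msb));
	/* prints clock = 0x0000012389abcdef */
	return 0;
}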
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 03b6e0d3d503..2bef8549ddfe 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,8 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
- rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
+ rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
+ rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
@@ -523,10 +524,6 @@ int r600_blit_init(struct radeon_device *rdev)
rdev->r600_blit.max_dim = 8192;
- /* pin copy shader into vram if already initialized */
- if (rdev->r600_blit.shader_obj)
- goto done;
-
rdev->r600_blit.state_offset = 0;
if (rdev->family >= CHIP_RV770)
@@ -551,11 +548,26 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("r600 failed to allocate shader\n");
- return r;
+ /* pin copy shader into vram if not already initialized */
+ if (rdev->r600_blit.shader_obj == NULL) {
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM,
+ NULL, &rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("r600 failed to allocate shader\n");
+ return r;
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+ return r;
+ }
}
DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
@@ -586,17 +598,6 @@ int r600_blit_init(struct radeon_device *rdev)
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-done:
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -666,7 +667,8 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb)
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
@@ -689,34 +691,50 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
return r;
}
+ r = radeon_semaphore_create(rdev, sem);
+ if (r) {
+ radeon_sa_bo_free(rdev, vb, NULL);
+ return r;
+ }
+
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
+ radeon_semaphore_free(rdev, sem, NULL);
return r;
}
+ if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
+ radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
+ RADEON_RING_TYPE_GFX_INDEX);
+ radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
+ } else {
+ radeon_semaphore_free(rdev, sem, NULL);
+ }
+
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
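r600_blit_prepare_copy() now only emits a semaphore wait when the caller's fence was emitted on a different ring and has not already been synced to the GFX ring; otherwise the freshly created semaphore is released again. A simplified model of that decision, where the fence struct and ring numbering are invented rather than the radeon types:

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3
#define GFX_RING  0

struct fence {
	int  ring;			/* ring the fence was emitted on */
	bool synced_to[NUM_RINGS];	/* rings already known to wait on it */
};

/* decide whether ring 'waiter' still needs a semaphore wait on 'f' */
static bool need_sync(const struct fence *f, int waiter)
{
	if (!f || f->ring == waiter)
		return false;		/* same ring: ordering is implicit */
	return !f->synced_to[waiter];
}

int main(void)
{
	struct fence dma_fence = { .ring = 2 };

	if (need_sync(&dma_fence, GFX_RING)) {
		printf("emit semaphore: ring %d waits on ring %d\n",
		       GFX_RING, dma_fence.ring);
		dma_fence.synced_to[GFX_RING] = true;	/* note the sync, as the driver does */
	} else {
		printf("no semaphore needed\n");
	}
	return 0;
}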
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb)
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_fence_emit(rdev, fence);
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, fence);
+ radeon_sa_bo_free(rdev, &vb, *fence);
+ radeon_semaphore_free(rdev, &sem, *fence);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ca87f7afaf23..f37676d7f217 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -47,18 +47,23 @@ struct r600_cs_track {
u32 npipes;
/* value we track */
u32 sq_config;
+ u32 log_nsamples;
u32 nsamples;
u32 cb_color_base_last[8];
struct radeon_bo *cb_color_bo[8];
u64 cb_color_bo_mc[8];
- u32 cb_color_bo_offset[8];
- struct radeon_bo *cb_color_frag_bo[8]; /* unused */
- struct radeon_bo *cb_color_tile_bo[8]; /* unused */
+ u64 cb_color_bo_offset[8];
+ struct radeon_bo *cb_color_frag_bo[8];
+ u64 cb_color_frag_offset[8];
+ struct radeon_bo *cb_color_tile_bo[8];
+ u64 cb_color_tile_offset[8];
+ u32 cb_color_mask[8];
u32 cb_color_info[8];
u32 cb_color_view[8];
u32 cb_color_size_idx[8]; /* unused */
u32 cb_target_mask;
u32 cb_shader_mask; /* unused */
+ bool is_resolve;
u32 cb_color_size[8];
u32 vgt_strmout_en;
u32 vgt_strmout_buffer_en;
@@ -311,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track)
track->cb_color_bo[i] = NULL;
track->cb_color_bo_offset[i] = 0xFFFFFFFF;
track->cb_color_bo_mc[i] = 0xFFFFFFFF;
- }
+ track->cb_color_frag_bo[i] = NULL;
+ track->cb_color_frag_offset[i] = 0xFFFFFFFF;
+ track->cb_color_tile_bo[i] = NULL;
+ track->cb_color_tile_offset[i] = 0xFFFFFFFF;
+ track->cb_color_mask[i] = 0xFFFFFFFF;
+ }
+ track->is_resolve = false;
+ track->nsamples = 16;
+ track->log_nsamples = 4;
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;
@@ -348,11 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
volatile u32 *ib = p->ib.ptr;
unsigned array_mode;
u32 format;
+ /* When resolve is used, the second colorbuffer always has 1 sample. */
+ unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
- if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
- dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
- return -EINVAL;
- }
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
@@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.group_size = track->group_size;
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
- array_check.nsamples = track->nsamples;
+ array_check.nsamples = nsamples;
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
@@ -420,7 +431,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format);
+ tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
+ r600_fmt_get_blocksize(format) * nsamples;
switch (array_mode) {
default:
case V_0280A0_ARRAY_LINEAR_GENERAL:
@@ -441,7 +453,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n",
+ dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
__func__, i, array_mode,
track->cb_color_bo_offset[i], tmp,
radeon_bo_size(track->cb_color_bo[i]),
@@ -458,6 +470,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
+
+ /* FMASK/CMASK */
+ switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ case V_0280A0_TILE_DISABLE:
+ break;
+ case V_0280A0_FRAG_ENABLE:
+ if (track->nsamples > 1) {
+ uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
+ /* an 8x8 tile stores nsamples * log2(nsamples) bits per pixel,
+ * so one tile takes 8 * nsamples * log2(nsamples) bytes. */
+ uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
+
+ if (bytes + track->cb_color_frag_offset[i] >
+ radeon_bo_size(track->cb_color_frag_bo[i])) {
+ dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
+ "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, tile_max, bytes,
+ track->cb_color_frag_offset[i],
+ radeon_bo_size(track->cb_color_frag_bo[i]));
+ return -EINVAL;
+ }
+ }
+ /* fall through */
+ case V_0280A0_CLEAR_ENABLE:
+ {
+ uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
+ /* One block = 128x128 pixels, one 8x8 tile has 4 bits, so
+ * (128*128) / (8*8) / 2 = 128 bytes per block. */
+ uint32_t bytes = (block_max + 1) * 128;
+
+ if (bytes + track->cb_color_tile_offset[i] >
+ radeon_bo_size(track->cb_color_tile_bo[i])) {
+ dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
+ "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, block_max, bytes,
+ track->cb_color_tile_offset[i],
+ radeon_bo_size(track->cb_color_tile_bo[i]));
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+ return -EINVAL;
+ }
return 0;
}
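The new FMASK/CMASK bounds checks derive the required buffer sizes from the CB_COLORn_MASK fields: an FMASK tile covers 8x8 pixels with nsamples * log2(nsamples) bits per pixel, and a CMASK block covers 128x128 pixels at 4 bits per 8x8 tile, i.e. 128 bytes per block. The same arithmetic as a stand-alone check with invented register values:

#include <stdint.h>
#include <stdio.h>

/* FMASK: 8x8-pixel tiles, nsamples * log2(nsamples) bits per pixel,
 * so one tile is 64 * nsamples * log2(nsamples) bits = 8 * ... bytes. */
static uint64_t fmask_bytes(uint32_t tile_max, uint32_t nsamples, uint32_t log_nsamples)
{
	return (uint64_t)nsamples * log_nsamples * 8 * (tile_max + 1);
}

/* CMASK: one block covers 128x128 pixels, 4 bits per 8x8 tile,
 * (128*128) / (8*8) tiles * 4 bits = 128 bytes per block. */
static uint64_t cmask_bytes(uint32_t block_max)
{
	return (uint64_t)(block_max + 1) * 128;
}

int main(void)
{
	/* invented values: 8x MSAA, FMASK_TILE_MAX = 1023, CMASK_BLOCK_MAX = 15 */
	uint32_t nsamples = 8, log_nsamples = 3;

	printf("FMASK needs %llu bytes\n",
	       (unsigned long long)fmask_bytes(1023, nsamples, log_nsamples));
	printf("CMASK needs %llu bytes\n",
	       (unsigned long long)cmask_bytes(15));
	/* a buffer smaller than bytes + offset would be rejected, as in the checks above */
	return 0;
}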
@@ -566,7 +623,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
- tmp = ntiles * bpe * 64 * nviews;
+ tmp = ntiles * bpe * 64 * nviews * track->nsamples;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
array_mode,
@@ -746,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
*/
if (track->cb_dirty) {
tmp = track->cb_target_mask;
+
+ /* We must check both colorbuffers for RESOLVE. */
+ if (track->is_resolve) {
+ tmp |= 0xff;
+ }
+
for (i = 0; i < 8; i++) {
if ((tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
@@ -764,8 +827,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/* Check depth buffer */
- if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
- G_028800_Z_ENABLE(track->db_depth_control))) {
+ if (track->db_dirty &&
+ G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
+ (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control))) {
r = r600_cs_track_validate_db(p);
if (r)
return r;
@@ -1229,9 +1294,15 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028C04_PA_SC_AA_CONFIG:
tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ track->log_nsamples = tmp;
track->nsamples = 1 << tmp;
track->cb_dirty = true;
break;
+ case R_028808_CB_COLOR_CONTROL:
+ tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
+ track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
+ track->cb_dirty = true;
+ break;
case R_0280A0_CB_COLOR0_INFO:
case R_0280A4_CB_COLOR1_INFO:
case R_0280A8_CB_COLOR2_INFO:
@@ -1310,16 +1381,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
}
- ib[idx] = track->cb_color_base_last[tmp];
track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+ track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
+ ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_frag_bo[tmp] = reloc->robj;
+ track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
}
break;
case R_0280C0_CB_COLOR0_TILE:
@@ -1336,16 +1412,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
}
- ib[idx] = track->cb_color_base_last[tmp];
track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+ track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
+ ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_tile_bo[tmp] = reloc->robj;
+ track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
+ }
+ break;
+ case R_028100_CB_COLOR0_MASK:
+ case R_028104_CB_COLOR1_MASK:
+ case R_028108_CB_COLOR2_MASK:
+ case R_02810C_CB_COLOR3_MASK:
+ case R_028110_CB_COLOR4_MASK:
+ case R_028114_CB_COLOR5_MASK:
+ case R_028118_CB_COLOR6_MASK:
+ case R_02811C_CB_COLOR7_MASK:
+ tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
+ track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
+ if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
+ track->cb_dirty = true;
}
break;
case CB_COLOR0_BASE:
@@ -1490,7 +1585,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level)
}
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
- unsigned w0, unsigned h0, unsigned d0, unsigned format,
+ unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
@@ -1518,7 +1613,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
depth = r600_mip_minify(d0, i);
- size = nbx * nby * blocksize;
+ size = nbx * nby * blocksize * nsamples;
if (nfaces)
size *= nfaces;
else
@@ -1557,13 +1652,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, llevel, blevel, w0, h0, d0;
- u32 word0, word1, l0_size, mipmap_size, word2, word3;
+ u32 dim, nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
u32 height_align, pitch, pitch_align, depth_align;
- u32 array, barray, larray;
+ u32 barray, larray;
u64 base_align;
struct array_mode_checker array_check;
u32 format;
+ bool is_array;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1581,12 +1677,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
word1 = radeon_get_ib_value(p, idx + 1);
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ word4 = radeon_get_ib_value(p, idx + 4);
+ word5 = radeon_get_ib_value(p, idx + 5);
+ dim = G_038000_DIM(word0);
w0 = G_038000_TEX_WIDTH(word0) + 1;
+ pitch = (G_038000_PITCH(word0) + 1) * 8;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
d0 = G_038004_TEX_DEPTH(word1);
+ format = G_038004_DATA_FORMAT(word1);
+ blevel = G_038010_BASE_LEVEL(word4);
+ llevel = G_038014_LAST_LEVEL(word5);
+ /* pitch in texels */
+ array_check.array_mode = G_038000_TILE_MODE(word0);
+ array_check.group_size = track->group_size;
+ array_check.nbanks = track->nbanks;
+ array_check.npipes = track->npipes;
+ array_check.nsamples = 1;
+ array_check.blocksize = r600_fmt_get_blocksize(format);
nfaces = 1;
- array = 0;
- switch (G_038000_DIM(word0)) {
+ is_array = false;
+ switch (dim) {
case V_038000_SQ_TEX_DIM_1D:
case V_038000_SQ_TEX_DIM_2D:
case V_038000_SQ_TEX_DIM_3D:
@@ -1599,29 +1711,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
- array = 1;
+ is_array = true;
break;
- case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ is_array = true;
+ /* fall through */
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ array_check.nsamples = 1 << llevel;
+ llevel = 0;
+ break;
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- format = G_038004_DATA_FORMAT(word1);
if (!r600_fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
}
- /* pitch in texels */
- pitch = (G_038000_PITCH(word0) + 1) * 8;
- array_check.array_mode = G_038000_TILE_MODE(word0);
- array_check.group_size = track->group_size;
- array_check.nbanks = track->nbanks;
- array_check.npipes = track->npipes;
- array_check.nsamples = 1;
- array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1647,24 +1755,17 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
- word2 = radeon_get_ib_value(p, idx + 2) << 8;
- word3 = radeon_get_ib_value(p, idx + 3) << 8;
-
- word0 = radeon_get_ib_value(p, idx + 4);
- word1 = radeon_get_ib_value(p, idx + 5);
- blevel = G_038010_BASE_LEVEL(word0);
- llevel = G_038014_LAST_LEVEL(word1);
if (blevel > llevel) {
dev_warn(p->dev, "texture blevel %d > llevel %d\n",
blevel, llevel);
}
- if (array == 1) {
- barray = G_038014_BASE_ARRAY(word1);
- larray = G_038014_LAST_ARRAY(word1);
+ if (is_array) {
+ barray = G_038014_BASE_ARRAY(word5);
+ larray = G_038014_LAST_ARRAY(word5);
nfaces = larray - barray + 1;
}
- r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
@@ -1677,7 +1778,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word3 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 82a0a4c919c0..e3558c3ef24a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -519,8 +519,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed) {
/* if irq is available use it */
- rdev->irq.afmt[dig->afmt->id] = true;
- radeon_irq_set(rdev);
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
}
dig->afmt->enabled = true;
@@ -556,8 +555,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
offset, radeon_encoder->encoder_id);
/* disable irq */
- rdev->irq.afmt[dig->afmt->id] = false;
- radeon_irq_set(rdev);
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
/* Older chipsets not handled by AtomBIOS */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 025fd5b6c08c..fa6f37099ba9 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -66,6 +66,14 @@
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
+#define R_028808_CB_COLOR_CONTROL 0x28808
+#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
+#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
+#define C_028808_SPECIAL_OP 0xFFFFFF8F
+#define V_028808_SPECIAL_NORMAL 0x00
+#define V_028808_SPECIAL_DISABLE 0x01
+#define V_028808_SPECIAL_RESOLVE_BOX 0x07
+
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
#define CB_COLOR2_BASE 0x28048
@@ -92,6 +100,20 @@
#define R_028094_CB_COLOR5_VIEW 0x028094
#define R_028098_CB_COLOR6_VIEW 0x028098
#define R_02809C_CB_COLOR7_VIEW 0x02809C
+#define R_028100_CB_COLOR0_MASK 0x028100
+#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
+#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
+#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
+#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
+#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
+#define C_028100_FMASK_TILE_MAX 0x00000FFF
+#define R_028104_CB_COLOR1_MASK 0x028104
+#define R_028108_CB_COLOR2_MASK 0x028108
+#define R_02810C_CB_COLOR3_MASK 0x02810C
+#define R_028110_CB_COLOR4_MASK 0x028110
+#define R_028114_CB_COLOR5_MASK 0x028114
+#define R_028118_CB_COLOR6_MASK 0x028118
+#define R_02811C_CB_COLOR7_MASK 0x02811C
#define CB_COLOR0_INFO 0x280a0
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
@@ -153,6 +175,9 @@
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
+#define CP_STALLED_STAT1 0x8674
+#define CP_STALLED_STAT2 0x8678
+#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
@@ -599,6 +624,9 @@
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
+#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
+#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
@@ -1394,6 +1422,9 @@
#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
#define C_0280A0_TILE_MODE 0xFFF3FFFF
+#define V_0280A0_TILE_DISABLE 0
+#define V_0280A0_CLEAR_ENABLE 1
+#define V_0280A0_FRAG_ENABLE 2
#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fefcca55c1eb..59a15315ae9f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,7 +113,6 @@ extern int radeon_lockup_timeout;
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
-#define RADEON_FENCE_NOTEMITED_SEQ (~0LL)
/* internal ring indices */
/* r1xx+ has gfx CP ring */
@@ -143,65 +142,8 @@ struct radeon_device;
/*
* BIOS.
*/
-#define ATRM_BIOS_PAGE 4096
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_atrm_supported(struct pci_dev *pdev);
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
-#else
-static inline bool radeon_atrm_supported(struct pci_dev *pdev)
-{
- return false;
-}
-
-static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
- return -EINVAL;
-}
-#endif
bool radeon_get_bios(struct radeon_device *rdev);
-
-/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
- struct mutex mutex;
- struct task_struct *owner;
- int level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
- mutex_init(&mutex->mutex);
- mutex->owner = NULL;
- mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
- if (mutex_trylock(&mutex->mutex)) {
- /* The mutex was unlocked before, so it's ours now */
- mutex->owner = current;
- } else if (mutex->owner != current) {
- /* Another process locked the mutex, take it */
- mutex_lock(&mutex->mutex);
- mutex->owner = current;
- }
- /* Otherwise the mutex was already locked by this process */
-
- mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
- if (--mutex->level > 0)
- return;
-
- mutex->owner = NULL;
- mutex_unlock(&mutex->mutex);
-}
-
-
/*
* Dummy page
*/
@@ -258,8 +200,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- /* seq is protected by ring emission lock */
- uint64_t seq;
+ /* sync_seq is protected by ring emission lock */
+ uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
bool initialized;
@@ -277,19 +219,39 @@ struct radeon_fence {
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
+int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
+bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
+void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
+static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
+ struct radeon_fence *b)
+{
+ if (!a) {
+ return b;
+ }
+
+ if (!b) {
+ return a;
+ }
+
+ BUG_ON(a->ring != b->ring);
+
+ if (a->seq > b->seq) {
+ return a;
+ } else {
+ return b;
+ }
+}
/*
* Tiling registers
@@ -323,6 +285,7 @@ struct radeon_bo_va {
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
+ struct radeon_fence *fence;
bool valid;
};
@@ -385,7 +348,7 @@ struct radeon_bo_list {
* alignment).
*/
struct radeon_sa_manager {
- spinlock_t lock;
+ wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
@@ -451,10 +414,9 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- bool sync_to[RADEON_NUM_RINGS],
- int dst_ring);
+ int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
+ struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
/*
@@ -597,21 +559,18 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_AFMT_BLOCKS 6
struct radeon_irq {
- bool installed;
- bool sw_int[RADEON_NUM_RINGS];
- bool crtc_vblank_int[RADEON_MAX_CRTCS];
- bool pflip[RADEON_MAX_CRTCS];
- wait_queue_head_t vblank_queue;
- bool hpd[RADEON_MAX_HPD_PINS];
- bool gui_idle;
- bool gui_idle_acked;
- wait_queue_head_t idle_queue;
- bool afmt[RADEON_MAX_AFMT_BLOCKS];
- spinlock_t sw_lock;
- int sw_refcount[RADEON_NUM_RINGS];
- union radeon_irq_stat_regs stat_regs;
- spinlock_t pflip_lock[RADEON_MAX_CRTCS];
- int pflip_refcount[RADEON_MAX_CRTCS];
+ bool installed;
+ spinlock_t lock;
+ atomic_t ring_int[RADEON_NUM_RINGS];
+ bool crtc_vblank_int[RADEON_MAX_CRTCS];
+ atomic_t pflip[RADEON_MAX_CRTCS];
+ wait_queue_head_t vblank_queue;
+ bool hpd[RADEON_MAX_HPD_PINS];
+ bool gui_idle;
+ bool gui_idle_acked;
+ wait_queue_head_t idle_queue;
+ bool afmt[RADEON_MAX_AFMT_BLOCKS];
+ union radeon_irq_stat_regs stat_regs;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
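The struct radeon_irq change above replaces the per-ring bool sw_int[] plus sw_lock/sw_refcount[] bookkeeping with plain atomic_t counters. The usual pattern for such a counter is to enable the interrupt on the 0 -> 1 transition and disable it on the 1 -> 0 transition; a hedged user-space sketch with C11 atomics, where hw_set_ring_irq() is a stand-in rather than a radeon function:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ring_int;		/* how many users want this ring's IRQ */

static void hw_set_ring_irq(bool on)	/* stand-in for reprogramming the int cntl reg */
{
	printf("ring interrupt %s\n", on ? "enabled" : "disabled");
}

static void ring_irq_get(void)
{
	/* first user enables the interrupt */
	if (atomic_fetch_add(&ring_int, 1) == 0)
		hw_set_ring_irq(true);
}

static void ring_irq_put(void)
{
	/* last user disables it again */
	if (atomic_fetch_sub(&ring_int, 1) == 1)
		hw_set_ring_irq(false);
}

int main(void)
{
	ring_irq_get();		/* enabled */
	ring_irq_get();		/* no-op, refcount 2 */
	ring_irq_put();		/* no-op, refcount 1 */
	ring_irq_put();		/* disabled */
	return 0;
}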
@@ -620,6 +579,11 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);
/*
* CP & rings.
@@ -630,9 +594,11 @@ struct radeon_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
+ int ring;
struct radeon_fence *fence;
unsigned vm_id;
bool is_const_ib;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
@@ -642,6 +608,9 @@ struct radeon_ring {
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
+ unsigned rptr_save_reg;
+ u64 next_rptr_gpu_addr;
+ volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
@@ -657,6 +626,7 @@ struct radeon_ring {
u32 ptr_reg_shift;
u32 ptr_reg_mask;
u32 nop;
+ u32 idx;
};
/*
@@ -690,6 +660,7 @@ struct radeon_vm_funcs {
};
struct radeon_vm_manager {
+ struct mutex lock;
struct list_head lru_vm;
uint32_t use_bitmap;
struct radeon_sa_manager sa_manager;
@@ -718,13 +689,10 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
- unsigned rptr_offs;
- unsigned wptr;
- unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t ptr_mask;
- spinlock_t lock;
+ atomic_t lock;
bool enabled;
};
@@ -757,8 +725,6 @@ struct r600_blit {
u32 state_len;
};
-void r600_blit_suspend(struct radeon_device *rdev);
-
/*
* SI RLC stuff
*/
@@ -774,14 +740,14 @@ struct si_rlc {
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
-int radeon_ib_pool_start(struct radeon_device *rdev);
-int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
@@ -793,6 +759,10 @@ int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data);
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
@@ -891,6 +861,7 @@ struct radeon_wb {
};
#define RADEON_WB_SCRATCH_OFFSET 0
+#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
@@ -1039,11 +1010,12 @@ struct radeon_power_state {
struct radeon_pm {
struct mutex mutex;
+ /* write locked while reprogramming mclk */
+ struct rw_semaphore mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
- bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -1192,20 +1164,20 @@ struct radeon_asic {
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -1467,6 +1439,7 @@ struct radeon_device {
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
+ struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
enum radeon_family family;
@@ -1512,7 +1485,6 @@ struct radeon_device {
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
- struct radeon_mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool shutdown;
@@ -1534,7 +1506,6 @@ struct radeon_device {
struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- struct mutex vram_mutex;
bool audio_enabled;
struct r600_audio audio_status; /* audio stuff */
struct notifier_block acpi_nb;
@@ -1548,6 +1519,7 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
+ struct mutex gpu_clock_mutex;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1748,11 +1720,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
-#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
-#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
-#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
+#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
+#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
/* Common functions */
/* AGP */
@@ -1785,8 +1757,6 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_manager_start(struct radeon_device *rdev);
-int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f533df5f7d50..973417c4b014 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -40,6 +40,16 @@
/*
* Registers accessors functions.
*/
+/**
+ * radeon_invalid_rreg - dummy reg read function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function. Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
@@ -47,6 +57,16 @@ static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
return 0;
}
+/**
+ * radeon_invalid_wreg - dummy reg write function
+ *
+ * @rdev: radeon device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function. Used for register blocks
+ * that certain asics don't have (all asics).
+ */
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
@@ -54,6 +74,14 @@ static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32
BUG_ON(1);
}
+/**
+ * radeon_register_accessor_init - sets up the register accessor callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the register accessor callbacks for various register
+ * apertures. Not all asics have all apertures (all asics).
+ */
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
@@ -102,6 +130,14 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
/* helper to disable agp */
+/**
+ * radeon_agp_disable - AGP disable helper function
+ *
+ * @rdev: radeon device pointer
+ *
+ * Removes AGP flags and changes the gart callbacks on AGP
+ * cards when using the internal gart rather than AGP (all asics).
+ */
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
@@ -1608,6 +1644,16 @@ static struct radeon_asic si_asic = {
},
};
+/**
+ * radeon_asic_init - register asic specific callbacks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Registers the appropriate asic specific callbacks for each
+ * chip family. Also sets other asic-specific info like the number
+ * of crtcs and the register aperture accessors (all asics).
+ * Returns 0 for success.
+ */
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e76a941ef14e..18c38d14c8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -79,7 +79,7 @@ int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -103,7 +103,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
-void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -144,7 +143,7 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
- struct radeon_fence *fence);
+ struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -256,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
* rv515
*/
struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
};
+
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -318,7 +314,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence *fence);
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -363,14 +359,16 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_sa_bo **vb);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence,
- struct radeon_sa_bo *vb);
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev);
* evergreen
*/
struct evergreen_mc_save {
- u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
- u32 crtc_control[6];
};
+
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index b1e3820df363..d67d4f3eb6f4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if ((dev->pdev->device == 0x9802) &&
+ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
@@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+ union igp_info *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
- igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+ igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
- switch (id) {
- case ASIC_INTERNAL_SS_ON_TMDS:
- percentage = le16_to_cpu(igp_info->usDVISSPercentage);
- rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+ switch (crev) {
+ case 6:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
break;
- case ASIC_INTERNAL_SS_ON_HDMI:
- percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
- rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+ case 7:
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_TMDS:
+ percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_HDMI:
+ percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
+ rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
+ break;
+ }
break;
- case ASIC_INTERNAL_SS_ON_LVDS:
- percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
- rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
if (percentage)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 98724fcb0088..2a2cf0b88a28 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -30,57 +30,8 @@ static struct radeon_atpx_priv {
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle atpx_handle;
- acpi_handle atrm_handle;
} radeon_atpx_priv;
-/* retrieve the ROM in 4k blocks */
-static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
- int offset, int len)
-{
- acpi_status status;
- union acpi_object atrm_arg_elements[2], *obj;
- struct acpi_object_list atrm_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
- atrm_arg.count = 2;
- atrm_arg.pointer = &atrm_arg_elements[0];
-
- atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
- atrm_arg_elements[0].integer.value = offset;
-
- atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
- atrm_arg_elements[1].integer.value = len;
-
- status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
- return -ENODEV;
- }
-
- obj = (union acpi_object *)buffer.pointer;
- memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
- len = obj->buffer.length;
- kfree(buffer.pointer);
- return len;
-}
-
-bool radeon_atrm_supported(struct pci_dev *pdev)
-{
- /* get the discrete ROM only via ATRM */
- if (!radeon_atpx_priv.atpx_detected)
- return false;
-
- if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
- return false;
- return true;
-}
-
-
-int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
- return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
-}
-
static int radeon_atpx_get_version(acpi_handle handle)
{
acpi_status status;
@@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
- acpi_handle dhandle, atpx_handle, atrm_handle;
+ acpi_handle dhandle, atpx_handle;
acpi_status status;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
@@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
if (ACPI_FAILURE(status))
return false;
- status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (ACPI_FAILURE(status))
- return false;
-
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx_handle = atpx_handle;
- radeon_atpx_priv.atrm_handle = atrm_handle;
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 364f5b1a04b9..bedda9caadd9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -45,20 +45,14 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
- r = radeon_fence_create(rdev, &fence, radeon_copy_dma_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
- r = radeon_fence_create(rdev, &fence, radeon_copy_blit_ring_index(rdev));
- if (r)
- return r;
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
- fence);
+ &fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 501f4881e5aa..d306cc8fdeaa 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -32,6 +32,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
/*
* BIOS.
*/
@@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
+#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
+/* retrieve the ROM in 4k blocks */
+#define ATRM_BIOS_PAGE 4096
+/**
+ * radeon_atrm_call - fetch a chunk of the vbios
+ *
+ * @atrm_handle: acpi ATRM handle
+ * @bios: vbios image pointer
+ * @offset: offset of vbios image data to fetch
+ * @len: length of vbios image data to fetch
+ *
+ * Executes ATRM to fetch a chunk of the discrete
+ * vbios image on PX systems (all asics).
+ * Returns the length of the buffer fetched.
+ */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object atrm_arg_elements[2], *obj;
+ struct acpi_object_list atrm_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atrm_arg.count = 2;
+ atrm_arg.pointer = &atrm_arg_elements[0];
+
+ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[0].integer.value = offset;
+
+ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+ len = obj->buffer.length;
+ kfree(buffer.pointer);
+ return len;
+}
+
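As a usage illustration (a sketch by the editor, not part of this patch): radeon_atrm_call() is meant to be driven in ATRM_BIOS_PAGE sized steps until it returns a short read, exactly as radeon_atrm_get_bios() below does. The helper name example_fetch_vbios is hypothetical.

/* Sketch: page through the discrete vbios via ATRM, stopping on a
 * short read (end of image or error). Mirrors radeon_atrm_get_bios().
 */
static void example_fetch_vbios(acpi_handle atrm_handle, uint8_t *bios, int size)
{
	int i, ret;

	for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
		ret = radeon_atrm_call(atrm_handle, bios,
				       i * ATRM_BIOS_PAGE, ATRM_BIOS_PAGE);
		if (ret < ATRM_BIOS_PAGE)
			break;
	}
}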
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
int size = 256 * 1024;
int i;
+ struct pci_dev *pdev = NULL;
+ acpi_handle dhandle, atrm_handle;
+ acpi_status status;
+ bool found = false;
+
+ /* ATRM is for the discrete card only */
+ if (rdev->flags & RADEON_IS_IGP)
+ return false;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ continue;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (!ACPI_FAILURE(status)) {
+ found = true;
+ break;
+ }
+ }
- if (!radeon_atrm_supported(rdev->pdev))
+ if (!found)
return false;
rdev->bios = kmalloc(size, GFP_KERNEL);
@@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
- ret = radeon_atrm_get_bios_chunk(rdev->bios,
- (i * ATRM_BIOS_PAGE),
- ATRM_BIOS_PAGE);
+ ret = radeon_atrm_call(atrm_handle,
+ rdev->bios,
+ (i * ATRM_BIOS_PAGE),
+ ATRM_BIOS_PAGE);
if (ret < ATRM_BIOS_PAGE)
break;
}
@@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
+#else
+static inline bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ return false;
+}
+#endif
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
@@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
+#ifdef CONFIG_ACPI
+static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+ bool ret = false;
+ struct acpi_table_header *hdr;
+ acpi_size tbl_size;
+ UEFI_ACPI_VFCT *vfct;
+ GOP_VBIOS_CONTENT *vbios;
+ VFCT_IMAGE_HEADER *vhdr;
+
+ if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
+ return false;
+ if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+ goto out_unmap;
+ }
+
+ vfct = (UEFI_ACPI_VFCT *)hdr;
+ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
+ DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ goto out_unmap;
+ }
+
+ vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
+ vhdr = &vbios->VbiosHeader;
+ DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
+ vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
+ vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
+
+ if (vhdr->PCIBus != rdev->pdev->bus->number ||
+ vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
+ vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
+ vhdr->VendorID != rdev->pdev->vendor ||
+ vhdr->DeviceID != rdev->pdev->device) {
+ DRM_INFO("ACPI VFCT table is not for this card\n");
+ goto out_unmap;
+ };
+
+ if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
+ DRM_ERROR("ACPI VFCT image truncated\n");
+ goto out_unmap;
+ }
+
+ rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
+ ret = !!rdev->bios;
+
+out_unmap:
+ return ret;
+}
+#else
+static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
+{
+ return false;
+}
+#endif
bool radeon_get_bios(struct radeon_device *rdev)
{
@@ -484,6 +612,8 @@ bool radeon_get_bios(struct radeon_device *rdev)
r = radeon_atrm_get_bios(rdev);
if (r == false)
+ r = radeon_acpi_vfct_bios(rdev);
+ if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)
r = radeon_read_bios(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 576f4f6919f2..f75247d42ffd 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
return i2c;
}
+static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+ u16 offset;
+ u8 id, blocks, clk, data;
+ int i;
+
+ i2c.valid = false;
+
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ /* gpiopad */
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+ (1 << clk), (1 << data));
+ break;
+ }
+ }
+ }
+ return i2c;
+}
+
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
@@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
- u16 offset;
- u8 id, blocks, clk, data;
- int i;
-
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
- offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
- if (offset) {
- blocks = RBIOS8(offset + 2);
- for (i = 0; i < blocks; i++) {
- id = RBIOS8(offset + 3 + (i * 5) + 0);
- if (id == 136) {
- clk = RBIOS8(offset + 3 + (i * 5) + 3);
- data = RBIOS8(offset + 3 + (i * 5) + 4);
- /* gpiopad */
- i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- (1 << clk), (1 << data));
- rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
- break;
- }
- }
- }
+ /* gpiopad */
+ i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ if (i2c.valid)
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
} else if ((rdev->family == CHIP_R200) ||
(rdev->family >= CHIP_R300)) {
/* 0x68 */
@@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector = (tmp >> 12) & 0xf;
ddc_type = (tmp >> 8) & 0xf;
- ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
+ if (ddc_type == 5)
+ ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
+ else
+ ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2914c5761cfc..895e628b60f8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- int saved_dpms = connector->dpms;
-
- /* Only turn off the display it it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- else if (radeon_dp_needs_link_train(radeon_connector))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- connector->dpms = saved_dpms;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ /* if existing sink type was not DP no need to retrain */
+ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return;
+
+ /* first get sink type as it may be reset after (un)plug */
+ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ int saved_dpms = connector->dpms;
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ } else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* set it to OFF so that drm_helper_connector_dpms()
+ * won't return immediately since the current state
+ * is ON at this point.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ connector->dpms = saved_dpms;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 142f89462aa4..b4a0db24f4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -115,36 +115,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- bool need_sync = false;
- int i, r;
+ int i;
for (i = 0; i < p->nrelocs; i++) {
- struct radeon_fence *fence;
+ struct radeon_fence *a, *b;
if (!p->relocs[i].robj || !p->relocs[i].robj->tbo.sync_obj)
continue;
- fence = p->relocs[i].robj->tbo.sync_obj;
- if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
- sync_to_ring[fence->ring] = true;
- need_sync = true;
- }
- }
-
- if (!need_sync) {
- return 0;
- }
-
- r = radeon_semaphore_create(p->rdev, &p->ib.semaphore);
- if (r) {
- return r;
+ a = p->relocs[i].robj->tbo.sync_obj;
+ b = p->ib.sync_to[a->ring];
+ p->ib.sync_to[a->ring] = radeon_fence_later(a, b);
}
-
- return radeon_semaphore_sync_rings(p->rdev, p->ib.semaphore,
- sync_to_ring, p->ring);
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -294,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return 0;
}
+static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser,
+ struct radeon_fence *fence)
+{
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_list *lobj;
+
+ if (parser->chunk_ib_idx == -1) {
+ return;
+ }
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) {
+ return;
+ }
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ struct radeon_bo_va *bo_va;
+ struct radeon_bo *rbo = lobj->bo;
+
+ bo_va = radeon_bo_va(rbo, vm);
+ radeon_fence_unref(&bo_va->fence);
+ bo_va->fence = radeon_fence_ref(fence);
+ }
+}
+
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -306,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (!error)
+ if (!error) {
+ /* fence all bo va before ttm_eu_fence_buffer_objects so the bos are still reserved */
+ radeon_bo_vm_fence_va(parser, parser->ib.fence);
ttm_eu_fence_buffer_objects(&parser->validated,
parser->ib.fence);
- else
+ } else {
ttm_eu_backoff_reservation(&parser->validated);
+ }
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
@@ -368,16 +379,13 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
parser->ib.vm_id = 0;
- r = radeon_ib_schedule(rdev, &parser->ib);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
- return 0;
+ return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
@@ -407,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (parser->chunk_ib_idx == -1)
return 0;
-
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
@@ -459,6 +466,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_bind(rdev, vm);
if (r) {
@@ -468,30 +476,26 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
if (r) {
goto out;
}
- r = radeon_cs_sync_rings(parser);
- if (r) {
- DRM_ERROR("Failed to synchronize rings !\n");
- }
+ radeon_cs_sync_rings(parser);
+
+ parser->ib.vm_id = vm->id;
+ /* ib pool is bound at 0 in virtual address space,
+ * so gpu_addr is the offset inside the pool bo
+ */
+ parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
parser->const_ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
+ /* ib pool is bound at 0 in virtual address space,
+ * so gpu_addr is the offset inside the pool bo
*/
parser->const_ib.gpu_addr = parser->const_ib.sa_bo->soffset;
- r = radeon_ib_schedule(rdev, &parser->const_ib);
- if (r)
- goto out;
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+ } else {
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
}
- parser->ib.vm_id = vm->id;
- /* ib pool is bind at 0 in virtual address space to gpu_addr is the
- * offset inside the pool bo
- */
- parser->ib.gpu_addr = parser->ib.sa_bo->soffset;
- parser->ib.is_const_ib = false;
- r = radeon_ib_schedule(rdev, &parser->ib);
out:
if (!r) {
if (vm->fence) {
@@ -499,7 +503,8 @@ out:
}
vm->fence = radeon_fence_ref(parser->ib.fence);
}
- mutex_unlock(&fpriv->vm.mutex);
+ mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
return r;
}
@@ -519,9 +524,9 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct radeon_cs_parser parser;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
+ down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
- radeon_mutex_unlock(&rdev->cs_mutex);
+ up_read(&rdev->exclusive_lock);
return -EBUSY;
}
/* initialize parser */
@@ -534,8 +539,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_parser_relocs(&parser);
@@ -543,8 +548,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
r = radeon_cs_ib_chunk(rdev, &parser);
@@ -557,8 +562,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
out:
radeon_cs_parser_fini(&parser, r);
+ up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 42acc6449dd6..8794744cdf1a 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
- WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
@@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
if (ASIC_IS_DCE4(rdev)) {
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
- EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
+ EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
@@ -262,8 +264,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
if (!(cursor_end & 0x7f))
w--;
}
- if (w <= 0)
+ if (w <= 0) {
w = 1;
+ cursor_end = x - xorigin + w;
+ if (!(cursor_end & 0x7f)) {
+ x--;
+ WARN_ON_ONCE(x < 0);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 066c98b888a5..7a3daebd732d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -96,8 +96,12 @@ static const char radeon_family_name[][16] = {
"LAST",
};
-/*
- * Clear GPU surface registers.
+/**
+ * radeon_surface_init - Clear GPU surface registers.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clear GPU surface registers (r1xx-r5xx).
*/
void radeon_surface_init(struct radeon_device *rdev)
{
@@ -119,6 +123,13 @@ void radeon_surface_init(struct radeon_device *rdev)
/*
* GPU scratch registers helpers function.
*/
+/**
+ * radeon_scratch_init - Init scratch register driver information.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init CP scratch register driver information (r1xx-r5xx)
+ */
void radeon_scratch_init(struct radeon_device *rdev)
{
int i;
@@ -136,6 +147,15 @@ void radeon_scratch_init(struct radeon_device *rdev)
}
}
+/**
+ * radeon_scratch_get - Allocate a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Allocate a CP scratch register for use by the driver (all asics).
+ * Returns 0 on success or -EINVAL on failure.
+ */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
int i;
@@ -150,6 +170,14 @@ int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
return -EINVAL;
}
+/**
+ * radeon_scratch_free - Free a scratch register
+ *
+ * @rdev: radeon_device pointer
+ * @reg: scratch register mmio offset
+ *
+ * Free a CP scratch register allocated for use by the driver (all asics)
+ */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
int i;
@@ -162,6 +190,20 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
+/*
+ * radeon_wb_*()
+ * Writeback is the method by which the GPU updates special pages
+ * in memory with the status of certain GPU events (fences, ring pointers,
+ * etc.).
+ */
+
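To make the writeback idea concrete, here is a minimal sketch (editor's illustration, not part of the patch) of how a status value is read either from the writeback page or from a scratch register; rdev->wb.enabled, drv->cpu_addr and drv->scratch_reg are taken from radeon_fence_read() later in this series, and example_read_status is a hypothetical helper.

/* Sketch: prefer the writeback page in system memory, fall back to an
 * MMIO scratch register when writeback is disabled.
 */
static u32 example_read_status(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

	if (rdev->wb.enabled || !drv->scratch_reg)
		return le32_to_cpu(*drv->cpu_addr);
	return RREG32(drv->scratch_reg);
}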
+/**
+ * radeon_wb_disable - Disable Writeback
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback (all asics). Used for suspend.
+ */
void radeon_wb_disable(struct radeon_device *rdev)
{
int r;
@@ -177,6 +219,14 @@ void radeon_wb_disable(struct radeon_device *rdev)
rdev->wb.enabled = false;
}
+/**
+ * radeon_wb_fini - Disable Writeback and free memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Disables Writeback and frees the Writeback memory (all asics).
+ * Used at driver shutdown.
+ */
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
@@ -187,6 +237,15 @@ void radeon_wb_fini(struct radeon_device *rdev)
}
}
+/**
+ * radeon_wb_init - Init Writeback driver info and allocate memory
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes Writeback and allocates the Writeback memory (all asics).
+ * Used at driver startup.
+ * Returns 0 on success or a negative error code on failure.
+ */
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
@@ -355,6 +414,15 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
+/**
+ * radeon_card_posted - check if the hw has already been initialized
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic has been initialized (all asics).
+ * Used at driver startup.
+ * Returns true if initialized or false if not.
+ */
bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
@@ -404,6 +472,14 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+/**
+ * radeon_update_bandwidth_info - update display bandwidth params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Used when sclk/mclk are switched or display modes are set.
+ * The params are used to calculate display watermarks (all asics).
+ */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
@@ -424,6 +500,15 @@ void radeon_update_bandwidth_info(struct radeon_device *rdev)
}
}
+/**
+ * radeon_boot_test_post_card - check and possibly initialize the hw
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check if the asic is initialized and if not, attempt to initialize
+ * it (all asics).
+ * Returns true if initialized or false if not.
+ */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
@@ -442,6 +527,16 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
}
}
+/**
+ * radeon_dummy_page_init - init dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for GART entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
if (rdev->dummy_page.page)
@@ -460,6 +555,13 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_dummy_page_fini - free dummy page used by the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
@@ -472,6 +574,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes them to program specific
+ * actions (set display modes, asic init, etc.). See radeon_atombios.c,
+ * atombios.h, and atom.c
+ */
+
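For orientation, a sketch (editor's illustration, not part of the patch) of how these cail_*() accessors get hooked up for the ATOM interpreter; the struct card_info field names are assumed from their use in radeon_atombios_init() and example_wire_atom_callbacks is a hypothetical helper.

/* Sketch: hand the ATOM interpreter a table of register accessors so
 * bytecode tables can read/write MMIO, IO, MC and PLL registers.
 */
static void example_wire_atom_callbacks(struct card_info *info)
{
	info->reg_read    = cail_reg_read;
	info->reg_write   = cail_reg_write;
	info->ioreg_read  = cail_ioreg_read;
	info->ioreg_write = cail_ioreg_write;
	info->mc_read     = cail_mc_read;
	info->mc_write    = cail_mc_write;
	info->pll_read    = cail_pll_read;
	info->pll_write   = cail_pll_write;
}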
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -481,6 +600,15 @@ static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -488,6 +616,15 @@ static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
rdev->pll_wreg(rdev, reg, val);
}
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -497,6 +634,15 @@ static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -504,6 +650,15 @@ static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
rdev->mc_wreg(rdev, reg, val);
}
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -511,6 +666,15 @@ static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
WREG32(reg*4, val);
}
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -520,6 +684,15 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -527,6 +700,15 @@ static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
WREG32_IO(reg*4, val);
}
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
@@ -536,6 +718,16 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+/**
+ * radeon_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
@@ -569,6 +761,15 @@ int radeon_atombios_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
@@ -578,17 +779,50 @@ void radeon_atombios_fini(struct radeon_device *rdev)
kfree(rdev->mode_info.atom_card_info);
}
+/* COMBIOS */
+/*
+ * COMBIOS is the bios format prior to ATOM. It provides
+ * command tables similar to ATOM, but doesn't have a unified
+ * parser. See radeon_combios.c
+ */
+
+/**
+ * radeon_combios_init - init the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initializes the driver info for combios (r1xx-r3xx).
+ * Returns 0 on success.
+ * Called at driver startup.
+ */
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
return 0;
}
+/**
+ * radeon_combios_fini - free the driver info for combios
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Frees the driver info for combios (r1xx-r3xx).
+ * Called at driver shutdown.
+ */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
-/* if we get transitioned to only one device, tak VGA back */
+/* if we get transitioned to only one device, take VGA back */
+/**
+ * radeon_vga_set_decode - enable/disable vga decode
+ *
+ * @cookie: radeon_device pointer
+ * @state: enable/disable vga decode
+ *
+ * Enable/disable vga decode (all asics).
+ * Returns VGA resource flags.
+ */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
@@ -600,6 +834,14 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+/**
+ * radeon_check_arguments - validate module params
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Validates certain module parameters and updates
+ * the associated values used by the driver (all asics).
+ */
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
@@ -666,6 +908,15 @@ void radeon_check_arguments(struct radeon_device *rdev)
}
}
+/**
+ * radeon_switcheroo_set_state - set switcheroo state
+ *
+ * @pdev: pci dev pointer
+ * @state: vga switcheroo state
+ *
+ * Callback for the switcheroo driver. Suspends or resumes the
+ * asic before or after it is powered up using ACPI methods.
+ */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -686,6 +937,15 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
}
}
+/**
+ * radeon_switcheroo_can_switch - see if switcheroo state can change
+ *
+ * @pdev: pci dev pointer
+ *
+ * Callback for the switcheroo driver. Check if the switcheroo
+ * state can be changed.
+ * Returns true if the state can be changed, false if not.
+ */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -703,6 +963,18 @@ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
.can_switch = radeon_switcheroo_can_switch,
};
+/**
+ * radeon_device_init - initialize the driver
+ *
+ * @rdev: radeon_device pointer
+ * @ddev: drm dev pointer
+ * @pdev: pci dev pointer
+ * @flags: driver flags
+ *
+ * Initializes the driver info and hw (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver startup.
+ */
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -721,6 +993,10 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->accel_working = false;
+ /* set up ring ids */
+ for (i = 0; i < RADEON_NUM_RINGS; i++) {
+ rdev->ring[i].idx = i;
+ }
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
@@ -728,20 +1004,21 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- radeon_mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
- if (rdev->family >= CHIP_R600)
- spin_lock_init(&rdev->ih.lock);
+ atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
- mutex_init(&rdev->vram_mutex);
+ mutex_init(&rdev->gpu_clock_mutex);
+ init_rwsem(&rdev->pm.mclk_lock);
+ init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
r = radeon_gem_init(rdev);
if (r)
return r;
/* initialize vm here */
+ mutex_init(&rdev->vm_manager.lock);
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
@@ -774,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if ((rdev->flags & RADEON_IS_PCI) &&
- (rdev->family < CHIP_RS400))
+ (rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
dma_bits = rdev->need_dma32 ? 32 : 40;
@@ -822,6 +1099,10 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
* with fallback to PCI or PCIE GART
@@ -847,6 +1128,14 @@ int radeon_device_init(struct radeon_device *rdev,
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+/**
+ * radeon_device_fini - tear down the driver
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the driver info (all asics).
+ * Called at driver shutdown.
+ */
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -868,6 +1157,16 @@ void radeon_device_fini(struct radeon_device *rdev)
/*
* Suspend & resume.
*/
+/**
+ * radeon_suspend_kms - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @state: suspend state
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
struct radeon_device *rdev;
@@ -942,10 +1241,20 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
return 0;
}
+/**
+ * radeon_resume_kms - initiate device resume
+ *
+ * @dev: drm dev pointer
+ *
+ * Bring the hw back to operating state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver resume.
+ */
int radeon_resume_kms(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
+ int r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -960,6 +1269,11 @@ int radeon_resume_kms(struct drm_device *dev)
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
@@ -984,30 +1298,80 @@ int radeon_resume_kms(struct drm_device *dev)
return 0;
}
+/**
+ * radeon_gpu_reset - reset the asic
+ *
+ * @rdev: radeon device pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Returns 0 for success or an error on failure.
+ */
int radeon_gpu_reset(struct radeon_device *rdev)
{
- int r;
+ unsigned ring_sizes[RADEON_NUM_RINGS];
+ uint32_t *ring_data[RADEON_NUM_RINGS];
+
+ bool saved = false;
+
+ int i, r;
int resched;
+ down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+ &ring_data[i]);
+ if (ring_sizes[i]) {
+ saved = true;
+ dev_info(rdev->dev, "Saved %d dwords of commands "
+ "on ring %d.\n", ring_sizes[i], i);
+ }
+ }
+
+retry:
r = radeon_asic_reset(rdev);
if (!r) {
- dev_info(rdev->dev, "GPU reset succeed\n");
+ dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
- radeon_restore_bios_scratch_regs(rdev);
- drm_helper_resume_force_mode(rdev->ddev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
+ radeon_restore_bios_scratch_regs(rdev);
+ drm_helper_resume_force_mode(rdev->ddev);
+
+ if (!r) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ radeon_ring_restore(rdev, &rdev->ring[i],
+ ring_sizes[i], ring_data[i]);
+ ring_sizes[i] = 0;
+ ring_data[i] = NULL;
+ }
+
+ r = radeon_ib_ring_tests(rdev);
+ if (r) {
+ dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+ if (saved) {
+ saved = false;
+ radeon_suspend(rdev);
+ goto retry;
+ }
+ }
+ } else {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ kfree(ring_data[i]);
+ }
+ }
+
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
+ up_write(&rdev->exclusive_lock);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 64a008d14493..7ddef8f30d0e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1401,7 +1401,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
radeon_i2c_fini(rdev);
}
-static bool is_hdtv_mode(struct drm_display_mode *mode)
+static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -1414,7 +1414,7 @@ static bool is_hdtv_mode(struct drm_display_mode *mode)
}
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2c4d53fd20c5..8c593ea82c41 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -59,9 +59,14 @@
* 2.15.0 - add max_pipes query
* 2.16.0 - fix evergreen 2D tiled surface calculation
* 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
+ * 2.18.0 - r600-eg: allow "invalid" DB formats
+ * 2.19.0 - r600-eg: MSAA textures
+ * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query
+ * 2.21.0 - r600-r700: FMASK and CMASK
+ * 2.22.0 - r600 only: RESOLVE_BOX allowed
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 17
+#define KMS_DRIVER_MINOR 22
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -133,7 +138,7 @@ int radeon_tv = 1;
int radeon_audio = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
-int radeon_pcie_gen2 = 0;
+int radeon_pcie_gen2 = -1;
int radeon_msi = -1;
int radeon_lockup_timeout = 10000;
@@ -179,7 +184,7 @@ module_param_named(disp_priority, radeon_disp_priority, int, 0444);
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
-MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
@@ -262,7 +267,6 @@ static struct drm_driver driver_old = {
.irq_postinstall = radeon_driver_irq_postinstall,
.irq_uninstall = radeon_driver_irq_uninstall,
.irq_handler = radeon_driver_irq_handler,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
.fops = &radeon_driver_old_fops,
@@ -365,7 +369,6 @@ static struct drm_driver kms_driver = {
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
- .reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 11f5f402d22c..7b737b9339ad 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,39 +40,95 @@
#include "radeon.h"
#include "radeon_trace.h"
+/*
+ * Fences
+ * Fences mark an event in the GPUs pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed. Whether
+ * we use a scratch register or memory location depends on the asic
+ * and whether writeback is enabled.
+ */
+
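A compact sketch (editor's illustration, not part of the patch) of the fence lifecycle with the reworked API below, where radeon_fence_emit() now allocates the fence through a pointer-to-pointer argument; locking around ring emission is omitted and example_fence_roundtrip is a hypothetical helper.

/* Sketch: emit a fence on a ring, wait for it non-interruptibly, then
 * drop the reference taken at emit time.
 */
static int example_fence_roundtrip(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, ring);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}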
+/**
+ * radeon_fence_write - write a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @seq: sequence number to write
+ * @ring: ring index the fence is associated with
+ *
+ * Writes a fence value to memory or a scratch register (all asics).
+ */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
- if (rdev->wb.enabled) {
- *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ *drv->cpu_addr = cpu_to_le32(seq);
} else {
- WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+ WREG32(drv->scratch_reg, seq);
}
}
+/**
+ * radeon_fence_read - read a fence value
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Reads a fence value from memory or a scratch register (all asics).
+ * Returns the value of the fence read from memory or register.
+ */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
+ struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
u32 seq = 0;
- if (rdev->wb.enabled) {
- seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+ if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
+ seq = le32_to_cpu(*drv->cpu_addr);
} else {
- seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+ seq = RREG32(drv->scratch_reg);
}
return seq;
}
-int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
+/**
+ * radeon_fence_emit - emit a fence on the requested ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ * @ring: ring index the fence is associated with
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int radeon_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
{
/* we are protected by the ring emission mutex */
- if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
- return 0;
+ *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
+ if ((*fence) == NULL) {
+ return -ENOMEM;
}
- fence->seq = ++rdev->fence_drv[fence->ring].seq;
- radeon_fence_ring_emit(rdev, fence->ring, fence);
- trace_radeon_fence_emit(rdev->ddev, fence->seq);
+ kref_init(&((*fence)->kref));
+ (*fence)->rdev = rdev;
+ (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
+ (*fence)->ring = ring;
+ radeon_fence_ring_emit(rdev, ring, *fence);
+ trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0;
}
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq;
@@ -133,30 +189,35 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
}
}
+/**
+ * radeon_fence_destroy - destroy a fence
+ *
+ * @kref: fence kref
+ *
+ * Frees the fence object (all asics).
+ */
static void radeon_fence_destroy(struct kref *kref)
{
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev,
- struct radeon_fence **fence,
- int ring)
-{
- *fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
- if ((*fence) == NULL) {
- return -ENOMEM;
- }
- kref_init(&((*fence)->kref));
- (*fence)->rdev = rdev;
- (*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
- (*fence)->ring = ring;
- return 0;
-}
-
+/**
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ *
+ * @rdev: radeon device pointer
+ * @seq: sequence number
+ * @ring: ring index the fence is associated with
+ *
+ * Check if the last signaled fence sequence number is >= the requested
+ * sequence number (all asics).
+ * Returns true if the fence has signaled (current fence value
+ * is >= requested value) or false if it has not (current fence
+ * value is < the requested value). Helper function for
+ * radeon_fence_signaled().
+ */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
u64 seq, unsigned ring)
{
@@ -171,15 +232,19 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
if (!fence) {
return true;
}
- if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
- WARN(1, "Querying an unemitted fence : %p !\n", fence);
- return true;
- }
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true;
}
@@ -190,6 +255,24 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return false;
}
+/**
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
+ *
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
+ */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
unsigned ring, bool intr, bool lock_ring)
{
@@ -285,6 +368,17 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
return 0;
}
+/**
+ * radeon_fence_wait - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ * @intr: use interruptable sleep
+ *
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
int r;
@@ -315,6 +409,20 @@ bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
return false;
}
+/**
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number(s) we want to wait for
+ * @intr: use interruptible sleep
+ *
+ * Wait for the requested sequence number(s) to be written by any ring
+ * (all asics). Sequence number array is indexed by ring id.
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ */
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
u64 *target_seq, bool intr)
{
@@ -343,7 +451,7 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
/* nothing to wait for ? */
if (ring == RADEON_NUM_RINGS) {
- return 0;
+ return -ENOENT;
}
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
@@ -424,6 +532,19 @@ static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_fence_wait_any - wait for a fence to signal on any ring
+ *
+ * @rdev: radeon device pointer
+ * @fences: radeon fence object(s)
+ * @intr: use interruptible sleep
+ *
+ * Wait for any requested fence to signal (all asics). Fence
+ * array is indexed by ring id. @intr selects whether to use
+ * interruptible (true) or non-interruptible (false) sleep when
+ * waiting for the fences. Used by the suballocator.
+ * Returns 0 if any fence has passed, error for all other cases.
+ */
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr)
@@ -444,9 +565,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}
- if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
- seq[i] = fences[i]->seq;
- }
+ seq[i] = fences[i]->seq;
}
r = radeon_fence_wait_any_seq(rdev, seq, intr);
@@ -456,16 +575,22 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
return 0;
}
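As an aside for readers of this hunk, here is a minimal caller-side sketch of the two wait paths documented above. It assumes the caller already holds valid radeon_fence references; the helper name and the fence sources are hypothetical and not part of this patch.

#include "radeon.h"

/* Hypothetical helper: wait for one fence, then for whichever fence in a
 * per-ring array signals first.  Sketch only, not from this patch. */
static int example_wait(struct radeon_device *rdev,
                        struct radeon_fence *fence,
                        struct radeon_fence **per_ring_fences)
{
        int r;

        /* interruptible wait on a single fence */
        r = radeon_fence_wait(fence, true);
        if (r)
                return r;       /* -EDEADLK indicates a detected GPU lockup */

        /* wait for the first fence to signal on any ring; the array is
         * indexed by ring id and unused slots are NULL */
        return radeon_fence_wait_any(rdev, per_ring_fences, false);
}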
+/**
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for the next fence on the requested ring to signal (all asics).
+ * Returns 0 if the next fence has passed, error for all other cases.
+ * Caller must hold ring lock.
+ */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq;
- /* We are not protected by ring lock when reading current seq but
- * it's ok as worst case is we return to early while we could have
- * wait.
- */
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
- if (seq >= rdev->fence_drv[ring].seq) {
+ if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emited fence */
return -ENOENT;
@@ -473,23 +598,59 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/**
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Wait for all fences on the requested ring to signal (all asics).
+ * Caller must hold the ring lock.
+ */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
- /* We are not protected by ring lock when reading current seq
- * but it's ok as wait empty is call from place where no more
- * activity can be scheduled so there won't be concurrent access
- * to seq value.
- */
- return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
- ring, false, false);
+ uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
+ while (1) {
+ int r;
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r == -EDEADLK) {
+ mutex_unlock(&rdev->ring_lock);
+ r = radeon_gpu_reset(rdev);
+ mutex_lock(&rdev->ring_lock);
+ if (!r)
+ continue;
+ }
+ if (r) {
+ dev_err(rdev->dev, "error waiting for ring to become"
+ " idle (%d)\n", r);
+ }
+ return;
+ }
}
+/**
+ * radeon_fence_ref - take a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Take a reference on a fence (all asics).
+ * Returns the fence.
+ */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
kref_get(&fence->kref);
return fence;
}
+/**
+ * radeon_fence_unref - remove a ref on a fence
+ *
+ * @fence: radeon fence object
+ *
+ * Remove a reference on a fence (all asics).
+ */
void radeon_fence_unref(struct radeon_fence **fence)
{
struct radeon_fence *tmp = *fence;
@@ -500,6 +661,16 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
+/**
+ * radeon_fence_count_emitted - get the count of emitted fences
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Get the number of fences emitted on the requested ring (all asics).
+ * Returns the number of emitted fences on the ring. Used by the
+ * dynpm code to track ring activity.
+ */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
uint64_t emitted;
@@ -508,7 +679,8 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
* but it's ok to report slightly wrong fence count here.
*/
radeon_fence_process(rdev, ring);
- emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
+ emitted = rdev->fence_drv[ring].sync_seq[ring]
+ - atomic64_read(&rdev->fence_drv[ring].last_seq);
/* to avoid 32bits warp around */
if (emitted > 0x10000000) {
emitted = 0x10000000;
@@ -516,6 +688,83 @@ unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
return (unsigned)emitted;
}
+/**
+ * radeon_fence_need_sync - do we need a semaphore
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Check if the fence needs to be synced against another ring
+ * (all asics). If so, we need to emit a semaphore.
+ * Returns true if we need to sync with another ring, false if
+ * not.
+ */
+bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *fdrv;
+
+ if (!fence) {
+ return false;
+ }
+
+ if (fence->ring == dst_ring) {
+ return false;
+ }
+
+ /* we are protected by the ring mutex */
+ fdrv = &fence->rdev->fence_drv[dst_ring];
+ if (fence->seq <= fdrv->sync_seq[fence->ring]) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * radeon_fence_note_sync - record the sync point
+ *
+ * @fence: radeon fence object
+ * @dst_ring: which ring to check against
+ *
+ * Note the sequence number at which point the fence will
+ * be synced with the requested ring (all asics).
+ */
+void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
+{
+ struct radeon_fence_driver *dst, *src;
+ unsigned i;
+
+ if (!fence) {
+ return;
+ }
+
+ if (fence->ring == dst_ring) {
+ return;
+ }
+
+ /* we are protected by the ring mutex */
+ src = &fence->rdev->fence_drv[fence->ring];
+ dst = &fence->rdev->fence_drv[dst_ring];
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (i == dst_ring) {
+ continue;
+ }
+ dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
+ }
+}
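A hedged sketch of how the two helpers above are meant to be paired around semaphore emission; the actual semaphore emission is driver specific and only indicated by a comment, and the surrounding CS code is simplified.

#include "radeon.h"

/* Sketch: before executing on dst_ring, check whether the work that
 * produced @fence on another ring still needs to be waited on. */
static void example_sync_to(struct radeon_device *rdev,
                            struct radeon_fence *fence, int dst_ring)
{
        if (!radeon_fence_need_sync(fence, dst_ring))
                return;         /* already ordered, nothing to do */

        /* ... emit a semaphore wait on dst_ring here (driver specific) ... */

        /* record that dst_ring is now synced up to this sequence number */
        radeon_fence_note_sync(fence, dst_ring);
}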
+
+/**
+ * radeon_fence_driver_start_ring - make the fence driver
+ * ready for use on the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Make the fence driver ready for processing (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has.
+ * Returns 0 for success, errors for failure.
+ */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
uint64_t index;
@@ -537,24 +786,49 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
+ radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
return 0;
}
+/**
+ * radeon_fence_driver_init_ring - init the fence driver
+ * for the requested ring.
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring index to start the fence driver on
+ *
+ * Init the fence driver for the requested ring (all asics).
+ * Helper function for radeon_fence_driver_init().
+ */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
+ int i;
+
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- rdev->fence_drv[ring].seq = 0;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
+/**
+ * radeon_fence_driver_init - init the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Init the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * radeon_fence_driver_start_ring().
+ * Returns 0 for success.
+ */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
@@ -569,6 +843,14 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_fence_driver_fini - tear down the fence driver
+ * for all possible rings.
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tear down the fence driver for all possible rings (all asics).
+ */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
int ring;
@@ -595,7 +877,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- int i;
+ int i, j;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!rdev->fence_drv[i].initialized)
@@ -604,8 +886,14 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
- seq_printf(m, "Last emitted 0x%016llx\n",
- rdev->fence_drv[i].seq);
+ seq_printf(m, "Last emitted 0x%016llx\n",
+ rdev->fence_drv[i].sync_seq[i]);
+
+ for (j = 0; j < RADEON_NUM_RINGS; ++j) {
+ if (i != j && rdev->fence_drv[j].initialized)
+ seq_printf(m, "Last sync to ring %d 0x%016llx\n",
+ j, rdev->fence_drv[i].sync_seq[j]);
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84b648a7ddd8..bb3b7fe05ccd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -31,8 +31,38 @@
#include "radeon_reg.h"
/*
+ * GART
+ * The GART (Graphics Aperture Remapping Table) is an aperture
+ * in the GPU's address space. System pages can be mapped into
+ * the aperture and look like contiguous pages from the GPU's
+ * perspective. A page table maps the pages in the aperture
+ * to the actual backing pages in system memory.
+ *
+ * Radeon GPUs support both an internal GART, as described above,
+ * and AGP. AGP works similarly, but the GART table is configured
+ * and maintained by the northbridge rather than the driver.
+ * Radeon hw has a separate AGP aperture that is programmed to
+ * point to the AGP aperture provided by the northbridge and the
+ * requests are passed through to the northbridge aperture.
+ * Both AGP and internal GART can be used at the same time; however,
+ * that is not currently supported by the driver.
+ *
+ * This file handles the common internal GART management.
+ */
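For orientation, a minimal sketch of how the common GART helpers below are used to back a range of the aperture with system pages. The page and DMA-address arrays are assumed to be prepared by the caller (e.g. by TTM), and the offset bookkeeping is simplified.

#include "radeon.h"

/* Sketch: map @npages system pages at byte offset @offset of the GART
 * aperture, then unmap them again. */
static int example_gart_map(struct radeon_device *rdev, unsigned offset,
                            int npages, struct page **pages,
                            dma_addr_t *dma_addrs)
{
        int r;

        r = radeon_gart_bind(rdev, offset, npages, pages, dma_addrs);
        if (r)
                return r;       /* -EINVAL if the GART is not ready */

        /* ... GPU accesses the pages through the aperture ... */

        radeon_gart_unbind(rdev, offset, npages);
        return 0;
}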
+
+/*
* Common GART table functions.
*/
+/**
+ * radeon_gart_table_ram_alloc - allocate system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ * Returns 0 for success, -ENOMEM for failure.
+ */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
@@ -54,6 +84,15 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_table_ram_free - free system ram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free system memory for GART page table
+ * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
+ * gart table to be in system memory.
+ */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
if (rdev->gart.ptr == NULL) {
@@ -73,6 +112,16 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
rdev->gart.table_addr = 0;
}
+/**
+ * radeon_gart_table_vram_alloc - allocate vram for gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
@@ -88,6 +137,16 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_table_vram_pin - pin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Pin the GART page table in vram so it will not be moved
+ * by the memory manager (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
uint64_t gpu_addr;
@@ -110,6 +169,14 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
return r;
}
+/**
+ * radeon_gart_table_vram_unpin - unpin gart page table in vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Unpin the GART page table in vram (pcie r4xx, r5xx+).
+ * These asics require the gart table to be in video memory.
+ */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
int r;
@@ -126,6 +193,15 @@ void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
}
}
+/**
+ * radeon_gart_table_vram_free - free gart page table vram
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Free the video memory used for the GART page table
+ * (pcie r4xx, r5xx+). These asics require the gart table to
+ * be in video memory.
+ */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.robj == NULL) {
@@ -135,12 +211,19 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
radeon_bo_unref(&rdev->gart.robj);
}
-
-
-
/*
* Common gart functions.
*/
+/**
+ * radeon_gart_unbind - unbind pages from the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to unbind
+ *
+ * Unbinds the requested pages from the gart page table and
+ * replaces them with the dummy page (all asics).
+ */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages)
{
@@ -172,6 +255,19 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
radeon_gart_tlb_flush(rdev);
}
+/**
+ * radeon_gart_bind - bind pages into the gart page table
+ *
+ * @rdev: radeon_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @pagelist: pages to bind
+ * @dma_addr: DMA addresses of pages
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Returns 0 for success, -EINVAL for failure.
+ */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
@@ -203,6 +299,14 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}
+/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
@@ -222,6 +326,14 @@ void radeon_gart_restore(struct radeon_device *rdev)
radeon_gart_tlb_flush(rdev);
}
+/**
+ * radeon_gart_init - init the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Allocate the dummy page and init the gart driver info (all asics).
+ * Returns 0 for success, error for failure.
+ */
int radeon_gart_init(struct radeon_device *rdev)
{
int r, i;
@@ -262,6 +374,13 @@ int radeon_gart_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_gart_fini - tear down the driver info for managing the gart
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the gart driver info and free the dummy page (all asics).
+ */
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
@@ -278,35 +397,104 @@ void radeon_gart_fini(struct radeon_device *rdev)
}
/*
+ * GPUVM
+ * GPUVM is similar to the legacy GART on older asics; however,
+ * rather than there being a single global GART table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time. The VM page tables can contain a mix of
+ * VRAM pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID. When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer. VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
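A rough per-client sketch of the VM lifecycle described above, along the lines of how the KMS open/close paths use it. The offset and flags values are illustrative assumptions only, and error handling is compressed.

#include "radeon.h"

/* Sketch: create a VM, map one already-reserved buffer object into it,
 * and tear everything down again. */
static int example_vm_lifecycle(struct radeon_device *rdev,
                                struct radeon_vm *vm, struct radeon_bo *bo)
{
        int r;

        r = radeon_vm_init(rdev, vm);
        if (r)
                return r;

        /* bo must already be reserved by the caller */
        r = radeon_vm_bo_add(rdev, vm, bo, 0 /* offset in VM space */,
                             0 /* page attribute flags */);
        if (r)
                goto out_fini;

        /* ... command submission binds the VM and fills the page table ... */

        radeon_vm_bo_rmv(rdev, vm, bo);
out_fini:
        radeon_vm_fini(rdev, vm);
        return r;
}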
+
+/*
* vm helpers
*
* TODO bind a default page at vm initialization for default address
*/
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
+ struct radeon_vm *vm;
+ struct radeon_bo_va *bo_va;
int r;
- rdev->vm_manager.enabled = false;
+ if (!rdev->vm_manager.enabled) {
+ /* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ rdev->vm_manager.max_pfn * 8 * 2,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
- /* mark first vm as always in use, it's the system one */
- /* allocate enough for 2 full VM pts */
- r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8 * 2,
- RADEON_GEM_DOMAIN_VRAM);
- if (r) {
- dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
- (rdev->vm_manager.max_pfn * 8) >> 10);
- return r;
+ r = rdev->vm_manager.funcs->init(rdev);
+ if (r)
+ return r;
+
+ rdev->vm_manager.enabled = true;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+ if (r)
+ return r;
}
- r = rdev->vm_manager.funcs->init(rdev);
- if (r == 0)
- rdev->vm_manager.enabled = true;
+ /* restore page table */
+ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+ if (vm->id == -1)
+ continue;
- return r;
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ struct ttm_mem_reg *mem = NULL;
+ if (bo_va->valid)
+ mem = &bo_va->bo->tbo.mem;
+
+ bo_va->valid = false;
+ r = radeon_vm_bo_update_pte(rdev, vm, bo_va->bo, mem);
+ if (r) {
+ DRM_ERROR("Failed to update pte for vm %d!\n", vm->id);
+ }
+ }
+
+ r = rdev->vm_manager.funcs->bind(rdev, vm, vm->id);
+ if (r) {
+ DRM_ERROR("Failed to bind vm %d!\n", vm->id);
+ }
+ }
+ return 0;
}
-/* cs mutex must be lock */
+/* global mutex must be locked */
+/**
+ * radeon_vm_unbind_locked - unbind a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Unbind the requested vm (cayman+).
+ * Wait for use of the VM to finish, then unbind the page table,
+ * and free the page table memory.
+ */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
struct radeon_vm *vm)
{
@@ -317,10 +505,21 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}
/* wait for vm use to end */
- if (vm->fence) {
- radeon_fence_wait(vm->fence, false);
- radeon_fence_unref(&vm->fence);
+ while (vm->fence) {
+ int r;
+ r = radeon_fence_wait(vm->fence, false);
+ if (r)
+ DRM_ERROR("error while waiting for fence: %d\n", r);
+ if (r == -EDEADLK) {
+ mutex_unlock(&rdev->vm_manager.lock);
+ r = radeon_gpu_reset(rdev);
+ mutex_lock(&rdev->vm_manager.lock);
+ if (!r)
+ continue;
+ }
+ break;
}
+ radeon_fence_unref(&vm->fence);
/* hw unbind */
rdev->vm_manager.funcs->unbind(rdev, vm);
@@ -335,39 +534,42 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
}
}
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
- if (rdev->vm_manager.sa_manager.bo == NULL)
- return;
- radeon_vm_manager_suspend(rdev);
- rdev->vm_manager.funcs->fini(rdev);
- radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
- rdev->vm_manager.enabled = false;
-}
-
-int radeon_vm_manager_start(struct radeon_device *rdev)
-{
- if (rdev->vm_manager.sa_manager.bo == NULL) {
- return -EINVAL;
- }
- return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
-}
-
-int radeon_vm_manager_suspend(struct radeon_device *rdev)
-{
struct radeon_vm *vm, *tmp;
- radeon_mutex_lock(&rdev->cs_mutex);
+ if (!rdev->vm_manager.enabled)
+ return;
+
+ mutex_lock(&rdev->vm_manager.lock);
/* unbind all active vm */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
radeon_vm_unbind_locked(rdev, vm);
}
rdev->vm_manager.funcs->fini(rdev);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
}
-/* cs mutex must be lock */
+/* global mutex must be locked */
+/**
+ * radeon_vm_unbind - unbind a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Takes the vm mutex and calls radeon_vm_unbind_locked() (cayman+).
+ */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
mutex_lock(&vm->mutex);
@@ -375,7 +577,19 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_unlock(&vm->mutex);
}
-/* cs mutex must be lock & vm mutex must be lock */
+/* global and local mutexes must be locked */
+/**
+ * radeon_vm_bind - bind a page table to a VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Bind the requested vm (cayman+).
+ * Suballocate memory for the page table, allocate a VMID
+ * and bind the page table to it, and finally start to populate
+ * the page table.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
@@ -438,6 +652,20 @@ retry_id:
}
/* object have to be reserved */
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @offset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm and validate
+ * the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
@@ -479,7 +707,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
if (last_pfn > vm->last_pfn) {
/* release mutex and lock in right order */
mutex_unlock(&vm->mutex);
- radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
/* and check again */
if (last_pfn > vm->last_pfn) {
@@ -488,7 +716,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
radeon_vm_unbind_locked(rdev, vm);
vm->last_pfn = (last_pfn + align) & ~align;
}
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
}
head = &vm->va;
last_offset = 0;
@@ -515,6 +743,17 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_vm_get_addr - get the physical address of the page
+ *
+ * @rdev: radeon_device pointer
+ * @mem: ttm mem
+ * @pfn: pfn
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
struct ttm_mem_reg *mem,
unsigned pfn)
@@ -543,7 +782,18 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
return addr;
}
-/* object have to be reserved & cs mutex took & vm mutex took */
+/* object has to be reserved & global and local mutexes must be locked */
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
@@ -564,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
return -EINVAL;
}
- if (bo_va->valid)
+ if (bo_va->valid && mem)
return 0;
ngpu_pages = radeon_bo_ngpu_pages(bo);
@@ -592,20 +842,48 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
}
/* object have to be reserved */
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Remove @bo from the requested vm (cayman+).
+ * Remove @bo from the list of bos associated with the vm and
+ * remove the ptes for @bo in the page table.
+ * Returns 0 for success.
+ */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
+ int r;
bo_va = radeon_bo_va(bo, vm);
if (bo_va == NULL)
return 0;
- radeon_mutex_lock(&rdev->cs_mutex);
+ /* wait for va use to end */
+ while (bo_va->fence) {
+ r = radeon_fence_wait(bo_va->fence, false);
+ if (r) {
+ DRM_ERROR("error while waiting for fence: %d\n", r);
+ }
+ if (r == -EDEADLK) {
+ r = radeon_gpu_reset(rdev);
+ if (!r)
+ continue;
+ }
+ break;
+ }
+ radeon_fence_unref(&bo_va->fence);
+
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&vm->mutex);
list_del(&bo_va->bo_list);
@@ -614,6 +892,15 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
return 0;
}
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo)
{
@@ -625,6 +912,17 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
}
}
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm (cayman+).
+ * Map the IB pool and any other shared objects into the VM
+ * by default as it's used by all VMs.
+ * Returns 0 for success, error for failure.
+ */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
int r;
@@ -651,22 +949,34 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
return r;
}
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
- radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm);
- radeon_mutex_unlock(&rdev->cs_mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
- /* remove all bo */
+ /* remove all bo; at this point none are busy any more because unbind
+ * waited for the last vm fence to signal
+ */
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list);
+ radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va);
}
@@ -678,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
+ radeon_fence_unref(&bo_va->fence);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 21ec9f5653ce..1b57b0058ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -134,36 +134,25 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_device *rdev = rbo->rdev;
struct radeon_fpriv *fpriv = file_priv->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
- struct radeon_bo_va *bo_va, *tmp;
if (rdev->family < CHIP_CAYMAN) {
return;
}
if (radeon_bo_reserve(rbo, false)) {
+ dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n");
return;
}
- list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
- if (bo_va->vm == vm) {
- /* remove from this vm address space */
- mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- }
- }
+ radeon_vm_bo_rmv(rdev, vm, rbo);
radeon_bo_unreserve(rbo);
}
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
if (r == -EDEADLK) {
- radeon_mutex_lock(&rdev->cs_mutex);
r = radeon_gpu_reset(rdev);
if (!r)
r = -EAGAIN;
- radeon_mutex_unlock(&rdev->cs_mutex);
}
return r;
}
@@ -217,12 +206,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int r;
+ down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, &gobj);
if (r) {
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -230,10 +221,12 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
+ up_read(&rdev->exclusive_lock);
return 0;
}
@@ -242,6 +235,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -249,10 +243,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
+ down_read(&rdev->exclusive_lock);
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
+ up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
@@ -260,6 +256,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
drm_gem_object_unreference_unlocked(gobj);
+ up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5df58d1aba06..afaa1727abd2 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -32,6 +32,17 @@
#include "radeon.h"
#include "atom.h"
+#define RADEON_WAIT_IDLE_TIMEOUT 200
+
+/**
+ * radeon_driver_irq_handler_kms - irq handler for KMS
+ *
+ * @DRM_IRQ_ARGS: args
+ *
+ * This is the irq handler for the radeon KMS driver (all asics).
+ * radeon_irq_process is a macro that points to the per-asic
+ * irq handler callback.
+ */
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -43,6 +54,17 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
/*
* Handle hotplug events outside the interrupt handler proper.
*/
+/**
+ * radeon_hotplug_work_func - display hotplug work handler
+ *
+ * @work: work struct
+ *
+ * This is the hot plug event work handler (all asics).
+ * The work gets scheduled from the irq handler if there
+ * was a hot plug interrupt. It walks the connector table
+ * and calls the hotplug handler for each one, then sends
+ * a drm hotplug event to alert userspace.
+ */
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
@@ -59,61 +81,94 @@ static void radeon_hotplug_work_func(struct work_struct *work)
drm_helper_hpd_irq_event(dev);
}
+/**
+ * radeon_driver_irq_preinstall_kms - drm irq preinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Gets the hw ready to enable irqs (all asics).
+ * This function disables all interrupt sources on the GPU.
+ */
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
- rdev->irq.pflip[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
+/**
+ * radeon_driver_irq_postinstall_kms - drm irq postinstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * Handles any work that needs to be done after enabling irqs (all asics).
+ * Returns 0 on success.
+ */
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
- unsigned i;
-
dev->max_vblank_count = 0x001fffff;
- for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = true;
- radeon_irq_set(rdev);
return 0;
}
+/**
+ * radeon_driver_irq_uninstall_kms - drm irq uninstall callback
+ *
+ * @dev: drm dev pointer
+ *
+ * This function disables all interrupt sources on the GPU (all asics).
+ */
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
unsigned i;
if (rdev == NULL) {
return;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
- rdev->irq.sw_int[i] = false;
+ atomic_set(&rdev->irq.ring_int[i], 0);
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
- rdev->irq.pflip[i] = false;
+ atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
+/**
+ * radeon_msi_ok - asic specific msi checks
+ *
+ * @rdev: radeon device pointer
+ *
+ * Handles asic specific MSI checks to determine if
+ * MSIs should be enabled on a particular chip (all asics).
+ * Returns true if MSIs should be enabled, false if MSIs
+ * should not be enabled.
+ */
static bool radeon_msi_ok(struct radeon_device *rdev)
{
/* RV370/RV380 was first asic with MSI support */
@@ -166,17 +221,22 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
return true;
}
+/**
+ * radeon_irq_kms_init - init driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
+ * Returns 0 for success, error for failure.
+ */
int radeon_irq_kms_init(struct radeon_device *rdev)
{
- int i;
int r = 0;
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- spin_lock_init(&rdev->irq.sw_lock);
- for (i = 0; i < rdev->num_crtc; i++)
- spin_lock_init(&rdev->irq.pflip_lock[i]);
+ spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
@@ -201,6 +261,13 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_irq_kms_fini - tear down driver interrupt info
+ *
+ * @rdev: radeon device pointer
+ *
+ * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ */
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
drm_vblank_cleanup(rdev->ddev);
@@ -213,31 +280,63 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
+/**
+ * radeon_irq_kms_sw_irq_get - enable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to enable
+ *
+ * Enables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
- spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
- rdev->irq.sw_int[ring] = true;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
+/**
+ * radeon_irq_kms_sw_irq_put - disable software interrupt
+ *
+ * @rdev: radeon device pointer
+ * @ring: ring whose interrupt you want to disable
+ *
+ * Disables the software interrupt for a specific ring (all asics).
+ * The software interrupt is generally used to signal a fence on
+ * a particular ring.
+ */
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
- spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
- rdev->irq.sw_int[ring] = false;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
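A short sketch of the reference-counted pattern these two helpers implement, roughly how a fence-waiting path is expected to use them; the surrounding fence code is only indicated by a comment.

#include "radeon.h"

/* Sketch: keep the ring's software interrupt enabled only while at
 * least one caller is waiting for a fence on that ring. */
static void example_ring_irq_window(struct radeon_device *rdev, int ring)
{
        radeon_irq_kms_sw_irq_get(rdev, ring);  /* first get enables the irq */

        /* ... emit a fence and wait for it to signal ... */

        radeon_irq_kms_sw_irq_put(rdev, ring);  /* last put disables it again */
}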
+/**
+ * radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to enable
+ *
+ * Enables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
@@ -245,14 +344,25 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
- spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
- rdev->irq.pflip[crtc] = true;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
+/**
+ * radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
+ *
+ * @rdev: radeon device pointer
+ * @crtc: crtc whose interrupt you want to disable
+ *
+ * Disables the pageflip interrupt for a specific crtc (all asics).
+ * For pageflips we use the vblank interrupt source.
+ */
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
@@ -260,12 +370,121 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
- spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
- rdev->irq.pflip[crtc] = false;
+ if (!rdev->ddev->irq_enabled)
+ return;
+
+ if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
- spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
}
+/**
+ * radeon_irq_kms_enable_afmt - enable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to enable
+ *
+ * Enables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = true;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+
+}
+
+/**
+ * radeon_irq_kms_disable_afmt - disable audio format change interrupt
+ *
+ * @rdev: radeon device pointer
+ * @block: afmt block whose interrupt you want to disable
+ *
+ * Disables the afmt change interrupt for a specific afmt block (all asics).
+ */
+void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.afmt[block] = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to enable.
+ *
+ * Enables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
+
+/**
+ * radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
+ *
+ * @rdev: radeon device pointer
+ * @hpd_mask: mask of hpd pins you want to disable.
+ *
+ * Disables the hotplug detect interrupt for a specific hpd pin (all asics).
+ */
+void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
+{
+ unsigned long irqflags;
+ int i;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
+ rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+}
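For illustration, how an hpd_mask for the two helpers above might be assembled; the pin numbers are purely hypothetical and would normally come from the connector table.

#include "radeon.h"

/* Sketch: enable the hotplug interrupt for hpd pins 0 and 2, then
 * disable pin 2 again. */
static void example_hpd_irqs(struct radeon_device *rdev)
{
        unsigned mask = (1 << 0) | (1 << 2);

        radeon_irq_kms_enable_hpd(rdev, mask);
        /* ... hotplug handling ... */
        radeon_irq_kms_disable_hpd(rdev, 1 << 2);
}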
+
+/**
+ * radeon_irq_kms_wait_gui_idle - waits for drawing engine to be idle
+ *
+ * @rdev: radeon device pointer
+ *
+ * Enables the GUI idle interrupt and waits for it to fire (r6xx+).
+ * This is currently used to make sure the 3D engine is idle for power
+ * management, but should be replaced with proper fence waits.
+ * GUI idle interrupts don't work very well on pre-r6xx hw and it also
+ * does not take into account other aspects of the chip that may be busy.
+ * DO NOT USE GOING FORWARD.
+ */
+int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev)
+{
+ unsigned long irqflags;
+ int r;
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.gui_idle = true;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+
+ r = wait_event_timeout(rdev->irq.idle_queue, radeon_gui_idle(rdev),
+ msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
+
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
+ rdev->irq.gui_idle = false;
+ radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 5c58d7d90cb2..414b4acf6947 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -29,10 +29,22 @@
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"
+#include "radeon_asic.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+/**
+ * radeon_driver_unload_kms - Main unload function for KMS.
+ *
+ * @dev: drm dev pointer
+ *
+ * This is the main unload function for KMS (all asics).
+ * It calls radeon_modeset_fini() to tear down the
+ * displays, and radeon_device_fini() to tear down
+ * the rest of the device (CP, writeback, etc.).
+ * Returns 0 on success.
+ */
int radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -46,6 +58,19 @@ int radeon_driver_unload_kms(struct drm_device *dev)
return 0;
}
+/**
+ * radeon_driver_load_kms - Main load function for KMS.
+ *
+ * @dev: drm dev pointer
+ * @flags: device flags
+ *
+ * This is the main load function for KMS (all asics).
+ * It calls radeon_device_init() to set up the non-display
+ * parts of the chip (asic init, CP, writeback, etc.), and
+ * radeon_modeset_init() to set up the display parts
+ * (crtcs, encoders, hotplug detect, etc.).
+ * Returns 0 on success, error on failure.
+ */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
@@ -96,6 +121,16 @@ out:
return r;
}
+/**
+ * radeon_set_filp_rights - Set filp rights.
+ *
+ * @dev: drm dev pointer
+ * @owner: drm file
+ * @applier: drm file
+ * @value: value
+ *
+ * Sets the filp rights for the device (all asics).
+ */
static void radeon_set_filp_rights(struct drm_device *dev,
struct drm_file **owner,
struct drm_file *applier,
@@ -118,20 +153,54 @@ static void radeon_set_filp_rights(struct drm_device *dev,
/*
* Userspace get information ioctl
*/
+/**
+ * radeon_info_ioctl - answer a device specific request.
+ *
+ * @dev: drm dev pointer
+ * @data: request object
+ * @filp: drm filp
+ *
+ * This function is used to pass device specific parameters to the userspace
+ * drivers. Examples include: pci device id, pipeline params, tiling params,
+ * etc. (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
- struct drm_radeon_info *info;
+ struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
- uint32_t *value_ptr;
- uint32_t value;
+ uint32_t value, *value_ptr;
+ uint64_t value64, *value_ptr64;
struct drm_crtc *crtc;
int i, found;
- info = data;
+ /* TIMESTAMP is a 64-bit value, needs special handling. */
+ if (info->request == RADEON_INFO_TIMESTAMP) {
+ if (rdev->family >= CHIP_R600) {
+ value_ptr64 = (uint64_t*)((unsigned long)info->value);
+ if (rdev->family >= CHIP_TAHITI) {
+ value64 = si_get_gpu_clock(rdev);
+ } else {
+ value64 = r600_get_gpu_clock(rdev);
+ }
+
+ if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ }
+
value_ptr = (uint32_t *)((unsigned long)info->value);
- if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
+ }
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
@@ -291,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
- DRM_ERROR("copy_to_user\n");
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
return 0;
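From the userspace side, the new RADEON_INFO_TIMESTAMP request would be queried roughly as below. This is a hedged sketch that assumes libdrm's drmCommandWriteRead() helper and the usual drm_radeon_info layout (request/pad/value); it is not code from this patch.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Sketch: ask the kernel for the 64-bit GPU clock counter (r6xx+). */
static int example_query_gpu_clock(int fd, uint64_t *clock)
{
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_TIMESTAMP;
        info.value = (uintptr_t)clock;  /* kernel writes the counter here */

        return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}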
@@ -301,16 +370,40 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/*
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
+/**
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
return 0;
}
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
+ *
+ * @dev: drm dev pointer
+ *
+ * Switch vga switcheroo state after last close (all asics).
+ */
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
}
+/**
+ * radeon_driver_open_kms - drm callback for open
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device open, init vm on cayman+ (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
@@ -339,6 +432,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return 0;
}
+/**
+ * radeon_driver_postclose_kms - drm callback for post close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device post close, tear down vm on cayman+ (all asics).
+ */
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -354,6 +455,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
}
}
+/**
+ * radeon_driver_preclose_kms - drm callback for pre close
+ *
+ * @dev: drm dev pointer
+ * @file_priv: drm file
+ *
+ * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx
+ * (all asics).
+ */
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
@@ -367,6 +477,15 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
/*
* VBlank related functions.
*/
+/**
+ * radeon_get_vblank_counter_kms - get frame count
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the frame count from
+ *
+ * Gets the frame count on the requested crtc (all asics).
+ * Returns frame count on success, -EINVAL on failure.
+ */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
@@ -379,34 +498,70 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
return radeon_get_vblank_counter(rdev, crtc);
}
+/**
+ * radeon_enable_vblank_kms - enable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to enable vblank interrupt for
+ *
+ * Enable the interrupt on the requested crtc (all asics).
+ * Returns 0 on success, -EINVAL on failure.
+ */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
+ int r;
if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return -EINVAL;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = true;
-
- return radeon_irq_set(rdev);
+ r = radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
+ return r;
}
+/**
+ * radeon_disable_vblank_kms - disable vblank interrupt
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to disable vblank interrupt for
+ *
+ * Disable the interrupt on the requested crtc (all asics).
+ */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", crtc);
return;
}
+ spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.crtc_vblank_int[crtc] = false;
-
radeon_irq_set(rdev);
+ spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
+/**
+ * radeon_get_vblank_timestamp_kms - get vblank timestamp
+ *
+ * @dev: drm dev pointer
+ * @crtc: crtc to get the timestamp for
+ * @max_error: max error
+ * @vblank_time: time value
+ * @flags: flags passed to the driver
+ *
+ * Gets the timestamp on the requested crtc based on the
+ * scanout position (all asics).
+ * Returns positive status flags on success, negative error on failure.
+ */
int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
int *max_error,
struct timeval *vblank_time,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 210317c7045e..94b4a1c12893 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -990,7 +990,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
@@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
+ radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
@@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
+ radeon_crtc->in_mode_set = false;
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a0c82229e8f0..670e9910f869 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -244,7 +244,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
}
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 5b10ffd7bb2f..d56978949f34 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -275,6 +275,7 @@ struct radeon_crtc {
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
+ bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
@@ -488,7 +489,7 @@ extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
@@ -678,7 +679,7 @@ void radeon_enc_destroy(struct drm_encoder *encoder);
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 830f1a7b486f..9024e7222839 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo)
list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
/* remove from all vm address space */
- mutex_lock(&bo_va->vm->mutex);
- list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
+ radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
}
}
@@ -115,9 +111,7 @@ int radeon_bo_create(struct radeon_device *rdev,
size = ALIGN(size, PAGE_SIZE);
- if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
- }
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -154,11 +148,11 @@ retry:
INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, 0, !kernel, NULL,
acc_size, sg, &radeon_ttm_bo_destroy);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
if (domain == RADEON_GEM_DOMAIN_VRAM) {
@@ -219,9 +213,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5b37e283ec38..7ae606600107 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -34,7 +34,6 @@
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
-#define RADEON_WAIT_IDLE_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = {
"Default",
@@ -251,21 +250,14 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
return;
mutex_lock(&rdev->ddev->struct_mutex);
- mutex_lock(&rdev->vram_mutex);
+ down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
- /* wait for GPU idle */
- rdev->pm.gui_idle = false;
- rdev->irq.gui_idle = true;
- radeon_irq_set(rdev);
- wait_event_interruptible_timeout(
- rdev->irq.idle_queue, rdev->pm.gui_idle,
- msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
- rdev->irq.gui_idle = false;
- radeon_irq_set(rdev);
+ /* wait for GPU to become idle */
+ radeon_irq_kms_wait_gui_idle(rdev);
}
} else {
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -303,7 +295,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
mutex_unlock(&rdev->ring_lock);
- mutex_unlock(&rdev->vram_mutex);
+ up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 983658c91358..43c431a2686d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -35,47 +35,97 @@
#include "atom.h"
/*
- * IB.
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
*/
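
A rough sketch of the flow this comment describes, using the IB helpers updated below; the ring index, IB size and packet payload are placeholders and error handling is trimmed, so treat it as an illustration rather than driver code.

	static int example_submit_ib(struct radeon_device *rdev)
	{
		struct radeon_ib ib;
		int r;

		/* grab an IB from the suballocator for the GFX ring */
		r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
		if (r)
			return r;

		/* fill ib.ptr[] with PM4 packets and set ib.length_dw (placeholder) */

		/* put a pointer to the IB on the ring; no CONST_IB in this example */
		r = radeon_ib_schedule(rdev, &ib, NULL);

		/* release the IB; its memory is held until the fence is handled */
		radeon_ib_free(rdev, &ib);
		return r;
	}
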
int radeon_debugfs_sa_init(struct radeon_device *rdev);
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size)
{
- int r;
+ int i, r;
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
- r = radeon_fence_create(rdev, &ib->fence, ring);
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
- dev_err(rdev->dev, "failed to create fence for new IB (%d)\n", r);
- radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
return r;
}
+ ib->ring = ring;
+ ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
ib->vm_id = 0;
ib->is_const_ib = false;
- ib->semaphore = NULL;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ ib->sync_to[i] = NULL;
return 0;
}
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_semaphore_free(rdev, ib->semaphore, ib->fence);
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will already be in the cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
- int r = 0;
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ bool need_sync = false;
+ int i, r = 0;
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
@@ -84,17 +134,51 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
- radeon_fence_emit(rdev, ib->fence);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = ib->sync_to[i];
+ if (radeon_fence_need_sync(fence, ib->ring)) {
+ need_sync = true;
+ radeon_semaphore_sync_rings(rdev, ib->semaphore,
+ fence->ring, ib->ring);
+ radeon_fence_note_sync(fence, ib->ring);
+ }
+ }
+ /* immediately free semaphore when we don't need to sync */
+ if (!need_sync) {
+ radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+ }
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
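
A hedged sketch of how the new const_ib argument might be used on SI: ce_ib and de_ib are assumed to have been obtained with radeon_ib_get() and already filled, with ce_ib marked is_const_ib, both targeting the same GFX ring.

	r = radeon_ib_schedule(rdev, &de_ib, &ce_ib);
	if (r)
		dev_err(rdev->dev, "IB schedule failed (%d)\n", r);
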
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;
@@ -108,6 +192,12 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (r) {
return r;
}
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
@@ -115,24 +205,33 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}
-int radeon_ib_pool_start(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
-}
-
-int radeon_ib_pool_suspend(struct radeon_device *rdev)
-{
- return radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
-}
-
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
@@ -164,10 +263,28 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
}
/*
- * Ring.
+ * Rings
+ * Most engines on the GPU are fed via ring buffers. Ring
+ * buffers are areas of GPU accessible memory that the host
+ * writes commands into and the GPU reads commands out of.
+ * There is a rptr (read pointer) that determines where the
+ * GPU is currently reading, and a wptr (write pointer)
+ * which determines where the host has written. When the
+ * pointers are equal, the ring is idle. When the host
+ * writes commands to the ring buffer, it increments the
+ * wptr. The GPU then starts fetching commands and executes
+ * them until the pointers are equal again.
*/
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
@@ -181,21 +298,37 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
ring->ring_free_dw--;
}
-int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
+/**
+ * radeon_ring_supports_scratch_reg - check if the ring supports
+ * writing to scratch registers
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if a specific ring supports writing to scratch registers (all asics).
+ * Returns true if the ring supports writing to scratch regs, false if not.
+ */
+bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
+ struct radeon_ring *ring)
{
- /* r1xx-r5xx only has CP ring */
- if (rdev->family < CHIP_R600)
- return RADEON_RING_TYPE_GFX_INDEX;
-
- if (rdev->family >= CHIP_CAYMAN) {
- if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
- return CAYMAN_RING_TYPE_CP1_INDEX;
- else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
- return CAYMAN_RING_TYPE_CP2_INDEX;
+ switch (ring->idx) {
+ case RADEON_RING_TYPE_GFX_INDEX:
+ case CAYMAN_RING_TYPE_CP1_INDEX:
+ case CAYMAN_RING_TYPE_CP2_INDEX:
+ return true;
+ default:
+ return false;
}
- return RADEON_RING_TYPE_GFX_INDEX;
}
+/**
+ * radeon_ring_free_size - update the free size
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the free dw slots in the ring buffer (all asics).
+ */
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
@@ -214,7 +347,16 @@ void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
}
}
-
+/**
+ * radeon_ring_alloc - allocate space on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
@@ -227,7 +369,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next_locked(rdev, radeon_ring_index(rdev, ring));
+ r = radeon_fence_wait_next_locked(rdev, ring->idx);
if (r)
return r;
}
@@ -236,6 +378,17 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
return 0;
}
+/**
+ * radeon_ring_lock - lock the ring and allocate space on it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Lock the ring and allocate @ndw dwords in the ring buffer
+ * (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
@@ -249,15 +402,20 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
return 0;
}
+/**
+ * radeon_ring_commit - tell the GPU to execute the new
+ * commands on the ring buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the wptr (write pointer) to tell the GPU to
+ * execute new commands on the ring buffer (all asics).
+ */
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- unsigned count_dw_pad;
- unsigned i;
-
/* We pad to match fetch size */
- count_dw_pad = (ring->align_mask + 1) -
- (ring->wptr & ring->align_mask);
- for (i = 0; i < count_dw_pad; i++) {
+ while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
@@ -265,23 +423,55 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
(void)RREG32(ring->wptr_reg);
}
+/**
+ * radeon_ring_unlock_commit - tell the GPU to execute the new
+ * commands on the ring buffer and unlock it
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_commit() then unlock the ring (all asics).
+ */
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_ring_undo - reset the wptr
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Reset the driver's copy of the wptr (all asics).
+ */
void radeon_ring_undo(struct radeon_ring *ring)
{
ring->wptr = ring->wptr_old;
}
+/**
+ * radeon_ring_unlock_undo - reset the wptr and unlock the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Call radeon_ring_undo() then unlock the ring (all asics).
+ */
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_undo(ring);
mutex_unlock(&rdev->ring_lock);
}
+/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
@@ -296,6 +486,13 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
}
}
+/**
+ * radeon_ring_lockup_update - update lockup variables
+ *
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Update the last rptr value and timestamp (all asics).
+ */
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
ring->last_rptr = ring->rptr;
@@ -349,6 +546,116 @@ bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return false;
}
+/**
+ * radeon_ring_backup - Back up the content of a ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: the ring we want to back up
+ *
+ * Saves all unprocessed commands from a ring; returns the number of dwords saved.
+ */
+unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
+ uint32_t **data)
+{
+ unsigned size, ptr, i;
+
+ /* just in case lock the ring */
+ mutex_lock(&rdev->ring_lock);
+ *data = NULL;
+
+ if (ring->ring_obj == NULL) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* it doesn't make sense to save anything if all fences are signaled */
+ if (!radeon_fence_count_emitted(rdev, ring->idx)) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* calculate the number of dw on the ring */
+ if (ring->rptr_save_reg)
+ ptr = RREG32(ring->rptr_save_reg);
+ else if (rdev->wb.enabled)
+ ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
+ else {
+ /* no way to read back the next rptr */
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ size = ring->wptr + (ring->ring_size / 4);
+ size -= ptr;
+ size &= ring->ptr_mask;
+ if (size == 0) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+
+ /* and then save the content of the ring */
+ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+ if (!*data) {
+ mutex_unlock(&rdev->ring_lock);
+ return 0;
+ }
+ for (i = 0; i < size; ++i) {
+ (*data)[i] = ring->ring[ptr++];
+ ptr &= ring->ptr_mask;
+ }
+
+ mutex_unlock(&rdev->ring_lock);
+ return size;
+}
+
+/**
+ * radeon_ring_restore - append saved commands to the ring again
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring to append commands to
+ * @size: number of dwords we want to write
+ * @data: saved commands
+ *
+ * Allocates space on the ring and restores the previously saved commands.
+ */
+int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
+ unsigned size, uint32_t *data)
+{
+ int i, r;
+
+ if (!size || !data)
+ return 0;
+
+ /* restore the saved ring content */
+ r = radeon_ring_lock(rdev, ring, size);
+ if (r)
+ return r;
+
+ for (i = 0; i < size; ++i) {
+ radeon_ring_write(ring, data[i]);
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ kfree(data);
+ return 0;
+}
+
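
A minimal sketch of how these two helpers might be paired around a GPU reset; example_reset_hw() is a placeholder for the real reset/re-init path and error handling is trimmed.

	unsigned size;
	uint32_t *data;

	size = radeon_ring_backup(rdev, ring, &data);	/* save unprocessed dwords */
	example_reset_hw(rdev);				/* placeholder: reset + ring re-init */
	radeon_ring_restore(rdev, ring, size, data);	/* re-emit saved dwords, frees data */
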
+/**
+ * radeon_ring_init - init driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @ring_size: size of the ring
+ * @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
+ * @nop: nop packet for this ring
+ *
+ * Initialize the driver information for the selected ring (all asics).
+ * Returns 0 on success, error on failure.
+ */
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
@@ -391,12 +698,26 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
+ if (rdev->wb.enabled) {
+ u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
+ ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
+ ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
+ }
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
+ radeon_ring_lockup_update(ring);
return 0;
}
+/**
+ * radeon_ring_fini - tear down the driver ring struct.
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Tear down the driver information for the selected ring (all asics).
+ */
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
@@ -438,6 +759,10 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ if (ring->rptr_save_reg) {
+ seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+ RREG32(ring->rptr_save_reg));
+ }
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 32059b745728..4e771240fdd0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -54,7 +54,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
{
int i, r;
- spin_lock_init(&sa_manager->lock);
+ init_waitqueue_head(&sa_manager->wq);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
@@ -211,6 +211,39 @@ static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
return false;
}
+/**
+ * radeon_sa_event - Check if we can stop waiting
+ *
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Check whether there is either a fence we can wait for or
+ * enough free memory to satisfy the allocation directly.
+ */
+static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
+ unsigned size, unsigned align)
+{
+ unsigned soffset, eoffset, wasted;
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!list_empty(&sa_manager->flist[i])) {
+ return true;
+ }
+ }
+
+ soffset = radeon_sa_bo_hole_soffset(sa_manager);
+ eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
+ wasted = (align - (soffset % align)) % align;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ return true;
+ }
+
+ return false;
+}
+
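
The wait/wake usage below follows the generic locked-waitqueue helpers; here is a hedged sketch of that shape, where mgr and resource_available() are placeholders standing in for the sa_manager and radeon_sa_event().

	spin_lock(&mgr->wq.lock);
	if (!resource_available(mgr)) {
		/* sleeps with wq.lock released, retakes it before returning */
		r = wait_event_interruptible_locked(mgr->wq,
						    resource_available(mgr));
		if (r) {
			spin_unlock(&mgr->wq.lock);
			return r;	/* interrupted by a signal */
		}
	}
	/* ... claim the resource while still holding wq.lock ... */
	spin_unlock(&mgr->wq.lock);

	/* release side */
	spin_lock(&mgr->wq.lock);
	/* ... give the resource back ... */
	wake_up_all_locked(&mgr->wq);
	spin_unlock(&mgr->wq.lock);
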
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
struct radeon_fence **fences,
unsigned *tries)
@@ -297,8 +330,8 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
- spin_lock(&sa_manager->lock);
- do {
+ spin_lock(&sa_manager->wq.lock);
+ while(1) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
tries[i] = 0;
@@ -309,30 +342,34 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
return 0;
}
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- if (block) {
- spin_unlock(&sa_manager->lock);
- r = radeon_fence_wait_any(rdev, fences, false);
- spin_lock(&sa_manager->lock);
- if (r) {
- /* if we have nothing to wait for we
- are practically out of memory */
- if (r == -ENOENT) {
- r = -ENOMEM;
- }
- goto out_err;
- }
+ if (!block) {
+ break;
+ }
+
+ spin_unlock(&sa_manager->wq.lock);
+ r = radeon_fence_wait_any(rdev, fences, false);
+ spin_lock(&sa_manager->wq.lock);
+ /* if we have nothing to wait for, block */
+ if (r == -ENOENT) {
+ r = wait_event_interruptible_locked(
+ sa_manager->wq,
+ radeon_sa_event(sa_manager, size, align)
+ );
+ }
+ if (r) {
+ goto out_err;
}
- } while (block);
+ };
out_err:
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
@@ -348,15 +385,16 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
}
sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->lock);
- if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
&sa_manager->flist[fence->ring]);
} else {
radeon_sa_bo_remove_locked(*sa_bo);
}
- spin_unlock(&sa_manager->lock);
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
*sa_bo = NULL;
}
@@ -366,7 +404,7 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
{
struct radeon_sa_bo *i;
- spin_lock(&sa_manager->lock);
+ spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
@@ -381,6 +419,6 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
}
seq_printf(m, "\n");
}
- spin_unlock(&sa_manager->lock);
+ spin_unlock(&sa_manager->wq.lock);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index e2ace5dce117..7cc78de6ddc3 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -68,70 +68,49 @@ void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
+/* caller must hold ring lock */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
- bool sync_to[RADEON_NUM_RINGS],
- int dst_ring)
+ int signaler, int waiter)
{
- int i = 0, r;
+ int r;
- mutex_lock(&rdev->ring_lock);
- r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
- if (r) {
- goto error;
+ /* no need to signal and wait on the same ring */
+ if (signaler == waiter) {
+ return 0;
}
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- /* no need to sync to our own or unused rings */
- if (!sync_to[i] || i == dst_ring)
- continue;
-
- /* prevent GPU deadlocks */
- if (!rdev->ring[i].ready) {
- dev_err(rdev->dev, "Trying to sync to a disabled ring!");
- r = -EINVAL;
- goto error;
- }
-
- r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
- if (r) {
- goto error;
- }
-
- radeon_semaphore_emit_signal(rdev, i, semaphore);
- radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[signaler].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!");
+ return -EINVAL;
+ }
- radeon_ring_commit(rdev, &rdev->ring[i]);
+ r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+ if (r) {
+ return r;
}
+ radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+ radeon_ring_commit(rdev, &rdev->ring[signaler]);
- radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
- mutex_unlock(&rdev->ring_lock);
+ /* we assume the caller has already allocated space on the waiter's ring */
+ radeon_semaphore_emit_wait(rdev, waiter, semaphore);
return 0;
-
-error:
- /* unlock all locks taken so far */
- for (--i; i >= 0; --i) {
- if (sync_to[i] || i == dst_ring) {
- radeon_ring_undo(&rdev->ring[i]);
- }
- }
- radeon_ring_undo(&rdev->ring[dst_ring]);
- mutex_unlock(&rdev->ring_lock);
- return r;
}
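
A hedged sketch of the new calling convention, mirroring the loop added to radeon_ib_schedule() earlier in this patch: the caller holds the ring lock and has already reserved space on the waiter ring, and fence, sem and waiter are assumed to be set up elsewhere.

	if (radeon_fence_need_sync(fence, waiter)) {
		r = radeon_semaphore_sync_rings(rdev, sem, fence->ring, waiter);
		if (r)
			return r;	/* caller undoes/unlocks the waiter ring */
		radeon_fence_note_sync(fence, waiter);
	}
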
void radeon_semaphore_free(struct radeon_device *rdev,
- struct radeon_semaphore *semaphore,
+ struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
{
- if (semaphore == NULL) {
+ if (semaphore == NULL || *semaphore == NULL) {
return;
}
- if (semaphore->waiters > 0) {
+ if ((*semaphore)->waiters > 0) {
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
- " hardware lockup imminent!\n", semaphore);
+ " hardware lockup imminent!\n", *semaphore);
}
- radeon_sa_bo_free(rdev, &semaphore->sa_bo, fence);
- kfree(semaphore);
+ radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+ kfree(*semaphore);
+ *semaphore = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index efff929ea49d..7c16540c10ff 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -106,13 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
- goto out_cleanup;
- }
-
- r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+ r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
goto out_cleanup;
@@ -155,13 +149,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
- goto out_cleanup;
- }
-
- r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, fence);
+ r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
goto out_cleanup;
@@ -241,36 +229,33 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
{
struct radeon_fence *fence1 = NULL, *fence2 = NULL;
struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
int r;
- r = radeon_fence_create(rdev, &fence1, ridxA);
+ r = radeon_semaphore_create(rdev, &semaphore);
if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
+ DRM_ERROR("Failed to create semaphore\n");
goto out_cleanup;
}
- r = radeon_fence_create(rdev, &fence2, ridxA);
+
+ r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
goto out_cleanup;
}
-
- r = radeon_semaphore_create(rdev, &semaphore);
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fence1, ringA->idx);
if (r) {
- DRM_ERROR("Failed to create semaphore\n");
+ DRM_ERROR("Failed to emit fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
-
- r = radeon_ring_lock(rdev, ringA, 64);
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fence2, ringA->idx);
if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ DRM_ERROR("Failed to emit fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence1);
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fence2);
radeon_ring_unlock_commit(rdev, ringA);
mdelay(1000);
@@ -285,7 +270,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence1, false);
@@ -306,7 +291,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence2, false);
@@ -316,8 +301,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
}
out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore, NULL);
+ radeon_semaphore_free(rdev, &semaphore, NULL);
if (fence1)
radeon_fence_unref(&fence1);
@@ -336,23 +320,9 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
{
struct radeon_fence *fenceA = NULL, *fenceB = NULL;
struct radeon_semaphore *semaphore = NULL;
- int ridxA = radeon_ring_index(rdev, ringA);
- int ridxB = radeon_ring_index(rdev, ringB);
- int ridxC = radeon_ring_index(rdev, ringC);
bool sigA, sigB;
int i, r;
- r = radeon_fence_create(rdev, &fenceA, ridxA);
- if (r) {
- DRM_ERROR("Failed to create sync fence 1\n");
- goto out_cleanup;
- }
- r = radeon_fence_create(rdev, &fenceB, ridxB);
- if (r) {
- DRM_ERROR("Failed to create sync fence 2\n");
- goto out_cleanup;
- }
-
r = radeon_semaphore_create(rdev, &semaphore);
if (r) {
DRM_ERROR("Failed to create semaphore\n");
@@ -361,20 +331,30 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringA, 64);
if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit sync fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
- radeon_fence_emit(rdev, fenceA);
radeon_ring_unlock_commit(rdev, ringA);
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+ DRM_ERROR("Failed to lock ring B %d\n", ringB->idx);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
+ r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit sync fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringB);
goto out_cleanup;
}
- radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
- radeon_fence_emit(rdev, fenceB);
radeon_ring_unlock_commit(rdev, ringB);
mdelay(1000);
@@ -393,7 +373,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringC);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC);
for (i = 0; i < 30; ++i) {
@@ -419,7 +399,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
DRM_ERROR("Failed to lock ring B %p\n", ringC);
goto out_cleanup;
}
- radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
radeon_ring_unlock_commit(rdev, ringC);
mdelay(1000);
@@ -436,8 +416,7 @@ void radeon_test_ring_sync2(struct radeon_device *rdev,
}
out_cleanup:
- if (semaphore)
- radeon_semaphore_free(rdev, semaphore, NULL);
+ radeon_semaphore_free(rdev, &semaphore, NULL);
if (fenceA)
radeon_fence_unref(&fenceA);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c94a2257761f..5b71c716d83f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -222,15 +222,11 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
- struct radeon_fence *fence, *old_fence;
- struct radeon_semaphore *sem = NULL;
- int r;
+ struct radeon_fence *fence;
+ int r, ridx;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
- if (unlikely(r)) {
- return r;
- }
+ ridx = radeon_copy_ring_index(rdev);
old_start = old_mem->start << PAGE_SHIFT;
new_start = new_mem->start << PAGE_SHIFT;
@@ -243,7 +239,6 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- radeon_fence_unref(&fence);
return -EINVAL;
}
switch (new_mem->mem_type) {
@@ -255,46 +250,23 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
- radeon_fence_unref(&fence);
return -EINVAL;
}
- if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
+ if (!rdev->ring[ridx].ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
- radeon_fence_unref(&fence);
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
/* sync other rings */
- old_fence = bo->sync_obj;
- if (old_fence && old_fence->ring != fence->ring
- && !radeon_fence_signaled(old_fence)) {
- bool sync_to_ring[RADEON_NUM_RINGS] = { };
- sync_to_ring[old_fence->ring] = true;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- radeon_fence_unref(&fence);
- return r;
- }
-
- r = radeon_semaphore_sync_rings(rdev, sem,
- sync_to_ring, fence->ring);
- if (r) {
- radeon_semaphore_free(rdev, sem, NULL);
- radeon_fence_unref(&fence);
- return r;
- }
- }
-
+ fence = bo->sync_obj;
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
- fence);
+ &fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
evict, no_wait_reserve, no_wait_gpu, new_mem);
- radeon_semaphore_free(rdev, sem, fence);
radeon_fence_unref(&fence);
return r;
}
@@ -762,9 +734,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
- if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
- rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
- }
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
r = radeon_ttm_debugfs_init(rdev);
if (r) {
@@ -825,9 +795,9 @@ static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
}
rdev = radeon_get_rdev(bo->bdev);
- mutex_lock(&rdev->vram_mutex);
+ down_read(&rdev->pm.mclk_lock);
r = ttm_vm_ops->fault(vma, vmf);
- mutex_unlock(&rdev->vram_mutex);
+ up_read(&rdev->pm.mclk_lock);
return r;
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 5e659b034d9a..20bfbda7b3f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -744,15 +744,6 @@ r600 0x9400
0x00028C38 CB_CLRCMP_DST
0x00028C3C CB_CLRCMP_MSK
0x00028C34 CB_CLRCMP_SRC
-0x00028100 CB_COLOR0_MASK
-0x00028104 CB_COLOR1_MASK
-0x00028108 CB_COLOR2_MASK
-0x0002810C CB_COLOR3_MASK
-0x00028110 CB_COLOR4_MASK
-0x00028114 CB_COLOR5_MASK
-0x00028118 CB_COLOR6_MASK
-0x0002811C CB_COLOR7_MASK
-0x00028808 CB_COLOR_CONTROL
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN
0x00028424 CB_FOG_RED
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a464eb5e2df2..2752f7f78237 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -426,13 +426,11 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -470,7 +468,6 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -482,7 +479,7 @@ void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -550,20 +547,14 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs400_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index e95c5e61d4e2..5301b3df8466 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -294,6 +294,7 @@ void rs600_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned enable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -301,26 +302,25 @@ void rs600_hpd_init(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
- rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
- rdev->irq.hpd[1] = true;
break;
default:
break;
}
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
- if (rdev->irq.installed)
- rs600_irq_set(rdev);
+ radeon_irq_kms_enable_hpd(rdev, enable);
}
void rs600_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
+ unsigned disable = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -328,17 +328,17 @@ void rs600_hpd_fini(struct radeon_device *rdev)
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
- rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
- rdev->irq.hpd[1] = false;
break;
default:
break;
}
+ disable |= 1 << radeon_connector->hpd.hpd;
}
+ radeon_irq_kms_disable_hpd(rdev, disable);
}
int rs600_asic_reset(struct radeon_device *rdev)
@@ -564,18 +564,18 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
@@ -686,7 +686,6 @@ int rs600_irq_process(struct radeon_device *rdev)
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
@@ -696,7 +695,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -705,7 +704,7 @@ int rs600_irq_process(struct radeon_device *rdev)
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
@@ -908,13 +907,11 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -956,7 +953,6 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -970,7 +966,7 @@ void rs600_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -1038,20 +1034,14 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs600_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs600_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 159b6a43fda0..3b663fcfe061 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,13 +637,11 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -685,7 +683,6 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
- radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -699,7 +696,7 @@ void rs690_fini(struct radeon_device *rdev)
r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -768,20 +765,14 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rs690_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 7f08cedb5333..aa8ef491ef3c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
- save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
- save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
- WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
@@ -408,13 +395,11 @@ static int rv515_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
return 0;
}
@@ -469,7 +454,7 @@ void rv515_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_gem_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
@@ -543,20 +528,14 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv515_startup(rdev);
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b4f51c569c36..ca8ffec10ff6 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -358,8 +358,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r700_cp_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
/*
@@ -951,13 +953,11 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
+ }
r = r600_audio_init(rdev);
if (r) {
@@ -994,9 +994,6 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- radeon_ib_pool_suspend(rdev);
- r600_blit_suspend(rdev);
- /* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
@@ -1076,20 +1073,14 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
-
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
@@ -1104,7 +1095,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -1121,6 +1112,8 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
+ u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -1135,6 +1128,15 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
+
+ if (!(mask & DRM_PCIE_SPEED_50))
+ return;
+
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 0b0279291a73..0139e227e3c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev)
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
- }
- if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
- rdev->config.si.tile_config |= 1 << 4;
- else
+ }
+ switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
+ case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
+ break;
+ case 1: /* eight banks */
+ rdev->config.si.tile_config |= 1 << 4;
+ break;
+ case 2: /* sixteen banks */
+ default:
+ rdev->config.si.tile_config |= 2 << 4;
+ break;
+ }
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
@@ -1762,13 +1770,34 @@ void si_fence_ring_emit(struct radeon_device *rdev,
*/
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 header;
- if (ib->is_const_ib)
+ if (ib->is_const_ib) {
+ /* set switch buffer packet before const IB */
+ radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ radeon_ring_write(ring, 0);
+
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
- else
+ } else {
+ u32 next_rptr;
+ if (ring->rptr_save_reg) {
+ next_rptr = ring->wptr + 3 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((ring->rptr_save_reg -
+ PACKET3_SET_CONFIG_REG_START) >> 2));
+ radeon_ring_write(ring, next_rptr);
+ } else if (rdev->wb.enabled) {
+ next_rptr = ring->wptr + 5 + 4 + 8;
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+ radeon_ring_write(ring, (1 << 8));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+ }
radeon_ring_write(ring, header);
radeon_ring_write(ring,
@@ -1779,18 +1808,20 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
- /* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm_id);
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
- PACKET3_TC_ACTION_ENA |
- PACKET3_SH_KCACHE_ACTION_ENA |
- PACKET3_SH_ICACHE_ACTION_ENA);
- radeon_ring_write(ring, 0xFFFFFFFF);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ if (!ib->is_const_ib) {
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ }
}
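
One way to read the 3 + 4 + 8 / 5 + 4 + 8 dword budget used for next_rptr above, based on the packets this function emits (an informal annotation, not authoritative):

	/*
	 * next_rptr points just past everything emitted for this IB:
	 *   3 dw  SET_CONFIG_REG write of next_rptr (or 5 dw for the WRITE_DATA form)
	 * + 4 dw  INDIRECT_BUFFER packet: header, addr low, addr high, length/vm_id
	 * + 8 dw  GART read-cache flush: SET_CONFIG_REG (3 dw) + SURFACE_SYNC (5 dw)
	 */
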
/*
@@ -1917,10 +1948,20 @@ static int si_cp_start(struct radeon_device *rdev)
static void si_cp_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
si_cp_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
+
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ radeon_ring_fini(rdev, ring);
+ radeon_scratch_free(rdev, ring->rptr_save_reg);
}
static int si_cp_resume(struct radeon_device *rdev)
@@ -2702,7 +2743,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
- switch (ib->fence->ring) {
+ switch (ib->ring) {
case RADEON_RING_TYPE_GFX_INDEX:
ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
break;
@@ -2711,7 +2752,7 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
break;
default:
- dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->fence->ring);
+ dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
ret = -EINVAL;
break;
}
@@ -2942,7 +2983,6 @@ static void si_disable_interrupts(struct radeon_device *rdev)
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
- rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
@@ -3093,45 +3133,45 @@ int si_irq_set(struct radeon_device *rdev)
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
/* enable CP interrupts on all rings */
- if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
- if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
- rdev->irq.pflip[0]) {
+ atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
- rdev->irq.pflip[1]) {
+ atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("si_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
- rdev->irq.pflip[2]) {
+ atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("si_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
- rdev->irq.pflip[3]) {
+ atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("si_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
- rdev->irq.pflip[4]) {
+ atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("si_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
- rdev->irq.pflip[5]) {
+ atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("si_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
@@ -3359,29 +3399,27 @@ int si_irq_process(struct radeon_device *rdev)
u32 rptr;
u32 src_id, src_data, ring_id;
u32 ring_index;
- unsigned long flags;
bool queue_hotplug = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
wptr = si_get_ih_wptr(rdev);
+
+restart_ih:
+ /* is somebody else already processing irqs? */
+ if (atomic_xchg(&rdev->ih.lock, 1))
+ return IRQ_NONE;
+
rptr = rdev->ih.rptr;
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
- spin_lock_irqsave(&rdev->ih.lock, flags);
- if (rptr == wptr) {
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
- return IRQ_NONE;
- }
-restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
/* display interrupts */
si_irq_ack(rdev);
- rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
@@ -3399,7 +3437,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[0])
+ if (atomic_read(&rdev->irq.pflip[0]))
radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -3425,7 +3463,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[1])
+ if (atomic_read(&rdev->irq.pflip[1]))
radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -3451,7 +3489,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[2])
+ if (atomic_read(&rdev->irq.pflip[2]))
radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
@@ -3477,7 +3515,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[3])
+ if (atomic_read(&rdev->irq.pflip[3]))
radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
@@ -3503,7 +3541,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[4])
+ if (atomic_read(&rdev->irq.pflip[4]))
radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
@@ -3529,7 +3567,7 @@ restart_ih:
rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
- if (rdev->irq.pflip[5])
+ if (atomic_read(&rdev->irq.pflip[5]))
radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
@@ -3620,7 +3658,6 @@ restart_ih:
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
- rdev->pm.gui_idle = true;
wake_up(&rdev->irq.idle_queue);
break;
default:
@@ -3632,15 +3669,17 @@ restart_ih:
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
- /* make sure wptr hasn't changed while processing */
- wptr = si_get_ih_wptr(rdev);
- if (wptr != rdev->ih.wptr)
- goto restart_ih;
if (queue_hotplug)
schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
- spin_unlock_irqrestore(&rdev->ih.lock, flags);
+ atomic_set(&rdev->ih.lock, 0);
+
+ /* make sure wptr hasn't changed while processing */
+ wptr = si_get_ih_wptr(rdev);
+ if (wptr != rptr)
+ goto restart_ih;
+
return IRQ_HANDLED;
}
@@ -3752,35 +3791,18 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_start(rdev);
- if (r)
- return r;
-
- r = radeon_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 0\n", r);
- rdev->accel_working = false;
- return r;
- }
-
- r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
+ r = radeon_ib_pool_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 1\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
- r = radeon_ib_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
+ r = radeon_vm_manager_init(rdev);
if (r) {
- DRM_ERROR("radeon: failed testing IB (%d) on CP ring 2\n", r);
- rdev->accel_working = false;
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
return r;
}
- r = radeon_vm_manager_start(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -3809,12 +3831,6 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
- /* FIXME: we should wait for ring to be empty */
- radeon_ib_pool_suspend(rdev);
- radeon_vm_manager_suspend(rdev);
-#if 0
- r600_blit_suspend(rdev);
-#endif
si_cp_enable(rdev, false);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
@@ -3903,17 +3919,7 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- }
- r = radeon_vm_manager_init(rdev);
- if (r) {
- dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
- }
-
r = si_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
@@ -3921,7 +3927,7 @@ int si_init(struct radeon_device *rdev)
si_irq_fini(rdev);
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);
@@ -3950,7 +3956,7 @@ void si_fini(struct radeon_device *rdev)
si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
- r100_ib_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -3962,3 +3968,22 @@ void si_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/**
+ * si_get_gpu_clock - return GPU clock counter snapshot
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Fetches a GPU clock counter snapshot (SI).
+ * Returns the 64 bit clock counter snapshot.
+ */
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+{
+ uint64_t clock;
+
+ mutex_lock(&rdev->gpu_clock_mutex);
+ WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ mutex_unlock(&rdev->gpu_clock_mutex);
+ return clock;
+}
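
The reworked si_irq_process() above drops the IH spinlock and the saved rdev->ih.wptr in favour of an atomic try-lock plus a post-release re-check of the write pointer: a second invocation either bails out immediately or is picked up by the restart_ih loop. A minimal user-space sketch of that pattern, assuming C11 atomics and made-up stand-ins (get_wptr, process_entry) for the hardware ring accessors:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ih_lock;          /* 0 = free, 1 = someone is draining the ring */
static unsigned int ih_rptr;
static unsigned int hw_wptr;        /* stand-in for the IH_RB_WPTR register */

static unsigned int get_wptr(void) { return hw_wptr; }
static void process_entry(unsigned int rptr) { printf("entry at %u\n", rptr); }

static bool irq_process(void)
{
	unsigned int wptr = get_wptr();

restart:
	/* try-lock: is somebody else already processing irqs? */
	if (atomic_exchange(&ih_lock, 1))
		return false;

	while (ih_rptr != wptr) {
		process_entry(ih_rptr);
		ih_rptr = (ih_rptr + 16) & 0xffff;   /* 16-byte ring entries, toy mask */
	}

	atomic_store(&ih_lock, 0);

	/* the ring may have advanced while it was being drained */
	wptr = get_wptr();
	if (wptr != ih_rptr)
		goto restart;
	return true;
}

int main(void)
{
	hw_wptr = 48;                   /* pretend three entries are pending */
	return irq_process() ? 0 : 1;
}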
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index db4067962868..ef4815c27b1c 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -698,6 +698,9 @@
#define RLC_UCODE_ADDR 0xC32C
#define RLC_UCODE_DATA 0xC330
+#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
+#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
+#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
@@ -901,5 +904,6 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SET_CE_DE_COUNTERS 0x89
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
+#define PACKET3_SWITCH_BUFFER 0x8B
#endif
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 6eb507a5d130..1efbb9075837 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1050,6 +1050,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
+ int release_idlelock = 0;
int i;
if (!dma)
@@ -1059,7 +1060,10 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
if (!dma->buflist)
return;
- /*i830_flush_queue(dev); */
+ if (file_priv->master && file_priv->master->lock.hw_lock) {
+ drm_idlelock_take(&file_priv->master->lock);
+ release_idlelock = 1;
+ }
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
@@ -1075,7 +1079,8 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
}
}
- drm_core_reclaim_buffers(dev, file_priv);
+ if (release_idlelock)
+ drm_idlelock_release(&file_priv->master->lock);
}
struct drm_ioctl_desc savage_ioctls[] = {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 89afe0b83643..d31d4cca9a4c 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -52,9 +52,9 @@ static struct drm_driver driver = {
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.load = savage_driver_load,
.firstopen = savage_driver_firstopen,
+ .preclose = savage_reclaim_buffers,
.lastclose = savage_driver_lastclose,
.unload = savage_driver_unload,
- .reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = &savage_driver_fops,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index dd14cd1a0033..7f119870147c 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -105,10 +105,9 @@ static struct drm_driver driver = {
.load = sis_driver_load,
.unload = sis_driver_unload,
.open = sis_driver_open,
+ .preclose = sis_reclaim_buffers_locked,
.postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
- .reclaim_buffers = NULL,
- .reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
.fops = &sis_driver_fops,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index dd4a316c3d74..2c231070d250 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -74,7 +74,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+ DRM_DEBUG("offset = %lu, size = %lu\n", fb->offset, fb->size);
return 0;
}
@@ -161,7 +161,7 @@ fail_alloc:
mem->size = 0;
mem->free = 0;
- DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
+ DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size,
mem->offset);
return retval;
@@ -215,7 +215,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+ DRM_DEBUG("offset = %lu, size = %lu\n", agp->offset, agp->size);
return 0;
}
@@ -321,14 +321,20 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
struct sis_file_private *file_priv = file->driver_priv;
struct sis_memblock *entry, *next;
+ if (!(file->minor->master && file->master->lock.hw_lock))
+ return;
+
+ drm_idlelock_take(&file->master->lock);
+
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
+ drm_idlelock_release(&file->master->lock);
+
return;
}
- if (dev->driver->dma_quiescent)
- dev->driver->dma_quiescent(dev);
+ sis_idle(dev);
list_for_each_entry_safe(entry, next, &file_priv->obj_list,
@@ -343,6 +349,9 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
kfree(entry);
}
mutex_unlock(&dev->struct_mutex);
+
+ drm_idlelock_release(&file->master->lock);
+
return;
}
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 1613c78544c0..90f6b13acfac 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -54,7 +54,6 @@ static const struct file_operations tdfx_driver_fops = {
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
- .reclaim_buffers = drm_core_reclaim_buffers,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 0b5e096d39a6..56e0bf31d425 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -1,6 +1,7 @@
config DRM_UDL
tristate "DisplayLink"
depends on DRM && EXPERIMENTAL
+ depends on USB_ARCH_HAS_HCD
select DRM_USB
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index 56e75f0f1df5..0731ab2e6c06 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -27,7 +27,7 @@ static void udl_encoder_disable(struct drm_encoder *encoder)
}
static bool udl_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 7bd65bdd15a8..291ecc145585 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
/* need to attach */
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
- return ERR_PTR(PTR_ERR(attach));
+ return ERR_CAST(attach);
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 0d7816789da1..9159d48d1dfd 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -45,12 +45,25 @@ static char *udl_vidreg_unlock(char *buf)
* 0x01 H and V sync off (screen blank but powered)
* 0x07 DPMS powerdown (requires modeset to come back)
*/
-static char *udl_enable_hvsync(char *buf, bool enable)
+static char *udl_set_blank(char *buf, int dpms_mode)
{
- if (enable)
- return udl_set_register(buf, 0x1F, 0x00);
- else
- return udl_set_register(buf, 0x1F, 0x07);
+ u8 reg;
+ switch (dpms_mode) {
+ case DRM_MODE_DPMS_OFF:
+ reg = 0x07;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ reg = 0x05;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ reg = 0x01;
+ break;
+ case DRM_MODE_DPMS_ON:
+ reg = 0x00;
+ break;
+ }
+
+ return udl_set_register(buf, 0x1f, reg);
}
static char *udl_set_color_depth(char *buf, u8 selection)
@@ -199,6 +212,20 @@ static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
return wrptr;
}
+static char *udl_dummy_render(char *wrptr)
+{
+ *wrptr++ = 0xAF;
+ *wrptr++ = 0x6A; /* copy */
+ *wrptr++ = 0x00; /* from addr */
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x01; /* one pixel */
+ *wrptr++ = 0x00; /* to address */
+ *wrptr++ = 0x00;
+ *wrptr++ = 0x00;
+ return wrptr;
+}
+
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -235,9 +262,10 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
- buf = udl_enable_hvsync(buf, false);
+ buf = udl_set_blank(buf, mode);
buf = udl_vidreg_unlock(buf);
+ buf = udl_dummy_render(buf);
retval = udl_submit_urb(dev, urb, buf - (char *)
urb->transfer_buffer);
} else {
@@ -251,7 +279,7 @@ static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -306,9 +334,11 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
- wrptr = udl_enable_hvsync(wrptr, true);
+ wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
wrptr = udl_vidreg_unlock(wrptr);
+ wrptr = udl_dummy_render(wrptr);
+
ufb->active_16 = true;
if (old_fb) {
struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
@@ -324,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc,
static void udl_crtc_disable(struct drm_crtc *crtc)
{
-
-
+ udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void udl_crtc_destroy(struct drm_crtc *crtc)
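
The udl_set_blank() change above folds the old on/off hvsync helper into a per-DPMS-mode write of register 0x1f; the header comment documents 0x00, 0x01 and 0x07, while the new switch also writes 0x05 for standby. A small stand-alone sketch of that mapping, with local stand-ins for the DRM DPMS constants and, unlike the driver, a default arm:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins; the values mirror DRM_MODE_DPMS_ON..OFF (0..3). */
enum { DPMS_ON = 0, DPMS_STANDBY = 1, DPMS_SUSPEND = 2, DPMS_OFF = 3 };

static uint8_t blank_reg_for_dpms(int dpms)
{
	switch (dpms) {
	case DPMS_OFF:     return 0x07;	/* powerdown, needs a modeset to come back */
	case DPMS_STANDBY: return 0x05;
	case DPMS_SUSPEND: return 0x01;	/* H and V sync off, screen blank */
	case DPMS_ON:
	default:           return 0x00;	/* sync on; default arm added here only */
	}
}

int main(void)
{
	for (int m = DPMS_ON; m <= DPMS_OFF; m++)
		printf("dpms %d -> 0x%02x\n", m, blank_reg_for_dpms(m));
	return 0;
}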
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 02661f35f7a0..e927b4c052f5 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -75,6 +75,7 @@ static struct drm_driver driver = {
.load = via_driver_load,
.unload = via_driver_unload,
.open = via_driver_open,
+ .preclose = via_reclaim_buffers_locked,
.postclose = via_driver_postclose,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
@@ -85,9 +86,6 @@ static struct drm_driver driver = {
.irq_uninstall = via_driver_irq_uninstall,
.irq_handler = via_driver_irq_handler,
.dma_quiescent = via_driver_dma_quiescent,
- .reclaim_buffers = drm_core_reclaim_buffers,
- .reclaim_buffers_locked = NULL,
- .reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.ioctls = via_ioctls,
.fops = &via_driver_fops,
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index a3574d09a07d..acfcb358e7b7 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -215,14 +215,20 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
struct via_file_private *file_priv = file->driver_priv;
struct via_memblock *entry, *next;
+ if (!(file->minor->master && file->master->lock.hw_lock))
+ return;
+
+ drm_idlelock_take(&file->master->lock);
+
mutex_lock(&dev->struct_mutex);
if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
+ drm_idlelock_release(&file->master->lock);
+
return;
}
- if (dev->driver->dma_quiescent)
- dev->driver->dma_quiescent(dev);
+ via_driver_dma_quiescent(dev);
list_for_each_entry_safe(entry, next, &file_priv->obj_list,
owner_list) {
@@ -231,5 +237,8 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
kfree(entry);
}
mutex_unlock(&dev->struct_mutex);
+
+ drm_idlelock_release(&file->master->lock);
+
return;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ee24d216aa85..4d9edead01ac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -769,10 +769,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
goto out_no_tfile;
file_priv->driver_priv = vmw_fp;
-
- if (unlikely(dev_priv->bdev.dev_mapping == NULL))
- dev_priv->bdev.dev_mapping =
- file_priv->filp->f_path.dentry->d_inode->i_mapping;
+ dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
@@ -1147,7 +1144,6 @@ static struct drm_driver driver = {
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
- .reclaim_buffers_locked = NULL,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
.dma_quiescent = NULL, /*vmw_dma_quiescent, */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6b0078ffa763..c50724bd30f6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc,
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
- struct drm_file *file_priv = event->base.file_priv;
+ struct drm_file *file_priv ;
struct vmw_fence_obj *fence = NULL;
struct drm_clip_rect clips;
int ret;
+ if (event == NULL)
+ return -EINVAL;
+
/* require ScreenObject support for page flipping */
if (!dev_priv->sou_priv)
return -ENOSYS;
+ file_priv = event->base.file_priv;
if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
return -EINVAL;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 5b3c7d135dc9..e25cf31faab2 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = {
.clients = LIST_HEAD_INIT(vgasr_priv.clients),
};
-int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
-{
- mutex_lock(&vgasr_mutex);
- if (vgasr_priv.handler) {
- mutex_unlock(&vgasr_mutex);
- return -EINVAL;
- }
-
- vgasr_priv.handler = handler;
- mutex_unlock(&vgasr_mutex);
- return 0;
-}
-EXPORT_SYMBOL(vga_switcheroo_register_handler);
-
-void vga_switcheroo_unregister_handler(void)
+static bool vga_switcheroo_ready(void)
{
- mutex_lock(&vgasr_mutex);
- vgasr_priv.handler = NULL;
- mutex_unlock(&vgasr_mutex);
+ /* we're ready if we get two clients + handler */
+ return !vgasr_priv.active &&
+ vgasr_priv.registered_clients == 2 && vgasr_priv.handler;
}
-EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static void vga_switcheroo_enable(void)
{
@@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void)
struct vga_switcheroo_client *client;
/* call the handler to init */
- vgasr_priv.handler->init();
+ if (vgasr_priv.handler->init)
+ vgasr_priv.handler->init();
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->id != -1)
@@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void)
vgasr_priv.active = true;
}
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+{
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.handler) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+
+ vgasr_priv.handler = handler;
+ if (vga_switcheroo_ready()) {
+ printk(KERN_INFO "vga_switcheroo: enabled\n");
+ vga_switcheroo_enable();
+ }
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_handler);
+
+void vga_switcheroo_unregister_handler(void)
+{
+ mutex_lock(&vgasr_mutex);
+ vgasr_priv.handler = NULL;
+ if (vgasr_priv.active) {
+ pr_info("vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
+ }
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id, bool active)
@@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev,
if (client_is_vga(client))
vgasr_priv.registered_clients++;
- /* if we get two clients + handler */
- if (!vgasr_priv.active &&
- vgasr_priv.registered_clients == 2 && vgasr_priv.handler) {
+ if (vga_switcheroo_ready()) {
printk(KERN_INFO "vga_switcheroo: enabled\n");
vga_switcheroo_enable();
}
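
The vga_switcheroo rework above moves handler registration below vga_switcheroo_enable() and factors the enable condition into vga_switcheroo_ready(), so the switcheroo can come up from whichever registration completes the set last. A toy model of that ordering, with made-up names rather than the driver API:

#include <stdbool.h>
#include <stdio.h>

struct switcheroo {
	bool active;
	bool have_handler;
	int registered_clients;
};

/* mirror of the ready test: two VGA clients + a handler, and not yet active */
static bool ready(const struct switcheroo *p)
{
	return !p->active && p->have_handler && p->registered_clients == 2;
}

static void try_enable(struct switcheroo *p, const char *who)
{
	if (ready(p)) {
		printf("enabled after %s registration\n", who);
		p->active = true;
	}
}

static void register_handler(struct switcheroo *p)
{
	p->have_handler = true;
	try_enable(p, "handler");
}

static void register_client(struct switcheroo *p)
{
	p->registered_clients++;
	try_enable(p, "client");
}

int main(void)
{
	struct switcheroo a = { 0 }, b = { 0 };

	register_client(&a); register_client(&a); register_handler(&a);  /* handler last */
	register_handler(&b); register_client(&b); register_client(&b);  /* clients last */
	return 0;
}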
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 500844f04f93..8bf8a64e5115 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1624,7 +1624,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@@ -1928,6 +1927,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GRETAGMACBETH, USB_DEVICE_ID_GRETAGMACBETH_HUEY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_POWERMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_SOUNDKNOB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GRIFFIN, USB_DEVICE_ID_RADIOSHARK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_90) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_100) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_101) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 41c34f21bd00..1dcb76ff51e3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -333,6 +333,7 @@
#define USB_VENDOR_ID_GRIFFIN 0x077d
#define USB_DEVICE_ID_POWERMATE 0x0410
#define USB_DEVICE_ID_SOUNDKNOB 0x04AA
+#define USB_DEVICE_ID_RADIOSHARK 0x627a
#define USB_VENDOR_ID_GTCO 0x078c
#define USB_DEVICE_ID_GTCO_90 0x0090
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 0f9c146fc00d..4d524b5f52f5 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
struct dj_report *dj_report;
int retval;
- dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report;
int retval;
- dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
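
The two hid-logitech-dj hunks above fix a classic allocation bug: sizeof(dj_report) is the size of the pointer, not of the structure it points at, so kzalloc() returned a buffer far too small. A tiny demonstration of the difference, using a stand-in struct rather than the real dj_report layout:

#include <stdio.h>
#include <stdlib.h>

struct dj_report_demo {            /* illustrative layout only */
	unsigned char report_id;
	unsigned char device_index;
	unsigned char report_type;
	unsigned char report_params[29];
};

int main(void)
{
	struct dj_report_demo *r;

	/* sizeof(r) is pointer-sized (typically 8), sizeof(*r) is the full object */
	printf("sizeof(r) = %zu, sizeof(*r) = %zu\n", sizeof(r), sizeof(*r));

	/* the safe pattern: size the allocation from the object, not the pointer */
	r = calloc(1, sizeof(*r));
	if (!r)
		return 1;
	free(r);
	return 0;
}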
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a220e5746d67..4748086eaaf2 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -545,8 +545,7 @@ static int vmbus_bus_init(int irq)
if (ret)
goto err_cleanup;
- ret = request_irq(irq, vmbus_isr, IRQF_SAMPLE_RANDOM,
- driver_name, hv_acpi_dev);
+ ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
if (ret != 0) {
pr_err("Unable to request IRQ %d\n",
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 563c02904ddf..23ab3c496b05 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -927,6 +927,8 @@ static int acpi_power_meter_remove(struct acpi_device *device, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+
static int acpi_power_meter_resume(struct device *dev)
{
struct acpi_power_meter_resource *resource;
@@ -944,6 +946,8 @@ static int acpi_power_meter_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
+
static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume);
static struct acpi_driver acpi_power_meter_driver = {
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 4d937a18fadb..282708860517 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -55,9 +55,9 @@
/* wait up to 32 ms for a status change. */
#define APPLESMC_MIN_WAIT 0x0010
+#define APPLESMC_RETRY_WAIT 0x0100
#define APPLESMC_MAX_WAIT 0x8000
-#define APPLESMC_STATUS_MASK 0x0f
#define APPLESMC_READ_CMD 0x10
#define APPLESMC_WRITE_CMD 0x11
#define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12
@@ -162,51 +162,68 @@ static unsigned int key_at_index;
static struct workqueue_struct *applesmc_led_wq;
/*
- * __wait_status - Wait up to 32ms for the status port to get a certain value
- * (masked with 0x0f), returning zero if the value is obtained. Callers must
+ * wait_read - Wait for a byte to appear on SMC port. Callers must
* hold applesmc_lock.
*/
-static int __wait_status(u8 val)
+static int wait_read(void)
{
+ u8 status;
int us;
-
- val = val & APPLESMC_STATUS_MASK;
-
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
udelay(us);
- if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val)
+ status = inb(APPLESMC_CMD_PORT);
+ /* read: wait for smc to settle */
+ if (status & 0x01)
return 0;
}
+ pr_warn("wait_read() fail: 0x%02x\n", status);
return -EIO;
}
/*
- * special treatment of command port - on newer macbooks, it seems necessary
- * to resend the command byte before polling the status again. Callers must
- * hold applesmc_lock.
+ * send_byte - Write to SMC port, retrying when necessary. Callers
+ * must hold applesmc_lock.
*/
-static int send_command(u8 cmd)
+static int send_byte(u8 cmd, u16 port)
{
+ u8 status;
int us;
+
+ outb(cmd, port);
for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) {
- outb(cmd, APPLESMC_CMD_PORT);
udelay(us);
- if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c)
+ status = inb(APPLESMC_CMD_PORT);
+ /* write: wait for smc to settle */
+ if (status & 0x02)
+ continue;
+ /* ready: cmd accepted, return */
+ if (status & 0x04)
return 0;
+ /* timeout: give up */
+ if (us << 1 == APPLESMC_MAX_WAIT)
+ break;
+ /* busy: long wait and resend */
+ udelay(APPLESMC_RETRY_WAIT);
+ outb(cmd, port);
}
+
+ pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status);
return -EIO;
}
+static int send_command(u8 cmd)
+{
+ return send_byte(cmd, APPLESMC_CMD_PORT);
+}
+
static int send_argument(const char *key)
{
int i;
- for (i = 0; i < 4; i++) {
- outb(key[i], APPLESMC_DATA_PORT);
- if (__wait_status(0x04))
+ for (i = 0; i < 4; i++)
+ if (send_byte(key[i], APPLESMC_DATA_PORT))
return -EIO;
- }
return 0;
}
@@ -219,11 +236,14 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
return -EIO;
}
- outb(len, APPLESMC_DATA_PORT);
+ if (send_byte(len, APPLESMC_DATA_PORT)) {
+ pr_warn("%.4s: read len fail\n", key);
+ return -EIO;
+ }
for (i = 0; i < len; i++) {
- if (__wait_status(0x05)) {
- pr_warn("%.4s: read data fail\n", key);
+ if (wait_read()) {
+ pr_warn("%.4s: read data[%d] fail\n", key, i);
return -EIO;
}
buffer[i] = inb(APPLESMC_DATA_PORT);
@@ -241,14 +261,16 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len)
return -EIO;
}
- outb(len, APPLESMC_DATA_PORT);
+ if (send_byte(len, APPLESMC_DATA_PORT)) {
+ pr_warn("%.4s: write len fail\n", key);
+ return -EIO;
+ }
for (i = 0; i < len; i++) {
- if (__wait_status(0x04)) {
+ if (send_byte(buffer[i], APPLESMC_DATA_PORT)) {
pr_warn("%s: write data fail\n", key);
return -EIO;
}
- outb(buffer[i], APPLESMC_DATA_PORT);
}
return 0;
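
The applesmc rework above replaces the masked-status wait with handshakes on individual status bits, as the new inline comments state: bit 0x01 means a byte is ready to read, bit 0x02 means the controller is still settling after a write, bit 0x04 means the command byte was accepted, and send_byte() resends after the longer APPLESMC_RETRY_WAIT when a write stalls. The read-side back-off is sketched below with stubbed port I/O so it compiles outside the kernel; the stubs are not real APIs:

#include <stdint.h>
#include <stdio.h>

#define MIN_WAIT 0x0010
#define MAX_WAIT 0x8000

static uint8_t fake_status = 0x01;              /* pretend "data ready" is set */
static uint8_t read_status(void) { return fake_status; }    /* stand-in for inb() */
static void delay_us(unsigned us) { (void)us; }             /* stand-in for udelay() */

/* wait for the "data ready" bit, doubling the delay on every retry */
static int wait_read(void)
{
	uint8_t status = 0;

	for (unsigned us = MIN_WAIT; us < MAX_WAIT; us <<= 1) {
		delay_us(us);
		status = read_status();
		if (status & 0x01)
			return 0;
	}
	fprintf(stderr, "wait_read failed: 0x%02x\n", status);
	return -1;
}

int main(void)
{
	return wait_read() ? 1 : 0;
}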
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 351d1f4593e7..4ee578948723 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = {
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
}
+ }, {
+ /* Old interface reads the same sensor for fan0 and fan1 */
+ .ident = "Asus M5A78L",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "M5A78L")
+ }
},
{ }
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 637c51c11b44..0fa356fe82cc 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -196,7 +196,7 @@ struct tjmax {
int tjmax;
};
-static struct tjmax __cpuinitconst tjmax_table[] = {
+static const struct tjmax __cpuinitconst tjmax_table[] = {
{ "CPU D410", 100000 },
{ "CPU D425", 100000 },
{ "CPU D510", 100000 },
@@ -793,7 +793,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
.notifier_call = coretemp_cpu_callback,
};
-static const struct x86_cpu_id coretemp_ids[] = {
+static const struct x86_cpu_id __initconst coretemp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index e72ba5d2a824..e21e43c13156 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = {
#define JC42_CFG_EVENT_LOCK (1 << 7)
#define JC42_CFG_SHUTDOWN (1 << 8)
#define JC42_CFG_HYST_SHIFT 9
-#define JC42_CFG_HYST_MASK 0x03
+#define JC42_CFG_HYST_MASK (0x03 << 9)
/* Capabilities */
#define JC42_CAP_RANGE (1 << 2)
@@ -287,8 +287,8 @@ static ssize_t show_temp_crit_hyst(struct device *dev,
return PTR_ERR(data);
temp = jc42_temp_from_reg(data->temp_crit);
- hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT)
- & JC42_CFG_HYST_MASK];
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
return sprintf(buf, "%d\n", temp - hyst);
}
@@ -302,8 +302,8 @@ static ssize_t show_temp_max_hyst(struct device *dev,
return PTR_ERR(data);
temp = jc42_temp_from_reg(data->temp_max);
- hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT)
- & JC42_CFG_HYST_MASK];
+ hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK)
+ >> JC42_CFG_HYST_SHIFT];
return sprintf(buf, "%d\n", temp - hyst);
}
@@ -362,8 +362,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
}
mutex_lock(&data->update_lock);
- data->config = (data->config
- & ~(JC42_CFG_HYST_MASK << JC42_CFG_HYST_SHIFT))
+ data->config = (data->config & ~JC42_CFG_HYST_MASK)
| (hyst << JC42_CFG_HYST_SHIFT);
err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
data->config);
@@ -535,9 +534,16 @@ static int jc42_remove(struct i2c_client *client)
struct jc42_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &jc42_group);
- if (data->config != data->orig_config)
- i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
- data->orig_config);
+
+ /* Restore original configuration except hysteresis */
+ if ((data->config & ~JC42_CFG_HYST_MASK) !=
+ (data->orig_config & ~JC42_CFG_HYST_MASK)) {
+ int config;
+
+ config = (data->orig_config & ~JC42_CFG_HYST_MASK)
+ | (data->config & JC42_CFG_HYST_MASK);
+ i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config);
+ }
return 0;
}
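
The jc42 hunks above change JC42_CFG_HYST_MASK to cover the already-shifted field (0x03 << 9), so extraction becomes "mask, then shift down" and updates become "clear the field, then OR in the shifted value"; the remove path can then preserve just the hysteresis bits. A short self-contained check of that bit arithmetic, with an arbitrary register value chosen for the demo:

#include <assert.h>
#include <stdio.h>

#define CFG_HYST_SHIFT 9
#define CFG_HYST_MASK  (0x03 << 9)     /* mask includes the shift */

int main(void)
{
	unsigned config = 0x1d00;      /* arbitrary config word for the example */

	/* extract: mask first, then shift down */
	unsigned hyst = (config & CFG_HYST_MASK) >> CFG_HYST_SHIFT;

	/* update: clear the field, then OR in the new (shifted) value */
	unsigned updated = (config & ~CFG_HYST_MASK) | (2u << CFG_HYST_SHIFT);

	assert(hyst == 2);
	assert(((updated & CFG_HYST_MASK) >> CFG_HYST_SHIFT) == 2);
	printf("hyst index %u, updated config 0x%04x\n", hyst, updated);
	return 0;
}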
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index 8689664ef03c..ee4ebc198a94 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -309,7 +309,7 @@ static struct notifier_block via_cputemp_cpu_notifier __refdata = {
.notifier_call = via_cputemp_cpu_callback,
};
-static const struct x86_cpu_id cputemp_ids[] = {
+static const struct x86_cpu_id __initconst cputemp_ids[] = {
{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
{ X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index ab4825205a9d..5b1a6a666441 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
int err = -ENODEV;
u16 val;
- static const __initdata char *names[] = {
+ static __initconst char *const names[] = {
"W83627HF",
"W83627THF",
"W83697HF",
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2e7530a4e7b8..b4aaa1bd6728 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -462,7 +462,7 @@ config I2C_MPC
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL
+ depends on (MV64X60 || PLAT_ORION)
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -483,10 +483,11 @@ config I2C_MXS
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
- depends on PLAT_NOMADIK
+ depends on ARM_AMBA
help
If you say yes to this option, support will be included for the
- I2C interface from ST-Ericsson's Nomadik and Ux500 architectures.
+ I2C interface from ST-Ericsson's Nomadik and Ux500 architectures,
+ as well as the STA2X11 PCIe I/O HUB.
config I2C_NUC900
tristate "NUC900 I2C Driver"
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 1679deef9c89..e24484beef07 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -279,30 +279,31 @@ static int __devexit at91_i2c_remove(struct platform_device *pdev)
/* NOTE: could save a few mA by keeping clock off outside of at91_xfer... */
-static int at91_i2c_suspend(struct platform_device *pdev, pm_message_t mesg)
+static int at91_i2c_suspend(struct device *dev)
{
clk_disable(twi_clk);
return 0;
}
-static int at91_i2c_resume(struct platform_device *pdev)
+static int at91_i2c_resume(struct device *dev)
{
return clk_enable(twi_clk);
}
+static SIMPLE_DEV_PM_OPS(at91_i2c_pm, at91_i2c_suspend, at91_i2c_resume);
+#define AT91_I2C_PM (&at91_i2c_pm)
+
#else
-#define at91_i2c_suspend NULL
-#define at91_i2c_resume NULL
+#define AT91_I2C_PM NULL
#endif
static struct platform_driver at91_i2c_driver = {
.probe = at91_i2c_probe,
.remove = __devexit_p(at91_i2c_remove),
- .suspend = at91_i2c_suspend,
- .resume = at91_i2c_resume,
.driver = {
.name = "at91_i2c",
.owner = THIS_MODULE,
+ .pm = AT91_I2C_PM,
},
};
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index cdb59e5b23f7..0cf780fd6ef1 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -25,6 +25,7 @@
#include <asm/blackfin.h>
#include <asm/portmux.h>
#include <asm/irq.h>
+#include <asm/bfin_twi.h>
/* SMBus mode*/
#define TWI_I2C_MODE_STANDARD 1
@@ -32,56 +33,6 @@
#define TWI_I2C_MODE_COMBINED 3
#define TWI_I2C_MODE_REPEAT 4
-struct bfin_twi_iface {
- int irq;
- spinlock_t lock;
- char read_write;
- u8 command;
- u8 *transPtr;
- int readNum;
- int writeNum;
- int cur_mode;
- int manual_stop;
- int result;
- struct i2c_adapter adap;
- struct completion complete;
- struct i2c_msg *pmsg;
- int msg_num;
- int cur_msg;
- u16 saved_clkdiv;
- u16 saved_control;
- void __iomem *regs_base;
-};
-
-
-#define DEFINE_TWI_REG(reg, off) \
-static inline u16 read_##reg(struct bfin_twi_iface *iface) \
- { return bfin_read16(iface->regs_base + (off)); } \
-static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \
- { bfin_write16(iface->regs_base + (off), v); }
-
-DEFINE_TWI_REG(CLKDIV, 0x00)
-DEFINE_TWI_REG(CONTROL, 0x04)
-DEFINE_TWI_REG(SLAVE_CTL, 0x08)
-DEFINE_TWI_REG(SLAVE_STAT, 0x0C)
-DEFINE_TWI_REG(SLAVE_ADDR, 0x10)
-DEFINE_TWI_REG(MASTER_CTL, 0x14)
-DEFINE_TWI_REG(MASTER_STAT, 0x18)
-DEFINE_TWI_REG(MASTER_ADDR, 0x1C)
-DEFINE_TWI_REG(INT_STAT, 0x20)
-DEFINE_TWI_REG(INT_MASK, 0x24)
-DEFINE_TWI_REG(FIFO_CTL, 0x28)
-DEFINE_TWI_REG(FIFO_STAT, 0x2C)
-DEFINE_TWI_REG(XMT_DATA8, 0x80)
-DEFINE_TWI_REG(XMT_DATA16, 0x84)
-DEFINE_TWI_REG(RCV_DATA8, 0x88)
-DEFINE_TWI_REG(RCV_DATA16, 0x8C)
-
-static const u16 pin_req[2][3] = {
- {P_TWI0_SCL, P_TWI0_SDA, 0},
- {P_TWI1_SCL, P_TWI1_SDA, 0},
-};
-
static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
unsigned short twi_int_status)
{
@@ -99,7 +50,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
*/
else if (iface->cur_mode == TWI_I2C_MODE_COMBINED)
write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | MDIR | RSTART);
+ read_MASTER_CTL(iface) | MDIR);
else if (iface->manual_stop)
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) | STOP);
@@ -107,10 +58,10 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
iface->cur_msg + 1 < iface->msg_num) {
if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART | MDIR);
+ read_MASTER_CTL(iface) | MDIR);
else
write_MASTER_CTL(iface,
- (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
+ read_MASTER_CTL(iface) & ~MDIR);
}
}
if (twi_int_status & RCVSERV) {
@@ -130,17 +81,25 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
}
iface->transPtr++;
iface->readNum--;
- } else if (iface->manual_stop) {
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | STOP);
- } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg + 1 < iface->msg_num) {
- if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) | RSTART | MDIR);
- else
+ }
+
+ if (iface->readNum == 0) {
+ if (iface->manual_stop) {
+ /* Temporary workaround to avoid possible bus stall -
+ * Flush FIFO before issuing the STOP condition
+ */
+ read_RCV_DATA16(iface);
write_MASTER_CTL(iface,
- (read_MASTER_CTL(iface) | RSTART) & ~MDIR);
+ read_MASTER_CTL(iface) | STOP);
+ } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
+ iface->cur_msg + 1 < iface->msg_num) {
+ if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) | MDIR);
+ else
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) & ~MDIR);
+ }
}
}
if (twi_int_status & MERR) {
@@ -193,7 +152,8 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
return;
}
if (twi_int_status & MCOMP) {
- if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+ if (twi_int_status & (XMTSERV | RCVSERV) &&
+ (read_MASTER_CTL(iface) & MEN) == 0 &&
(iface->cur_mode == TWI_I2C_MODE_REPEAT ||
iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
iface->result = -1;
@@ -221,7 +181,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
write_MASTER_CTL(iface,
read_MASTER_CTL(iface) & ~RSTART);
} else if (iface->cur_mode == TWI_I2C_MODE_REPEAT &&
- iface->cur_msg+1 < iface->msg_num) {
+ iface->cur_msg + 1 < iface->msg_num) {
iface->cur_msg++;
iface->transPtr = iface->pmsg[iface->cur_msg].buf;
iface->writeNum = iface->readNum =
@@ -241,27 +201,29 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
}
}
- if (iface->pmsg[iface->cur_msg].len <= 255)
- write_MASTER_CTL(iface,
+ if (iface->pmsg[iface->cur_msg].len <= 255) {
+ write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) &
(~(0xff << 6))) |
- (iface->pmsg[iface->cur_msg].len << 6));
- else {
+ (iface->pmsg[iface->cur_msg].len << 6));
+ iface->manual_stop = 0;
+ } else {
write_MASTER_CTL(iface,
(read_MASTER_CTL(iface) |
(0xff << 6)));
iface->manual_stop = 1;
}
- /* remove restart bit and enable master receive */
- write_MASTER_CTL(iface,
- read_MASTER_CTL(iface) & ~RSTART);
+ /* remove restart bit before last message */
+ if (iface->cur_msg + 1 == iface->msg_num)
+ write_MASTER_CTL(iface,
+ read_MASTER_CTL(iface) & ~RSTART);
} else {
iface->result = 1;
write_INT_MASK(iface, 0);
write_MASTER_CTL(iface, 0);
}
+ complete(&iface->complete);
}
- complete(&iface->complete);
}
/* Interrupt handler */
@@ -298,8 +260,8 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
if (!(read_CONTROL(iface) & TWI_ENA))
return -ENXIO;
- while (read_MASTER_STAT(iface) & BUSBUSY)
- yield();
+ if (read_MASTER_STAT(iface) & BUSBUSY)
+ return -EAGAIN;
iface->pmsg = msgs;
iface->msg_num = num;
@@ -311,7 +273,8 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
return -EINVAL;
}
- iface->cur_mode = TWI_I2C_MODE_REPEAT;
+ if (iface->msg_num > 1)
+ iface->cur_mode = TWI_I2C_MODE_REPEAT;
iface->manual_stop = 0;
iface->transPtr = pmsg->buf;
iface->writeNum = iface->readNum = pmsg->len;
@@ -356,6 +319,7 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap,
/* Master enable */
write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
+ (iface->msg_num > 1 ? RSTART : 0) |
((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0));
SSYNC();
@@ -398,8 +362,8 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
if (!(read_CONTROL(iface) & TWI_ENA))
return -ENXIO;
- while (read_MASTER_STAT(iface) & BUSBUSY)
- yield();
+ if (read_MASTER_STAT(iface) & BUSBUSY)
+ return -EAGAIN;
iface->writeNum = 0;
iface->readNum = 0;
@@ -520,7 +484,7 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr,
else
write_MASTER_CTL(iface, 0x1 << 6);
/* Master enable */
- write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN |
+ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | RSTART |
((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0));
break;
default:
@@ -611,9 +575,9 @@ static struct i2c_algorithm bfin_twi_algorithm = {
.functionality = bfin_twi_functionality,
};
-static int i2c_bfin_twi_suspend(struct platform_device *pdev, pm_message_t state)
+static int i2c_bfin_twi_suspend(struct device *dev)
{
- struct bfin_twi_iface *iface = platform_get_drvdata(pdev);
+ struct bfin_twi_iface *iface = dev_get_drvdata(dev);
iface->saved_clkdiv = read_CLKDIV(iface);
iface->saved_control = read_CONTROL(iface);
@@ -626,14 +590,14 @@ static int i2c_bfin_twi_suspend(struct platform_device *pdev, pm_message_t state
return 0;
}
-static int i2c_bfin_twi_resume(struct platform_device *pdev)
+static int i2c_bfin_twi_resume(struct device *dev)
{
- struct bfin_twi_iface *iface = platform_get_drvdata(pdev);
+ struct bfin_twi_iface *iface = dev_get_drvdata(dev);
int rc = request_irq(iface->irq, bfin_twi_interrupt_entry,
- 0, pdev->name, iface);
+ 0, to_platform_device(dev)->name, iface);
if (rc) {
- dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq);
+ dev_err(dev, "Can't get IRQ %d !\n", iface->irq);
return -ENODEV;
}
@@ -646,6 +610,9 @@ static int i2c_bfin_twi_resume(struct platform_device *pdev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(i2c_bfin_twi_pm,
+ i2c_bfin_twi_suspend, i2c_bfin_twi_resume);
+
static int i2c_bfin_twi_probe(struct platform_device *pdev)
{
struct bfin_twi_iface *iface;
@@ -695,7 +662,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->timeout = 5 * HZ;
p_adap->retries = 3;
- rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi");
+ rc = peripheral_request_list((unsigned short *)pdev->dev.platform_data,
+ "i2c-bfin-twi");
if (rc) {
dev_err(&pdev->dev, "Can't setup pin mux!\n");
goto out_error_pin_mux;
@@ -742,7 +710,7 @@ out_error_add_adapter:
free_irq(iface->irq, iface);
out_error_req_irq:
out_error_no_irq:
- peripheral_free_list(pin_req[pdev->id]);
+ peripheral_free_list((unsigned short *)pdev->dev.platform_data);
out_error_pin_mux:
iounmap(iface->regs_base);
out_error_ioremap:
@@ -760,7 +728,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
- peripheral_free_list(pin_req[pdev->id]);
+ peripheral_free_list((unsigned short *)pdev->dev.platform_data);
iounmap(iface->regs_base);
kfree(iface);
@@ -770,11 +738,10 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
static struct platform_driver i2c_bfin_twi_driver = {
.probe = i2c_bfin_twi_probe,
.remove = i2c_bfin_twi_remove,
- .suspend = i2c_bfin_twi_suspend,
- .resume = i2c_bfin_twi_resume,
.driver = {
.name = "i2c-bfin-twi",
.owner = THIS_MODULE,
+ .pm = &i2c_bfin_twi_pm,
},
};
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index aedb94f34bf7..dae3ddfe7619 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
}
}
+ ret = num;
abort:
sret = diolan_i2c_stop(dev);
if (sret < 0 && ret >= 0)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 370031ac8200..0722f869465c 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -117,10 +117,8 @@ static u16 __initdata i2c_clk_div[50][2] = {
struct imx_i2c_struct {
struct i2c_adapter adapter;
- struct resource *res;
struct clk *clk;
void __iomem *base;
- int irq;
wait_queue_head_t queue;
unsigned long i2csr;
unsigned int disable_delay;
@@ -472,9 +470,8 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
struct pinctrl *pinctrl;
void __iomem *base;
- resource_size_t res_size;
- int irq, bitrate;
- int ret;
+ int irq, ret;
+ u32 bitrate;
dev_dbg(&pdev->dev, "<%s>\n", __func__);
@@ -489,25 +486,15 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return -ENOENT;
}
- res_size = resource_size(res);
-
- if (!request_mem_region(res->start, res_size, DRIVER_NAME)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
+ base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!base)
return -EBUSY;
- }
-
- base = ioremap(res->start, res_size);
- if (!base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EIO;
- goto fail1;
- }
- i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL);
+ i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct),
+ GFP_KERNEL);
if (!i2c_imx) {
dev_err(&pdev->dev, "can't allocate interface\n");
- ret = -ENOMEM;
- goto fail2;
+ return -ENOMEM;
}
/* Setup i2c_imx driver structure */
@@ -517,29 +504,27 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
i2c_imx->adapter.dev.parent = &pdev->dev;
i2c_imx->adapter.nr = pdev->id;
i2c_imx->adapter.dev.of_node = pdev->dev.of_node;
- i2c_imx->irq = irq;
i2c_imx->base = base;
- i2c_imx->res = res;
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto fail3;
+ dev_err(&pdev->dev, "can't get/select pinctrl\n");
+ return PTR_ERR(pinctrl);
}
/* Get I2C clock */
- i2c_imx->clk = clk_get(&pdev->dev, "i2c_clk");
+ i2c_imx->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(i2c_imx->clk)) {
- ret = PTR_ERR(i2c_imx->clk);
dev_err(&pdev->dev, "can't get I2C clock\n");
- goto fail3;
+ return PTR_ERR(i2c_imx->clk);
}
/* Request IRQ */
- ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx);
+ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+ pdev->name, i2c_imx);
if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", i2c_imx->irq);
- goto fail4;
+ dev_err(&pdev->dev, "can't claim irq %d\n", irq);
+ return ret;
}
/* Init queue */
@@ -564,7 +549,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
if (ret < 0) {
dev_err(&pdev->dev, "registration failed\n");
- goto fail5;
+ return ret;
}
of_i2c_register_devices(&i2c_imx->adapter);
@@ -572,28 +557,16 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
/* Set up platform driver data */
platform_set_drvdata(pdev, i2c_imx);
- dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", i2c_imx->irq);
+ dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n",
- i2c_imx->res->start, i2c_imx->res->end);
- dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x \n",
- res_size, i2c_imx->res->start);
+ res->start, res->end);
+ dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x\n",
+ resource_size(res), res->start);
dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
i2c_imx->adapter.name);
dev_dbg(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
return 0; /* Return OK */
-
-fail5:
- free_irq(i2c_imx->irq, i2c_imx);
-fail4:
- clk_put(i2c_imx->clk);
-fail3:
- kfree(i2c_imx);
-fail2:
- iounmap(base);
-fail1:
- release_mem_region(res->start, resource_size(res));
- return ret; /* Return error number */
}
static int __exit i2c_imx_remove(struct platform_device *pdev)
@@ -605,20 +578,12 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c_imx->adapter);
platform_set_drvdata(pdev, NULL);
- /* free interrupt */
- free_irq(i2c_imx->irq, i2c_imx);
-
/* setup chip registers to defaults */
writeb(0, i2c_imx->base + IMX_I2C_IADR);
writeb(0, i2c_imx->base + IMX_I2C_IFDR);
writeb(0, i2c_imx->base + IMX_I2C_I2CR);
writeb(0, i2c_imx->base + IMX_I2C_I2SR);
- clk_put(i2c_imx->clk);
-
- iounmap(i2c_imx->base);
- release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res));
- kfree(i2c_imx);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 4f44a33017b0..2e9d56719e99 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -18,6 +18,11 @@
#include <linux/mv643xx_i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_i2c.h>
+#include <linux/clk.h>
+#include <linux/err.h>
/* Register defines */
#define MV64XXX_I2C_REG_SLAVE_ADDR 0x00
@@ -98,6 +103,9 @@ struct mv64xxx_i2c_data {
int rc;
u32 freq_m;
u32 freq_n;
+#if defined(CONFIG_HAVE_CLK)
+ struct clk *clk;
+#endif
wait_queue_head_t waitq;
spinlock_t lock;
struct i2c_msg *msg;
@@ -521,6 +529,82 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base_p = 0;
}
+#ifdef CONFIG_OF
+static int __devinit
+mv64xxx_calc_freq(const int tclk, const int n, const int m)
+{
+ return tclk / (10 * (m + 1) * (2 << n));
+}
+
+static bool __devinit
+mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
+ u32 *best_m)
+{
+ int freq, delta, best_delta = INT_MAX;
+ int m, n;
+
+ for (n = 0; n <= 7; n++)
+ for (m = 0; m <= 15; m++) {
+ freq = mv64xxx_calc_freq(tclk, n, m);
+ delta = req_freq - freq;
+ if (delta >= 0 && delta < best_delta) {
+ *best_m = m;
+ *best_n = n;
+ best_delta = delta;
+ }
+ if (best_delta == 0)
+ return true;
+ }
+ if (best_delta == INT_MAX)
+ return false;
+ return true;
+}
+
+static int __devinit
+mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
+ struct device_node *np)
+{
+ u32 bus_freq, tclk;
+ int rc = 0;
+
+ /* CLK is mandatory when using DT to describe the i2c bus. We
+ * need to know tclk in order to calculate bus clock
+ * factors.
+ */
+#if !defined(CONFIG_HAVE_CLK)
+ /* Have OF but no CLK */
+ return -ENODEV;
+#else
+ if (IS_ERR(drv_data->clk)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ tclk = clk_get_rate(drv_data->clk);
+ of_property_read_u32(np, "clock-frequency", &bus_freq);
+ if (!mv64xxx_find_baud_factors(bus_freq, tclk,
+ &drv_data->freq_n, &drv_data->freq_m)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ drv_data->irq = irq_of_parse_and_map(np, 0);
+
+ /* It's not yet defined how timeouts will be specified in the device tree.
+ * So hard code the value to 1 second.
+ */
+ drv_data->adapter.timeout = HZ;
+out:
+ return rc;
+#endif
+}
+#else /* CONFIG_OF */
+static int __devinit
+mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
+ struct device_node *np)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_OF */
+
static int __devinit
mv64xxx_i2c_probe(struct platform_device *pd)
{
@@ -528,7 +612,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
struct mv64xxx_i2c_pdata *pdata = pd->dev.platform_data;
int rc;
- if ((pd->id != 0) || !pdata)
+ if ((!pdata && !pd->dev.of_node))
return -ENODEV;
drv_data = kzalloc(sizeof(struct mv64xxx_i2c_data), GFP_KERNEL);
@@ -546,19 +630,35 @@ mv64xxx_i2c_probe(struct platform_device *pd)
init_waitqueue_head(&drv_data->waitq);
spin_lock_init(&drv_data->lock);
- drv_data->freq_m = pdata->freq_m;
- drv_data->freq_n = pdata->freq_n;
- drv_data->irq = platform_get_irq(pd, 0);
+#if defined(CONFIG_HAVE_CLK)
+ /* Not all platforms have a clk */
+ drv_data->clk = clk_get(&pd->dev, NULL);
+ if (!IS_ERR(drv_data->clk)) {
+ clk_prepare(drv_data->clk);
+ clk_enable(drv_data->clk);
+ }
+#endif
+ if (pdata) {
+ drv_data->freq_m = pdata->freq_m;
+ drv_data->freq_n = pdata->freq_n;
+ drv_data->irq = platform_get_irq(pd, 0);
+ drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
+ } else if (pd->dev.of_node) {
+ rc = mv64xxx_of_config(drv_data, pd->dev.of_node);
+ if (rc)
+ goto exit_unmap_regs;
+ }
if (drv_data->irq < 0) {
rc = -ENXIO;
goto exit_unmap_regs;
}
+
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
drv_data->adapter.owner = THIS_MODULE;
drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
- drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
drv_data->adapter.nr = pd->id;
+ drv_data->adapter.dev.of_node = pd->dev.of_node;
platform_set_drvdata(pd, drv_data);
i2c_set_adapdata(&drv_data->adapter, drv_data);
@@ -577,11 +677,20 @@ mv64xxx_i2c_probe(struct platform_device *pd)
goto exit_free_irq;
}
+ of_i2c_register_devices(&drv_data->adapter);
+
return 0;
exit_free_irq:
free_irq(drv_data->irq, drv_data);
exit_unmap_regs:
+#if defined(CONFIG_HAVE_CLK)
+ /* Not all platforms have a clk */
+ if (!IS_ERR(drv_data->clk)) {
+ clk_disable(drv_data->clk);
+ clk_unprepare(drv_data->clk);
+ }
+#endif
mv64xxx_i2c_unmap_regs(drv_data);
exit_kfree:
kfree(drv_data);
@@ -597,17 +706,31 @@ mv64xxx_i2c_remove(struct platform_device *dev)
rc = i2c_del_adapter(&drv_data->adapter);
free_irq(drv_data->irq, drv_data);
mv64xxx_i2c_unmap_regs(drv_data);
+#if defined(CONFIG_HAVE_CLK)
+ /* Not all platforms have a clk */
+ if (!IS_ERR(drv_data->clk)) {
+ clk_disable(drv_data->clk);
+ clk_unprepare(drv_data->clk);
+ }
+#endif
kfree(drv_data);
return rc;
}
+static const struct of_device_id mv64xxx_i2c_of_match_table[] __devinitdata = {
+ { .compatible = "marvell,mv64xxx-i2c", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
+
static struct platform_driver mv64xxx_i2c_driver = {
.probe = mv64xxx_i2c_probe,
.remove = __devexit_p(mv64xxx_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = MV64XXX_I2C_CTLR_NAME,
+ .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table),
},
};
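
mv64xxx_find_baud_factors() above searches the 3-bit n and 4-bit m divider fields for the pair whose bus frequency, computed as tclk / (10 * (m + 1) * (2 << n)), comes closest to the requested clock-frequency without exceeding it. The same search as a stand-alone program, using an assumed 250 MHz tclk purely as example input:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* same formula as mv64xxx_calc_freq() in the hunk above */
static int calc_freq(int tclk, int n, int m)
{
	return tclk / (10 * (m + 1) * (2 << n));
}

/* pick the (n, m) pair closest to req_freq without going over it */
static bool find_baud_factors(int req_freq, int tclk, int *best_n, int *best_m)
{
	int best_delta = INT_MAX;

	for (int n = 0; n <= 7; n++) {
		for (int m = 0; m <= 15; m++) {
			int delta = req_freq - calc_freq(tclk, n, m);
			if (delta >= 0 && delta < best_delta) {
				*best_n = n;
				*best_m = m;
				best_delta = delta;
			}
		}
	}
	return best_delta != INT_MAX;
}

int main(void)
{
	int n, m;

	if (find_baud_factors(100000, 250000000, &n, &m))
		printf("n=%d m=%d -> %d Hz\n", n, m, calc_freq(250000000, n, m));
	return 0;
}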
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 04eb441b6ce1..088c5c1ed17d 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -46,6 +46,10 @@
#define MXS_I2C_CTRL0_DIRECTION 0x00010000
#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF)
+#define MXS_I2C_TIMING0 (0x10)
+#define MXS_I2C_TIMING1 (0x20)
+#define MXS_I2C_TIMING2 (0x30)
+
#define MXS_I2C_CTRL1 (0x40)
#define MXS_I2C_CTRL1_SET (0x44)
#define MXS_I2C_CTRL1_CLR (0x48)
@@ -97,6 +101,35 @@
#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
MXS_I2C_CTRL0_MASTER_MODE)
+struct mxs_i2c_speed_config {
+ uint32_t timing0;
+ uint32_t timing1;
+ uint32_t timing2;
+};
+
+/*
+ * Timing values for the default 24MHz clock supplied into the i2c block.
+ *
+ * The bus can operate at 95kHz or at 400kHz with the following timing
+ * register configurations. The 100kHz mode isn't present because it's
+ * values are not stated in the i.MX233/i.MX28 datasheet. The 95kHz mode
+ * shall be close enough replacement. Therefore when the bus is configured
+ * for 100kHz operation, 95kHz timing settings are actually loaded.
+ *
+ * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4].
+ */
+static const struct mxs_i2c_speed_config mxs_i2c_95kHz_config = {
+ .timing0 = 0x00780030,
+ .timing1 = 0x00800030,
+ .timing2 = 0x00300030,
+};
+
+static const struct mxs_i2c_speed_config mxs_i2c_400kHz_config = {
+ .timing0 = 0x000f0007,
+ .timing1 = 0x001f000f,
+ .timing2 = 0x00300030,
+};
+
/**
* struct mxs_i2c_dev - per device, private MXS-I2C data
*
@@ -112,11 +145,17 @@ struct mxs_i2c_dev {
struct completion cmd_complete;
u32 cmd_err;
struct i2c_adapter adapter;
+ const struct mxs_i2c_speed_config *speed;
};
static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
stmp_reset_block(i2c->regs);
+
+ writel(i2c->speed->timing0, i2c->regs + MXS_I2C_TIMING0);
+ writel(i2c->speed->timing1, i2c->regs + MXS_I2C_TIMING1);
+ writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2);
+
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -193,7 +232,7 @@ static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
{
- u32 data;
+ u32 uninitialized_var(data);
int i;
for (i = 0; i < len; i++) {
@@ -319,6 +358,28 @@ static const struct i2c_algorithm mxs_i2c_algo = {
.functionality = mxs_i2c_func,
};
+static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
+{
+ uint32_t speed;
+ struct device *dev = i2c->dev;
+ struct device_node *node = dev->of_node;
+ int ret;
+
+ if (!node)
+ return -EINVAL;
+
+ i2c->speed = &mxs_i2c_95kHz_config;
+ ret = of_property_read_u32(node, "clock-frequency", &speed);
+ if (ret)
+ dev_warn(dev, "No I2C speed selected, using 100kHz\n");
+ else if (speed == 400000)
+ i2c->speed = &mxs_i2c_400kHz_config;
+ else if (speed != 100000)
+ dev_warn(dev, "Unsupported I2C speed selected, using 100kHz\n");
+
+ return 0;
+}
+
static int __devinit mxs_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -358,6 +419,11 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return err;
i2c->dev = dev;
+
+ err = mxs_i2c_get_ofdata(i2c);
+ if (err)
+ return err;
+
platform_set_drvdata(pdev, i2c);
/* Do reset to enforce correct startup after pinmuxing */
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index a92440dbef07..61b00edacb08 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -14,7 +14,8 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
@@ -23,8 +24,7 @@
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
-
-#include <plat/i2c.h>
+#include <linux/platform_data/i2c-nomadik.h>
#define DRIVER_NAME "nmk-i2c"
@@ -136,7 +136,7 @@ struct i2c_nmk_client {
/**
* struct nmk_i2c_dev - private data structure of the controller.
- * @pdev: parent platform device.
+ * @adev: parent amba device.
* @adap: corresponding I2C adapter.
* @irq: interrupt line for the controller.
* @virtbase: virtual io memory area.
@@ -150,7 +150,7 @@ struct i2c_nmk_client {
* @busy: Busy doing transfer.
*/
struct nmk_i2c_dev {
- struct platform_device *pdev;
+ struct amba_device *adev;
struct i2c_adapter adap;
int irq;
void __iomem *virtbase;
@@ -217,7 +217,7 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
}
}
- dev_err(&dev->pdev->dev,
+ dev_err(&dev->adev->dev,
"flushing operation timed out giving up after %d attempts",
LOOP_ATTEMPTS);
@@ -276,15 +276,32 @@ exit:
/**
* load_i2c_mcr_reg() - load the MCR register
* @dev: private data of controller
+ * @flags: message flags
*/
-static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev)
+static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags)
{
u32 mcr = 0;
+ unsigned short slave_adr_3msb_bits;
- /* 7-bit address transaction */
- mcr |= GEN_MASK(1, I2C_MCR_AM, 12);
mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1);
+ if (unlikely(flags & I2C_M_TEN)) {
+ /* 10-bit address transaction */
+ mcr |= GEN_MASK(2, I2C_MCR_AM, 12);
+ /*
+ * Get the top 3 bits.
+ * EA10 represents the extended address in the MCR. It holds
+ * the extension (MSB bits) of the 7-bit address loaded
+ * in A7.
+ */
+ slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7;
+
+ mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8);
+ } else {
+ /* 7-bit address transaction */
+ mcr |= GEN_MASK(1, I2C_MCR_AM, 12);
+ }
+
/* start byte procedure not applied */
mcr |= GEN_MASK(0, I2C_MCR_SB, 11);
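/*
 * A minimal sketch (not taken from the driver) of the 10-bit address
 * split performed above: the low 7 bits land in the A7 field and the
 * top 3 bits in EA10. For a hypothetical 10-bit slave address 0x2A5,
 * this yields low7 = 0x25 and top3 = 0x5.
 */
static inline void example_split_10bit_addr(u16 addr, u8 *low7, u8 *top3)
{
	*low7 = addr & 0x7f;		/* bits [6:0] -> A7 */
	*top3 = (addr >> 7) & 0x7;	/* bits [9:7] -> EA10 */
}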
@@ -333,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
i2c_clk = clk_get_rate(dev->clk);
- /* fallback to std. mode if machine has not provided it */
- if (dev->cfg.clk_freq == 0)
- dev->cfg.clk_freq = 100000;
-
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
@@ -364,7 +377,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* and high speed (up to 3.4 Mb/s)
*/
if (dev->cfg.sm > I2C_FREQ_MODE_FAST) {
- dev_err(&dev->pdev->dev,
+ dev_err(&dev->adev->dev,
"do not support this mode defaulting to std. mode\n");
brcr2 = i2c_clk/(100000 * 2) & 0xffff;
writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
@@ -381,19 +394,20 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
/**
* read_i2c() - Read from I2C client device
* @dev: private data of I2C Driver
+ * @flags: message flags
*
* This function reads from i2c client device when controller is in
* master mode. There is a completion timeout. If there is no transfer
* before timeout error is returned.
*/
-static int read_i2c(struct nmk_i2c_dev *dev)
+static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
u32 mcr;
u32 irq_mask = 0;
int timeout;
- mcr = load_i2c_mcr_reg(dev);
+ mcr = load_i2c_mcr_reg(dev, flags);
writel(mcr, dev->virtbase + I2C_MCR);
/* load the current CR value */
@@ -423,7 +437,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
&dev->xfer_complete, dev->adap.timeout);
if (timeout < 0) {
- dev_err(&dev->pdev->dev,
+ dev_err(&dev->adev->dev,
"wait_for_completion_timeout "
"returned %d waiting for event\n", timeout);
status = timeout;
@@ -431,7 +445,7 @@ static int read_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n",
+ dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -459,17 +473,18 @@ static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
/**
* write_i2c() - Write data to I2C client.
* @dev: private data of I2C Driver
+ * @flags: message flags
*
* This function writes data to I2C client
*/
-static int write_i2c(struct nmk_i2c_dev *dev)
+static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
{
u32 status = 0;
u32 mcr;
u32 irq_mask = 0;
int timeout;
- mcr = load_i2c_mcr_reg(dev);
+ mcr = load_i2c_mcr_reg(dev, flags);
writel(mcr, dev->virtbase + I2C_MCR);
@@ -510,7 +525,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
&dev->xfer_complete, dev->adap.timeout);
if (timeout < 0) {
- dev_err(&dev->pdev->dev,
+ dev_err(&dev->adev->dev,
"wait_for_completion_timeout "
"returned %d waiting for event\n", timeout);
status = timeout;
@@ -518,7 +533,7 @@ static int write_i2c(struct nmk_i2c_dev *dev)
if (timeout == 0) {
/* Controller timed out */
- dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n",
+ dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
dev->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -538,11 +553,11 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
if (flags & I2C_M_RD) {
/* read operation */
dev->cli.operation = I2C_READ;
- status = read_i2c(dev);
+ status = read_i2c(dev, flags);
} else {
/* write operation */
dev->cli.operation = I2C_WRITE;
- status = write_i2c(dev);
+ status = write_i2c(dev, flags);
}
if (status || (dev->result)) {
@@ -557,7 +572,7 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
if (((i2c_sr >> 2) & 0x3) == 0x3) {
/* get the abort cause */
cause = (i2c_sr >> 4) & 0x7;
- dev_err(&dev->pdev->dev, "%s\n",
+ dev_err(&dev->adev->dev, "%s\n",
cause >= ARRAY_SIZE(abort_causes) ?
"unknown reason" :
abort_causes[cause]);
@@ -630,7 +645,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
if (dev->regulator)
regulator_enable(dev->regulator);
- pm_runtime_get_sync(&dev->pdev->dev);
+ pm_runtime_get_sync(&dev->adev->dev);
clk_enable(dev->clk);
@@ -644,13 +659,6 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
setup_i2c_controller(dev);
for (i = 0; i < num_msgs; i++) {
- if (unlikely(msgs[i].flags & I2C_M_TEN)) {
- dev_err(&dev->pdev->dev,
- "10 bit addressing not supported\n");
-
- status = -EINVAL;
- goto out;
- }
dev->cli.slave_adr = msgs[i].addr;
dev->cli.buffer = msgs[i].buf;
dev->cli.count = msgs[i].len;
@@ -667,7 +675,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
out:
clk_disable(dev->clk);
- pm_runtime_put_sync(&dev->pdev->dev);
+ pm_runtime_put_sync(&dev->adev->dev);
if (dev->regulator)
regulator_disable(dev->regulator);
@@ -790,7 +798,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
if (dev->cli.count) {
dev->result = -EIO;
- dev_err(&dev->pdev->dev,
+ dev_err(&dev->adev->dev,
"%lu bytes still remain to be xfered\n",
dev->cli.count);
(void) init_hw(dev);
@@ -834,7 +842,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
dev->result = -EIO;
(void) init_hw(dev);
- dev_err(&dev->pdev->dev, "Tx Fifo Over run\n");
+ dev_err(&dev->adev->dev, "Tx Fifo Over run\n");
complete(&dev->xfer_complete);
break;
@@ -847,10 +855,10 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RFSE:
case I2C_IT_WTSR:
case I2C_IT_STD:
- dev_err(&dev->pdev->dev, "unhandled Interrupt\n");
+ dev_err(&dev->adev->dev, "unhandled Interrupt\n");
break;
default:
- dev_err(&dev->pdev->dev, "spurious Interrupt..\n");
+ dev_err(&dev->adev->dev, "spurious Interrupt..\n");
break;
}
@@ -861,8 +869,8 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
#ifdef CONFIG_PM
static int nmk_i2c_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev);
+ struct amba_device *adev = to_amba_device(dev);
+ struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
if (nmk_i2c->busy)
return -EBUSY;
@@ -891,7 +899,7 @@ static const struct dev_pm_ops nmk_i2c_pm = {
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
}
static const struct i2c_algorithm nmk_i2c_algo = {
@@ -899,78 +907,81 @@ static const struct i2c_algorithm nmk_i2c_algo = {
.functionality = nmk_i2c_functionality
};
-static int __devinit nmk_i2c_probe(struct platform_device *pdev)
+static struct nmk_i2c_controller u8500_i2c = {
+ /*
+ * Slave data setup time: 250ns, 100ns, and 10ns, which
+ * is 14, 6 and 2 respectively for a 48 MHz i2c clock.
+ */
+ .slsu = 0xe,
+ .tft = 1, /* Tx FIFO threshold */
+ .rft = 8, /* Rx FIFO threshold */
+ .clk_freq = 400000, /* fast mode operation */
+ .timeout = 200, /* Slave response timeout(ms) */
+ .sm = I2C_FREQ_MODE_FAST,
+};
+
+static atomic_t adapter_id = ATOMIC_INIT(0);
+
+static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
- struct resource *res;
- struct nmk_i2c_controller *pdata =
- pdev->dev.platform_data;
+ struct nmk_i2c_controller *pdata = adev->dev.platform_data;
struct nmk_i2c_dev *dev;
struct i2c_adapter *adap;
+ if (!pdata)
+ /* No i2c configuration found, using the default. */
+ pdata = &u8500_i2c;
+
dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL);
if (!dev) {
- dev_err(&pdev->dev, "cannot allocate memory\n");
+ dev_err(&adev->dev, "cannot allocate memory\n");
ret = -ENOMEM;
goto err_no_mem;
}
dev->busy = false;
- dev->pdev = pdev;
- platform_set_drvdata(pdev, dev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto err_no_resource;
- }
-
- if (request_mem_region(res->start, resource_size(res),
- DRIVER_NAME "I/O region") == NULL) {
- ret = -EBUSY;
- goto err_no_region;
- }
+ dev->adev = adev;
+ amba_set_drvdata(adev, dev);
- dev->virtbase = ioremap(res->start, resource_size(res));
+ dev->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
if (!dev->virtbase) {
ret = -ENOMEM;
goto err_no_ioremap;
}
- dev->irq = platform_get_irq(pdev, 0);
+ dev->irq = adev->irq[0];
ret = request_irq(dev->irq, i2c_irq_handler, 0,
DRIVER_NAME, dev);
if (ret) {
- dev_err(&pdev->dev, "cannot claim the irq %d\n", dev->irq);
+ dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq);
goto err_irq;
}
- dev->regulator = regulator_get(&pdev->dev, "v-i2c");
+ dev->regulator = regulator_get(&adev->dev, "v-i2c");
if (IS_ERR(dev->regulator)) {
- dev_warn(&pdev->dev, "could not get i2c regulator\n");
+ dev_warn(&adev->dev, "could not get i2c regulator\n");
dev->regulator = NULL;
}
- pm_suspend_ignore_children(&pdev->dev, true);
- pm_runtime_enable(&pdev->dev);
+ pm_suspend_ignore_children(&adev->dev, true);
- dev->clk = clk_get(&pdev->dev, NULL);
+ dev->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(dev->clk)) {
- dev_err(&pdev->dev, "could not get i2c clock\n");
+ dev_err(&adev->dev, "could not get i2c clock\n");
ret = PTR_ERR(dev->clk);
goto err_no_clk;
}
adap = &dev->adap;
- adap->dev.parent = &pdev->dev;
+ adap->dev.parent = &adev->dev;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &nmk_i2c_algo;
adap->timeout = msecs_to_jiffies(pdata->timeout);
+ adap->nr = atomic_read(&adapter_id);
snprintf(adap->name, sizeof(adap->name),
- "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
-
- /* fetch the controller id */
- adap->nr = pdev->id;
+ "Nomadik I2C%d at %pR", adap->nr, &adev->res);
+ atomic_inc(&adapter_id);
/* fetch the controller configuration from machine */
dev->cfg.clk_freq = pdata->clk_freq;
@@ -981,16 +992,18 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(adap, dev);
- dev_info(&pdev->dev,
+ dev_info(&adev->dev,
"initialize %s on virtual base %p\n",
adap->name, dev->virtbase);
ret = i2c_add_numbered_adapter(adap);
if (ret) {
- dev_err(&pdev->dev, "failed to add adapter\n");
+ dev_err(&adev->dev, "failed to add adapter\n");
goto err_add_adap;
}
+ pm_runtime_put(&adev->dev);
+
return 0;
err_add_adap:
@@ -998,25 +1011,21 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
err_no_clk:
if (dev->regulator)
regulator_put(dev->regulator);
- pm_runtime_disable(&pdev->dev);
free_irq(dev->irq, dev);
err_irq:
iounmap(dev->virtbase);
err_no_ioremap:
- release_mem_region(res->start, resource_size(res));
- err_no_region:
- platform_set_drvdata(pdev, NULL);
- err_no_resource:
+ amba_set_drvdata(adev, NULL);
kfree(dev);
err_no_mem:
return ret;
}
-static int __devexit nmk_i2c_remove(struct platform_device *pdev)
+static int nmk_i2c_remove(struct amba_device *adev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct nmk_i2c_dev *dev = platform_get_drvdata(pdev);
+ struct resource *res = &adev->res;
+ struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
i2c_del_adapter(&dev->adap);
flush_i2c_fifo(dev);
@@ -1031,31 +1040,46 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev)
clk_put(dev->clk);
if (dev->regulator)
regulator_put(dev->regulator);
- pm_runtime_disable(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
+ pm_runtime_disable(&adev->dev);
+ amba_set_drvdata(adev, NULL);
kfree(dev);
return 0;
}
-static struct platform_driver nmk_i2c_driver = {
- .driver = {
+static struct amba_id nmk_i2c_ids[] = {
+ {
+ .id = 0x00180024,
+ .mask = 0x00ffffff,
+ },
+ {
+ .id = 0x00380024,
+ .mask = 0x00ffffff,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(amba, nmk_i2c_ids);
+
+static struct amba_driver nmk_i2c_driver = {
+ .drv = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
.pm = &nmk_i2c_pm,
},
+ .id_table = nmk_i2c_ids,
.probe = nmk_i2c_probe,
- .remove = __devexit_p(nmk_i2c_remove),
+ .remove = nmk_i2c_remove,
};
static int __init nmk_i2c_init(void)
{
- return platform_driver_register(&nmk_i2c_driver);
+ return amba_driver_register(&nmk_i2c_driver);
}
static void __exit nmk_i2c_exit(void)
{
- platform_driver_unregister(&nmk_i2c_driver);
+ amba_driver_unregister(&nmk_i2c_driver);
}
subsys_initcall(nmk_i2c_init);
@@ -1064,4 +1088,3 @@ module_exit(nmk_i2c_exit);
MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR");
MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 75194c579b6d..bffd5501ac2d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -10,40 +10,9 @@
*/
/*
- * Device tree configuration:
- *
- * Required properties:
- * - compatible : "opencores,i2c-ocores"
- * - reg : bus address start and address range size of device
- * - interrupts : interrupt number
- * - regstep : size of device registers in bytes
- * - clock-frequency : frequency of bus clock in Hz
- *
- * Example:
- *
- * i2c0: ocores@a0000000 {
- * compatible = "opencores,i2c-ocores";
- * reg = <0xa0000000 0x8>;
- * interrupts = <10>;
- *
- * regstep = <1>;
- * clock-frequency = <20000000>;
- *
- * -- Devices connected on this I2C bus get
- * -- defined here; address- and size-cells
- * -- apply to these child devices
- *
- * #address-cells = <1>;
- * #size-cells = <0>;
- *
- * dummy@60 {
- * compatible = "dummy";
- * reg = <60>;
- * };
- * };
- *
+ * This driver can be used from the device tree, see
+ * Documentation/devicetree/bindings/i2c/ocore-i2c.txt
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -56,10 +25,12 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_i2c.h>
+#include <linux/log2.h>
struct ocores_i2c {
void __iomem *base;
- int regstep;
+ u32 reg_shift;
+ u32 reg_io_width;
wait_queue_head_t wait;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -102,12 +73,22 @@ struct ocores_i2c {
static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value)
{
- iowrite8(value, i2c->base + reg * i2c->regstep);
+ if (i2c->reg_io_width == 4)
+ iowrite32(value, i2c->base + (reg << i2c->reg_shift));
+ else if (i2c->reg_io_width == 2)
+ iowrite16(value, i2c->base + (reg << i2c->reg_shift));
+ else
+ iowrite8(value, i2c->base + (reg << i2c->reg_shift));
}
static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg)
{
- return ioread8(i2c->base + reg * i2c->regstep);
+ if (i2c->reg_io_width == 4)
+ return ioread32(i2c->base + (reg << i2c->reg_shift));
+ else if (i2c->reg_io_width == 2)
+ return ioread16(i2c->base + (reg << i2c->reg_shift));
+ else
+ return ioread8(i2c->base + (reg << i2c->reg_shift));
}
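/*
 * A minimal sketch of the register addressing used above: register N,
 * accessed with an I/O width of reg_io_width bytes (1, 2 or 4), lives at
 * base + (N << reg_shift). With the deprecated "regstep" binding handled
 * later in this patch, reg_shift is simply ilog2(regstep), e.g. a regstep
 * of 4 gives reg_shift = 2. The helper name is illustrative only.
 */
static inline void __iomem *example_reg_addr(struct ocores_i2c *i2c, int reg)
{
	return i2c->base + (reg << i2c->reg_shift);
}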
static void ocores_process(struct ocores_i2c *i2c)
@@ -247,26 +228,35 @@ static struct i2c_adapter ocores_adapter = {
};
#ifdef CONFIG_OF
-static int ocores_i2c_of_probe(struct platform_device* pdev,
- struct ocores_i2c* i2c)
+static int ocores_i2c_of_probe(struct platform_device *pdev,
+ struct ocores_i2c *i2c)
{
- const __be32* val;
-
- val = of_get_property(pdev->dev.of_node, "regstep", NULL);
- if (!val) {
- dev_err(&pdev->dev, "Missing required parameter 'regstep'");
- return -ENODEV;
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
+ /* no 'reg-shift', check for deprecated 'regstep' */
+ if (!of_property_read_u32(np, "regstep", &val)) {
+ if (!is_power_of_2(val)) {
+ dev_err(&pdev->dev, "invalid regstep %d\n",
+ val);
+ return -EINVAL;
+ }
+ i2c->reg_shift = ilog2(val);
+ dev_warn(&pdev->dev,
+ "regstep property deprecated, use reg-shift\n");
+ }
}
- i2c->regstep = be32_to_cpup(val);
- val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL);
- if (!val) {
+ if (of_property_read_u32(np, "clock-frequency", &val)) {
dev_err(&pdev->dev,
- "Missing required parameter 'clock-frequency'");
+ "Missing required parameter 'clock-frequency'\n");
return -ENODEV;
}
- i2c->clock_khz = be32_to_cpup(val) / 1000;
+ i2c->clock_khz = val / 1000;
+ of_property_read_u32(pdev->dev.of_node, "reg-io-width",
+ &i2c->reg_io_width);
return 0;
}
#else
@@ -308,7 +298,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (pdata) {
- i2c->regstep = pdata->regstep;
+ i2c->reg_shift = pdata->reg_shift;
+ i2c->reg_io_width = pdata->reg_io_width;
i2c->clock_khz = pdata->clock_khz;
} else {
ret = ocores_i2c_of_probe(pdev, i2c);
@@ -316,6 +307,9 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return ret;
}
+ if (i2c->reg_io_width == 0)
+ i2c->reg_io_width = 1; /* Set to default value */
+
ocores_init(i2c);
init_waitqueue_head(&i2c->wait);
@@ -351,7 +345,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit ocores_i2c_remove(struct platform_device* pdev)
+static int __devexit ocores_i2c_remove(struct platform_device *pdev)
{
struct ocores_i2c *i2c = platform_get_drvdata(pdev);
@@ -367,9 +361,9 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev)
}
#ifdef CONFIG_PM
-static int ocores_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+static int ocores_i2c_suspend(struct device *dev)
{
- struct ocores_i2c *i2c = platform_get_drvdata(pdev);
+ struct ocores_i2c *i2c = dev_get_drvdata(dev);
u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
/* make sure the device is disabled */
@@ -378,17 +372,19 @@ static int ocores_i2c_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int ocores_i2c_resume(struct platform_device *pdev)
+static int ocores_i2c_resume(struct device *dev)
{
- struct ocores_i2c *i2c = platform_get_drvdata(pdev);
+ struct ocores_i2c *i2c = dev_get_drvdata(dev);
ocores_init(i2c);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
+#define OCORES_I2C_PM (&ocores_i2c_pm)
#else
-#define ocores_i2c_suspend NULL
-#define ocores_i2c_resume NULL
+#define OCORES_I2C_PM NULL
#endif
static struct of_device_id ocores_i2c_match[] = {
@@ -400,12 +396,11 @@ MODULE_DEVICE_TABLE(of, ocores_i2c_match);
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
.remove = __devexit_p(ocores_i2c_remove),
- .suspend = ocores_i2c_suspend,
- .resume = ocores_i2c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
+ .pm = OCORES_I2C_PM,
},
};
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index ee139a598814..f44c83549fe5 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -2,7 +2,7 @@
* (C) Copyright 2009-2010
* Nokia Siemens Networks, michael.lawnick.ext@nsn.com
*
- * Portions Copyright (C) 2010 Cavium Networks, Inc.
+ * Portions Copyright (C) 2010, 2011 Cavium Networks, Inc.
*
* This is a driver for the i2c adapter in Cavium Networks' OCTEON processors.
*
@@ -11,17 +11,18 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
-
-#include <linux/io.h>
#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
#include <asm/octeon/octeon.h>
@@ -65,7 +66,7 @@ struct octeon_i2c {
wait_queue_head_t queue;
struct i2c_adapter adap;
int irq;
- int twsi_freq;
+ u32 twsi_freq;
int sys_freq;
resource_size_t twsi_phys;
void __iomem *twsi_base;
@@ -121,10 +122,8 @@ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
*/
static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
{
- u64 tmp;
-
__raw_writeq(data, i2c->twsi_base + TWSI_INT);
- tmp = __raw_readq(i2c->twsi_base + TWSI_INT);
+ __raw_readq(i2c->twsi_base + TWSI_INT);
}
/**
@@ -515,7 +514,6 @@ static int __devinit octeon_i2c_probe(struct platform_device *pdev)
{
int irq, result = 0;
struct octeon_i2c *i2c;
- struct octeon_i2c_data *i2c_data;
struct resource *res_mem;
/* All adaptors have an irq. */
@@ -523,86 +521,90 @@ static int __devinit octeon_i2c_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
+ i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c) {
dev_err(&pdev->dev, "kzalloc failed\n");
result = -ENOMEM;
goto out;
}
i2c->dev = &pdev->dev;
- i2c_data = pdev->dev.platform_data;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res_mem == NULL) {
dev_err(i2c->dev, "found no memory resource\n");
result = -ENXIO;
- goto fail_region;
+ goto out;
}
+ i2c->twsi_phys = res_mem->start;
+ i2c->regsize = resource_size(res_mem);
- if (i2c_data == NULL) {
- dev_err(i2c->dev, "no I2C frequency data\n");
+ /*
+ * "clock-rate" is a legacy binding, the official binding is
+ * "clock-frequency". Try the official one first and then
+ * fall back if it doesn't exist.
+ */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &i2c->twsi_freq) &&
+ of_property_read_u32(pdev->dev.of_node,
+ "clock-rate", &i2c->twsi_freq)) {
+ dev_err(i2c->dev,
+ "no I2C 'clock-rate' or 'clock-frequency' property\n");
result = -ENXIO;
- goto fail_region;
+ goto out;
}
- i2c->twsi_phys = res_mem->start;
- i2c->regsize = resource_size(res_mem);
- i2c->twsi_freq = i2c_data->i2c_freq;
- i2c->sys_freq = i2c_data->sys_freq;
+ i2c->sys_freq = octeon_get_io_clock_rate();
- if (!request_mem_region(i2c->twsi_phys, i2c->regsize, res_mem->name)) {
+ if (!devm_request_mem_region(&pdev->dev, i2c->twsi_phys, i2c->regsize,
+ res_mem->name)) {
dev_err(i2c->dev, "request_mem_region failed\n");
- goto fail_region;
+ goto out;
}
- i2c->twsi_base = ioremap(i2c->twsi_phys, i2c->regsize);
+ i2c->twsi_base = devm_ioremap(&pdev->dev, i2c->twsi_phys, i2c->regsize);
init_waitqueue_head(&i2c->queue);
i2c->irq = irq;
- result = request_irq(i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c);
+ result = devm_request_irq(&pdev->dev, i2c->irq,
+ octeon_i2c_isr, 0, DRV_NAME, i2c);
if (result < 0) {
dev_err(i2c->dev, "failed to attach interrupt\n");
- goto fail_irq;
+ goto out;
}
result = octeon_i2c_initlowlevel(i2c);
if (result) {
dev_err(i2c->dev, "init low level failed\n");
- goto fail_add;
+ goto out;
}
result = octeon_i2c_setclock(i2c);
if (result) {
dev_err(i2c->dev, "clock init failed\n");
- goto fail_add;
+ goto out;
}
i2c->adap = octeon_i2c_ops;
i2c->adap.dev.parent = &pdev->dev;
- i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(&i2c->adap, i2c);
platform_set_drvdata(pdev, i2c);
- result = i2c_add_numbered_adapter(&i2c->adap);
+ result = i2c_add_adapter(&i2c->adap);
if (result < 0) {
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
-
dev_info(i2c->dev, "version %s\n", DRV_VERSION);
- return result;
+ of_i2c_register_devices(&i2c->adap);
+
+ return 0;
fail_add:
platform_set_drvdata(pdev, NULL);
- free_irq(i2c->irq, i2c);
-fail_irq:
- iounmap(i2c->twsi_base);
- release_mem_region(i2c->twsi_phys, i2c->regsize);
-fail_region:
- kfree(i2c);
out:
return result;
};
@@ -613,19 +615,24 @@ static int __devexit octeon_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
platform_set_drvdata(pdev, NULL);
- free_irq(i2c->irq, i2c);
- iounmap(i2c->twsi_base);
- release_mem_region(i2c->twsi_phys, i2c->regsize);
- kfree(i2c);
return 0;
};
+static struct of_device_id octeon_i2c_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-twsi",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+
static struct platform_driver octeon_i2c_driver = {
.probe = octeon_i2c_probe,
.remove = __devexit_p(octeon_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
+ .of_match_table = octeon_i2c_match,
},
};
@@ -635,4 +642,3 @@ MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index c2148332de0f..5d19a49803c1 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -49,8 +49,8 @@
/* I2C controller revisions present on specific hardware */
#define OMAP_I2C_REV_ON_2430 0x36
-#define OMAP_I2C_REV_ON_3430 0x3C
-#define OMAP_I2C_REV_ON_3530_4430 0x40
+#define OMAP_I2C_REV_ON_3430_3530 0x3C
+#define OMAP_I2C_REV_ON_3630_4430 0x40
/* timeout waiting for the controller to respond */
#define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000))
@@ -173,7 +173,7 @@ enum {
/* Errata definitions */
#define I2C_OMAP_ERRATA_I207 (1 << 0)
-#define I2C_OMAP3_1P153 (1 << 1)
+#define I2C_OMAP_ERRATA_I462 (1 << 1)
struct omap_i2c_dev {
struct device *dev;
@@ -269,47 +269,6 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
(i2c_dev->regs[reg] << i2c_dev->reg_shift));
}
-static void omap_i2c_unidle(struct omap_i2c_dev *dev)
-{
- if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
- omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate);
- omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, dev->bufstate);
- omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate);
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- }
-
- /*
- * Don't write to this register if the IE state is 0 as it can
- * cause deadlock.
- */
- if (dev->iestate)
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
-}
-
-static void omap_i2c_idle(struct omap_i2c_dev *dev)
-{
- u16 iv;
-
- dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
- omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
- else
- omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
-
- if (dev->rev < OMAP_I2C_OMAP1_REV_2) {
- iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */
- } else {
- omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate);
-
- /* Flush posted write */
- omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
- }
-}
-
static int omap_i2c_init(struct omap_i2c_dev *dev)
{
u16 psc = 0, scll = 0, sclh = 0, buf = 0;
@@ -346,7 +305,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
SYSC_AUTOIDLE_MASK);
- } else if (dev->rev >= OMAP_I2C_REV_ON_3430) {
+ } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) {
dev->syscstate = SYSC_AUTOIDLE_MASK;
dev->syscstate |= SYSC_ENAWAKEUP_MASK;
dev->syscstate |= (SYSC_IDLEMODE_SMART <<
@@ -468,11 +427,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
/* Take the I2C module out of reset: */
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
- dev->errata = 0;
-
- if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
- dev->errata |= I2C_OMAP_ERRATA_I207;
-
/* Enable interrupts */
dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
@@ -514,7 +468,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
struct i2c_msg *msg, int stop)
{
struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
- int r;
+ unsigned long timeout;
u16 w;
dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
@@ -536,7 +490,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR;
omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w);
- init_completion(&dev->cmd_complete);
+ INIT_COMPLETION(dev->cmd_complete);
dev->cmd_err = 0;
w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT;
@@ -584,12 +538,10 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
* REVISIT: We should abort the transfer on signals, but the bus goes
* into arbitration and we're currently unable to recover from it.
*/
- r = wait_for_completion_timeout(&dev->cmd_complete,
- OMAP_I2C_TIMEOUT);
+ timeout = wait_for_completion_timeout(&dev->cmd_complete,
+ OMAP_I2C_TIMEOUT);
dev->buf_len = 0;
- if (r < 0)
- return r;
- if (r == 0) {
+ if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
omap_i2c_init(dev);
return -ETIMEDOUT;
@@ -630,7 +582,9 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int i;
int r;
- pm_runtime_get_sync(dev->dev);
+ r = pm_runtime_get_sync(dev->dev);
+ if (IS_ERR_VALUE(r))
+ goto out;
r = omap_i2c_wait_for_bb(dev);
if (r < 0)
@@ -767,11 +721,11 @@ omap_i2c_omap1_isr(int this_irq, void *dev_id)
#endif
/*
- * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing
+ * OMAP3430 Errata i462: When an XRDY/XDR is hit, wait for XUDF before writing
* data to DATA_REG. Otherwise some data bytes can be lost while transferring
* them from the memory to the I2C interface.
*/
-static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
+static int errata_omap3_i462(struct omap_i2c_dev *dev, u16 *stat, int *err)
{
unsigned long timeout = 10000;
@@ -779,7 +733,6 @@ static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY |
OMAP_I2C_STAT_XDR));
- *err |= OMAP_I2C_STAT_XUDF;
return -ETIMEDOUT;
}
@@ -792,6 +745,7 @@ static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err)
return 0;
}
+ *err |= OMAP_I2C_STAT_XUDF;
return 0;
}
@@ -930,8 +884,8 @@ complete:
break;
}
- if ((dev->errata & I2C_OMAP3_1P153) &&
- errata_omap3_1p153(dev, &stat, &err))
+ if ((dev->errata & I2C_OMAP_ERRATA_I462) &&
+ errata_omap3_i462(dev, &stat, &err))
goto complete;
omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w);
@@ -1048,6 +1002,7 @@ omap_i2c_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, dev);
+ init_completion(&dev->cmd_complete);
dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
@@ -1057,12 +1012,19 @@ omap_i2c_probe(struct platform_device *pdev)
dev->regs = (u8 *)reg_map_ip_v1;
pm_runtime_enable(dev->dev);
- pm_runtime_get_sync(dev->dev);
+ r = pm_runtime_get_sync(dev->dev);
+ if (IS_ERR_VALUE(r))
+ goto err_free_mem;
dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff;
- if (dev->rev <= OMAP_I2C_REV_ON_3430)
- dev->errata |= I2C_OMAP3_1P153;
+ dev->errata = 0;
+
+ if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ dev->errata |= I2C_OMAP_ERRATA_I207;
+
+ if (dev->rev <= OMAP_I2C_REV_ON_3430_3530)
+ dev->errata |= I2C_OMAP_ERRATA_I462;
if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
@@ -1079,7 +1041,7 @@ omap_i2c_probe(struct platform_device *pdev)
dev->fifo_size = (dev->fifo_size / 2);
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
+ if (dev->rev >= OMAP_I2C_REV_ON_3630_4430)
dev->b_hw = 0; /* Disable hardware fixes */
else
dev->b_hw = 1; /* Enable hardware fixes */
@@ -1095,7 +1057,7 @@ omap_i2c_probe(struct platform_device *pdev)
isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? omap_i2c_omap1_isr :
omap_i2c_isr;
- r = request_irq(dev->irq, isr, 0, pdev->name, dev);
+ r = request_irq(dev->irq, isr, IRQF_NO_SUSPEND, pdev->name, dev);
if (r) {
dev_err(dev->dev, "failure requesting irq %i\n", dev->irq);
@@ -1105,8 +1067,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
- pm_runtime_put(dev->dev);
-
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
@@ -1126,6 +1086,8 @@ omap_i2c_probe(struct platform_device *pdev)
of_i2c_register_devices(adap);
+ pm_runtime_put(dev->dev);
+
return 0;
err_free_irq:
@@ -1134,6 +1096,7 @@ err_unuse_clocks:
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
pm_runtime_put(dev->dev);
iounmap(dev->base);
+ pm_runtime_disable(&pdev->dev);
err_free_mem:
platform_set_drvdata(pdev, NULL);
kfree(dev);
@@ -1143,17 +1106,23 @@ err_release_region:
return r;
}
-static int
-omap_i2c_remove(struct platform_device *pdev)
+static int __devexit omap_i2c_remove(struct platform_device *pdev)
{
struct omap_i2c_dev *dev = platform_get_drvdata(pdev);
struct resource *mem;
+ int ret;
platform_set_drvdata(pdev, NULL);
free_irq(dev->irq, dev);
i2c_del_adapter(&dev->adapter);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
iounmap(dev->base);
kfree(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1161,13 +1130,26 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
#ifdef CONFIG_PM_RUNTIME
static int omap_i2c_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
+ u16 iv;
+
+ _dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG);
+
+ omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0);
- omap_i2c_idle(_dev);
+ if (_dev->rev < OMAP_I2C_OMAP1_REV_2) {
+ iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */
+ } else {
+ omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate);
+
+ /* Flush posted write */
+ omap_i2c_read_reg(_dev, OMAP_I2C_STAT_REG);
+ }
return 0;
}
@@ -1177,23 +1159,40 @@ static int omap_i2c_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct omap_i2c_dev *_dev = platform_get_drvdata(pdev);
- omap_i2c_unidle(_dev);
+ if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0);
+ omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate);
+ omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
+ }
+
+ /*
+ * Don't write to this register if the IE state is 0 as it can
+ * cause deadlock.
+ */
+ if (_dev->iestate)
+ omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate);
return 0;
}
+#endif /* CONFIG_PM_RUNTIME */
static struct dev_pm_ops omap_i2c_pm_ops = {
- .runtime_suspend = omap_i2c_runtime_suspend,
- .runtime_resume = omap_i2c_runtime_resume,
+ SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
+ omap_i2c_runtime_resume, NULL)
};
#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
#else
#define OMAP_I2C_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM */
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
- .remove = omap_i2c_remove,
+ .remove = __devexit_p(omap_i2c_remove),
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 07b7447ecbc9..3d71395ae1f7 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -306,8 +306,7 @@ static int __devinit pmcmsptwi_probe(struct platform_device *pldev)
pmcmsptwi_data.irq = platform_get_irq(pldev, 0);
if (pmcmsptwi_data.irq) {
rc = request_irq(pmcmsptwi_data.irq, &pmcmsptwi_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
- pldev->name, &pmcmsptwi_data);
+ IRQF_SHARED, pldev->name, &pmcmsptwi_data);
if (rc == 0) {
/*
* Enable 'DONE' interrupt only.
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 99389d2eae51..5d54416770b0 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -587,25 +587,27 @@ static struct i2c_algorithm pnx_algorithm = {
};
#ifdef CONFIG_PM
-static int i2c_pnx_controller_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int i2c_pnx_controller_suspend(struct device *dev)
{
- struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+ struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
clk_disable(alg_data->clk);
return 0;
}
-static int i2c_pnx_controller_resume(struct platform_device *pdev)
+static int i2c_pnx_controller_resume(struct device *dev)
{
- struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+ struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
return clk_enable(alg_data->clk);
}
+
+static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
+ i2c_pnx_controller_suspend, i2c_pnx_controller_resume);
+#define PNX_I2C_PM (&i2c_pnx_pm)
#else
-#define i2c_pnx_controller_suspend NULL
-#define i2c_pnx_controller_resume NULL
+#define PNX_I2C_PM NULL
#endif
static int __devinit i2c_pnx_probe(struct platform_device *pdev)
@@ -783,11 +785,10 @@ static struct platform_driver i2c_pnx_driver = {
.name = "pnx-i2c",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(i2c_pnx_of_match),
+ .pm = PNX_I2C_PM,
},
.probe = i2c_pnx_probe,
.remove = __devexit_p(i2c_pnx_remove),
- .suspend = i2c_pnx_controller_suspend,
- .resume = i2c_pnx_controller_resume,
};
static int __init i2c_adap_pnx_init(void)
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index 93709fbe30eb..d8515be00b98 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -254,7 +254,7 @@ static int __devexit puv3_i2c_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state)
+static int puv3_i2c_suspend(struct device *dev)
{
int poll_count;
/* Disable the IIC */
@@ -267,23 +267,20 @@ static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int puv3_i2c_resume(struct platform_device *dev)
-{
- return 0 ;
-}
+static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL);
+#define PUV3_I2C_PM (&puv3_i2c_pm)
+
#else
-#define puv3_i2c_suspend NULL
-#define puv3_i2c_resume NULL
+#define PUV3_I2C_PM NULL
#endif
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
.remove = __devexit_p(puv3_i2c_remove),
- .suspend = puv3_i2c_suspend,
- .resume = puv3_i2c_resume,
.driver = {
.name = "PKUnity-v3-I2C",
.owner = THIS_MODULE,
+ .pm = PUV3_I2C_PM,
}
};
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index a997c7d3f95d..1034d93fb838 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -41,13 +41,6 @@
#include <asm/irq.h>
-#ifndef CONFIG_HAVE_CLK
-#define clk_get(dev, id) NULL
-#define clk_put(clk) do { } while (0)
-#define clk_disable(clk) do { } while (0)
-#define clk_enable(clk) do { } while (0)
-#endif
-
struct pxa_reg_layout {
u32 ibmr;
u32 idbr;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 01959154572d..5ae3b0236bd3 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -122,7 +122,7 @@ static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pde
{
if (pdev->dev.of_node) {
const struct of_device_id *match;
- match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node);
+ match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
return (unsigned int)match->data;
}
@@ -609,7 +609,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
if (ret != -EAGAIN) {
clk_disable(i2c->clk);
- pm_runtime_put_sync(&adap->dev);
+ pm_runtime_put(&adap->dev);
return ret;
}
@@ -619,7 +619,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
}
clk_disable(i2c->clk);
- pm_runtime_put_sync(&adap->dev);
+ pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 4d44af181f37..580a0c04cb42 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2009 ST-Ericsson AB
+ * Copyright (C) 2007-2012 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* ST DDC I2C master mode driver, used in e.g. U300 series platforms.
* Author: Linus Walleij <linus.walleij@stericsson.com>
@@ -139,8 +139,6 @@ module_param(scl_frequency, uint, 0644);
* struct stu300_dev - the stu300 driver state holder
* @pdev: parent platform device
* @adapter: corresponding I2C adapter
- * @phybase: location of I/O area in memory
- * @physize: size of I/O area in memory
* @clk: hardware block clock
* @irq: assigned interrupt line
* @cmd_issue_lock: this locks the following cmd_ variables
@@ -155,8 +153,6 @@ module_param(scl_frequency, uint, 0644);
struct stu300_dev {
struct platform_device *pdev;
struct i2c_adapter adapter;
- resource_size_t phybase;
- resource_size_t physize;
void __iomem *virtbase;
struct clk *clk;
int irq;
@@ -873,64 +869,44 @@ stu300_probe(struct platform_device *pdev)
int ret = 0;
char clk_name[] = "I2C0";
- dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL);
+ dev = devm_kzalloc(&pdev->dev, sizeof(struct stu300_dev), GFP_KERNEL);
if (!dev) {
dev_err(&pdev->dev, "could not allocate device struct\n");
- ret = -ENOMEM;
- goto err_no_devmem;
+ return -ENOMEM;
}
bus_nr = pdev->id;
clk_name[3] += (char)bus_nr;
- dev->clk = clk_get(&pdev->dev, clk_name);
+ dev->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(dev->clk)) {
- ret = PTR_ERR(dev->clk);
dev_err(&pdev->dev, "could not retrieve i2c bus clock\n");
- goto err_no_clk;
+ return PTR_ERR(dev->clk);
}
dev->pdev = pdev;
- platform_set_drvdata(pdev, dev);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto err_no_resource;
- }
-
- dev->phybase = res->start;
- dev->physize = resource_size(res);
-
- if (request_mem_region(dev->phybase, dev->physize,
- NAME " I/O Area") == NULL) {
- ret = -EBUSY;
- goto err_no_ioregion;
- }
+ if (!res)
+ return -ENOENT;
- dev->virtbase = ioremap(dev->phybase, dev->physize);
+ dev->virtbase = devm_request_and_ioremap(&pdev->dev, res);
dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
"base %p\n", bus_nr, dev->virtbase);
- if (!dev->virtbase) {
- ret = -ENOMEM;
- goto err_no_ioremap;
- }
+ if (!dev->virtbase)
+ return -ENOMEM;
dev->irq = platform_get_irq(pdev, 0);
- if (request_irq(dev->irq, stu300_irh, 0,
- NAME, dev)) {
- ret = -EIO;
- goto err_no_irq;
- }
+ ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev);
+ if (ret < 0)
+ return ret;
dev->speed = scl_frequency;
- clk_enable(dev->clk);
+ clk_prepare_enable(dev->clk);
ret = stu300_init_hw(dev);
clk_disable(dev->clk);
-
if (ret != 0) {
dev_err(&dev->pdev->dev, "error initializing hardware.\n");
- goto err_init_hw;
+ return -EIO;
}
/* IRQ event handling initialization */
@@ -952,57 +928,43 @@ stu300_probe(struct platform_device *pdev)
/* i2c device drivers may be active on return from add_adapter() */
ret = i2c_add_numbered_adapter(adap);
if (ret) {
- dev_err(&dev->pdev->dev, "failure adding ST Micro DDC "
+ dev_err(&pdev->dev, "failure adding ST Micro DDC "
"I2C adapter\n");
- goto err_add_adapter;
+ return ret;
}
- return 0;
- err_add_adapter:
- err_init_hw:
- free_irq(dev->irq, dev);
- err_no_irq:
- iounmap(dev->virtbase);
- err_no_ioremap:
- release_mem_region(dev->phybase, dev->physize);
- err_no_ioregion:
- platform_set_drvdata(pdev, NULL);
- err_no_resource:
- clk_put(dev->clk);
- err_no_clk:
- kfree(dev);
- err_no_devmem:
- dev_err(&pdev->dev, "failed to add " NAME " adapter: %d\n",
- pdev->id);
- return ret;
+ platform_set_drvdata(pdev, dev);
+ return 0;
}
#ifdef CONFIG_PM
-static int stu300_suspend(struct platform_device *pdev, pm_message_t state)
+static int stu300_suspend(struct device *device)
{
- struct stu300_dev *dev = platform_get_drvdata(pdev);
+ struct stu300_dev *dev = dev_get_drvdata(device);
/* Turn off everything */
stu300_wr8(0x00, dev->virtbase + I2C_CR);
return 0;
}
-static int stu300_resume(struct platform_device *pdev)
+static int stu300_resume(struct device *device)
{
int ret = 0;
- struct stu300_dev *dev = platform_get_drvdata(pdev);
+ struct stu300_dev *dev = dev_get_drvdata(device);
clk_enable(dev->clk);
ret = stu300_init_hw(dev);
clk_disable(dev->clk);
if (ret != 0)
- dev_err(&pdev->dev, "error re-initializing hardware.\n");
+ dev_err(device, "error re-initializing hardware.\n");
return ret;
}
+
+static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
+#define STU300_I2C_PM (&stu300_pm)
#else
-#define stu300_suspend NULL
-#define stu300_resume NULL
+#define STU300_I2C_PM NULL
#endif
static int __exit
@@ -1013,12 +975,7 @@ stu300_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
/* Turn off everything */
stu300_wr8(0x00, dev->virtbase + I2C_CR);
- free_irq(dev->irq, dev);
- iounmap(dev->virtbase);
- release_mem_region(dev->phybase, dev->physize);
- clk_put(dev->clk);
platform_set_drvdata(pdev, NULL);
- kfree(dev);
return 0;
}
@@ -1026,10 +983,9 @@ static struct platform_driver stu300_i2c_driver = {
.driver = {
.name = NAME,
.owner = THIS_MODULE,
+ .pm = STU300_I2C_PM,
},
.remove = __exit_p(stu300_remove),
- .suspend = stu300_suspend,
- .resume = stu300_resume,
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 3da7ee3eb505..9a08c57bc936 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -97,8 +97,21 @@
#define I2C_HEADER_10BIT_ADDR (1<<18)
#define I2C_HEADER_IE_ENABLE (1<<17)
#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_CONTINUE_XFER (1<<15)
#define I2C_HEADER_MASTER_ADDR_SHIFT 12
#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+/*
+ * msg_end_type: The bus control which need to be send at end of transfer.
+ * @MSG_END_STOP: Send stop pulse at end of transfer.
+ * @MSG_END_REPEAT_START: Send repeat start at end of transfer.
+ * @MSG_END_CONTINUE: The following on message is coming and so do not send
+ * stop or repeat start.
+ */
+enum msg_end_type {
+ MSG_END_STOP,
+ MSG_END_REPEAT_START,
+ MSG_END_CONTINUE,
+};
/**
* struct tegra_i2c_dev - per device i2c context
@@ -106,7 +119,6 @@
* @adapter: core i2c layer adapter information
* @clk: clock reference for i2c controller
* @i2c_clk: clock reference for i2c bus
- * @iomem: memory resource for registers
* @base: ioremapped registers cookie
* @cont_id: i2c controller id, used for packet header
* @irq: irq number of transfer complete interrupt
@@ -124,7 +136,6 @@ struct tegra_i2c_dev {
struct i2c_adapter adapter;
struct clk *clk;
struct clk *i2c_clk;
- struct resource *iomem;
void __iomem *base;
int cont_id;
int irq;
@@ -165,6 +176,10 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
unsigned long reg)
{
writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != I2C_TX_FIFO)
+ readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
}
static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
@@ -449,7 +464,7 @@ err:
}
static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
- struct i2c_msg *msg, int stop)
+ struct i2c_msg *msg, enum msg_end_type end_state)
{
u32 packet_header;
u32 int_mask;
@@ -476,7 +491,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
packet_header = I2C_HEADER_IE_ENABLE;
- if (!stop)
+ if (end_state == MSG_END_CONTINUE)
+ packet_header |= I2C_HEADER_CONTINUE_XFER;
+ else if (end_state == MSG_END_REPEAT_START)
packet_header |= I2C_HEADER_REPEAT_START;
if (msg->flags & I2C_M_TEN) {
packet_header |= msg->addr;
@@ -548,8 +565,14 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
clk_prepare_enable(i2c_dev->clk);
for (i = 0; i < num; i++) {
- int stop = (i == (num - 1)) ? 1 : 0;
- ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ enum msg_end_type end_type = MSG_END_STOP;
+ if (i < (num - 1)) {
+ if (msgs[i + 1].flags & I2C_M_NOSTART)
+ end_type = MSG_END_CONTINUE;
+ else
+ end_type = MSG_END_REPEAT_START;
+ }
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type);
if (ret)
break;
}
@@ -559,7 +582,8 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR |
+ I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART;
}
static const struct i2c_algorithm tegra_i2c_algo = {
@@ -572,7 +596,6 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
- struct resource *iomem;
struct clk *clk;
struct clk *i2c_clk;
const unsigned int *prop;
@@ -585,50 +608,41 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no mem resource\n");
return -EINVAL;
}
- iomem = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!iomem) {
- dev_err(&pdev->dev, "I2C region already claimed\n");
- return -EBUSY;
- }
- base = ioremap(iomem->start, resource_size(iomem));
+ base = devm_request_and_ioremap(&pdev->dev, res);
if (!base) {
- dev_err(&pdev->dev, "Cannot ioremap I2C region\n");
- return -ENOMEM;
+ dev_err(&pdev->dev, "Cannot request/ioremap I2C registers\n");
+ return -EADDRNOTAVAIL;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "no irq resource\n");
- ret = -EINVAL;
- goto err_iounmap;
+ return -EINVAL;
}
irq = res->start;
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "missing controller clock");
- ret = PTR_ERR(clk);
- goto err_release_region;
+ return PTR_ERR(clk);
}
- i2c_clk = clk_get(&pdev->dev, "i2c");
+ i2c_clk = devm_clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c_clk)) {
dev_err(&pdev->dev, "missing bus clock");
- ret = PTR_ERR(i2c_clk);
- goto err_clk_put;
+ return PTR_ERR(i2c_clk);
}
- i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev) {
- ret = -ENOMEM;
- goto err_i2c_clk_put;
+ dev_err(&pdev->dev, "Could not allocate struct tegra_i2c_dev");
+ return -ENOMEM;
}
i2c_dev->base = base;
i2c_dev->clk = clk;
i2c_dev->i2c_clk = i2c_clk;
- i2c_dev->iomem = iomem;
i2c_dev->adapter.algo = &tegra_i2c_algo;
i2c_dev->irq = irq;
i2c_dev->cont_id = pdev->id;
@@ -657,13 +671,14 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- goto err_free;
+ return ret;
}
- ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
+ tegra_i2c_isr, 0, pdev->name, i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
- goto err_free;
+ return ret;
}
clk_prepare_enable(i2c_dev->i2c_clk);
@@ -681,45 +696,26 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
if (ret) {
dev_err(&pdev->dev, "Failed to add I2C adapter\n");
- goto err_free_irq;
+ clk_disable_unprepare(i2c_dev->i2c_clk);
+ return ret;
}
of_i2c_register_devices(&i2c_dev->adapter);
return 0;
-err_free_irq:
- free_irq(i2c_dev->irq, i2c_dev);
-err_free:
- kfree(i2c_dev);
-err_i2c_clk_put:
- clk_put(i2c_clk);
-err_clk_put:
- clk_put(clk);
-err_release_region:
- release_mem_region(iomem->start, resource_size(iomem));
-err_iounmap:
- iounmap(base);
- return ret;
}
static int __devexit tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
- free_irq(i2c_dev->irq, i2c_dev);
- clk_put(i2c_dev->i2c_clk);
- clk_put(i2c_dev->clk);
- release_mem_region(i2c_dev->iomem->start,
- resource_size(i2c_dev->iomem));
- iounmap(i2c_dev->base);
- kfree(i2c_dev);
return 0;
}
-#ifdef CONFIG_PM
-static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tegra_i2c_suspend(struct device *dev)
{
- struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
i2c_lock_adapter(&i2c_dev->adapter);
i2c_dev->is_suspended = true;
@@ -728,9 +724,9 @@ static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int tegra_i2c_resume(struct platform_device *pdev)
+static int tegra_i2c_resume(struct device *dev)
{
- struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev);
int ret;
i2c_lock_adapter(&i2c_dev->adapter);
@@ -748,6 +744,11 @@ static int tegra_i2c_resume(struct platform_device *pdev)
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume);
+#define TEGRA_I2C_PM (&tegra_i2c_pm)
+#else
+#define TEGRA_I2C_PM NULL
#endif
#if defined(CONFIG_OF)
@@ -758,21 +759,16 @@ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
-#else
-#define tegra_i2c_of_match NULL
#endif
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = __devexit_p(tegra_i2c_remove),
-#ifdef CONFIG_PM
- .suspend = tegra_i2c_suspend,
- .resume = tegra_i2c_resume,
-#endif
.driver = {
.name = "tegra-i2c",
.owner = THIS_MODULE,
- .of_match_table = tegra_i2c_of_match,
+ .of_match_table = of_match_ptr(tegra_i2c_of_match),
+ .pm = TEGRA_I2C_PM,
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 26488aa893d5..2efa56c5ff2c 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1312,6 +1312,37 @@ module_exit(i2c_exit);
*/
/**
+ * __i2c_transfer - unlocked flavor of i2c_transfer
+ * @adap: Handle to I2C bus
+ * @msgs: One or more messages to execute before STOP is issued to
+ * terminate the operation; each message begins with a START.
+ * @num: Number of messages to be executed.
+ *
+ * Returns negative errno, else the number of messages executed.
+ *
+ * Adapter lock must be held when calling this function. No debug logging
+ * takes place. adap->algo->master_xfer existence isn't checked.
+ */
+int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ unsigned long orig_jiffies;
+ int ret, try;
+
+ /* Retry automatically on arbitration loss */
+ orig_jiffies = jiffies;
+ for (ret = 0, try = 0; try <= adap->retries; try++) {
+ ret = adap->algo->master_xfer(adap, msgs, num);
+ if (ret != -EAGAIN)
+ break;
+ if (time_after(jiffies, orig_jiffies + adap->timeout))
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__i2c_transfer);
+
+/**
* i2c_transfer - execute a single or combined I2C message
* @adap: Handle to I2C bus
* @msgs: One or more messages to execute before STOP is issued to
@@ -1325,8 +1356,7 @@ module_exit(i2c_exit);
*/
int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- unsigned long orig_jiffies;
- int ret, try;
+ int ret;
/* REVISIT the fault reporting model here is weak:
*
@@ -1364,15 +1394,7 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c_lock_adapter(adap);
}
- /* Retry automatically on arbitration loss */
- orig_jiffies = jiffies;
- for (ret = 0, try = 0; try <= adap->retries; try++) {
- ret = adap->algo->master_xfer(adap, msgs, num);
- if (ret != -EAGAIN)
- break;
- if (time_after(jiffies, orig_jiffies + adap->timeout))
- break;
- }
+ ret = __i2c_transfer(adap, msgs, num);
i2c_unlock_adapter(adap);
return ret;
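
The new __i2c_transfer() exists for callers that already hold the adapter lock, such as bus multiplexers that must keep the parent adapter locked across a select/transfer/deselect sequence. A hedged sketch of such a caller; the helper name and register layout below are illustrative only and not taken from the patch:

#include <linux/i2c.h>

/* write one register byte while the adapter lock is already held */
static int example_locked_reg_write(struct i2c_adapter *adap, u16 addr,
                                    u8 reg, u8 val)
{
        u8 buf[2] = { reg, val };
        struct i2c_msg msg = {
                .addr  = addr,
                .flags = 0,
                .len   = sizeof(buf),
                .buf   = buf,
        };
        int ret;

        /* no locking and no debug trace: the caller did i2c_lock_adapter() */
        ret = __i2c_transfer(adap, &msg, 1);
        if (ret < 0)
                return ret;
        return ret == 1 ? 0 : -EIO;
}

A caller would bracket it with i2c_lock_adapter(adap) / i2c_unlock_adapter(adap), exactly the structure i2c_transfer() itself now has in the hunk above.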
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 92406097efeb..8d1e32d7cd97 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -4,7 +4,7 @@
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
- ide_drive_t *drive = dev_get_drvdata(dev);
+ ide_drive_t *drive = to_ide_device(dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
int generic_ide_resume(struct device *dev)
{
- ide_drive_t *drive = dev_get_drvdata(dev);
+ ide_drive_t *drive = to_ide_device(dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = drive->hwif;
struct request *rq;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index fe95d5464a02..e8726177d103 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -170,6 +170,38 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
};
+static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
+ { /* MWAIT C0 */ },
+ { /* MWAIT C1 */
+ .name = "C1-IVB",
+ .desc = "MWAIT 0x00",
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .enter = &intel_idle },
+ { /* MWAIT C2 */
+ .name = "C3-IVB",
+ .desc = "MWAIT 0x10",
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 59,
+ .target_residency = 156,
+ .enter = &intel_idle },
+ { /* MWAIT C3 */
+ .name = "C6-IVB",
+ .desc = "MWAIT 0x20",
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 80,
+ .target_residency = 300,
+ .enter = &intel_idle },
+ { /* MWAIT C4 */
+ .name = "C7-IVB",
+ .desc = "MWAIT 0x30",
+ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 87,
+ .target_residency = 300,
+ .enter = &intel_idle },
+};
+
static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C0 */ },
{ /* MWAIT C1 */
@@ -361,6 +393,10 @@ static const struct idle_cpu idle_cpu_snb = {
.state_table = snb_cstates,
};
+static const struct idle_cpu idle_cpu_ivb = {
+ .state_table = ivb_cstates,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -376,6 +412,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x2f, idle_cpu_nehalem),
ICPU(0x2a, idle_cpu_snb),
ICPU(0x2d, idle_cpu_snb),
+ ICPU(0x3a, idle_cpu_ivb),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -569,8 +606,9 @@ static int __init intel_idle_init(void)
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
+ struct cpuidle_driver *drv = cpuidle_get_driver();
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
- cpuidle_get_driver()->name);
+ drv ? drv->name : "none");
return retval;
}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 59fbb3ae40e7..e35bb8f6fe75 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -129,7 +129,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
{
struct adf4350_platform_data *pdata = st->pdata;
u64 tmp;
- u32 div_gcd, prescaler;
+ u32 div_gcd, prescaler, chspc;
u16 mdiv, r_cnt = 0;
u8 band_sel_div;
@@ -158,14 +158,20 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
if (pdata->ref_div_factor)
r_cnt = pdata->ref_div_factor - 1;
- do {
- r_cnt = adf4350_tune_r_cnt(st, r_cnt);
+ chspc = st->chspc;
- st->r1_mod = st->fpfd / st->chspc;
- while (st->r1_mod > ADF4350_MAX_MODULUS) {
- r_cnt = adf4350_tune_r_cnt(st, r_cnt);
- st->r1_mod = st->fpfd / st->chspc;
- }
+ do {
+ do {
+ do {
+ r_cnt = adf4350_tune_r_cnt(st, r_cnt);
+ st->r1_mod = st->fpfd / chspc;
+ if (r_cnt > ADF4350_MAX_R_CNT) {
+ /* try higher spacing values */
+ chspc++;
+ r_cnt = 0;
+ }
+ } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt);
+ } while (r_cnt == 0);
tmp = freq * (u64)st->r1_mod + (st->fpfd > 1);
do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */
@@ -194,7 +200,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) |
ADF4350_REG0_FRACT(st->r0_fract);
- st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) |
+ st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) |
ADF4350_REG1_MOD(st->r1_mod) |
prescaler;
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 1cbb449b319a..9a99f43094f0 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -271,9 +271,10 @@ static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct adjd_s311_data *data = iio_priv(indio_dev);
- data->buffer = krealloc(data->buffer, indio_dev->scan_bytes,
- GFP_KERNEL);
- if (!data->buffer)
+
+ kfree(data->buffer);
+ data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ if (data->buffer == NULL)
return -ENOMEM;
return 0;
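
The adjd_s311 hunk drops the `data->buffer = krealloc(data->buffer, ...)` form because assigning krealloc()'s result back to the only copy of the pointer leaks the old allocation when krealloc() returns NULL (the original block stays allocated). The old contents are not needed here, so kfree()+kmalloc() is the simplest fix; when the contents must survive, the usual idiom keeps a temporary, as in this sketch (example_grow() is a hypothetical helper, not from the patch):

#include <linux/errno.h>
#include <linux/slab.h>

static int example_grow(void **bufp, size_t new_size)
{
        void *tmp;

        tmp = krealloc(*bufp, new_size, GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;        /* *bufp is still valid and still owned */

        *bufp = tmp;
        return 0;
}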
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index c3e7bac13123..e45712a921ce 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -404,7 +404,7 @@ out:
return ret;
}
-static int show_thresh_either_en(struct device *dev,
+static ssize_t show_thresh_either_en(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -424,7 +424,7 @@ static int show_thresh_either_en(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%u\n", enable);
}
-static int store_thresh_either_en(struct device *dev,
+static ssize_t store_thresh_either_en(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5a335b5447c6..7172559ce0c1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3064,10 +3064,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
id_priv->id.port_num, &rec,
comp_mask, GFP_KERNEL,
cma_ib_mc_handler, mc);
- if (IS_ERR(mc->multicast.ib))
- return PTR_ERR(mc->multicast.ib);
-
- return 0;
+ return PTR_RET(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
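
PTR_RET() (from include/linux/err.h) is shorthand for exactly the IS_ERR()/PTR_ERR()/return 0 sequence the removed lines spelled out; behaviourally it is equivalent to this sketch (example_ptr_ret() is an illustrative name):

#include <linux/err.h>

static inline int example_ptr_ret(const void *ptr)
{
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);
        return 0;
}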
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 893cb879462c..055ed59838dc 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
if (!uevent)
return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;
+ mutex_lock(&ctx->file->mut);
uevent->cm_id = cm_id;
ucma_set_event_context(ctx, event, uevent);
uevent->resp.event = event->event;
@@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
- mutex_lock(&ctx->file->mut);
if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
if (!ctx->backlog) {
ret = -ENOMEM;
@@ -1002,23 +1002,18 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- optval = kmalloc(cmd.optlen, GFP_KERNEL);
- if (!optval) {
- ret = -ENOMEM;
- goto out1;
- }
-
- if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
- cmd.optlen)) {
- ret = -EFAULT;
- goto out2;
+ optval = memdup_user((void __user *) (unsigned long) cmd.optval,
+ cmd.optlen);
+ if (IS_ERR(optval)) {
+ ret = PTR_ERR(optval);
+ goto out;
}
ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
cmd.optlen);
-out2:
kfree(optval);
-out1:
+
+out:
ucma_put_ctx(ctx);
return ret;
}
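
memdup_user() bundles the kmalloc()+copy_from_user() pair and reports failure as an ERR_PTR() (-ENOMEM or -EFAULT), which is why the two error labels in ucma_set_option() collapse into one. For comparison, a hedged sketch of roughly what it does internally (example_copy_opt() is a hypothetical name):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void *example_copy_opt(const void __user *uptr, size_t len)
{
        void *p;

        p = kmalloc(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, uptr, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}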
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index 8c81992fa6db..e4a73158fc7f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
/*
* Called by c2_probe to initialize the RNIC. This principally
- * involves initalizing the various limits and resouce pools that
+ * involves initializing the various limits and resource pools that
* comprise the RNIC instance.
*/
int __devinit c2_rnic_init(struct c2_dev *c2dev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 77b6b182778a..aaf88ef9409c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
* T3A does 3 things when a TERM is received:
* 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
* 2) generate an async event on the QP with the TERMINATE opcode
- * 3) post a TERMINATE opcde cqe into the associated CQ.
+ * 3) post a TERMINATE opcode cqe into the associated CQ.
*
* For (1), we save the message in the qp for later consumer consumption.
* For (2), we move the QP into TERMINATE, post a QP event and disconnect.
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c27141fef1ab..9c2ae7efd00f 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
struct ib_ah *new_ah;
struct ib_ah_attr ah_attr;
+ unsigned long flags;
if (!dev->send_agent[port_num - 1][0])
return;
@@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
if (IS_ERR(new_ah))
return;
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
if (dev->sm_ah[port_num - 1])
ib_destroy_ah(dev->sm_ah[port_num - 1]);
dev->sm_ah[port_num - 1] = new_ah;
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
}
/*
@@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
static void node_desc_override(struct ib_device *dev,
struct ib_mad *mad)
{
+ unsigned long flags;
+
if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
- spin_lock(&to_mdev(dev)->sm_lock);
+ spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
- spin_unlock(&to_mdev(dev)->sm_lock);
+ spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
}
}
@@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
struct ib_mad_send_buf *send_buf;
struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
int ret;
+ unsigned long flags;
if (agent) {
send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
@@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
* wrong following the IB spec strictly, but we know
* it's OK for our devices).
*/
- spin_lock(&dev->sm_lock);
+ spin_lock_irqsave(&dev->sm_lock, flags);
memcpy(send_buf->mad, mad, sizeof *mad);
if ((send_buf->ah = dev->sm_ah[port_num - 1]))
ret = ib_post_send_mad(send_buf, NULL);
else
ret = -EINVAL;
- spin_unlock(&dev->sm_lock);
+ spin_unlock_irqrestore(&dev->sm_lock, flags);
if (ret)
ib_free_send_mad(send_buf);
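
The mlx4 hunks convert sm_lock to the _irqsave spinlock variants; spin_lock_irqsave() disables local interrupts and records their previous state, so the lock stays safe even when it is taken from a context that already runs with interrupts disabled or from interrupt context, and the unlock never re-enables interrupts behind the caller's back. The generic pattern, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static u64 example_shared_counter;

/* callable from process context and from contexts with IRQs disabled */
static void example_touch_shared(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_shared_counter++;
        spin_unlock_irqrestore(&example_lock, flags);
}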
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fe2088cfa6ee..cc05579ebce7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
struct ib_device_modify *props)
{
struct mlx4_cmd_mailbox *mailbox;
+ unsigned long flags;
if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
return -EOPNOTSUPP;
@@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
return 0;
- spin_lock(&to_mdev(ibdev)->sm_lock);
+ spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
memcpy(ibdev->node_desc, props->node_desc, 64);
- spin_unlock(&to_mdev(ibdev)->sm_lock);
+ spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
/*
* If possible, pass node desc to FW, so it can generate
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a6d8ea060ea8..f585eddef4b7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
+ struct net_device *ndev;
union ib_gid sgid;
u16 pkey;
int send_size;
@@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
/* FIXME: cache smac value? */
- smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr;
+ ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1];
+ if (!ndev)
+ return -ENODEV;
+ smac = ndev->dev_addr;
memcpy(sqp->ud_header.eth.smac_h, smac, 6);
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5a044526e4f4..c4e0131f1b57 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
ocrdma_get_guid(dev, &sgid->raw[8]);
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
struct net_device *netdev, *tmp;
@@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
return 0;
}
-#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
+#if IS_ENABLED(CONFIG_IPV6)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
- struct net_device *event_netdev = ifa->idev->dev;
- struct net_device *netdev = NULL;
+ struct net_device *netdev = ifa->idev->dev;
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
@@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
bool is_vlan = false;
u16 vid = 0;
- netdev = vlan_dev_real_dev(event_netdev);
- if (netdev != event_netdev) {
- is_vlan = true;
- vid = vlan_dev_vlan_id(event_netdev);
+ is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
+ if (is_vlan) {
+ vid = vlan_dev_vlan_id(netdev);
+ netdev = vlan_dev_real_dev(netdev);
}
+
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
if (dev->nic_info.netdev == netdev) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index b2f9784beb4a..cb5b7f7d4d38 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -893,7 +893,9 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
/* verify consumer QPs are not trying to use GSI QP's CQ */
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq))) {
+ (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
__func__, dev->id);
return -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 6e19ec844d99..7b1b86690024 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -656,6 +656,11 @@ struct qib_pportdata {
/* 16 congestion entries with each entry corresponding to a SL */
struct ib_cc_congestion_entry_shadow *congestion_entries;
+ /* Maximum number of congestion control entries that the agent expects
+ * the manager to send.
+ */
+ u16 cc_supported_table_entries;
+
/* Total number of congestion control table entries */
u16 total_cct_entry;
@@ -667,11 +672,6 @@ struct qib_pportdata {
/* CA's max number of 64 entry units in the congestion control table */
u8 cc_max_table_entries;
-
- /* Maximum number of congestion control entries that the agent expects
- * the manager to send.
- */
- u8 cc_supported_table_entries;
};
/* Observers. Not to be taken lightly, possibly not to ship. */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 0d7280af99bc..3f6b21e9dc11 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
dd->piobcnt4k * dd->align4k;
dd->piovl15base = ioremap_nocache(vl15off,
NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base)
+ if (!dd->piovl15base) {
+ ret = -ENOMEM;
goto bail;
+ }
}
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index a322d5171a2c..50a8a0d4fe67 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
/* Read CTRL reg for each channel to check TRIMDONE */
if (baduns & (1 << chn)) {
qib_dev_err(dd,
- "Reseting TRIMDONE on chn %d (%s)\n",
+ "Resetting TRIMDONE on chn %d (%s)\n",
chn, where);
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 86df632ea612..ca43901ed861 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,8 @@ enum {
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
+ IPOIB_STOP_NEIGH_GC = 11,
+ IPOIB_NEIGH_TBL_FLUSH = 12,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -260,6 +262,20 @@ struct ipoib_ethtool_st {
u16 max_coalesced_frames;
};
+struct ipoib_neigh_hash {
+ struct ipoib_neigh __rcu **buckets;
+ struct rcu_head rcu;
+ u32 mask;
+ u32 size;
+};
+
+struct ipoib_neigh_table {
+ struct ipoib_neigh_hash __rcu *htbl;
+ rwlock_t rwlock;
+ atomic_t entries;
+ struct completion flushed;
+};
+
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -279,6 +295,8 @@ struct ipoib_dev_priv {
struct rb_root path_tree;
struct list_head path_list;
+ struct ipoib_neigh_table ntbl;
+
struct ipoib_mcast *broadcast;
struct list_head multicast_list;
struct rb_root multicast_tree;
@@ -291,7 +309,7 @@ struct ipoib_dev_priv {
struct work_struct flush_heavy;
struct work_struct restart_task;
struct delayed_work ah_reap_task;
-
+ struct delayed_work neigh_reap_task;
struct ib_device *ca;
u8 port;
u16 pkey;
@@ -377,13 +395,16 @@ struct ipoib_neigh {
#ifdef CONFIG_INFINIBAND_IPOIB_CM
struct ipoib_cm_tx *cm;
#endif
- union ib_gid dgid;
+ u8 daddr[INFINIBAND_ALEN];
struct sk_buff_head queue;
- struct neighbour *neighbour;
struct net_device *dev;
struct list_head list;
+ struct ipoib_neigh __rcu *hnext;
+ struct rcu_head rcu;
+ atomic_t refcnt;
+ unsigned long alive;
};
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
@@ -394,21 +415,17 @@ static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
}
-/*
- * We stash a pointer to our private neighbour information after our
- * hardware address in neigh->ha. The ALIGN() expression here makes
- * sure that this pointer is stored aligned so that an unaligned
- * load is not needed to dereference it.
- */
-static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
+static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
- return (void*) neigh + ALIGN(offsetof(struct neighbour, ha) +
- INFINIBAND_ALEN, sizeof(void *));
+ if (atomic_dec_and_test(&neigh->refcnt))
+ ipoib_neigh_dtor(neigh);
}
-
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh,
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr);
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
struct net_device *dev);
-void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh);
+void ipoib_neigh_free(struct ipoib_neigh *neigh);
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid);
extern struct workqueue_struct *ipoib_workqueue;
@@ -425,7 +442,6 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
{
kref_put(&ah->ref, ipoib_free_ah);
}
-
int ipoib_open(struct net_device *dev);
int ipoib_add_pkey_attr(struct net_device *dev);
int ipoib_add_umcast_attr(struct net_device *dev);
@@ -455,7 +471,7 @@ void ipoib_dev_cleanup(struct net_device *dev);
void ipoib_mcast_join_task(struct work_struct *work);
void ipoib_mcast_carrier_on_task(struct work_struct *work);
-void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb);
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
@@ -517,10 +533,10 @@ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
-static inline int ipoib_cm_enabled(struct net_device *dev, struct neighbour *n)
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- return IPOIB_CM_SUPPORTED(n->ha) &&
+ return IPOIB_CM_SUPPORTED(hwaddr) &&
test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
}
@@ -575,7 +591,7 @@ static inline int ipoib_cm_admin_enabled(struct net_device *dev)
{
return 0;
}
-static inline int ipoib_cm_enabled(struct net_device *dev, struct neighbour *n)
+static inline int ipoib_cm_enabled(struct net_device *dev, u8 *hwaddr)
{
return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 6d66ab0dd92a..24683fda8e21 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -811,9 +811,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
@@ -1230,9 +1228,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
@@ -1275,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
+ unsigned long flags;
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
+ spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
queue_work(ipoib_workqueue, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
- tx->neigh->dgid.raw);
+ tx->neigh->daddr + 4);
tx->neigh = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
}
}
@@ -1304,7 +1303,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
- qpn = IPOIB_QPN(neigh->neighbour->ha);
+ qpn = IPOIB_QPN(neigh->daddr);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1320,9 +1319,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
if (neigh) {
neigh->cm = NULL;
list_del(&neigh->list);
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
}
list_del(&p->list);
kfree(p);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bbee4b2d7a13..3e2085a3ee47 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -46,7 +46,8 @@
#include <linux/ip.h>
#include <linux/in.h>
-#include <net/dst.h>
+#include <linux/jhash.h>
+#include <net/arp.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
@@ -84,6 +85,7 @@ struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);
+static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -264,30 +266,15 @@ static int __path_add(struct net_device *dev, struct ipoib_path *path)
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_neigh *neigh, *tn;
struct sk_buff *skb;
- unsigned long flags;
while ((skb = __skb_dequeue(&path->queue)))
dev_kfree_skb_irq(skb);
- spin_lock_irqsave(&priv->lock, flags);
-
- list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
- /*
- * It's safe to call ipoib_put_ah() inside priv->lock
- * here, because we know that path->ah will always
- * hold one more reference, so ipoib_put_ah() will
- * never do more than decrement the ref count.
- */
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
-
- ipoib_neigh_free(dev, neigh);
- }
+ ipoib_dbg(netdev_priv(dev), "path_free\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ /* remove all neigh connected to this path */
+ ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
if (path->ah)
ipoib_put_ah(path->ah);
@@ -458,19 +445,15 @@ static void path_rec_completion(int status,
}
kref_get(&path->ah->ref);
neigh->ah = path->ah;
- memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
- sizeof(union ib_gid));
- if (ipoib_cm_enabled(dev, neigh->neighbour)) {
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
path,
neigh));
if (!ipoib_cm_get(neigh)) {
list_del(&neigh->list);
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
continue;
}
}
@@ -555,15 +538,15 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
-/* called with rcu_read_lock */
-static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
+static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
+ struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
unsigned long flags;
- neigh = ipoib_neigh_alloc(n, skb->dev);
+ neigh = ipoib_neigh_alloc(daddr, dev);
if (!neigh) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -572,9 +555,9 @@ static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, n->ha + 4);
+ path = __path_find(dev, daddr + 4);
if (!path) {
- path = path_rec_create(dev, n->ha + 4);
+ path = path_rec_create(dev, daddr + 4);
if (!path)
goto err_path;
@@ -586,17 +569,13 @@ static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_
if (path->ah) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
- memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
- sizeof(union ib_gid));
- if (ipoib_cm_enabled(dev, neigh->neighbour)) {
+ if (ipoib_cm_enabled(dev, neigh->daddr)) {
if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
if (!ipoib_cm_get(neigh)) {
list_del(&neigh->list);
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
goto err_drop;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
@@ -608,7 +587,8 @@ static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_
}
} else {
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(n->ha));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
+ ipoib_neigh_put(neigh);
return;
}
} else {
@@ -621,35 +601,20 @@ static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_
}
spin_unlock_irqrestore(&priv->lock, flags);
+ ipoib_neigh_put(neigh);
return;
err_list:
list_del(&neigh->list);
err_path:
- ipoib_neigh_free(dev, neigh);
+ ipoib_neigh_free(neigh);
err_drop:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* called with rcu_read_lock */
-static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
-{
- struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
-
- /* Look up path record for unicasts */
- if (n->ha[4] != 0xff) {
- neigh_add_path(skb, n, dev);
- return;
- }
-
- /* Add in the P_Key for multicasts */
- n->ha[8] = (priv->pkey >> 8) & 0xff;
- n->ha[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, n->ha + 4, skb);
+ ipoib_neigh_put(neigh);
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -710,96 +675,80 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct neighbour *n = NULL;
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+ struct ipoib_header *header;
unsigned long flags;
- rcu_read_lock();
- if (likely(skb_dst(skb))) {
- n = dst_neigh_lookup_skb(skb_dst(skb), skb);
- if (!n) {
+ header = (struct ipoib_header *) skb->data;
+
+ if (unlikely(cb->hwaddr[4] == 0xff)) {
+ /* multicast, arrange "if" according to probability */
+ if ((header->proto != htons(ETH_P_IP)) &&
+ (header->proto != htons(ETH_P_IPV6)) &&
+ (header->proto != htons(ETH_P_ARP)) &&
+ (header->proto != htons(ETH_P_RARP))) {
+ /* ethertype not supported by IPoIB */
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
- goto unlock;
+ return NETDEV_TX_OK;
}
+		/* Add in the P_Key for multicast */
+ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ cb->hwaddr[9] = priv->pkey & 0xff;
+
+ neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ if (likely(neigh))
+ goto send_using_neigh;
+ ipoib_mcast_send(dev, cb->hwaddr, skb);
+ return NETDEV_TX_OK;
}
- if (likely(n)) {
- if (unlikely(!*to_ipoib_neigh(n))) {
- ipoib_path_lookup(skb, n, dev);
- goto unlock;
- }
-
- neigh = *to_ipoib_neigh(n);
- if (unlikely((memcmp(&neigh->dgid.raw,
- n->ha + 4,
- sizeof(union ib_gid))) ||
- (neigh->dev != dev))) {
- spin_lock_irqsave(&priv->lock, flags);
- /*
- * It's safe to call ipoib_put_ah() inside
- * priv->lock here, because we know that
- * path->ah will always hold one more reference,
- * so ipoib_put_ah() will never do more than
- * decrement the ref count.
- */
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- list_del(&neigh->list);
- ipoib_neigh_free(dev, neigh);
- spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_path_lookup(skb, n, dev);
- goto unlock;
+ /* unicast, arrange "switch" according to probability */
+ switch (header->proto) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ if (unlikely(!neigh)) {
+ neigh_add_path(skb, cb->hwaddr, dev);
+ return NETDEV_TX_OK;
}
+ break;
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP):
+		/* for unicast, ARP and RARP should always perform a path lookup */
+ unicast_arp_send(skb, dev, cb);
+ return NETDEV_TX_OK;
+ default:
+ /* ethertype not supported by IPoIB */
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
- if (ipoib_cm_get(neigh)) {
- if (ipoib_cm_up(neigh)) {
- ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- goto unlock;
- }
- } else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- goto unlock;
+send_using_neigh:
+ /* note we now hold a ref to neigh */
+ if (ipoib_cm_get(neigh)) {
+ if (ipoib_cm_up(neigh)) {
+ ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
+ goto unref;
}
+ } else if (neigh->ah) {
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ goto unref;
+ }
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- spin_lock_irqsave(&priv->lock, flags);
- __skb_queue_tail(&neigh->queue, skb);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- }
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ spin_lock_irqsave(&priv->lock, flags);
+ __skb_queue_tail(&neigh->queue, skb);
+ spin_unlock_irqrestore(&priv->lock, flags);
} else {
- struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
-
- if (cb->hwaddr[4] == 0xff) {
- /* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
- ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
- } else {
- /* unicast GID -- should be ARP or RARP reply */
-
- if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
- (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
- ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
- skb_dst(skb) ? "neigh" : "dst",
- be16_to_cpup((__be16 *) skb->data),
- IPOIB_QPN(cb->hwaddr),
- cb->hwaddr + 4);
- dev_kfree_skb_any(skb);
- ++dev->stats.tx_dropped;
- goto unlock;
- }
+unref:
+ ipoib_neigh_put(neigh);
- unicast_arp_send(skb, dev, cb);
- }
- }
-unlock:
- if (n)
- neigh_release(n);
- rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -821,6 +770,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -828,14 +778,11 @@ static int ipoib_hard_header(struct sk_buff *skb,
header->reserved = 0;
/*
- * If we don't have a dst_entry structure, stuff the
+	 * We no longer rely on the dst_entry structure; always stuff the
* destination address into skb->cb so we can figure out where
* to send the packet later.
*/
- if (!skb_dst(skb)) {
- struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
- }
+ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
return 0;
}
@@ -852,86 +799,438 @@ static void ipoib_set_mcast_list(struct net_device *dev)
queue_work(ipoib_workqueue, &priv->restart_task);
}
-static void ipoib_neigh_cleanup(struct neighbour *n)
+static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
- struct ipoib_neigh *neigh;
- struct ipoib_dev_priv *priv = netdev_priv(n->dev);
+ /*
+	 * Use only the address parts that contribute to spreading.
+	 * The subnet prefix is not used, as one cannot connect to the
+	 * same remote port (GUID) using the same remote QPN via two
+	 * different subnets.
+ */
+ /* qpn octets[1:4) & port GUID octets[12:20) */
+ u32 *daddr_32 = (u32 *) daddr;
+ u32 hv;
+
+ hv = jhash_3words(daddr_32[3], daddr_32[4], 0xFFFFFF & daddr_32[0], 0);
+ return hv & htbl->mask;
+}
+
+struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh = NULL;
+ u32 hash_val;
+
+ rcu_read_lock_bh();
+
+ htbl = rcu_dereference_bh(ntbl->htbl);
+
+ if (!htbl)
+ goto out_unlock;
+
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
+ neigh != NULL;
+ neigh = rcu_dereference_bh(neigh->hnext)) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ goto out_unlock;
+ }
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ rcu_read_unlock_bh();
+ return neigh;
+}
+
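
ipoib_neigh_get() above follows the standard RCU hash-table lookup protocol: walk the bucket under rcu_read_lock_bh() and take a reference only with atomic_inc_not_zero(), so an entry whose refcount already reached zero (and is merely waiting for its grace period) can never be handed back to a caller. A stripped-down sketch of the same idea, using hypothetical types and names (struct example_node, example_find) rather than the IPoIB ones:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_node {
        struct example_node __rcu *next;
        atomic_t refcnt;
        u8 key[20];
};

static struct example_node *example_find(struct example_node __rcu **bucket,
                                         const u8 *key)
{
        struct example_node *n;

        rcu_read_lock_bh();
        for (n = rcu_dereference_bh(*bucket); n;
             n = rcu_dereference_bh(n->next)) {
                if (memcmp(n->key, key, sizeof(n->key)))
                        continue;
                /* refuse entries already on their way to being freed */
                if (!atomic_inc_not_zero(&n->refcnt))
                        n = NULL;
                break;
        }
        rcu_read_unlock_bh();

        return n;        /* caller drops the reference with a matching put */
}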
+static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long neigh_obsolete;
+ unsigned long dt;
unsigned long flags;
- struct ipoib_ah *ah = NULL;
+ int i;
- neigh = *to_ipoib_neigh(n);
- if (neigh)
- priv = netdev_priv(neigh->dev);
- else
+ if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
return;
- ipoib_dbg(priv,
- "neigh_cleanup for %06x %pI6\n",
- IPOIB_QPN(n->ha),
- n->ha + 4);
- spin_lock_irqsave(&priv->lock, flags);
+ write_lock_bh(&ntbl->rwlock);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&ntbl->rwlock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ /* neigh is obsolete if it was idle for two GC periods */
+ dt = 2 * arp_tbl.gc_interval;
+ neigh_obsolete = jiffies - dt;
+ /* handle possible race condition */
+ if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
+ goto out_unlock;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&ntbl->rwlock))) != NULL) {
+ /* was the neigh idle for two GC periods */
+ if (time_after(neigh_obsolete, neigh->alive)) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&ntbl->rwlock)));
+ /* remove from path/mc list */
+ spin_lock_irqsave(&priv->lock, flags);
+ list_del(&neigh->list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
- if (neigh->ah)
- ah = neigh->ah;
- list_del(&neigh->list);
- ipoib_neigh_free(n->dev, neigh);
+ }
+ }
- spin_unlock_irqrestore(&priv->lock, flags);
+out_unlock:
+ write_unlock_bh(&ntbl->rwlock);
+}
- if (ah)
- ipoib_put_ah(ah);
+static void ipoib_reap_neigh(struct work_struct *work)
+{
+ struct ipoib_dev_priv *priv =
+ container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
+
+ __ipoib_reap_neigh(priv);
+
+ if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
}
-struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour,
+
+static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
struct net_device *dev)
{
struct ipoib_neigh *neigh;
- neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
+ neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
if (!neigh)
return NULL;
- neigh->neighbour = neighbour;
neigh->dev = dev;
- memset(&neigh->dgid.raw, 0, sizeof (union ib_gid));
- *to_ipoib_neigh(neighbour) = neigh;
+ memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
skb_queue_head_init(&neigh->queue);
+ INIT_LIST_HEAD(&neigh->list);
ipoib_cm_set(neigh, NULL);
+ /* one ref on behalf of the caller */
+ atomic_set(&neigh->refcnt, 1);
+
+ return neigh;
+}
+
+struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
+ struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh *neigh;
+ u32 hash_val;
+
+ write_lock_bh(&ntbl->rwlock);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&ntbl->rwlock));
+ if (!htbl) {
+ neigh = NULL;
+ goto out_unlock;
+ }
+
+	/* need to add a new neigh, but another thread may have beaten us to it;
+	 * recompute the hash (a resize may have taken place) and search again
+	 */
+ hash_val = ipoib_addr_hash(htbl, daddr);
+ for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&ntbl->rwlock));
+ neigh != NULL;
+ neigh = rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&ntbl->rwlock))) {
+ if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
+ /* found, take one ref on behalf of the caller */
+ if (!atomic_inc_not_zero(&neigh->refcnt)) {
+ /* deleted */
+ neigh = NULL;
+ break;
+ }
+ neigh->alive = jiffies;
+ goto out_unlock;
+ }
+ }
+
+ neigh = ipoib_neigh_ctor(daddr, dev);
+ if (!neigh)
+ goto out_unlock;
+
+ /* one ref on behalf of the hash table */
+ atomic_inc(&neigh->refcnt);
+ neigh->alive = jiffies;
+ /* put in hash */
+ rcu_assign_pointer(neigh->hnext,
+ rcu_dereference_protected(htbl->buckets[hash_val],
+ lockdep_is_held(&ntbl->rwlock)));
+ rcu_assign_pointer(htbl->buckets[hash_val], neigh);
+ atomic_inc(&ntbl->entries);
+
+out_unlock:
+ write_unlock_bh(&ntbl->rwlock);
return neigh;
}
-void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh)
+void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
+	/* neigh reference count was dropped to zero */
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
- *to_ipoib_neigh(neigh->neighbour) = NULL;
+ if (neigh->ah)
+ ipoib_put_ah(neigh->ah);
while ((skb = __skb_dequeue(&neigh->queue))) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
if (ipoib_cm_get(neigh))
ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
+ ipoib_dbg(netdev_priv(dev),
+ "neigh free for %06x %pI6\n",
+ IPOIB_QPN(neigh->daddr),
+ neigh->daddr + 4);
kfree(neigh);
+ if (atomic_dec_and_test(&priv->ntbl.entries)) {
+ if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
+ complete(&priv->ntbl.flushed);
+ }
+}
+
+static void ipoib_neigh_reclaim(struct rcu_head *rp)
+{
+ /* Called as a result of removal from hash table */
+ struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
+ /* note TX context may hold another ref */
+ ipoib_neigh_put(neigh);
}
-static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
+void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
- parms->neigh_cleanup = ipoib_neigh_cleanup;
+ struct net_device *dev = neigh->dev;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh __rcu **np;
+ struct ipoib_neigh *n;
+ u32 hash_val;
+
+ write_lock_bh(&ntbl->rwlock);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&ntbl->rwlock));
+ if (!htbl)
+ goto out_unlock;
+
+ hash_val = ipoib_addr_hash(htbl, neigh->daddr);
+ np = &htbl->buckets[hash_val];
+ for (n = rcu_dereference_protected(*np,
+ lockdep_is_held(&ntbl->rwlock));
+ n != NULL;
+ n = rcu_dereference_protected(*np,
+ lockdep_is_held(&ntbl->rwlock))) {
+ if (n == neigh) {
+ /* found */
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&ntbl->rwlock)));
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ goto out_unlock;
+ } else {
+ np = &n->hnext;
+ }
+ }
+
+out_unlock:
+ write_unlock_bh(&ntbl->rwlock);
+
+}
+
+static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ struct ipoib_neigh **buckets;
+ u32 size;
+
+ clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+ ntbl->htbl = NULL;
+ rwlock_init(&ntbl->rwlock);
+ htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
+ if (!htbl)
+ return -ENOMEM;
+ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
+ size = roundup_pow_of_two(arp_tbl.gc_thresh3);
+ buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
+ if (!buckets) {
+ kfree(htbl);
+ return -ENOMEM;
+ }
+ htbl->size = size;
+ htbl->mask = (size - 1);
+ htbl->buckets = buckets;
+ ntbl->htbl = htbl;
+ atomic_set(&ntbl->entries, 0);
+
+ /* start garbage collection */
+ clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
+ queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ arp_tbl.gc_interval);
return 0;
}
+static void neigh_hash_free_rcu(struct rcu_head *head)
+{
+ struct ipoib_neigh_hash *htbl = container_of(head,
+ struct ipoib_neigh_hash,
+ rcu);
+ struct ipoib_neigh __rcu **buckets = htbl->buckets;
+
+ kfree(buckets);
+ kfree(htbl);
+}
+
+void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i;
+
+ /* remove all neigh connected to a given path or mcast */
+ write_lock_bh(&ntbl->rwlock);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&ntbl->rwlock));
+
+ if (!htbl)
+ goto out_unlock;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&ntbl->rwlock))) != NULL) {
+			/* delete neighs belonging to this parent */
+ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&ntbl->rwlock)));
+ /* remove from parent list */
+ spin_lock_irqsave(&priv->lock, flags);
+ list_del(&neigh->list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ } else {
+ np = &neigh->hnext;
+ }
+
+ }
+ }
+out_unlock:
+ write_unlock_bh(&ntbl->rwlock);
+}
+
+static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
+{
+ struct ipoib_neigh_table *ntbl = &priv->ntbl;
+ struct ipoib_neigh_hash *htbl;
+ unsigned long flags;
+ int i;
+
+ write_lock_bh(&ntbl->rwlock);
+
+ htbl = rcu_dereference_protected(ntbl->htbl,
+ lockdep_is_held(&ntbl->rwlock));
+ if (!htbl)
+ goto out_unlock;
+
+ for (i = 0; i < htbl->size; i++) {
+ struct ipoib_neigh *neigh;
+ struct ipoib_neigh __rcu **np = &htbl->buckets[i];
+
+ while ((neigh = rcu_dereference_protected(*np,
+ lockdep_is_held(&ntbl->rwlock))) != NULL) {
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(neigh->hnext,
+ lockdep_is_held(&ntbl->rwlock)));
+ /* remove from path/mc list */
+ spin_lock_irqsave(&priv->lock, flags);
+ list_del(&neigh->list);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ }
+ }
+
+ rcu_assign_pointer(ntbl->htbl, NULL);
+ call_rcu(&htbl->rcu, neigh_hash_free_rcu);
+
+out_unlock:
+ write_unlock_bh(&ntbl->rwlock);
+}
+
+static void ipoib_neigh_hash_uninit(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ int stopped;
+
+ ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
+ init_completion(&priv->ntbl.flushed);
+ set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
+
+	/* Stop GC; if called after an init failure, cancel the pending work */
+ stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
+ if (!stopped)
+ cancel_delayed_work(&priv->neigh_reap_task);
+
+ if (atomic_read(&priv->ntbl.entries)) {
+ ipoib_flush_neighs(priv);
+ wait_for_completion(&priv->ntbl.flushed);
+ }
+}
+
+
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out;
+ goto out_neigh_hash_cleanup;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -954,6 +1253,8 @@ out_tx_ring_cleanup:
out_rx_ring_cleanup:
kfree(priv->rx_ring);
+out_neigh_hash_cleanup:
+ ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -966,6 +1267,9 @@ void ipoib_dev_cleanup(struct net_device *dev)
/* Delete any child interfaces first */
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
+ /* Stop GC on child */
+ set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
+ cancel_delayed_work(&cpriv->neigh_reap_task);
unregister_netdev(cpriv->dev);
ipoib_dev_cleanup(cpriv->dev);
free_netdev(cpriv->dev);
@@ -978,6 +1282,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
+
+ ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -992,7 +1298,6 @@ static const struct net_device_ops ipoib_netdev_ops = {
.ndo_start_xmit = ipoib_start_xmit,
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
- .ndo_neigh_setup = ipoib_neigh_setup_dev,
};
static void ipoib_setup(struct net_device *dev)
@@ -1041,6 +1346,7 @@ static void ipoib_setup(struct net_device *dev)
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
+ INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
@@ -1281,6 +1587,9 @@ sysfs_failed:
register_failed:
ib_unregister_event_handler(&priv->event_handler);
+ /* Stop GC if started before flush */
+ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
+ cancel_delayed_work(&priv->neigh_reap_task);
flush_workqueue(ipoib_workqueue);
event_failed:
@@ -1347,6 +1656,9 @@ static void ipoib_remove_one(struct ib_device *device)
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
+ /* Stop GC */
+ set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
+ cancel_delayed_work(&priv->neigh_reap_task);
flush_workqueue(ipoib_workqueue);
unregister_netdev(priv->dev);
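
The reaper added above (__ipoib_reap_neigh()) declares a neighbour obsolete once its `alive` stamp, refreshed on every successful lookup, is older than two ARP GC intervals; the comparison uses time_after(), which is safe across jiffies wrap-around. A minimal sketch of that check with a hypothetical helper name:

#include <linux/jiffies.h>
#include <net/arp.h>

/* true once @last_used is older than two ARP GC periods */
static bool example_entry_is_stale(unsigned long last_used)
{
        unsigned long cutoff = jiffies - 2 * arp_tbl.gc_interval;

        return time_after(cutoff, last_used);
}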
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 7cecb16d3d48..13f4aa7593c8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,28 +69,13 @@ struct ipoib_mcast_iter {
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ipoib_neigh *neigh, *tmp;
int tx_dropped = 0;
ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
mcast->mcmember.mgid.raw);
- spin_lock_irq(&priv->lock);
-
- list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
- /*
- * It's safe to call ipoib_put_ah() inside priv->lock
- * here, because we know that mcast->ah will always
- * hold one more reference, so ipoib_put_ah() will
- * never do more than decrement the ref count.
- */
- if (neigh->ah)
- ipoib_put_ah(neigh->ah);
- ipoib_neigh_free(dev, neigh);
- }
-
- spin_unlock_irq(&priv->lock);
+ /* remove all neigh connected to this mcast */
+ ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
if (mcast->ah)
ipoib_put_ah(mcast->ah);
@@ -655,17 +640,12 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
return 0;
}
-void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
+void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct dst_entry *dst = skb_dst(skb);
struct ipoib_mcast *mcast;
- struct neighbour *n;
unsigned long flags;
-
- n = NULL;
- if (dst)
- n = dst_neigh_lookup_skb(dst, skb);
+ void *mgid = daddr + 4;
spin_lock_irqsave(&priv->lock, flags);
@@ -721,28 +701,29 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- if (n) {
- if (!*to_ipoib_neigh(n)) {
- struct ipoib_neigh *neigh;
-
- neigh = ipoib_neigh_alloc(n, skb->dev);
- if (neigh) {
- kref_get(&mcast->ah->ref);
- neigh->ah = mcast->ah;
- list_add_tail(&neigh->list,
- &mcast->neigh_list);
- }
+ struct ipoib_neigh *neigh;
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+ neigh = ipoib_neigh_get(dev, daddr);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!neigh) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ neigh = ipoib_neigh_alloc(daddr, dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (neigh) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ list_add_tail(&neigh->list, &mcast->neigh_list);
}
- neigh_release(n);
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+ if (neigh)
+ ipoib_neigh_put(neigh);
return;
}
unlock:
- if (n)
- neigh_release(n);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index bcbf22ee0aa7..1b5b0c730054 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scmnd->sc_data_direction);
}
-static void srp_remove_req(struct srp_target_port *target,
- struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ * ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+ struct srp_request *req,
+ struct scsi_cmnd *scmnd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&target->lock, flags);
+ if (!scmnd) {
+ scmnd = req->scmnd;
+ req->scmnd = NULL;
+ } else if (req->scmnd == scmnd) {
+ req->scmnd = NULL;
+ } else {
+ scmnd = NULL;
+ }
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+ struct srp_request *req, struct scsi_cmnd *scmnd,
+ s32 req_lim_delta)
{
unsigned long flags;
- srp_unmap_data(req->scmnd, target, req);
+ srp_unmap_data(scmnd, target, req);
+
spin_lock_irqsave(&target->lock, flags);
target->req_lim += req_lim_delta;
- req->scmnd = NULL;
list_add_tail(&req->list, &target->free_reqs);
spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
- req->scmnd->result = DID_RESET << 16;
- req->scmnd->scsi_done(req->scmnd);
- srp_remove_req(target, req, 0);
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+ if (scmnd) {
+ scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+ srp_free_req(target, req, scmnd, 0);
+ }
}
static int srp_reconnect_target(struct srp_target_port *target)
@@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
complete(&target->tsk_mgmt_done);
} else {
req = &target->req_ring[rsp->tag];
- scmnd = req->scmnd;
- if (!scmnd)
+ scmnd = srp_claim_req(target, req, NULL);
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %016llx\n",
(unsigned long long) rsp->tag);
+
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ return;
+ }
scmnd->result = rsp->status;
if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+ srp_free_req(target, req, scmnd,
+ be32_to_cpu(rsp->req_lim_delta));
+
scmnd->host_scribble = NULL;
scmnd->scsi_done(scmnd);
}
@@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
- int ret = SUCCESS;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req || target->qp_in_error)
+ if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
return FAILED;
- if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
- SRP_TSK_ABORT_TASK))
- return FAILED;
-
- if (req->scmnd) {
- if (!target->tsk_mgmt_status) {
- srp_remove_req(target, req, 0);
- scmnd->result = DID_ABORT << 16;
- } else
- ret = FAILED;
- }
+ srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK);
+ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
- return ret;
+ return SUCCESS;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
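The claim/free split introduced above separates taking ownership of the SCSI command (under target->lock) from unmapping the data and returning the request to the free list, so the normal completion path and the error-handling paths can no longer both complete the same scmnd. A minimal sketch of the resulting caller pattern, using only the helpers added in this hunk (the wrapper function is illustrative, not part of the patch; error handling omitted):

        /* Illustrative sketch: typical completion-side use of the new helpers. */
        static void complete_one_req(struct srp_target_port *target,
                                     struct srp_request *req, s32 req_lim_delta)
        {
                /* Claim exclusive ownership; NULL means another path already did. */
                struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);

                if (!scmnd)
                        return;

                srp_free_req(target, req, scmnd, req_lim_delta); /* unmap + requeue */
                scmnd->scsi_done(scmnd);                         /* complete to SCSI ML */
        }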
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7a0ce8d42887..9e1449f8c6a2 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
*
* XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
* the data that has been transferred via IB RDMA had to be postponed until the
- * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * check_stop_free() callback. None of this is necessary anymore and needs to
* be cleaned up.
*/
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
diff --git a/drivers/input/misc/88pm80x_onkey.c b/drivers/input/misc/88pm80x_onkey.c
new file mode 100644
index 000000000000..7f26e7b6c228
--- /dev/null
+++ b/drivers/input/misc/88pm80x_onkey.c
@@ -0,0 +1,168 @@
+/*
+ * Marvell 88PM80x ONKEY driver
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define PM800_LONG_ONKEY_EN (1 << 0)
+#define PM800_LONG_KEY_DELAY (8) /* 1 .. 16 seconds */
+#define PM800_LONKEY_PRESS_TIME ((PM800_LONG_KEY_DELAY-1) << 4)
+#define PM800_LONKEY_PRESS_TIME_MASK (0xF0)
+#define PM800_SW_PDOWN (1 << 5)
+
+struct pm80x_onkey_info {
+ struct input_dev *idev;
+ struct pm80x_chip *pm80x;
+ struct regmap *map;
+ int irq;
+};
+
+/* 88PM80x gives us an interrupt when ONKEY is held */
+static irqreturn_t pm80x_onkey_handler(int irq, void *data)
+{
+ struct pm80x_onkey_info *info = data;
+ int ret = 0;
+ unsigned int val;
+
+ ret = regmap_read(info->map, PM800_STATUS_1, &val);
+ if (ret < 0) {
+ dev_err(info->idev->dev.parent, "failed to read status: %d\n", ret);
+ return IRQ_NONE;
+ }
+ val &= PM800_ONKEY_STS1;
+
+ input_report_key(info->idev, KEY_POWER, val);
+ input_sync(info->idev);
+
+ return IRQ_HANDLED;
+}
+
+static SIMPLE_DEV_PM_OPS(pm80x_onkey_pm_ops, pm80x_dev_suspend,
+ pm80x_dev_resume);
+
+static int __devinit pm80x_onkey_probe(struct platform_device *pdev)
+{
+
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm80x_onkey_info *info;
+ int err;
+
+ info = kzalloc(sizeof(struct pm80x_onkey_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pm80x = chip;
+
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ info->map = info->pm80x->regmap;
+ if (!info->map) {
+ dev_err(&pdev->dev, "no regmap!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ info->idev = input_allocate_device();
+ if (!info->idev) {
+ dev_err(&pdev->dev, "Failed to allocate input dev\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ info->idev->name = "88pm80x_on";
+ info->idev->phys = "88pm80x_on/input0";
+ info->idev->id.bustype = BUS_I2C;
+ info->idev->dev.parent = &pdev->dev;
+ info->idev->evbit[0] = BIT_MASK(EV_KEY);
+ __set_bit(KEY_POWER, info->idev->keybit);
+
+ err = pm80x_request_irq(info->pm80x, info->irq, pm80x_onkey_handler,
+ IRQF_ONESHOT, "onkey", info);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ: #%d: %d\n",
+ info->irq, err);
+ goto out_reg;
+ }
+
+ err = input_register_device(info->idev);
+ if (err) {
+ dev_err(&pdev->dev, "Can't register input device: %d\n", err);
+ goto out_irq;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ /* Enable long onkey detection */
+ regmap_update_bits(info->map, PM800_RTC_MISC4, PM800_LONG_ONKEY_EN,
+ PM800_LONG_ONKEY_EN);
+ /* Set 8-second interval */
+ regmap_update_bits(info->map, PM800_RTC_MISC3,
+ PM800_LONKEY_PRESS_TIME_MASK,
+ PM800_LONKEY_PRESS_TIME);
+
+ device_init_wakeup(&pdev->dev, 1);
+ return 0;
+
+out_irq:
+ pm80x_free_irq(info->pm80x, info->irq, info);
+out_reg:
+ input_free_device(info->idev);
+out:
+ kfree(info);
+ return err;
+}
+
+static int __devexit pm80x_onkey_remove(struct platform_device *pdev)
+{
+ struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
+
+ device_init_wakeup(&pdev->dev, 0);
+ pm80x_free_irq(info->pm80x, info->irq, info);
+ input_unregister_device(info->idev);
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver pm80x_onkey_driver = {
+ .driver = {
+ .name = "88pm80x-onkey",
+ .owner = THIS_MODULE,
+ .pm = &pm80x_onkey_pm_ops,
+ },
+ .probe = pm80x_onkey_probe,
+ .remove = __devexit_p(pm80x_onkey_remove),
+};
+
+module_platform_driver(pm80x_onkey_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell 88PM80x ONKEY driver");
+MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
+MODULE_ALIAS("platform:88pm80x-onkey");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7faf4a7fcaa9..7c0f1ecfdd7a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -22,6 +22,16 @@ config INPUT_88PM860X_ONKEY
To compile this driver as a module, choose M here: the module
will be called 88pm860x_onkey.
+config INPUT_88PM80X_ONKEY
+ tristate "88PM80x ONKEY support"
+ depends on MFD_88PM800
+ help
+ Support the ONKEY of Marvell 88PM80x PMICs as an input device
+ reporting power button status.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 88pm80x_onkey.
+
config INPUT_AB8500_PONKEY
tristate "AB8500 Pon (PowerOn) Key"
depends on AB8500_CORE
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index f55cdf4916fa..83fe6f5b77d1 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -5,6 +5,7 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_INPUT_88PM860X_ONKEY) += 88pm860x_onkey.o
+obj-$(CONFIG_INPUT_88PM80X_ONKEY) += 88pm80x_onkey.o
obj-$(CONFIG_INPUT_AB8500_PONKEY) += ab8500-ponkey.o
obj-$(CONFIG_INPUT_AD714X) += ad714x.o
obj-$(CONFIG_INPUT_AD714X_I2C) += ad714x-i2c.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 84ec691c05aa..f06231b7cab1 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -74,8 +74,8 @@ static int __devinit ab8500_ponkey_probe(struct platform_device *pdev)
ponkey->idev = input;
ponkey->ab8500 = ab8500;
- ponkey->irq_dbf = irq_dbf;
- ponkey->irq_dbr = irq_dbr;
+ ponkey->irq_dbf = ab8500_irq_get_virq(ab8500, irq_dbf);
+ ponkey->irq_dbr = ab8500_irq_get_virq(ab8500, irq_dbr);
input->name = "AB8500 POn(PowerOn) Key";
input->dev.parent = &pdev->dev;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index d5b390f75c9a..14eaecea2b70 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -40,11 +40,27 @@
* Note that newer firmware allows querying device for maximum useable
* coordinates.
*/
+#define XMIN 0
+#define XMAX 6143
+#define YMIN 0
+#define YMAX 6143
#define XMIN_NOMINAL 1472
#define XMAX_NOMINAL 5472
#define YMIN_NOMINAL 1408
#define YMAX_NOMINAL 4448
+/* Size in bits of absolute position values reported by the hardware */
+#define ABS_POS_BITS 13
+
+/*
+ * Any position values from the hardware above the following limits are
+ * treated as "wrapped around negative" values that have been truncated to
+ * the 13-bit reporting range of the hardware. These are just reasonable
+ * guesses and can be adjusted if hardware is found that operates outside
+ * of these parameters.
+ */
+#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2)
+#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2)
/*****************************************************************************
* Stuff we need even when we do not want native Synaptics support
@@ -588,6 +604,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
hw->right = (buf[0] & 0x02) ? 1 : 0;
}
+ /* Convert wrap-around values to negative */
+ if (hw->x > X_MAX_POSITIVE)
+ hw->x -= 1 << ABS_POS_BITS;
+ if (hw->y > Y_MAX_POSITIVE)
+ hw->y -= 1 << ABS_POS_BITS;
+
return 0;
}
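With ABS_POS_BITS = 13, X_MAX_POSITIVE works out to (8192 + 6143) / 2 = 7167, so any reported X above 7167 is reinterpreted as a small negative value that was truncated to 13 bits; a raw report of 8100, for instance, becomes 8100 - 8192 = -92. A minimal standalone sketch of the same conversion (the function name is illustrative):

        /* Illustrative: sign-extend a wrapped 13-bit coordinate. */
        static int synaptics_unwrap(int raw, int max_positive)
        {
                return raw > max_positive ? raw - (1 << 13) : raw; /* ABS_POS_BITS == 13 */
        }

        /* synaptics_unwrap(8100, 7167) == -92; synaptics_unwrap(5000, 7167) == 5000 */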
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 09a089996ded..d7a7e54f6465 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -878,7 +878,7 @@ static int __init hp_sdc_init(void)
#endif
errstr = "IRQ not available for";
- if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED|IRQF_SAMPLE_RANDOM,
+ if (request_irq(hp_sdc.irq, &hp_sdc_isr, IRQF_SHARED,
"HP SDC", &hp_sdc))
goto err1;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 6533f44be5bd..002041975de9 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -464,7 +464,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
(features->type >= INTUOS5S && features->type <= INTUOS5L) ||
- features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
+ (features->type >= WACOM_21UX2 && features->type <= WACOM_24HD)) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -614,7 +614,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MISC, 0);
}
} else {
- if (features->type == WACOM_21UX2) {
+ if (features->type == WACOM_21UX2 || features->type == WACOM_22HD) {
input_report_key(input, BTN_0, (data[5] & 0x01));
input_report_key(input, BTN_1, (data[6] & 0x01));
input_report_key(input, BTN_2, (data[6] & 0x02));
@@ -633,6 +633,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, BTN_Z, (data[8] & 0x20));
input_report_key(input, BTN_BASE, (data[8] & 0x40));
input_report_key(input, BTN_BASE2, (data[8] & 0x80));
+
+ if (features->type == WACOM_22HD) {
+ input_report_key(input, KEY_PROG1, data[9] & 0x01);
+ input_report_key(input, KEY_PROG2, data[9] & 0x02);
+ input_report_key(input, KEY_PROG3, data[9] & 0x04);
+ }
} else {
input_report_key(input, BTN_0, (data[5] & 0x01));
input_report_key(input, BTN_1, (data[5] & 0x02));
@@ -1231,6 +1237,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case CINTIQ:
case WACOM_BEE:
case WACOM_21UX2:
+ case WACOM_22HD:
case WACOM_24HD:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1432,6 +1439,12 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
wacom_setup_cintiq(wacom_wac);
break;
+ case WACOM_22HD:
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+ /* fall through */
+
case WACOM_21UX2:
__set_bit(BTN_A, input_dev->keybit);
__set_bit(BTN_B, input_dev->keybit);
@@ -1858,6 +1871,9 @@ static const struct wacom_features wacom_features_0xF0 =
static const struct wacom_features wacom_features_0xCC =
{ "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047,
63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xFA =
+ { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
+ 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x90 =
{ "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2075,6 +2091,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEF) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
+ { USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index bd5d37b28714..96c185cc301e 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -73,8 +73,9 @@ enum {
INTUOS5S,
INTUOS5,
INTUOS5L,
- WACOM_24HD,
WACOM_21UX2,
+ WACOM_22HD,
+ WACOM_24HD,
CINTIQ,
WACOM_BEE,
WACOM_MO,
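The new pressure-range test in wacom_intuos_general() (type >= WACOM_21UX2 && type <= WACOM_24HD) only works because WACOM_21UX2, WACOM_22HD and WACOM_24HD are now consecutive enum values, which is exactly what the reordering above guarantees. A hedged way to make that assumption explicit, e.g. at the top of any function using the range (not part of the patch), would be:

        /* Illustrative: document the enum ordering the range check depends on. */
        BUILD_BUG_ON(WACOM_22HD != WACOM_21UX2 + 1);
        BUILD_BUG_ON(WACOM_24HD != WACOM_22HD + 1);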
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 73bd2f6b82ec..1ba232cbc09d 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -472,6 +472,19 @@ config TOUCHSCREEN_PENMOUNT
To compile this driver as a module, choose M here: the
module will be called penmount.
+config TOUCHSCREEN_EDT_FT5X06
+ tristate "EDT FocalTech FT5x06 I2C Touchscreen support"
+ depends on I2C
+ help
+ Say Y here if you have an EDT "Polytouch" touchscreen based
+ on the FocalTech FT5x06 family of controllers connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called edt-ft5x06.
+
config TOUCHSCREEN_MIGOR
tristate "Renesas MIGO-R touchscreen"
depends on SH_MIGOR && I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 5920c60f999d..178eb128d90f 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o
obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o
obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o
obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
new file mode 100644
index 000000000000..9afc777a40a7
--- /dev/null
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -0,0 +1,898 @@
+/*
+ * Copyright (C) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This is a driver for the EDT "Polytouch" family of touch controllers
+ * based on the FocalTech FT5x06 line of chips.
+ *
+ * Development of this driver has been sponsored by Glyn:
+ * http://www.glyn.com/Products/Displays
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/i2c.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/input/mt.h>
+#include <linux/input/edt-ft5x06.h>
+
+#define MAX_SUPPORT_POINTS 5
+
+#define WORK_REGISTER_THRESHOLD 0x00
+#define WORK_REGISTER_REPORT_RATE 0x08
+#define WORK_REGISTER_GAIN 0x30
+#define WORK_REGISTER_OFFSET 0x31
+#define WORK_REGISTER_NUM_X 0x33
+#define WORK_REGISTER_NUM_Y 0x34
+
+#define WORK_REGISTER_OPMODE 0x3c
+#define FACTORY_REGISTER_OPMODE 0x01
+
+#define TOUCH_EVENT_DOWN 0x00
+#define TOUCH_EVENT_UP 0x01
+#define TOUCH_EVENT_ON 0x02
+#define TOUCH_EVENT_RESERVED 0x03
+
+#define EDT_NAME_LEN 23
+#define EDT_SWITCH_MODE_RETRIES 10
+#define EDT_SWITCH_MODE_DELAY 5 /* msec */
+#define EDT_RAW_DATA_RETRIES 100
+#define EDT_RAW_DATA_DELAY 1 /* msec */
+
+struct edt_ft5x06_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ u16 num_x;
+ u16 num_y;
+
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debug_dir;
+ u8 *raw_buffer;
+ size_t raw_bufsize;
+#endif
+
+ struct mutex mutex;
+ bool factory_mode;
+ int threshold;
+ int gain;
+ int offset;
+ int report_rate;
+
+ char name[EDT_NAME_LEN];
+};
+
+static int edt_ft5x06_ts_readwrite(struct i2c_client *client,
+ u16 wr_len, u8 *wr_buf,
+ u16 rd_len, u8 *rd_buf)
+{
+ struct i2c_msg wrmsg[2];
+ int i = 0;
+ int ret;
+
+ if (wr_len) {
+ wrmsg[i].addr = client->addr;
+ wrmsg[i].flags = 0;
+ wrmsg[i].len = wr_len;
+ wrmsg[i].buf = wr_buf;
+ i++;
+ }
+ if (rd_len) {
+ wrmsg[i].addr = client->addr;
+ wrmsg[i].flags = I2C_M_RD;
+ wrmsg[i].len = rd_len;
+ wrmsg[i].buf = rd_buf;
+ i++;
+ }
+
+ ret = i2c_transfer(client->adapter, wrmsg, i);
+ if (ret < 0)
+ return ret;
+ if (ret != i)
+ return -EIO;
+
+ return 0;
+}
+
+static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata,
+ u8 *buf, int buflen)
+{
+ int i;
+ u8 crc = 0;
+
+ for (i = 0; i < buflen - 1; i++)
+ crc ^= buf[i];
+
+ if (crc != buf[buflen-1]) {
+ dev_err_ratelimited(&tsdata->client->dev,
+ "crc error: 0x%02x expected, got 0x%02x\n",
+ crc, buf[buflen-1]);
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id)
+{
+ struct edt_ft5x06_ts_data *tsdata = dev_id;
+ struct device *dev = &tsdata->client->dev;
+ u8 cmd = 0xf9;
+ u8 rdbuf[26];
+ int i, type, x, y, id;
+ int error;
+
+ memset(rdbuf, 0, sizeof(rdbuf));
+
+ error = edt_ft5x06_ts_readwrite(tsdata->client,
+ sizeof(cmd), &cmd,
+ sizeof(rdbuf), rdbuf);
+ if (error) {
+ dev_err_ratelimited(dev, "Unable to fetch data, error: %d\n",
+ error);
+ goto out;
+ }
+
+ if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa || rdbuf[2] != 26) {
+ dev_err_ratelimited(dev, "Unexpected header: %02x%02x%02x!\n",
+ rdbuf[0], rdbuf[1], rdbuf[2]);
+ goto out;
+ }
+
+ if (!edt_ft5x06_ts_check_crc(tsdata, rdbuf, 26))
+ goto out;
+
+ for (i = 0; i < MAX_SUPPORT_POINTS; i++) {
+ u8 *buf = &rdbuf[i * 4 + 5];
+ bool down;
+
+ type = buf[0] >> 6;
+ /* ignore Reserved events */
+ if (type == TOUCH_EVENT_RESERVED)
+ continue;
+
+ x = ((buf[0] << 8) | buf[1]) & 0x0fff;
+ y = ((buf[2] << 8) | buf[3]) & 0x0fff;
+ id = (buf[2] >> 4) & 0x0f;
+ down = (type != TOUCH_EVENT_UP);
+
+ input_mt_slot(tsdata->input, id);
+ input_mt_report_slot_state(tsdata->input, MT_TOOL_FINGER, down);
+
+ if (!down)
+ continue;
+
+ input_report_abs(tsdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(tsdata->input, ABS_MT_POSITION_Y, y);
+ }
+
+ input_mt_report_pointer_emulation(tsdata->input, true);
+ input_sync(tsdata->input);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
+ u8 addr, u8 value)
+{
+ u8 wrbuf[4];
+
+ wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+ wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+ wrbuf[2] = value;
+ wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
+
+ return edt_ft5x06_ts_readwrite(tsdata->client, 4, wrbuf, 0, NULL);
+}
+
+static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata,
+ u8 addr)
+{
+ u8 wrbuf[2], rdbuf[2];
+ int error;
+
+ wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
+ wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
+ wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40;
+
+ error = edt_ft5x06_ts_readwrite(tsdata->client, 2, wrbuf, 2, rdbuf);
+ if (error)
+ return error;
+
+ if ((wrbuf[0] ^ wrbuf[1] ^ rdbuf[0]) != rdbuf[1]) {
+ dev_err(&tsdata->client->dev,
+ "crc error: 0x%02x expected, got 0x%02x\n",
+ wrbuf[0] ^ wrbuf[1] ^ rdbuf[0], rdbuf[1]);
+ return -EIO;
+ }
+
+ return rdbuf[0];
+}
+
+struct edt_ft5x06_attribute {
+ struct device_attribute dattr;
+ size_t field_offset;
+ u8 limit_low;
+ u8 limit_high;
+ u8 addr;
+};
+
+#define EDT_ATTR(_field, _mode, _addr, _limit_low, _limit_high) \
+ struct edt_ft5x06_attribute edt_ft5x06_attr_##_field = { \
+ .dattr = __ATTR(_field, _mode, \
+ edt_ft5x06_setting_show, \
+ edt_ft5x06_setting_store), \
+ .field_offset = \
+ offsetof(struct edt_ft5x06_ts_data, _field), \
+ .limit_low = _limit_low, \
+ .limit_high = _limit_high, \
+ .addr = _addr, \
+ }
+
+static ssize_t edt_ft5x06_setting_show(struct device *dev,
+ struct device_attribute *dattr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ struct edt_ft5x06_attribute *attr =
+ container_of(dattr, struct edt_ft5x06_attribute, dattr);
+ u8 *field = (u8 *)((char *)tsdata + attr->field_offset);
+ int val;
+ size_t count = 0;
+ int error = 0;
+
+ mutex_lock(&tsdata->mutex);
+
+ if (tsdata->factory_mode) {
+ error = -EIO;
+ goto out;
+ }
+
+ val = edt_ft5x06_register_read(tsdata, attr->addr);
+ if (val < 0) {
+ error = val;
+ dev_err(&tsdata->client->dev,
+ "Failed to fetch attribute %s, error %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+
+ if (val != *field) {
+ dev_warn(&tsdata->client->dev,
+ "%s: read (%d) and stored value (%d) differ\n",
+ dattr->attr.name, val, *field);
+ *field = val;
+ }
+
+ count = scnprintf(buf, PAGE_SIZE, "%d\n", val);
+out:
+ mutex_unlock(&tsdata->mutex);
+ return error ?: count;
+}
+
+static ssize_t edt_ft5x06_setting_store(struct device *dev,
+ struct device_attribute *dattr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+ struct edt_ft5x06_attribute *attr =
+ container_of(dattr, struct edt_ft5x06_attribute, dattr);
+ u8 *field = (u8 *)((char *)tsdata + attr->field_offset);
+ unsigned int val;
+ int error;
+
+ mutex_lock(&tsdata->mutex);
+
+ if (tsdata->factory_mode) {
+ error = -EIO;
+ goto out;
+ }
+
+ error = kstrtouint(buf, 0, &val);
+ if (error)
+ goto out;
+
+ if (val < attr->limit_low || val > attr->limit_high) {
+ error = -ERANGE;
+ goto out;
+ }
+
+ error = edt_ft5x06_register_write(tsdata, attr->addr, val);
+ if (error) {
+ dev_err(&tsdata->client->dev,
+ "Failed to update attribute %s, error: %d\n",
+ dattr->attr.name, error);
+ goto out;
+ }
+
+ *field = val;
+
+out:
+ mutex_unlock(&tsdata->mutex);
+ return error ?: count;
+}
+
+static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, 0, 31);
+static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, 0, 31);
+static EDT_ATTR(threshold, S_IWUSR | S_IRUGO,
+ WORK_REGISTER_THRESHOLD, 20, 80);
+static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO,
+ WORK_REGISTER_REPORT_RATE, 3, 14);
+
+static struct attribute *edt_ft5x06_attrs[] = {
+ &edt_ft5x06_attr_gain.dattr.attr,
+ &edt_ft5x06_attr_offset.dattr.attr,
+ &edt_ft5x06_attr_threshold.dattr.attr,
+ &edt_ft5x06_attr_report_rate.dattr.attr,
+ NULL
+};
+
+static const struct attribute_group edt_ft5x06_attr_group = {
+ .attrs = edt_ft5x06_attrs,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata)
+{
+ struct i2c_client *client = tsdata->client;
+ int retries = EDT_SWITCH_MODE_RETRIES;
+ int ret;
+ int error;
+
+ disable_irq(client->irq);
+
+ if (!tsdata->raw_buffer) {
+ tsdata->raw_bufsize = tsdata->num_x * tsdata->num_y *
+ sizeof(u16);
+ tsdata->raw_buffer = kzalloc(tsdata->raw_bufsize, GFP_KERNEL);
+ if (!tsdata->raw_buffer) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ /* mode register is 0x3c when in work mode */
+ error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to switch to factory mode, error %d\n", error);
+ goto err_out;
+ }
+
+ tsdata->factory_mode = true;
+ do {
+ mdelay(EDT_SWITCH_MODE_DELAY);
+ /* mode register is 0x01 when in factory mode */
+ ret = edt_ft5x06_register_read(tsdata, FACTORY_REGISTER_OPMODE);
+ if (ret == 0x03)
+ break;
+ } while (--retries > 0);
+
+ if (retries == 0) {
+ dev_err(&client->dev, "not in factory mode after %dms.\n",
+ EDT_SWITCH_MODE_RETRIES * EDT_SWITCH_MODE_DELAY);
+ error = -EIO;
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ kfree(tsdata->raw_buffer);
+ tsdata->raw_buffer = NULL;
+ tsdata->factory_mode = false;
+ enable_irq(client->irq);
+
+ return error;
+}
+
+static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata)
+{
+ struct i2c_client *client = tsdata->client;
+ int retries = EDT_SWITCH_MODE_RETRIES;
+ int ret;
+ int error;
+
+ /* mode register is 0x01 when in the factory mode */
+ error = edt_ft5x06_register_write(tsdata, FACTORY_REGISTER_OPMODE, 0x1);
+ if (error) {
+ dev_err(&client->dev,
+ "failed to switch to work mode, error: %d\n", error);
+ return error;
+ }
+
+ tsdata->factory_mode = false;
+
+ do {
+ mdelay(EDT_SWITCH_MODE_DELAY);
+ /* mode register is 0x3c when in work mode */
+ ret = edt_ft5x06_register_read(tsdata, WORK_REGISTER_OPMODE);
+ if (ret == 0x01)
+ break;
+ } while (--retries > 0);
+
+ if (retries == 0) {
+ dev_err(&client->dev, "not in work mode after %dms.\n",
+ EDT_SWITCH_MODE_RETRIES * EDT_SWITCH_MODE_DELAY);
+ tsdata->factory_mode = true;
+ return -EIO;
+ }
+
+ if (tsdata->raw_buffer)
+ kfree(tsdata->raw_buffer);
+ tsdata->raw_buffer = NULL;
+
+ /* restore parameters */
+ edt_ft5x06_register_write(tsdata, WORK_REGISTER_THRESHOLD,
+ tsdata->threshold);
+ edt_ft5x06_register_write(tsdata, WORK_REGISTER_GAIN,
+ tsdata->gain);
+ edt_ft5x06_register_write(tsdata, WORK_REGISTER_OFFSET,
+ tsdata->offset);
+ edt_ft5x06_register_write(tsdata, WORK_REGISTER_REPORT_RATE,
+ tsdata->report_rate);
+
+ enable_irq(client->irq);
+
+ return 0;
+}
+
+static int edt_ft5x06_debugfs_mode_get(void *data, u64 *mode)
+{
+ struct edt_ft5x06_ts_data *tsdata = data;
+
+ *mode = tsdata->factory_mode;
+
+ return 0;
+};
+
+static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode)
+{
+ struct edt_ft5x06_ts_data *tsdata = data;
+ int retval = 0;
+
+ if (mode > 1)
+ return -ERANGE;
+
+ mutex_lock(&tsdata->mutex);
+
+ if (mode != tsdata->factory_mode) {
+ retval = mode ? edt_ft5x06_factory_mode(tsdata) :
+ edt_ft5x06_work_mode(tsdata);
+ }
+
+ mutex_unlock(&tsdata->mutex);
+
+ return retval;
+};
+
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_mode_fops, edt_ft5x06_debugfs_mode_get,
+ edt_ft5x06_debugfs_mode_set, "%llu\n");
+
+static int edt_ft5x06_debugfs_raw_data_open(struct inode *inode,
+ struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file,
+ char __user *buf, size_t count, loff_t *off)
+{
+ struct edt_ft5x06_ts_data *tsdata = file->private_data;
+ struct i2c_client *client = tsdata->client;
+ int retries = EDT_RAW_DATA_RETRIES;
+ int val, i, error;
+ size_t read = 0;
+ int colbytes;
+ char wrbuf[3];
+ u8 *rdbuf;
+
+ if (*off < 0 || *off >= tsdata->raw_bufsize)
+ return 0;
+
+ mutex_lock(&tsdata->mutex);
+
+ if (!tsdata->factory_mode || !tsdata->raw_buffer) {
+ error = -EIO;
+ goto out;
+ }
+
+ error = edt_ft5x06_register_write(tsdata, 0x08, 0x01);
+ if (error) {
+ dev_dbg(&client->dev,
+ "failed to write 0x08 register, error %d\n", error);
+ goto out;
+ }
+
+ do {
+ msleep(EDT_RAW_DATA_DELAY);
+ val = edt_ft5x06_register_read(tsdata, 0x08);
+ if (val < 1)
+ break;
+ } while (--retries > 0);
+
+ if (val < 0) {
+ error = val;
+ dev_dbg(&client->dev,
+ "failed to read 0x08 register, error %d\n", error);
+ goto out;
+ }
+
+ if (retries == 0) {
+ dev_dbg(&client->dev,
+ "timed out waiting for register to settle\n");
+ error = -ETIMEDOUT;
+ goto out;
+ }
+
+ rdbuf = tsdata->raw_buffer;
+ colbytes = tsdata->num_y * sizeof(u16);
+
+ wrbuf[0] = 0xf5;
+ wrbuf[1] = 0x0e;
+ for (i = 0; i < tsdata->num_x; i++) {
+ wrbuf[2] = i; /* column index */
+ error = edt_ft5x06_ts_readwrite(tsdata->client,
+ sizeof(wrbuf), wrbuf,
+ colbytes, rdbuf);
+ if (error)
+ goto out;
+
+ rdbuf += colbytes;
+ }
+
+ read = min_t(size_t, count, tsdata->raw_bufsize - *off);
+ error = copy_to_user(buf, tsdata->raw_buffer + *off, read);
+ if (!error)
+ *off += read;
+out:
+ mutex_unlock(&tsdata->mutex);
+ return error ?: read;
+};
+
+
+static const struct file_operations debugfs_raw_data_fops = {
+ .open = edt_ft5x06_debugfs_raw_data_open,
+ .read = edt_ft5x06_debugfs_raw_data_read,
+};
+
+static void __devinit
+edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
+{
+ tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL);
+ if (!tsdata->debug_dir)
+ return;
+
+ debugfs_create_u16("num_x", S_IRUSR, tsdata->debug_dir, &tsdata->num_x);
+ debugfs_create_u16("num_y", S_IRUSR, tsdata->debug_dir, &tsdata->num_y);
+
+ debugfs_create_file("mode", S_IRUSR | S_IWUSR,
+ tsdata->debug_dir, tsdata, &debugfs_mode_fops);
+ debugfs_create_file("raw_data", S_IRUSR,
+ tsdata->debug_dir, tsdata, &debugfs_raw_data_fops);
+}
+
+static void __devexit
+edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+{
+ if (tsdata->debug_dir)
+ debugfs_remove_recursive(tsdata->debug_dir);
+}
+
+#else
+
+static inline void
+edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
+ const char *debugfs_name)
+{
+}
+
+static inline void
+edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+
+
+static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client,
+ int reset_pin)
+{
+ int error;
+
+ if (gpio_is_valid(reset_pin)) {
+ /* this pulls reset down, enabling the low active reset */
+ error = gpio_request_one(reset_pin, GPIOF_OUT_INIT_LOW,
+ "edt-ft5x06 reset");
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d as reset pin, error %d\n",
+ reset_pin, error);
+ return error;
+ }
+
+ mdelay(50);
+ gpio_set_value(reset_pin, 1);
+ mdelay(100);
+ }
+
+ return 0;
+}
+
+static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client,
+ char *model_name,
+ char *fw_version)
+{
+ u8 rdbuf[EDT_NAME_LEN];
+ char *p;
+ int error;
+
+ error = edt_ft5x06_ts_readwrite(client, 1, "\xbb",
+ EDT_NAME_LEN - 1, rdbuf);
+ if (error)
+ return error;
+
+ /* remove last '$' end marker */
+ rdbuf[EDT_NAME_LEN - 1] = '\0';
+ if (rdbuf[EDT_NAME_LEN - 2] == '$')
+ rdbuf[EDT_NAME_LEN - 2] = '\0';
+
+ /* look for Model/Version separator */
+ p = strchr(rdbuf, '*');
+ if (p)
+ *p++ = '\0';
+
+ strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN);
+ strlcpy(fw_version, p ? p : "", EDT_NAME_LEN);
+
+ return 0;
+}
+
+#define EDT_ATTR_CHECKSET(name, reg) \
+ if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
+ pdata->name <= edt_ft5x06_attr_##name.limit_high) \
+ edt_ft5x06_register_write(tsdata, reg, pdata->name)
+
+static void __devinit
+edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata,
+ const struct edt_ft5x06_platform_data *pdata)
+{
+ if (!pdata->use_parameters)
+ return;
+
+ /* pick up defaults from the platform data */
+ EDT_ATTR_CHECKSET(threshold, WORK_REGISTER_THRESHOLD);
+ EDT_ATTR_CHECKSET(gain, WORK_REGISTER_GAIN);
+ EDT_ATTR_CHECKSET(offset, WORK_REGISTER_OFFSET);
+ EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE);
+}
+
+static void __devinit
+edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata)
+{
+ tsdata->threshold = edt_ft5x06_register_read(tsdata,
+ WORK_REGISTER_THRESHOLD);
+ tsdata->gain = edt_ft5x06_register_read(tsdata, WORK_REGISTER_GAIN);
+ tsdata->offset = edt_ft5x06_register_read(tsdata, WORK_REGISTER_OFFSET);
+ tsdata->report_rate = edt_ft5x06_register_read(tsdata,
+ WORK_REGISTER_REPORT_RATE);
+ tsdata->num_x = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_X);
+ tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y);
+}
+
+static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct edt_ft5x06_platform_data *pdata =
+ client->dev.platform_data;
+ struct edt_ft5x06_ts_data *tsdata;
+ struct input_dev *input;
+ int error;
+ char fw_version[EDT_NAME_LEN];
+
+ dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n");
+
+ if (!pdata) {
+ dev_err(&client->dev, "no platform data?\n");
+ return -EINVAL;
+ }
+
+ error = edt_ft5x06_ts_reset(client, pdata->reset_pin);
+ if (error)
+ return error;
+
+ if (gpio_is_valid(pdata->irq_pin)) {
+ error = gpio_request_one(pdata->irq_pin,
+ GPIOF_IN, "edt-ft5x06 irq");
+ if (error) {
+ dev_err(&client->dev,
+ "Failed to request GPIO %d, error %d\n",
+ pdata->irq_pin, error);
+ return error;
+ }
+ }
+
+ tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsdata || !input) {
+ dev_err(&client->dev, "failed to allocate driver data.\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ mutex_init(&tsdata->mutex);
+ tsdata->client = client;
+ tsdata->input = input;
+ tsdata->factory_mode = false;
+
+ error = edt_ft5x06_ts_identify(client, tsdata->name, fw_version);
+ if (error) {
+ dev_err(&client->dev, "touchscreen probe failed\n");
+ goto err_free_mem;
+ }
+
+ edt_ft5x06_ts_get_defaults(tsdata, pdata);
+ edt_ft5x06_ts_get_parameters(tsdata);
+
+ dev_dbg(&client->dev,
+ "Model \"%s\", Rev. \"%s\", %dx%d sensors\n",
+ tsdata->name, fw_version, tsdata->num_x, tsdata->num_y);
+
+ input->name = tsdata->name;
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+
+ __set_bit(EV_SYN, input->evbit);
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, tsdata->num_x * 64 - 1, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, tsdata->num_y * 64 - 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ 0, tsdata->num_x * 64 - 1, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ 0, tsdata->num_y * 64 - 1, 0, 0);
+ error = input_mt_init_slots(input, MAX_SUPPORT_POINTS);
+ if (error) {
+ dev_err(&client->dev, "Unable to init MT slots.\n");
+ goto err_free_mem;
+ }
+
+ input_set_drvdata(input, tsdata);
+ i2c_set_clientdata(client, tsdata);
+
+ error = request_threaded_irq(client->irq, NULL, edt_ft5x06_ts_isr,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->name, tsdata);
+ if (error) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+ goto err_free_mem;
+ }
+
+ error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+ if (error)
+ goto err_free_irq;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_remove_attrs;
+
+ edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev));
+ device_init_wakeup(&client->dev, 1);
+
+ dev_dbg(&client->dev,
+ "EDT FT5x06 initialized: IRQ pin %d, Reset pin %d.\n",
+ pdata->irq_pin, pdata->reset_pin);
+
+ return 0;
+
+err_remove_attrs:
+ sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+err_free_irq:
+ free_irq(client->irq, tsdata);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsdata);
+
+ if (gpio_is_valid(pdata->irq_pin))
+ gpio_free(pdata->irq_pin);
+
+ return error;
+}
+
+static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
+{
+ const struct edt_ft5x06_platform_data *pdata =
+ dev_get_platdata(&client->dev);
+ struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client);
+
+ edt_ft5x06_ts_teardown_debugfs(tsdata);
+ sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group);
+
+ free_irq(client->irq, tsdata);
+ input_unregister_device(tsdata->input);
+
+ if (gpio_is_valid(pdata->irq_pin))
+ gpio_free(pdata->irq_pin);
+ if (gpio_is_valid(pdata->reset_pin))
+ gpio_free(pdata->reset_pin);
+
+ kfree(tsdata->raw_buffer);
+ kfree(tsdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int edt_ft5x06_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int edt_ft5x06_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops,
+ edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume);
+
+static const struct i2c_device_id edt_ft5x06_ts_id[] = {
+ { "edt-ft5x06", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id);
+
+static struct i2c_driver edt_ft5x06_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "edt_ft5x06",
+ .pm = &edt_ft5x06_ts_pm_ops,
+ },
+ .id_table = edt_ft5x06_ts_id,
+ .probe = edt_ft5x06_ts_probe,
+ .remove = __devexit_p(edt_ft5x06_ts_remove),
+};
+
+module_i2c_driver(edt_ft5x06_ts_driver);
+
+MODULE_AUTHOR("Simon Budig <simon.budig@kernelconcepts.de>");
+MODULE_DESCRIPTION("EDT FT5x06 I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
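In work mode every register access in this driver is protected by a one-byte XOR checksum: a read of address A sends {0xfc, (A & 0x3f) | 0x40} and expects {value, 0xfc ^ ((A & 0x3f) | 0x40) ^ value} in return, which is what edt_ft5x06_register_read() verifies. For example, reading the threshold register (address 0x00) when it holds 0x28 should yield a checksum byte of 0xfc ^ 0x40 ^ 0x28 = 0x94. A minimal checker for such a reply, as a standalone sketch (illustrative only, not part of the driver):

        /* Illustrative: validate a 2-byte work-mode register reply. */
        static bool edt_reply_ok(u8 addr, u8 value, u8 csum)
        {
                return (u8)(0xfc ^ ((addr & 0x3f) | 0x40) ^ value) == csum;
        }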
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 503c7096ed36..908407efc672 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -48,7 +48,7 @@ struct eeti_ts_priv {
struct input_dev *input;
struct work_struct work;
struct mutex mutex;
- int irq, irq_active_high;
+ int irq_gpio, irq, irq_active_high;
};
#define EETI_TS_BITDEPTH (11)
@@ -62,7 +62,7 @@ struct eeti_ts_priv {
static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv)
{
- return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high;
+ return gpio_get_value(priv->irq_gpio) == priv->irq_active_high;
}
static void eeti_ts_read(struct work_struct *work)
@@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev)
static int __devinit eeti_ts_probe(struct i2c_client *client,
const struct i2c_device_id *idp)
{
- struct eeti_ts_platform_data *pdata;
+ struct eeti_ts_platform_data *pdata = client->dev.platform_data;
struct eeti_ts_priv *priv;
struct input_dev *input;
unsigned int irq_flags;
@@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
priv->client = client;
priv->input = input;
- priv->irq = client->irq;
+ priv->irq_gpio = pdata->irq_gpio;
+ priv->irq = gpio_to_irq(pdata->irq_gpio);
- pdata = client->dev.platform_data;
+ err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
+ if (err < 0)
+ goto err1;
if (pdata)
priv->irq_active_high = pdata->irq_active_high;
@@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
err = input_register_device(input);
if (err)
- goto err1;
+ goto err2;
err = request_irq(priv->irq, eeti_ts_isr, irq_flags,
client->name, priv);
if (err) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err2;
+ goto err3;
}
/*
@@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client,
device_init_wakeup(&client->dev, 0);
return 0;
-err2:
+err3:
input_unregister_device(input);
input = NULL; /* so we dont try to free it below */
+err2:
+ gpio_free(pdata->irq_gpio);
err1:
input_free_device(input);
kfree(priv);
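The reworked probe path derives the interrupt from a GPIO supplied via platform data instead of relying on irq_to_gpio(): the pin is requested as an input and gpio_to_irq() provides the IRQ number later passed to request_irq(). A condensed, hedged sketch of that pattern (the helper name is illustrative; error unwinding trimmed):

        /* Illustrative: request a GPIO and use its interrupt line. */
        static int claim_gpio_irq(struct device *dev, unsigned gpio,
                                  irq_handler_t handler, unsigned long flags,
                                  void *data)
        {
                int err = gpio_request_one(gpio, GPIOF_IN, dev_name(dev));

                if (err)
                        return err;
                return request_irq(gpio_to_irq(gpio), handler, flags,
                                   dev_name(dev), data);
        }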
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index d9be6eac99b1..7f03d1bd916e 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/jornada720.h>
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6d1cbdfc9b2a..b64502dfa9f4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev)
} else
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
while (!pci_is_root_bus(dma_pdev->bus)) {
- if (pci_acs_path_enabled(dma_pdev->bus->self,
- NULL, REQ_ACS_FLAGS))
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
+root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
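The inner loop added here handles buses that have no bridge device behind them (bus->self == NULL), as can happen with SR-IOV virtual buses: it climbs bus->parent until it reaches a real bridge or the root bus. A standalone sketch of that lookup, factored into a helper for clarity (the helper name is illustrative and not part of the patch):

        /* Illustrative: find the first upstream bridge, skipping virtual buses. */
        static struct pci_dev *first_physical_upstream(struct pci_bus *bus)
        {
                while (!bus->self) {
                        if (pci_is_root_bus(bus))
                                return NULL;    /* nothing upstream left to test */
                        bus = bus->parent;
                }
                return bus->self;
        }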
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 500e7f15f5c2..18a89b760aaa 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1111,7 +1111,7 @@ static void print_iommu_info(void)
if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
pr_info("AMD-Vi: Extended features: ");
- for (i = 0; ARRAY_SIZE(feat_str); ++i) {
+ for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
if (iommu_feature(iommu, (1ULL << i)))
pr_cont(" %s", feat_str[i]);
}
@@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void)
break;
}
- /* Make sure ACS will be enabled */
- pci_request_acs();
-
ret = amd_iommu_init_devices();
print_iommu_info();
@@ -1652,6 +1649,9 @@ static bool detect_ivrs(void)
early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ /* Make sure ACS will be enabled during PCI probe */
+ pci_request_acs();
+
return true;
}
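The print_iommu_info() fix replaces a loop condition that was always true: ARRAY_SIZE(feat_str) is a nonzero constant, so the original "for (i = 0; ARRAY_SIZE(feat_str); ++i)" never terminated and ran the index past the end of the array. The corrected idiom bounds the index against the array size, roughly as in this sketch (the table contents are placeholders, not the driver's actual strings):

        /* Illustrative: the corrected bounded-iteration idiom. */
        static void print_features(void)
        {
                static const char * const feat_str[] = { "PreF", "PPR", "X2APIC" };
                size_t i;

                for (i = 0; i < ARRAY_SIZE(feat_str); ++i)
                        pr_cont(" %s", feat_str[i]);
        }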
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 45350ff5e93c..80bad32aa463 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
- dom->geometry.aperture_start = 0;
- dom->geometry.aperture_end = ~0UL;
- dom->geometry.force_aperture = true;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = ~0UL;
+ domain->geometry.force_aperture = true;
domain->priv = priv;
return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 7469b5346643..2297ec193eb4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
if (!drhd) {
printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n",
pci_name(pdev));
+ free_domain_mem(domain);
return NULL;
}
iommu = drhd->iommu;
@@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev)
} else
dma_pdev = pci_dev_get(pdev);
+ /* Account for quirked devices */
swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+ /*
+ * If it's a multifunction device that does not support our
+ * required ACS flags, add to the same group as function 0.
+ */
if (dma_pdev->multifunction &&
!pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
swap_pci_ref(&dma_pdev,
@@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev)
PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
0)));
+ /*
+ * Devices on the root bus go through the iommu. If that's not us,
+ * find the next upstream device and test ACS up to the root bus.
+ * Finding the next device may require skipping virtual buses.
+ */
while (!pci_is_root_bus(dma_pdev->bus)) {
- if (pci_acs_path_enabled(dma_pdev->bus->self,
- NULL, REQ_ACS_FLAGS))
+ struct pci_bus *bus = dma_pdev->bus;
+
+ while (!bus->self) {
+ if (!pci_is_root_bus(bus))
+ bus = bus->parent;
+ else
+ goto root_bus;
+ }
+
+ if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
break;
- swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
}
+root_bus:
group = iommu_group_get(&dma_pdev->dev);
pci_dev_put(dma_pdev);
if (!group) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e0b18f3ae9a8..af8904de1d44 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
int ir_supported = 0;
+ int ioapic_idx;
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;
@@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void)
}
}
- if (ir_supported && ir_ioapic_num != nr_ioapics) {
- printk(KERN_WARNING
- "Not all IO-APIC's listed under remapping hardware\n");
- return -1;
+ if (!ir_supported)
+ return 0;
+
+ for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
+ int ioapic_id = mpc_ioapic_id(ioapic_idx);
+ if (!map_ioapic_to_ir(ioapic_id)) {
+ pr_err(FW_BUG "ioapic %d has no mapping iommu, "
+ "interrupt remapping will be disabled\n",
+ ioapic_id);
+ return -1;
+ }
}
- return ir_supported;
+ return 1;
}
int __init ir_dev_scope_init(void)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 4ba325ab6262..2a4bb36bc688 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain,
goto out;
}
}
- dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev));
+ dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev));
out:
spin_unlock(&as->client_lock);
}
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
- int i, err = -ENODEV;
+ int i, err = -EAGAIN;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
@@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain)
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
as = &smmu->as[i];
- if (!as->pdir_page) {
- err = alloc_pdir(as);
- if (!err)
- goto found;
- }
+
+ if (as->pdir_page)
+ continue;
+
+ err = alloc_pdir(as);
+ if (!err)
+ goto found;
+
if (err != -EAGAIN)
break;
}
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index c08fc605e56b..fa6ca4733725 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -449,7 +449,8 @@ hdlc_fill_fifo(struct bchannel *bch)
{
struct fritzcard *fc = bch->hw;
struct hdlc_hw *hdlc;
- int count, fs, cnt = 0, idx, fillempty = 0;
+ int count, fs, cnt = 0, idx;
+ bool fillempty = false;
u8 *p;
u32 *ptr, val, addr;
@@ -462,7 +463,7 @@ hdlc_fill_fifo(struct bchannel *bch)
return;
count = fs;
p = bch->fill;
- fillempty = 1;
+ fillempty = true;
} else {
count = bch->tx_skb->len - bch->tx_idx;
if (count <= 0)
@@ -477,7 +478,7 @@ hdlc_fill_fifo(struct bchannel *bch)
hdlc->ctrl.sr.cmd |= HDLC_CMD_XME;
}
ptr = (u32 *)p;
- if (fillempty) {
+ if (!fillempty) {
pr_debug("%s.B%d: %d/%d/%d", fc->name, bch->nr, count,
bch->tx_idx, bch->tx_skb->len);
bch->tx_idx += count;
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index 5405ec644db3..baf2686aa8eb 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -16,7 +16,6 @@
#include <linux/sched.h>
#include "isdnloop.h"
-static char *revision = "$Revision: 1.11.6.7 $";
static char *isdnloop_id = "loop0";
MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card");
@@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1)
static int __init
isdnloop_init(void)
{
- char *p;
- char rev[10];
-
- if ((p = strchr(revision, ':'))) {
- strcpy(rev, p + 1);
- p = strchr(rev, '$');
- *p = 0;
- } else
- strcpy(rev, " ??? ");
- printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev);
-
if (isdnloop_id)
return (isdnloop_addcard(isdnloop_id));
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 0dc8abca1407..949cabb88f1c 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
InitWin(l2);
l2->l2m.fsm = &l2fsm;
if (test_bit(FLG_LAPB, &l2->flag) ||
- test_bit(FLG_PTP, &l2->flag) ||
+ test_bit(FLG_FIXED_TEI, &l2->flag) ||
test_bit(FLG_LAPD_NET, &l2->flag))
l2->l2m.state = ST_L2_4;
else
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 12b2b55c519e..c96bbaadeebd 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -200,6 +200,13 @@ config LEDS_LP5523
Driver provides direct control via LED class and interface for
programming the engines.
+config LEDS_LP8788
+ tristate "LED support for the TI LP8788 PMIC"
+ depends on LEDS_CLASS
+ depends on MFD_LP8788
+ help
+ This option enables support for the Keyboard LEDs on the LP8788 PMIC.
+
config LEDS_CLEVO_MAIL
tristate "Mail LED on Clevo notebook"
depends on LEDS_CLASS
@@ -415,6 +422,14 @@ config LEDS_MAX8997
This option enables support for on-chip LED drivers on
MAXIM MAX8997 PMIC.
+config LEDS_LM3556
+ tristate "LED support for LM3556 Chip"
+ depends on LEDS_CLASS && I2C
+ select REGMAP_I2C
+ help
+ This option enables support for LEDs connected to LM3556.
+ LM3556 includes Torch, Flash and Indicator functions.
+
config LEDS_OT200
tristate "LED support for the Bachmann OT200"
depends on LEDS_CLASS && HAS_IOMEM
@@ -422,6 +437,14 @@ config LEDS_OT200
This option enables support for the LEDs on the Bachmann OT200.
Say Y to enable LEDs on the Bachmann OT200.
+config LEDS_BLINKM
+ tristate "LED support for the BlinkM I2C RGB LED"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for the BlinkM RGB LED connected
+ through I2C. Say Y to enable support for the BlinkM LED.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
@@ -443,6 +466,20 @@ config LEDS_TRIGGER_TIMER
If unsure, say Y.
+config LEDS_TRIGGER_ONESHOT
+ tristate "LED One-shot Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to blink in one-shot pulses with parameters
+ controlled via sysfs. It's useful to notify the user on
+ sporadic events, when there are no clear begin and end trap points,
+ or on dense events, where this blinks the LED at constant rate if
+ rearmed continuously.
+
+ It also shows how to use the led_blink_set_oneshot() function.
+
+ If unsure, say Y.
+
config LEDS_TRIGGER_IDE_DISK
bool "LED IDE Disk Trigger"
depends on IDE_GD_ATA
@@ -497,7 +534,7 @@ config LEDS_TRIGGER_TRANSIENT
depends on LEDS_TRIGGERS
help
This allows one time activation of a transient state on
- GPIO/PWM based hadrware.
+ GPIO/PWM based hardware.
If unsure, say Y.
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index f8958cd6cf6e..a4429a9217bc 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
@@ -47,12 +48,15 @@ obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
+obj-$(CONFIG_LEDS_LM3556) += leds-lm3556.o
+obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
+obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index e663e6f413e9..c599095bc005 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -53,7 +53,7 @@ static ssize_t led_brightness_store(struct device *dev,
if (state == LED_OFF)
led_trigger_remove(led_cdev);
- led_set_brightness(led_cdev, state);
+ __led_set_brightness(led_cdev, state);
return size;
}
@@ -82,7 +82,12 @@ static void led_timer_function(unsigned long data)
unsigned long delay;
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
- led_set_brightness(led_cdev, LED_OFF);
+ __led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
+ if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
+ led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
return;
}
@@ -100,7 +105,21 @@ static void led_timer_function(unsigned long data)
delay = led_cdev->blink_delay_off;
}
- led_set_brightness(led_cdev, brightness);
+ __led_set_brightness(led_cdev, brightness);
+
+ /* Return in next iteration if led is in one-shot mode and we are in
+ * the final blink state so that the led is toggled each delay_on +
+ * delay_off milliseconds in worst case.
+ */
+ if (led_cdev->flags & LED_BLINK_ONESHOT) {
+ if (led_cdev->flags & LED_BLINK_INVERT) {
+ if (brightness)
+ led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ } else {
+ if (!brightness)
+ led_cdev->flags |= LED_BLINK_ONESHOT_STOP;
+ }
+ }
mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
}
@@ -203,7 +222,7 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
#endif
/* Stop blinking */
- led_brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
device_unregister(led_cdev->dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d65353d8d3fc..2ab05af3de31 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -24,14 +24,6 @@ EXPORT_SYMBOL_GPL(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
-static void led_stop_software_blink(struct led_classdev *led_cdev)
-{
- /* deactivate previous settings */
- del_timer_sync(&led_cdev->blink_timer);
- led_cdev->blink_delay_on = 0;
- led_cdev->blink_delay_off = 0;
-}
-
static void led_set_software_blink(struct led_classdev *led_cdev,
unsigned long delay_on,
unsigned long delay_off)
@@ -53,7 +45,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
/* never off - just set to brightness */
if (!delay_off) {
- led_set_brightness(led_cdev, led_cdev->blink_brightness);
+ __led_set_brightness(led_cdev, led_cdev->blink_brightness);
return;
}
@@ -61,13 +53,12 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
}
-void led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
+static void led_blink_setup(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
{
- del_timer_sync(&led_cdev->blink_timer);
-
- if (led_cdev->blink_set &&
+ if (!(led_cdev->flags & LED_BLINK_ONESHOT) &&
+ led_cdev->blink_set &&
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
@@ -77,12 +68,49 @@ void led_blink_set(struct led_classdev *led_cdev,
led_set_software_blink(led_cdev, *delay_on, *delay_off);
}
+
+void led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ del_timer_sync(&led_cdev->blink_timer);
+
+ led_cdev->flags &= ~LED_BLINK_ONESHOT;
+ led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+
+ led_blink_setup(led_cdev, delay_on, delay_off);
+}
EXPORT_SYMBOL(led_blink_set);
-void led_brightness_set(struct led_classdev *led_cdev,
+void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert)
+{
+ if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
+ timer_pending(&led_cdev->blink_timer))
+ return;
+
+ led_cdev->flags |= LED_BLINK_ONESHOT;
+ led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+
+ if (invert)
+ led_cdev->flags |= LED_BLINK_INVERT;
+ else
+ led_cdev->flags &= ~LED_BLINK_INVERT;
+
+ led_blink_setup(led_cdev, delay_on, delay_off);
+}
+EXPORT_SYMBOL(led_blink_set_oneshot);
+
+void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- led_stop_software_blink(led_cdev);
- led_cdev->brightness_set(led_cdev, brightness);
+ /* stop and clear soft-blink timer */
+ del_timer_sync(&led_cdev->blink_timer);
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
+
+ __led_set_brightness(led_cdev, brightness);
}
-EXPORT_SYMBOL(led_brightness_set);
+EXPORT_SYMBOL(led_set_brightness);
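led_blink_set_oneshot() arms a single delay_on/delay_off pulse and ignores further calls while that pulse is still pending, which is what makes it suitable for the one-shot trigger added in this series. A hedged usage sketch from a hypothetical caller (the LED pointer and delay values are placeholders):

        /* Illustrative: fire a single 100ms-on / 200ms-off pulse. */
        static void notify_event(struct led_classdev *led)
        {
                unsigned long delay_on = 100, delay_off = 200;

                /* invert = 0: the blink ends with the LED off */
                led_blink_set_oneshot(led, &delay_on, &delay_off, 0);
        }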
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 46b4c766335d..363975b3c925 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -99,7 +99,7 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
EXPORT_SYMBOL_GPL(led_trigger_show);
/* Caller must ensure led_cdev->trigger_lock held */
-void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
+void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
unsigned long flags;
@@ -112,15 +112,15 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger)
if (led_cdev->trigger->deactivate)
led_cdev->trigger->deactivate(led_cdev);
led_cdev->trigger = NULL;
- led_brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
}
- if (trigger) {
- write_lock_irqsave(&trigger->leddev_list_lock, flags);
- list_add_tail(&led_cdev->trig_list, &trigger->led_cdevs);
- write_unlock_irqrestore(&trigger->leddev_list_lock, flags);
- led_cdev->trigger = trigger;
- if (trigger->activate)
- trigger->activate(led_cdev);
+ if (trig) {
+ write_lock_irqsave(&trig->leddev_list_lock, flags);
+ list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+ write_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ led_cdev->trigger = trig;
+ if (trig->activate)
+ trig->activate(led_cdev);
}
}
EXPORT_SYMBOL_GPL(led_trigger_set);
@@ -153,24 +153,24 @@ EXPORT_SYMBOL_GPL(led_trigger_set_default);
/* LED Trigger Interface */
-int led_trigger_register(struct led_trigger *trigger)
+int led_trigger_register(struct led_trigger *trig)
{
struct led_classdev *led_cdev;
- struct led_trigger *trig;
+ struct led_trigger *_trig;
- rwlock_init(&trigger->leddev_list_lock);
- INIT_LIST_HEAD(&trigger->led_cdevs);
+ rwlock_init(&trig->leddev_list_lock);
+ INIT_LIST_HEAD(&trig->led_cdevs);
down_write(&triggers_list_lock);
/* Make sure the trigger's name isn't already in use */
- list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(trig->name, trigger->name)) {
+ list_for_each_entry(_trig, &trigger_list, next_trig) {
+ if (!strcmp(_trig->name, trig->name)) {
up_write(&triggers_list_lock);
return -EEXIST;
}
}
/* Add to the list of led triggers */
- list_add_tail(&trigger->next_trig, &trigger_list);
+ list_add_tail(&trig->next_trig, &trigger_list);
up_write(&triggers_list_lock);
/* Register with any LEDs that have this as a default trigger */
@@ -178,8 +178,8 @@ int led_trigger_register(struct led_trigger *trigger)
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
if (!led_cdev->trigger && led_cdev->default_trigger &&
- !strcmp(led_cdev->default_trigger, trigger->name))
- led_trigger_set(led_cdev, trigger);
+ !strcmp(led_cdev->default_trigger, trig->name))
+ led_trigger_set(led_cdev, trig);
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
@@ -188,20 +188,20 @@ int led_trigger_register(struct led_trigger *trigger)
}
EXPORT_SYMBOL_GPL(led_trigger_register);
-void led_trigger_unregister(struct led_trigger *trigger)
+void led_trigger_unregister(struct led_trigger *trig)
{
struct led_classdev *led_cdev;
/* Remove from the list of led triggers */
down_write(&triggers_list_lock);
- list_del(&trigger->next_trig);
+ list_del(&trig->next_trig);
up_write(&triggers_list_lock);
/* Remove anyone actively using this trigger */
down_read(&leds_list_lock);
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
- if (led_cdev->trigger == trigger)
+ if (led_cdev->trigger == trig)
led_trigger_set(led_cdev, NULL);
up_write(&led_cdev->trigger_lock);
}
@@ -211,58 +211,80 @@ EXPORT_SYMBOL_GPL(led_trigger_unregister);
/* Simple LED Trigger Interface */
-void led_trigger_event(struct led_trigger *trigger,
+void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct list_head *entry;
- if (!trigger)
+ if (!trig)
return;
- read_lock(&trigger->leddev_list_lock);
- list_for_each(entry, &trigger->led_cdevs) {
+ read_lock(&trig->leddev_list_lock);
+ list_for_each(entry, &trig->led_cdevs) {
struct led_classdev *led_cdev;
led_cdev = list_entry(entry, struct led_classdev, trig_list);
- led_set_brightness(led_cdev, brightness);
+ __led_set_brightness(led_cdev, brightness);
}
- read_unlock(&trigger->leddev_list_lock);
+ read_unlock(&trig->leddev_list_lock);
}
EXPORT_SYMBOL_GPL(led_trigger_event);
-void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off)
+static void led_trigger_blink_setup(struct led_trigger *trig,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int oneshot,
+ int invert)
{
struct list_head *entry;
- if (!trigger)
+ if (!trig)
return;
- read_lock(&trigger->leddev_list_lock);
- list_for_each(entry, &trigger->led_cdevs) {
+ read_lock(&trig->leddev_list_lock);
+ list_for_each(entry, &trig->led_cdevs) {
struct led_classdev *led_cdev;
led_cdev = list_entry(entry, struct led_classdev, trig_list);
- led_blink_set(led_cdev, delay_on, delay_off);
+ if (oneshot)
+ led_blink_set_oneshot(led_cdev, delay_on, delay_off,
+ invert);
+ else
+ led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock(&trigger->leddev_list_lock);
+ read_unlock(&trig->leddev_list_lock);
+}
+
+void led_trigger_blink(struct led_trigger *trig,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ led_trigger_blink_setup(trig, delay_on, delay_off, 0, 0);
}
EXPORT_SYMBOL_GPL(led_trigger_blink);
+void led_trigger_blink_oneshot(struct led_trigger *trig,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert)
+{
+ led_trigger_blink_setup(trig, delay_on, delay_off, 1, invert);
+}
+EXPORT_SYMBOL_GPL(led_trigger_blink_oneshot);
+
void led_trigger_register_simple(const char *name, struct led_trigger **tp)
{
- struct led_trigger *trigger;
+ struct led_trigger *trig;
int err;
- trigger = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
+ trig = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
- if (trigger) {
- trigger->name = name;
- err = led_trigger_register(trigger);
+ if (trig) {
+ trig->name = name;
+ err = led_trigger_register(trig);
if (err < 0) {
- kfree(trigger);
- trigger = NULL;
+ kfree(trig);
+ trig = NULL;
printk(KERN_WARNING "LED trigger %s failed to register"
" (%d)\n", name, err);
}
@@ -270,15 +292,15 @@ void led_trigger_register_simple(const char *name, struct led_trigger **tp)
printk(KERN_WARNING "LED trigger %s failed to register"
" (no memory)\n", name);
- *tp = trigger;
+ *tp = trig;
}
EXPORT_SYMBOL_GPL(led_trigger_register_simple);
-void led_trigger_unregister_simple(struct led_trigger *trigger)
+void led_trigger_unregister_simple(struct led_trigger *trig)
{
- if (trigger)
- led_trigger_unregister(trigger);
- kfree(trigger);
+ if (trig)
+ led_trigger_unregister(trig);
+ kfree(trig);
}
EXPORT_SYMBOL_GPL(led_trigger_unregister_simple);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 5b61aaf7ac0f..61897cfeeda6 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -209,7 +209,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_led), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
strncpy(data->name, res->name, MFD_NAME_SIZE - 1);
@@ -220,7 +220,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "check device failed\n");
- kfree(data);
return -EINVAL;
}
@@ -233,13 +232,10 @@ static int pm860x_led_probe(struct platform_device *pdev)
ret = led_classdev_register(chip->dev, &data->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
- goto out;
+ return ret;
}
pm860x_led_set(&data->cdev, 0);
return 0;
-out:
- kfree(data);
- return ret;
}
static int pm860x_led_remove(struct platform_device *pdev)
@@ -247,7 +243,6 @@ static int pm860x_led_remove(struct platform_device *pdev)
struct pm860x_led *data = platform_get_drvdata(pdev);
led_classdev_unregister(&data->cdev);
- kfree(data);
return 0;
}
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index b1400db3f839..aa56a867693a 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -119,7 +119,8 @@ static int __devinit adp5520_led_probe(struct platform_device *pdev)
return -EFAULT;
}
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(*led) * pdata->num_leds,
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -129,7 +130,7 @@ static int __devinit adp5520_led_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to write\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; ++i) {
@@ -179,8 +180,6 @@ err:
}
}
-err_free:
- kfree(led);
return ret;
}
@@ -200,7 +199,6 @@ static int __devexit adp5520_led_remove(struct platform_device *pdev)
cancel_work_sync(&led[i].work);
}
- kfree(led);
return 0;
}
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 525a92492837..5de74ff90dcf 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -99,12 +99,13 @@ static int __devinit asic3_led_probe(struct platform_device *pdev)
ret = mfd_cell_enable(pdev);
if (ret < 0)
- goto ret0;
+ return ret;
- led->cdev = kzalloc(sizeof(struct led_classdev), GFP_KERNEL);
+ led->cdev = devm_kzalloc(&pdev->dev, sizeof(struct led_classdev),
+ GFP_KERNEL);
if (!led->cdev) {
ret = -ENOMEM;
- goto ret1;
+ goto out;
}
led->cdev->name = led->name;
@@ -115,15 +116,12 @@ static int __devinit asic3_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, led->cdev);
if (ret < 0)
- goto ret2;
+ goto out;
return 0;
-ret2:
- kfree(led->cdev);
-ret1:
+out:
(void) mfd_cell_disable(pdev);
-ret0:
return ret;
}
@@ -133,8 +131,6 @@ static int __devexit asic3_led_remove(struct platform_device *pdev)
led_classdev_unregister(led->cdev);
- kfree(led->cdev);
-
return mfd_cell_disable(pdev);
}
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 64ad702a2ecc..45430632faab 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -46,7 +46,8 @@ static int __devinit pwmled_probe(struct platform_device *pdev)
if (!pdata || pdata->num_leds < 1)
return -ENODEV;
- leds = kcalloc(pdata->num_leds, sizeof(*leds), GFP_KERNEL);
+ leds = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*leds),
+ GFP_KERNEL);
if (!leds)
return -ENOMEM;
@@ -108,7 +109,6 @@ err:
pwm_channel_free(&leds[i].pwmc);
}
}
- kfree(leds);
return status;
}
@@ -129,7 +129,6 @@ static int __exit pwmled_remove(struct platform_device *pdev)
pwm_channel_free(&led->pwmc);
}
- kfree(leds);
platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 591cbdf5a046..89ca6a2a19d1 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -677,7 +677,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
struct bd2802_led_platform_data *pdata;
int ret, i;
- led = kzalloc(sizeof(struct bd2802_led), GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL);
if (!led) {
dev_err(&client->dev, "failed to allocate driver data\n");
return -ENOMEM;
@@ -697,7 +697,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
ret = bd2802_write_byte(client, BD2802_REG_CLKSETUP, 0x00);
if (ret < 0) {
dev_err(&client->dev, "failed to detect device\n");
- goto failed_free;
+ return ret;
} else
dev_info(&client->dev, "return 0x%02x\n", ret);
@@ -729,9 +729,6 @@ static int __devinit bd2802_probe(struct i2c_client *client,
failed_unregister_dev_file:
for (i--; i >= 0; i--)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
-failed_free:
- kfree(led);
-
return ret;
}
@@ -746,7 +743,6 @@ static int __exit bd2802_remove(struct i2c_client *client)
bd2802_disable_adv_conf(led);
for (i = 0; i < ARRAY_SIZE(bd2802_attributes); i++)
device_remove_file(&led->client->dev, bd2802_attributes[i]);
- kfree(led);
return 0;
}
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
new file mode 100644
index 000000000000..f7c3d7f1ec52
--- /dev/null
+++ b/drivers/leds/leds-blinkm.c
@@ -0,0 +1,815 @@
+/*
+ * leds-blinkm.c
+ * (c) Jan-Simon Möller (dl9pf@gmx.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include <linux/printk.h>
+#include <linux/pm_runtime.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+
+/* Addresses to scan - BlinkM is on 0x09 by default */
+static const unsigned short normal_i2c[] = { 0x09, I2C_CLIENT_END };
+
+static int blinkm_transfer_hw(struct i2c_client *client, int cmd);
+static int blinkm_test_run(struct i2c_client *client);
+
+struct blinkm_led {
+ struct i2c_client *i2c_client;
+ struct led_classdev led_cdev;
+ int id;
+ atomic_t active;
+};
+
+struct blinkm_work {
+ struct blinkm_led *blinkm_led;
+ struct work_struct work;
+};
+
+#define cdev_to_blmled(c) container_of(c, struct blinkm_led, led_cdev)
+#define work_to_blmwork(c) container_of(c, struct blinkm_work, work)
+
+struct blinkm_data {
+ struct i2c_client *i2c_client;
+ struct mutex update_lock;
+ /* used for led class interface */
+ struct blinkm_led blinkm_leds[3];
+ /* used for "blinkm" sysfs interface */
+ u8 red; /* color red */
+ u8 green; /* color green */
+ u8 blue; /* color blue */
+ /* next values to use for transfer */
+ u8 next_red; /* color red */
+ u8 next_green; /* color green */
+ u8 next_blue; /* color blue */
+ /* internal use */
+ u8 args[7]; /* set of args for transmission */
+ u8 i2c_addr; /* i2c addr */
+ u8 fw_ver; /* firmware version */
+ /* used, but not from userspace */
+ u8 hue; /* HSB hue */
+ u8 saturation; /* HSB saturation */
+ u8 brightness; /* HSB brightness */
+ u8 next_hue; /* HSB hue */
+ u8 next_saturation; /* HSB saturation */
+ u8 next_brightness; /* HSB brightness */
+ /* currently unused / todo */
+ u8 fade_speed; /* fade speed 1 - 255 */
+ s8 time_adjust; /* time adjust -128 - 127 */
+ u8 fade:1; /* fade on = 1, off = 0 */
+ u8 rand:1; /* rand fade mode on = 1 */
+ u8 script_id; /* script ID */
+ u8 script_repeats; /* repeats of script */
+ u8 script_startline; /* line to start */
+};
+
+/* Colors */
+#define RED 0
+#define GREEN 1
+#define BLUE 2
+
+/* mapping command names to cmd chars - see datasheet */
+#define BLM_GO_RGB 0
+#define BLM_FADE_RGB 1
+#define BLM_FADE_HSB 2
+#define BLM_FADE_RAND_RGB 3
+#define BLM_FADE_RAND_HSB 4
+#define BLM_PLAY_SCRIPT 5
+#define BLM_STOP_SCRIPT 6
+#define BLM_SET_FADE_SPEED 7
+#define BLM_SET_TIME_ADJ 8
+#define BLM_GET_CUR_RGB 9
+#define BLM_WRITE_SCRIPT_LINE 10
+#define BLM_READ_SCRIPT_LINE 11
+#define BLM_SET_SCRIPT_LR 12 /* Length & Repeats */
+#define BLM_SET_ADDR 13
+#define BLM_GET_ADDR 14
+#define BLM_GET_FW_VER 15
+#define BLM_SET_STARTUP_PARAM 16
+
+/* BlinkM Commands
+ * as extracted out of the datasheet:
+ *
+ * cmdchar = command (ascii)
+ * cmdbyte = command in hex
+ * nr_args = number of arguments (to send)
+ * nr_ret = number of return values (to read)
+ * dir = direction (0 = read, 1 = write, 2 = both)
+ *
+ */
+static const struct {
+ char cmdchar;
+ u8 cmdbyte;
+ u8 nr_args;
+ u8 nr_ret;
+ u8 dir:2;
+} blinkm_cmds[17] = {
+ /* cmdchar, cmdbyte, nr_args, nr_ret, dir */
+ { 'n', 0x6e, 3, 0, 1},
+ { 'c', 0x63, 3, 0, 1},
+ { 'h', 0x68, 3, 0, 1},
+ { 'C', 0x43, 3, 0, 1},
+ { 'H', 0x48, 3, 0, 1},
+ { 'p', 0x70, 3, 0, 1},
+ { 'o', 0x6f, 0, 0, 1},
+ { 'f', 0x66, 1, 0, 1},
+ { 't', 0x74, 1, 0, 1},
+ { 'g', 0x67, 0, 3, 0},
+ { 'W', 0x57, 7, 0, 1},
+ { 'R', 0x52, 2, 5, 2},
+ { 'L', 0x4c, 3, 0, 1},
+ { 'A', 0x41, 4, 0, 1},
+ { 'a', 0x61, 0, 1, 0},
+ { 'Z', 0x5a, 0, 1, 0},
+ { 'B', 0x42, 5, 0, 1},
+};
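+/* Example: BLM_GET_CUR_RGB (index 9) maps to { 'g', 0x67, 0, 3, 0 }: the
+ * command byte is written, then three bytes (current R, G, B) are read back. */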
+
+static ssize_t show_color_common(struct device *dev, char *buf, int color)
+{
+ struct i2c_client *client;
+ struct blinkm_data *data;
+ int ret;
+
+ client = to_i2c_client(dev);
+ data = i2c_get_clientdata(client);
+
+ ret = blinkm_transfer_hw(client, BLM_GET_CUR_RGB);
+ if (ret < 0)
+ return ret;
+ switch (color) {
+ case RED:
+ return scnprintf(buf, PAGE_SIZE, "%02X\n", data->red);
+ break;
+ case GREEN:
+ return scnprintf(buf, PAGE_SIZE, "%02X\n", data->green);
+ break;
+ case BLUE:
+ return scnprintf(buf, PAGE_SIZE, "%02X\n", data->blue);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int store_color_common(struct device *dev, const char *buf, int color)
+{
+ struct i2c_client *client;
+ struct blinkm_data *data;
+ int ret;
+ u8 value;
+
+ client = to_i2c_client(dev);
+ data = i2c_get_clientdata(client);
+
+ ret = kstrtou8(buf, 10, &value);
+ if (ret < 0) {
+ dev_err(dev, "BlinkM: value too large!\n");
+ return ret;
+ }
+
+ switch (color) {
+ case RED:
+ data->next_red = value;
+ break;
+ case GREEN:
+ data->next_green = value;
+ break;
+ case BLUE:
+ data->next_blue = value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "next_red = %d, next_green = %d, next_blue = %d\n",
+ data->next_red, data->next_green, data->next_blue);
+
+ /* if mode ... */
+ ret = blinkm_transfer_hw(client, BLM_GO_RGB);
+ if (ret < 0) {
+ dev_err(dev, "BlinkM: can't set RGB\n");
+ return ret;
+ }
+ return 0;
+}
+
+static ssize_t show_red(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return show_color_common(dev, buf, RED);
+}
+
+static ssize_t store_red(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = store_color_common(dev, buf, RED);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static DEVICE_ATTR(red, S_IRUGO | S_IWUSR, show_red, store_red);
+
+static ssize_t show_green(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return show_color_common(dev, buf, GREEN);
+}
+
+static ssize_t store_green(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = store_color_common(dev, buf, GREEN);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static DEVICE_ATTR(green, S_IRUGO | S_IWUSR, show_green, store_green);
+
+static ssize_t show_blue(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return show_color_common(dev, buf, BLUE);
+}
+
+static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+
+ ret = store_color_common(dev, buf, BLUE);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static DEVICE_ATTR(blue, S_IRUGO | S_IWUSR, show_blue, store_blue);
+
+static ssize_t show_test(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE,
+ "#Write into test to start test sequence!#\n");
+}
+
+static ssize_t store_test(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client;
+ int ret;
+ client = to_i2c_client(dev);
+
+ /*test */
+ ret = blinkm_test_run(client);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR, show_test, store_test);
+
+/* TODO: HSB, fade, timeadj, script ... */
+
+static struct attribute *blinkm_attrs[] = {
+ &dev_attr_red.attr,
+ &dev_attr_green.attr,
+ &dev_attr_blue.attr,
+ &dev_attr_test.attr,
+ NULL,
+};
+
+static struct attribute_group blinkm_group = {
+ .name = "blinkm",
+ .attrs = blinkm_attrs,
+};
+
+static int blinkm_write(struct i2c_client *client, int cmd, u8 *arg)
+{
+ int result;
+ int i;
+ int arglen = blinkm_cmds[cmd].nr_args;
+ /* write out cmd to blinkm - always / default step */
+ result = i2c_smbus_write_byte(client, blinkm_cmds[cmd].cmdbyte);
+ if (result < 0)
+ return result;
+ /* no args to write out */
+ if (arglen == 0)
+ return 0;
+
+ for (i = 0; i < arglen; i++) {
+ /* repeat for arglen */
+ result = i2c_smbus_write_byte(client, arg[i]);
+ if (result < 0)
+ return result;
+ }
+ return 0;
+}
+
+static int blinkm_read(struct i2c_client *client, int cmd, u8 *arg)
+{
+ int result;
+ int i;
+ int retlen = blinkm_cmds[cmd].nr_ret;
+ for (i = 0; i < retlen; i++) {
+ /* repeat for retlen */
+ result = i2c_smbus_read_byte(client);
+ if (result < 0)
+ return result;
+ arg[i] = result;
+ }
+
+ return 0;
+}
+
+static int blinkm_transfer_hw(struct i2c_client *client, int cmd)
+{
+ /* the protocol is simple but non-standard:
+ * e.g. cmd 'g' (= 0x67) for "get device address"
+ * - which defaults to 0x09 - would be the sequence:
+ * a) write 0x67 to the device (byte write)
+ * b) read the value (0x09) back right after (byte read)
+ *
+ * Watch out for "unfinished" sequences (i.e. not enough reads
+ * or writes after a command). They will make the BlinkM misbehave.
+ * Sequence is key here.
+ */
+
+ /* args / return are in private data struct */
+ struct blinkm_data *data = i2c_get_clientdata(client);
+
+ /* We start hardware transfers which must not be
+ * mixed with other commands. Acquire the lock now. */
+ if (mutex_lock_interruptible(&data->update_lock) < 0)
+ return -EAGAIN;
+
+ /* switch cmd - usually write before reads */
+ switch (cmd) {
+ case BLM_FADE_RAND_RGB:
+ case BLM_GO_RGB:
+ case BLM_FADE_RGB:
+ data->args[0] = data->next_red;
+ data->args[1] = data->next_green;
+ data->args[2] = data->next_blue;
+ blinkm_write(client, cmd, data->args);
+ data->red = data->args[0];
+ data->green = data->args[1];
+ data->blue = data->args[2];
+ break;
+ case BLM_FADE_HSB:
+ case BLM_FADE_RAND_HSB:
+ data->args[0] = data->next_hue;
+ data->args[1] = data->next_saturation;
+ data->args[2] = data->next_brightness;
+ blinkm_write(client, cmd, data->args);
+ data->hue = data->next_hue;
+ data->saturation = data->next_saturation;
+ data->brightness = data->next_brightness;
+ break;
+ case BLM_PLAY_SCRIPT:
+ data->args[0] = data->script_id;
+ data->args[1] = data->script_repeats;
+ data->args[2] = data->script_startline;
+ blinkm_write(client, cmd, data->args);
+ break;
+ case BLM_STOP_SCRIPT:
+ blinkm_write(client, cmd, NULL);
+ break;
+ case BLM_GET_CUR_RGB:
+ data->args[0] = data->red;
+ data->args[1] = data->green;
+ data->args[2] = data->blue;
+ blinkm_write(client, cmd, NULL);
+ blinkm_read(client, cmd, data->args);
+ data->red = data->args[0];
+ data->green = data->args[1];
+ data->blue = data->args[2];
+ break;
+ case BLM_GET_ADDR:
+ data->args[0] = data->i2c_addr;
+ blinkm_write(client, cmd, NULL);
+ blinkm_read(client, cmd, data->args);
+ data->i2c_addr = data->args[0];
+ break;
+ case BLM_SET_TIME_ADJ:
+ case BLM_SET_FADE_SPEED:
+ case BLM_READ_SCRIPT_LINE:
+ case BLM_WRITE_SCRIPT_LINE:
+ case BLM_SET_SCRIPT_LR:
+ case BLM_SET_ADDR:
+ case BLM_GET_FW_VER:
+ case BLM_SET_STARTUP_PARAM:
+ dev_err(&client->dev,
+ "BlinkM: cmd %d not implemented yet.\n", cmd);
+ break;
+ default:
+ dev_err(&client->dev, "BlinkM: unknown command %d\n", cmd);
+ mutex_unlock(&data->update_lock);
+ return -EINVAL;
+ } /* end switch(cmd) */
+
+ /* transfers done, unlock */
+ mutex_unlock(&data->update_lock);
+ return 0;
+}
+
+static void led_work(struct work_struct *work)
+{
+ int ret;
+ struct blinkm_led *led;
+ struct blinkm_data *data;
+ struct blinkm_work *blm_work = work_to_blmwork(work);
+
+ led = blm_work->blinkm_led;
+ data = i2c_get_clientdata(led->i2c_client);
+ ret = blinkm_transfer_hw(led->i2c_client, BLM_GO_RGB);
+ atomic_dec(&led->active);
+ dev_dbg(&led->i2c_client->dev,
+ "# DONE # next_red = %d, next_green = %d,"
+ " next_blue = %d, active = %d\n",
+ data->next_red, data->next_green,
+ data->next_blue, atomic_read(&led->active));
+ kfree(blm_work);
+}
+
+static int blinkm_led_common_set(struct led_classdev *led_cdev,
+ enum led_brightness value, int color)
+{
+ /* led_brightness is 0, 127 or 255 - we just use it here as-is */
+ struct blinkm_led *led = cdev_to_blmled(led_cdev);
+ struct blinkm_data *data = i2c_get_clientdata(led->i2c_client);
+ struct blinkm_work *bl_work;
+
+ switch (color) {
+ case RED:
+ /* bail out if there's no change */
+ if (data->next_red == (u8) value)
+ return 0;
+ /* we assume a quite fast sequence here ([off]->on->off);
+ * think of the network LED trigger - we cannot blink that fast, so
+ * in case we already have an off->on->off transition queued up,
+ * we refuse to queue up more.
+ * Revisit: fast-changing brightness. */
+ if (atomic_read(&led->active) > 1)
+ return 0;
+ data->next_red = (u8) value;
+ break;
+ case GREEN:
+ /* bail out if there's no change */
+ if (data->next_green == (u8) value)
+ return 0;
+ /* we assume a quite fast sequence here ([off]->on->off)
+ * Revisit: fast-changing brightness. */
+ if (atomic_read(&led->active) > 1)
+ return 0;
+ data->next_green = (u8) value;
+ break;
+ case BLUE:
+ /* bail out if there's no change */
+ if (data->next_blue == (u8) value)
+ return 0;
+ /* we assume a quite fast sequence here ([off]->on->off)
+ * Revisit: fast-changing brightness. */
+ if (atomic_read(&led->active) > 1)
+ return 0;
+ data->next_blue = (u8) value;
+ break;
+
+ default:
+ dev_err(&led->i2c_client->dev, "BlinkM: unknown color.\n");
+ return -EINVAL;
+ }
+
+ bl_work = kzalloc(sizeof(*bl_work), GFP_ATOMIC);
+ if (!bl_work)
+ return -ENOMEM;
+
+ atomic_inc(&led->active);
+ dev_dbg(&led->i2c_client->dev,
+ "#TO_SCHED# next_red = %d, next_green = %d,"
+ " next_blue = %d, active = %d\n",
+ data->next_red, data->next_green,
+ data->next_blue, atomic_read(&led->active));
+
+ /* a fresh work _item_ for each change */
+ bl_work->blinkm_led = led;
+ INIT_WORK(&bl_work->work, led_work);
+ /* queue the work; it is flushed in blinkm_remove() */
+ schedule_work(&bl_work->work);
+
+ return 0;
+}
+
+static void blinkm_led_red_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ blinkm_led_common_set(led_cdev, value, RED);
+}
+
+static void blinkm_led_green_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ blinkm_led_common_set(led_cdev, value, GREEN);
+}
+
+static void blinkm_led_blue_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ blinkm_led_common_set(led_cdev, value, BLUE);
+}
+
+static void blinkm_init_hw(struct i2c_client *client)
+{
+ int ret;
+ ret = blinkm_transfer_hw(client, BLM_STOP_SCRIPT);
+ ret = blinkm_transfer_hw(client, BLM_GO_RGB);
+}
+
+static int blinkm_test_run(struct i2c_client *client)
+{
+ int ret;
+ struct blinkm_data *data = i2c_get_clientdata(client);
+
+ data->next_red = 0x01;
+ data->next_green = 0x05;
+ data->next_blue = 0x10;
+ ret = blinkm_transfer_hw(client, BLM_GO_RGB);
+ if (ret < 0)
+ return ret;
+ msleep(2000);
+
+ data->next_red = 0x25;
+ data->next_green = 0x10;
+ data->next_blue = 0x31;
+ ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
+ if (ret < 0)
+ return ret;
+ msleep(2000);
+
+ data->next_hue = 0x50;
+ data->next_saturation = 0x10;
+ data->next_brightness = 0x20;
+ ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
+ if (ret < 0)
+ return ret;
+ msleep(2000);
+
+ return 0;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int ret;
+ int count = 99;
+ u8 tmpargs[7];
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ /* Now, we do the remaining detection. Simple for now. */
+ /* We might need more guards to protect other i2c slaves */
+
+ /* make sure the blinkM is balanced (read/writes) */
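+ /* up to 99 write/read cycles until the device reports its default
+ * address (0x09), so any unfinished command sequence is flushed */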
+ while (count > 0) {
+ ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ usleep_range(5000, 10000);
+ ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ usleep_range(5000, 10000);
+ if (tmpargs[0] == 0x09)
+ count = 0;
+ count--;
+ }
+
+ /* Step 1: Read BlinkM address back - cmd_char 'a' */
+ ret = blinkm_write(client, BLM_GET_ADDR, NULL);
+ if (ret < 0)
+ return ret;
+ usleep_range(20000, 30000); /* allow a small delay */
+ ret = blinkm_read(client, BLM_GET_ADDR, tmpargs);
+ if (ret < 0)
+ return ret;
+
+ if (tmpargs[0] != 0x09) {
+ dev_err(&client->dev, "enodev DEV ADDR = 0x%02X\n", tmpargs[0]);
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "blinkm", I2C_NAME_SIZE);
+ return 0;
+}
+
+static int __devinit blinkm_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct blinkm_data *data;
+ struct blinkm_led *led[3];
+ int err, i;
+ char blinkm_led_name[28];
+
+ data = devm_kzalloc(&client->dev,
+ sizeof(struct blinkm_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ data->i2c_addr = 0x09;
+ data->i2c_addr = 0x08;
+ /* i2c addr - use fake addr of 0x08 initially (real is 0x09) */
+ data->fw_ver = 0xfe;
+ /* firmware version - use fake until we read real value
+ * (currently broken - BlinkM confused!) */
+ data->script_id = 0x01;
+ data->i2c_client = client;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &blinkm_group);
+ if (err < 0) {
+ dev_err(&client->dev, "couldn't register sysfs group\n");
+ goto exit;
+ }
+
+ for (i = 0; i < 3; i++) {
+ /* RED = 0, GREEN = 1, BLUE = 2 */
+ led[i] = &data->blinkm_leds[i];
+ led[i]->i2c_client = client;
+ led[i]->id = i;
+ led[i]->led_cdev.max_brightness = 255;
+ led[i]->led_cdev.flags = LED_CORE_SUSPENDRESUME;
+ atomic_set(&led[i]->active, 0);
+ switch (i) {
+ case RED:
+ snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ "blinkm-%d-%d-red",
+ client->adapter->nr,
+ client->addr);
+ led[i]->led_cdev.name = blinkm_led_name;
+ led[i]->led_cdev.brightness_set = blinkm_led_red_set;
+ err = led_classdev_register(&client->dev,
+ &led[i]->led_cdev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led[i]->led_cdev.name);
+ goto failred;
+ }
+ break;
+ case GREEN:
+ snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ "blinkm-%d-%d-green",
+ client->adapter->nr,
+ client->addr);
+ led[i]->led_cdev.name = blinkm_led_name;
+ led[i]->led_cdev.brightness_set = blinkm_led_green_set;
+ err = led_classdev_register(&client->dev,
+ &led[i]->led_cdev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led[i]->led_cdev.name);
+ goto failgreen;
+ }
+ break;
+ case BLUE:
+ snprintf(blinkm_led_name, sizeof(blinkm_led_name),
+ "blinkm-%d-%d-blue",
+ client->adapter->nr,
+ client->addr);
+ led[i]->led_cdev.name = blinkm_led_name;
+ led[i]->led_cdev.brightness_set = blinkm_led_blue_set;
+ err = led_classdev_register(&client->dev,
+ &led[i]->led_cdev);
+ if (err < 0) {
+ dev_err(&client->dev,
+ "couldn't register LED %s\n",
+ led[i]->led_cdev.name);
+ goto failblue;
+ }
+ break;
+ } /* end switch */
+ } /* end for */
+
+ /* Initialize the blinkm */
+ blinkm_init_hw(client);
+
+ return 0;
+
+failblue:
+ led_classdev_unregister(&led[GREEN]->led_cdev);
+
+failgreen:
+ led_classdev_unregister(&led[RED]->led_cdev);
+
+failred:
+ sysfs_remove_group(&client->dev.kobj, &blinkm_group);
+exit:
+ return err;
+}
+
+static int __devexit blinkm_remove(struct i2c_client *client)
+{
+ struct blinkm_data *data = i2c_get_clientdata(client);
+ int ret = 0;
+ int i;
+
+ /* make sure no workqueue entries are pending */
+ for (i = 0; i < 3; i++) {
+ flush_scheduled_work();
+ led_classdev_unregister(&data->blinkm_leds[i].led_cdev);
+ }
+
+ /* reset rgb */
+ data->next_red = 0x00;
+ data->next_green = 0x00;
+ data->next_blue = 0x00;
+ ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
+ if (ret < 0)
+ dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
+
+ /* reset hsb */
+ data->next_hue = 0x00;
+ data->next_saturation = 0x00;
+ data->next_brightness = 0x00;
+ ret = blinkm_transfer_hw(client, BLM_FADE_HSB);
+ if (ret < 0)
+ dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
+
+ /* red fade to off */
+ data->next_red = 0xff;
+ ret = blinkm_transfer_hw(client, BLM_GO_RGB);
+ if (ret < 0)
+ dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
+
+ /* off */
+ data->next_red = 0x00;
+ ret = blinkm_transfer_hw(client, BLM_FADE_RGB);
+ if (ret < 0)
+ dev_err(&client->dev, "Failure in blinkm_remove ignored. Continuing.\n");
+
+ sysfs_remove_group(&client->dev.kobj, &blinkm_group);
+ return 0;
+}
+
+static const struct i2c_device_id blinkm_id[] = {
+ {"blinkm", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, blinkm_id);
+
+ /* This is the driver that will be inserted */
+static struct i2c_driver blinkm_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "blinkm",
+ },
+ .probe = blinkm_probe,
+ .remove = __devexit_p(blinkm_remove),
+ .id_table = blinkm_id,
+ .detect = blinkm_detect,
+ .address_list = normal_i2c,
+};
+
+module_i2c_driver(blinkm_driver);
+
+MODULE_AUTHOR("Jan-Simon Moeller <dl9pf@gmx.de>");
+MODULE_DESCRIPTION("BlinkM RGB LED driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index d9cd73ebd6c4..cc77c9d92615 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -108,7 +108,7 @@ static int __devinit da903x_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kzalloc(sizeof(struct da903x_led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(struct da903x_led), GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory for LED%d\n", id);
return -ENOMEM;
@@ -129,15 +129,11 @@ static int __devinit da903x_led_probe(struct platform_device *pdev)
ret = led_classdev_register(led->master, &led->cdev);
if (ret) {
dev_err(&pdev->dev, "failed to register LED %d\n", id);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, led);
return 0;
-
-err:
- kfree(led);
- return ret;
}
static int __devexit da903x_led_remove(struct platform_device *pdev)
@@ -145,7 +141,6 @@ static int __devexit da903x_led_remove(struct platform_device *pdev)
struct da903x_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
- kfree(led);
return 0;
}
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index d56c14269ff0..1f9d8e62d37e 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -69,7 +69,7 @@ static int dac124s085_probe(struct spi_device *spi)
struct dac124s085_led *led;
int i, ret;
- dac = kzalloc(sizeof(*dac), GFP_KERNEL);
+ dac = devm_kzalloc(&spi->dev, sizeof(*dac), GFP_KERNEL);
if (!dac)
return -ENOMEM;
@@ -102,7 +102,6 @@ eledcr:
led_classdev_unregister(&dac->leds[i].ldev);
spi_set_drvdata(spi, NULL);
- kfree(dac);
return ret;
}
@@ -117,7 +116,6 @@ static int dac124s085_remove(struct spi_device *spi)
}
spi_set_drvdata(spi, NULL);
- kfree(dac);
return 0;
}
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index f4c470a3bc8d..c032b2180340 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -178,7 +178,8 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
if (!count)
return NULL;
- priv = kzalloc(sizeof_gpio_leds_priv(count), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof_gpio_leds_priv(count),
+ GFP_KERNEL);
if (!priv)
return NULL;
@@ -215,7 +216,6 @@ static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_dev
err:
for (count = priv->num_leds - 2; count >= 0; count--)
delete_gpio_led(&priv->leds[count]);
- kfree(priv);
return NULL;
}
@@ -239,8 +239,9 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
int i, ret = 0;
if (pdata && pdata->num_leds) {
- priv = kzalloc(sizeof_gpio_leds_priv(pdata->num_leds),
- GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof_gpio_leds_priv(pdata->num_leds),
+ GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -253,7 +254,6 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
/* On failure: unwind the led creations */
for (i = i - 1; i >= 0; i--)
delete_gpio_led(&priv->leds[i]);
- kfree(priv);
return ret;
}
}
@@ -277,7 +277,6 @@ static int __devexit gpio_led_remove(struct platform_device *pdev)
delete_gpio_led(&priv->leds[i]);
dev_set_drvdata(&pdev->dev, NULL);
- kfree(priv);
return 0;
}
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 84ba6de8039c..23637bdb275d 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -386,28 +386,24 @@ static int __devinit lm3530_probe(struct i2c_client *client,
if (pdata == NULL) {
dev_err(&client->dev, "platform data required\n");
- err = -ENODEV;
- goto err_out;
+ return -ENODEV;
}
/* BL mode */
if (pdata->mode > LM3530_BL_MODE_PWM) {
dev_err(&client->dev, "Illegal Mode request\n");
- err = -EINVAL;
- goto err_out;
+ return -EINVAL;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "I2C_FUNC_I2C not supported\n");
- err = -EIO;
- goto err_out;
+ return -EIO;
}
- drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL);
- if (drvdata == NULL) {
- err = -ENOMEM;
- goto err_out;
- }
+ drvdata = devm_kzalloc(&client->dev, sizeof(struct lm3530_data),
+ GFP_KERNEL);
+ if (drvdata == NULL)
+ return -ENOMEM;
drvdata->mode = pdata->mode;
drvdata->client = client;
@@ -425,7 +421,7 @@ static int __devinit lm3530_probe(struct i2c_client *client,
dev_err(&client->dev, "regulator get failed\n");
err = PTR_ERR(drvdata->regulator);
drvdata->regulator = NULL;
- goto err_regulator_get;
+ return err;
}
if (drvdata->pdata->brt_val) {
@@ -458,9 +454,6 @@ err_create_file:
err_class_register:
err_reg_init:
regulator_put(drvdata->regulator);
-err_regulator_get:
- kfree(drvdata);
-err_out:
return err;
}
@@ -474,7 +467,6 @@ static int __devexit lm3530_remove(struct i2c_client *client)
regulator_disable(drvdata->regulator);
regulator_put(drvdata->regulator);
led_classdev_unregister(&drvdata->led_dev);
- kfree(drvdata);
return 0;
}
diff --git a/drivers/leds/leds-lm3556.c b/drivers/leds/leds-lm3556.c
new file mode 100644
index 000000000000..3062abd9a532
--- /dev/null
+++ b/drivers/leds/leds-lm3556.c
@@ -0,0 +1,512 @@
+/*
+ * Simple driver for Texas Instruments LM3556 LED Flash driver chip (Rev0x03)
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Please refer to Documentation/leds/leds-lm3556.txt.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/regmap.h>
+#include <linux/platform_data/leds-lm3556.h>
+
+#define REG_FILT_TIME (0x0)
+#define REG_IVFM_MODE (0x1)
+#define REG_NTC (0x2)
+#define REG_INDIC_TIME (0x3)
+#define REG_INDIC_BLINK (0x4)
+#define REG_INDIC_PERIOD (0x5)
+#define REG_TORCH_TIME (0x6)
+#define REG_CONF (0x7)
+#define REG_FLASH (0x8)
+#define REG_I_CTRL (0x9)
+#define REG_ENABLE (0xA)
+#define REG_FLAG (0xB)
+#define REG_MAX (0xB)
+
+#define IVFM_FILTER_TIME_SHIFT (3)
+#define UVLO_EN_SHIFT (7)
+#define HYSTERSIS_SHIFT (5)
+#define IVM_D_TH_SHIFT (2)
+#define IVFM_ADJ_MODE_SHIFT (0)
+#define NTC_EVENT_LVL_SHIFT (5)
+#define NTC_TRIP_TH_SHIFT (2)
+#define NTC_BIAS_I_LVL_SHIFT (0)
+#define INDIC_RAMP_UP_TIME_SHIFT (3)
+#define INDIC_RAMP_DN_TIME_SHIFT (0)
+#define INDIC_N_BLANK_SHIFT (4)
+#define INDIC_PULSE_TIME_SHIFT (0)
+#define INDIC_N_PERIOD_SHIFT (0)
+#define TORCH_RAMP_UP_TIME_SHIFT (3)
+#define TORCH_RAMP_DN_TIME_SHIFT (0)
+#define STROBE_USUAGE_SHIFT (7)
+#define STROBE_PIN_POLARITY_SHIFT (6)
+#define TORCH_PIN_POLARITY_SHIFT (5)
+#define TX_PIN_POLARITY_SHIFT (4)
+#define TX_EVENT_LVL_SHIFT (3)
+#define IVFM_EN_SHIFT (2)
+#define NTC_MODE_SHIFT (1)
+#define INDIC_MODE_SHIFT (0)
+#define INDUCTOR_I_LIMIT_SHIFT (6)
+#define FLASH_RAMP_TIME_SHIFT (3)
+#define FLASH_TOUT_TIME_SHIFT (0)
+#define TORCH_I_SHIFT (4)
+#define FLASH_I_SHIFT (0)
+#define NTC_EN_SHIFT (7)
+#define TX_PIN_EN_SHIFT (6)
+#define STROBE_PIN_EN_SHIFT (5)
+#define TORCH_PIN_EN_SHIFT (4)
+#define PRECHG_MODE_EN_SHIFT (3)
+#define PASS_MODE_ONLY_EN_SHIFT (2)
+#define MODE_BITS_SHIFT (0)
+
+#define IVFM_FILTER_TIME_MASK (0x3)
+#define UVLO_EN_MASK (0x1)
+#define HYSTERSIS_MASK (0x3)
+#define IVM_D_TH_MASK (0x7)
+#define IVFM_ADJ_MODE_MASK (0x3)
+#define NTC_EVENT_LVL_MASK (0x1)
+#define NTC_TRIP_TH_MASK (0x7)
+#define NTC_BIAS_I_LVL_MASK (0x3)
+#define INDIC_RAMP_UP_TIME_MASK (0x7)
+#define INDIC_RAMP_DN_TIME_MASK (0x7)
+#define INDIC_N_BLANK_MASK (0x7)
+#define INDIC_PULSE_TIME_MASK (0x7)
+#define INDIC_N_PERIOD_MASK (0x7)
+#define TORCH_RAMP_UP_TIME_MASK (0x7)
+#define TORCH_RAMP_DN_TIME_MASK (0x7)
+#define STROBE_USUAGE_MASK (0x1)
+#define STROBE_PIN_POLARITY_MASK (0x1)
+#define TORCH_PIN_POLARITY_MASK (0x1)
+#define TX_PIN_POLARITY_MASK (0x1)
+#define TX_EVENT_LVL_MASK (0x1)
+#define IVFM_EN_MASK (0x1)
+#define NTC_MODE_MASK (0x1)
+#define INDIC_MODE_MASK (0x1)
+#define INDUCTOR_I_LIMIT_MASK (0x3)
+#define FLASH_RAMP_TIME_MASK (0x7)
+#define FLASH_TOUT_TIME_MASK (0x7)
+#define TORCH_I_MASK (0x7)
+#define FLASH_I_MASK (0xF)
+#define NTC_EN_MASK (0x1)
+#define TX_PIN_EN_MASK (0x1)
+#define STROBE_PIN_EN_MASK (0x1)
+#define TORCH_PIN_EN_MASK (0x1)
+#define PRECHG_MODE_EN_MASK (0x1)
+#define PASS_MODE_ONLY_EN_MASK (0x1)
+#define MODE_BITS_MASK (0x13)
+#define EX_PIN_CONTROL_MASK (0xF1)
+#define EX_PIN_ENABLE_MASK (0x70)
+
+enum lm3556_indic_pulse_time {
+ PULSE_TIME_0_MS = 0,
+ PULSE_TIME_32_MS,
+ PULSE_TIME_64_MS,
+ PULSE_TIME_92_MS,
+ PULSE_TIME_128_MS,
+ PULSE_TIME_160_MS,
+ PULSE_TIME_196_MS,
+ PULSE_TIME_224_MS,
+ PULSE_TIME_256_MS,
+ PULSE_TIME_288_MS,
+ PULSE_TIME_320_MS,
+ PULSE_TIME_352_MS,
+ PULSE_TIME_384_MS,
+ PULSE_TIME_416_MS,
+ PULSE_TIME_448_MS,
+ PULSE_TIME_480_MS,
+};
+
+enum lm3556_indic_n_blank {
+ INDIC_N_BLANK_0 = 0,
+ INDIC_N_BLANK_1,
+ INDIC_N_BLANK_2,
+ INDIC_N_BLANK_3,
+ INDIC_N_BLANK_4,
+ INDIC_N_BLANK_5,
+ INDIC_N_BLANK_6,
+ INDIC_N_BLANK_7,
+ INDIC_N_BLANK_8,
+ INDIC_N_BLANK_9,
+ INDIC_N_BLANK_10,
+ INDIC_N_BLANK_11,
+ INDIC_N_BLANK_12,
+ INDIC_N_BLANK_13,
+ INDIC_N_BLANK_14,
+ INDIC_N_BLANK_15,
+};
+
+enum lm3556_indic_period {
+ INDIC_PERIOD_0 = 0,
+ INDIC_PERIOD_1,
+ INDIC_PERIOD_2,
+ INDIC_PERIOD_3,
+ INDIC_PERIOD_4,
+ INDIC_PERIOD_5,
+ INDIC_PERIOD_6,
+ INDIC_PERIOD_7,
+};
+
+enum lm3556_mode {
+ MODES_STASNDBY = 0,
+ MODES_INDIC,
+ MODES_TORCH,
+ MODES_FLASH
+};
+
+#define INDIC_PATTERN_SIZE 4
+
+struct indicator {
+ u8 blinking;
+ u8 period_cnt;
+};
+
+struct lm3556_chip_data {
+ struct device *dev;
+
+ struct led_classdev cdev_flash;
+ struct led_classdev cdev_torch;
+ struct led_classdev cdev_indicator;
+
+ struct lm3556_platform_data *pdata;
+ struct regmap *regmap;
+ struct mutex lock;
+
+ unsigned int last_flag;
+};
+
+/* indicator pattern */
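+/* .blinking packs (n_blank << INDIC_N_BLANK_SHIFT) | pulse_time and is written
+ * to REG_INDIC_BLINK; .period_cnt is an INDIC_PERIOD_* value for REG_INDIC_PERIOD */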
+static struct indicator indicator_pattern[INDIC_PATTERN_SIZE] = {
+ [0] = {(INDIC_N_BLANK_1 << INDIC_N_BLANK_SHIFT)
+ | PULSE_TIME_32_MS, INDIC_PERIOD_1},
+ [1] = {(INDIC_N_BLANK_15 << INDIC_N_BLANK_SHIFT)
+ | PULSE_TIME_32_MS, INDIC_PERIOD_2},
+ [2] = {(INDIC_N_BLANK_10 << INDIC_N_BLANK_SHIFT)
+ | PULSE_TIME_32_MS, INDIC_PERIOD_4},
+ [3] = {(INDIC_N_BLANK_5 << INDIC_N_BLANK_SHIFT)
+ | PULSE_TIME_32_MS, INDIC_PERIOD_7},
+};
+
+/* chip initialize */
+static int __devinit lm3556_chip_init(struct lm3556_chip_data *chip)
+{
+ unsigned int reg_val;
+ int ret;
+ struct lm3556_platform_data *pdata = chip->pdata;
+
+ /* set config register */
+ ret = regmap_read(chip->regmap, REG_CONF, &reg_val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read REG_CONF Register\n");
+ goto out;
+ }
+
+ reg_val &= (~EX_PIN_CONTROL_MASK);
+ reg_val |= ((pdata->torch_pin_polarity & 0x01)
+ << TORCH_PIN_POLARITY_SHIFT);
+ reg_val |= ((pdata->strobe_usuage & 0x01) << STROBE_USUAGE_SHIFT);
+ reg_val |= ((pdata->strobe_pin_polarity & 0x01)
+ << STROBE_PIN_POLARITY_SHIFT);
+ reg_val |= ((pdata->tx_pin_polarity & 0x01) << TX_PIN_POLARITY_SHIFT);
+ reg_val |= ((pdata->indicator_mode & 0x01) << INDIC_MODE_SHIFT);
+
+ ret = regmap_write(chip->regmap, REG_CONF, reg_val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write REG_CONF Regisgter\n");
+ goto out;
+ }
+
+ /* set enable register */
+ ret = regmap_read(chip->regmap, REG_ENABLE, &reg_val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read REG_ENABLE Register\n");
+ goto out;
+ }
+
+ reg_val &= (~EX_PIN_ENABLE_MASK);
+ reg_val |= ((pdata->torch_pin_en & 0x01) << TORCH_PIN_EN_SHIFT);
+ reg_val |= ((pdata->strobe_pin_en & 0x01) << STROBE_PIN_EN_SHIFT);
+ reg_val |= ((pdata->tx_pin_en & 0x01) << TX_PIN_EN_SHIFT);
+
+ ret = regmap_write(chip->regmap, REG_ENABLE, reg_val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/* chip control */
+static int lm3556_control(struct lm3556_chip_data *chip,
+ u8 brightness, enum lm3556_mode opmode)
+{
+ int ret;
+ struct lm3556_platform_data *pdata = chip->pdata;
+
+ ret = regmap_read(chip->regmap, REG_FLAG, &chip->last_flag);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read REG_FLAG Register\n");
+ goto out;
+ }
+
+ if (chip->last_flag)
+ dev_info(chip->dev, "Last FLAG is 0x%x\n", chip->last_flag);
+
+ /* brightness 0 means off-state */
+ if (!brightness)
+ opmode = MODES_STASNDBY;
+
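+ /* a non-zero brightness is programmed below as a current code of (brightness - 1) */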
+ switch (opmode) {
+ case MODES_TORCH:
+ ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
+ TORCH_I_MASK << TORCH_I_SHIFT,
+ (brightness - 1) << TORCH_I_SHIFT);
+
+ if (pdata->torch_pin_en)
+ opmode |= (TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT);
+ break;
+
+ case MODES_FLASH:
+ ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
+ FLASH_I_MASK << FLASH_I_SHIFT,
+ (brightness - 1) << FLASH_I_SHIFT);
+ break;
+
+ case MODES_INDIC:
+ ret = regmap_update_bits(chip->regmap, REG_I_CTRL,
+ TORCH_I_MASK << TORCH_I_SHIFT,
+ (brightness - 1) << TORCH_I_SHIFT);
+ break;
+
+ case MODES_STASNDBY:
+ if (pdata->torch_pin_en)
+ opmode |= (TORCH_PIN_EN_MASK << TORCH_PIN_EN_SHIFT);
+ break;
+
+ default:
+ return ret;
+ }
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write REG_I_CTRL Register\n");
+ goto out;
+ }
+ ret = regmap_update_bits(chip->regmap, REG_ENABLE,
+ MODE_BITS_MASK << MODE_BITS_SHIFT,
+ opmode << MODE_BITS_SHIFT);
+
+out:
+ return ret;
+}
+
+/* torch */
+static void lm3556_torch_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lm3556_chip_data *chip =
+ container_of(cdev, struct lm3556_chip_data, cdev_torch);
+
+ mutex_lock(&chip->lock);
+ lm3556_control(chip, brightness, MODES_TORCH);
+ mutex_unlock(&chip->lock);
+}
+
+/* flash */
+static void lm3556_strobe_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lm3556_chip_data *chip =
+ container_of(cdev, struct lm3556_chip_data, cdev_flash);
+
+ mutex_lock(&chip->lock);
+ lm3556_control(chip, brightness, MODES_FLASH);
+ mutex_unlock(&chip->lock);
+}
+
+/* indicator */
+static void lm3556_indicator_brightness_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ struct lm3556_chip_data *chip =
+ container_of(cdev, struct lm3556_chip_data, cdev_indicator);
+
+ mutex_lock(&chip->lock);
+ lm3556_control(chip, brightness, MODES_INDIC);
+ mutex_unlock(&chip->lock);
+}
+
+/* indicator pattern */
+static ssize_t lm3556_indicator_pattern_store(struct device *dev,
+ struct device_attribute *devAttr,
+ const char *buf, size_t size)
+{
+ ssize_t ret;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3556_chip_data *chip =
+ container_of(led_cdev, struct lm3556_chip_data, cdev_indicator);
+ unsigned int state;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ goto out;
+ if (state > INDIC_PATTERN_SIZE - 1)
+ state = INDIC_PATTERN_SIZE - 1;
+
+ ret = regmap_write(chip->regmap, REG_INDIC_BLINK,
+ indicator_pattern[state].blinking);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
+ goto out;
+ }
+
+ ret = regmap_write(chip->regmap, REG_INDIC_PERIOD,
+ indicator_pattern[state].period_cnt);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to write REG_ENABLE Regisgter\n");
+ goto out;
+ }
+
+ return size;
+out:
+ dev_err(chip->dev, "Indicator pattern doesn't saved\n");
+ return size;
+}
+
+static DEVICE_ATTR(pattern, 0666, NULL, lm3556_indicator_pattern_store);
+
+static const struct regmap_config lm3556_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_MAX,
+};
+
+/* module initialize */
+static int __devinit lm3556_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lm3556_platform_data *pdata = client->dev.platform_data;
+ struct lm3556_chip_data *chip;
+
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "i2c functionality check fail.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "Needs Platform Data.\n");
+ return -ENODATA;
+ }
+
+ chip =
+ devm_kzalloc(&client->dev, sizeof(struct lm3556_chip_data),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &client->dev;
+ chip->pdata = pdata;
+
+ chip->regmap = devm_regmap_init_i2c(client, &lm3556_regmap);
+ if (IS_ERR(chip->regmap)) {
+ err = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ err);
+ return err;
+ }
+
+ mutex_init(&chip->lock);
+ i2c_set_clientdata(client, chip);
+
+ err = lm3556_chip_init(chip);
+ if (err < 0)
+ goto err_out;
+
+ /* flash */
+ chip->cdev_flash.name = "flash";
+ chip->cdev_flash.max_brightness = 16;
+ chip->cdev_flash.brightness_set = lm3556_strobe_brightness_set;
+ err = led_classdev_register((struct device *)
+ &client->dev, &chip->cdev_flash);
+ if (err < 0)
+ goto err_out;
+ /* torch */
+ chip->cdev_torch.name = "torch";
+ chip->cdev_torch.max_brightness = 8;
+ chip->cdev_torch.brightness_set = lm3556_torch_brightness_set;
+ err = led_classdev_register((struct device *)
+ &client->dev, &chip->cdev_torch);
+ if (err < 0)
+ goto err_create_torch_file;
+ /* indicator */
+ chip->cdev_indicator.name = "indicator";
+ chip->cdev_indicator.max_brightness = 8;
+ chip->cdev_indicator.brightness_set = lm3556_indicator_brightness_set;
+ err = led_classdev_register((struct device *)
+ &client->dev, &chip->cdev_indicator);
+ if (err < 0)
+ goto err_create_indicator_file;
+
+ err = device_create_file(chip->cdev_indicator.dev, &dev_attr_pattern);
+ if (err < 0)
+ goto err_create_pattern_file;
+
+ dev_info(&client->dev, "LM3556 is initialized\n");
+ return 0;
+
+err_create_pattern_file:
+ led_classdev_unregister(&chip->cdev_indicator);
+err_create_indicator_file:
+ led_classdev_unregister(&chip->cdev_torch);
+err_create_torch_file:
+ led_classdev_unregister(&chip->cdev_flash);
+err_out:
+ return err;
+}
+
+static int __devexit lm3556_remove(struct i2c_client *client)
+{
+ struct lm3556_chip_data *chip = i2c_get_clientdata(client);
+
+ device_remove_file(chip->cdev_indicator.dev, &dev_attr_pattern);
+ led_classdev_unregister(&chip->cdev_indicator);
+ led_classdev_unregister(&chip->cdev_torch);
+ led_classdev_unregister(&chip->cdev_flash);
+ regmap_write(chip->regmap, REG_ENABLE, 0);
+ return 0;
+}
+
+static const struct i2c_device_id lm3556_id[] = {
+ {LM3556_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, lm3556_id);
+
+static struct i2c_driver lm3556_i2c_driver = {
+ .driver = {
+ .name = LM3556_NAME,
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ },
+ .probe = lm3556_probe,
+ .remove = __devexit_p(lm3556_remove),
+ .id_table = lm3556_id,
+};
+
+module_i2c_driver(lm3556_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments Flash Lighting driver for LM3556");
+MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index b8f9f0a5d431..c298f7d9f535 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -393,7 +393,8 @@ static int __devinit lp3944_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(struct lp3944_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lp3944_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -403,10 +404,8 @@ static int __devinit lp3944_probe(struct i2c_client *client,
mutex_init(&data->lock);
err = lp3944_configure(client, data, lp3944_pdata);
- if (err < 0) {
- kfree(data);
+ if (err < 0)
return err;
- }
dev_info(&client->dev, "lp3944 enabled\n");
return 0;
@@ -431,8 +430,6 @@ static int __devexit lp3944_remove(struct i2c_client *client)
break;
}
- kfree(data);
-
return 0;
}
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 23815624f35e..2064aefedc07 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -744,7 +744,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
int ret, i, led;
u8 buf;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -755,8 +755,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
if (!pdata) {
dev_err(&client->dev, "no platform data\n");
- ret = -EINVAL;
- goto fail1;
+ return -EINVAL;
}
mutex_init(&chip->lock);
@@ -766,7 +765,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
if (pdata->setup_resources) {
ret = pdata->setup_resources();
if (ret < 0)
- goto fail1;
+ return ret;
}
if (pdata->enable) {
@@ -807,7 +806,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
ret = lp5521_configure(client);
if (ret < 0) {
dev_err(&client->dev, "error configuring chip\n");
- goto fail2;
+ goto fail1;
}
/* Initialize leds */
@@ -822,7 +821,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
if (ret) {
dev_err(&client->dev, "error initializing leds\n");
- goto fail3;
+ goto fail2;
}
chip->num_leds++;
@@ -840,21 +839,19 @@ static int __devinit lp5521_probe(struct i2c_client *client,
ret = lp5521_register_sysfs(client);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail3;
+ goto fail2;
}
return ret;
-fail3:
+fail2:
for (i = 0; i < chip->num_leds; i++) {
led_classdev_unregister(&chip->leds[i].cdev);
cancel_work_sync(&chip->leds[i].brightness_work);
}
-fail2:
+fail1:
if (pdata->enable)
pdata->enable(0);
if (pdata->release_resources)
pdata->release_resources();
-fail1:
- kfree(chip);
return ret;
}
@@ -875,7 +872,6 @@ static int __devexit lp5521_remove(struct i2c_client *client)
chip->pdata->enable(0);
if (chip->pdata->release_resources)
chip->pdata->release_resources();
- kfree(chip);
return 0;
}
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 857a3e15f2dd..fbc12acada95 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -877,7 +877,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
struct lp5523_platform_data *pdata;
int ret, i, led;
- chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -888,8 +888,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
if (!pdata) {
dev_err(&client->dev, "no platform data\n");
- ret = -EINVAL;
- goto fail1;
+ return -EINVAL;
}
mutex_init(&chip->lock);
@@ -899,7 +898,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
if (pdata->setup_resources) {
ret = pdata->setup_resources();
if (ret < 0)
- goto fail1;
+ return ret;
}
if (pdata->enable) {
@@ -916,7 +915,7 @@ static int __devinit lp5523_probe(struct i2c_client *client,
*/
ret = lp5523_detect(client);
if (ret)
- goto fail2;
+ goto fail1;
dev_info(&client->dev, "LP5523 Programmable led chip found\n");
@@ -925,13 +924,13 @@ static int __devinit lp5523_probe(struct i2c_client *client,
ret = lp5523_init_engine(&chip->engines[i], i + 1);
if (ret) {
dev_err(&client->dev, "error initializing engine\n");
- goto fail2;
+ goto fail1;
}
}
ret = lp5523_configure(client);
if (ret < 0) {
dev_err(&client->dev, "error configuring chip\n");
- goto fail2;
+ goto fail1;
}
/* Initialize leds */
@@ -943,10 +942,13 @@ static int __devinit lp5523_probe(struct i2c_client *client,
if (pdata->led_config[i].led_current == 0)
continue;
+ INIT_WORK(&chip->leds[led].brightness_work,
+ lp5523_led_brightness_work);
+
ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
if (ret) {
dev_err(&client->dev, "error initializing leds\n");
- goto fail3;
+ goto fail2;
}
chip->num_leds++;
@@ -956,30 +958,25 @@ static int __devinit lp5523_probe(struct i2c_client *client,
LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
chip->leds[led].led_current);
- INIT_WORK(&(chip->leds[led].brightness_work),
- lp5523_led_brightness_work);
-
led++;
}
ret = lp5523_register_sysfs(client);
if (ret) {
dev_err(&client->dev, "registering sysfs failed\n");
- goto fail3;
+ goto fail2;
}
return ret;
-fail3:
+fail2:
for (i = 0; i < chip->num_leds; i++) {
led_classdev_unregister(&chip->leds[i].cdev);
cancel_work_sync(&chip->leds[i].brightness_work);
}
-fail2:
+fail1:
if (pdata->enable)
pdata->enable(0);
if (pdata->release_resources)
pdata->release_resources();
-fail1:
- kfree(chip);
return ret;
}
@@ -999,7 +996,6 @@ static int lp5523_remove(struct i2c_client *client)
chip->pdata->enable(0);
if (chip->pdata->release_resources)
chip->pdata->release_resources();
- kfree(chip);
return 0;
}
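
One detail in the lp5523 hunk deserves a note: INIT_WORK() is moved ahead of lp5523_init_led(), apparently because lp5523_init_led() registers the LED class device, which makes the LED visible to userspace immediately; a sysfs brightness write could then schedule brightness_work before the old code had initialised it. A small sketch of that ordering, with hypothetical names (my_led, my_brightness_work):

    #include <linux/device.h>
    #include <linux/leds.h>
    #include <linux/workqueue.h>

    struct my_led {                              /* hypothetical */
        struct led_classdev cdev;
        struct work_struct brightness_work;
    };

    static void my_brightness_work(struct work_struct *work)
    {
        /* talk to the hardware here */
    }

    static int my_register_led(struct device *dev, struct my_led *led)
    {
        /*
         * Initialise the work first: led_classdev_register() exposes the
         * LED to userspace, and a brightness write can schedule this work
         * before the registering call even returns.
         */
        INIT_WORK(&led->brightness_work, my_brightness_work);

        return led_classdev_register(dev, &led->cdev);
    }
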
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
new file mode 100644
index 000000000000..0ade6ebfc914
--- /dev/null
+++ b/drivers/leds/leds-lp8788.c
@@ -0,0 +1,193 @@
+/*
+ * TI LP8788 MFD - keyled driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/mfd/lp8788-isink.h>
+
+#define MAX_BRIGHTNESS LP8788_ISINK_MAX_PWM
+#define DEFAULT_LED_NAME "keyboard-backlight"
+
+struct lp8788_led {
+ struct lp8788 *lp;
+ struct mutex lock;
+ struct work_struct work;
+ struct led_classdev led_dev;
+ enum lp8788_isink_number isink_num;
+ enum led_brightness brightness;
+ int on;
+};
+
+struct lp8788_led_config {
+ enum lp8788_isink_scale scale;
+ enum lp8788_isink_number num;
+ int iout;
+};
+
+static struct lp8788_led_config default_led_config = {
+ .scale = LP8788_ISINK_SCALE_100mA,
+ .num = LP8788_ISINK_3,
+ .iout = 0,
+};
+
+static int lp8788_led_init_device(struct lp8788_led *led,
+ struct lp8788_led_platform_data *pdata)
+{
+ struct lp8788_led_config *cfg = &default_led_config;
+ u8 addr, mask, val;
+ int ret;
+
+ if (pdata) {
+ cfg->scale = pdata->scale;
+ cfg->num = pdata->num;
+ cfg->iout = pdata->iout_code;
+ }
+
+ led->isink_num = cfg->num;
+
+ /* scale configuration */
+ addr = LP8788_ISINK_CTRL;
+ mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
+ val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET);
+ ret = lp8788_update_bits(led->lp, addr, mask, val);
+ if (ret)
+ return ret;
+
+ /* current configuration */
+ addr = lp8788_iout_addr[cfg->num];
+ mask = lp8788_iout_mask[cfg->num];
+ val = cfg->iout;
+
+ return lp8788_update_bits(led->lp, addr, mask, val);
+}
+
+static void lp8788_led_enable(struct lp8788_led *led,
+ enum lp8788_isink_number num, int on)
+{
+ u8 mask = 1 << num;
+ u8 val = on << num;
+
+ if (lp8788_update_bits(led->lp, LP8788_ISINK_CTRL, mask, val))
+ return;
+
+ led->on = on;
+}
+
+static void lp8788_led_work(struct work_struct *work)
+{
+ struct lp8788_led *led = container_of(work, struct lp8788_led, work);
+ enum lp8788_isink_number num = led->isink_num;
+ int enable;
+ u8 val = led->brightness;
+
+ mutex_lock(&led->lock);
+
+ switch (num) {
+ case LP8788_ISINK_1:
+ case LP8788_ISINK_2:
+ case LP8788_ISINK_3:
+ lp8788_write_byte(led->lp, lp8788_pwm_addr[num], val);
+ break;
+ default:
+ mutex_unlock(&led->lock);
+ return;
+ }
+
+ enable = (val > 0) ? 1 : 0;
+ if (enable != led->on)
+ lp8788_led_enable(led, num, enable);
+
+ mutex_unlock(&led->lock);
+}
+
+static void lp8788_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brt_val)
+{
+ struct lp8788_led *led =
+ container_of(led_cdev, struct lp8788_led, led_dev);
+
+ led->brightness = brt_val;
+ schedule_work(&led->work);
+}
+
+static __devinit int lp8788_led_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ struct lp8788_led_platform_data *led_pdata;
+ struct lp8788_led *led;
+ int ret;
+
+ led = devm_kzalloc(lp->dev, sizeof(struct lp8788_led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lp = lp;
+ led->led_dev.max_brightness = MAX_BRIGHTNESS;
+ led->led_dev.brightness_set = lp8788_brightness_set;
+
+ led_pdata = lp->pdata ? lp->pdata->led_pdata : NULL;
+
+ if (!led_pdata || !led_pdata->name)
+ led->led_dev.name = DEFAULT_LED_NAME;
+ else
+ led->led_dev.name = led_pdata->name;
+
+ mutex_init(&led->lock);
+ INIT_WORK(&led->work, lp8788_led_work);
+
+ platform_set_drvdata(pdev, led);
+
+ ret = lp8788_led_init_device(led, led_pdata);
+ if (ret) {
+ dev_err(lp->dev, "led init device err: %d\n", ret);
+ return ret;
+ }
+
+ ret = led_classdev_register(lp->dev, &led->led_dev);
+ if (ret) {
+ dev_err(lp->dev, "led register err: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit lp8788_led_remove(struct platform_device *pdev)
+{
+ struct lp8788_led *led = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&led->led_dev);
+ flush_work_sync(&led->work);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_led_driver = {
+ .probe = lp8788_led_probe,
+ .remove = __devexit_p(lp8788_led_remove),
+ .driver = {
+ .name = LP8788_DEV_KEYLED,
+ .owner = THIS_MODULE,
+ },
+};
+module_platform_driver(lp8788_led_driver);
+
+MODULE_DESCRIPTION("Texas Instruments LP8788 Keyboard LED Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-keyled");
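
The new leds-lp8788.c driver takes its configuration from the parent MFD's platform data (lp->pdata->led_pdata) and falls back to default_led_config otherwise. A hedged sketch of what a board file might pass, assuming the field names used in the driver above (name, scale, num, iout_code) and that the containing platform-data struct is named lp8788_platform_data with an led_pdata member; the authoritative layout lives in <linux/mfd/lp8788.h>:

    #include <linux/mfd/lp8788.h>

    static struct lp8788_led_platform_data board_keyled_pdata = {
        .name      = "kbd-backlight",           /* hypothetical LED name */
        .scale     = LP8788_ISINK_SCALE_100mA,
        .num       = LP8788_ISINK_3,
        .iout_code = 10,                        /* example current code */
    };

    static struct lp8788_platform_data board_lp8788_pdata = {
        .led_pdata = &board_keyled_pdata,
        /* platform data for the other lp8788 sub-devices omitted */
    };
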
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index e311a96c4469..09a732217f6d 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -149,8 +149,9 @@ static int __devinit lt3593_led_probe(struct platform_device *pdev)
if (!pdata)
return -EBUSY;
- leds_data = kzalloc(sizeof(struct lt3593_led_data) * pdata->num_leds,
- GFP_KERNEL);
+ leds_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct lt3593_led_data) * pdata->num_leds,
+ GFP_KERNEL);
if (!leds_data)
return -ENOMEM;
@@ -169,8 +170,6 @@ err:
for (i = i - 1; i >= 0; i--)
delete_lt3593_led(&leds_data[i]);
- kfree(leds_data);
-
return ret;
}
@@ -185,8 +184,6 @@ static int __devexit lt3593_led_remove(struct platform_device *pdev)
for (i = 0; i < pdata->num_leds; i++)
delete_lt3593_led(&leds_data[i]);
- kfree(leds_data);
-
return 0;
}
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index f4c0e37fad1e..569e36de37df 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -49,71 +49,37 @@ struct max8997_led {
struct mutex mutex;
};
-static void max8997_led_clear_mode(struct max8997_led *led,
- enum max8997_led_mode mode)
-{
- struct i2c_client *client = led->iodev->i2c;
- u8 val = 0, mask = 0;
- int ret;
-
- switch (mode) {
- case MAX8997_FLASH_MODE:
- mask = led->id ?
- MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
- break;
- case MAX8997_MOVIE_MODE:
- mask = led->id ?
- MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
- break;
- case MAX8997_FLASH_PIN_CONTROL_MODE:
- mask = led->id ?
- MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
- break;
- case MAX8997_MOVIE_PIN_CONTROL_MODE:
- mask = led->id ?
- MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
- break;
- default:
- break;
- }
-
- if (mask) {
- ret = max8997_update_reg(client,
- MAX8997_REG_LEN_CNTL, val, mask);
- if (ret)
- dev_err(led->iodev->dev,
- "failed to update register(%d)\n", ret);
- }
-}
-
static void max8997_led_set_mode(struct max8997_led *led,
enum max8997_led_mode mode)
{
int ret;
struct i2c_client *client = led->iodev->i2c;
- u8 mask = 0;
-
- /* First, clear the previous mode */
- max8997_led_clear_mode(led, led->led_mode);
+ u8 mask = 0, val;
switch (mode) {
case MAX8997_FLASH_MODE:
- mask = led->id ?
+ mask = MAX8997_LED1_FLASH_MASK | MAX8997_LED0_FLASH_MASK;
+ val = led->id ?
MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
break;
case MAX8997_MOVIE_MODE:
- mask = led->id ?
+ mask = MAX8997_LED1_MOVIE_MASK | MAX8997_LED0_MOVIE_MASK;
+ val = led->id ?
MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
break;
case MAX8997_FLASH_PIN_CONTROL_MODE:
- mask = led->id ?
+ mask = MAX8997_LED1_FLASH_PIN_MASK |
+ MAX8997_LED0_FLASH_PIN_MASK;
+ val = led->id ?
MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
break;
case MAX8997_MOVIE_PIN_CONTROL_MODE:
- mask = led->id ?
+ mask = MAX8997_LED1_MOVIE_PIN_MASK |
+ MAX8997_LED0_MOVIE_PIN_MASK;
+ val = led->id ?
MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
break;
@@ -123,8 +89,8 @@ static void max8997_led_set_mode(struct max8997_led *led,
}
if (mask) {
- ret = max8997_update_reg(client,
- MAX8997_REG_LEN_CNTL, mask, mask);
+ ret = max8997_update_reg(client, MAX8997_REG_LEN_CNTL, val,
+ mask);
if (ret)
dev_err(led->iodev->dev,
"failed to update register(%d)\n", ret);
@@ -276,11 +242,9 @@ static int __devinit max8997_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- led = kzalloc(sizeof(*led), GFP_KERNEL);
- if (led == NULL) {
- ret = -ENOMEM;
- goto err_mem;
- }
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (led == NULL)
+ return -ENOMEM;
led->id = pdev->id;
snprintf(name, sizeof(name), "max8997-led%d", pdev->id);
@@ -315,23 +279,17 @@ static int __devinit max8997_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
- goto err_led;
+ return ret;
ret = device_create_file(led->cdev.dev, &dev_attr_mode);
if (ret != 0) {
dev_err(&pdev->dev,
"failed to create file: %d\n", ret);
- goto err_file;
+ led_classdev_unregister(&led->cdev);
+ return ret;
}
return 0;
-
-err_file:
- led_classdev_unregister(&led->cdev);
-err_led:
- kfree(led);
-err_mem:
- return ret;
}
static int __devexit max8997_led_remove(struct platform_device *pdev)
@@ -340,7 +298,6 @@ static int __devexit max8997_led_remove(struct platform_device *pdev)
device_remove_file(led->cdev.dev, &dev_attr_mode);
led_classdev_unregister(&led->cdev);
- kfree(led);
return 0;
}
@@ -354,17 +311,7 @@ static struct platform_driver max8997_led_driver = {
.remove = __devexit_p(max8997_led_remove),
};
-static int __init max8997_led_init(void)
-{
- return platform_driver_register(&max8997_led_driver);
-}
-module_init(max8997_led_init);
-
-static void __exit max8997_led_exit(void)
-{
- platform_driver_unregister(&max8997_led_driver);
-}
-module_exit(max8997_led_exit);
+module_platform_driver(max8997_led_driver);
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_DESCRIPTION("MAX8997 LED driver");
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 4cc6a2e3df34..2a5d43400677 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,8 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -289,7 +290,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
ret = mc13783_leds_prepare(pdev);
if (ret) {
dev_err(&pdev->dev, "unable to init led driver\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; i++) {
@@ -344,8 +345,6 @@ err_register:
cancel_work_sync(&led[i].work);
}
-err_free:
- kfree(led);
return ret;
}
@@ -372,7 +371,7 @@ static int __devexit mc13783_led_remove(struct platform_device *pdev)
mc13xxx_unlock(dev);
- kfree(led);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 73973fdbd8be..e37618e363cf 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -362,14 +362,14 @@ static int __devinit netxbig_led_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- leds_data = kzalloc(sizeof(struct netxbig_led_data) * pdata->num_leds,
- GFP_KERNEL);
+ leds_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct netxbig_led_data) * pdata->num_leds, GFP_KERNEL);
if (!leds_data)
return -ENOMEM;
ret = gpio_ext_init(pdata->gpio_ext);
if (ret < 0)
- goto err_free_data;
+ return ret;
for (i = 0; i < pdata->num_leds; i++) {
ret = create_netxbig_led(pdev, &leds_data[i], &pdata->leds[i]);
@@ -386,9 +386,6 @@ err_free_leds:
delete_netxbig_led(&leds_data[i]);
gpio_ext_free(pdata->gpio_ext);
-err_free_data:
- kfree(leds_data);
-
return ret;
}
@@ -404,7 +401,6 @@ static int __devexit netxbig_led_remove(struct platform_device *pdev)
delete_netxbig_led(&leds_data[i]);
gpio_ext_free(pdata->gpio_ext);
- kfree(leds_data);
return 0;
}
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 01cf89ec6944..10528dafb043 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -273,29 +273,23 @@ static int __devinit ns2_led_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- leds_data = kzalloc(sizeof(struct ns2_led_data) *
+ leds_data = devm_kzalloc(&pdev->dev, sizeof(struct ns2_led_data) *
pdata->num_leds, GFP_KERNEL);
if (!leds_data)
return -ENOMEM;
for (i = 0; i < pdata->num_leds; i++) {
ret = create_ns2_led(pdev, &leds_data[i], &pdata->leds[i]);
- if (ret < 0)
- goto err;
-
+ if (ret < 0) {
+ for (i = i - 1; i >= 0; i--)
+ delete_ns2_led(&leds_data[i]);
+ return ret;
+ }
}
platform_set_drvdata(pdev, leds_data);
return 0;
-
-err:
- for (i = i - 1; i >= 0; i--)
- delete_ns2_led(&leds_data[i]);
-
- kfree(leds_data);
-
- return ret;
}
static int __devexit ns2_led_remove(struct platform_device *pdev)
@@ -309,7 +303,6 @@ static int __devexit ns2_led_remove(struct platform_device *pdev)
for (i = 0; i < pdata->num_leds; i++)
delete_ns2_led(&leds_data[i]);
- kfree(leds_data);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index ceccab44b5b8..cee8a5b483ac 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -449,7 +449,6 @@ static int pca9532_probe(struct i2c_client *client,
{
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata = client->dev.platform_data;
- int err;
if (!pca9532_pdata)
return -EIO;
@@ -458,7 +457,7 @@ static int pca9532_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -469,11 +468,7 @@ static int pca9532_probe(struct i2c_client *client,
data->client = client;
mutex_init(&data->update_lock);
- err = pca9532_configure(client, data, pca9532_pdata);
- if (err)
- kfree(data);
-
- return err;
+ return pca9532_configure(client, data, pca9532_pdata);
}
static int pca9532_remove(struct i2c_client *client)
@@ -485,7 +480,6 @@ static int pca9532_remove(struct i2c_client *client)
if (err)
return err;
- kfree(data);
return 0;
}
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 5f462dbf0dbb..aef3cf0432fe 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -293,15 +293,14 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
- pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
+ pca955x = devm_kzalloc(&client->dev, sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
- pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
- if (!pca955x->leds) {
- err = -ENOMEM;
- goto exit_nomem;
- }
+ pca955x->leds = devm_kzalloc(&client->dev,
+ sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+ if (!pca955x->leds)
+ return -ENOMEM;
i2c_set_clientdata(client, pca955x);
@@ -361,10 +360,6 @@ exit:
cancel_work_sync(&pca955x->leds[i].work);
}
- kfree(pca955x->leds);
-exit_nomem:
- kfree(pca955x);
-
return err;
}
@@ -378,9 +373,6 @@ static int __devexit pca955x_remove(struct i2c_client *client)
cancel_work_sync(&pca955x->leds[i].work);
}
- kfree(pca955x->leds);
- kfree(pca955x);
-
return 0;
}
diff --git a/drivers/leds/leds-pca9633.c b/drivers/leds/leds-pca9633.c
index d8926fd031aa..edcd706c5631 100644
--- a/drivers/leds/leds-pca9633.c
+++ b/drivers/leds/leds-pca9633.c
@@ -108,7 +108,7 @@ static int __devinit pca9633_probe(struct i2c_client *client,
}
}
- pca9633 = kcalloc(4, sizeof(*pca9633), GFP_KERNEL);
+ pca9633 = devm_kzalloc(&client->dev, 4 * sizeof(*pca9633), GFP_KERNEL);
if (!pca9633)
return -ENOMEM;
@@ -156,8 +156,6 @@ exit:
cancel_work_sync(&pca9633[i].work);
}
- kfree(pca9633);
-
return err;
}
@@ -171,8 +169,6 @@ static int __devexit pca9633_remove(struct i2c_client *client)
cancel_work_sync(&pca9633[i].work);
}
- kfree(pca9633);
-
return 0;
}
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 3ed92f34bd44..f2e44c719437 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -57,7 +57,8 @@ static int led_pwm_probe(struct platform_device *pdev)
if (!pdata)
return -EBUSY;
- leds_data = kzalloc(sizeof(struct led_pwm_data) * pdata->num_leds,
+ leds_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct led_pwm_data) * pdata->num_leds,
GFP_KERNEL);
if (!leds_data)
return -ENOMEM;
@@ -103,8 +104,6 @@ err:
}
}
- kfree(leds_data);
-
return ret;
}
@@ -121,8 +120,6 @@ static int __devexit led_pwm_remove(struct platform_device *pdev)
pwm_free(leds_data[i].pwm);
}
- kfree(leds_data);
-
return 0;
}
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index df7e963bddd3..25d382d60fa9 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -158,7 +158,7 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
return PTR_ERR(vcc);
}
- led = kzalloc(sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_vcc;
@@ -169,7 +169,7 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Invalid default brightness %d\n",
pdata->brightness);
ret = -EINVAL;
- goto err_led;
+ goto err_vcc;
}
led->value = pdata->brightness;
@@ -190,7 +190,7 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0) {
cancel_work_sync(&led->work);
- goto err_led;
+ goto err_vcc;
}
/* to expose the default value to userspace */
@@ -201,8 +201,6 @@ static int __devinit regulator_led_probe(struct platform_device *pdev)
return 0;
-err_led:
- kfree(led);
err_vcc:
regulator_put(vcc);
return ret;
@@ -216,7 +214,6 @@ static int __devexit regulator_led_remove(struct platform_device *pdev)
cancel_work_sync(&led->work);
regulator_led_disable(led);
regulator_put(led->vcc);
- kfree(led);
return 0;
}
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 32fe337d5c68..771ea067e680 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -243,31 +243,30 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
struct led_renesas_tpu_config *cfg = pdev->dev.platform_data;
struct r_tpu_priv *p;
struct resource *res;
- int ret = -ENXIO;
+ int ret;
if (!cfg) {
dev_err(&pdev->dev, "missing platform data\n");
- goto err0;
+ return -ENODEV;
}
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- goto err1;
+ return -ENXIO;
}
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
- goto err1;
+ return -ENXIO;
}
/* get hold of clock */
@@ -275,7 +274,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
- goto err2;
+ goto err0;
}
p->pdev = pdev;
@@ -294,7 +293,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
p->ldev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &p->ldev);
if (ret < 0)
- goto err3;
+ goto err1;
/* max_brightness may be updated by the LED core code */
p->min_rate = p->ldev.max_brightness * p->refresh_rate;
@@ -302,14 +301,11 @@ static int __devinit r_tpu_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
return 0;
- err3:
+ err1:
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
clk_put(p->clk);
- err2:
- iounmap(p->mapbase);
- err1:
- kfree(p);
err0:
+ iounmap(p->mapbase);
return ret;
}
@@ -327,7 +323,6 @@ static int __devexit r_tpu_remove(struct platform_device *pdev)
clk_put(p->clk);
iounmap(p->mapbase);
- kfree(p);
return 0;
}
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index bd0a5ed49c42..942f0ea18178 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -45,17 +45,19 @@ static void s3c24xx_led_set(struct led_classdev *led_cdev,
{
struct s3c24xx_gpio_led *led = to_gpio(led_cdev);
struct s3c24xx_led_platdata *pd = led->pdata;
+ int state = (value ? 1 : 0) ^ (pd->flags & S3C24XX_LEDF_ACTLOW);
/* there will be a short delay between setting the output and
* going from output to input when using tristate. */
- s3c2410_gpio_setpin(pd->gpio, (value ? 1 : 0) ^
- (pd->flags & S3C24XX_LEDF_ACTLOW));
-
- if (pd->flags & S3C24XX_LEDF_TRISTATE)
- s3c2410_gpio_cfgpin(pd->gpio,
- value ? S3C2410_GPIO_OUTPUT : S3C2410_GPIO_INPUT);
+ gpio_set_value(pd->gpio, state);
+ if (pd->flags & S3C24XX_LEDF_TRISTATE) {
+ if (value)
+ gpio_direction_output(pd->gpio, state);
+ else
+ gpio_direction_input(pd->gpio);
+ }
}
static int s3c24xx_led_remove(struct platform_device *dev)
@@ -63,7 +65,6 @@ static int s3c24xx_led_remove(struct platform_device *dev)
struct s3c24xx_gpio_led *led = pdev_to_gpio(dev);
led_classdev_unregister(&led->cdev);
- kfree(led);
return 0;
}
@@ -74,7 +75,8 @@ static int s3c24xx_led_probe(struct platform_device *dev)
struct s3c24xx_gpio_led *led;
int ret;
- led = kzalloc(sizeof(struct s3c24xx_gpio_led), GFP_KERNEL);
+ led = devm_kzalloc(&dev->dev, sizeof(struct s3c24xx_gpio_led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&dev->dev, "No memory for device\n");
return -ENOMEM;
@@ -89,27 +91,27 @@ static int s3c24xx_led_probe(struct platform_device *dev)
led->pdata = pdata;
+ ret = devm_gpio_request(&dev->dev, pdata->gpio, "S3C24XX_LED");
+ if (ret < 0)
+ return ret;
+
/* no point in having a pull-up if we are always driving */
- if (pdata->flags & S3C24XX_LEDF_TRISTATE) {
- s3c2410_gpio_setpin(pdata->gpio, 0);
- s3c2410_gpio_cfgpin(pdata->gpio, S3C2410_GPIO_INPUT);
- } else {
- s3c2410_gpio_pullup(pdata->gpio, 0);
- s3c2410_gpio_setpin(pdata->gpio, 0);
- s3c2410_gpio_cfgpin(pdata->gpio, S3C2410_GPIO_OUTPUT);
- }
+ s3c_gpio_setpull(pdata->gpio, S3C_GPIO_PULL_NONE);
+
+ if (pdata->flags & S3C24XX_LEDF_TRISTATE)
+ gpio_direction_input(pdata->gpio);
+ else
+ gpio_direction_output(pdata->gpio,
+ pdata->flags & S3C24XX_LEDF_ACTLOW ? 1 : 0);
/* register our new led device */
ret = led_classdev_register(&dev->dev, &led->cdev);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&dev->dev, "led_classdev_register failed\n");
- kfree(led);
- return ret;
- }
- return 0;
+ return ret;
}
static struct platform_driver s3c24xx_led_driver = {
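
The s3c24xx hunk swaps the SoC-private s3c2410_gpio_* calls for generic gpiolib: the line is claimed with devm_gpio_request() (released automatically on unbind), the pull is disabled with s3c_gpio_setpull(), and direction is set with gpio_direction_output()/gpio_direction_input(). A minimal sketch of the generic part of that pattern, with hypothetical names and an active-low flag:

    #include <linux/device.h>
    #include <linux/gpio.h>

    static int claim_led_gpio(struct device *dev, unsigned gpio,
                              bool active_low)
    {
        int ret;

        /* Released automatically when the device is unbound. */
        ret = devm_gpio_request(dev, gpio, "my-led");  /* hypothetical label */
        if (ret < 0)
            return ret;

        /* Drive the line to its inactive level straight away. */
        return gpio_direction_output(gpio, active_low ? 1 : 0);
    }
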
diff --git a/drivers/leds/leds-sunfire.c b/drivers/leds/leds-sunfire.c
index 1757396b20b3..134d9a4b34f1 100644
--- a/drivers/leds/leds-sunfire.c
+++ b/drivers/leds/leds-sunfire.c
@@ -132,15 +132,13 @@ static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
if (pdev->num_resources != 1) {
printk(KERN_ERR PFX "Wrong number of resources %d, should be 1\n",
pdev->num_resources);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Could not allocate struct sunfire_drvdata\n");
- err = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
for (i = 0; i < NUM_LEDS_PER_BOARD; i++) {
@@ -156,20 +154,15 @@ static int __devinit sunfire_led_generic_probe(struct platform_device *pdev,
if (err) {
printk(KERN_ERR PFX "Could not register %s LED\n",
lp->name);
- goto out_unregister_led_cdevs;
+ for (i--; i >= 0; i--)
+ led_classdev_unregister(&p->leds[i].led_cdev);
+ return err;
}
}
dev_set_drvdata(&pdev->dev, p);
return 0;
-
-out_unregister_led_cdevs:
- for (i--; i >= 0; i--)
- led_classdev_unregister(&p->leds[i].led_cdev);
- kfree(p);
-out:
- return err;
}
static int __devexit sunfire_led_generic_remove(struct platform_device *pdev)
@@ -180,8 +173,6 @@ static int __devexit sunfire_led_generic_remove(struct platform_device *pdev)
for (i = 0; i < NUM_LEDS_PER_BOARD; i++)
led_classdev_unregister(&p->leds[i].led_cdev);
- kfree(p);
-
return 0;
}
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 6c1c14f31635..dabcf7ae8d0f 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -687,7 +687,7 @@ static int __devinit tca6507_probe(struct i2c_client *client,
NUM_LEDS);
return -ENODEV;
}
- tca = kzalloc(sizeof(*tca), GFP_KERNEL);
+ tca = devm_kzalloc(&client->dev, sizeof(*tca), GFP_KERNEL);
if (!tca)
return -ENOMEM;
@@ -727,7 +727,6 @@ exit:
if (tca->leds[i].led_cdev.name)
led_classdev_unregister(&tca->leds[i].led_cdev);
}
- kfree(tca);
return err;
}
@@ -743,7 +742,6 @@ static int __devexit tca6507_remove(struct i2c_client *client)
}
tca6507_remove_gpio(tca);
cancel_work_sync(&tca->work);
- kfree(tca);
return 0;
}
@@ -758,18 +756,7 @@ static struct i2c_driver tca6507_driver = {
.id_table = tca6507_id,
};
-static int __init tca6507_leds_init(void)
-{
- return i2c_add_driver(&tca6507_driver);
-}
-
-static void __exit tca6507_leds_exit(void)
-{
- i2c_del_driver(&tca6507_driver);
-}
-
-module_init(tca6507_leds_init);
-module_exit(tca6507_leds_exit);
+module_i2c_driver(tca6507_driver);
MODULE_AUTHOR("NeilBrown <neilb@suse.de>");
MODULE_DESCRIPTION("TCA6507 LED/GPO driver");
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index e77c7f8dcdd4..d02acd496126 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -17,7 +17,7 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
-static inline void led_set_brightness(struct led_classdev *led_cdev,
+static inline void __led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
if (value > led_cdev->max_brightness)
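
The leds.h hunk renames the internal helper to __led_set_brightness(), and the trigger files below are updated to match; ledtrig-timer.c now calls a core-level led_set_brightness() on deactivation instead. The intent is a two-level API: the double-underscore helper only clamps the value and writes the hardware, while the core-level function is also responsible for stopping any software blink first. A hedged sketch of what that core-level wrapper needs to do (the exact led-core.c body may differ); blink_timer, blink_delay_on and blink_delay_off are existing led_classdev members:

    #include <linux/leds.h>
    #include <linux/timer.h>

    /* Sketch of the core-level entry point, assuming the split above. */
    void led_set_brightness(struct led_classdev *led_cdev,
                            enum led_brightness brightness)
    {
        /* Stop any software blink so the new value actually sticks. */
        del_timer_sync(&led_cdev->blink_timer);
        led_cdev->blink_delay_on = 0;
        led_cdev->blink_delay_off = 0;

        /* Then delegate to the low-level helper defined in leds.h. */
        __led_set_brightness(led_cdev, brightness);
    }
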
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index e2726867c5d4..b941685f2227 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -46,9 +46,9 @@ static int fb_notifier_callback(struct notifier_block *p,
if ((n->old_status == UNBLANK) ^ n->invert) {
n->brightness = led->brightness;
- led_set_brightness(led, LED_OFF);
+ __led_set_brightness(led, LED_OFF);
} else {
- led_set_brightness(led, n->brightness);
+ __led_set_brightness(led, n->brightness);
}
n->old_status = new_status;
@@ -87,9 +87,9 @@ static ssize_t bl_trig_invert_store(struct device *dev,
/* After inverting, we need to update the LED. */
if ((n->old_status == BLANK) ^ n->invert)
- led_set_brightness(led, LED_OFF);
+ __led_set_brightness(led, LED_OFF);
else
- led_set_brightness(led, n->brightness);
+ __led_set_brightness(led, n->brightness);
return num;
}
diff --git a/drivers/leds/ledtrig-default-on.c b/drivers/leds/ledtrig-default-on.c
index a4ef54b9d508..eac1f1b1adac 100644
--- a/drivers/leds/ledtrig-default-on.c
+++ b/drivers/leds/ledtrig-default-on.c
@@ -19,7 +19,7 @@
static void defon_trig_activate(struct led_classdev *led_cdev)
{
- led_set_brightness(led_cdev, led_cdev->max_brightness);
+ __led_set_brightness(led_cdev, led_cdev->max_brightness);
}
static struct led_trigger defon_led_trigger = {
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index f057c101b896..ba215dc42f98 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -54,12 +54,12 @@ static void gpio_trig_work(struct work_struct *work)
if (tmp) {
if (gpio_data->desired_brightness)
- led_set_brightness(gpio_data->led,
+ __led_set_brightness(gpio_data->led,
gpio_data->desired_brightness);
else
- led_set_brightness(gpio_data->led, LED_FULL);
+ __led_set_brightness(gpio_data->led, LED_FULL);
} else {
- led_set_brightness(gpio_data->led, LED_OFF);
+ __led_set_brightness(gpio_data->led, LED_OFF);
}
}
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index a019fbb70880..1edc7463ce83 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -74,7 +74,7 @@ static void led_heartbeat_function(unsigned long data)
break;
}
- led_set_brightness(led_cdev, brightness);
+ __led_set_brightness(led_cdev, brightness);
mod_timer(&heartbeat_data->timer, jiffies + delay);
}
diff --git a/drivers/leds/ledtrig-ide-disk.c b/drivers/leds/ledtrig-ide-disk.c
index ec099fcbcb00..2cd7c0cf5924 100644
--- a/drivers/leds/ledtrig-ide-disk.c
+++ b/drivers/leds/ledtrig-ide-disk.c
@@ -12,39 +12,22 @@
*/
#include <linux/module.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/timer.h>
#include <linux/leds.h>
-static void ledtrig_ide_timerfunc(unsigned long data);
+#define BLINK_DELAY 30
DEFINE_LED_TRIGGER(ledtrig_ide);
-static DEFINE_TIMER(ledtrig_ide_timer, ledtrig_ide_timerfunc, 0, 0);
-static int ide_activity;
-static int ide_lastactivity;
+static unsigned long ide_blink_delay = BLINK_DELAY;
void ledtrig_ide_activity(void)
{
- ide_activity++;
- if (!timer_pending(&ledtrig_ide_timer))
- mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
+ led_trigger_blink_oneshot(ledtrig_ide,
+ &ide_blink_delay, &ide_blink_delay, 0);
}
EXPORT_SYMBOL(ledtrig_ide_activity);
-static void ledtrig_ide_timerfunc(unsigned long data)
-{
- if (ide_lastactivity != ide_activity) {
- ide_lastactivity = ide_activity;
- /* INT_MAX will set each LED to its maximum brightness */
- led_trigger_event(ledtrig_ide, INT_MAX);
- mod_timer(&ledtrig_ide_timer, jiffies + msecs_to_jiffies(10));
- } else {
- led_trigger_event(ledtrig_ide, LED_OFF);
- }
-}
-
static int __init ledtrig_ide_init(void)
{
led_trigger_register_simple("ide-disk", &ledtrig_ide);
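
ledtrig-ide-disk drops its private timer and activity counters in favour of led_trigger_blink_oneshot(), which fires one delay_on/delay_off cycle per call; bursts of activity while a blink is already in flight collapse into a single visible blink instead of an ever-growing backlog. A minimal sketch of the same pattern for a hypothetical activity hook (my_activity_trig, my_subsystem_activity):

    #include <linux/init.h>
    #include <linux/leds.h>
    #include <linux/module.h>

    DEFINE_LED_TRIGGER(my_activity_trig);          /* hypothetical trigger */
    static unsigned long my_blink_delay = 30;      /* ms, as in the hunk above */

    void my_subsystem_activity(void)
    {
        /* Last argument 0: no inversion, the LED rests in the off state. */
        led_trigger_blink_oneshot(my_activity_trig,
                                  &my_blink_delay, &my_blink_delay, 0);
    }

    static int __init my_trig_init(void)
    {
        led_trigger_register_simple("my-activity", &my_activity_trig);
        return 0;
    }
    module_init(my_trig_init);
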
diff --git a/drivers/leds/ledtrig-oneshot.c b/drivers/leds/ledtrig-oneshot.c
new file mode 100644
index 000000000000..2c029aa5c4f1
--- /dev/null
+++ b/drivers/leds/ledtrig-oneshot.c
@@ -0,0 +1,204 @@
+/*
+ * One-shot LED Trigger
+ *
+ * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
+ *
+ * Based on ledtrig-timer.c by Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+#define DEFAULT_DELAY 100
+
+struct oneshot_trig_data {
+ unsigned int invert;
+};
+
+static ssize_t led_shot(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+
+ led_blink_set_oneshot(led_cdev,
+ &led_cdev->blink_delay_on, &led_cdev->blink_delay_off,
+ oneshot_data->invert);
+
+ /* content is ignored */
+ return size;
+}
+static ssize_t led_invert_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%u\n", oneshot_data->invert);
+}
+
+static ssize_t led_invert_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+ unsigned long state;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ oneshot_data->invert = !!state;
+
+ if (oneshot_data->invert)
+ __led_set_brightness(led_cdev, LED_FULL);
+ else
+ __led_set_brightness(led_cdev, LED_OFF);
+
+ return size;
+}
+
+static ssize_t led_delay_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
+}
+
+static ssize_t led_delay_on_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ led_cdev->blink_delay_on = state;
+
+ return size;
+}
+static ssize_t led_delay_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
+}
+
+static ssize_t led_delay_off_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &state);
+ if (ret)
+ return ret;
+
+ led_cdev->blink_delay_off = state;
+
+ return size;
+}
+
+static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
+static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store);
+static DEVICE_ATTR(invert, 0644, led_invert_show, led_invert_store);
+static DEVICE_ATTR(shot, 0200, NULL, led_shot);
+
+static void oneshot_trig_activate(struct led_classdev *led_cdev)
+{
+ struct oneshot_trig_data *oneshot_data;
+ int rc;
+
+ oneshot_data = kzalloc(sizeof(*oneshot_data), GFP_KERNEL);
+ if (!oneshot_data)
+ return;
+
+ led_cdev->trigger_data = oneshot_data;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
+ if (rc)
+ goto err_out_trig_data;
+ rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
+ if (rc)
+ goto err_out_delayon;
+ rc = device_create_file(led_cdev->dev, &dev_attr_invert);
+ if (rc)
+ goto err_out_delayoff;
+ rc = device_create_file(led_cdev->dev, &dev_attr_shot);
+ if (rc)
+ goto err_out_invert;
+
+ led_cdev->blink_delay_on = DEFAULT_DELAY;
+ led_cdev->blink_delay_off = DEFAULT_DELAY;
+
+ led_cdev->activated = true;
+
+ return;
+
+err_out_invert:
+ device_remove_file(led_cdev->dev, &dev_attr_invert);
+err_out_delayoff:
+ device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+err_out_delayon:
+ device_remove_file(led_cdev->dev, &dev_attr_delay_on);
+err_out_trig_data:
+ kfree(led_cdev->trigger_data);
+}
+
+static void oneshot_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct oneshot_trig_data *oneshot_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ device_remove_file(led_cdev->dev, &dev_attr_delay_on);
+ device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+ device_remove_file(led_cdev->dev, &dev_attr_invert);
+ device_remove_file(led_cdev->dev, &dev_attr_shot);
+ kfree(oneshot_data);
+ led_cdev->activated = false;
+ }
+
+ /* Stop blinking */
+ led_set_brightness(led_cdev, LED_OFF);
+}
+
+static struct led_trigger oneshot_led_trigger = {
+ .name = "oneshot",
+ .activate = oneshot_trig_activate,
+ .deactivate = oneshot_trig_deactivate,
+};
+
+static int __init oneshot_trig_init(void)
+{
+ return led_trigger_register(&oneshot_led_trigger);
+}
+
+static void __exit oneshot_trig_exit(void)
+{
+ led_trigger_unregister(&oneshot_led_trigger);
+}
+
+module_init(oneshot_trig_init);
+module_exit(oneshot_trig_exit);
+
+MODULE_AUTHOR("Fabio Baltieri <fabio.baltieri@gmail.com>");
+MODULE_DESCRIPTION("One-shot LED trigger");
+MODULE_LICENSE("GPL");
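
The new "oneshot" trigger is driven entirely from sysfs: delay_on, delay_off and invert shape the pulse, and any write to shot fires it. A small userspace sketch in plain C; the LED name "status" is hypothetical, substitute an entry from /sys/class/leds on the target:

    #include <stdio.h>

    /* Hypothetical LED name; pick a real one from /sys/class/leds. */
    #define LED_DIR "/sys/class/leds/status"

    static int put(const char *file, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), LED_DIR "/%s", file);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fputs(val, f);
        return fclose(f);
    }

    int main(void)
    {
        put("trigger", "oneshot");   /* attach the new trigger */
        put("delay_on", "250");      /* pulse shape, in milliseconds */
        put("delay_off", "250");
        put("invert", "0");          /* LED rests off between shots */
        return put("shot", "1");     /* content is ignored, any write fires */
    }
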
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 9010f7abaf2c..f774d0592204 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -104,7 +104,7 @@ static void timer_trig_deactivate(struct led_classdev *led_cdev)
}
/* Stop blinking */
- led_brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
}
static struct led_trigger timer_led_trigger = {
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
index 83179f435e1e..398f1042c43e 100644
--- a/drivers/leds/ledtrig-transient.c
+++ b/drivers/leds/ledtrig-transient.c
@@ -41,7 +41,7 @@ static void transient_timer_function(unsigned long data)
struct transient_trig_data *transient_data = led_cdev->trigger_data;
transient_data->activate = 0;
- led_set_brightness(led_cdev, transient_data->restore_state);
+ __led_set_brightness(led_cdev, transient_data->restore_state);
}
static ssize_t transient_activate_show(struct device *dev,
@@ -72,7 +72,7 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 0 && transient_data->activate == 1) {
del_timer(&transient_data->timer);
transient_data->activate = state;
- led_set_brightness(led_cdev, transient_data->restore_state);
+ __led_set_brightness(led_cdev, transient_data->restore_state);
return size;
}
@@ -80,7 +80,7 @@ static ssize_t transient_activate_store(struct device *dev,
if (state == 1 && transient_data->activate == 0 &&
transient_data->duration != 0) {
transient_data->activate = state;
- led_set_brightness(led_cdev, transient_data->state);
+ __led_set_brightness(led_cdev, transient_data->state);
transient_data->restore_state =
(transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
mod_timer(&transient_data->timer,
@@ -203,7 +203,7 @@ static void transient_trig_deactivate(struct led_classdev *led_cdev)
if (led_cdev->activated) {
del_timer_sync(&transient_data->timer);
- led_set_brightness(led_cdev, transient_data->restore_state);
+ __led_set_brightness(led_cdev, transient_data->restore_state);
device_remove_file(led_cdev->dev, &dev_attr_activate);
device_remove_file(led_cdev->dev, &dev_attr_duration);
device_remove_file(led_cdev->dev, &dev_attr_state);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 10f122a3a856..d949b781f6f8 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -260,15 +260,6 @@ config DM_DEBUG_BLOCK_STACK_TRACING
If unsure, say N.
-config DM_DEBUG_SPACE_MAPS
- boolean "Extra validation for thin provisioning space maps"
- depends on DM_THIN_PROVISIONING
- ---help---
- Enable this for messages that may help debug problems with the
- space maps used by thin provisioning.
-
- If unsure, say N.
-
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
@@ -277,13 +268,14 @@ config DM_MIRROR
needed for live data migration tools such as 'pvmove'.
config DM_RAID
- tristate "RAID 1/4/5/6 target"
+ tristate "RAID 1/4/5/6/10 target"
depends on BLK_DEV_DM
select MD_RAID1
+ select MD_RAID10
select MD_RAID456
select BLK_DEV_MD
---help---
- A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings
+ A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings
A RAID-5 set of N drives with a capacity of C MB per drive provides
the capacity of C * (N - 1) MB, and protects against a failure
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 15dbe03117e4..94e7f6ba2e11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1305,7 +1305,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->counts.lock);
- io_schedule();
+ schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 3f06df59fd82..664743d6a6cd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -42,21 +42,21 @@ struct convert_context {
unsigned int offset_out;
unsigned int idx_in;
unsigned int idx_out;
- sector_t sector;
- atomic_t pending;
+ sector_t cc_sector;
+ atomic_t cc_pending;
};
/*
* per bio private data
*/
struct dm_crypt_io {
- struct dm_target *target;
+ struct crypt_config *cc;
struct bio *base_bio;
struct work_struct work;
struct convert_context ctx;
- atomic_t pending;
+ atomic_t io_pending;
int error;
sector_t sector;
struct dm_crypt_io *base_io;
@@ -109,9 +109,6 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
*/
struct crypt_cpu {
struct ablkcipher_request *req;
- /* ESSIV: struct crypto_cipher *essiv_tfm */
- void *iv_private;
- struct crypto_ablkcipher *tfms[0];
};
/*
@@ -151,6 +148,10 @@ struct crypt_config {
* per_cpu_ptr() only.
*/
struct crypt_cpu __percpu *cpu;
+
+ /* ESSIV: struct crypto_cipher *essiv_tfm */
+ void *iv_private;
+ struct crypto_ablkcipher **tfms;
unsigned tfms_count;
/*
@@ -193,7 +194,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
*/
static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
{
- return __this_cpu_ptr(cc->cpu)->tfms[0];
+ return cc->tfms[0];
}
/*
@@ -258,7 +259,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
struct hash_desc desc;
struct scatterlist sg;
struct crypto_cipher *essiv_tfm;
- int err, cpu;
+ int err;
sg_init_one(&sg, cc->key, cc->key_size);
desc.tfm = essiv->hash_tfm;
@@ -268,14 +269,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
if (err)
return err;
- for_each_possible_cpu(cpu) {
- essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private,
+ essiv_tfm = cc->iv_private;
- err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
- crypto_hash_digestsize(essiv->hash_tfm));
- if (err)
- return err;
- }
+ err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
+ crypto_hash_digestsize(essiv->hash_tfm));
+ if (err)
+ return err;
return 0;
}
@@ -286,16 +285,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
struct crypto_cipher *essiv_tfm;
- int cpu, r, err = 0;
+ int r, err = 0;
memset(essiv->salt, 0, salt_size);
- for_each_possible_cpu(cpu) {
- essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private;
- r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
- if (r)
- err = r;
- }
+ essiv_tfm = cc->iv_private;
+ r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
+ if (r)
+ err = r;
return err;
}
@@ -335,8 +332,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
- int cpu;
- struct crypt_cpu *cpu_cc;
struct crypto_cipher *essiv_tfm;
struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
@@ -346,15 +341,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
kzfree(essiv->salt);
essiv->salt = NULL;
- for_each_possible_cpu(cpu) {
- cpu_cc = per_cpu_ptr(cc->cpu, cpu);
- essiv_tfm = cpu_cc->iv_private;
+ essiv_tfm = cc->iv_private;
- if (essiv_tfm)
- crypto_free_cipher(essiv_tfm);
+ if (essiv_tfm)
+ crypto_free_cipher(essiv_tfm);
- cpu_cc->iv_private = NULL;
- }
+ cc->iv_private = NULL;
}
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
@@ -363,7 +355,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
struct crypto_cipher *essiv_tfm = NULL;
struct crypto_hash *hash_tfm = NULL;
u8 *salt = NULL;
- int err, cpu;
+ int err;
if (!opts) {
ti->error = "Digest algorithm missing for ESSIV mode";
@@ -388,15 +380,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
cc->iv_gen_private.essiv.salt = salt;
cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
- for_each_possible_cpu(cpu) {
- essiv_tfm = setup_essiv_cpu(cc, ti, salt,
- crypto_hash_digestsize(hash_tfm));
- if (IS_ERR(essiv_tfm)) {
- crypt_iv_essiv_dtr(cc);
- return PTR_ERR(essiv_tfm);
- }
- per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm;
+ essiv_tfm = setup_essiv_cpu(cc, ti, salt,
+ crypto_hash_digestsize(hash_tfm));
+ if (IS_ERR(essiv_tfm)) {
+ crypt_iv_essiv_dtr(cc);
+ return PTR_ERR(essiv_tfm);
}
+ cc->iv_private = essiv_tfm;
return 0;
@@ -410,7 +400,7 @@ bad:
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
struct dm_crypt_request *dmreq)
{
- struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private;
+ struct crypto_cipher *essiv_tfm = cc->iv_private;
memset(iv, 0, cc->iv_size);
*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
@@ -664,7 +654,7 @@ static void crypt_convert_init(struct crypt_config *cc,
ctx->offset_out = 0;
ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
- ctx->sector = sector + cc->iv_offset;
+ ctx->cc_sector = sector + cc->iv_offset;
init_completion(&ctx->restart);
}
@@ -695,12 +685,12 @@ static int crypt_convert_block(struct crypt_config *cc,
struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
struct dm_crypt_request *dmreq;
u8 *iv;
- int r = 0;
+ int r;
dmreq = dmreq_of_req(cc, req);
iv = iv_of_dmreq(cc, dmreq);
- dmreq->iv_sector = ctx->sector;
+ dmreq->iv_sector = ctx->cc_sector;
dmreq->ctx = ctx;
sg_init_table(&dmreq->sg_in, 1);
sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
@@ -749,12 +739,12 @@ static void crypt_alloc_req(struct crypt_config *cc,
struct convert_context *ctx)
{
struct crypt_cpu *this_cc = this_crypt_config(cc);
- unsigned key_index = ctx->sector & (cc->tfms_count - 1);
+ unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!this_cc->req)
this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
- ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]);
+ ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
ablkcipher_request_set_callback(this_cc->req,
CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
@@ -769,14 +759,14 @@ static int crypt_convert(struct crypt_config *cc,
struct crypt_cpu *this_cc = this_crypt_config(cc);
int r;
- atomic_set(&ctx->pending, 1);
+ atomic_set(&ctx->cc_pending, 1);
while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
ctx->idx_out < ctx->bio_out->bi_vcnt) {
crypt_alloc_req(cc, ctx);
- atomic_inc(&ctx->pending);
+ atomic_inc(&ctx->cc_pending);
r = crypt_convert_block(cc, ctx, this_cc->req);
@@ -788,19 +778,19 @@ static int crypt_convert(struct crypt_config *cc,
/* fall through*/
case -EINPROGRESS:
this_cc->req = NULL;
- ctx->sector++;
+ ctx->cc_sector++;
continue;
/* sync */
case 0:
- atomic_dec(&ctx->pending);
- ctx->sector++;
+ atomic_dec(&ctx->cc_pending);
+ ctx->cc_sector++;
cond_resched();
continue;
/* error */
default:
- atomic_dec(&ctx->pending);
+ atomic_dec(&ctx->cc_pending);
return r;
}
}
@@ -811,7 +801,7 @@ static int crypt_convert(struct crypt_config *cc,
static void dm_crypt_bio_destructor(struct bio *bio)
{
struct dm_crypt_io *io = bio->bi_private;
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
bio_free(bio, cc->bs);
}
@@ -825,7 +815,7 @@ static void dm_crypt_bio_destructor(struct bio *bio)
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
unsigned *out_of_pages)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
@@ -884,26 +874,25 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
}
}
-static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
+static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
struct bio *bio, sector_t sector)
{
- struct crypt_config *cc = ti->private;
struct dm_crypt_io *io;
io = mempool_alloc(cc->io_pool, GFP_NOIO);
- io->target = ti;
+ io->cc = cc;
io->base_bio = bio;
io->sector = sector;
io->error = 0;
io->base_io = NULL;
- atomic_set(&io->pending, 0);
+ atomic_set(&io->io_pending, 0);
return io;
}
static void crypt_inc_pending(struct dm_crypt_io *io)
{
- atomic_inc(&io->pending);
+ atomic_inc(&io->io_pending);
}
/*
@@ -913,12 +902,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
*/
static void crypt_dec_pending(struct dm_crypt_io *io)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
struct bio *base_bio = io->base_bio;
struct dm_crypt_io *base_io = io->base_io;
int error = io->error;
- if (!atomic_dec_and_test(&io->pending))
+ if (!atomic_dec_and_test(&io->io_pending))
return;
mempool_free(io, cc->io_pool);
@@ -952,7 +941,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
static void crypt_endio(struct bio *clone, int error)
{
struct dm_crypt_io *io = clone->bi_private;
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
unsigned rw = bio_data_dir(clone);
if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
@@ -979,7 +968,7 @@ static void crypt_endio(struct bio *clone, int error)
static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
@@ -990,7 +979,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
struct bio *base_bio = io->base_bio;
struct bio *clone;
@@ -1038,7 +1027,7 @@ static void kcryptd_io(struct work_struct *work)
static void kcryptd_queue_io(struct dm_crypt_io *io)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
INIT_WORK(&io->work, kcryptd_io);
queue_work(cc->io_queue, &io->work);
@@ -1047,7 +1036,7 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
{
struct bio *clone = io->ctx.bio_out;
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
if (unlikely(io->error < 0)) {
crypt_free_buffer_pages(cc, clone);
@@ -1069,7 +1058,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
struct bio *clone;
struct dm_crypt_io *new_io;
int crypt_finished;
@@ -1107,7 +1096,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
if (r < 0)
io->error = -EIO;
- crypt_finished = atomic_dec_and_test(&io->ctx.pending);
+ crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
/* Encryption was already finished, submit io now */
if (crypt_finished) {
@@ -1135,7 +1124,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
* between fragments, so switch to a new dm_crypt_io structure.
*/
if (unlikely(!crypt_finished && remaining)) {
- new_io = crypt_io_alloc(io->target, io->base_bio,
+ new_io = crypt_io_alloc(io->cc, io->base_bio,
sector);
crypt_inc_pending(new_io);
crypt_convert_init(cc, &new_io->ctx, NULL,
@@ -1169,7 +1158,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
int r = 0;
crypt_inc_pending(io);
@@ -1181,7 +1170,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
if (r < 0)
io->error = -EIO;
- if (atomic_dec_and_test(&io->ctx.pending))
+ if (atomic_dec_and_test(&io->ctx.cc_pending))
kcryptd_crypt_read_done(io);
crypt_dec_pending(io);
@@ -1193,7 +1182,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
struct dm_crypt_request *dmreq = async_req->data;
struct convert_context *ctx = dmreq->ctx;
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
if (error == -EINPROGRESS) {
complete(&ctx->restart);
@@ -1208,7 +1197,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
- if (!atomic_dec_and_test(&ctx->pending))
+ if (!atomic_dec_and_test(&ctx->cc_pending))
return;
if (bio_data_dir(io->base_bio) == READ)
@@ -1229,7 +1218,7 @@ static void kcryptd_crypt(struct work_struct *work)
static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
- struct crypt_config *cc = io->target->private;
+ struct crypt_config *cc = io->cc;
INIT_WORK(&io->work, kcryptd_crypt);
queue_work(cc->crypt_queue, &io->work);
@@ -1241,7 +1230,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
char buffer[3];
- char *endp;
unsigned int i;
buffer[2] = '\0';
@@ -1250,9 +1238,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
buffer[0] = *hex++;
buffer[1] = *hex++;
- key[i] = (u8)simple_strtoul(buffer, &endp, 16);
-
- if (endp != &buffer[2])
+ if (kstrtou8(buffer, 16, &key[i]))
return -EINVAL;
}
@@ -1276,29 +1262,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
}
}
-static void crypt_free_tfms(struct crypt_config *cc, int cpu)
+static void crypt_free_tfms(struct crypt_config *cc)
{
- struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
unsigned i;
+ if (!cc->tfms)
+ return;
+
for (i = 0; i < cc->tfms_count; i++)
- if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) {
- crypto_free_ablkcipher(cpu_cc->tfms[i]);
- cpu_cc->tfms[i] = NULL;
+ if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
+ crypto_free_ablkcipher(cc->tfms[i]);
+ cc->tfms[i] = NULL;
}
+
+ kfree(cc->tfms);
+ cc->tfms = NULL;
}
-static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
+static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
{
- struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu);
unsigned i;
int err;
+ cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+ GFP_KERNEL);
+ if (!cc->tfms)
+ return -ENOMEM;
+
for (i = 0; i < cc->tfms_count; i++) {
- cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
- if (IS_ERR(cpu_cc->tfms[i])) {
- err = PTR_ERR(cpu_cc->tfms[i]);
- crypt_free_tfms(cc, cpu);
+ cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+ if (IS_ERR(cc->tfms[i])) {
+ err = PTR_ERR(cc->tfms[i]);
+ crypt_free_tfms(cc);
return err;
}
}
@@ -1309,15 +1304,14 @@ static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode)
static int crypt_setkey_allcpus(struct crypt_config *cc)
{
unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
- int cpu, err = 0, i, r;
-
- for_each_possible_cpu(cpu) {
- for (i = 0; i < cc->tfms_count; i++) {
- r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i],
- cc->key + (i * subkey_size), subkey_size);
- if (r)
- err = r;
- }
+ int err = 0, i, r;
+
+ for (i = 0; i < cc->tfms_count; i++) {
+ r = crypto_ablkcipher_setkey(cc->tfms[i],
+ cc->key + (i * subkey_size),
+ subkey_size);
+ if (r)
+ err = r;
}
return err;
@@ -1379,9 +1373,10 @@ static void crypt_dtr(struct dm_target *ti)
cpu_cc = per_cpu_ptr(cc->cpu, cpu);
if (cpu_cc->req)
mempool_free(cpu_cc->req, cc->req_pool);
- crypt_free_tfms(cc, cpu);
}
+ crypt_free_tfms(cc);
+
if (cc->bs)
bioset_free(cc->bs);
@@ -1414,7 +1409,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
struct crypt_config *cc = ti->private;
char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
char *cipher_api = NULL;
- int cpu, ret = -EINVAL;
+ int ret = -EINVAL;
char dummy;
/* Convert to crypto api definition? */
@@ -1455,8 +1450,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
if (tmp)
DMWARN("Ignoring unexpected additional cipher options");
- cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) +
- cc->tfms_count * sizeof(*(cc->cpu->tfms)),
+ cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
__alignof__(struct crypt_cpu));
if (!cc->cpu) {
ti->error = "Cannot allocate per cpu state";
@@ -1489,12 +1483,10 @@ static int crypt_ctr_cipher(struct dm_target *ti,
}
/* Allocate cipher */
- for_each_possible_cpu(cpu) {
- ret = crypt_alloc_tfms(cc, cpu, cipher_api);
- if (ret < 0) {
- ti->error = "Error allocating crypto tfm";
- goto bad;
- }
+ ret = crypt_alloc_tfms(cc, cipher_api);
+ if (ret < 0) {
+ ti->error = "Error allocating crypto tfm";
+ goto bad;
}
/* Initialize and set key */
@@ -1702,7 +1694,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
- ti->discard_zeroes_data_unsupported = 1;
+ ti->discard_zeroes_data_unsupported = true;
return 0;
@@ -1715,7 +1707,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
struct dm_crypt_io *io;
- struct crypt_config *cc;
+ struct crypt_config *cc = ti->private;
/*
* If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
@@ -1723,14 +1715,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
* - for REQ_DISCARD caller must use flush if IO ordering matters
*/
if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
- cc = ti->private;
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
return DM_MAPIO_REMAPPED;
}
- io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
+ io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
if (bio_data_dir(io->base_bio) == READ) {
if (kcryptd_io_read(io, GFP_NOWAIT))
@@ -1742,7 +1733,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
}
static int crypt_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct crypt_config *cc = ti->private;
unsigned int sz = 0;
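
The dm-crypt hunks above collapse the per-CPU tfm arrays into one array shared by every CPU, so allocation, key setup and teardown happen once instead of under for_each_possible_cpu(). A minimal userspace sketch of the same allocate-and-unwind pattern, with a made-up fake_tfm standing in for the crypto API handle:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's crypto tfm handle (illustrative only). */
struct fake_tfm { unsigned char key[32]; };

struct crypt_cfg {
	unsigned tfms_count;
	struct fake_tfm **tfms;		/* one array, shared by all CPUs */
};

static void free_tfms(struct crypt_cfg *cc)
{
	unsigned i;

	if (!cc->tfms)
		return;
	for (i = 0; i < cc->tfms_count; i++)
		free(cc->tfms[i]);
	free(cc->tfms);
	cc->tfms = NULL;
}

static int alloc_tfms(struct crypt_cfg *cc)
{
	unsigned i;

	cc->tfms = calloc(cc->tfms_count, sizeof(*cc->tfms));
	if (!cc->tfms)
		return -1;

	for (i = 0; i < cc->tfms_count; i++) {
		cc->tfms[i] = calloc(1, sizeof(**cc->tfms));
		if (!cc->tfms[i]) {
			free_tfms(cc);	/* unwind like crypt_free_tfms() */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct crypt_cfg cc = { .tfms_count = 4 };

	if (alloc_tfms(&cc))
		return 1;
	printf("allocated %u tfms once, not once per CPU\n", cc.tfms_count);
	free_tfms(&cc);
	return 0;
}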
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2dc22dddb2ae..f53846f9ab50 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -295,7 +295,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio,
}
static int delay_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index aa70f7d43a1a..ebaa4f803eec 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -142,24 +142,19 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
static int set_chunk_size(struct dm_exception_store *store,
const char *chunk_size_arg, char **error)
{
- unsigned long chunk_size_ulong;
- char *value;
+ unsigned chunk_size;
- chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
- if (*chunk_size_arg == '\0' || *value != '\0' ||
- chunk_size_ulong > UINT_MAX) {
+ if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
*error = "Invalid chunk size";
return -EINVAL;
}
- if (!chunk_size_ulong) {
+ if (!chunk_size) {
store->chunk_size = store->chunk_mask = store->chunk_shift = 0;
return 0;
}
- return dm_exception_store_set_chunk_size(store,
- (unsigned) chunk_size_ulong,
- error);
+ return dm_exception_store_set_chunk_size(store, chunk_size, error);
}
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
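
set_chunk_size() now leans on kstrtouint() for a strict decimal parse plus range check in one call, replacing the simple_strtoul()/UINT_MAX checks. A userspace approximation of roughly what kstrtouint() guarantees (the helper name parse_chunk_size is made up):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Roughly what kstrtouint(arg, 10, &out) guarantees: non-empty input,
 * decimal digits only, value no larger than UINT_MAX.
 */
static int parse_chunk_size(const char *arg, unsigned *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(arg, &end, 10);
	if (arg[0] == '\0' || *end != '\0' || errno || v > UINT_MAX)
		return -EINVAL;
	*out = (unsigned)v;
	return 0;
}

int main(void)
{
	unsigned cs = 0;

	printf("\"512\"  -> %d\n", parse_chunk_size("512", &cs));	/* 0 */
	printf("\"512k\" -> %d\n", parse_chunk_size("512k", &cs));	/* -22 (-EINVAL) */
	return 0;
}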
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index ac49c01f1a44..cc15543a6ad7 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -333,7 +333,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
}
static int flakey_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index a1a3e6df17b8..afd95986d099 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1054,6 +1054,7 @@ static void retrieve_status(struct dm_table *table,
char *outbuf, *outptr;
status_type_t type;
size_t remaining, len, used = 0;
+ unsigned status_flags = 0;
outptr = outbuf = get_result_buffer(param, param_size, &len);
@@ -1090,7 +1091,9 @@ static void retrieve_status(struct dm_table *table,
/* Get the status/table string from the target driver */
if (ti->type->status) {
- if (ti->type->status(ti, type, outptr, remaining)) {
+ if (param->flags & DM_NOFLUSH_FLAG)
+ status_flags |= DM_STATUS_NOFLUSH_FLAG;
+ if (ti->type->status(ti, type, status_flags, outptr, remaining)) {
param->flags |= DM_BUFFER_FULL_FLAG;
break;
}
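
retrieve_status() above now translates the ioctl-level DM_NOFLUSH_FLAG into the target-level DM_STATUS_NOFLUSH_FLAG before calling each target's status method. A trivial sketch of that translation; the flag values used here are illustrative only:

#include <stdio.h>

#define NOFLUSH_IOCTL_FLAG	(1u << 11)	/* illustrative ioctl-facing bit */
#define NOFLUSH_STATUS_FLAG	(1u << 0)	/* illustrative target-facing bit */

static unsigned ioctl_to_status_flags(unsigned ioctl_flags)
{
	unsigned status_flags = 0;

	if (ioctl_flags & NOFLUSH_IOCTL_FLAG)
		status_flags |= NOFLUSH_STATUS_FLAG;
	return status_flags;
}

int main(void)
{
	printf("status_flags = 0x%x\n", ioctl_to_status_flags(NOFLUSH_IOCTL_FLAG));
	return 0;
}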
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3639eeab6042..1bf19a93eef0 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -96,7 +96,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio,
}
static int linear_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 65ebaebf502b..627d19186d5a 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -571,16 +571,6 @@ static void disk_dtr(struct dm_dirty_log *log)
destroy_log_context(lc);
}
-static int count_bits32(uint32_t *addr, unsigned size)
-{
- int count = 0, i;
-
- for (i = 0; i < size; i++) {
- count += hweight32(*(addr+i));
- }
- return count;
-}
-
static void fail_log_device(struct log_c *lc)
{
if (lc->log_dev_failed)
@@ -629,7 +619,8 @@ static int disk_resume(struct dm_dirty_log *log)
/* copy clean across to sync */
memcpy(lc->sync_bits, lc->clean_bits, size);
- lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
+ lc->sync_count = memweight(lc->clean_bits,
+ lc->bitset_uint32_count * sizeof(uint32_t));
lc->sync_search = 0;
/* set the correct number of regions in the header */
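
disk_resume() now counts the set bits in the clean bitmap with memweight() over the raw byte length instead of the open-coded count_bits32(). A userspace sketch of an equivalent byte-wise population count (popcount_bytes is a stand-in, not the kernel helper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Count set bits in an arbitrary byte buffer, like the kernel's memweight(). */
static size_t popcount_bytes(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t bits = 0;

	while (len--)
		bits += __builtin_popcount(*p++);
	return bits;
}

int main(void)
{
	uint32_t clean_bits[2] = { 0xF0F0F0F0u, 0x00000001u };

	printf("sync_count = %zu\n",
	       popcount_bytes(clean_bits, sizeof(clean_bits)));	/* 17 */
	return 0;
}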
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 638dae048b4f..d8abb90a6c2f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -85,6 +85,7 @@ struct multipath {
unsigned queue_io:1; /* Must we queue all I/O? */
unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+ unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
@@ -568,6 +569,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
int r;
struct pgpath *p;
struct multipath *m = ti->private;
+ struct request_queue *q = NULL;
+ const char *attached_handler_name;
/* we need at least a path arg */
if (as->argc < 1) {
@@ -586,13 +589,37 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
goto bad;
}
- if (m->hw_handler_name) {
- struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
+ if (m->retain_attached_hw_handler || m->hw_handler_name)
+ q = bdev_get_queue(p->path.dev->bdev);
+
+ if (m->retain_attached_hw_handler) {
+ attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+ if (attached_handler_name) {
+ /*
+ * Reset hw_handler_name to match the attached handler
+ * and clear any hw_handler_params associated with the
+ * ignored handler.
+ *
+ * NB. This modifies the table line to show the actual
+ * handler instead of the original table passed in.
+ */
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = attached_handler_name;
+
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
+ }
+ }
+ if (m->hw_handler_name) {
+ /*
+ * Increments scsi_dh reference, even when using an
+ * already-attached handler.
+ */
r = scsi_dh_attach(q, m->hw_handler_name);
if (r == -EBUSY) {
/*
- * Already attached to different hw_handler,
+ * Already attached to different hw_handler:
* try to reattach with correct one.
*/
scsi_dh_detach(q);
@@ -760,7 +787,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
const char *arg_name;
static struct dm_arg _args[] = {
- {0, 5, "invalid number of feature args"},
+ {0, 6, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
};
@@ -781,6 +808,11 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
continue;
}
+ if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
+ m->retain_attached_hw_handler = 1;
+ continue;
+ }
+
if (!strcasecmp(arg_name, "pg_init_retries") &&
(argc >= 1)) {
r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
@@ -1346,7 +1378,7 @@ static void multipath_resume(struct dm_target *ti)
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
static int multipath_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
int sz = 0;
unsigned long flags;
@@ -1364,13 +1396,16 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
else {
DMEMIT("%u ", m->queue_if_no_path +
(m->pg_init_retries > 0) * 2 +
- (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
+ (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
+ m->retain_attached_hw_handler);
if (m->queue_if_no_path)
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
+ if (m->retain_attached_hw_handler)
+ DMEMIT("retain_attached_hw_handler ");
}
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
@@ -1656,7 +1691,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
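
multipath_status() emits the number of feature arguments before the arguments themselves, so the hunks above add retain_attached_hw_handler to both the count expression and the output, and raise the parse-time maximum to 6. A tiny sketch of that count arithmetic, with plain ints standing in for the multipath bitfields:

#include <stdio.h>

int main(void)
{
	/* Stand-ins for the struct multipath fields (illustrative values). */
	int queue_if_no_path = 1;
	int pg_init_retries = 3;		/* takes a value: 2 args */
	int pg_init_delay_nondefault = 1;	/* takes a value: 2 args */
	int retain_attached_hw_handler = 1;	/* flag only:     1 arg  */

	unsigned nr_feature_args =
		queue_if_no_path +
		(pg_init_retries > 0) * 2 +
		pg_init_delay_nondefault * 2 +
		retain_attached_hw_handler;

	printf("%u feature args\n", nr_feature_args);	/* 6, the new _args max */
	return 0;
}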
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 017c34d78d61..982e3e390c45 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -11,6 +11,7 @@
#include "md.h"
#include "raid1.h"
#include "raid5.h"
+#include "raid10.h"
#include "bitmap.h"
#include <linux/device-mapper.h>
@@ -52,7 +53,10 @@ struct raid_dev {
#define DMPF_MAX_RECOVERY_RATE 0x20
#define DMPF_MAX_WRITE_BEHIND 0x40
#define DMPF_STRIPE_CACHE 0x80
-#define DMPF_REGION_SIZE 0X100
+#define DMPF_REGION_SIZE 0x100
+#define DMPF_RAID10_COPIES 0x200
+#define DMPF_RAID10_FORMAT 0x400
+
struct raid_set {
struct dm_target *ti;
@@ -76,6 +80,7 @@ static struct raid_type {
const unsigned algorithm; /* RAID algorithm. */
} raid_types[] = {
{"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */},
+ {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */},
{"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0},
{"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
{"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
@@ -86,6 +91,17 @@ static struct raid_type {
{"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
};
+static unsigned raid10_md_layout_to_copies(int layout)
+{
+ return layout & 0xFF;
+}
+
+static int raid10_format_to_md_layout(char *format, unsigned copies)
+{
+ /* 1 "far" copy, and 'copies' "near" copies */
+ return (1 << 8) | (copies & 0xFF);
+}
+
static struct raid_type *get_raid_type(char *name)
{
int i;
@@ -101,20 +117,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
{
unsigned i;
struct raid_set *rs;
- sector_t sectors_per_dev;
if (raid_devs <= raid_type->parity_devs) {
ti->error = "Insufficient number of devices";
return ERR_PTR(-EINVAL);
}
- sectors_per_dev = ti->len;
- if ((raid_type->level > 1) &&
- sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) {
- ti->error = "Target length not divisible by number of data devices";
- return ERR_PTR(-EINVAL);
- }
-
rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
if (!rs) {
ti->error = "Cannot allocate raid context";
@@ -128,7 +136,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
rs->md.raid_disks = raid_devs;
rs->md.level = raid_type->level;
rs->md.new_level = rs->md.level;
- rs->md.dev_sectors = sectors_per_dev;
rs->md.layout = raid_type->algorithm;
rs->md.new_layout = rs->md.layout;
rs->md.delta_disks = 0;
@@ -143,6 +150,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra
* rs->md.external
* rs->md.chunk_sectors
* rs->md.new_chunk_sectors
+ * rs->md.dev_sectors
*/
return rs;
@@ -347,12 +355,20 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
* [max_write_behind <sectors>] See '-write-behind=' (man mdadm)
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
* [region_size <sectors>] Defines granularity of bitmap
+ *
+ * RAID10-only options:
+ * [raid10_copies <# copies>] Number of copies. (Default: 2)
+ * [raid10_format <near>] Layout algorithm. (Default: near)
*/
static int parse_raid_params(struct raid_set *rs, char **argv,
unsigned num_raid_params)
{
+ char *raid10_format = "near";
+ unsigned raid10_copies = 2;
unsigned i, rebuild_cnt = 0;
unsigned long value, region_size = 0;
+ sector_t sectors_per_dev = rs->ti->len;
+ sector_t max_io_len;
char *key;
/*
@@ -422,20 +438,53 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
}
key = argv[i++];
+
+ /* Parameters that take a string value are checked here. */
+ if (!strcasecmp(key, "raid10_format")) {
+ if (rs->raid_type->level != 10) {
+ rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
+ return -EINVAL;
+ }
+ if (strcmp("near", argv[i])) {
+ rs->ti->error = "Invalid 'raid10_format' value given";
+ return -EINVAL;
+ }
+ raid10_format = argv[i];
+ rs->print_flags |= DMPF_RAID10_FORMAT;
+ continue;
+ }
+
if (strict_strtoul(argv[i], 10, &value) < 0) {
rs->ti->error = "Bad numerical argument given in raid params";
return -EINVAL;
}
+ /* Parameters that take a numeric value are checked here */
if (!strcasecmp(key, "rebuild")) {
rebuild_cnt++;
- if (((rs->raid_type->level != 1) &&
- (rebuild_cnt > rs->raid_type->parity_devs)) ||
- ((rs->raid_type->level == 1) &&
- (rebuild_cnt > (rs->md.raid_disks - 1)))) {
- rs->ti->error = "Too many rebuild devices specified for given RAID type";
+
+ switch (rs->raid_type->level) {
+ case 1:
+ if (rebuild_cnt >= rs->md.raid_disks) {
+ rs->ti->error = "Too many rebuild devices specified";
+ return -EINVAL;
+ }
+ break;
+ case 4:
+ case 5:
+ case 6:
+ if (rebuild_cnt > rs->raid_type->parity_devs) {
+ rs->ti->error = "Too many rebuild devices specified for given RAID type";
+ return -EINVAL;
+ }
+ break;
+ case 10:
+ default:
+ DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name);
+ rs->ti->error = "Rebuild not supported for this RAID type";
return -EINVAL;
}
+
if (value > rs->md.raid_disks) {
rs->ti->error = "Invalid rebuild index given";
return -EINVAL;
@@ -486,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
*/
value /= 2;
- if (rs->raid_type->level < 5) {
+ if ((rs->raid_type->level != 5) &&
+ (rs->raid_type->level != 6)) {
rs->ti->error = "Inappropriate argument: stripe_cache";
return -EINVAL;
}
@@ -511,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
} else if (!strcasecmp(key, "region_size")) {
rs->print_flags |= DMPF_REGION_SIZE;
region_size = value;
+ } else if (!strcasecmp(key, "raid10_copies") &&
+ (rs->raid_type->level == 10)) {
+ if ((value < 2) || (value > 0xFF)) {
+ rs->ti->error = "Bad value for 'raid10_copies'";
+ return -EINVAL;
+ }
+ rs->print_flags |= DMPF_RAID10_COPIES;
+ raid10_copies = value;
} else {
DMERR("Unable to parse RAID parameter: %s", key);
rs->ti->error = "Unable to parse RAID parameters";
@@ -522,14 +580,33 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
return -EINVAL;
if (rs->md.chunk_sectors)
- rs->ti->split_io = rs->md.chunk_sectors;
+ max_io_len = rs->md.chunk_sectors;
else
- rs->ti->split_io = region_size;
+ max_io_len = region_size;
- if (rs->md.chunk_sectors)
- rs->ti->split_io = rs->md.chunk_sectors;
- else
- rs->ti->split_io = region_size;
+ if (dm_set_target_max_io_len(rs->ti, max_io_len))
+ return -EINVAL;
+
+ if (rs->raid_type->level == 10) {
+ if (raid10_copies > rs->md.raid_disks) {
+ rs->ti->error = "Not enough devices to satisfy specification";
+ return -EINVAL;
+ }
+
+ /* (Len * #mirrors) / #devices */
+ sectors_per_dev = rs->ti->len * raid10_copies;
+ sector_div(sectors_per_dev, rs->md.raid_disks);
+
+ rs->md.layout = raid10_format_to_md_layout(raid10_format,
+ raid10_copies);
+ rs->md.new_layout = rs->md.layout;
+ } else if ((rs->raid_type->level > 1) &&
+ sector_div(sectors_per_dev,
+ (rs->md.raid_disks - rs->raid_type->parity_devs))) {
+ rs->ti->error = "Target length not divisible by number of data devices";
+ return -EINVAL;
+ }
+ rs->md.dev_sectors = sectors_per_dev;
/* Assume there are no metadata devices until the drives are parsed */
rs->md.persistent = 0;
@@ -552,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
if (rs->raid_type->level == 1)
return md_raid1_congested(&rs->md, bits);
+ if (rs->raid_type->level == 10)
+ return md_raid10_congested(&rs->md, bits);
+
return md_raid5_congested(&rs->md, bits);
}
@@ -870,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
case 6:
redundancy = rs->raid_type->parity_devs;
break;
+ case 10:
+ redundancy = raid10_md_layout_to_copies(mddev->layout) - 1;
+ break;
default:
ti->error = "Unknown RAID type";
return -EINVAL;
@@ -1035,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ if (ti->len != rs->md.array_sectors) {
+ ti->error = "Array size does not match requested target length";
+ ret = -EINVAL;
+ goto size_mismatch;
+ }
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
mddev_suspend(&rs->md);
return 0;
+size_mismatch:
+ md_stop(&rs->md);
bad:
context_free(rs);
@@ -1067,7 +1157,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_c
}
static int raid_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct raid_set *rs = ti->private;
unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -1189,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" region_size %lu",
rs->md.bitmap_info.chunksize >> 9);
+ if (rs->print_flags & DMPF_RAID10_COPIES)
+ DMEMIT(" raid10_copies %u",
+ raid10_md_layout_to_copies(rs->md.layout));
+
+ if (rs->print_flags & DMPF_RAID10_FORMAT)
+ DMEMIT(" raid10_format near");
+
DMEMIT(" %d", rs->md.raid_disks);
for (i = 0; i < rs->md.raid_disks; i++) {
if (rs->dev[i].meta_dev)
@@ -1263,7 +1360,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
@@ -1290,6 +1387,8 @@ module_init(dm_raid_init);
module_exit(dm_raid_exit);
MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target");
+MODULE_ALIAS("dm-raid1");
+MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index b58b7a33914a..bc5ddba8045b 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1081,10 +1081,14 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = ms;
- ti->split_io = dm_rh_get_region_size(ms->rh);
+
+ r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
+ if (r)
+ goto err_free_context;
+
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
- ti->discard_zeroes_data_unsupported = 1;
+ ti->discard_zeroes_data_unsupported = true;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1363,7 +1367,7 @@ static char device_status_char(struct mirror *m)
static int mirror_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6f758870fc19..a143921feaf6 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -691,7 +691,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
* Return a minimum chunk size of all snapshots that have the specified origin.
* Return zero if the origin has no snapshots.
*/
-static sector_t __minimum_chunk_size(struct origin *o)
+static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
unsigned chunk_size = 0;
@@ -701,7 +701,7 @@ static sector_t __minimum_chunk_size(struct origin *o)
chunk_size = min_not_zero(chunk_size,
snap->store->chunk_size);
- return chunk_size;
+ return (uint32_t) chunk_size;
}
/*
@@ -1172,7 +1172,10 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->error = "Chunk size not set";
goto bad_read_metadata;
}
- ti->split_io = s->store->chunk_size;
+
+ r = dm_set_target_max_io_len(ti, s->store->chunk_size);
+ if (r)
+ goto bad_read_metadata;
return 0;
@@ -1239,7 +1242,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
snap_dest->store->snap = snap_dest;
snap_src->store->snap = snap_src;
- snap_dest->ti->split_io = snap_dest->store->chunk_size;
+ snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
snap_dest->valid = snap_src->valid;
/*
@@ -1817,9 +1820,9 @@ static void snapshot_resume(struct dm_target *ti)
up_write(&s->lock);
}
-static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
+static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
- sector_t min_chunksize;
+ uint32_t min_chunksize;
down_read(&_origins_lock);
min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
@@ -1838,15 +1841,15 @@ static void snapshot_merge_resume(struct dm_target *ti)
snapshot_resume(ti);
/*
- * snapshot-merge acts as an origin, so set ti->split_io
+ * snapshot-merge acts as an origin, so set ti->max_io_len
*/
- ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);
+ ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);
start_merge(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned int maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
@@ -2073,12 +2076,12 @@ static int origin_write_extent(struct dm_snapshot *merging_snap,
struct origin *o;
/*
- * The origin's __minimum_chunk_size() got stored in split_io
+ * The origin's __minimum_chunk_size() got stored in max_io_len
* by snapshot_merge_resume().
*/
down_read(&_origins_lock);
o = __lookup_origin(merging_snap->origin->bdev);
- for (n = 0; n < size; n += merging_snap->ti->split_io)
+ for (n = 0; n < size; n += merging_snap->ti->max_io_len)
if (__origin_write(&o->snapshots, sector + n, NULL) ==
DM_MAPIO_SUBMITTED)
must_wait = 1;
@@ -2138,18 +2141,18 @@ static int origin_map(struct dm_target *ti, struct bio *bio,
}
/*
- * Set the target "split_io" field to the minimum of all the snapshots'
+ * Set the target "max_io_len" field to the minimum of all the snapshots'
* chunk sizes.
*/
static void origin_resume(struct dm_target *ti)
{
struct dm_dev *dev = ti->private;
- ti->split_io = get_origin_minimum_chunksize(dev->bdev);
+ ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
-static int origin_status(struct dm_target *ti, status_type_t type, char *result,
- unsigned int maxlen)
+static int origin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_dev *dev = ti->private;
@@ -2176,7 +2179,6 @@ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return max_size;
bvm->bi_bdev = dev->bdev;
- bvm->bi_sector = bvm->bi_sector;
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
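
origin_resume() still derives the origin's I/O limit from the smallest non-zero chunk size of all snapshots; only the field changed from split_io to max_io_len. A sketch of that min_not_zero() reduction over a plain array instead of the origin's snapshot list:

#include <stdint.h>
#include <stdio.h>

/* min_not_zero(): smallest non-zero value, 0 only if every input is 0. */
static uint32_t min_not_zero_u32(uint32_t a, uint32_t b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	uint32_t chunk_sizes[] = { 0, 16, 8, 0, 32 };	/* sectors */
	uint32_t min_chunk = 0;

	for (unsigned i = 0; i < sizeof(chunk_sizes) / sizeof(chunk_sizes[0]); i++)
		min_chunk = min_not_zero_u32(min_chunk, chunk_sizes[i]);

	printf("max_io_len = %u sectors\n", min_chunk);	/* 8 */
	return 0;
}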
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 35c94ff24ad5..a087bf2a8d66 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -26,14 +26,12 @@ struct stripe {
struct stripe_c {
uint32_t stripes;
int stripes_shift;
- sector_t stripes_mask;
/* The size of this target / num. stripes */
sector_t stripe_width;
- /* stripe chunk size */
- uint32_t chunk_shift;
- sector_t chunk_mask;
+ uint32_t chunk_size;
+ int chunk_size_shift;
/* Needed for handling events */
struct dm_target *ti;
@@ -91,7 +89,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
/*
* Construct a striped mapping.
- * <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+
+ * <number of stripes> <chunk size> [<dev_path> <offset>]+
*/
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -99,7 +97,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sector_t width;
uint32_t stripes;
uint32_t chunk_size;
- char *end;
int r;
unsigned int i;
@@ -108,34 +105,23 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- stripes = simple_strtoul(argv[0], &end, 10);
- if (!stripes || *end) {
+ if (kstrtouint(argv[0], 10, &stripes) || !stripes) {
ti->error = "Invalid stripe count";
return -EINVAL;
}
- chunk_size = simple_strtoul(argv[1], &end, 10);
- if (*end) {
+ if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) {
ti->error = "Invalid chunk_size";
return -EINVAL;
}
- /*
- * chunk_size is a power of two
- */
- if (!is_power_of_2(chunk_size) ||
- (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
- ti->error = "Invalid chunk size";
- return -EINVAL;
- }
-
- if (ti->len & (chunk_size - 1)) {
+ width = ti->len;
+ if (sector_div(width, chunk_size)) {
ti->error = "Target length not divisible by "
"chunk size";
return -EINVAL;
}
- width = ti->len;
if (sector_div(width, stripes)) {
ti->error = "Target length not divisible by "
"number of stripes";
@@ -167,17 +153,21 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (stripes & (stripes - 1))
sc->stripes_shift = -1;
- else {
- sc->stripes_shift = ffs(stripes) - 1;
- sc->stripes_mask = ((sector_t) stripes) - 1;
- }
+ else
+ sc->stripes_shift = __ffs(stripes);
+
+ r = dm_set_target_max_io_len(ti, chunk_size);
+ if (r)
+ return r;
- ti->split_io = chunk_size;
ti->num_flush_requests = stripes;
ti->num_discard_requests = stripes;
- sc->chunk_shift = ffs(chunk_size) - 1;
- sc->chunk_mask = ((sector_t) chunk_size) - 1;
+ sc->chunk_size = chunk_size;
+ if (chunk_size & (chunk_size - 1))
+ sc->chunk_size_shift = -1;
+ else
+ sc->chunk_size_shift = __ffs(chunk_size);
/*
* Get the stripe destinations.
@@ -216,17 +206,29 @@ static void stripe_dtr(struct dm_target *ti)
static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
uint32_t *stripe, sector_t *result)
{
- sector_t offset = dm_target_offset(sc->ti, sector);
- sector_t chunk = offset >> sc->chunk_shift;
+ sector_t chunk = dm_target_offset(sc->ti, sector);
+ sector_t chunk_offset;
+
+ if (sc->chunk_size_shift < 0)
+ chunk_offset = sector_div(chunk, sc->chunk_size);
+ else {
+ chunk_offset = chunk & (sc->chunk_size - 1);
+ chunk >>= sc->chunk_size_shift;
+ }
if (sc->stripes_shift < 0)
*stripe = sector_div(chunk, sc->stripes);
else {
- *stripe = chunk & sc->stripes_mask;
+ *stripe = chunk & (sc->stripes - 1);
chunk >>= sc->stripes_shift;
}
- *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
+ if (sc->chunk_size_shift < 0)
+ chunk *= sc->chunk_size;
+ else
+ chunk <<= sc->chunk_size_shift;
+
+ *result = chunk + chunk_offset;
}
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
@@ -237,9 +239,16 @@ static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
stripe_map_sector(sc, sector, &stripe, result);
if (stripe == target_stripe)
return;
- *result &= ~sc->chunk_mask; /* round down */
+
+ /* round down */
+ sector = *result;
+ if (sc->chunk_size_shift < 0)
+ *result -= sector_div(sector, sc->chunk_size);
+ else
+ *result = sector & ~(sector_t)(sc->chunk_size - 1);
+
if (target_stripe < stripe)
- *result += sc->chunk_mask + 1; /* next chunk */
+ *result += sc->chunk_size; /* next chunk */
}
static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
@@ -302,8 +311,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio,
*
*/
-static int stripe_status(struct dm_target *ti,
- status_type_t type, char *result, unsigned int maxlen)
+static int stripe_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
char buffer[sc->stripes + 1];
@@ -324,7 +333,7 @@ static int stripe_status(struct dm_target *ti,
case STATUSTYPE_TABLE:
DMEMIT("%d %llu", sc->stripes,
- (unsigned long long)sc->chunk_mask + 1);
+ (unsigned long long)sc->chunk_size);
for (i = 0; i < sc->stripes; i++)
DMEMIT(" %s %llu", sc->stripe[i].dev->name,
(unsigned long long)sc->stripe[i].physical_start);
@@ -391,7 +400,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+ unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * sc->stripes);
@@ -419,7 +428,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
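
With power-of-two chunk sizes no longer required, stripe_map_sector() chooses between shift/mask arithmetic and sector_div() at run time. A userspace sketch of the same mapping with ordinary 64-bit division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

struct stripe_map { uint32_t stripes, chunk_size; };

/* Map a target-relative sector to (stripe index, sector within that stripe). */
static void map_sector(const struct stripe_map *sc, uint64_t offset,
		       uint32_t *stripe, uint64_t *result)
{
	uint64_t chunk = offset / sc->chunk_size;
	uint64_t chunk_offset = offset % sc->chunk_size;

	*stripe = chunk % sc->stripes;		/* which device */
	chunk /= sc->stripes;			/* chunk index on that device */
	*result = chunk * sc->chunk_size + chunk_offset;
}

int main(void)
{
	struct stripe_map sc = { .stripes = 3, .chunk_size = 24 };	/* not 2^n */
	uint32_t stripe;
	uint64_t result;

	map_sector(&sc, 100, &stripe, &result);
	printf("sector 100 -> stripe %u, sector %llu\n",
	       stripe, (unsigned long long)result);	/* stripe 1, sector 28 */
	return 0;
}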
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2e227fbf1622..f90069029aae 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1319,6 +1319,9 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
if (!ti->num_flush_requests)
continue;
+ if (ti->flush_supported)
+ return 1;
+
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_flush_capable, &flush))
return 1;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 3e2907f0bc46..693e149e9727 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Red Hat, Inc.
+ * Copyright (C) 2011-2012 Red Hat, Inc.
*
* This file is released under the GPL.
*/
@@ -80,6 +80,12 @@
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
+/*
+ * 3 for btree insert +
+ * 2 for btree lookup used within space map
+ */
+#define THIN_MAX_CONCURRENT_LOCKS 5
+
/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128
@@ -172,13 +178,20 @@ struct dm_pool_metadata {
struct rw_semaphore root_lock;
uint32_t time;
- int need_commit;
dm_block_t root;
dm_block_t details_root;
struct list_head thin_devices;
uint64_t trans_id;
unsigned long flags;
sector_t data_block_size;
+ bool read_only:1;
+
+ /*
+ * Set if a transaction has to be aborted but the attempt to roll back
+ * to the previous (good) transaction failed. The only pool metadata
+ * operation possible in this state is the closing of the device.
+ */
+ bool fail_io:1;
};
struct dm_thin_device {
@@ -187,7 +200,8 @@ struct dm_thin_device {
dm_thin_id id;
int open_count;
- int changed;
+ bool changed:1;
+ bool aborted_with_changes:1;
uint64_t mapped_blocks;
uint64_t transaction_id;
uint32_t creation_time;
@@ -338,7 +352,21 @@ static int subtree_equal(void *context, void *value1_le, void *value2_le)
/*----------------------------------------------------------------*/
-static int superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+static int superblock_lock_zero(struct dm_pool_metadata *pmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock(struct dm_pool_metadata *pmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
int r;
unsigned i;
@@ -365,72 +393,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, int *result)
return dm_bm_unlock(b);
}
-static int init_pmd(struct dm_pool_metadata *pmd,
- struct dm_block_manager *bm,
- dm_block_t nr_blocks, int create)
+static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
- int r;
- struct dm_space_map *sm, *data_sm;
- struct dm_transaction_manager *tm;
- struct dm_block *sblock;
-
- if (create) {
- r = dm_tm_create_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, &tm, &sm, &sblock);
- if (r < 0) {
- DMERR("tm_create_with_sm failed");
- return r;
- }
-
- data_sm = dm_sm_disk_create(tm, nr_blocks);
- if (IS_ERR(data_sm)) {
- DMERR("sm_disk_create failed");
- dm_tm_unlock(tm, sblock);
- r = PTR_ERR(data_sm);
- goto bad;
- }
- } else {
- struct thin_disk_superblock *disk_super = NULL;
- size_t space_map_root_offset =
- offsetof(struct thin_disk_superblock, metadata_space_map_root);
-
- r = dm_tm_open_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, space_map_root_offset,
- SPACE_MAP_ROOT_SIZE, &tm, &sm, &sblock);
- if (r < 0) {
- DMERR("tm_open_with_sm failed");
- return r;
- }
-
- disk_super = dm_block_data(sblock);
- data_sm = dm_sm_disk_open(tm, disk_super->data_space_map_root,
- sizeof(disk_super->data_space_map_root));
- if (IS_ERR(data_sm)) {
- DMERR("sm_disk_open failed");
- r = PTR_ERR(data_sm);
- goto bad;
- }
- }
-
-
- r = dm_tm_unlock(tm, sblock);
- if (r < 0) {
- DMERR("couldn't unlock superblock");
- goto bad_data_sm;
- }
-
- pmd->bm = bm;
- pmd->metadata_sm = sm;
- pmd->data_sm = data_sm;
- pmd->tm = tm;
- pmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
- if (!pmd->nb_tm) {
- DMERR("could not create clone tm");
- r = -ENOMEM;
- goto bad_data_sm;
- }
-
- pmd->info.tm = tm;
+ pmd->info.tm = pmd->tm;
pmd->info.levels = 2;
pmd->info.value_type.context = pmd->data_sm;
pmd->info.value_type.size = sizeof(__le64);
@@ -441,7 +406,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
pmd->nb_info.tm = pmd->nb_tm;
- pmd->tl_info.tm = tm;
+ pmd->tl_info.tm = pmd->tm;
pmd->tl_info.levels = 1;
pmd->tl_info.value_type.context = &pmd->info;
pmd->tl_info.value_type.size = sizeof(__le64);
@@ -449,7 +414,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
pmd->tl_info.value_type.dec = subtree_dec;
pmd->tl_info.value_type.equal = subtree_equal;
- pmd->bl_info.tm = tm;
+ pmd->bl_info.tm = pmd->tm;
pmd->bl_info.levels = 1;
pmd->bl_info.value_type.context = pmd->data_sm;
pmd->bl_info.value_type.size = sizeof(__le64);
@@ -457,48 +422,266 @@ static int init_pmd(struct dm_pool_metadata *pmd,
pmd->bl_info.value_type.dec = data_block_dec;
pmd->bl_info.value_type.equal = data_block_equal;
- pmd->details_info.tm = tm;
+ pmd->details_info.tm = pmd->tm;
pmd->details_info.levels = 1;
pmd->details_info.value_type.context = NULL;
pmd->details_info.value_type.size = sizeof(struct disk_device_details);
pmd->details_info.value_type.inc = NULL;
pmd->details_info.value_type.dec = NULL;
pmd->details_info.value_type.equal = NULL;
+}
- pmd->root = 0;
+static int __write_initial_superblock(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ size_t metadata_len, data_len;
+ struct thin_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
- init_rwsem(&pmd->root_lock);
- pmd->time = 0;
- pmd->need_commit = 0;
- pmd->details_root = 0;
- pmd->trans_id = 0;
- pmd->flags = 0;
- INIT_LIST_HEAD(&pmd->thin_devices);
+ if (bdev_size > THIN_METADATA_MAX_SECTORS)
+ bdev_size = THIN_METADATA_MAX_SECTORS;
+
+ r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_root_size(pmd->data_sm, &data_len);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_commit(pmd->data_sm);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_pre_commit(pmd->tm);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock_zero(pmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = 0;
+ memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
+ disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
+ disk_super->version = cpu_to_le32(THIN_VERSION);
+ disk_super->time = 0;
+ disk_super->trans_id = 0;
+ disk_super->held_root = 0;
+
+ r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0)
+ goto bad_locked;
+
+ r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
+ data_len);
+ if (r < 0)
+ goto bad_locked;
+
+ disk_super->data_mapping_root = cpu_to_le64(pmd->root);
+ disk_super->device_details_root = cpu_to_le64(pmd->details_root);
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
+ disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
+
+ return dm_tm_commit(pmd->tm, sblock);
+
+bad_locked:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __format_metadata(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+
+ pmd->data_sm = dm_sm_disk_create(pmd->tm, 0);
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_create failed");
+ r = PTR_ERR(pmd->data_sm);
+ goto bad_cleanup_tm;
+ }
+
+ pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
+ if (!pmd->nb_tm) {
+ DMERR("could not create non-blocking clone tm");
+ r = -ENOMEM;
+ goto bad_cleanup_data_sm;
+ }
+
+ __setup_btree_details(pmd);
+
+ r = dm_btree_empty(&pmd->info, &pmd->root);
+ if (r < 0)
+ goto bad_cleanup_nb_tm;
+
+ r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
+ if (r < 0) {
+ DMERR("couldn't create devices root");
+ goto bad_cleanup_nb_tm;
+ }
+
+ r = __write_initial_superblock(pmd);
+ if (r)
+ goto bad_cleanup_nb_tm;
return 0;
-bad_data_sm:
- dm_sm_destroy(data_sm);
-bad:
- dm_tm_destroy(tm);
- dm_sm_destroy(sm);
+bad_cleanup_nb_tm:
+ dm_tm_destroy(pmd->nb_tm);
+bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
+bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
+ dm_sm_destroy(pmd->metadata_sm);
+
+ return r;
+}
+
+static int __check_incompat_features(struct thin_disk_superblock *disk_super,
+ struct dm_pool_metadata *pmd)
+{
+ uint32_t features;
+
+ features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ DMERR("could not access metadata due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ /*
+ * Check for read-only metadata to skip the following RDWR checks.
+ */
+ if (get_disk_ro(pmd->bdev->bd_disk))
+ return 0;
+
+ features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __open_metadata(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r < 0) {
+ DMERR("couldn't read superblock");
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+
+ r = __check_incompat_features(disk_super, pmd);
+ if (r < 0)
+ goto bad_unlock_sblock;
+
+ r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ disk_super->metadata_space_map_root,
+ sizeof(disk_super->metadata_space_map_root),
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_open_with_sm failed");
+ goto bad_unlock_sblock;
+ }
+
+ pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root,
+ sizeof(disk_super->data_space_map_root));
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_open failed");
+ r = PTR_ERR(pmd->data_sm);
+ goto bad_cleanup_tm;
+ }
+
+ pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm);
+ if (!pmd->nb_tm) {
+ DMERR("could not create non-blocking clone tm");
+ r = -ENOMEM;
+ goto bad_cleanup_data_sm;
+ }
+
+ __setup_btree_details(pmd);
+ return dm_bm_unlock(sblock);
+
+bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
+bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
+ dm_sm_destroy(pmd->metadata_sm);
+bad_unlock_sblock:
+ dm_bm_unlock(sblock);
+
+ return r;
+}
+
+static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device)
+{
+ int r, unformatted;
+
+ r = __superblock_all_zeroes(pmd->bm, &unformatted);
+ if (r)
+ return r;
+
+ if (unformatted)
+ return format_device ? __format_metadata(pmd) : -EPERM;
+
+ return __open_metadata(pmd);
+}
+
+static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device)
+{
+ int r;
+
+ pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+ THIN_METADATA_CACHE_SIZE,
+ THIN_MAX_CONCURRENT_LOCKS);
+ if (IS_ERR(pmd->bm)) {
+ DMERR("could not create block manager");
+ return PTR_ERR(pmd->bm);
+ }
+
+ r = __open_or_format_metadata(pmd, format_device);
+ if (r)
+ dm_block_manager_destroy(pmd->bm);
return r;
}
+static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd)
+{
+ dm_sm_destroy(pmd->data_sm);
+ dm_sm_destroy(pmd->metadata_sm);
+ dm_tm_destroy(pmd->nb_tm);
+ dm_tm_destroy(pmd->tm);
+ dm_block_manager_destroy(pmd->bm);
+}
+
static int __begin_transaction(struct dm_pool_metadata *pmd)
{
int r;
- u32 features;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
/*
- * __maybe_commit_transaction() resets these
- */
- WARN_ON(pmd->need_commit);
-
- /*
* We re-read the superblock every time. Shouldn't need to do this
* really.
*/
@@ -515,32 +698,8 @@ static int __begin_transaction(struct dm_pool_metadata *pmd)
pmd->flags = le32_to_cpu(disk_super->flags);
pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
- features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
- if (features) {
- DMERR("could not access metadata due to "
- "unsupported optional features (%lx).",
- (unsigned long)features);
- r = -EINVAL;
- goto out;
- }
-
- /*
- * Check for read-only metadata to skip the following RDWR checks.
- */
- if (get_disk_ro(pmd->bdev->bd_disk))
- goto out;
-
- features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
- if (features) {
- DMERR("could not access metadata RDWR due to "
- "unsupported optional features (%lx).",
- (unsigned long)features);
- r = -EINVAL;
- }
-
-out:
dm_bm_unlock(sblock);
- return r;
+ return 0;
}
static int __write_changed_details(struct dm_pool_metadata *pmd)
@@ -573,8 +732,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
list_del(&td->list);
kfree(td);
}
-
- pmd->need_commit = 1;
}
return 0;
@@ -582,9 +739,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
- /*
- * FIXME: Associated pool should be made read-only on failure.
- */
int r;
size_t metadata_len, data_len;
struct thin_disk_superblock *disk_super;
@@ -597,31 +751,27 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
r = __write_changed_details(pmd);
if (r < 0)
- goto out;
-
- if (!pmd->need_commit)
- goto out;
+ return r;
r = dm_sm_commit(pmd->data_sm);
if (r < 0)
- goto out;
+ return r;
r = dm_tm_pre_commit(pmd->tm);
if (r < 0)
- goto out;
+ return r;
r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
if (r < 0)
- goto out;
+ return r;
r = dm_sm_root_size(pmd->data_sm, &data_len);
if (r < 0)
- goto out;
+ return r;
- r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, &sblock);
+ r = superblock_lock(pmd, &sblock);
if (r)
- goto out;
+ return r;
disk_super = dm_block_data(sblock);
disk_super->time = cpu_to_le32(pmd->time);
@@ -640,12 +790,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (r < 0)
goto out_locked;
- r = dm_tm_commit(pmd->tm, sblock);
- if (!r)
- pmd->need_commit = 0;
-
-out:
- return r;
+ return dm_tm_commit(pmd->tm, sblock);
out_locked:
dm_bm_unlock(sblock);
@@ -653,15 +798,11 @@ out_locked:
}
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
- sector_t data_block_size)
+ sector_t data_block_size,
+ bool format_device)
{
int r;
- struct thin_disk_superblock *disk_super;
struct dm_pool_metadata *pmd;
- sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
- struct dm_block_manager *bm;
- int create;
- struct dm_block *sblock;
pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
if (!pmd) {
@@ -669,90 +810,28 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
return ERR_PTR(-ENOMEM);
}
- /*
- * Max hex locks:
- * 3 for btree insert +
- * 2 for btree lookup used within space map
- */
- bm = dm_block_manager_create(bdev, THIN_METADATA_BLOCK_SIZE,
- THIN_METADATA_CACHE_SIZE, 5);
- if (!bm) {
- DMERR("could not create block manager");
- kfree(pmd);
- return ERR_PTR(-ENOMEM);
- }
-
- r = superblock_all_zeroes(bm, &create);
- if (r) {
- dm_block_manager_destroy(bm);
- kfree(pmd);
- return ERR_PTR(r);
- }
-
+ init_rwsem(&pmd->root_lock);
+ pmd->time = 0;
+ INIT_LIST_HEAD(&pmd->thin_devices);
+ pmd->read_only = false;
+ pmd->fail_io = false;
+ pmd->bdev = bdev;
+ pmd->data_block_size = data_block_size;
- r = init_pmd(pmd, bm, 0, create);
+ r = __create_persistent_data_objects(pmd, format_device);
if (r) {
- dm_block_manager_destroy(bm);
kfree(pmd);
return ERR_PTR(r);
}
- pmd->bdev = bdev;
-
- if (!create) {
- r = __begin_transaction(pmd);
- if (r < 0)
- goto bad;
- return pmd;
- }
-
- /*
- * Create.
- */
- r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, &sblock);
- if (r)
- goto bad;
-
- if (bdev_size > THIN_METADATA_MAX_SECTORS)
- bdev_size = THIN_METADATA_MAX_SECTORS;
-
- disk_super = dm_block_data(sblock);
- disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
- disk_super->version = cpu_to_le32(THIN_VERSION);
- disk_super->time = 0;
- disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
- disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
- disk_super->data_block_size = cpu_to_le32(data_block_size);
-
- r = dm_bm_unlock(sblock);
- if (r < 0)
- goto bad;
-
- r = dm_btree_empty(&pmd->info, &pmd->root);
- if (r < 0)
- goto bad;
-
- r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
- if (r < 0) {
- DMERR("couldn't create devices root");
- goto bad;
- }
- pmd->flags = 0;
- pmd->need_commit = 1;
- r = dm_pool_commit_metadata(pmd);
+ r = __begin_transaction(pmd);
if (r < 0) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
- goto bad;
+ if (dm_pool_metadata_close(pmd) < 0)
+ DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+ return ERR_PTR(r);
}
return pmd;
-
-bad:
- if (dm_pool_metadata_close(pmd) < 0)
- DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
- return ERR_PTR(r);
}
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
@@ -778,18 +857,17 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
return -EBUSY;
}
- r = __commit_transaction(pmd);
- if (r < 0)
- DMWARN("%s: __commit_transaction() failed, error = %d",
- __func__, r);
+ if (!pmd->read_only && !pmd->fail_io) {
+ r = __commit_transaction(pmd);
+ if (r < 0)
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ }
- dm_tm_destroy(pmd->tm);
- dm_tm_destroy(pmd->nb_tm);
- dm_block_manager_destroy(pmd->bm);
- dm_sm_destroy(pmd->metadata_sm);
- dm_sm_destroy(pmd->data_sm);
- kfree(pmd);
+ if (!pmd->fail_io)
+ __destroy_persistent_data_objects(pmd);
+ kfree(pmd);
return 0;
}
@@ -850,6 +928,7 @@ static int __open_device(struct dm_pool_metadata *pmd,
(*td)->id = dev;
(*td)->open_count = 1;
(*td)->changed = changed;
+ (*td)->aborted_with_changes = false;
(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
(*td)->creation_time = le32_to_cpu(details_le.creation_time);
@@ -911,10 +990,11 @@ static int __create_thin(struct dm_pool_metadata *pmd,
int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __create_thin(pmd, dev);
+ if (!pmd->fail_io)
+ r = __create_thin(pmd, dev);
up_write(&pmd->root_lock);
return r;
@@ -1001,10 +1081,11 @@ int dm_pool_create_snap(struct dm_pool_metadata *pmd,
dm_thin_id dev,
dm_thin_id origin)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __create_snap(pmd, dev, origin);
+ if (!pmd->fail_io)
+ r = __create_snap(pmd, dev, origin);
up_write(&pmd->root_lock);
return r;
@@ -1037,18 +1118,17 @@ static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
if (r)
return r;
- pmd->need_commit = 1;
-
return 0;
}
int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
dm_thin_id dev)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __delete_device(pmd, dev);
+ if (!pmd->fail_io)
+ r = __delete_device(pmd, dev);
up_write(&pmd->root_lock);
return r;
@@ -1058,28 +1138,40 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
uint64_t current_id,
uint64_t new_id)
{
+ int r = -EINVAL;
+
down_write(&pmd->root_lock);
+
+ if (pmd->fail_io)
+ goto out;
+
if (pmd->trans_id != current_id) {
- up_write(&pmd->root_lock);
DMERR("mismatched transaction id");
- return -EINVAL;
+ goto out;
}
pmd->trans_id = new_id;
- pmd->need_commit = 1;
+ r = 0;
+
+out:
up_write(&pmd->root_lock);
- return 0;
+ return r;
}
int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
uint64_t *result)
{
+ int r = -EINVAL;
+
down_read(&pmd->root_lock);
- *result = pmd->trans_id;
+ if (!pmd->fail_io) {
+ *result = pmd->trans_id;
+ r = 0;
+ }
up_read(&pmd->root_lock);
- return 0;
+ return r;
}
static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
@@ -1108,8 +1200,6 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
dm_tm_dec(pmd->tm, held_root);
dm_tm_unlock(pmd->tm, copy);
- pmd->need_commit = 1;
-
return -EBUSY;
}
@@ -1131,29 +1221,25 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
/*
* Write the held root into the superblock.
*/
- r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, &sblock);
+ r = superblock_lock(pmd, &sblock);
if (r) {
dm_tm_dec(pmd->tm, held_root);
- pmd->need_commit = 1;
return r;
}
disk_super = dm_block_data(sblock);
disk_super->held_root = cpu_to_le64(held_root);
dm_bm_unlock(sblock);
-
- pmd->need_commit = 1;
-
return 0;
}
int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __reserve_metadata_snap(pmd);
+ if (!pmd->fail_io)
+ r = __reserve_metadata_snap(pmd);
up_write(&pmd->root_lock);
return r;
@@ -1166,15 +1252,13 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
struct dm_block *sblock, *copy;
dm_block_t held_root;
- r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
- &sb_validator, &sblock);
+ r = superblock_lock(pmd, &sblock);
if (r)
return r;
disk_super = dm_block_data(sblock);
held_root = le64_to_cpu(disk_super->held_root);
disk_super->held_root = cpu_to_le64(0);
- pmd->need_commit = 1;
dm_bm_unlock(sblock);
@@ -1197,10 +1281,11 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __release_metadata_snap(pmd);
+ if (!pmd->fail_io)
+ r = __release_metadata_snap(pmd);
up_write(&pmd->root_lock);
return r;
@@ -1227,10 +1312,11 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd,
int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
down_read(&pmd->root_lock);
- r = __get_metadata_snap(pmd, result);
+ if (!pmd->fail_io)
+ r = __get_metadata_snap(pmd, result);
up_read(&pmd->root_lock);
return r;
@@ -1239,10 +1325,11 @@ int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
struct dm_thin_device **td)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __open_device(pmd, dev, 0, td);
+ if (!pmd->fail_io)
+ r = __open_device(pmd, dev, 0, td);
up_write(&pmd->root_lock);
return r;
@@ -1262,7 +1349,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
return td->id;
}
-static int __snapshotted_since(struct dm_thin_device *td, uint32_t time)
+static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
return td->snapshotted_time > time;
}
@@ -1270,28 +1357,31 @@ static int __snapshotted_since(struct dm_thin_device *td, uint32_t time)
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
int can_block, struct dm_thin_lookup_result *result)
{
- int r;
+ int r = -EINVAL;
uint64_t block_time = 0;
__le64 value;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
+ struct dm_btree_info *info;
if (can_block) {
down_read(&pmd->root_lock);
- r = dm_btree_lookup(&pmd->info, pmd->root, keys, &value);
- if (!r)
- block_time = le64_to_cpu(value);
- up_read(&pmd->root_lock);
-
- } else if (down_read_trylock(&pmd->root_lock)) {
- r = dm_btree_lookup(&pmd->nb_info, pmd->root, keys, &value);
- if (!r)
- block_time = le64_to_cpu(value);
- up_read(&pmd->root_lock);
-
- } else
+ info = &pmd->info;
+ } else if (down_read_trylock(&pmd->root_lock))
+ info = &pmd->nb_info;
+ else
return -EWOULDBLOCK;
+ if (pmd->fail_io)
+ goto out;
+
+ r = dm_btree_lookup(info, pmd->root, keys, &value);
+ if (!r)
+ block_time = le64_to_cpu(value);
+
+out:
+ up_read(&pmd->root_lock);
+
if (!r) {
dm_block_t exception_block;
uint32_t exception_time;
@@ -1312,7 +1402,6 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[2] = { td->id, block };
- pmd->need_commit = 1;
value = cpu_to_le64(pack_block_time(data_block, pmd->time));
__dm_bless_for_disk(&value);
@@ -1321,10 +1410,9 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
if (r)
return r;
- if (inserted) {
+ td->changed = 1;
+ if (inserted)
td->mapped_blocks++;
- td->changed = 1;
- }
return 0;
}
@@ -1332,10 +1420,11 @@ static int __insert(struct dm_thin_device *td, dm_block_t block,
int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t data_block)
{
- int r;
+ int r = -EINVAL;
down_write(&td->pmd->root_lock);
- r = __insert(td, block, data_block);
+ if (!td->pmd->fail_io)
+ r = __insert(td, block, data_block);
up_write(&td->pmd->root_lock);
return r;
@@ -1353,31 +1442,51 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
td->mapped_blocks--;
td->changed = 1;
- pmd->need_commit = 1;
return 0;
}
int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
- int r;
+ int r = -EINVAL;
down_write(&td->pmd->root_lock);
- r = __remove(td, block);
+ if (!td->pmd->fail_io)
+ r = __remove(td, block);
up_write(&td->pmd->root_lock);
return r;
}
-int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
+bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
- down_write(&pmd->root_lock);
+ down_read(&td->pmd->root_lock);
+ r = td->changed;
+ up_read(&td->pmd->root_lock);
- r = dm_sm_new_block(pmd->data_sm, result);
- pmd->need_commit = 1;
+ return r;
+}
+
+bool dm_thin_aborted_changes(struct dm_thin_device *td)
+{
+ bool r;
+ down_read(&td->pmd->root_lock);
+ r = td->aborted_with_changes;
+ up_read(&td->pmd->root_lock);
+
+ return r;
+}
+
+int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_write(&pmd->root_lock);
+ if (!pmd->fail_io)
+ r = dm_sm_new_block(pmd->data_sm, result);
up_write(&pmd->root_lock);
return r;
@@ -1385,9 +1494,11 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
+ if (pmd->fail_io)
+ goto out;
r = __commit_transaction(pmd);
if (r <= 0)
@@ -1402,12 +1513,41 @@ out:
return r;
}
+static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+{
+ struct dm_thin_device *td;
+
+ list_for_each_entry(td, &pmd->thin_devices, list)
+ td->aborted_with_changes = td->changed;
+}
+
+int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+{
+ int r = -EINVAL;
+
+ down_write(&pmd->root_lock);
+ if (pmd->fail_io)
+ goto out;
+
+ __set_abort_with_changes_flags(pmd);
+ __destroy_persistent_data_objects(pmd);
+ r = __create_persistent_data_objects(pmd, false);
+ if (r)
+ pmd->fail_io = true;
+
+out:
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
down_read(&pmd->root_lock);
- r = dm_sm_get_nr_free(pmd->data_sm, result);
+ if (!pmd->fail_io)
+ r = dm_sm_get_nr_free(pmd->data_sm, result);
up_read(&pmd->root_lock);
return r;
@@ -1416,10 +1556,11 @@ int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *resul
int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
down_read(&pmd->root_lock);
- r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+ if (!pmd->fail_io)
+ r = dm_sm_get_nr_free(pmd->metadata_sm, result);
up_read(&pmd->root_lock);
return r;
@@ -1428,10 +1569,11 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
down_read(&pmd->root_lock);
- r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
+ if (!pmd->fail_io)
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
up_read(&pmd->root_lock);
return r;
@@ -1448,10 +1590,11 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
down_read(&pmd->root_lock);
- r = dm_sm_get_nr_blocks(pmd->data_sm, result);
+ if (!pmd->fail_io)
+ r = dm_sm_get_nr_blocks(pmd->data_sm, result);
up_read(&pmd->root_lock);
return r;
@@ -1459,13 +1602,17 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
+ int r = -EINVAL;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
- *result = td->mapped_blocks;
+ if (!pmd->fail_io) {
+ *result = td->mapped_blocks;
+ r = 0;
+ }
up_read(&pmd->root_lock);
- return 0;
+ return r;
}
static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
@@ -1487,11 +1634,12 @@ static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
dm_block_t *result)
{
- int r;
+ int r = -EINVAL;
struct dm_pool_metadata *pmd = td->pmd;
down_read(&pmd->root_lock);
- r = __highest_block(td, result);
+ if (!pmd->fail_io)
+ r = __highest_block(td, result);
up_read(&pmd->root_lock);
return r;
@@ -1514,20 +1662,25 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
return -EINVAL;
}
- r = dm_sm_extend(pmd->data_sm, new_count - old_count);
- if (!r)
- pmd->need_commit = 1;
-
- return r;
+ return dm_sm_extend(pmd->data_sm, new_count - old_count);
}
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
- int r;
+ int r = -EINVAL;
down_write(&pmd->root_lock);
- r = __resize_data_dev(pmd, new_count);
+ if (!pmd->fail_io)
+ r = __resize_data_dev(pmd, new_count);
up_write(&pmd->root_lock);
return r;
}
+
+void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
+{
+ down_write(&pmd->root_lock);
+ pmd->read_only = true;
+ dm_bm_set_read_only(pmd->bm);
+ up_write(&pmd->root_lock);
+}
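
Every public entry point in dm-thin-metadata.c now follows the same fail_io pattern shown in the hunks above: r starts out as -EINVAL, the root_lock is taken, and the underlying helper only runs while pmd->fail_io is clear. A minimal sketch of the pattern, using a hypothetical dm_pool_get_foo() accessor (the name and the __get_foo() helper are illustrative only, not part of the patch):

	int dm_pool_get_foo(struct dm_pool_metadata *pmd, dm_block_t *result)
	{
		int r = -EINVAL;	/* reported once fail_io has been set */

		down_read(&pmd->root_lock);
		if (!pmd->fail_io)
			r = __get_foo(pmd, result);	/* the real work */
		up_read(&pmd->root_lock);

		return r;
	}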
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index b88918ccdaf6..0cecc3702885 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -38,7 +38,8 @@ typedef uint64_t dm_thin_id;
* Reopens or creates a new, empty metadata volume.
*/
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
- sector_t data_block_size);
+ sector_t data_block_size,
+ bool format_device);
int dm_pool_metadata_close(struct dm_pool_metadata *pmd);
@@ -79,6 +80,16 @@ int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
int dm_pool_commit_metadata(struct dm_pool_metadata *pmd);
/*
+ * Discards all uncommitted changes. Rereads the superblock, rolling back
+ * to the last good transaction. Thin devices remain open.
+ * dm_thin_aborted_changes() tells you if they had uncommitted changes.
+ *
+ * If this call fails, it's only useful to call dm_pool_metadata_close().
+ * All other methods will fail with -EINVAL.
+ */
+int dm_pool_abort_metadata(struct dm_pool_metadata *pmd);
+
+/*
* Set/get userspace transaction id.
*/
int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
@@ -119,7 +130,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td);
struct dm_thin_lookup_result {
dm_block_t block;
- int shared;
+ unsigned shared:1;
};
/*
@@ -147,6 +158,10 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
/*
* Queries.
*/
+bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+
+bool dm_thin_aborted_changes(struct dm_thin_device *td);
+
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
dm_block_t *highest_mapped);
@@ -171,6 +186,12 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
*/
int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+/*
+ * Flicks the underlying block manager into read-only mode, so you know
+ * that nothing is changing.
+ */
+void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
+
/*----------------------------------------------------------------*/
#endif
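
The comment on dm_pool_abort_metadata() above implies a fixed recovery sequence for callers: abort the transaction and, if that works, flip the metadata into read-only mode; if the abort itself fails, nothing but dm_pool_metadata_close() is worth calling. A hedged sketch of that sequence (the wrapper name is illustrative; set_pool_mode() in dm-thin.c below is the real user):

	static int degrade_to_read_only(struct dm_pool_metadata *pmd)
	{
		int r = dm_pool_abort_metadata(pmd);	/* roll back to last good transaction */

		if (r)
			return r;	/* only dm_pool_metadata_close() is useful now */

		dm_pool_metadata_read_only(pmd);	/* stop further metadata writes */
		return 0;
	}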
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 68694da0d21d..af1fc3b2c2ad 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1,10 +1,11 @@
/*
- * Copyright (C) 2011 Red Hat UK.
+ * Copyright (C) 2011-2012 Red Hat UK.
*
* This file is released under the GPL.
*/
#include "dm-thin-metadata.h"
+#include "dm.h"
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
@@ -19,7 +20,7 @@
/*
* Tunable constants
*/
-#define ENDIO_HOOK_POOL_SIZE 10240
+#define ENDIO_HOOK_POOL_SIZE 1024
#define DEFERRED_SET_SIZE 64
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
@@ -496,12 +497,27 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
*/
struct dm_thin_new_mapping;
+/*
+ * The pool runs in 3 modes, ordered from least to most degraded so that modes can be compared.
+ */
+enum pool_mode {
+ PM_WRITE, /* metadata may be changed */
+ PM_READ_ONLY, /* metadata may not be changed */
+ PM_FAIL, /* all I/O fails */
+};
+
struct pool_features {
+ enum pool_mode mode;
+
unsigned zero_new_blocks:1;
unsigned discard_enabled:1;
unsigned discard_passdown:1;
};
+struct thin_c;
+typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
+typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
+
struct pool {
struct list_head list;
struct dm_target *ti; /* Only set if a pool target is bound */
@@ -510,10 +526,9 @@ struct pool {
struct block_device *md_dev;
struct dm_pool_metadata *pmd;
- uint32_t sectors_per_block;
- unsigned block_shift;
- dm_block_t offset_mask;
dm_block_t low_water_blocks;
+ uint32_t sectors_per_block;
+ int sectors_per_block_shift;
struct pool_features pf;
unsigned low_water_triggered:1; /* A dm event has been sent */
@@ -526,8 +541,8 @@ struct pool {
struct work_struct worker;
struct delayed_work waker;
- unsigned ref_count;
unsigned long last_commit_jiffies;
+ unsigned ref_count;
spinlock_t lock;
struct bio_list deferred_bios;
@@ -543,8 +558,17 @@ struct pool {
struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
mempool_t *endio_hook_pool;
+
+ process_bio_fn process_bio;
+ process_bio_fn process_discard;
+
+ process_mapping_fn process_prepared_mapping;
+ process_mapping_fn process_prepared_discard;
};
+static enum pool_mode get_pool_mode(struct pool *pool);
+static void set_pool_mode(struct pool *pool, enum pool_mode mode);
+
/*
* Target context for a pool.
*/
@@ -679,16 +703,28 @@ static void requeue_io(struct thin_c *tc)
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
- return bio->bi_sector >> tc->pool->block_shift;
+ sector_t block_nr = bio->bi_sector;
+
+ if (tc->pool->sectors_per_block_shift < 0)
+ (void) sector_div(block_nr, tc->pool->sectors_per_block);
+ else
+ block_nr >>= tc->pool->sectors_per_block_shift;
+
+ return block_nr;
}
static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
struct pool *pool = tc->pool;
+ sector_t bi_sector = bio->bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
- bio->bi_sector = (block << pool->block_shift) +
- (bio->bi_sector & pool->offset_mask);
+ if (tc->pool->sectors_per_block_shift < 0)
+ bio->bi_sector = (block * pool->sectors_per_block) +
+ sector_div(bi_sector, pool->sectors_per_block);
+ else
+ bio->bi_sector = (block << pool->sectors_per_block_shift) |
+ (bi_sector & (pool->sectors_per_block - 1));
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -696,21 +732,39 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
bio->bi_bdev = tc->origin_dev->bdev;
}
+static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
+{
+ return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+ dm_thin_changed_this_transaction(tc->td);
+}
+
static void issue(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
unsigned long flags;
+ if (!bio_triggers_commit(tc, bio)) {
+ generic_make_request(bio);
+ return;
+ }
+
/*
- * Batch together any FUA/FLUSH bios we find and then issue
- * a single commit for them in process_deferred_bios().
+ * Complete the bio with an error if earlier I/O caused changes to
+ * the metadata that can't be committed, e.g. due to I/O errors
+ * on the metadata device.
*/
- if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
- spin_lock_irqsave(&pool->lock, flags);
- bio_list_add(&pool->deferred_flush_bios, bio);
- spin_unlock_irqrestore(&pool->lock, flags);
- } else
- generic_make_request(bio);
+ if (dm_thin_aborted_changes(tc->td)) {
+ bio_io_error(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in process_deferred_bios().
+ */
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_flush_bios, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
@@ -847,6 +901,14 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell
wake_worker(pool);
}
+static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
+{
+ if (m->bio)
+ m->bio->bi_end_io = m->saved_bi_end_io;
+ cell_error(m->cell);
+ list_del(&m->list);
+ mempool_free(m, m->tc->pool->mapping_pool);
+}
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
@@ -859,7 +921,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
if (m->err) {
cell_error(m->cell);
- return;
+ goto out;
}
/*
@@ -871,7 +933,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
if (r) {
DMERR("dm_thin_insert_block() failed");
cell_error(m->cell);
- return;
+ goto out;
}
/*
@@ -886,22 +948,25 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
} else
cell_defer(tc, m->cell, m->data_block);
+out:
list_del(&m->list);
mempool_free(m, tc->pool->mapping_pool);
}
-static void process_prepared_discard(struct dm_thin_new_mapping *m)
+static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
- int r;
struct thin_c *tc = m->tc;
- r = dm_thin_remove_block(tc->td, m->virt_block);
- if (r)
- DMERR("dm_thin_remove_block() failed");
+ bio_io_error(m->bio);
+ cell_defer_except(tc, m->cell);
+ cell_defer_except(tc, m->cell2);
+ mempool_free(m, tc->pool->mapping_pool);
+}
+
+static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
+{
+ struct thin_c *tc = m->tc;
- /*
- * Pass the discard down to the underlying device?
- */
if (m->pass_discard)
remap_and_issue(tc, m->bio, m->data_block);
else
@@ -912,8 +977,20 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
+{
+ int r;
+ struct thin_c *tc = m->tc;
+
+ r = dm_thin_remove_block(tc->td, m->virt_block);
+ if (r)
+ DMERR("dm_thin_remove_block() failed");
+
+ process_prepared_discard_passdown(m);
+}
+
static void process_prepared(struct pool *pool, struct list_head *head,
- void (*fn)(struct dm_thin_new_mapping *))
+ process_mapping_fn *fn)
{
unsigned long flags;
struct list_head maps;
@@ -925,7 +1002,7 @@ static void process_prepared(struct pool *pool, struct list_head *head,
spin_unlock_irqrestore(&pool->lock, flags);
list_for_each_entry_safe(m, tmp, &maps, list)
- fn(m);
+ (*fn)(m);
}
/*
@@ -933,9 +1010,7 @@ static void process_prepared(struct pool *pool, struct list_head *head,
*/
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
- return !(bio->bi_sector & pool->offset_mask) &&
- (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT));
-
+ return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}
static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1093,6 +1168,35 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
}
}
+static int commit(struct pool *pool)
+{
+ int r;
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r)
+ DMERR("commit failed, error = %d", r);
+
+ return r;
+}
+
+/*
+ * A non-zero return indicates read_only or fail_io mode.
+ * Many callers don't care about the return value.
+ */
+static int commit_or_fallback(struct pool *pool)
+{
+ int r;
+
+ if (get_pool_mode(pool) != PM_WRITE)
+ return -EINVAL;
+
+ r = commit(pool);
+ if (r)
+ set_pool_mode(pool, PM_READ_ONLY);
+
+ return r;
+}
+
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -1121,12 +1225,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* Try to commit to see if that will free up some
* more space.
*/
- r = dm_pool_commit_metadata(pool->pmd);
- if (r) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
- return r;
- }
+ (void) commit_or_fallback(pool);
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
@@ -1218,7 +1317,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
m = get_next_mapping(pool);
m->tc = tc;
- m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown;
+ m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
m->virt_block = block;
m->data_block = lookup_result.block;
m->cell = cell;
@@ -1234,15 +1333,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
}
} else {
/*
- * This path is hit if people are ignoring
- * limits->discard_granularity. It ignores any
- * part of the discard that is in a subsequent
- * block.
+ * The DM core makes sure that the discard doesn't span
+ * a block boundary. So we submit the discard of a
+ * partial block appropriately.
*/
- sector_t offset = bio->bi_sector - (block << pool->block_shift);
- unsigned remaining = (pool->sectors_per_block - offset) << 9;
- bio->bi_size = min(bio->bi_size, remaining);
-
cell_release_singleton(cell, bio);
cell_release_singleton(cell2, bio);
if ((!lookup_result.shared) && pool->pf.discard_passdown)
@@ -1310,7 +1404,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_detain(pool->prison, &key, bio, &cell))
return;
- if (bio_data_dir(bio) == WRITE)
+ if (bio_data_dir(bio) == WRITE && bio->bi_size)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
@@ -1362,6 +1456,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
default:
DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+ set_pool_mode(tc->pool, PM_READ_ONLY);
cell_error(cell);
break;
}
@@ -1419,6 +1514,49 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
}
}
+static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
+{
+ int r;
+ int rw = bio_data_dir(bio);
+ dm_block_t block = get_bio_block(tc, bio);
+ struct dm_thin_lookup_result lookup_result;
+
+ r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
+ switch (r) {
+ case 0:
+ if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+ bio_io_error(bio);
+ else
+ remap_and_issue(tc, bio, lookup_result.block);
+ break;
+
+ case -ENODATA:
+ if (rw != READ) {
+ bio_io_error(bio);
+ break;
+ }
+
+ if (tc->origin_dev) {
+ remap_to_origin_and_issue(tc, bio);
+ break;
+ }
+
+ zero_fill_bio(bio);
+ bio_endio(bio, 0);
+ break;
+
+ default:
+ DMERR("dm_thin_find_block() failed, error = %d", r);
+ bio_io_error(bio);
+ break;
+ }
+}
+
+static void process_bio_fail(struct thin_c *tc, struct bio *bio)
+{
+ bio_io_error(bio);
+}
+
static int need_commit_due_to_time(struct pool *pool)
{
return jiffies < pool->last_commit_jiffies ||
@@ -1430,7 +1568,6 @@ static void process_deferred_bios(struct pool *pool)
unsigned long flags;
struct bio *bio;
struct bio_list bios;
- int r;
bio_list_init(&bios);
@@ -1457,9 +1594,9 @@ static void process_deferred_bios(struct pool *pool)
}
if (bio->bi_rw & REQ_DISCARD)
- process_discard(tc, bio);
+ pool->process_discard(tc, bio);
else
- process_bio(tc, bio);
+ pool->process_bio(tc, bio);
}
/*
@@ -1475,10 +1612,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
return;
- r = dm_pool_commit_metadata(pool->pmd);
- if (r) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
+ if (commit_or_fallback(pool)) {
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
@@ -1493,8 +1627,8 @@ static void do_worker(struct work_struct *ws)
{
struct pool *pool = container_of(ws, struct pool, worker);
- process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping);
- process_prepared(pool, &pool->prepared_discards, process_prepared_discard);
+ process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
+ process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
process_deferred_bios(pool);
}
@@ -1511,6 +1645,52 @@ static void do_waker(struct work_struct *ws)
/*----------------------------------------------------------------*/
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+ return pool->pf.mode;
+}
+
+static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+{
+ int r;
+
+ pool->pf.mode = mode;
+
+ switch (mode) {
+ case PM_FAIL:
+ DMERR("switching pool to failure mode");
+ pool->process_bio = process_bio_fail;
+ pool->process_discard = process_bio_fail;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_fail;
+ break;
+
+ case PM_READ_ONLY:
+ DMERR("switching pool to read-only mode");
+ r = dm_pool_abort_metadata(pool->pmd);
+ if (r) {
+ DMERR("aborting transaction failed");
+ set_pool_mode(pool, PM_FAIL);
+ } else {
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+ }
+ break;
+
+ case PM_WRITE:
+ pool->process_bio = process_bio;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+ pool->process_prepared_discard = process_prepared_discard;
+ break;
+ }
+}
+
+/*----------------------------------------------------------------*/
+
/*
* Mapping functions.
*/
@@ -1556,6 +1736,12 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
struct dm_thin_lookup_result result;
map_context->ptr = thin_hook_bio(tc, bio);
+
+ if (get_pool_mode(tc->pool) == PM_FAIL) {
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
thin_defer_bio(tc, bio);
return DM_MAPIO_SUBMITTED;
@@ -1592,14 +1778,35 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
break;
case -ENODATA:
+ if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
+ /*
+ * This block isn't provisioned, and we have no way
+ * of doing so. Just error it.
+ */
+ bio_io_error(bio);
+ r = DM_MAPIO_SUBMITTED;
+ break;
+ }
+ /* fall through */
+
+ case -EWOULDBLOCK:
/*
* In future, the failed dm_thin_find_block above could
* provide the hint to load the metadata into cache.
*/
- case -EWOULDBLOCK:
thin_defer_bio(tc, bio);
r = DM_MAPIO_SUBMITTED;
break;
+
+ default:
+ /*
+ * Must always call bio_io_error on failure.
+ * dm_thin_find_block can fail with -EINVAL if the
+ * pool is switched to fail-io mode.
+ */
+ bio_io_error(bio);
+ r = DM_MAPIO_SUBMITTED;
+ break;
}
return r;
@@ -1636,15 +1843,26 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
struct pool_c *pt = ti->private;
+ /*
+ * We want to make sure that degraded pools are never upgraded.
+ */
+ enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode new_mode = pt->pf.mode;
+
+ if (old_mode > new_mode)
+ new_mode = old_mode;
+
pool->ti = ti;
pool->low_water_blocks = pt->low_water_blocks;
pool->pf = pt->pf;
+ set_pool_mode(pool, new_mode);
/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not; otherwise
* -EOPNOTSUPP will be returned.
*/
+ /* FIXME: pull this out into a sep fn. */
if (pt->pf.discard_passdown) {
struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
if (!q || !blk_queue_discard(q)) {
@@ -1670,6 +1888,7 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
+ pf->mode = PM_WRITE;
pf->zero_new_blocks = 1;
pf->discard_enabled = 1;
pf->discard_passdown = 1;
@@ -1700,14 +1919,16 @@ static struct kmem_cache *_endio_hook_cache;
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
- unsigned long block_size, char **error)
+ unsigned long block_size,
+ int read_only, char **error)
{
int r;
void *err_p;
struct pool *pool;
struct dm_pool_metadata *pmd;
+ bool format_device = read_only ? false : true;
- pmd = dm_pool_metadata_open(metadata_dev, block_size);
+ pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
if (IS_ERR(pmd)) {
*error = "Error creating metadata object";
return (struct pool *)pmd;
@@ -1722,8 +1943,10 @@ static struct pool *pool_create(struct mapped_device *pool_md,
pool->pmd = pmd;
pool->sectors_per_block = block_size;
- pool->block_shift = ffs(block_size) - 1;
- pool->offset_mask = block_size - 1;
+ if (block_size & (block_size - 1))
+ pool->sectors_per_block_shift = -1;
+ else
+ pool->sectors_per_block_shift = __ffs(block_size);
pool->low_water_blocks = 0;
pool_features_init(&pool->pf);
pool->prison = prison_create(PRISON_CELLS);
@@ -1822,25 +2045,29 @@ static void __pool_dec(struct pool *pool)
static struct pool *__pool_find(struct mapped_device *pool_md,
struct block_device *metadata_dev,
- unsigned long block_size, char **error,
- int *created)
+ unsigned long block_size, int read_only,
+ char **error, int *created)
{
struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
if (pool) {
- if (pool->pool_md != pool_md)
+ if (pool->pool_md != pool_md) {
+ *error = "metadata device already in use by a pool";
return ERR_PTR(-EBUSY);
+ }
__pool_inc(pool);
} else {
pool = __pool_table_lookup(pool_md);
if (pool) {
- if (pool->md_dev != metadata_dev)
+ if (pool->md_dev != metadata_dev) {
+ *error = "a different pool cannot replace the existing pool";
return ERR_PTR(-EINVAL);
+ }
__pool_inc(pool);
} else {
- pool = pool_create(pool_md, metadata_dev, block_size, error);
+ pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
*created = 1;
}
}
@@ -1891,19 +2118,23 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
arg_name = dm_shift_arg(as);
argc--;
- if (!strcasecmp(arg_name, "skip_block_zeroing")) {
+ if (!strcasecmp(arg_name, "skip_block_zeroing"))
pf->zero_new_blocks = 0;
- continue;
- } else if (!strcasecmp(arg_name, "ignore_discard")) {
+
+ else if (!strcasecmp(arg_name, "ignore_discard"))
pf->discard_enabled = 0;
- continue;
- } else if (!strcasecmp(arg_name, "no_discard_passdown")) {
+
+ else if (!strcasecmp(arg_name, "no_discard_passdown"))
pf->discard_passdown = 0;
- continue;
- }
- ti->error = "Unrecognised pool feature requested";
- r = -EINVAL;
+ else if (!strcasecmp(arg_name, "read_only"))
+ pf->mode = PM_READ_ONLY;
+
+ else {
+ ti->error = "Unrecognised pool feature requested";
+ r = -EINVAL;
+ break;
+ }
}
return r;
@@ -1967,7 +2198,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
- !is_power_of_2(block_size)) {
+ block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
ti->error = "Invalid block size";
r = -EINVAL;
goto out;
@@ -1996,7 +2227,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
- block_size, &ti->error, &pool_created);
+ block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
if (IS_ERR(pool)) {
r = PTR_ERR(pool);
goto out_free_pt;
@@ -2014,6 +2245,15 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out_flags_changed;
}
+ /*
+ * The block layer requires discard_granularity to be a power of 2.
+ */
+ if (pf.discard_enabled && !is_power_of_2(block_size)) {
+ ti->error = "Discard support must be disabled when the block size is not a power of 2";
+ r = -EINVAL;
+ goto out_flags_changed;
+ }
+
pt->pool = pool;
pt->ti = ti;
pt->metadata_dev = metadata_dev;
@@ -2033,7 +2273,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* stacking of discard limits (this keeps the pool and
* thin devices' discard limits consistent).
*/
- ti->discards_supported = 1;
+ ti->discards_supported = true;
}
ti->private = pt;
@@ -2093,7 +2333,8 @@ static int pool_preresume(struct dm_target *ti)
int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- dm_block_t data_size, sb_data_size;
+ sector_t data_size = ti->len;
+ dm_block_t sb_data_size;
/*
* Take control of the pool object.
@@ -2102,7 +2343,8 @@ static int pool_preresume(struct dm_target *ti)
if (r)
return r;
- data_size = ti->len >> pool->block_shift;
+ (void) sector_div(data_size, pool->sectors_per_block);
+
r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
if (r) {
DMERR("failed to retrieve data device size");
@@ -2111,22 +2353,19 @@ static int pool_preresume(struct dm_target *ti)
if (data_size < sb_data_size) {
DMERR("pool target too small, is %llu blocks (expected %llu)",
- data_size, sb_data_size);
+ (unsigned long long)data_size, sb_data_size);
return -EINVAL;
} else if (data_size > sb_data_size) {
r = dm_pool_resize_data_dev(pool->pmd, data_size);
if (r) {
DMERR("failed to resize data device");
+ /* FIXME Stricter than necessary: Rollback transaction instead here */
+ set_pool_mode(pool, PM_READ_ONLY);
return r;
}
- r = dm_pool_commit_metadata(pool->pmd);
- if (r) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
- return r;
- }
+ (void) commit_or_fallback(pool);
}
return 0;
@@ -2149,19 +2388,12 @@ static void pool_resume(struct dm_target *ti)
static void pool_postsuspend(struct dm_target *ti)
{
- int r;
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
cancel_delayed_work(&pool->waker);
flush_workqueue(pool->wq);
-
- r = dm_pool_commit_metadata(pool->pmd);
- if (r < 0) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
- /* FIXME: invalidate device? error the next FUA or FLUSH bio ?*/
- }
+ (void) commit_or_fallback(pool);
}
static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2295,12 +2527,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
- r = dm_pool_commit_metadata(pool->pmd);
- if (r) {
- DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
- __func__, r);
- return r;
- }
+ (void) commit_or_fallback(pool);
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
@@ -2361,25 +2588,41 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
else
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
- if (!r) {
- r = dm_pool_commit_metadata(pool->pmd);
- if (r)
- DMERR("%s message: dm_pool_commit_metadata() failed, error = %d",
- argv[0], r);
- }
+ if (!r)
+ (void) commit_or_fallback(pool);
return r;
}
+static void emit_flags(struct pool_features *pf, char *result,
+ unsigned sz, unsigned maxlen)
+{
+ unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
+ !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
+ DMEMIT("%u ", count);
+
+ if (!pf->zero_new_blocks)
+ DMEMIT("skip_block_zeroing ");
+
+ if (!pf->discard_enabled)
+ DMEMIT("ignore_discard ");
+
+ if (!pf->discard_passdown)
+ DMEMIT("no_discard_passdown ");
+
+ if (pf->mode == PM_READ_ONLY)
+ DMEMIT("read_only ");
+}
+
/*
* Status line is:
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
*/
static int pool_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
- int r, count;
+ int r;
unsigned sz = 0;
uint64_t transaction_id;
dm_block_t nr_free_blocks_data;
@@ -2394,6 +2637,15 @@ static int pool_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
+ if (get_pool_mode(pool) == PM_FAIL) {
+ DMEMIT("Fail");
+ break;
+ }
+
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
+ (void) commit_or_fallback(pool);
+
r = dm_pool_get_metadata_transaction_id(pool->pmd,
&transaction_id);
if (r)
@@ -2429,9 +2681,19 @@ static int pool_status(struct dm_target *ti, status_type_t type,
(unsigned long long)nr_blocks_data);
if (held_root)
- DMEMIT("%llu", held_root);
+ DMEMIT("%llu ", held_root);
+ else
+ DMEMIT("- ");
+
+ if (pool->pf.mode == PM_READ_ONLY)
+ DMEMIT("ro ");
+ else
+ DMEMIT("rw ");
+
+ if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+ DMEMIT("discard_passdown");
else
- DMEMIT("-");
+ DMEMIT("no_discard_passdown");
break;
@@ -2441,20 +2703,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
(unsigned long)pool->sectors_per_block,
(unsigned long long)pt->low_water_blocks);
-
- count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled +
- !pt->pf.discard_passdown;
- DMEMIT("%u ", count);
-
- if (!pool->pf.zero_new_blocks)
- DMEMIT("skip_block_zeroing ");
-
- if (!pool->pf.discard_enabled)
- DMEMIT("ignore_discard ");
-
- if (!pt->pf.discard_passdown)
- DMEMIT("no_discard_passdown ");
-
+ emit_flags(&pt->pf, result, sz, maxlen);
break;
}
@@ -2492,7 +2741,8 @@ static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
/*
* This is just a hint, and not enforced. We have to cope with
- * bios that overlap 2 blocks.
+ * bios that cover a block partially. A discard that spans a block
+ * boundary is not sent to this target.
*/
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
limits->discard_zeroes_data = pool->pf.zero_new_blocks;
@@ -2513,7 +2763,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2618,20 +2868,31 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
__pool_inc(tc->pool);
+ if (get_pool_mode(tc->pool) == PM_FAIL) {
+ ti->error = "Couldn't open thin device, pool is in fail mode";
+ goto bad_thin_open;
+ }
+
r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
if (r) {
ti->error = "Couldn't open thin internal device";
goto bad_thin_open;
}
- ti->split_io = tc->pool->sectors_per_block;
+ r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
+ if (r)
+ goto bad_thin_open;
+
ti->num_flush_requests = 1;
+ ti->flush_supported = true;
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
- ti->discards_supported = 1;
+ ti->discards_supported = true;
ti->num_discard_requests = 1;
- ti->discard_zeroes_data_unsupported = 1;
+ ti->discard_zeroes_data_unsupported = true;
+ /* Discard requests must be split on a block boundary */
+ ti->split_discard_requests = true;
}
dm_put(pool_md);
@@ -2712,7 +2973,7 @@ static void thin_postsuspend(struct dm_target *ti)
* <nr mapped sectors> <highest mapped sector>
*/
static int thin_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
ssize_t sz = 0;
@@ -2720,6 +2981,11 @@ static int thin_status(struct dm_target *ti, status_type_t type,
char buf[BDEVNAME_SIZE];
struct thin_c *tc = ti->private;
+ if (get_pool_mode(tc->pool) == PM_FAIL) {
+ DMEMIT("Fail");
+ return 0;
+ }
+
if (!tc->td)
DMEMIT("-");
else {
@@ -2757,19 +3023,21 @@ static int thin_status(struct dm_target *ti, status_type_t type,
static int thin_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
- dm_block_t blocks;
+ sector_t blocks;
struct thin_c *tc = ti->private;
+ struct pool *pool = tc->pool;
/*
* We can't call dm_pool_get_data_dev_size() since that blocks. So
* we follow a more convoluted path through to the pool's target.
*/
- if (!tc->pool->ti)
+ if (!pool->ti)
return 0; /* nothing is bound */
- blocks = tc->pool->ti->len >> tc->pool->block_shift;
+ blocks = pool->ti->len;
+ (void) sector_div(blocks, pool->sectors_per_block);
if (blocks)
- return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data);
+ return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
return 0;
}
@@ -2786,7 +3054,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 1, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
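
With block sizes no longer required to be a power of two, get_bio_block() and remap() above pick between a shift and sector_div() based on sectors_per_block_shift (-1 means "not a power of two"). A standalone user-space approximation of that arithmetic, with sector_div() replaced by plain division for illustration:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t bio_to_block(uint64_t bi_sector, uint32_t sectors_per_block,
				     int sectors_per_block_shift)
	{
		if (sectors_per_block_shift < 0)
			return bi_sector / sectors_per_block;	/* non-power-of-two: divide */
		return bi_sector >> sectors_per_block_shift;	/* power of two: shift */
	}

	int main(void)
	{
		/* 384-sector (192KiB) blocks vs. 128-sector (64KiB) blocks */
		printf("%llu\n", (unsigned long long)bio_to_block(1000000, 384, -1));
		printf("%llu\n", (unsigned long long)bio_to_block(1000000, 128, 7));
		return 0;
	}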
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index fa365d39b612..254d19268ad2 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -515,7 +515,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio,
* Status: V (valid) or C (corruption found)
*/
static int verity_status(struct dm_target *ti, status_type_t type,
- char *result, unsigned maxlen)
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
unsigned sz = 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e24143cc2040..4e09b6ff5b49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -968,22 +968,41 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti
static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
sector_t len = max_io_len_target_boundary(sector, ti);
+ sector_t offset, max_len;
/*
- * Does the target need to split even further ?
+ * Does the target need to split even further?
*/
- if (ti->split_io) {
- sector_t boundary;
- sector_t offset = dm_target_offset(ti, sector);
- boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
- - offset;
- if (len > boundary)
- len = boundary;
+ if (ti->max_io_len) {
+ offset = dm_target_offset(ti, sector);
+ if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
+ max_len = sector_div(offset, ti->max_io_len);
+ else
+ max_len = offset & (ti->max_io_len - 1);
+ max_len = ti->max_io_len - max_len;
+
+ if (len > max_len)
+ len = max_len;
}
return len;
}
+int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
+{
+ if (len > UINT_MAX) {
+ DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
+ (unsigned long long)len, UINT_MAX);
+ ti->error = "Maximum size of target IO is too large";
+ return -EINVAL;
+ }
+
+ ti->max_io_len = (uint32_t) len;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
+
static void __map_bio(struct dm_target *ti, struct bio *clone,
struct dm_target_io *tio)
{
@@ -1196,7 +1215,10 @@ static int __clone_and_map_discard(struct clone_info *ci)
if (!ti->num_discard_requests)
return -EOPNOTSUPP;
- len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ if (!ti->split_discard_requests)
+ len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+ else
+ len = min(ci->sector_count, max_io_len(ci->sector, ti));
__issue_target_requests(ci, ti, ti->num_discard_requests, len);
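
dm_set_target_max_io_len() replaces direct assignment of ti->split_io; a target now sets its I/O boundary from its constructor and must handle the helper failing (lengths above UINT_MAX are rejected and ti->error is filled in). A reduced sketch of the expected call site, modelled on thin_ctr() earlier in this patch (the chunk_sectors value is a stand-in for whatever the target derives it from):

	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
	{
		sector_t chunk_sectors = 128;	/* e.g. parsed from argv */
		int r;

		r = dm_set_target_max_io_len(ti, chunk_sectors);
		if (r)
			return r;	/* ti->error was set by the helper */

		/* ... rest of the constructor ... */
		return 0;
	}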
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index b7dacd59d8d7..52eef493d266 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -23,6 +23,11 @@
#define DM_SUSPEND_NOFLUSH_FLAG (1 << 1)
/*
+ * Status feature flags
+ */
+#define DM_STATUS_NOFLUSH_FLAG (1 << 0)
+
+/*
* Type of table and mapped_device's mempool
*/
#define DM_TYPE_NONE 0
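
DM_STATUS_NOFLUSH_FLAG gives a target's status method a way to skip expensive flushing when the caller asked for a no-flush status; pool_status() above is the first user. A reduced sketch of the pattern (assumes the target, like dm-thin, includes drivers/md/dm.h; the function name is illustrative):

	static int example_status(struct dm_target *ti, status_type_t type,
				  unsigned status_flags, char *result, unsigned maxlen)
	{
		if (type == STATUSTYPE_INFO &&
		    !(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
			/* safe to commit/flush so the reported numbers are current */
		}

		/* ... DMEMIT() the status line as before ... */
		return 0;
	}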
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d5ab4493c8be..3f6203a4c7ea 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on. This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
- struct blk_plug_cb cb;
- struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
- struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
- if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
- md_wakeup_thread(mdcb->mddev->thread);
- kfree(mdcb);
-}
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
- struct blk_plug *plug = current->plug;
- struct md_plug_cb *mdcb;
-
- if (!plug)
- return 0;
-
- list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
- if (mdcb->cb.callback == plugger_unplug &&
- mdcb->mddev == mddev) {
- /* Already on the list, move to top */
- if (mdcb != list_first_entry(&plug->cb_list,
- struct md_plug_cb,
- cb.list))
- list_move(&mdcb->cb.list, &plug->cb_list);
- return 1;
- }
- }
- /* Not currently on the callback list */
- mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
- if (!mdcb)
- return 0;
-
- mdcb->mddev = mddev;
- mdcb->cb.callback = plugger_unplug;
- atomic_inc(&mddev->plug_cnt);
- list_add(&mdcb->cb.list, &plug->cb_list);
- return 1;
+ struct mddev *mddev = cb->data;
+ md_wakeup_thread(mddev->thread);
+ kfree(cb);
}
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
static inline struct mddev *mddev_get(struct mddev *mddev)
{
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
- atomic_set(&mddev->plug_cnt, 0);
spin_lock_init(&mddev->write_lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
@@ -1157,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
ret = 0;
}
rdev->sectors = rdev->sb_start;
- /* Limit to 4TB as metadata cannot record more than that */
- if (rdev->sectors >= (2ULL << 32))
+ /* Limit to 4TB as metadata cannot record more than that.
+ * (not needed for Linear and RAID0 as metadata doesn't
+ * record this size)
+ */
+ if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1449,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (num_sectors >= (2ULL << 32))
+ if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (2ULL << 32) - 2;
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -3942,17 +3896,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break;
case clear:
/* stopping an active array */
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
- if (mddev->pers) {
- if (atomic_read(&mddev->openers) > 0)
- return -EBUSY;
+ if (mddev->pers)
err = do_md_stop(mddev, 2, NULL);
- } else
+ else
err = 0; /* already inactive */
break;
case suspended:
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7b4a3c318cae..f385b038589d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -266,9 +266,6 @@ struct mddev {
int new_chunk_sectors;
int reshape_backwards;
- atomic_t plug_cnt; /* If device is expecting
- * more bios soon.
- */
struct md_thread *thread; /* management thread */
struct md_thread *sync_thread; /* doing resync or reconstruct */
sector_t curr_resync; /* last block scheduled */
@@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern int mddev_check_plugged(struct mddev *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);
+
+extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+static inline int mddev_check_plugged(struct mddev *mddev)
+{
+ return !!blk_check_plugged(md_unplug, mddev,
+ sizeof(struct blk_plug_cb));
+}
#endif /* _MD_MD_H */
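
mddev_check_plugged() keeps its old contract (non-zero means an unplug callback is queued, so the md thread will be woken when the plug is released) but is now a thin wrapper around the generic blk_check_plugged(). A hedged sketch of the usual call site in a personality's make_request path (the surrounding function is illustrative):

	static void example_make_request(struct mddev *mddev, struct bio *bio)
	{
		int plugged = mddev_check_plugged(mddev);

		/* ... queue the bio on the mddev's pending list ... */

		if (!plugged)
			/* no plug active, so kick the md thread directly */
			md_wakeup_thread(mddev->thread);
	}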
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index cfa95f662230..d8e7cb767c1e 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
dm-block-manager.o \
- dm-space-map-checker.o \
dm-space-map-common.o \
dm-space-map-disk.o \
dm-space-map-metadata.o \
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 0317ecdc6e53..5ba277768d99 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -325,11 +325,6 @@ static struct dm_buffer *to_buffer(struct dm_block *b)
return (struct dm_buffer *) b;
}
-static struct dm_bufio_client *to_bufio(struct dm_block_manager *bm)
-{
- return (struct dm_bufio_client *) bm;
-}
-
dm_block_t dm_block_location(struct dm_block *b)
{
return dm_bufio_get_block_number(to_buffer(b));
@@ -367,34 +362,60 @@ static void dm_block_manager_write_callback(struct dm_buffer *buf)
/*----------------------------------------------------------------
* Public interface
*--------------------------------------------------------------*/
+struct dm_block_manager {
+ struct dm_bufio_client *bufio;
+ bool read_only:1;
+};
+
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
unsigned block_size,
unsigned cache_size,
unsigned max_held_per_thread)
{
- return (struct dm_block_manager *)
- dm_bufio_client_create(bdev, block_size, max_held_per_thread,
- sizeof(struct buffer_aux),
- dm_block_manager_alloc_callback,
- dm_block_manager_write_callback);
+ int r;
+ struct dm_block_manager *bm;
+
+ bm = kmalloc(sizeof(*bm), GFP_KERNEL);
+ if (!bm) {
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
+ sizeof(struct buffer_aux),
+ dm_block_manager_alloc_callback,
+ dm_block_manager_write_callback);
+ if (IS_ERR(bm->bufio)) {
+ r = PTR_ERR(bm->bufio);
+ kfree(bm);
+ goto bad;
+ }
+
+ bm->read_only = false;
+
+ return bm;
+
+bad:
+ return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
void dm_block_manager_destroy(struct dm_block_manager *bm)
{
- return dm_bufio_client_destroy(to_bufio(bm));
+ dm_bufio_client_destroy(bm->bufio);
+ kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
- return dm_bufio_get_block_size(to_bufio(bm));
+ return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
- return dm_bufio_get_device_size(to_bufio(bm));
+ return dm_bufio_get_device_size(bm->bufio);
}
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
@@ -406,7 +427,7 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
int r;
if (!v)
return 0;
- r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(to_bufio(bm)));
+ r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
if (unlikely(r))
return r;
aux->validator = v;
@@ -430,7 +451,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
void *p;
int r;
- p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+ p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
@@ -463,7 +484,10 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
void *p;
int r;
- p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (bm->read_only)
+ return -EPERM;
+
+ p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
@@ -496,7 +520,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm,
void *p;
int r;
- p = dm_bufio_get(to_bufio(bm), b, (struct dm_buffer **) result);
+ p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
if (unlikely(!p))
@@ -529,7 +553,10 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
struct buffer_aux *aux;
void *p;
- p = dm_bufio_new(to_bufio(bm), b, (struct dm_buffer **) result);
+ if (bm->read_only)
+ return -EPERM;
+
+ p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
if (unlikely(IS_ERR(p)))
return PTR_ERR(p);
@@ -547,6 +574,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
return 0;
}
+EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
int dm_bm_unlock(struct dm_block *b)
{
@@ -565,45 +593,30 @@ int dm_bm_unlock(struct dm_block *b)
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
-int dm_bm_unlock_move(struct dm_block *b, dm_block_t n)
-{
- struct buffer_aux *aux;
-
- aux = dm_bufio_get_aux_data(to_buffer(b));
-
- if (aux->write_locked) {
- dm_bufio_mark_buffer_dirty(to_buffer(b));
- bl_up_write(&aux->lock);
- } else
- bl_up_read(&aux->lock);
-
- dm_bufio_release_move(to_buffer(b), n);
- return 0;
-}
-
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock)
{
int r;
- r = dm_bufio_write_dirty_buffers(to_bufio(bm));
- if (unlikely(r))
- return r;
- r = dm_bufio_issue_flush(to_bufio(bm));
- if (unlikely(r))
+ if (bm->read_only)
+ return -EPERM;
+
+ r = dm_bufio_write_dirty_buffers(bm->bufio);
+ if (unlikely(r)) {
+ dm_bm_unlock(superblock);
return r;
+ }
dm_bm_unlock(superblock);
- r = dm_bufio_write_dirty_buffers(to_bufio(bm));
- if (unlikely(r))
- return r;
- r = dm_bufio_issue_flush(to_bufio(bm));
- if (unlikely(r))
- return r;
+ return dm_bufio_write_dirty_buffers(bm->bufio);
+}
- return 0;
+void dm_bm_set_read_only(struct dm_block_manager *bm)
+{
+ bm->read_only = true;
}
+EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 924833d2dfa6..be5bff61be28 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -97,14 +97,6 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
int dm_bm_unlock(struct dm_block *b);
/*
- * An optimisation; we often want to copy a block's contents to a new
- * block. eg, as part of the shadowing operation. It's far better for
- * bufio to do this move behind the scenes than hold 2 locks and memcpy the
- * data.
- */
-int dm_bm_unlock_move(struct dm_block *b, dm_block_t n);
-
-/*
* It's a common idiom to have a superblock that should be committed last.
*
* @superblock should be write-locked on entry. It will be unlocked during
@@ -116,6 +108,19 @@ int dm_bm_unlock_move(struct dm_block *b, dm_block_t n);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
+/*
+ * Switches the bm to read-only mode. Once read-only mode
+ * has been entered the following functions will return -EPERM.
+ *
+ * dm_bm_write_lock
+ * dm_bm_write_lock_zero
+ * dm_bm_flush_and_unlock
+ *
+ * Additionally, you should not use dm_bm_unlock_move; however, no error
+ * will be returned if you do.
+ */
+void dm_bm_set_read_only(struct dm_block_manager *bm);
+
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
/*----------------------------------------------------------------*/
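
Once dm_bm_set_read_only() has been called, the write-path entry points listed in the comment above return -EPERM instead of touching the device; read locks keep working. A minimal sketch of what a caller sees (the block number argument is a placeholder):

	static void show_read_only_behaviour(struct dm_block_manager *bm,
					     dm_block_t some_block)
	{
		struct dm_block *b;
		int r;

		dm_bm_set_read_only(bm);

		r = dm_bm_write_lock(bm, some_block, NULL, &b);	/* no validator */
		if (r == -EPERM) {
			/* expected: write locks are refused in read-only mode,
			 * while dm_bm_read_lock() still succeeds
			 */
		}
	}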
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
deleted file mode 100644
index fc90c11620ad..000000000000
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Copyright (C) 2011 Red Hat, Inc.
- *
- * This file is released under the GPL.
- */
-
-#include "dm-space-map-checker.h"
-
-#include <linux/device-mapper.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-
-#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
-
-#define DM_MSG_PREFIX "space map checker"
-
-/*----------------------------------------------------------------*/
-
-struct count_array {
- dm_block_t nr;
- dm_block_t nr_free;
-
- uint32_t *counts;
-};
-
-static int ca_get_count(struct count_array *ca, dm_block_t b, uint32_t *count)
-{
- if (b >= ca->nr)
- return -EINVAL;
-
- *count = ca->counts[b];
- return 0;
-}
-
-static int ca_count_more_than_one(struct count_array *ca, dm_block_t b, int *r)
-{
- if (b >= ca->nr)
- return -EINVAL;
-
- *r = ca->counts[b] > 1;
- return 0;
-}
-
-static int ca_set_count(struct count_array *ca, dm_block_t b, uint32_t count)
-{
- uint32_t old_count;
-
- if (b >= ca->nr)
- return -EINVAL;
-
- old_count = ca->counts[b];
-
- if (!count && old_count)
- ca->nr_free++;
-
- else if (count && !old_count)
- ca->nr_free--;
-
- ca->counts[b] = count;
- return 0;
-}
-
-static int ca_inc_block(struct count_array *ca, dm_block_t b)
-{
- if (b >= ca->nr)
- return -EINVAL;
-
- ca_set_count(ca, b, ca->counts[b] + 1);
- return 0;
-}
-
-static int ca_dec_block(struct count_array *ca, dm_block_t b)
-{
- if (b >= ca->nr)
- return -EINVAL;
-
- BUG_ON(ca->counts[b] == 0);
- ca_set_count(ca, b, ca->counts[b] - 1);
- return 0;
-}
-
-static int ca_create(struct count_array *ca, struct dm_space_map *sm)
-{
- int r;
- dm_block_t nr_blocks;
-
- r = dm_sm_get_nr_blocks(sm, &nr_blocks);
- if (r)
- return r;
-
- ca->nr = nr_blocks;
- ca->nr_free = nr_blocks;
-
- if (!nr_blocks)
- ca->counts = NULL;
- else {
- ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
- if (!ca->counts)
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void ca_destroy(struct count_array *ca)
-{
- vfree(ca->counts);
-}
-
-static int ca_load(struct count_array *ca, struct dm_space_map *sm)
-{
- int r;
- uint32_t count;
- dm_block_t nr_blocks, i;
-
- r = dm_sm_get_nr_blocks(sm, &nr_blocks);
- if (r)
- return r;
-
- BUG_ON(ca->nr != nr_blocks);
-
- DMWARN("Loading debug space map from disk. This may take some time");
- for (i = 0; i < nr_blocks; i++) {
- r = dm_sm_get_count(sm, i, &count);
- if (r) {
- DMERR("load failed");
- return r;
- }
-
- ca_set_count(ca, i, count);
- }
- DMWARN("Load complete");
-
- return 0;
-}
-
-static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
-{
- dm_block_t nr_blocks = ca->nr + extra_blocks;
- uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
- if (!counts)
- return -ENOMEM;
-
- if (ca->counts) {
- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
- ca_destroy(ca);
- }
- ca->nr = nr_blocks;
- ca->nr_free += extra_blocks;
- ca->counts = counts;
- return 0;
-}
-
-static int ca_commit(struct count_array *old, struct count_array *new)
-{
- if (old->nr != new->nr) {
- BUG_ON(old->nr > new->nr);
- ca_extend(old, new->nr - old->nr);
- }
-
- BUG_ON(old->nr != new->nr);
- old->nr_free = new->nr_free;
- memcpy(old->counts, new->counts, sizeof(*old->counts) * old->nr);
- return 0;
-}
-
-/*----------------------------------------------------------------*/
-
-struct sm_checker {
- struct dm_space_map sm;
-
- struct count_array old_counts;
- struct count_array counts;
-
- struct dm_space_map *real_sm;
-};
-
-static void sm_checker_destroy(struct dm_space_map *sm)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
-
- dm_sm_destroy(smc->real_sm);
- ca_destroy(&smc->old_counts);
- ca_destroy(&smc->counts);
- kfree(smc);
-}
-
-static int sm_checker_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_get_nr_blocks(smc->real_sm, count);
- if (!r)
- BUG_ON(smc->old_counts.nr != *count);
- return r;
-}
-
-static int sm_checker_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_get_nr_free(smc->real_sm, count);
- if (!r) {
- /*
- * Slow, but we know it's correct.
- */
- dm_block_t b, n = 0;
- for (b = 0; b < smc->old_counts.nr; b++)
- if (smc->old_counts.counts[b] == 0 &&
- smc->counts.counts[b] == 0)
- n++;
-
- if (n != *count)
- DMERR("free block counts differ, checker %u, sm-disk:%u",
- (unsigned) n, (unsigned) *count);
- }
- return r;
-}
-
-static int sm_checker_new_block(struct dm_space_map *sm, dm_block_t *b)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_new_block(smc->real_sm, b);
-
- if (!r) {
- BUG_ON(*b >= smc->old_counts.nr);
- BUG_ON(smc->old_counts.counts[*b] != 0);
- BUG_ON(*b >= smc->counts.nr);
- BUG_ON(smc->counts.counts[*b] != 0);
- ca_set_count(&smc->counts, *b, 1);
- }
-
- return r;
-}
-
-static int sm_checker_inc_block(struct dm_space_map *sm, dm_block_t b)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_inc_block(smc->real_sm, b);
- int r2 = ca_inc_block(&smc->counts, b);
- BUG_ON(r != r2);
- return r;
-}
-
-static int sm_checker_dec_block(struct dm_space_map *sm, dm_block_t b)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_dec_block(smc->real_sm, b);
- int r2 = ca_dec_block(&smc->counts, b);
- BUG_ON(r != r2);
- return r;
-}
-
-static int sm_checker_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- uint32_t result2 = 0;
- int r = dm_sm_get_count(smc->real_sm, b, result);
- int r2 = ca_get_count(&smc->counts, b, &result2);
-
- BUG_ON(r != r2);
- if (!r)
- BUG_ON(*result != result2);
- return r;
-}
-
-static int sm_checker_count_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int result2 = 0;
- int r = dm_sm_count_is_more_than_one(smc->real_sm, b, result);
- int r2 = ca_count_more_than_one(&smc->counts, b, &result2);
-
- BUG_ON(r != r2);
- if (!r)
- BUG_ON(!(*result) && result2);
- return r;
-}
-
-static int sm_checker_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- uint32_t old_rc;
- int r = dm_sm_set_count(smc->real_sm, b, count);
- int r2;
-
- BUG_ON(b >= smc->counts.nr);
- old_rc = smc->counts.counts[b];
- r2 = ca_set_count(&smc->counts, b, count);
- BUG_ON(r != r2);
-
- return r;
-}
-
-static int sm_checker_commit(struct dm_space_map *sm)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r;
-
- r = dm_sm_commit(smc->real_sm);
- if (r)
- return r;
-
- r = ca_commit(&smc->old_counts, &smc->counts);
- if (r)
- return r;
-
- return 0;
-}
-
-static int sm_checker_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- int r = dm_sm_extend(smc->real_sm, extra_blocks);
- if (r)
- return r;
-
- return ca_extend(&smc->counts, extra_blocks);
-}
-
-static int sm_checker_root_size(struct dm_space_map *sm, size_t *result)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- return dm_sm_root_size(smc->real_sm, result);
-}
-
-static int sm_checker_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len)
-{
- struct sm_checker *smc = container_of(sm, struct sm_checker, sm);
- return dm_sm_copy_root(smc->real_sm, copy_to_here_le, len);
-}
-
-/*----------------------------------------------------------------*/
-
-static struct dm_space_map ops_ = {
- .destroy = sm_checker_destroy,
- .get_nr_blocks = sm_checker_get_nr_blocks,
- .get_nr_free = sm_checker_get_nr_free,
- .inc_block = sm_checker_inc_block,
- .dec_block = sm_checker_dec_block,
- .new_block = sm_checker_new_block,
- .get_count = sm_checker_get_count,
- .count_is_more_than_one = sm_checker_count_more_than_one,
- .set_count = sm_checker_set_count,
- .commit = sm_checker_commit,
- .extend = sm_checker_extend,
- .root_size = sm_checker_root_size,
- .copy_root = sm_checker_copy_root
-};
-
-struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
-{
- int r;
- struct sm_checker *smc;
-
- if (IS_ERR_OR_NULL(sm))
- return ERR_PTR(-EINVAL);
-
- smc = kmalloc(sizeof(*smc), GFP_KERNEL);
- if (!smc)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&smc->sm, &ops_, sizeof(smc->sm));
- r = ca_create(&smc->old_counts, sm);
- if (r) {
- kfree(smc);
- return ERR_PTR(r);
- }
-
- r = ca_create(&smc->counts, sm);
- if (r) {
- ca_destroy(&smc->old_counts);
- kfree(smc);
- return ERR_PTR(r);
- }
-
- smc->real_sm = sm;
-
- r = ca_load(&smc->counts, sm);
- if (r) {
- ca_destroy(&smc->counts);
- ca_destroy(&smc->old_counts);
- kfree(smc);
- return ERR_PTR(r);
- }
-
- r = ca_commit(&smc->old_counts, &smc->counts);
- if (r) {
- ca_destroy(&smc->counts);
- ca_destroy(&smc->old_counts);
- kfree(smc);
- return ERR_PTR(r);
- }
-
- return &smc->sm;
-}
-EXPORT_SYMBOL_GPL(dm_sm_checker_create);
-
-struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
-{
- int r;
- struct sm_checker *smc;
-
- if (IS_ERR_OR_NULL(sm))
- return ERR_PTR(-EINVAL);
-
- smc = kmalloc(sizeof(*smc), GFP_KERNEL);
- if (!smc)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&smc->sm, &ops_, sizeof(smc->sm));
- r = ca_create(&smc->old_counts, sm);
- if (r) {
- kfree(smc);
- return ERR_PTR(r);
- }
-
- r = ca_create(&smc->counts, sm);
- if (r) {
- ca_destroy(&smc->old_counts);
- kfree(smc);
- return ERR_PTR(r);
- }
-
- smc->real_sm = sm;
- return &smc->sm;
-}
-EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);
-
-/*----------------------------------------------------------------*/
-
-#else
-
-struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
-{
- return sm;
-}
-EXPORT_SYMBOL_GPL(dm_sm_checker_create);
-
-struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
-{
- return sm;
-}
-EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh);
-
-/*----------------------------------------------------------------*/
-
-#endif
diff --git a/drivers/md/persistent-data/dm-space-map-checker.h b/drivers/md/persistent-data/dm-space-map-checker.h
deleted file mode 100644
index 444dccf6688c..000000000000
--- a/drivers/md/persistent-data/dm-space-map-checker.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2011 Red Hat, Inc.
- *
- * This file is released under the GPL.
- */
-
-#ifndef SNAPSHOTS_SPACE_MAP_CHECKER_H
-#define SNAPSHOTS_SPACE_MAP_CHECKER_H
-
-#include "dm-space-map.h"
-
-/*----------------------------------------------------------------*/
-
-/*
- * This space map wraps a real on-disk space map, and verifies all of its
- * operations. It uses a lot of memory, so only use if you have a specific
- * problem that you're debugging.
- *
- * Ownership of @sm passes.
- */
-struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm);
-struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm);
-
-/*----------------------------------------------------------------*/
-
-#endif
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index ff3beed6ad2d..d77602d63c83 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -224,6 +224,7 @@ static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
ll->nr_blocks = 0;
ll->bitmap_root = 0;
ll->ref_count_root = 0;
+ ll->bitmap_index_changed = false;
return 0;
}
@@ -476,7 +477,15 @@ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
int sm_ll_commit(struct ll_disk *ll)
{
- return ll->commit(ll);
+ int r = 0;
+
+ if (ll->bitmap_index_changed) {
+ r = ll->commit(ll);
+ if (!r)
+ ll->bitmap_index_changed = false;
+ }
+
+ return r;
}
/*----------------------------------------------------------------*/
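The sm_ll_commit() change above turns an unconditional commit into a dirty-flag check: metadata_ll_save_ie() marks the index as changed, and the commit callback only runs (and the flag is only cleared) when a change is actually pending. A minimal stand-alone sketch of the same pattern, using hypothetical names rather than the dm-space-map types:

        /* Dirty-flag commit sketch (hypothetical cached_index type). */
        struct cached_index {
                int dirty;                      /* set whenever an entry is modified */
                int (*flush)(struct cached_index *ci);
        };

        static int cached_index_commit(struct cached_index *ci)
        {
                int r = 0;

                if (ci->dirty) {                /* skip the flush when nothing changed */
                        r = ci->flush(ci);
                        if (!r)
                                ci->dirty = 0;  /* clear only on success so a failed
                                                 * commit is retried next time */
                }
                return r;
        }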
@@ -491,6 +500,7 @@ static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
+ ll->bitmap_index_changed = true;
memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
return 0;
}
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 8f220821a9a9..b3078d5eda0c 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -78,6 +78,7 @@ struct ll_disk {
open_index_fn open_index;
max_index_entries_fn max_entries;
commit_fn commit;
+ bool bitmap_index_changed:1;
};
struct disk_sm_root {
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index 3d0ed5332883..f6d29e614ab7 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -4,7 +4,6 @@
* This file is released under the GPL.
*/
-#include "dm-space-map-checker.h"
#include "dm-space-map-common.h"
#include "dm-space-map-disk.h"
#include "dm-space-map.h"
@@ -252,9 +251,8 @@ static struct dm_space_map ops = {
.copy_root = sm_disk_copy_root
};
-static struct dm_space_map *dm_sm_disk_create_real(
- struct dm_transaction_manager *tm,
- dm_block_t nr_blocks)
+struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
+ dm_block_t nr_blocks)
{
int r;
struct sm_disk *smd;
@@ -285,27 +283,10 @@ bad:
kfree(smd);
return ERR_PTR(r);
}
-
-struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
- dm_block_t nr_blocks)
-{
- struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
- struct dm_space_map *smc;
-
- if (IS_ERR_OR_NULL(sm))
- return sm;
-
- smc = dm_sm_checker_create_fresh(sm);
- if (IS_ERR(smc))
- dm_sm_destroy(sm);
-
- return smc;
-}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
-static struct dm_space_map *dm_sm_disk_open_real(
- struct dm_transaction_manager *tm,
- void *root_le, size_t len)
+struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
+ void *root_le, size_t len)
{
int r;
struct sm_disk *smd;
@@ -332,13 +313,6 @@ bad:
kfree(smd);
return ERR_PTR(r);
}
-
-struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm,
- void *root_le, size_t len)
-{
- return dm_sm_checker_create(
- dm_sm_disk_open_real(tm, root_le, len));
-}
EXPORT_SYMBOL_GPL(dm_sm_disk_open);
/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index e5604b32d91f..d247a35da3c6 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -5,7 +5,6 @@
*/
#include "dm-transaction-manager.h"
#include "dm-space-map.h"
-#include "dm-space-map-checker.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"
@@ -220,13 +219,24 @@ static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
if (r < 0)
return r;
- r = dm_bm_unlock_move(orig_block, new);
- if (r < 0) {
+ /*
+ * It would be tempting to use dm_bm_unlock_move here, but some
+ * code, such as the space maps, keeps using the old data structures
+ * secure in the knowledge they won't be changed until the next
+ * transaction. Using unlock_move would force a synchronous read
+ * since the old block would no longer be in the cache.
+ */
+ r = dm_bm_write_lock_zero(tm->bm, new, v, result);
+ if (r) {
dm_bm_unlock(orig_block);
return r;
}
- return dm_bm_write_lock(tm->bm, new, v, result);
+ memcpy(dm_block_data(*result), dm_block_data(orig_block),
+ dm_bm_block_size(tm->bm));
+
+ dm_bm_unlock(orig_block);
+ return r;
}
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
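The rewritten __shadow_block() above deliberately avoids dm_bm_unlock_move(): the original block stays readable in the block-manager cache for code that still holds references to the old structures, and the new block is populated with an explicit memcpy instead. A rough sketch of that copy-on-write step in isolation, using a hypothetical buffer type rather than struct dm_block:

        #include <linux/errno.h>
        #include <linux/string.h>

        /* Hypothetical buffer type; dm uses struct dm_block + dm_block_data(). */
        struct buf {
                void *data;
                size_t size;
        };

        /* Copy the old contents into the freshly zero-locked shadow; the original
         * buffer is left untouched so existing readers keep working from cache. */
        static int shadow_copy(const struct buf *orig, struct buf *shadow)
        {
                if (shadow->size < orig->size)
                        return -EINVAL;
                memcpy(shadow->data, orig->data, orig->size);
                return 0;
        }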
@@ -311,98 +321,61 @@ struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
static int dm_tm_create_internal(struct dm_block_manager *bm,
dm_block_t sb_location,
- struct dm_block_validator *sb_validator,
- size_t root_offset, size_t root_max_len,
struct dm_transaction_manager **tm,
struct dm_space_map **sm,
- struct dm_block **sblock,
- int create)
+ int create,
+ void *sm_root, size_t sm_len)
{
int r;
- struct dm_space_map *inner;
- inner = dm_sm_metadata_init();
- if (IS_ERR(inner))
- return PTR_ERR(inner);
+ *sm = dm_sm_metadata_init();
+ if (IS_ERR(*sm))
+ return PTR_ERR(*sm);
- *tm = dm_tm_create(bm, inner);
+ *tm = dm_tm_create(bm, *sm);
if (IS_ERR(*tm)) {
- dm_sm_destroy(inner);
+ dm_sm_destroy(*sm);
return PTR_ERR(*tm);
}
if (create) {
- r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location,
- sb_validator, sblock);
- if (r < 0) {
- DMERR("couldn't lock superblock");
- goto bad1;
- }
-
- r = dm_sm_metadata_create(inner, *tm, dm_bm_nr_blocks(bm),
+ r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
sb_location);
if (r) {
DMERR("couldn't create metadata space map");
- goto bad2;
- }
-
- *sm = dm_sm_checker_create(inner);
- if (IS_ERR(*sm)) {
- r = PTR_ERR(*sm);
- goto bad2;
+ goto bad;
}
} else {
- r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
- sb_validator, sblock);
- if (r < 0) {
- DMERR("couldn't lock superblock");
- goto bad1;
- }
-
- r = dm_sm_metadata_open(inner, *tm,
- dm_block_data(*sblock) + root_offset,
- root_max_len);
+ r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
if (r) {
DMERR("couldn't open metadata space map");
- goto bad2;
- }
-
- *sm = dm_sm_checker_create(inner);
- if (IS_ERR(*sm)) {
- r = PTR_ERR(*sm);
- goto bad2;
+ goto bad;
}
}
return 0;
-bad2:
- dm_tm_unlock(*tm, *sblock);
-bad1:
+bad:
dm_tm_destroy(*tm);
- dm_sm_destroy(inner);
+ dm_sm_destroy(*sm);
return r;
}
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
- struct dm_block_validator *sb_validator,
struct dm_transaction_manager **tm,
- struct dm_space_map **sm, struct dm_block **sblock)
+ struct dm_space_map **sm)
{
- return dm_tm_create_internal(bm, sb_location, sb_validator,
- 0, 0, tm, sm, sblock, 1);
+ return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
- struct dm_block_validator *sb_validator,
- size_t root_offset, size_t root_max_len,
+ void *sm_root, size_t root_len,
struct dm_transaction_manager **tm,
- struct dm_space_map **sm, struct dm_block **sblock)
+ struct dm_space_map **sm)
{
- return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset,
- root_max_len, tm, sm, sblock, 0);
+ return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 6da784871db4..b5b139076ca5 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -115,16 +115,17 @@ struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm);
*
* Returns a tm that has an open transaction to write the new disk sm.
* Caller should store the new sm root and commit.
+ *
+ * The superblock location is passed so the metadata space map knows it
+ * shouldn't be used.
*/
int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
- struct dm_block_validator *sb_validator,
struct dm_transaction_manager **tm,
- struct dm_space_map **sm, struct dm_block **sblock);
+ struct dm_space_map **sm);
int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
- struct dm_block_validator *sb_validator,
- size_t root_offset, size_t root_max_len,
+ void *sm_root, size_t root_len,
struct dm_transaction_manager **tm,
- struct dm_space_map **sm, struct dm_block **sblock);
+ struct dm_space_map **sm);
#endif /* _LINUX_DM_TRANSACTION_MANAGER_H */
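With the checker and the superblock locking gone, callers of dm_tm_open_with_sm() now read and validate their own superblock and pass the packed space-map root in directly. A caller-side sketch under that assumption (the wrapper, its sb_location value, and the sm_root buffer are illustrative, not defined by this patch):

        #include "dm-transaction-manager.h"

        /* Illustrative wrapper: the caller has already unpacked its superblock and
         * extracted the space-map root into sm_root/sm_root_len. */
        static int open_tm_and_sm(struct dm_block_manager *bm, dm_block_t sb_location,
                                  void *sm_root, size_t sm_root_len,
                                  struct dm_transaction_manager **tm,
                                  struct dm_space_map **sm)
        {
                /* No validator or superblock pointer any more; the tm no longer
                 * locks the superblock on the caller's behalf. */
                return dm_tm_open_with_sm(bm, sb_location, sm_root, sm_root_len,
                                          tm, sm);
        }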
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index cacd008d6864..611b5f797618 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -46,6 +46,20 @@
*/
#define NR_RAID1_BIOS 256
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting bios[n] to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
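IO_BLOCKED and IO_MADE_GOOD are sentinel values stored where a struct bio pointer normally lives, and BIO_SPECIAL() distinguishes both sentinels from a real bio before it is dereferenced. A tiny helper showing the intended check (the helper itself is illustrative, not part of the patch):

        /* Illustrative: only treat the slot as carrying a real request if it is
         * non-NULL and not one of the two sentinel values above. */
        static inline bool bio_slot_has_real_bio(struct bio *bio)
        {
                return bio != NULL && !BIO_SPECIAL(bio);
        }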
/* When there are this many requests queued to be written by
* the raid1 thread, we become 'congested' to provide back-pressure
* for writeback.
@@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
const sector_t this_sector = r1_bio->sector;
int sectors;
int best_good_sectors;
- int start_disk;
- int best_disk;
- int i;
+ int best_disk, best_dist_disk, best_pending_disk;
+ int has_nonrot_disk;
+ int disk;
sector_t best_dist;
+ unsigned int min_pending;
struct md_rdev *rdev;
int choose_first;
+ int choose_next_idle;
rcu_read_lock();
/*
@@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
retry:
sectors = r1_bio->sectors;
best_disk = -1;
+ best_dist_disk = -1;
best_dist = MaxSector;
+ best_pending_disk = -1;
+ min_pending = UINT_MAX;
best_good_sectors = 0;
+ has_nonrot_disk = 0;
+ choose_next_idle = 0;
if (conf->mddev->recovery_cp < MaxSector &&
- (this_sector + sectors >= conf->next_resync)) {
+ (this_sector + sectors >= conf->next_resync))
choose_first = 1;
- start_disk = 0;
- } else {
+ else
choose_first = 0;
- start_disk = conf->last_used;
- }
- for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
+ for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
sector_t dist;
sector_t first_bad;
int bad_sectors;
-
- int disk = start_disk + i;
- if (disk >= conf->raid_disks * 2)
- disk -= conf->raid_disks * 2;
+ unsigned int pending;
+ bool nonrot;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
@@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
} else
best_good_sectors = sectors;
+ nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+ has_nonrot_disk |= nonrot;
+ pending = atomic_read(&rdev->nr_pending);
dist = abs(this_sector - conf->mirrors[disk].head_position);
- if (choose_first
- /* Don't change to another disk for sequential reads */
- || conf->next_seq_sect == this_sector
- || dist == 0
- /* If device is idle, use it */
- || atomic_read(&rdev->nr_pending) == 0) {
+ if (choose_first) {
+ best_disk = disk;
+ break;
+ }
+ /* Don't change to another disk for sequential reads */
+ if (conf->mirrors[disk].next_seq_sect == this_sector
+ || dist == 0) {
+ int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
+ struct raid1_info *mirror = &conf->mirrors[disk];
+
+ best_disk = disk;
+ /*
+ * If the buffered sequential IO size exceeds the optimal
+ * iosize, check whether there is an idle disk and, if so,
+ * choose it. read_balance could already have chosen an
+ * idle disk before noticing that this is sequential IO on
+ * this disk. That doesn't matter: this disk will go idle
+ * and be used again once the first disk's IO size exceeds
+ * the optimal iosize. This way the first disk's iosize is
+ * at least the optimal iosize. The second disk's iosize
+ * might be small, but that is not a big deal since by the
+ * time the second disk starts IO, the first disk is likely
+ * still busy.
+ */
+ if (nonrot && opt_iosize > 0 &&
+ mirror->seq_start != MaxSector &&
+ mirror->next_seq_sect > opt_iosize &&
+ mirror->next_seq_sect - opt_iosize >=
+ mirror->seq_start) {
+ choose_next_idle = 1;
+ continue;
+ }
+ break;
+ }
+ /* If device is idle, use it */
+ if (pending == 0) {
best_disk = disk;
break;
}
+
+ if (choose_next_idle)
+ continue;
+
+ if (min_pending > pending) {
+ min_pending = pending;
+ best_pending_disk = disk;
+ }
+
if (dist < best_dist) {
best_dist = dist;
- best_disk = disk;
+ best_dist_disk = disk;
}
}
+ /*
+ * If all disks are rotational, choose the closest disk. If any disk is
+ * non-rotational, choose the disk with the fewest pending requests even
+ * if that disk is rotational, which may or may not be optimal for raids
+ * with mixed rotational/non-rotational disks depending on the workload.
+ */
+ if (best_disk == -1) {
+ if (has_nonrot_disk)
+ best_disk = best_pending_disk;
+ else
+ best_disk = best_dist_disk;
+ }
+
if (best_disk >= 0) {
rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
if (!rdev)
@@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
goto retry;
}
sectors = best_good_sectors;
- conf->next_seq_sect = this_sector + sectors;
- conf->last_used = best_disk;
+
+ if (conf->mirrors[best_disk].next_seq_sect != this_sector)
+ conf->mirrors[best_disk].seq_start = this_sector;
+
+ conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
}
rcu_read_unlock();
*max_sectors = sectors;
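In summary, the reworked read_balance() keeps sequential reads on the current disk (unless a non-rotational disk has already streamed more than the optimal I/O size), prefers a completely idle disk, and otherwise falls back to the least-loaded disk when any member is non-rotational or to the closest head position when all members are rotational. That fallback reduces to a one-liner; a stand-alone sketch with illustrative parameters instead of the r1conf fields:

        /* Illustrative fallback: seek distance only matters when every member is
         * a spinning disk; with any SSD present, queue depth is the better signal. */
        static int pick_fallback_disk(int best_pending_disk, int best_dist_disk,
                                      int has_nonrot_disk)
        {
                return has_nonrot_disk ? best_pending_disk : best_dist_disk;
        }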
@@ -870,10 +944,48 @@ do_sync_io:
pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
}
+struct raid1_plug_cb {
+ struct blk_plug_cb cb;
+ struct bio_list pending;
+ int pending_cnt;
+};
+
+static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
+ cb);
+ struct mddev *mddev = plug->cb.data;
+ struct r1conf *conf = mddev->private;
+ struct bio *bio;
+
+ if (from_schedule) {
+ spin_lock_irq(&conf->device_lock);
+ bio_list_merge(&conf->pending_bio_list, &plug->pending);
+ conf->pending_count += plug->pending_cnt;
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(mddev->thread);
+ kfree(plug);
+ return;
+ }
+
+ /* we aren't scheduling, so we can do the write-out directly. */
+ bio = bio_list_get(&plug->pending);
+ bitmap_unplug(mddev->bitmap);
+ wake_up(&conf->wait_barrier);
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ generic_make_request(bio);
+ bio = next;
+ }
+ kfree(plug);
+}
+
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r1conf *conf = mddev->private;
- struct mirror_info *mirror;
+ struct raid1_info *mirror;
struct r1bio *r1_bio;
struct bio *read_bio;
int i, disks;
@@ -883,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
+ struct blk_plug_cb *cb;
+ struct raid1_plug_cb *plug = NULL;
int first_clone;
int sectors_handled;
int max_sectors;
@@ -1185,11 +1299,22 @@ read_again:
mbio->bi_private = r1_bio;
atomic_inc(&r1_bio->remaining);
+
+ cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid1_plug_cb, cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
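The write path above batches bios per blk_plug: blk_check_plugged() attaches (or finds) a raid1_plug_cb for the plug that is currently active on this task, write bios accumulate on its private list, and raid1_unplug() submits them when the plug is flushed. A stripped-down sketch of the same pattern for a hypothetical driver (my_plug_cb, my_unplug and queue_write are illustrative names; the from_schedule handling is omitted):

        struct my_plug_cb {
                struct blk_plug_cb cb;          /* embedded callback handle */
                struct bio_list pending;        /* zeroed by blk_check_plugged() */
        };

        static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
        {
                struct my_plug_cb *plug = container_of(cb, struct my_plug_cb, cb);
                struct bio *bio;

                while ((bio = bio_list_pop(&plug->pending)))
                        generic_make_request(bio);      /* submit the batch */
                kfree(plug);
        }

        static void queue_write(struct mddev *mddev, struct bio *bio)
        {
                struct blk_plug_cb *cb =
                        blk_check_plugged(my_unplug, mddev, sizeof(struct my_plug_cb));

                if (cb)         /* a plug is active: defer until it is flushed */
                        bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending,
                                     bio);
                else            /* no plug: submit right away */
                        generic_make_request(bio);
        }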
/* Mustn't call r1_bio_write_done before this next test,
@@ -1364,7 +1489,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct r1conf *conf = mddev->private;
int err = -EEXIST;
int mirror = 0;
- struct mirror_info *p;
+ struct raid1_info *p;
int first = 0;
int last = conf->raid_disks - 1;
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -1433,7 +1558,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
struct r1conf *conf = mddev->private;
int err = 0;
int number = rdev->raid_disk;
- struct mirror_info *p = conf->mirrors+ number;
+ struct raid1_info *p = conf->mirrors + number;
if (rdev != p->rdev)
p = conf->mirrors + conf->raid_disks + number;
@@ -2173,8 +2298,7 @@ static void raid1d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- if (atomic_read(&mddev->plug_cnt) == 0)
- flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2371,6 +2495,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
bio->bi_rw = READ;
bio->bi_end_io = end_sync_read;
read_targets++;
+ } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
+ /*
+ * The device is suitable for reading (InSync),
+ * but has bad block(s) here. Let's try to correct them,
+ * if we are doing resync or repair. Otherwise, leave
+ * this device alone for this sync request.
+ */
+ bio->bi_rw = WRITE;
+ bio->bi_end_io = end_sync_write;
+ write_targets++;
}
}
if (bio->bi_end_io) {
@@ -2428,7 +2564,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
/* There is nowhere to write, so all non-sync
* drives must be failed - so we are finished
*/
- sector_t rv = max_sector - sector_nr;
+ sector_t rv;
+ if (min_bad > 0)
+ max_sector = sector_nr + min_bad;
+ rv = max_sector - sector_nr;
*skipped = 1;
put_buf(r1_bio);
return rv;
@@ -2521,7 +2660,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
{
struct r1conf *conf;
int i;
- struct mirror_info *disk;
+ struct raid1_info *disk;
struct md_rdev *rdev;
int err = -ENOMEM;
@@ -2529,7 +2668,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf)
goto abort;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)
+ conf->mirrors = kzalloc(sizeof(struct raid1_info)
* mddev->raid_disks * 2,
GFP_KERNEL);
if (!conf->mirrors)
@@ -2572,6 +2711,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
mddev->merge_check_needed = 1;
disk->head_position = 0;
+ disk->seq_start = MaxSector;
}
conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
@@ -2585,7 +2725,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->recovery_disabled = mddev->recovery_disabled - 1;
err = -EIO;
- conf->last_used = -1;
for (i = 0; i < conf->raid_disks * 2; i++) {
disk = conf->mirrors + i;
@@ -2611,19 +2750,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (disk->rdev &&
(disk->rdev->saved_raid_disk < 0))
conf->fullsync = 1;
- } else if (conf->last_used < 0)
- /*
- * The first working device is used as a
- * starting point to read balancing.
- */
- conf->last_used = i;
+ }
}
- if (conf->last_used < 0) {
- printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
- mdname(mddev));
- goto abort;
- }
err = -ENOMEM;
conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
@@ -2798,7 +2927,7 @@ static int raid1_reshape(struct mddev *mddev)
*/
mempool_t *newpool, *oldpool;
struct pool_info *newpoolinfo;
- struct mirror_info *newmirrors;
+ struct raid1_info *newmirrors;
struct r1conf *conf = mddev->private;
int cnt, raid_disks;
unsigned long flags;
@@ -2841,7 +2970,7 @@ static int raid1_reshape(struct mddev *mddev)
kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+ newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
@@ -2880,7 +3009,6 @@ static int raid1_reshape(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks = raid_disks;
mddev->delta_disks = 0;
- conf->last_used = 0; /* just make sure it is in-range */
lower_barrier(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 80ded139314c..0ff3715fb7eb 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -1,9 +1,15 @@
#ifndef _RAID1_H
#define _RAID1_H
-struct mirror_info {
+struct raid1_info {
struct md_rdev *rdev;
sector_t head_position;
+
+ /* When choosing the best device for a read (read_balance())
+ * we try to keep sequential reads on the same device
+ */
+ sector_t next_seq_sect;
+ sector_t seq_start;
};
/*
@@ -24,17 +30,11 @@ struct pool_info {
struct r1conf {
struct mddev *mddev;
- struct mirror_info *mirrors; /* twice 'raid_disks' to
+ struct raid1_info *mirrors; /* twice 'raid_disks' to
* allow for replacements.
*/
int raid_disks;
- /* When choose the best device for a read (read_balance())
- * we try to keep sequential reads one the same device
- * using 'last_used' and 'next_seq_sect'
- */
- int last_used;
- sector_t next_seq_sect;
/* During resync, read_balancing is only allowed on the part
* of the array that has been resynced. 'next_resync' tells us
* where that is.
@@ -135,20 +135,6 @@ struct r1bio {
/* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
};
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio *)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context. So we record
- * the success by setting bios[n] to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
/* bits for r1bio.state */
#define R1BIO_Uptodate 0
#define R1BIO_IsSync 1
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8da6282254c3..1c2eb38f3c51 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -60,7 +60,21 @@
*/
#define NR_RAID10_BIOS 256
-/* When there are this many requests queue to be written by
+/* when we get a read error on a read-only array, we redirect to another
+ * device without failing the first device, or trying to over-write to
+ * correct the read error. To keep track of bad blocks on a per-bio
+ * level, we store IO_BLOCKED in the appropriate 'bios' pointer
+ */
+#define IO_BLOCKED ((struct bio *)1)
+/* When we successfully write to a known bad-block, we need to remove the
+ * bad-block marking which must be done from process context. So we record
+ * the success by setting devs[n].bio to IO_MADE_GOOD
+ */
+#define IO_MADE_GOOD ((struct bio *)2)
+
+#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
+
+/* When there are this many requests queued to be written by
* the raid10 thread, we become 'congested' to provide back-pressure
* for writeback.
*/
@@ -645,7 +659,11 @@ static int raid10_mergeable_bvec(struct request_queue *q,
max = biovec->bv_len;
if (mddev->merge_check_needed) {
- struct r10bio r10_bio;
+ struct {
+ struct r10bio r10_bio;
+ struct r10dev devs[conf->copies];
+ } on_stack;
+ struct r10bio *r10_bio = &on_stack.r10_bio;
int s;
if (conf->reshape_progress != MaxSector) {
/* Cannot give any guidance during reshape */
@@ -653,18 +671,18 @@ static int raid10_mergeable_bvec(struct request_queue *q,
return biovec->bv_len;
return 0;
}
- r10_bio.sector = sector;
- raid10_find_phys(conf, &r10_bio);
+ r10_bio->sector = sector;
+ raid10_find_phys(conf, r10_bio);
rcu_read_lock();
for (s = 0; s < conf->copies; s++) {
- int disk = r10_bio.devs[s].devnum;
+ int disk = r10_bio->devs[s].devnum;
struct md_rdev *rdev = rcu_dereference(
conf->mirrors[disk].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q =
bdev_get_queue(rdev->bdev);
if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio.devs[s].addr
+ bvm->bi_sector = r10_bio->devs[s].addr
+ rdev->data_offset;
bvm->bi_bdev = rdev->bdev;
max = min(max, q->merge_bvec_fn(
@@ -676,7 +694,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
struct request_queue *q =
bdev_get_queue(rdev->bdev);
if (q->merge_bvec_fn) {
- bvm->bi_sector = r10_bio.devs[s].addr
+ bvm->bi_sector = r10_bio->devs[s].addr
+ rdev->data_offset;
bvm->bi_bdev = rdev->bdev;
max = min(max, q->merge_bvec_fn(
@@ -717,7 +735,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
int sectors = r10_bio->sectors;
int best_good_sectors;
sector_t new_distance, best_dist;
- struct md_rdev *rdev, *best_rdev;
+ struct md_rdev *best_rdev, *rdev = NULL;
int do_balance;
int best_slot;
struct geom *geo = &conf->geo;
@@ -839,9 +857,8 @@ retry:
return rdev;
}
-static int raid10_congested(void *data, int bits)
+int md_raid10_congested(struct mddev *mddev, int bits)
{
- struct mddev *mddev = data;
struct r10conf *conf = mddev->private;
int i, ret = 0;
@@ -849,8 +866,6 @@ static int raid10_congested(void *data, int bits)
conf->pending_count >= max_queued_requests)
return 1;
- if (mddev_congested(mddev, bits))
- return 1;
rcu_read_lock();
for (i = 0;
(i < conf->geo.raid_disks || i < conf->prev.raid_disks)
@@ -866,6 +881,15 @@ static int raid10_congested(void *data, int bits)
rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL_GPL(md_raid10_congested);
+
+static int raid10_congested(void *data, int bits)
+{
+ struct mddev *mddev = data;
+
+ return mddev_congested(mddev, bits) ||
+ md_raid10_congested(mddev, bits);
+}
static void flush_pending_writes(struct r10conf *conf)
{
@@ -1546,7 +1570,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
static void print_conf(struct r10conf *conf)
{
int i;
- struct mirror_info *tmp;
+ struct raid10_info *tmp;
printk(KERN_DEBUG "RAID10 conf printout:\n");
if (!conf) {
@@ -1580,7 +1604,7 @@ static int raid10_spare_active(struct mddev *mddev)
{
int i;
struct r10conf *conf = mddev->private;
- struct mirror_info *tmp;
+ struct raid10_info *tmp;
int count = 0;
unsigned long flags;
@@ -1655,7 +1679,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
else
mirror = first;
for ( ; mirror <= last ; mirror++) {
- struct mirror_info *p = &conf->mirrors[mirror];
+ struct raid10_info *p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
if (p->rdev) {
@@ -1709,7 +1733,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
int err = 0;
int number = rdev->raid_disk;
struct md_rdev **rdevp;
- struct mirror_info *p = conf->mirrors + number;
+ struct raid10_info *p = conf->mirrors + number;
print_conf(conf);
if (rdev == p->rdev)
@@ -2660,8 +2684,7 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- if (atomic_read(&mddev->plug_cnt) == 0)
- flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2876,7 +2899,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
- struct mirror_info *mirror = &conf->mirrors[i];
+ struct raid10_info *mirror = &conf->mirrors[i];
if ((mirror->rdev == NULL ||
test_bit(In_sync, &mirror->rdev->flags))
@@ -3388,7 +3411,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
goto out;
/* FIXME calc properly */
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
+ conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
max(0,mddev->delta_disks)),
GFP_KERNEL);
if (!conf->mirrors)
@@ -3452,7 +3475,7 @@ static int run(struct mddev *mddev)
{
struct r10conf *conf;
int i, disk_idx, chunk_size;
- struct mirror_info *disk;
+ struct raid10_info *disk;
struct md_rdev *rdev;
sector_t size;
sector_t min_offset_diff = 0;
@@ -3472,12 +3495,14 @@ static int run(struct mddev *mddev)
conf->thread = NULL;
chunk_size = mddev->chunk_sectors << 9;
- blk_queue_io_min(mddev->queue, chunk_size);
- if (conf->geo.raid_disks % conf->geo.near_copies)
- blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
- else
- blk_queue_io_opt(mddev->queue, chunk_size *
- (conf->geo.raid_disks / conf->geo.near_copies));
+ if (mddev->queue) {
+ blk_queue_io_min(mddev->queue, chunk_size);
+ if (conf->geo.raid_disks % conf->geo.near_copies)
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
+ else
+ blk_queue_io_opt(mddev->queue, chunk_size *
+ (conf->geo.raid_disks / conf->geo.near_copies));
+ }
rdev_for_each(rdev, mddev) {
long long diff;
@@ -3511,8 +3536,9 @@ static int run(struct mddev *mddev)
if (first || diff < min_offset_diff)
min_offset_diff = diff;
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
+ if (mddev->gendisk)
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
disk->head_position = 0;
}
@@ -3575,22 +3601,22 @@ static int run(struct mddev *mddev)
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
- mddev->queue->backing_dev_info.congested_fn = raid10_congested;
- mddev->queue->backing_dev_info.congested_data = mddev;
-
- /* Calculate max read-ahead size.
- * We need to readahead at least twice a whole stripe....
- * maybe...
- */
- {
+ if (mddev->queue) {
int stripe = conf->geo.raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
+ mddev->queue->backing_dev_info.congested_fn = raid10_congested;
+ mddev->queue->backing_dev_info.congested_data = mddev;
+
+ /* Calculate max read-ahead size.
+ * We need to readahead at least twice a whole stripe....
+ * maybe...
+ */
stripe /= conf->geo.near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
- blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
if (md_integrity_register(mddev))
goto out_free_conf;
@@ -3641,7 +3667,10 @@ static int stop(struct mddev *mddev)
lower_barrier(conf);
md_unregister_thread(&mddev->thread);
- blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
+ if (mddev->queue)
+ /* the unplug fn references 'conf'*/
+ blk_sync_queue(mddev->queue);
+
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
@@ -3805,7 +3834,7 @@ static int raid10_check_reshape(struct mddev *mddev)
if (mddev->delta_disks > 0) {
/* allocate new 'mirrors' list */
conf->mirrors_new = kzalloc(
- sizeof(struct mirror_info)
+ sizeof(struct raid10_info)
*(mddev->raid_disks +
mddev->delta_disks),
GFP_KERNEL);
@@ -3930,7 +3959,7 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_lock_irq(&conf->device_lock);
if (conf->mirrors_new) {
memcpy(conf->mirrors_new, conf->mirrors,
- sizeof(struct mirror_info)*conf->prev.raid_disks);
+ sizeof(struct raid10_info)*conf->prev.raid_disks);
smp_mb();
kfree(conf->mirrors_old); /* FIXME and elsewhere */
conf->mirrors_old = conf->mirrors;
@@ -4389,14 +4418,18 @@ static int handle_reshape_read_error(struct mddev *mddev,
{
/* Use sync reads to get the blocks from somewhere else */
int sectors = r10_bio->sectors;
- struct r10bio r10b;
struct r10conf *conf = mddev->private;
+ struct {
+ struct r10bio r10_bio;
+ struct r10dev devs[conf->copies];
+ } on_stack;
+ struct r10bio *r10b = &on_stack.r10_bio;
int slot = 0;
int idx = 0;
struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
- r10b.sector = r10_bio->sector;
- __raid10_find_phys(&conf->prev, &r10b);
+ r10b->sector = r10_bio->sector;
+ __raid10_find_phys(&conf->prev, r10b);
while (sectors) {
int s = sectors;
@@ -4407,7 +4440,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
s = PAGE_SIZE >> 9;
while (!success) {
- int d = r10b.devs[slot].devnum;
+ int d = r10b->devs[slot].devnum;
struct md_rdev *rdev = conf->mirrors[d].rdev;
sector_t addr;
if (rdev == NULL ||
@@ -4415,7 +4448,7 @@ static int handle_reshape_read_error(struct mddev *mddev,
!test_bit(In_sync, &rdev->flags))
goto failed;
- addr = r10b.devs[slot].addr + idx * PAGE_SIZE;
+ addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
success = sync_page_io(rdev,
addr,
s << 9,
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 135b1b0a1554..1054cf602345 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -1,7 +1,7 @@
#ifndef _RAID10_H
#define _RAID10_H
-struct mirror_info {
+struct raid10_info {
struct md_rdev *rdev, *replacement;
sector_t head_position;
int recovery_disabled; /* matches
@@ -13,8 +13,8 @@ struct mirror_info {
struct r10conf {
struct mddev *mddev;
- struct mirror_info *mirrors;
- struct mirror_info *mirrors_new, *mirrors_old;
+ struct raid10_info *mirrors;
+ struct raid10_info *mirrors_new, *mirrors_old;
spinlock_t device_lock;
/* geometry */
@@ -110,7 +110,7 @@ struct r10bio {
* We choose the number when they are allocated.
* We sometimes need an extra bio to write to the replacement.
*/
- struct {
+ struct r10dev {
struct bio *bio;
union {
struct bio *repl_bio; /* used for resync and
@@ -123,20 +123,6 @@ struct r10bio {
} devs[0];
};
-/* when we get a read error on a read-only array, we redirect to another
- * device without failing the first device, or trying to over-write to
- * correct the read error. To keep track of bad blocks on a per-bio
- * level, we store IO_BLOCKED in the appropriate 'bios' pointer
- */
-#define IO_BLOCKED ((struct bio*)1)
-/* When we successfully write to a known bad-block, we need to remove the
- * bad-block marking which must be done from process context. So we record
- * the success by setting devs[n].bio to IO_MADE_GOOD
- */
-#define IO_MADE_GOOD ((struct bio *)2)
-
-#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-
/* bits for r10bio.state */
enum r10bio_state {
R10BIO_Uptodate,
@@ -159,4 +145,7 @@ enum r10bio_state {
*/
R10BIO_Previous,
};
+
+extern int md_raid10_congested(struct mddev *mddev, int bits);
+
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 04348d76bb30..adda94df5eb2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
* We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
*/
-static inline int raid5_bi_phys_segments(struct bio *bio)
+static inline int raid5_bi_processed_stripes(struct bio *bio)
{
- return bio->bi_phys_segments & 0xffff;
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ return (atomic_read(segments) >> 16) & 0xffff;
}
-static inline int raid5_bi_hw_segments(struct bio *bio)
+static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
- return (bio->bi_phys_segments >> 16) & 0xffff;
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ return atomic_sub_return(1, segments) & 0xffff;
}
-static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
- --bio->bi_phys_segments;
- return raid5_bi_phys_segments(bio);
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ atomic_inc(segments);
}
-static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+static inline void raid5_set_bi_processed_stripes(struct bio *bio,
+ unsigned int cnt)
{
- unsigned short val = raid5_bi_hw_segments(bio);
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ int old, new;
- --val;
- bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
- return val;
+ do {
+ old = atomic_read(segments);
+ new = (old & 0xffff) | (cnt << 16);
+ } while (atomic_cmpxchg(segments, old, new) != old);
}
-static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
- bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
+ atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
+ atomic_set(segments, cnt);
}
/* Find first data disk in a raid6 stripe */
@@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh)
test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
-static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
- if (atomic_dec_and_test(&sh->count)) {
- BUG_ON(!list_empty(&sh->lru));
- BUG_ON(atomic_read(&conf->active_stripes)==0);
- if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state) &&
- !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- list_add_tail(&sh->lru, &conf->delayed_list);
- else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
- sh->bm_seq - conf->seq_write > 0)
- list_add_tail(&sh->lru, &conf->bitmap_list);
- else {
- clear_bit(STRIPE_DELAYED, &sh->state);
- clear_bit(STRIPE_BIT_DELAY, &sh->state);
- list_add_tail(&sh->lru, &conf->handle_list);
- }
- md_wakeup_thread(conf->mddev->thread);
- } else {
- BUG_ON(stripe_operations_active(sh));
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
- if (atomic_dec_return(&conf->preread_active_stripes)
- < IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- atomic_dec(&conf->active_stripes);
- if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
- list_add_tail(&sh->lru, &conf->inactive_list);
- wake_up(&conf->wait_for_stripe);
- if (conf->retry_read_aligned)
- md_wakeup_thread(conf->mddev->thread);
- }
+ BUG_ON(!list_empty(&sh->lru));
+ BUG_ON(atomic_read(&conf->active_stripes)==0);
+ if (test_bit(STRIPE_HANDLE, &sh->state)) {
+ if (test_bit(STRIPE_DELAYED, &sh->state) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ list_add_tail(&sh->lru, &conf->delayed_list);
+ else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
+ sh->bm_seq - conf->seq_write > 0)
+ list_add_tail(&sh->lru, &conf->bitmap_list);
+ else {
+ clear_bit(STRIPE_DELAYED, &sh->state);
+ clear_bit(STRIPE_BIT_DELAY, &sh->state);
+ list_add_tail(&sh->lru, &conf->handle_list);
+ }
+ md_wakeup_thread(conf->mddev->thread);
+ } else {
+ BUG_ON(stripe_operations_active(sh));
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ if (atomic_dec_return(&conf->preread_active_stripes)
+ < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ atomic_dec(&conf->active_stripes);
+ if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
+ list_add_tail(&sh->lru, &conf->inactive_list);
+ wake_up(&conf->wait_for_stripe);
+ if (conf->retry_read_aligned)
+ md_wakeup_thread(conf->mddev->thread);
}
}
}
+static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
+{
+ if (atomic_dec_and_test(&sh->count))
+ do_release_stripe(conf, sh);
+}
+
static void release_stripe(struct stripe_head *sh)
{
struct r5conf *conf = sh->raid_conf;
unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- __release_stripe(conf, sh);
- spin_unlock_irqrestore(&conf->device_lock, flags);
+ local_irq_save(flags);
+ if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
+ do_release_stripe(conf, sh);
+ spin_unlock(&conf->device_lock);
+ }
+ local_irq_restore(flags);
}
static inline void remove_hash(struct stripe_head *sh)
@@ -471,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
} else {
if (atomic_read(&sh->count)) {
BUG_ON(!list_empty(&sh->lru)
- && !test_bit(STRIPE_EXPANDING, &sh->state));
+ && !test_bit(STRIPE_EXPANDING, &sh->state)
+ && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
} else {
if (!test_bit(STRIPE_HANDLE, &sh->state))
atomic_inc(&conf->active_stripes);
@@ -640,6 +654,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
bi->bi_sector = (sh->sector
+ rdev->data_offset);
+ if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ bi->bi_rw |= REQ_FLUSH;
+
bi->bi_flags = 1 << BIO_UPTODATE;
bi->bi_idx = 0;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
@@ -749,14 +766,12 @@ static void ops_complete_biofill(void *stripe_head_ref)
{
struct stripe_head *sh = stripe_head_ref;
struct bio *return_bi = NULL;
- struct r5conf *conf = sh->raid_conf;
int i;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
/* clear completed biofills */
- spin_lock_irq(&conf->device_lock);
for (i = sh->disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
@@ -774,7 +789,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
while (rbi && rbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector);
- if (!raid5_dec_bi_phys_segments(rbi)) {
+ if (!raid5_dec_bi_active_stripes(rbi)) {
rbi->bi_next = return_bi;
return_bi = rbi;
}
@@ -782,7 +797,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
}
}
}
- spin_unlock_irq(&conf->device_lock);
clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
return_io(return_bi);
@@ -794,7 +808,6 @@ static void ops_complete_biofill(void *stripe_head_ref)
static void ops_run_biofill(struct stripe_head *sh)
{
struct dma_async_tx_descriptor *tx = NULL;
- struct r5conf *conf = sh->raid_conf;
struct async_submit_ctl submit;
int i;
@@ -805,10 +818,10 @@ static void ops_run_biofill(struct stripe_head *sh)
struct r5dev *dev = &sh->dev[i];
if (test_bit(R5_Wantfill, &dev->flags)) {
struct bio *rbi;
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&sh->stripe_lock);
dev->read = rbi = dev->toread;
dev->toread = NULL;
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&sh->stripe_lock);
while (rbi && rbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(0, rbi, dev->page,
@@ -1144,12 +1157,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
struct bio *wbi;
- spin_lock_irq(&sh->raid_conf->device_lock);
+ spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
BUG_ON(dev->written);
wbi = dev->written = chosen;
- spin_unlock_irq(&sh->raid_conf->device_lock);
+ spin_unlock_irq(&sh->stripe_lock);
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
@@ -1454,6 +1467,8 @@ static int grow_one_stripe(struct r5conf *conf)
init_waitqueue_head(&sh->ops.wait_for_ops);
#endif
+ spin_lock_init(&sh->stripe_lock);
+
if (grow_buffers(sh)) {
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
@@ -1739,7 +1754,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
- }
+ } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+
if (atomic_read(&rdev->read_errors))
atomic_set(&rdev->read_errors, 0);
} else {
@@ -1784,7 +1801,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
else
retry = 1;
if (retry)
- set_bit(R5_ReadError, &sh->dev[i].flags);
+ if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
+ set_bit(R5_ReadError, &sh->dev[i].flags);
+ clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
+ } else
+ set_bit(R5_ReadNoMerge, &sh->dev[i].flags);
else {
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
@@ -2340,11 +2361,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
(unsigned long long)bi->bi_sector,
(unsigned long long)sh->sector);
-
- spin_lock_irq(&conf->device_lock);
+ /*
+ * If several bios share a stripe, the bio bi_phys_segments field acts as
+ * a reference count to avoid races. The reference count should already be
+ * increased before this function is called (for example, in
+ * make_request()), so other bios sharing this stripe will not free the
+ * stripe. If a stripe is owned by a single bio, the stripe lock will
+ * protect it.
+ */
+ spin_lock_irq(&sh->stripe_lock);
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
- if (*bip == NULL && sh->dev[dd_idx].written == NULL)
+ if (*bip == NULL)
firstwrite = 1;
} else
bip = &sh->dev[dd_idx].toread;
@@ -2360,7 +2388,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip)
bi->bi_next = *bip;
*bip = bi;
- bi->bi_phys_segments++;
+ raid5_inc_bi_active_stripes(bi);
if (forwrite) {
/* check if page is covered */
@@ -2375,7 +2403,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
}
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&sh->stripe_lock);
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
(unsigned long long)(*bip)->bi_sector,
@@ -2391,7 +2419,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
overlap:
set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
- spin_unlock_irq(&conf->device_lock);
+ spin_unlock_irq(&sh->stripe_lock);
return 0;
}
@@ -2441,10 +2469,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
rdev_dec_pending(rdev, conf->mddev);
}
}
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&sh->stripe_lock);
/* fail all writes first */
bi = sh->dev[i].towrite;
sh->dev[i].towrite = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
if (bi) {
s->to_write--;
bitmap_end = 1;
@@ -2457,13 +2486,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (!raid5_dec_bi_phys_segments(bi)) {
+ if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
}
bi = nextbi;
}
+ if (bitmap_end)
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS, 0, 0);
+ bitmap_end = 0;
/* and fail all 'written' */
bi = sh->dev[i].written;
sh->dev[i].written = NULL;
@@ -2472,7 +2505,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (!raid5_dec_bi_phys_segments(bi)) {
+ if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev);
bi->bi_next = *return_bi;
*return_bi = bi;
@@ -2496,14 +2529,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags);
- if (!raid5_dec_bi_phys_segments(bi)) {
+ if (!raid5_dec_bi_active_stripes(bi)) {
bi->bi_next = *return_bi;
*return_bi = bi;
}
bi = nextbi;
}
}
- spin_unlock_irq(&conf->device_lock);
if (bitmap_end)
bitmap_endwrite(conf->mddev->bitmap, sh->sector,
STRIPE_SECTORS, 0, 0);
@@ -2707,30 +2739,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
test_bit(R5_UPTODATE, &dev->flags)) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
- int bitmap_end = 0;
pr_debug("Return write for disc %d\n", i);
- spin_lock_irq(&conf->device_lock);
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
- if (!raid5_dec_bi_phys_segments(wbi)) {
+ if (!raid5_dec_bi_active_stripes(wbi)) {
md_write_end(conf->mddev);
wbi->bi_next = *return_bi;
*return_bi = wbi;
}
wbi = wbi2;
}
- if (dev->towrite == NULL)
- bitmap_end = 1;
- spin_unlock_irq(&conf->device_lock);
- if (bitmap_end)
- bitmap_endwrite(conf->mddev->bitmap,
- sh->sector,
- STRIPE_SECTORS,
+ bitmap_endwrite(conf->mddev->bitmap, sh->sector,
+ STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
- 0);
+ 0);
}
}
@@ -3182,7 +3207,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
/* Now to look around and see what can be done */
rcu_read_lock();
- spin_lock_irq(&conf->device_lock);
for (i=disks; i--; ) {
struct md_rdev *rdev;
sector_t first_bad;
@@ -3328,7 +3352,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
do_recovery = 1;
}
}
- spin_unlock_irq(&conf->device_lock);
if (test_bit(STRIPE_SYNCING, &sh->state)) {
/* If there is a failed device being replaced,
* we must be recovering.
@@ -3791,7 +3814,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
* this sets the active stripe count to 1 and the processed
* stripe count to zero (upper 16 bits)
*/
- bi->bi_phys_segments = 1; /* biased count of active stripes */
+ raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
}
return bi;
@@ -3988,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
return sh;
}
+struct raid5_plug_cb {
+ struct blk_plug_cb cb;
+ struct list_head list;
+};
+
+static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
+{
+ struct raid5_plug_cb *cb = container_of(
+ blk_cb, struct raid5_plug_cb, cb);
+ struct stripe_head *sh;
+ struct mddev *mddev = cb->cb.data;
+ struct r5conf *conf = mddev->private;
+
+ if (cb->list.next && !list_empty(&cb->list)) {
+ spin_lock_irq(&conf->device_lock);
+ while (!list_empty(&cb->list)) {
+ sh = list_first_entry(&cb->list, struct stripe_head, lru);
+ list_del_init(&sh->lru);
+ /*
+ * avoid a race where release_stripe_plug() sees
+ * STRIPE_ON_UNPLUG_LIST clear while the stripe
+ * is still on our list
+ */
+ smp_mb__before_clear_bit();
+ clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
+ __release_stripe(conf, sh);
+ }
+ spin_unlock_irq(&conf->device_lock);
+ }
+ kfree(cb);
+}
+
+static void release_stripe_plug(struct mddev *mddev,
+ struct stripe_head *sh)
+{
+ struct blk_plug_cb *blk_cb = blk_check_plugged(
+ raid5_unplug, mddev,
+ sizeof(struct raid5_plug_cb));
+ struct raid5_plug_cb *cb;
+
+ if (!blk_cb) {
+ release_stripe(sh);
+ return;
+ }
+
+ cb = container_of(blk_cb, struct raid5_plug_cb, cb);
+
+ if (cb->list.next == NULL)
+ INIT_LIST_HEAD(&cb->list);
+
+ if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
+ list_add_tail(&sh->lru, &cb->list);
+ else
+ release_stripe(sh);
+}
+
static void make_request(struct mddev *mddev, struct bio * bi)
{
struct r5conf *conf = mddev->private;
@@ -4113,11 +4192,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- if ((bi->bi_rw & REQ_SYNC) &&
+ if ((bi->bi_rw & REQ_NOIDLE) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
- mddev_check_plugged(mddev);
- release_stripe(sh);
+ release_stripe_plug(mddev, sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -4126,9 +4204,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
}
- spin_lock_irq(&conf->device_lock);
- remaining = raid5_dec_bi_phys_segments(bi);
- spin_unlock_irq(&conf->device_lock);
+ remaining = raid5_dec_bi_active_stripes(bi);
if (remaining == 0) {
if ( rw == WRITE )
@@ -4484,7 +4560,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
sector += STRIPE_SECTORS,
scnt++) {
- if (scnt < raid5_bi_hw_segments(raid_bio))
+ if (scnt < raid5_bi_processed_stripes(raid_bio))
/* already done this stripe */
continue;
@@ -4492,25 +4568,24 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
if (!sh) {
/* failed to get a stripe - must wait */
- raid5_set_bi_hw_segments(raid_bio, scnt);
+ raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
- raid5_set_bi_hw_segments(raid_bio, scnt);
+ raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
return handled;
}
+ set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags);
handle_stripe(sh);
release_stripe(sh);
handled++;
}
- spin_lock_irq(&conf->device_lock);
- remaining = raid5_dec_bi_phys_segments(raid_bio);
- spin_unlock_irq(&conf->device_lock);
+ remaining = raid5_dec_bi_active_stripes(raid_bio);
if (remaining == 0)
bio_endio(raid_bio, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
@@ -4518,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
+#define MAX_STRIPE_BATCH 8
+static int handle_active_stripes(struct r5conf *conf)
+{
+ struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
+ int i, batch_size = 0;
+
+ while (batch_size < MAX_STRIPE_BATCH &&
+ (sh = __get_priority_stripe(conf)) != NULL)
+ batch[batch_size++] = sh;
+
+ if (batch_size == 0)
+ return batch_size;
+ spin_unlock_irq(&conf->device_lock);
+
+ for (i = 0; i < batch_size; i++)
+ handle_stripe(batch[i]);
+
+ cond_resched();
+
+ spin_lock_irq(&conf->device_lock);
+ for (i = 0; i < batch_size; i++)
+ __release_stripe(conf, batch[i]);
+ return batch_size;
+}
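handle_active_stripes() lets raid5d pull up to MAX_STRIPE_BATCH stripes off the priority lists in one pass and handle them with device_lock dropped, reacquiring it only to release the whole batch, which cuts lock round-trips on the hot path. The lock/batch shape, reduced to a sketch with hypothetical work_item/get/handle/release hooks (cond_resched() omitted):

        #include <linux/spinlock.h>

        #define BATCH_MAX 8

        struct work_item;       /* opaque in this sketch */

        /* Caller holds *lock on entry and gets it back on return, like raid5d. */
        static int process_batch(spinlock_t *lock,
                                 struct work_item *(*get)(void),
                                 void (*handle)(struct work_item *),
                                 void (*release)(struct work_item *))
        {
                struct work_item *batch[BATCH_MAX];
                int i, n = 0;

                while (n < BATCH_MAX && (batch[n] = get()) != NULL)
                        n++;
                if (!n)
                        return 0;

                spin_unlock_irq(lock);
                for (i = 0; i < n; i++)
                        handle(batch[i]);       /* heavy work without the lock */
                spin_lock_irq(lock);

                for (i = 0; i < n; i++)
                        release(batch[i]);
                return n;
        }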
/*
* This is our raid5 kernel thread.
@@ -4528,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
*/
static void raid5d(struct mddev *mddev)
{
- struct stripe_head *sh;
struct r5conf *conf = mddev->private;
int handled;
struct blk_plug plug;
@@ -4542,8 +4640,9 @@ static void raid5d(struct mddev *mddev)
spin_lock_irq(&conf->device_lock);
while (1) {
struct bio *bio;
+ int batch_size;
- if (atomic_read(&mddev->plug_cnt) == 0 &&
+ if (
!list_empty(&conf->bitmap_list)) {
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
@@ -4553,8 +4652,7 @@ static void raid5d(struct mddev *mddev)
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf);
}
- if (atomic_read(&mddev->plug_cnt) == 0)
- raid5_activate_delayed(conf);
+ raid5_activate_delayed(conf);
while ((bio = remove_bio_from_retry(conf))) {
int ok;
@@ -4566,21 +4664,16 @@ static void raid5d(struct mddev *mddev)
handled++;
}
- sh = __get_priority_stripe(conf);
-
- if (!sh)
+ batch_size = handle_active_stripes(conf);
+ if (!batch_size)
break;
- spin_unlock_irq(&conf->device_lock);
-
- handled++;
- handle_stripe(sh);
- release_stripe(sh);
- cond_resched();
+ handled += batch_size;
- if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
+ if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) {
+ spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
-
- spin_lock_irq(&conf->device_lock);
+ spin_lock_irq(&conf->device_lock);
+ }
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2164021f3b5f..a9fc24901eda 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -210,6 +210,7 @@ struct stripe_head {
int disks; /* disks in stripe */
enum check_states check_state;
enum reconstruct_states reconstruct_state;
+ spinlock_t stripe_lock;
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -273,6 +274,7 @@ enum r5dev_flags {
R5_Wantwrite,
R5_Overlap, /* There is a pending overlapping request
* on this block */
+ R5_ReadNoMerge, /* prevent bio from merging in block-layer */
R5_ReadError, /* seen a read error here recently */
R5_ReWrite, /* have tried to over-write the readerror */
@@ -319,6 +321,7 @@ enum {
STRIPE_BIOFILL_RUN,
STRIPE_COMPUTE_RUN,
STRIPE_OPS_REQ_PENDING,
+ STRIPE_ON_UNPLUG_LIST,
};
/*
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 9575db429df4..d941581ab921 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -6,20 +6,82 @@ menuconfig MEDIA_SUPPORT
tristate "Multimedia support"
depends on HAS_IOMEM
help
- If you want to use Video for Linux, DVB for Linux, or DAB adapters,
+ If you want to use webcams, video grabber devices and/or TV devices,
enable this option and other options below.
+ Additional info and docs are available on the web at
+ <http://linuxtv.org>
if MEDIA_SUPPORT
comment "Multimedia core support"
#
+# Multimedia support - automatically enable V4L2 and DVB core
+#
+config MEDIA_CAMERA_SUPPORT
+ bool "Cameras/video grabbers support"
+ ---help---
+ Enable support for webcams and video grabbers.
+
+ Say Y when you have a webcam or a video capture grabber board.
+
+config MEDIA_ANALOG_TV_SUPPORT
+ bool "Analog TV support"
+ ---help---
+ Enable analog TV support.
+
+ Say Y when you have a TV board with analog support or with a
+ hybrid analog/digital TV chipset.
+
+ Note: There are several DVB cards that are based on chips that
+ support both analog and digital TV. Disabling this option
+ will disable support for them.
+
+config MEDIA_DIGITAL_TV_SUPPORT
+ bool "Digital TV support"
+ ---help---
+ Enable digital TV support.
+
+ Say Y when you have a board with digital support or a board with
+ hybrid digital TV and analog TV.
+
+config MEDIA_RADIO_SUPPORT
+ bool "AM/FM radio receivers/transmitters support"
+ ---help---
+ Enable AM/FM radio support.
+
+ Additional info and docs are available on the web at
+ <http://linuxtv.org>
+
+ Say Y when you have a board with radio support.
+
+ Note: There are several TV cards that are based on chips that
+ support radio reception. Disabling this option will
+ disable support for them.
+
+config MEDIA_RC_SUPPORT
+ bool "Remote Controller support"
+ depends on INPUT
+ ---help---
+ Enable support for Remote Controllers on Linux. This is
+ needed in order to support several video capture adapters,
+ standalone IR receivers/transmitters, and RF receivers.
+
+ Enable this option if you have a video capture board even
+ if you don't need IR, as otherwise, you may not be able to
+ compile the driver for your adapter.
+
+ Say Y when you have a TV or an IR device.
+
+#
# Media controller
+# Selectable only for webcam/grabbers, as other drivers don't use it
#
config MEDIA_CONTROLLER
bool "Media Controller API (EXPERIMENTAL)"
depends on EXPERIMENTAL
+ depends on MEDIA_CAMERA_SUPPORT
---help---
Enable the media controller API used to query media devices internal
topology and configure it dynamically.
@@ -27,26 +89,15 @@ config MEDIA_CONTROLLER
This API is mostly used by camera interfaces in embedded platforms.
#
-# V4L core and enabled API's
+# Video4Linux support
+# Enabled only if one of the V4L2 types (ATV, webcam, radio) is selected
#
config VIDEO_DEV
- tristate "Video For Linux"
- ---help---
- V4L core support for video capture and overlay devices, webcams and
- AM/FM radio cards.
-
- This kernel includes support for the new Video for Linux Two API,
- (V4L2).
-
- Additional info and docs are available on the web at
- <http://linuxtv.org>
-
- Documentation for V4L2 is also available on the web at
- <http://bytesex.org/v4l/>.
-
- To compile this driver as a module, choose M here: the
- module will be called videodev.
+ tristate
+ depends on MEDIA_SUPPORT
+ depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+ default y
config VIDEO_V4L2_COMMON
tristate
@@ -64,25 +115,15 @@ config VIDEO_V4L2_SUBDEV_API
#
# DVB Core
+# Enabled only if digital TV support is selected
#
config DVB_CORE
- tristate "DVB for Linux"
+ tristate
+ depends on MEDIA_SUPPORT
+ depends on MEDIA_DIGITAL_TV_SUPPORT
+ default y
select CRC32
- help
- DVB core utility functions for device handling, software fallbacks etc.
-
- Enable this if you own a DVB/ATSC adapter and want to use it or if
- you compile Linux for a digital SetTopBox.
-
- Say Y when you have a DVB or an ATSC card and want to use it.
-
- API specs and user tools are available from <http://www.linuxtv.org/>.
-
- Please report problems regarding this support to the LinuxDVB
- mailing list.
-
- If unsure say N.
config DVB_NET
bool "DVB Network Support"
@@ -97,12 +138,7 @@ config DVB_NET
You may want to disable the network support on embedded devices. If
unsure say Y.
-config VIDEO_MEDIA
- tristate
- default (DVB_CORE && (VIDEO_DEV = n)) || (VIDEO_DEV && (DVB_CORE = n)) || (DVB_CORE && VIDEO_DEV)
-
-comment "Multimedia drivers"
-
+comment "Media drivers"
source "drivers/media/common/Kconfig"
source "drivers/media/rc/Kconfig"
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index bbf4945149a9..94c6ff7a5da3 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -1,7 +1,8 @@
config MEDIA_ATTACH
bool "Load and attach frontend and tuner driver modules as needed"
- depends on VIDEO_MEDIA
+ depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
depends on MODULES
+ default y if !EXPERT
help
Remove the static dependency of DVB card drivers on all
frontend modules for all possible card variants. Instead,
@@ -19,15 +20,15 @@ config MEDIA_ATTACH
config MEDIA_TUNER
tristate
- default VIDEO_MEDIA && I2C
- depends on VIDEO_MEDIA && I2C
+ depends on (MEDIA_ANALOG_TV_SUPPORT || MEDIA_RADIO_SUPPORT) && I2C
+ default y
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC4000 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMISE
- select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE && EXPERIMENTAL
- select MEDIA_TUNER_TEA5767 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMISE && MEDIA_RADIO_SUPPORT && EXPERIMENTAL
+ select MEDIA_TUNER_TEA5767 if !MEDIA_TUNER_CUSTOMISE && MEDIA_RADIO_SUPPORT
select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE
@@ -47,10 +48,11 @@ config MEDIA_TUNER_CUSTOMISE
menu "Customize TV tuners"
visible if MEDIA_TUNER_CUSTOMISE
+ depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
config MEDIA_TUNER_SIMPLE
tristate "Simple tuner support"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
select MEDIA_TUNER_TDA9887
default m if MEDIA_TUNER_CUSTOMISE
help
@@ -58,7 +60,7 @@ config MEDIA_TUNER_SIMPLE
config MEDIA_TUNER_TDA8290
tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
select MEDIA_TUNER_TDA827X
select MEDIA_TUNER_TDA18271
default m if MEDIA_TUNER_CUSTOMISE
@@ -67,21 +69,21 @@ config MEDIA_TUNER_TDA8290
config MEDIA_TUNER_TDA827X
tristate "Philips TDA827X silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A DVB-T silicon tuner module. Say Y when you want to support this tuner.
config MEDIA_TUNER_TDA18271
tristate "NXP TDA18271 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A silicon tuner module. Say Y when you want to support this tuner.
config MEDIA_TUNER_TDA9887
tristate "TDA 9885/6/7 analog IF demodulator"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Say Y here to include support for Philips TDA9885/6/7
@@ -89,7 +91,7 @@ config MEDIA_TUNER_TDA9887
config MEDIA_TUNER_TEA5761
tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
depends on EXPERIMENTAL
default m if MEDIA_TUNER_CUSTOMISE
help
@@ -97,63 +99,63 @@ config MEDIA_TUNER_TEA5761
config MEDIA_TUNER_TEA5767
tristate "TEA 5767 radio tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Say Y here to include support for the Philips TEA5767 radio tuner.
config MEDIA_TUNER_MT20XX
tristate "Microtune 2032 / 2050 tuners"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Say Y here to include support for the MT2032 / MT2050 tuner.
config MEDIA_TUNER_MT2060
tristate "Microtune MT2060 silicon IF tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon IF tuner MT2060 from Microtune.
config MEDIA_TUNER_MT2063
tristate "Microtune MT2063 silicon IF tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon IF tuner MT2063 from Microtune.
config MEDIA_TUNER_MT2266
tristate "Microtune MT2266 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon baseband tuner MT2266 from Microtune.
config MEDIA_TUNER_MT2131
tristate "Microtune MT2131 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon baseband tuner MT2131 from Microtune.
config MEDIA_TUNER_QT1010
tristate "Quantek QT1010 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner QT1010 from Quantek.
config MEDIA_TUNER_XC2028
tristate "XCeive xc2028/xc3028 tuners"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Say Y here to include support for the xc2028/xc3028 tuners.
config MEDIA_TUNER_XC5000
tristate "Xceive XC5000 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner XC5000 from Xceive.
@@ -162,7 +164,7 @@ config MEDIA_TUNER_XC5000
config MEDIA_TUNER_XC4000
tristate "Xceive XC4000 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner XC4000 from Xceive.
@@ -171,70 +173,70 @@ config MEDIA_TUNER_XC4000
config MEDIA_TUNER_MXL5005S
tristate "MaxLinear MSL5005S silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner MXL5005S from MaxLinear.
config MEDIA_TUNER_MXL5007T
tristate "MaxLinear MxL5007T silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner MxL5007T from MaxLinear.
config MEDIA_TUNER_MC44S803
tristate "Freescale MC44S803 Low Power CMOS Broadband tuners"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Freescale MC44S803 based tuners
config MEDIA_TUNER_MAX2165
tristate "Maxim MAX2165 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
A driver for the silicon tuner MAX2165 from Maxim.
config MEDIA_TUNER_TDA18218
tristate "NXP TDA18218 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
NXP TDA18218 silicon tuner driver.
config MEDIA_TUNER_FC0011
tristate "Fitipower FC0011 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Fitipower FC0011 silicon tuner driver.
config MEDIA_TUNER_FC0012
tristate "Fitipower FC0012 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Fitipower FC0012 silicon tuner driver.
config MEDIA_TUNER_FC0013
tristate "Fitipower FC0013 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Fitipower FC0013 silicon tuner driver.
config MEDIA_TUNER_TDA18212
tristate "NXP TDA18212 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
NXP TDA18212 silicon tuner driver.
config MEDIA_TUNER_TUA9001
tristate "Infineon TUA 9001 silicon tuner"
- depends on VIDEO_MEDIA && I2C
+ depends on MEDIA_SUPPORT && I2C
default m if MEDIA_TUNER_CUSTOMISE
help
Infineon TUA 9001 silicon tuner driver.
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index b5ee3ebfcfca..ea0550eafe7d 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -90,11 +90,22 @@ struct firmware_properties {
int scode_nr;
};
+enum xc2028_state {
+ XC2028_NO_FIRMWARE = 0,
+ XC2028_WAITING_FIRMWARE,
+ XC2028_ACTIVE,
+ XC2028_SLEEP,
+ XC2028_NODEV,
+};
+
struct xc2028_data {
struct list_head hybrid_tuner_instance_list;
struct tuner_i2c_props i2c_props;
__u32 frequency;
+ enum xc2028_state state;
+ const char *fname;
+
struct firmware_description *firm;
int firm_size;
__u16 firm_version;
@@ -255,6 +266,21 @@ static v4l2_std_id parse_audio_std_option(void)
return 0;
}
+static int check_device_status(struct xc2028_data *priv)
+{
+ switch (priv->state) {
+ case XC2028_NO_FIRMWARE:
+ case XC2028_WAITING_FIRMWARE:
+ return -EAGAIN;
+ case XC2028_ACTIVE:
+ case XC2028_SLEEP:
+ return 0;
+ case XC2028_NODEV:
+ return -ENODEV;
+ }
+ return 0;
+}
+
static void free_firmware(struct xc2028_data *priv)
{
int i;
@@ -270,45 +296,28 @@ static void free_firmware(struct xc2028_data *priv)
priv->firm = NULL;
priv->firm_size = 0;
+ priv->state = XC2028_NO_FIRMWARE;
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
-static int load_all_firmwares(struct dvb_frontend *fe)
+static int load_all_firmwares(struct dvb_frontend *fe,
+ const struct firmware *fw)
{
struct xc2028_data *priv = fe->tuner_priv;
- const struct firmware *fw = NULL;
const unsigned char *p, *endp;
int rc = 0;
int n, n_array;
char name[33];
- char *fname;
tuner_dbg("%s called\n", __func__);
- if (!firmware_name[0])
- fname = priv->ctrl.fname;
- else
- fname = firmware_name;
-
- tuner_dbg("Reading firmware %s\n", fname);
- rc = request_firmware(&fw, fname, priv->i2c_props.adap->dev.parent);
- if (rc < 0) {
- if (rc == -ENOENT)
- tuner_err("Error: firmware %s not found.\n",
- fname);
- else
- tuner_err("Error %d while requesting firmware %s \n",
- rc, fname);
-
- return rc;
- }
p = fw->data;
endp = p + fw->size;
if (fw->size < sizeof(name) - 1 + 2 + 2) {
tuner_err("Error: firmware file %s has invalid size!\n",
- fname);
+ priv->fname);
goto corrupt;
}
@@ -323,7 +332,7 @@ static int load_all_firmwares(struct dvb_frontend *fe)
p += 2;
tuner_info("Loading %d firmware images from %s, type: %s, ver %d.%d\n",
- n_array, fname, name,
+ n_array, priv->fname, name,
priv->firm_version >> 8, priv->firm_version & 0xff);
priv->firm = kcalloc(n_array, sizeof(*priv->firm), GFP_KERNEL);
@@ -417,9 +426,10 @@ err:
free_firmware(priv);
done:
- release_firmware(fw);
if (rc == 0)
tuner_dbg("Firmware files loaded.\n");
+ else
+ priv->state = XC2028_NODEV;
return rc;
}
@@ -707,22 +717,15 @@ static int check_firmware(struct dvb_frontend *fe, unsigned int type,
{
struct xc2028_data *priv = fe->tuner_priv;
struct firmware_properties new_fw;
- int rc = 0, retry_count = 0;
+ int rc, retry_count = 0;
u16 version, hwmodel;
v4l2_std_id std0;
tuner_dbg("%s called\n", __func__);
- if (!priv->firm) {
- if (!priv->ctrl.fname) {
- tuner_info("xc2028/3028 firmware name not set!\n");
- return -EINVAL;
- }
-
- rc = load_all_firmwares(fe);
- if (rc < 0)
- return rc;
- }
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
if (priv->ctrl.mts && !(type & FM))
type |= MTS;
@@ -749,9 +752,13 @@ retry:
printk("scode_nr %d\n", new_fw.scode_nr);
}
- /* No need to reload base firmware if it matches */
- if (((BASE | new_fw.type) & BASE_TYPES) ==
- (priv->cur_fw.type & BASE_TYPES)) {
+ /*
+ * No need to reload base firmware if it matches and if the tuner
+ * is not in sleep mode
+ */
+ if ((priv->state == XC2028_ACTIVE) &&
+ (((BASE | new_fw.type) & BASE_TYPES) ==
+ (priv->cur_fw.type & BASE_TYPES))) {
tuner_dbg("BASE firmware not changed.\n");
goto skip_base;
}
@@ -872,10 +879,13 @@ read_not_reliable:
* 2. Tell whether BASE firmware was just changed the next time through.
*/
priv->cur_fw.type |= BASE;
+ priv->state = XC2028_ACTIVE;
return 0;
fail:
+ priv->state = XC2028_SLEEP;
+
memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
@@ -893,28 +903,39 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
{
struct xc2028_data *priv = fe->tuner_priv;
u16 frq_lock, signal = 0;
- int rc;
+ int rc, i;
tuner_dbg("%s called\n", __func__);
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
+
mutex_lock(&priv->lock);
/* Sync Lock Indicator */
- rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
- if (rc < 0)
- goto ret;
+ for (i = 0; i < 3; i++) {
+ rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
+ if (rc < 0)
+ goto ret;
- /* Frequency is locked */
- if (frq_lock == 1)
- signal = 1 << 11;
+ if (frq_lock)
+ break;
+ msleep(6);
+ }
+
+ /* Frequency didn't lock */
+ if (frq_lock == 2)
+ goto ret;
/* Get SNR of the video signal */
rc = xc2028_get_reg(priv, XREG_SNR, &signal);
if (rc < 0)
goto ret;
- /* Use both frq_lock and signal to generate the result */
- signal = signal || ((signal & 0x07) << 12);
+ /* Signal level is 3 bits only */
+
+ signal = ((1 << 12) - 1) | ((signal & 0x07) << 12);
ret:
mutex_unlock(&priv->lock);
@@ -926,6 +947,49 @@ ret:
return rc;
}
+static int xc2028_get_afc(struct dvb_frontend *fe, s32 *afc)
+{
+ struct xc2028_data *priv = fe->tuner_priv;
+ int i, rc;
+ u16 frq_lock = 0;
+ s16 afc_reg = 0;
+
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&priv->lock);
+
+ /* Sync Lock Indicator */
+ for (i = 0; i < 3; i++) {
+ rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
+ if (rc < 0)
+ goto ret;
+
+ if (frq_lock)
+ break;
+ msleep(6);
+ }
+
+ /* Frequency didn't lock */
+ if (frq_lock == 2)
+ goto ret;
+
+ /* Get AFC */
+ rc = xc2028_get_reg(priv, XREG_FREQ_ERROR, &afc_reg);
+ if (rc < 0)
+ goto ret;
+
+ *afc = afc_reg * 15625; /* Hz */
+
+ tuner_dbg("AFC is %d Hz\n", *afc);
+
+ret:
+ mutex_unlock(&priv->lock);
+
+ return rc;
+}
+
#define DIV 15625
static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
@@ -1111,11 +1175,16 @@ static int xc2028_set_params(struct dvb_frontend *fe)
u32 delsys = c->delivery_system;
u32 bw = c->bandwidth_hz;
struct xc2028_data *priv = fe->tuner_priv;
- unsigned int type=0;
+ int rc;
+ unsigned int type = 0;
u16 demod = 0;
tuner_dbg("%s called\n", __func__);
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
+
switch (delsys) {
case SYS_DVBT:
case SYS_DVBT2:
@@ -1201,7 +1270,11 @@ static int xc2028_set_params(struct dvb_frontend *fe)
static int xc2028_sleep(struct dvb_frontend *fe)
{
struct xc2028_data *priv = fe->tuner_priv;
- int rc = 0;
+ int rc;
+
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
/* Avoid firmware reload on slow devices or if PM disabled */
if (no_poweroff || priv->ctrl.disable_power_mgmt)
@@ -1220,7 +1293,7 @@ static int xc2028_sleep(struct dvb_frontend *fe)
else
rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
- priv->cur_fw.type = 0; /* need firmware reload */
+ priv->state = XC2028_SLEEP;
mutex_unlock(&priv->lock);
@@ -1237,8 +1310,9 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
/* only perform final cleanup if this is the last instance */
if (hybrid_tuner_report_instance_count(priv) == 1) {
- kfree(priv->ctrl.fname);
free_firmware(priv);
+ kfree(priv->ctrl.fname);
+ priv->ctrl.fname = NULL;
}
if (priv)
@@ -1254,14 +1328,42 @@ static int xc2028_dvb_release(struct dvb_frontend *fe)
static int xc2028_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
struct xc2028_data *priv = fe->tuner_priv;
+ int rc;
tuner_dbg("%s called\n", __func__);
+ rc = check_device_status(priv);
+ if (rc < 0)
+ return rc;
+
*frequency = priv->frequency;
return 0;
}
+static void load_firmware_cb(const struct firmware *fw,
+ void *context)
+{
+ struct dvb_frontend *fe = context;
+ struct xc2028_data *priv = fe->tuner_priv;
+ int rc;
+
+ tuner_dbg("request_firmware_nowait(): %s\n", fw ? "OK" : "error");
+ if (!fw) {
+ tuner_err("Could not load firmware %s.\n", priv->fname);
+ priv->state = XC2028_NODEV;
+ return;
+ }
+
+ rc = load_all_firmwares(fe, fw);
+
+ release_firmware(fw);
+
+ if (rc < 0)
+ return;
+ priv->state = XC2028_SLEEP;
+}
+
static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct xc2028_data *priv = fe->tuner_priv;
@@ -1272,21 +1374,49 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
mutex_lock(&priv->lock);
+ /*
+ * Copy the config data.
+ * For the firmware name, keep a local copy of the string,
+ * in order to avoid troubles during device release.
+ */
+ if (priv->ctrl.fname)
+ kfree(priv->ctrl.fname);
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (priv->ctrl.max_len < 9)
- priv->ctrl.max_len = 13;
-
if (p->fname) {
- if (priv->ctrl.fname && strcmp(p->fname, priv->ctrl.fname)) {
- kfree(priv->ctrl.fname);
- free_firmware(priv);
- }
-
priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
if (priv->ctrl.fname == NULL)
rc = -ENOMEM;
}
+ /*
+ * If the firmware name changed, free the firmware. As free_firmware()
+ * resets the state to NO_FIRMWARE, this forces a new request_firmware()
+ */
+ if (!firmware_name[0] && p->fname &&
+ priv->fname && strcmp(p->fname, priv->fname))
+ free_firmware(priv);
+
+ if (priv->ctrl.max_len < 9)
+ priv->ctrl.max_len = 13;
+
+ if (priv->state == XC2028_NO_FIRMWARE) {
+ if (!firmware_name[0])
+ priv->fname = priv->ctrl.fname;
+ else
+ priv->fname = firmware_name;
+
+ rc = request_firmware_nowait(THIS_MODULE, 1,
+ priv->fname,
+ priv->i2c_props.adap->dev.parent,
+ GFP_KERNEL,
+ fe, load_firmware_cb);
+ if (rc < 0) {
+ tuner_err("Failed to request firmware %s\n",
+ priv->fname);
+ priv->state = XC2028_NODEV;
+ } else
+ priv->state = XC2028_WAITING_FIRMWARE;
+ }
mutex_unlock(&priv->lock);
return rc;
@@ -1305,6 +1435,7 @@ static const struct dvb_tuner_ops xc2028_dvb_tuner_ops = {
.release = xc2028_dvb_release,
.get_frequency = xc2028_get_frequency,
.get_rf_strength = xc2028_signal,
+ .get_afc = xc2028_get_afc,
.set_params = xc2028_set_params,
.sleep = xc2028_sleep,
};
@@ -1375,3 +1506,5 @@ MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE);
+MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE);
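Taken together, the xc2028 hunks above introduce a small firmware state machine. free_firmware() resets the state to XC2028_NO_FIRMWARE; xc2028_set_config() then queues request_firmware_nowait() and moves to XC2028_WAITING_FIRMWARE (or to XC2028_NODEV if the request cannot even be queued). load_firmware_cb() parses the file and lands in XC2028_SLEEP on success or XC2028_NODEV on failure, and check_firmware() promotes the tuner to XC2028_ACTIVE once a base firmware has been written. The tuner operations (set_params, signal and AFC readout, sleep, get_frequency) start with check_device_status(), which maps NO_FIRMWARE and WAITING_FIRMWARE to -EAGAIN, ACTIVE and SLEEP to success, and NODEV to -ENODEV, so callers simply retry while the asynchronous load is still in flight.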
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index dcca42ca57be..362a8d7c9738 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -210,13 +210,15 @@ struct xc5000_fw_cfg {
u16 size;
};
+#define XC5000A_FIRMWARE "dvb-fe-xc5000-1.6.114.fw"
static const struct xc5000_fw_cfg xc5000a_1_6_114 = {
- .name = "dvb-fe-xc5000-1.6.114.fw",
+ .name = XC5000A_FIRMWARE,
.size = 12401,
};
+#define XC5000C_FIRMWARE "dvb-fe-xc5000c-41.024.5.fw"
static const struct xc5000_fw_cfg xc5000c_41_024_5 = {
- .name = "dvb-fe-xc5000c-41.024.5.fw",
+ .name = XC5000C_FIRMWARE,
.size = 16497,
};
@@ -717,6 +719,12 @@ static int xc5000_set_params(struct dvb_frontend *fe)
priv->freq_hz = freq - 1750000;
priv->video_standard = DTV6;
break;
+ case SYS_ISDBT:
+ /* All ISDB-T channels currently use a 6 MHz bandwidth */
+ if (!bw)
+ bw = 6000000;
+ /* fall through to OFDM handling */
+ case SYS_DMBTH:
case SYS_DVBT:
case SYS_DVBT2:
dprintk(1, "%s() OFDM\n", __func__);
@@ -1253,3 +1261,5 @@ EXPORT_SYMBOL(xc5000_attach);
MODULE_AUTHOR("Steven Toth");
MODULE_DESCRIPTION("Xceive xc5000 silicon tuner driver");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(XC5000A_FIRMWARE);
+MODULE_FIRMWARE(XC5000C_FIRMWARE);
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index 131b938e9e81..ebf3f05839d2 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -578,6 +578,7 @@ static int demod_attach_drxk(struct ddb_input *input)
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
+ config.qam_demod_parameter_count = 4;
config.adr = 0x29 + (input->nr & 1);
fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.h b/drivers/media/dvb/dvb-core/dvb_frontend.h
index e929d5697b87..7c64c09103a9 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.h
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.h
@@ -220,6 +220,7 @@ struct dvb_tuner_ops {
#define TUNER_STATUS_STEREO 2
int (*get_status)(struct dvb_frontend *fe, u32 *status);
int (*get_rf_strength)(struct dvb_frontend *fe, u16 *strength);
+ int (*get_afc)(struct dvb_frontend *fe, s32 *afc);
/** These are provided separately from set_params in order to facilitate silicon
* tuners which require sophisticated tuning loops, controlling each parameter separately. */
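The xc2028 hunks above provide an implementation of this new callback. Below is a hedged sketch of how a demod or bridge driver might query it; the wrapper name and the -EINVAL fallback are assumptions, not part of the patch.

#include "dvb_frontend.h"

/* Hedged sketch, not part of the patch: read the tuner AFC (in Hz) through
 * the new dvb_tuner_ops.get_afc callback when the tuner provides one (the
 * xc2028 hunks above wire it to xc2028_get_afc()). */
static int example_read_tuner_afc(struct dvb_frontend *fe, s32 *afc_hz)
{
	if (!fe->ops.tuner_ops.get_afc)
		return -EINVAL;	/* tuner does not report AFC */

	return fe->ops.tuner_ops.get_afc(fe, afc_hz);
}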
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index a26949336b3d..c2161565023a 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -418,9 +418,12 @@ config DVB_USB_RTL28XXU
tristate "Realtek RTL28xxU DVB USB support"
depends on DVB_USB && EXPERIMENTAL
select DVB_RTL2830
+ select DVB_RTL2832
select MEDIA_TUNER_QT1010 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MT2060 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_FC0012 if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_FC0013 if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Realtek RTL28xxU DVB USB receiver.
diff --git a/drivers/media/dvb/dvb-usb/az6007.c b/drivers/media/dvb/dvb-usb/az6007.c
index 4008b9c50fbd..86861e6f86d2 100644
--- a/drivers/media/dvb/dvb-usb/az6007.c
+++ b/drivers/media/dvb/dvb-usb/az6007.c
@@ -590,12 +590,10 @@ static int az6007_read_mac_addr(struct dvb_usb_device *d, u8 mac[6])
int ret;
ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6);
- memcpy(mac, st->data, sizeof(mac));
+ memcpy(mac, st->data, 6);
if (ret > 0)
- deb_info("%s: mac is %02x:%02x:%02x:%02x:%02x:%02x\n",
- __func__, mac[0], mac[1], mac[2],
- mac[3], mac[4], mac[5]);
+ deb_info("%s: mac is %pM\n", __func__, mac);
return ret;
}
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
index 7a6160bf54ba..26c44818a5ab 100644
--- a/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
+++ b/drivers/media/dvb/dvb-usb/dvb-usb-ids.h
@@ -100,6 +100,7 @@
#define USB_PID_CONCEPTRONIC_CTVDIGRCU 0xe397
#define USB_PID_CONEXANT_D680_DMB 0x86d6
#define USB_PID_CREATIX_CTX1921 0x1921
+#define USB_PID_DELOCK_USB2_DVBT 0xb803
#define USB_PID_DIBCOM_HOOK_DEFAULT 0x0064
#define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM 0x0065
#define USB_PID_DIBCOM_MOD3000_COLD 0x0bb8
@@ -160,6 +161,7 @@
#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
+#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
@@ -245,6 +247,7 @@
#define USB_PID_TERRATEC_H7_2 0x10a3
#define USB_PID_TERRATEC_T3 0x10a0
#define USB_PID_TERRATEC_T5 0x10a1
+#define USB_PID_NOXON_DAB_STICK 0x00b3
#define USB_PID_PINNACLE_EXPRESSCARD_320CX 0x022e
#define USB_PID_PINNACLE_PCTV2000E 0x022c
#define USB_PID_PINNACLE_PCTV_DVB_T_FLASH 0x0228
diff --git a/drivers/media/dvb/dvb-usb/rtl28xxu.c b/drivers/media/dvb/dvb-usb/rtl28xxu.c
index 41e1f5537f44..6bd0bd792437 100644
--- a/drivers/media/dvb/dvb-usb/rtl28xxu.c
+++ b/drivers/media/dvb/dvb-usb/rtl28xxu.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
* Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
+ * Copyright (C) 2012 Thomas Mair <thomas.mair86@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,10 +23,13 @@
#include "rtl28xxu.h"
#include "rtl2830.h"
+#include "rtl2832.h"
#include "qt1010.h"
#include "mt2060.h"
#include "mxl5005s.h"
+#include "fc0012.h"
+#include "fc0013.h"
/* debug */
static int dvb_usb_rtl28xxu_debug;
@@ -80,7 +84,7 @@ err:
return ret;
}
-static int rtl2831_wr_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
+static int rtl28xx_wr_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
{
struct rtl28xxu_req req;
@@ -116,12 +120,12 @@ static int rtl2831_rd_regs(struct dvb_usb_device *d, u16 reg, u8 *val, int len)
return rtl28xxu_ctrl_msg(d, &req);
}
-static int rtl2831_wr_reg(struct dvb_usb_device *d, u16 reg, u8 val)
+static int rtl28xx_wr_reg(struct dvb_usb_device *d, u16 reg, u8 val)
{
- return rtl2831_wr_regs(d, reg, &val, 1);
+ return rtl28xx_wr_regs(d, reg, &val, 1);
}
-static int rtl2831_rd_reg(struct dvb_usb_device *d, u16 reg, u8 *val)
+static int rtl28xx_rd_reg(struct dvb_usb_device *d, u16 reg, u8 *val)
{
return rtl2831_rd_regs(d, reg, val, 1);
}
@@ -308,12 +312,12 @@ static int rtl2831u_frontend_attach(struct dvb_usb_adapter *adap)
*/
/* GPIO direction */
- ret = rtl2831_wr_reg(adap->dev, SYS_GPIO_DIR, 0x0a);
+ ret = rtl28xx_wr_reg(adap->dev, SYS_GPIO_DIR, 0x0a);
if (ret)
goto err;
/* enable as output GPIO0, GPIO2, GPIO4 */
- ret = rtl2831_wr_reg(adap->dev, SYS_GPIO_OUT_EN, 0x15);
+ ret = rtl28xx_wr_reg(adap->dev, SYS_GPIO_OUT_EN, 0x15);
if (ret)
goto err;
@@ -381,34 +385,159 @@ err:
return ret;
}
+static struct rtl2832_config rtl28xxu_rtl2832_fc0012_config = {
+ .i2c_addr = 0x10, /* 0x20 */
+ .xtal = 28800000,
+ .if_dvbt = 0,
+ .tuner = TUNER_RTL2832_FC0012
+};
+
+static struct rtl2832_config rtl28xxu_rtl2832_fc0013_config = {
+ .i2c_addr = 0x10, /* 0x20 */
+ .xtal = 28800000,
+ .if_dvbt = 0,
+ .tuner = TUNER_RTL2832_FC0013
+};
+
+static int rtl2832u_fc0012_tuner_callback(struct dvb_usb_device *d,
+ int cmd, int arg)
+{
+ int ret;
+ u8 val;
+
+ deb_info("%s cmd=%d arg=%d\n", __func__, cmd, arg);
+ switch (cmd) {
+ case FC_FE_CALLBACK_VHF_ENABLE:
+ /* set output values */
+ ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &val);
+ if (ret)
+ goto err;
+
+ if (arg)
+ val &= 0xbf; /* set GPIO6 low */
+ else
+ val |= 0x40; /* set GPIO6 high */
+
+
+ ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, val);
+ if (ret)
+ goto err;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+
+err:
+ err("%s: failed=%d\n", __func__, ret);
+
+ return ret;
+}
+
+
+static int rtl2832u_fc0013_tuner_callback(struct dvb_usb_device *d,
+ int cmd, int arg)
+{
+ /* TODO: implement */
+ return 0;
+}
+
+static int rtl2832u_tuner_callback(struct dvb_usb_device *d, int cmd, int arg)
+{
+ struct rtl28xxu_priv *priv = d->priv;
+
+ switch (priv->tuner) {
+ case TUNER_RTL2832_FC0012:
+ return rtl2832u_fc0012_tuner_callback(d, cmd, arg);
+
+ case TUNER_RTL2832_FC0013:
+ return rtl2832u_fc0013_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -ENODEV;
+}
+
+static int rtl2832u_frontend_callback(void *adapter_priv, int component,
+ int cmd, int arg)
+{
+ struct i2c_adapter *adap = adapter_priv;
+ struct dvb_usb_device *d = i2c_get_adapdata(adap);
+
+ switch (component) {
+ case DVB_FRONTEND_COMPONENT_TUNER:
+ return rtl2832u_tuner_callback(d, cmd, arg);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
struct rtl28xxu_priv *priv = adap->dev->priv;
- u8 buf[1];
+ struct rtl2832_config *rtl2832_config;
+
+ u8 buf[2], val;
/* open RTL2832U/RTL2832 I2C gate */
struct rtl28xxu_req req_gate_open = {0x0120, 0x0011, 0x0001, "\x18"};
/* close RTL2832U/RTL2832 I2C gate */
struct rtl28xxu_req req_gate_close = {0x0120, 0x0011, 0x0001, "\x10"};
+ /* for FC0012 tuner probe */
+ struct rtl28xxu_req req_fc0012 = {0x00c6, CMD_I2C_RD, 1, buf};
+ /* for FC0013 tuner probe */
+ struct rtl28xxu_req req_fc0013 = {0x00c6, CMD_I2C_RD, 1, buf};
+ /* for MT2266 tuner probe */
+ struct rtl28xxu_req req_mt2266 = {0x00c0, CMD_I2C_RD, 1, buf};
/* for FC2580 tuner probe */
struct rtl28xxu_req req_fc2580 = {0x01ac, CMD_I2C_RD, 1, buf};
+ /* for MT2063 tuner probe */
+ struct rtl28xxu_req req_mt2063 = {0x00c0, CMD_I2C_RD, 1, buf};
+ /* for MAX3543 tuner probe */
+ struct rtl28xxu_req req_max3543 = {0x00c0, CMD_I2C_RD, 1, buf};
+ /* for TUA9001 tuner probe */
+ struct rtl28xxu_req req_tua9001 = {0x7ec0, CMD_I2C_RD, 2, buf};
+ /* for MXL5007T tuner probe */
+ struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
+ /* for E4000 tuner probe */
+ struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
+ /* for TDA18272 tuner probe */
+ struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
deb_info("%s:\n", __func__);
- /* GPIO direction */
- ret = rtl2831_wr_reg(adap->dev, SYS_GPIO_DIR, 0x0a);
+
+ ret = rtl28xx_rd_reg(adap->dev, SYS_GPIO_DIR, &val);
if (ret)
goto err;
- /* enable as output GPIO0, GPIO2, GPIO4 */
- ret = rtl2831_wr_reg(adap->dev, SYS_GPIO_OUT_EN, 0x15);
+ val &= 0xbf;
+
+ ret = rtl28xx_wr_reg(adap->dev, SYS_GPIO_DIR, val);
if (ret)
goto err;
- ret = rtl2831_wr_reg(adap->dev, SYS_DEMOD_CTL, 0xe8);
+
+ /* enable as output GPIO3 and GPIO6 */
+ ret = rtl28xx_rd_reg(adap->dev, SYS_GPIO_OUT_EN, &val);
if (ret)
goto err;
+ val |= 0x48;
+
+ ret = rtl28xx_wr_reg(adap->dev, SYS_GPIO_OUT_EN, val);
+ if (ret)
+ goto err;
+
/*
* Probe used tuner. We need to know used tuner before demod attach
* since there is some demod params needed to set according to tuner.
@@ -419,25 +548,108 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
if (ret)
goto err;
+ priv->tuner = TUNER_NONE;
+
+ /* check FC0012 ID register; reg=00 val=a1 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_fc0012);
+ if (ret == 0 && buf[0] == 0xa1) {
+ priv->tuner = TUNER_RTL2832_FC0012;
+ rtl2832_config = &rtl28xxu_rtl2832_fc0012_config;
+ info("%s: FC0012 tuner found", __func__);
+ goto found;
+ }
+
+ /* check FC0013 ID register; reg=00 val=a3 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_fc0013);
+ if (ret == 0 && buf[0] == 0xa3) {
+ priv->tuner = TUNER_RTL2832_FC0013;
+ rtl2832_config = &rtl28xxu_rtl2832_fc0013_config;
+ info("%s: FC0013 tuner found", __func__);
+ goto found;
+ }
+
+ /* check MT2266 ID register; reg=00 val=85 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_mt2266);
+ if (ret == 0 && buf[0] == 0x85) {
+ priv->tuner = TUNER_RTL2832_MT2266;
+ /* TODO implement tuner */
+ info("%s: MT2266 tuner found", __func__);
+ goto unsupported;
+ }
+
/* check FC2580 ID register; reg=01 val=56 */
ret = rtl28xxu_ctrl_msg(adap->dev, &req_fc2580);
if (ret == 0 && buf[0] == 0x56) {
priv->tuner = TUNER_RTL2832_FC2580;
- deb_info("%s: FC2580\n", __func__);
- goto found;
- } else {
- deb_info("%s: FC2580 probe failed=%d - %02x\n",
- __func__, ret, buf[0]);
+ /* TODO implement tuner */
+ info("%s: FC2580 tuner found", __func__);
+ goto unsupported;
}
+ /* check MT2063 ID register; reg=00 val=9e || 9c */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_mt2063);
+ if (ret == 0 && (buf[0] == 0x9e || buf[0] == 0x9c)) {
+ priv->tuner = TUNER_RTL2832_MT2063;
+ /* TODO implement tuner */
+ info("%s: MT2063 tuner found", __func__);
+ goto unsupported;
+ }
+
+ /* check MAX3543 ID register; reg=00 val=38 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_max3543);
+ if (ret == 0 && buf[0] == 0x38) {
+ priv->tuner = TUNER_RTL2832_MAX3543;
+ /* TODO implement tuner */
+ info("%s: MAX3534 tuner found", __func__);
+ goto unsupported;
+ }
+
+ /* check TUA9001 ID register; reg=7e val=2328 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_tua9001);
+ if (ret == 0 && buf[0] == 0x23 && buf[1] == 0x28) {
+ priv->tuner = TUNER_RTL2832_TUA9001;
+ /* TODO implement tuner */
+ info("%s: TUA9001 tuner found", __func__);
+ goto unsupported;
+ }
+
+ /* check MXL5007R ID register; reg=d9 val=14 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_mxl5007t);
+ if (ret == 0 && buf[0] == 0x14) {
+ priv->tuner = TUNER_RTL2832_MXL5007T;
+ /* TODO implement tuner */
+ info("%s: MXL5007T tuner found", __func__);
+ goto unsupported;
+ }
+
+ /* check E4000 ID register; reg=02 val=40 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_e4000);
+ if (ret == 0 && buf[0] == 0x40) {
+ priv->tuner = TUNER_RTL2832_E4000;
+ /* TODO implement tuner */
+ info("%s: E4000 tuner found", __func__);
+ goto unsupported;
+ }
+
+ /* check TDA18272 ID register; reg=00 val=c760 */
+ ret = rtl28xxu_ctrl_msg(adap->dev, &req_tda18272);
+ if (ret == 0 && (buf[0] == 0xc7 || buf[1] == 0x60)) {
+ priv->tuner = TUNER_RTL2832_TDA18272;
+ /* TODO implement tuner */
+ info("%s: TDA18272 tuner found", __func__);
+ goto unsupported;
+ }
+
+unsupported:
/* close demod I2C gate */
ret = rtl28xxu_ctrl_msg(adap->dev, &req_gate_close);
if (ret)
goto err;
/* tuner not found */
+ deb_info("No compatible tuner found");
ret = -ENODEV;
- goto err;
+ return ret;
found:
/* close demod I2C gate */
@@ -446,9 +658,18 @@ found:
goto err;
/* attach demodulator */
- /* TODO: */
+ adap->fe_adap[0].fe = dvb_attach(rtl2832_attach, rtl2832_config,
+ &adap->dev->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* set fe callbacks */
+ adap->fe_adap[0].fe->callback = rtl2832u_frontend_callback;
return ret;
+
err:
deb_info("%s: failed=%d\n", __func__, ret);
return ret;
@@ -531,10 +752,24 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
deb_info("%s:\n", __func__);
switch (priv->tuner) {
- case TUNER_RTL2832_FC2580:
- /* TODO: */
- fe = NULL;
+ case TUNER_RTL2832_FC0012:
+ fe = dvb_attach(fc0012_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
+
+ /* since fc0012 includes reading the signal strength, delegate
+ * that to the tuner driver */
+ adap->fe_adap[0].fe->ops.read_signal_strength = adap->fe_adap[0].
+ fe->ops.tuner_ops.get_rf_strength;
+ return 0;
break;
+ case TUNER_RTL2832_FC0013:
+ fe = dvb_attach(fc0013_attach, adap->fe_adap[0].fe,
+ &adap->dev->i2c_adap, 0xc6>>1, 0, FC_XTAL_28_8_MHZ);
+
+ /* fc0013 also supports signal strength reading */
+ adap->fe_adap[0].fe->ops.read_signal_strength = adap->fe_adap[0]
+ .fe->ops.tuner_ops.get_rf_strength;
+ return 0;
default:
fe = NULL;
err("unknown tuner=%d", priv->tuner);
@@ -551,14 +786,14 @@ err:
return ret;
}
-static int rtl28xxu_streaming_ctrl(struct dvb_usb_adapter *adap , int onoff)
+static int rtl2831u_streaming_ctrl(struct dvb_usb_adapter *adap , int onoff)
{
int ret;
u8 buf[2], gpio;
deb_info("%s: onoff=%d\n", __func__, onoff);
- ret = rtl2831_rd_reg(adap->dev, SYS_GPIO_OUT_VAL, &gpio);
+ ret = rtl28xx_rd_reg(adap->dev, SYS_GPIO_OUT_VAL, &gpio);
if (ret)
goto err;
@@ -572,11 +807,37 @@ static int rtl28xxu_streaming_ctrl(struct dvb_usb_adapter *adap , int onoff)
gpio &= (~0x04); /* LED off */
}
- ret = rtl2831_wr_reg(adap->dev, SYS_GPIO_OUT_VAL, gpio);
+ ret = rtl28xx_wr_reg(adap->dev, SYS_GPIO_OUT_VAL, gpio);
if (ret)
goto err;
- ret = rtl2831_wr_regs(adap->dev, USB_EPA_CTL, buf, 2);
+ ret = rtl28xx_wr_regs(adap->dev, USB_EPA_CTL, buf, 2);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ deb_info("%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+static int rtl2832u_streaming_ctrl(struct dvb_usb_adapter *adap , int onoff)
+{
+ int ret;
+ u8 buf[2];
+
+ deb_info("%s: onoff=%d\n", __func__, onoff);
+
+
+ if (onoff) {
+ buf[0] = 0x00;
+ buf[1] = 0x00;
+ } else {
+ buf[0] = 0x10; /* stall EPA */
+ buf[1] = 0x02; /* reset EPA */
+ }
+
+ ret = rtl28xx_wr_regs(adap->dev, USB_EPA_CTL, buf, 2);
if (ret)
goto err;
@@ -586,7 +847,7 @@ err:
return ret;
}
-static int rtl28xxu_power_ctrl(struct dvb_usb_device *d, int onoff)
+static int rtl2831u_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
u8 gpio, sys0;
@@ -594,12 +855,12 @@ static int rtl28xxu_power_ctrl(struct dvb_usb_device *d, int onoff)
deb_info("%s: onoff=%d\n", __func__, onoff);
/* demod adc */
- ret = rtl2831_rd_reg(d, SYS_SYS0, &sys0);
+ ret = rtl28xx_rd_reg(d, SYS_SYS0, &sys0);
if (ret)
goto err;
/* tuner power, read GPIOs */
- ret = rtl2831_rd_reg(d, SYS_GPIO_OUT_VAL, &gpio);
+ ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &gpio);
if (ret)
goto err;
@@ -619,12 +880,12 @@ static int rtl28xxu_power_ctrl(struct dvb_usb_device *d, int onoff)
deb_info("%s: WR SYS0=%02x GPIO_OUT_VAL=%02x\n", __func__, sys0, gpio);
/* demod adc */
- ret = rtl2831_wr_reg(d, SYS_SYS0, sys0);
+ ret = rtl28xx_wr_reg(d, SYS_SYS0, sys0);
if (ret)
goto err;
/* tuner power, write GPIOs */
- ret = rtl2831_wr_reg(d, SYS_GPIO_OUT_VAL, gpio);
+ ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, gpio);
if (ret)
goto err;
@@ -634,6 +895,128 @@ err:
return ret;
}
+static int rtl2832u_power_ctrl(struct dvb_usb_device *d, int onoff)
+{
+ int ret;
+ u8 val;
+
+ deb_info("%s: onoff=%d\n", __func__, onoff);
+
+ if (onoff) {
+ /* set output values */
+ ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &val);
+ if (ret)
+ goto err;
+
+ val |= 0x08;
+ val &= 0xef;
+
+ ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, val);
+ if (ret)
+ goto err;
+
+ /* demod_ctl_1 */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL1, &val);
+ if (ret)
+ goto err;
+
+ val &= 0xef;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL1, val);
+ if (ret)
+ goto err;
+
+ /* demod control */
+ /* PLL enable */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL, &val);
+ if (ret)
+ goto err;
+
+ /* bit 7 to 1 */
+ val |= 0x80;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL, val);
+ if (ret)
+ goto err;
+
+ /* demod HW reset */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL, &val);
+ if (ret)
+ goto err;
+ /* bit 5 to 0 */
+ val &= 0xdf;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL, val);
+ if (ret)
+ goto err;
+
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL, &val);
+ if (ret)
+ goto err;
+
+ val |= 0x20;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL, val);
+ if (ret)
+ goto err;
+
+ mdelay(5);
+
+ /*enable ADC_Q and ADC_I */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL, &val);
+ if (ret)
+ goto err;
+
+ val |= 0x48;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL, val);
+ if (ret)
+ goto err;
+
+
+ } else {
+ /* demod_ctl_1 */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL1, &val);
+ if (ret)
+ goto err;
+
+ val |= 0x0c;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL1, val);
+ if (ret)
+ goto err;
+
+ /* set output values */
+ ret = rtl28xx_rd_reg(d, SYS_GPIO_OUT_VAL, &val);
+ if (ret)
+ goto err;
+
+ val |= 0x10;
+
+ ret = rtl28xx_wr_reg(d, SYS_GPIO_OUT_VAL, val);
+ if (ret)
+ goto err;
+
+ /* demod control */
+ ret = rtl28xx_rd_reg(d, SYS_DEMOD_CTL, &val);
+ if (ret)
+ goto err;
+
+ val &= 0x37;
+
+ ret = rtl28xx_wr_reg(d, SYS_DEMOD_CTL, val);
+ if (ret)
+ goto err;
+
+ }
+
+ return ret;
+err:
+ deb_info("%s: failed=%d\n", __func__, ret);
+ return ret;
+}
+
+
static int rtl2831u_rc_query(struct dvb_usb_device *d)
{
int ret, i;
@@ -660,7 +1043,7 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
/* init remote controller */
if (!priv->rc_active) {
for (i = 0; i < ARRAY_SIZE(rc_nec_tab); i++) {
- ret = rtl2831_wr_reg(d, rc_nec_tab[i].reg,
+ ret = rtl28xx_wr_reg(d, rc_nec_tab[i].reg,
rc_nec_tab[i].val);
if (ret)
goto err;
@@ -690,12 +1073,12 @@ static int rtl2831u_rc_query(struct dvb_usb_device *d)
rc_keydown(d->rc_dev, rc_code, 0);
- ret = rtl2831_wr_reg(d, SYS_IRRC_SR, 1);
+ ret = rtl28xx_wr_reg(d, SYS_IRRC_SR, 1);
if (ret)
goto err;
/* repeated intentionally to avoid extra keypress */
- ret = rtl2831_wr_reg(d, SYS_IRRC_SR, 1);
+ ret = rtl28xx_wr_reg(d, SYS_IRRC_SR, 1);
if (ret)
goto err;
}
@@ -732,7 +1115,7 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
/* init remote controller */
if (!priv->rc_active) {
for (i = 0; i < ARRAY_SIZE(rc_nec_tab); i++) {
- ret = rtl2831_wr_reg(d, rc_nec_tab[i].reg,
+ ret = rtl28xx_wr_reg(d, rc_nec_tab[i].reg,
rc_nec_tab[i].val);
if (ret)
goto err;
@@ -740,14 +1123,14 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
priv->rc_active = true;
}
- ret = rtl2831_rd_reg(d, IR_RX_IF, &buf[0]);
+ ret = rtl28xx_rd_reg(d, IR_RX_IF, &buf[0]);
if (ret)
goto err;
if (buf[0] != 0x83)
goto exit;
- ret = rtl2831_rd_reg(d, IR_RX_BC, &buf[0]);
+ ret = rtl28xx_rd_reg(d, IR_RX_BC, &buf[0]);
if (ret)
goto err;
@@ -756,9 +1139,9 @@ static int rtl2832u_rc_query(struct dvb_usb_device *d)
/* TODO: pass raw IR to Kernel IR decoder */
- ret = rtl2831_wr_reg(d, IR_RX_IF, 0x03);
- ret = rtl2831_wr_reg(d, IR_RX_BUF_CTRL, 0x80);
- ret = rtl2831_wr_reg(d, IR_RX_CTRL, 0x80);
+ ret = rtl28xx_wr_reg(d, IR_RX_IF, 0x03);
+ ret = rtl28xx_wr_reg(d, IR_RX_BUF_CTRL, 0x80);
+ ret = rtl28xx_wr_reg(d, IR_RX_CTRL, 0x80);
exit:
return ret;
@@ -771,6 +1154,9 @@ enum rtl28xxu_usb_table_entry {
RTL2831U_0BDA_2831,
RTL2831U_14AA_0160,
RTL2831U_14AA_0161,
+ RTL2832U_0CCD_00A9,
+ RTL2832U_1F4D_B803,
+ RTL2832U_0CCD_00B3,
};
static struct usb_device_id rtl28xxu_table[] = {
@@ -783,6 +1169,12 @@ static struct usb_device_id rtl28xxu_table[] = {
USB_DEVICE(USB_VID_WIDEVIEW, USB_PID_FREECOM_DVBT_2)},
/* RTL2832U */
+ [RTL2832U_0CCD_00A9] = {
+ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1)},
+ [RTL2832U_1F4D_B803] = {
+ USB_DEVICE(USB_VID_GTEK, USB_PID_DELOCK_USB2_DVBT)},
+ [RTL2832U_0CCD_00B3] = {
+ USB_DEVICE(USB_VID_TERRATEC, USB_PID_NOXON_DAB_STICK)},
{} /* terminating entry */
};
@@ -805,7 +1197,7 @@ static struct dvb_usb_device_properties rtl28xxu_properties[] = {
{
.frontend_attach = rtl2831u_frontend_attach,
.tuner_attach = rtl2831u_tuner_attach,
- .streaming_ctrl = rtl28xxu_streaming_ctrl,
+ .streaming_ctrl = rtl2831u_streaming_ctrl,
.stream = {
.type = USB_BULK,
.count = 6,
@@ -821,7 +1213,7 @@ static struct dvb_usb_device_properties rtl28xxu_properties[] = {
}
},
- .power_ctrl = rtl28xxu_power_ctrl,
+ .power_ctrl = rtl2831u_power_ctrl,
.rc.core = {
.protocol = RC_TYPE_NEC,
@@ -867,7 +1259,7 @@ static struct dvb_usb_device_properties rtl28xxu_properties[] = {
{
.frontend_attach = rtl2832u_frontend_attach,
.tuner_attach = rtl2832u_tuner_attach,
- .streaming_ctrl = rtl28xxu_streaming_ctrl,
+ .streaming_ctrl = rtl2832u_streaming_ctrl,
.stream = {
.type = USB_BULK,
.count = 6,
@@ -883,7 +1275,7 @@ static struct dvb_usb_device_properties rtl28xxu_properties[] = {
}
},
- .power_ctrl = rtl28xxu_power_ctrl,
+ .power_ctrl = rtl2832u_power_ctrl,
.rc.core = {
.protocol = RC_TYPE_NEC,
@@ -896,10 +1288,25 @@ static struct dvb_usb_device_properties rtl28xxu_properties[] = {
.i2c_algo = &rtl28xxu_i2c_algo,
- .num_device_descs = 0, /* disabled as no support for RTL2832 */
+ .num_device_descs = 3,
.devices = {
{
- .name = "Realtek RTL2832U reference design",
+ .name = "Terratec Cinergy T Stick Black",
+ .warm_ids = {
+ &rtl28xxu_table[RTL2832U_0CCD_00A9],
+ },
+ },
+ {
+ .name = "G-Tek Electronics Group Lifeview LV5TDLX DVB-T",
+ .warm_ids = {
+ &rtl28xxu_table[RTL2832U_1F4D_B803],
+ },
+ },
+ {
+ .name = "NOXON DAB/DAB+ USB dongle",
+ .warm_ids = {
+ &rtl28xxu_table[RTL2832U_0CCD_00B3],
+ },
},
}
},
@@ -910,6 +1317,7 @@ static int rtl28xxu_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int ret, i;
+ u8 val;
int properties_count = ARRAY_SIZE(rtl28xxu_properties);
struct dvb_usb_device *d;
struct usb_device *udev;
@@ -954,16 +1362,25 @@ static int rtl28xxu_probe(struct usb_interface *intf,
if (ret)
goto err;
+
/* init USB endpoints */
- ret = rtl2831_wr_reg(d, USB_SYSCTL_0, 0x09);
+ ret = rtl28xx_rd_reg(d, USB_SYSCTL_0, &val);
+ if (ret)
+ goto err;
+
+ /* enable DMA and Full Packet Mode*/
+ val |= 0x09;
+ ret = rtl28xx_wr_reg(d, USB_SYSCTL_0, val);
if (ret)
goto err;
- ret = rtl2831_wr_regs(d, USB_EPA_MAXPKT, "\x00\x02\x00\x00", 4);
+ /* set EPA maximum packet size to 0x0200 */
+ ret = rtl28xx_wr_regs(d, USB_EPA_MAXPKT, "\x00\x02\x00\x00", 4);
if (ret)
goto err;
- ret = rtl2831_wr_regs(d, USB_EPA_FIFO_CFG, "\x14\x00\x00\x00", 4);
+ /* change EPA FIFO length */
+ ret = rtl28xx_wr_regs(d, USB_EPA_FIFO_CFG, "\x14\x00\x00\x00", 4);
if (ret)
goto err;
@@ -1007,4 +1424,5 @@ module_exit(rtl28xxu_module_exit);
MODULE_DESCRIPTION("Realtek RTL28xxU DVB USB driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_AUTHOR("Thomas Mair <thomas.mair86@googlemail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index b98ebb264e29..a08c2152d0ee 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,6 +1,7 @@
config DVB_FE_CUSTOMISE
bool "Customise the frontend modules to build"
depends on DVB_CORE
+ depends on EXPERT
default y if EXPERT
help
This allows the user to select/deselect frontend drivers for their
@@ -432,6 +433,13 @@ config DVB_RTL2830
help
Say Y when you want to support this frontend.
+config DVB_RTL2832
+ tristate "Realtek RTL2832 DVB-T"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Say Y when you want to support this frontend.
+
comment "DVB-C (cable) frontends"
depends on DVB_CORE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index cd1ac2fd5774..185bb8b51952 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_DVB_IT913X_FE) += it913x-fe.o
obj-$(CONFIG_DVB_A8293) += a8293.o
obj-$(CONFIG_DVB_TDA10071) += tda10071.o
obj-$(CONFIG_DVB_RTL2830) += rtl2830.o
+obj-$(CONFIG_DVB_RTL2832) += rtl2832.o
obj-$(CONFIG_DVB_M88RS2000) += m88rs2000.o
obj-$(CONFIG_DVB_AF9033) += af9033.o
diff --git a/drivers/media/dvb/frontends/a8293.c b/drivers/media/dvb/frontends/a8293.c
index bb56497e940a..cff44a389b40 100644
--- a/drivers/media/dvb/frontends/a8293.c
+++ b/drivers/media/dvb/frontends/a8293.c
@@ -21,24 +21,6 @@
#include "dvb_frontend.h"
#include "a8293.h"
-static int debug;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
-
-#define LOG_PREFIX "a8293"
-
-#undef dbg
-#define dbg(f, arg...) \
- if (debug) \
- printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef err
-#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
-#undef info
-#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef warn
-#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
-
-
struct a8293_priv {
struct i2c_adapter *i2c;
const struct a8293_config *cfg;
@@ -65,7 +47,8 @@ static int a8293_i2c(struct a8293_priv *priv, u8 *val, int len, bool rd)
if (ret == 1) {
ret = 0;
} else {
- warn("i2c failed=%d rd=%d", ret, rd);
+ dev_warn(&priv->i2c->dev, "%s: i2c failed=%d rd=%d\n",
+ KBUILD_MODNAME, ret, rd);
ret = -EREMOTEIO;
}
@@ -88,7 +71,8 @@ static int a8293_set_voltage(struct dvb_frontend *fe,
struct a8293_priv *priv = fe->sec_priv;
int ret;
- dbg("%s: fe_sec_voltage=%d", __func__, fe_sec_voltage);
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_voltage=%d\n", __func__,
+ fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_OFF:
@@ -114,14 +98,12 @@ static int a8293_set_voltage(struct dvb_frontend *fe,
return ret;
err:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
static void a8293_release_sec(struct dvb_frontend *fe)
{
- dbg("%s:", __func__);
-
a8293_set_voltage(fe, SEC_VOLTAGE_OFF);
kfree(fe->sec_priv);
@@ -154,7 +136,7 @@ struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
/* ENB=0 */
priv->reg[0] = 0x10;
- ret = a8293_wr(priv, &priv->reg[1], 1);
+ ret = a8293_wr(priv, &priv->reg[0], 1);
if (ret)
goto err;
@@ -164,16 +146,17 @@ struct dvb_frontend *a8293_attach(struct dvb_frontend *fe,
if (ret)
goto err;
- info("Allegro A8293 SEC attached.");
-
fe->ops.release_sec = a8293_release_sec;
/* override frontend ops */
fe->ops.set_voltage = a8293_set_voltage;
+ dev_info(&priv->i2c->dev, "%s: Allegro A8293 SEC attached\n",
+ KBUILD_MODNAME);
+
return fe;
err:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
kfree(priv);
return NULL;
}
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index 9ca34f495009..1f3bcb5a1de8 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -2680,12 +2680,14 @@ static int dib8000_tune(struct dvb_frontend *fe)
{
struct dib8000_state *state = fe->demodulator_priv;
int ret = 0;
- u16 lock, value, mode = fft_to_mode(state);
+ u16 lock, value, mode;
// we are already tuned - just resuming from suspend
if (state == NULL)
return -EINVAL;
+ mode = fft_to_mode(state);
+
dib8000_set_bandwidth(fe, state->fe[0]->dtv_property_cache.bandwidth_hz / 1000);
dib8000_set_channel(state, 0, 0);
diff --git a/drivers/media/dvb/frontends/drxk.h b/drivers/media/dvb/frontends/drxk.h
index 9d64e4fea066..d615d7d055a2 100644
--- a/drivers/media/dvb/frontends/drxk.h
+++ b/drivers/media/dvb/frontends/drxk.h
@@ -20,6 +20,14 @@
* means that 1=DVBC, 0 = DVBT. Zero means the opposite.
* @mpeg_out_clk_strength: DRXK Mpeg output clock drive strength.
* @microcode_name: Name of the firmware file with the microcode
+ * @qam_demod_parameter_count: The number of parameters used for the command
+ * to set the demodulator parameters. All
+ * firmware files use the 2-parameter command.
+ * An exception is the "drxk_a3.mc" firmware,
+ * which uses the 4-parameter command.
+ * A value of 0 (default) or lower indicates that
+ * the correct number of parameters will be
+ * automatically detected.
*
* On the *_gpio vars, bit 0 is UIO-1, bit 1 is UIO-2 and bit 2 is
* UIO-3.
@@ -38,7 +46,8 @@ struct drxk_config {
u8 mpeg_out_clk_strength;
int chunk_size;
- const char *microcode_name;
+ const char *microcode_name;
+ int qam_demod_parameter_count;
};
#if defined(CONFIG_DVB_DRXK) || (defined(CONFIG_DVB_DRXK_MODULE) \
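The ddbridge hunk above shows the intended use of the new field; condensed into a minimal sketch below (the function name and the bare I2C adapter handling are assumptions, only the drxk_config fields come from the patch).

#include <linux/i2c.h>
#include <linux/string.h>

#include "dvb_frontend.h"
#include "drxk.h"

/* Minimal sketch: attach a drxk demod with the firmware that needs the
 * 4-parameter QAM command. demod_attach_example() is a placeholder name. */
static struct dvb_frontend *demod_attach_example(struct i2c_adapter *i2c)
{
	struct drxk_config config;

	memset(&config, 0, sizeof(config));
	config.adr = 0x29;		/* demod I2C address */
	config.microcode_name = "drxk_a3.mc";
	/* drxk_a3.mc uses the 4-parameter QAM command; leaving this at 0
	 * (the default) lets the driver auto-detect the parameter count */
	config.qam_demod_parameter_count = 4;

	return dvb_attach(drxk_attach, &config, i2c);
}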
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index 60b868faeacf..1ab8154542da 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
+#include <linux/hardirq.h>
#include <asm/div64.h>
#include "dvb_frontend.h"
@@ -308,16 +309,42 @@ static u32 Log10Times100(u32 x)
/* I2C **********************************************************************/
/****************************************************************************/
-static int i2c_read1(struct i2c_adapter *adapter, u8 adr, u8 *val)
+static int drxk_i2c_lock(struct drxk_state *state)
+{
+ i2c_lock_adapter(state->i2c);
+ state->drxk_i2c_exclusive_lock = true;
+
+ return 0;
+}
+
+static void drxk_i2c_unlock(struct drxk_state *state)
+{
+ if (!state->drxk_i2c_exclusive_lock)
+ return;
+
+ i2c_unlock_adapter(state->i2c);
+ state->drxk_i2c_exclusive_lock = false;
+}
+
+static int drxk_i2c_transfer(struct drxk_state *state, struct i2c_msg *msgs,
+ unsigned len)
+{
+ if (state->drxk_i2c_exclusive_lock)
+ return __i2c_transfer(state->i2c, msgs, len);
+ else
+ return i2c_transfer(state->i2c, msgs, len);
+}
+
+static int i2c_read1(struct drxk_state *state, u8 adr, u8 *val)
{
struct i2c_msg msgs[1] = { {.addr = adr, .flags = I2C_M_RD,
.buf = val, .len = 1}
};
- return i2c_transfer(adapter, msgs, 1);
+ return drxk_i2c_transfer(state, msgs, 1);
}
-static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+static int i2c_write(struct drxk_state *state, u8 adr, u8 *data, int len)
{
int status;
struct i2c_msg msg = {
@@ -330,7 +357,7 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
printk(KERN_CONT " %02x", data[i]);
printk(KERN_CONT "\n");
}
- status = i2c_transfer(adap, &msg, 1);
+ status = drxk_i2c_transfer(state, &msg, 1);
if (status >= 0 && status != 1)
status = -EIO;
@@ -340,7 +367,7 @@ static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
return status;
}
-static int i2c_read(struct i2c_adapter *adap,
+static int i2c_read(struct drxk_state *state,
u8 adr, u8 *msg, int len, u8 *answ, int alen)
{
int status;
@@ -351,7 +378,7 @@ static int i2c_read(struct i2c_adapter *adap,
.buf = answ, .len = alen}
};
- status = i2c_transfer(adap, msgs, 2);
+ status = drxk_i2c_transfer(state, msgs, 2);
if (status != 2) {
if (debug > 2)
printk(KERN_CONT ": ERROR!\n");
@@ -394,7 +421,7 @@ static int read16_flags(struct drxk_state *state, u32 reg, u16 *data, u8 flags)
len = 2;
}
dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
- status = i2c_read(state->i2c, adr, mm1, len, mm2, 2);
+ status = i2c_read(state, adr, mm1, len, mm2, 2);
if (status < 0)
return status;
if (data)
@@ -428,7 +455,7 @@ static int read32_flags(struct drxk_state *state, u32 reg, u32 *data, u8 flags)
len = 2;
}
dprintk(2, "(0x%08x, 0x%02x)\n", reg, flags);
- status = i2c_read(state->i2c, adr, mm1, len, mm2, 4);
+ status = i2c_read(state, adr, mm1, len, mm2, 4);
if (status < 0)
return status;
if (data)
@@ -464,7 +491,7 @@ static int write16_flags(struct drxk_state *state, u32 reg, u16 data, u8 flags)
mm[len + 1] = (data >> 8) & 0xff;
dprintk(2, "(0x%08x, 0x%04x, 0x%02x)\n", reg, data, flags);
- return i2c_write(state->i2c, adr, mm, len + 2);
+ return i2c_write(state, adr, mm, len + 2);
}
static int write16(struct drxk_state *state, u32 reg, u16 data)
@@ -495,7 +522,7 @@ static int write32_flags(struct drxk_state *state, u32 reg, u32 data, u8 flags)
mm[len + 3] = (data >> 24) & 0xff;
dprintk(2, "(0x%08x, 0x%08x, 0x%02x)\n", reg, data, flags);
- return i2c_write(state->i2c, adr, mm, len + 4);
+ return i2c_write(state, adr, mm, len + 4);
}
static int write32(struct drxk_state *state, u32 reg, u32 data)
@@ -542,7 +569,7 @@ static int write_block(struct drxk_state *state, u32 Address,
printk(KERN_CONT " %02x", pBlock[i]);
printk(KERN_CONT "\n");
}
- status = i2c_write(state->i2c, state->demod_address,
+ status = i2c_write(state, state->demod_address,
&state->Chunk[0], Chunk + AdrLength);
if (status < 0) {
printk(KERN_ERR "drxk: %s: i2c write error at addr 0x%02x\n",
@@ -568,17 +595,17 @@ int PowerUpDevice(struct drxk_state *state)
dprintk(1, "\n");
- status = i2c_read1(state->i2c, state->demod_address, &data);
+ status = i2c_read1(state, state->demod_address, &data);
if (status < 0) {
do {
data = 0;
- status = i2c_write(state->i2c, state->demod_address,
+ status = i2c_write(state, state->demod_address,
&data, 1);
msleep(10);
retryCount++;
if (status < 0)
continue;
- status = i2c_read1(state->i2c, state->demod_address,
+ status = i2c_read1(state, state->demod_address,
&data);
} while (status < 0 &&
(retryCount < DRXK_MAX_RETRIES_POWERUP));
@@ -932,7 +959,7 @@ static int GetDeviceCapabilities(struct drxk_state *state)
if (status < 0)
goto error;
-printk(KERN_ERR "drxk: status = 0x%08x\n", sioTopJtagidLo);
+ printk(KERN_INFO "drxk: status = 0x%08x\n", sioTopJtagidLo);
/* driver 0.9.0 */
switch ((sioTopJtagidLo >> 29) & 0xF) {
@@ -2824,7 +2851,7 @@ static int ConfigureI2CBridge(struct drxk_state *state, bool bEnableBridge)
dprintk(1, "\n");
if (state->m_DrxkState == DRXK_UNINITIALIZED)
- goto error;
+ return 0;
if (state->m_DrxkState == DRXK_POWERED_DOWN)
goto error;
@@ -2977,7 +3004,7 @@ static int ADCSynchronization(struct drxk_state *state)
status = read16(state, IQM_AF_CLKNEG__A, &clkNeg);
if (status < 0)
goto error;
- if ((clkNeg | IQM_AF_CLKNEG_CLKNEGDATA__M) ==
+ if ((clkNeg & IQM_AF_CLKNEG_CLKNEGDATA__M) ==
IQM_AF_CLKNEG_CLKNEGDATA_CLK_ADC_DATA_POS) {
clkNeg &= (~(IQM_AF_CLKNEG_CLKNEGDATA__M));
clkNeg |=
@@ -5361,7 +5388,7 @@ static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus)
SCU_RAM_COMMAND_CMD_DEMOD_GET_LOCK, 0, NULL, 2,
Result);
if (status < 0)
- printk(KERN_ERR "drxk: %s status = %08x\n", __func__, status);
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
if (Result[1] < SCU_RAM_QAM_LOCKED_LOCKED_DEMOD_LOCKED) {
/* 0x0000 NOT LOCKED */
@@ -5388,12 +5415,67 @@ static int GetQAMLockStatus(struct drxk_state *state, u32 *pLockStatus)
#define QAM_LOCKRANGE__M 0x10
#define QAM_LOCKRANGE_NORMAL 0x10
+static int QAMDemodulatorCommand(struct drxk_state *state,
+ int numberOfParameters)
+{
+ int status;
+ u16 cmdResult;
+ u16 setParamParameters[4] = { 0, 0, 0, 0 };
+
+ setParamParameters[0] = state->m_Constellation; /* modulation */
+ setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
+
+ if (numberOfParameters == 2) {
+ u16 setEnvParameters[1] = { 0 };
+
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setEnvParameters[0] = QAM_TOP_ANNEX_C;
+ else
+ setEnvParameters[0] = QAM_TOP_ANNEX_A;
+
+ status = scu_command(state,
+ SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV,
+ 1, setEnvParameters, 1, &cmdResult);
+ if (status < 0)
+ goto error;
+
+ status = scu_command(state,
+ SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM,
+ numberOfParameters, setParamParameters,
+ 1, &cmdResult);
+ } else if (numberOfParameters == 4) {
+ if (state->m_OperationMode == OM_QAM_ITU_C)
+ setParamParameters[2] = QAM_TOP_ANNEX_C;
+ else
+ setParamParameters[2] = QAM_TOP_ANNEX_A;
+
+ setParamParameters[3] |= (QAM_MIRROR_AUTO_ON);
+ /* Env parameters */
+ /* check for LOCKRANGE Extended */
+ /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */
+
+ status = scu_command(state,
+ SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM,
+ numberOfParameters, setParamParameters,
+ 1, &cmdResult);
+ } else {
+ printk(KERN_WARNING "drxk: Unknown QAM demodulator parameter "
+ "count %d\n", numberOfParameters);
+ status = -EINVAL;
+ }
+
+error:
+ if (status < 0)
+ printk(KERN_WARNING "drxk: Warning %d on %s\n",
+ status, __func__);
+ return status;
+}
+
static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
s32 tunerFreqOffset)
{
int status;
- u16 setParamParameters[4] = { 0, 0, 0, 0 };
u16 cmdResult;
+ int qamDemodParamCount = state->qam_demod_parameter_count;
dprintk(1, "\n");
/*
@@ -5445,34 +5527,42 @@ static int SetQAM(struct drxk_state *state, u16 IntermediateFreqkHz,
}
if (status < 0)
goto error;
- setParamParameters[0] = state->m_Constellation; /* modulation */
- setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
- if (state->m_OperationMode == OM_QAM_ITU_C)
- setParamParameters[2] = QAM_TOP_ANNEX_C;
- else
- setParamParameters[2] = QAM_TOP_ANNEX_A;
- setParamParameters[3] |= (QAM_MIRROR_AUTO_ON);
- /* Env parameters */
- /* check for LOCKRANGE Extented */
- /* setParamParameters[3] |= QAM_LOCKRANGE_NORMAL; */
- status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 4, setParamParameters, 1, &cmdResult);
- if (status < 0) {
- /* Fall-back to the simpler call */
- if (state->m_OperationMode == OM_QAM_ITU_C)
- setParamParameters[0] = QAM_TOP_ANNEX_C;
- else
- setParamParameters[0] = QAM_TOP_ANNEX_A;
- status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_ENV, 1, setParamParameters, 1, &cmdResult);
- if (status < 0)
- goto error;
+ /* Use the 4-parameter command if it was requested or if we're
+ * probing for the correct command. */
+ if (state->qam_demod_parameter_count == 4
+ || !state->qam_demod_parameter_count) {
+ qamDemodParamCount = 4;
+ status = QAMDemodulatorCommand(state, qamDemodParamCount);
+ }
- setParamParameters[0] = state->m_Constellation; /* modulation */
- setParamParameters[1] = DRXK_QAM_I12_J17; /* interleave mode */
- status = scu_command(state, SCU_RAM_COMMAND_STANDARD_QAM | SCU_RAM_COMMAND_CMD_DEMOD_SET_PARAM, 2, setParamParameters, 1, &cmdResult);
+ /* Use the 2-parameter command if it was requested or if we're
+ * probing for the correct command and the 4-parameter command
+ * failed. */
+ if (state->qam_demod_parameter_count == 2
+ || (!state->qam_demod_parameter_count && status < 0)) {
+ qamDemodParamCount = 2;
+ status = QAMDemodulatorCommand(state, qamDemodParamCount);
}
- if (status < 0)
+
+ if (status < 0) {
+ dprintk(1, "Could not set demodulator parameters. Make "
+ "sure qam_demod_parameter_count (%d) is correct for "
+ "your firmware (%s).\n",
+ state->qam_demod_parameter_count,
+ state->microcode_name);
goto error;
+ } else if (!state->qam_demod_parameter_count) {
+ dprintk(1, "Auto-probing the correct QAM demodulator command "
+ "parameters was successful - using %d parameters.\n",
+ qamDemodParamCount);
+
+ /*
+ * One of the commands succeeded, so we don't need to auto-probe
+ * anymore; we now know the correct command.
+ */
+ state->qam_demod_parameter_count = qamDemodParamCount;
+ }
/*
* STEP 3: enable the system in a mode where the ADC provides valid
@@ -5968,34 +6058,15 @@ error:
return status;
}
-static int load_microcode(struct drxk_state *state, const char *mc_name)
-{
- const struct firmware *fw = NULL;
- int err = 0;
-
- dprintk(1, "\n");
-
- err = request_firmware(&fw, mc_name, state->i2c->dev.parent);
- if (err < 0) {
- printk(KERN_ERR
- "drxk: Could not load firmware file %s.\n", mc_name);
- printk(KERN_INFO
- "drxk: Copy %s to your hotplug directory!\n", mc_name);
- return err;
- }
- err = DownloadMicrocode(state, fw->data, fw->size);
- release_firmware(fw);
- return err;
-}
-
static int init_drxk(struct drxk_state *state)
{
- int status = 0;
+ int status = 0, n = 0;
enum DRXPowerMode powerMode = DRXK_POWER_DOWN_OFDM;
u16 driverVersion;
dprintk(1, "\n");
if ((state->m_DrxkState == DRXK_UNINITIALIZED)) {
+ drxk_i2c_lock(state);
status = PowerUpDevice(state);
if (status < 0)
goto error;
@@ -6073,8 +6144,12 @@ static int init_drxk(struct drxk_state *state)
if (status < 0)
goto error;
- if (state->microcode_name)
- load_microcode(state, state->microcode_name);
+ if (state->fw) {
+ status = DownloadMicrocode(state, state->fw->data,
+ state->fw->size);
+ if (status < 0)
+ goto error;
+ }
/* disable token-ring bus through OFDM block for possible ucode upload */
status = write16(state, SIO_OFDM_SH_OFDM_RING_ENABLE__A, SIO_OFDM_SH_OFDM_RING_ENABLE_OFF);
@@ -6167,19 +6242,71 @@ static int init_drxk(struct drxk_state *state)
state->m_DrxkState = DRXK_POWERED_DOWN;
} else
state->m_DrxkState = DRXK_STOPPED;
+
+ /* Initialize the supported delivery systems */
+ n = 0;
+ if (state->m_hasDVBC) {
+ state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_A;
+ state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_C;
+ strlcat(state->frontend.ops.info.name, " DVB-C",
+ sizeof(state->frontend.ops.info.name));
+ }
+ if (state->m_hasDVBT) {
+ state->frontend.ops.delsys[n++] = SYS_DVBT;
+ strlcat(state->frontend.ops.info.name, " DVB-T",
+ sizeof(state->frontend.ops.info.name));
+ }
+ drxk_i2c_unlock(state);
}
error:
- if (status < 0)
+ if (status < 0) {
+ state->m_DrxkState = DRXK_NO_DEV;
+ drxk_i2c_unlock(state);
printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ }
return status;
}
+static void load_firmware_cb(const struct firmware *fw,
+ void *context)
+{
+ struct drxk_state *state = context;
+
+ dprintk(1, ": %s\n", fw ? "firmware loaded" : "firmware not loaded");
+ if (!fw) {
+ printk(KERN_ERR
+ "drxk: Could not load firmware file %s.\n",
+ state->microcode_name);
+ printk(KERN_INFO
+ "drxk: Copy %s to your hotplug directory!\n",
+ state->microcode_name);
+ state->microcode_name = NULL;
+
+ /*
+ * As the firmware is now loaded asynchronously, it is no longer
+ * possible to fail at frontend attach. We could silently return
+ * here and hope that the driver won't crash, or change all DVB
+ * callbacks to return -ENODEV if the device is not initialized.
+ * As the DRX-K devices have their own internal firmware, let's
+ * just hope that it matches a firmware revision compatible with
+ * this driver, and proceed.
+ */
+ }
+ state->fw = fw;
+
+ init_drxk(state);
+}
+
static void drxk_release(struct dvb_frontend *fe)
{
struct drxk_state *state = fe->demodulator_priv;
dprintk(1, "\n");
+ if (state->fw)
+ release_firmware(state->fw);
+
kfree(state);
}
@@ -6188,6 +6315,12 @@ static int drxk_sleep(struct dvb_frontend *fe)
struct drxk_state *state = fe->demodulator_priv;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return 0;
+
ShutDown(state);
return 0;
}
@@ -6196,7 +6329,11 @@ static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct drxk_state *state = fe->demodulator_priv;
- dprintk(1, "%s\n", enable ? "enable" : "disable");
+ dprintk(1, ": %s\n", enable ? "enable" : "disable");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+
return ConfigureI2CBridge(state, enable ? true : false);
}
@@ -6209,6 +6346,12 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
dprintk(1, "\n");
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
if (!fe->ops.tuner_ops.get_if_frequency) {
printk(KERN_ERR
"drxk: Error: get_if_frequency() not defined at tuner. Can't work without it!\n");
@@ -6262,6 +6405,12 @@ static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
u32 stat;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
*status = 0;
GetLockStatus(state, &stat, 0);
if (stat == MPEG_LOCK)
@@ -6275,8 +6424,15 @@ static int drxk_read_status(struct dvb_frontend *fe, fe_status_t *status)
static int drxk_read_ber(struct dvb_frontend *fe, u32 *ber)
{
+ struct drxk_state *state = fe->demodulator_priv;
+
dprintk(1, "\n");
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
*ber = 0;
return 0;
}
@@ -6288,6 +6444,12 @@ static int drxk_read_signal_strength(struct dvb_frontend *fe,
u32 val = 0;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
ReadIFAgc(state, &val);
*strength = val & 0xffff;
return 0;
@@ -6299,6 +6461,12 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr)
s32 snr2;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
GetSignalToNoise(state, &snr2);
*snr = snr2 & 0xffff;
return 0;
@@ -6310,6 +6478,12 @@ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
u16 err;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
DVBTQAMGetAccPktErr(state, &err);
*ucblocks = (u32) err;
return 0;
@@ -6318,9 +6492,16 @@ static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int drxk_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings
*sets)
{
+ struct drxk_state *state = fe->demodulator_priv;
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
dprintk(1, "\n");
+
+ if (state->m_DrxkState == DRXK_NO_DEV)
+ return -ENODEV;
+ if (state->m_DrxkState == DRXK_UNINITIALIZED)
+ return -EAGAIN;
+
switch (p->delivery_system) {
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
@@ -6371,10 +6552,9 @@ static struct dvb_frontend_ops drxk_ops = {
struct dvb_frontend *drxk_attach(const struct drxk_config *config,
struct i2c_adapter *i2c)
{
- int n;
-
struct drxk_state *state = NULL;
u8 adr = config->adr;
+ int status;
dprintk(1, "\n");
state = kzalloc(sizeof(struct drxk_state), GFP_KERNEL);
@@ -6385,6 +6565,7 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
state->demod_address = adr;
state->single_master = config->single_master;
state->microcode_name = config->microcode_name;
+ state->qam_demod_parameter_count = config->qam_demod_parameter_count;
state->no_i2c_bridge = config->no_i2c_bridge;
state->antenna_gpio = config->antenna_gpio;
state->antenna_dvbt = config->antenna_dvbt;
@@ -6425,22 +6606,21 @@ struct dvb_frontend *drxk_attach(const struct drxk_config *config,
state->frontend.demodulator_priv = state;
init_state(state);
- if (init_drxk(state) < 0)
- goto error;
- /* Initialize the supported delivery systems */
- n = 0;
- if (state->m_hasDVBC) {
- state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_A;
- state->frontend.ops.delsys[n++] = SYS_DVBC_ANNEX_C;
- strlcat(state->frontend.ops.info.name, " DVB-C",
- sizeof(state->frontend.ops.info.name));
- }
- if (state->m_hasDVBT) {
- state->frontend.ops.delsys[n++] = SYS_DVBT;
- strlcat(state->frontend.ops.info.name, " DVB-T",
- sizeof(state->frontend.ops.info.name));
- }
+ /* Load firmware and initialize DRX-K */
+ if (state->microcode_name) {
+ status = request_firmware_nowait(THIS_MODULE, 1,
+ state->microcode_name,
+ state->i2c->dev.parent,
+ GFP_KERNEL,
+ state, load_firmware_cb);
+ if (status < 0) {
+ printk(KERN_ERR
+ "drxk: failed to request a firmware\n");
+ return NULL;
+ }
+ } else if (init_drxk(state) < 0)
+ goto error;
printk(KERN_INFO "drxk: frontend initialized.\n");
return &state->frontend;
diff --git a/drivers/media/dvb/frontends/drxk_hard.h b/drivers/media/dvb/frontends/drxk_hard.h
index 4bbf841de83a..6bb9fc4a7b96 100644
--- a/drivers/media/dvb/frontends/drxk_hard.h
+++ b/drivers/media/dvb/frontends/drxk_hard.h
@@ -94,7 +94,15 @@ enum DRXPowerMode {
enum AGC_CTRL_MODE { DRXK_AGC_CTRL_AUTO = 0, DRXK_AGC_CTRL_USER, DRXK_AGC_CTRL_OFF };
-enum EDrxkState { DRXK_UNINITIALIZED = 0, DRXK_STOPPED, DRXK_DTV_STARTED, DRXK_ATV_STARTED, DRXK_POWERED_DOWN };
+enum EDrxkState {
+ DRXK_UNINITIALIZED = 0,
+ DRXK_STOPPED,
+ DRXK_DTV_STARTED,
+ DRXK_ATV_STARTED,
+ DRXK_POWERED_DOWN,
+ DRXK_NO_DEV /* If drxk init failed */
+};
+
enum EDrxkCoefArrayIndex {
DRXK_COEF_IDX_MN = 0,
DRXK_COEF_IDX_FM ,
@@ -325,6 +333,9 @@ struct drxk_state {
enum DRXPowerMode m_currentPowerMode;
+ /* when true, prevents other devices from using the I2C bus */
+ bool drxk_i2c_exclusive_lock;
+
/*
* Configurable parameters at the driver. They stores the values found
* at struct drxk_config.
@@ -338,7 +349,11 @@ struct drxk_state {
bool antenna_dvbt;
u16 antenna_gpio;
+ /* Firmware */
const char *microcode_name;
+ struct completion fw_wait_load;
+ const struct firmware *fw;
+ int qam_demod_parameter_count;
};
#define NEVER_LOCK 0
diff --git a/drivers/media/dvb/frontends/lgs8gxx.c b/drivers/media/dvb/frontends/lgs8gxx.c
index 568363a10a31..c2ea2749ebed 100644
--- a/drivers/media/dvb/frontends/lgs8gxx.c
+++ b/drivers/media/dvb/frontends/lgs8gxx.c
@@ -40,6 +40,8 @@
static int debug;
static int fake_signal_str = 1;
+#define LGS8GXX_FIRMWARE "lgs8g75.fw"
+
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
@@ -592,7 +594,7 @@ static int lgs8g75_init_data(struct lgs8gxx_state *priv)
int rc;
int i;
- rc = request_firmware(&fw, "lgs8g75.fw", &priv->i2c->dev);
+ rc = request_firmware(&fw, LGS8GXX_FIRMWARE, &priv->i2c->dev);
if (rc)
return rc;
@@ -1070,3 +1072,4 @@ EXPORT_SYMBOL(lgs8gxx_attach);
MODULE_DESCRIPTION("Legend Silicon LGS8913/LGS8GXX DMB-TH demodulator driver");
MODULE_AUTHOR("David T. L. Wong <davidtlwong@gmail.com>");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(LGS8GXX_FIRMWARE);
diff --git a/drivers/media/dvb/frontends/rtl2832.c b/drivers/media/dvb/frontends/rtl2832.c
new file mode 100644
index 000000000000..28269ccaeab7
--- /dev/null
+++ b/drivers/media/dvb/frontends/rtl2832.c
@@ -0,0 +1,789 @@
+/*
+ * Realtek RTL2832 DVB-T demodulator driver
+ *
+ * Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "rtl2832_priv.h"
+#include <linux/bitops.h>
+
+int rtl2832_debug;
+module_param_named(debug, rtl2832_debug, int, 0644);
+MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
+
+#define REG_MASK(b) (BIT(b + 1) - 1)
+
+static const struct rtl2832_reg_entry registers[] = {
+ [DVBT_SOFT_RST] = {0x1, 0x1, 2, 2},
+ [DVBT_IIC_REPEAT] = {0x1, 0x1, 3, 3},
+ [DVBT_TR_WAIT_MIN_8K] = {0x1, 0x88, 11, 2},
+ [DVBT_RSD_BER_FAIL_VAL] = {0x1, 0x8f, 15, 0},
+ [DVBT_EN_BK_TRK] = {0x1, 0xa6, 7, 7},
+ [DVBT_AD_EN_REG] = {0x0, 0x8, 7, 7},
+ [DVBT_AD_EN_REG1] = {0x0, 0x8, 6, 6},
+ [DVBT_EN_BBIN] = {0x1, 0xb1, 0, 0},
+ [DVBT_MGD_THD0] = {0x1, 0x95, 7, 0},
+ [DVBT_MGD_THD1] = {0x1, 0x96, 7, 0},
+ [DVBT_MGD_THD2] = {0x1, 0x97, 7, 0},
+ [DVBT_MGD_THD3] = {0x1, 0x98, 7, 0},
+ [DVBT_MGD_THD4] = {0x1, 0x99, 7, 0},
+ [DVBT_MGD_THD5] = {0x1, 0x9a, 7, 0},
+ [DVBT_MGD_THD6] = {0x1, 0x9b, 7, 0},
+ [DVBT_MGD_THD7] = {0x1, 0x9c, 7, 0},
+ [DVBT_EN_CACQ_NOTCH] = {0x1, 0x61, 4, 4},
+ [DVBT_AD_AV_REF] = {0x0, 0x9, 6, 0},
+ [DVBT_REG_PI] = {0x0, 0xa, 2, 0},
+ [DVBT_PIP_ON] = {0x0, 0x21, 3, 3},
+ [DVBT_SCALE1_B92] = {0x2, 0x92, 7, 0},
+ [DVBT_SCALE1_B93] = {0x2, 0x93, 7, 0},
+ [DVBT_SCALE1_BA7] = {0x2, 0xa7, 7, 0},
+ [DVBT_SCALE1_BA9] = {0x2, 0xa9, 7, 0},
+ [DVBT_SCALE1_BAA] = {0x2, 0xaa, 7, 0},
+ [DVBT_SCALE1_BAB] = {0x2, 0xab, 7, 0},
+ [DVBT_SCALE1_BAC] = {0x2, 0xac, 7, 0},
+ [DVBT_SCALE1_BB0] = {0x2, 0xb0, 7, 0},
+ [DVBT_SCALE1_BB1] = {0x2, 0xb1, 7, 0},
+ [DVBT_KB_P1] = {0x1, 0x64, 3, 1},
+ [DVBT_KB_P2] = {0x1, 0x64, 6, 4},
+ [DVBT_KB_P3] = {0x1, 0x65, 2, 0},
+ [DVBT_OPT_ADC_IQ] = {0x0, 0x6, 5, 4},
+ [DVBT_AD_AVI] = {0x0, 0x9, 1, 0},
+ [DVBT_AD_AVQ] = {0x0, 0x9, 3, 2},
+ [DVBT_K1_CR_STEP12] = {0x2, 0xad, 9, 4},
+ [DVBT_TRK_KS_P2] = {0x1, 0x6f, 2, 0},
+ [DVBT_TRK_KS_I2] = {0x1, 0x70, 5, 3},
+ [DVBT_TR_THD_SET2] = {0x1, 0x72, 3, 0},
+ [DVBT_TRK_KC_P2] = {0x1, 0x73, 5, 3},
+ [DVBT_TRK_KC_I2] = {0x1, 0x75, 2, 0},
+ [DVBT_CR_THD_SET2] = {0x1, 0x76, 7, 6},
+ [DVBT_PSET_IFFREQ] = {0x1, 0x19, 21, 0},
+ [DVBT_SPEC_INV] = {0x1, 0x15, 0, 0},
+ [DVBT_RSAMP_RATIO] = {0x1, 0x9f, 27, 2},
+ [DVBT_CFREQ_OFF_RATIO] = {0x1, 0x9d, 23, 4},
+ [DVBT_FSM_STAGE] = {0x3, 0x51, 6, 3},
+ [DVBT_RX_CONSTEL] = {0x3, 0x3c, 3, 2},
+ [DVBT_RX_HIER] = {0x3, 0x3c, 6, 4},
+ [DVBT_RX_C_RATE_LP] = {0x3, 0x3d, 2, 0},
+ [DVBT_RX_C_RATE_HP] = {0x3, 0x3d, 5, 3},
+ [DVBT_GI_IDX] = {0x3, 0x51, 1, 0},
+ [DVBT_FFT_MODE_IDX] = {0x3, 0x51, 2, 2},
+ [DVBT_RSD_BER_EST] = {0x3, 0x4e, 15, 0},
+ [DVBT_CE_EST_EVM] = {0x4, 0xc, 15, 0},
+ [DVBT_RF_AGC_VAL] = {0x3, 0x5b, 13, 0},
+ [DVBT_IF_AGC_VAL] = {0x3, 0x59, 13, 0},
+ [DVBT_DAGC_VAL] = {0x3, 0x5, 7, 0},
+ [DVBT_SFREQ_OFF] = {0x3, 0x18, 13, 0},
+ [DVBT_CFREQ_OFF] = {0x3, 0x5f, 17, 0},
+ [DVBT_POLAR_RF_AGC] = {0x0, 0xe, 1, 1},
+ [DVBT_POLAR_IF_AGC] = {0x0, 0xe, 0, 0},
+ [DVBT_AAGC_HOLD] = {0x1, 0x4, 5, 5},
+ [DVBT_EN_RF_AGC] = {0x1, 0x4, 6, 6},
+ [DVBT_EN_IF_AGC] = {0x1, 0x4, 7, 7},
+ [DVBT_IF_AGC_MIN] = {0x1, 0x8, 7, 0},
+ [DVBT_IF_AGC_MAX] = {0x1, 0x9, 7, 0},
+ [DVBT_RF_AGC_MIN] = {0x1, 0xa, 7, 0},
+ [DVBT_RF_AGC_MAX] = {0x1, 0xb, 7, 0},
+ [DVBT_IF_AGC_MAN] = {0x1, 0xc, 6, 6},
+ [DVBT_IF_AGC_MAN_VAL] = {0x1, 0xc, 13, 0},
+ [DVBT_RF_AGC_MAN] = {0x1, 0xe, 6, 6},
+ [DVBT_RF_AGC_MAN_VAL] = {0x1, 0xe, 13, 0},
+ [DVBT_DAGC_TRG_VAL] = {0x1, 0x12, 7, 0},
+ [DVBT_AGC_TARG_VAL_0] = {0x1, 0x2, 0, 0},
+ [DVBT_AGC_TARG_VAL_8_1] = {0x1, 0x3, 7, 0},
+ [DVBT_AAGC_LOOP_GAIN] = {0x1, 0xc7, 5, 1},
+ [DVBT_LOOP_GAIN2_3_0] = {0x1, 0x4, 4, 1},
+ [DVBT_LOOP_GAIN2_4] = {0x1, 0x5, 7, 7},
+ [DVBT_LOOP_GAIN3] = {0x1, 0xc8, 4, 0},
+ [DVBT_VTOP1] = {0x1, 0x6, 5, 0},
+ [DVBT_VTOP2] = {0x1, 0xc9, 5, 0},
+ [DVBT_VTOP3] = {0x1, 0xca, 5, 0},
+ [DVBT_KRF1] = {0x1, 0xcb, 7, 0},
+ [DVBT_KRF2] = {0x1, 0x7, 7, 0},
+ [DVBT_KRF3] = {0x1, 0xcd, 7, 0},
+ [DVBT_KRF4] = {0x1, 0xce, 7, 0},
+ [DVBT_EN_GI_PGA] = {0x1, 0xe5, 0, 0},
+ [DVBT_THD_LOCK_UP] = {0x1, 0xd9, 8, 0},
+ [DVBT_THD_LOCK_DW] = {0x1, 0xdb, 8, 0},
+ [DVBT_THD_UP1] = {0x1, 0xdd, 7, 0},
+ [DVBT_THD_DW1] = {0x1, 0xde, 7, 0},
+ [DVBT_INTER_CNT_LEN] = {0x1, 0xd8, 3, 0},
+ [DVBT_GI_PGA_STATE] = {0x1, 0xe6, 3, 3},
+ [DVBT_EN_AGC_PGA] = {0x1, 0xd7, 0, 0},
+ [DVBT_CKOUTPAR] = {0x1, 0x7b, 5, 5},
+ [DVBT_CKOUT_PWR] = {0x1, 0x7b, 6, 6},
+ [DVBT_SYNC_DUR] = {0x1, 0x7b, 7, 7},
+ [DVBT_ERR_DUR] = {0x1, 0x7c, 0, 0},
+ [DVBT_SYNC_LVL] = {0x1, 0x7c, 1, 1},
+ [DVBT_ERR_LVL] = {0x1, 0x7c, 2, 2},
+ [DVBT_VAL_LVL] = {0x1, 0x7c, 3, 3},
+ [DVBT_SERIAL] = {0x1, 0x7c, 4, 4},
+ [DVBT_SER_LSB] = {0x1, 0x7c, 5, 5},
+ [DVBT_CDIV_PH0] = {0x1, 0x7d, 3, 0},
+ [DVBT_CDIV_PH1] = {0x1, 0x7d, 7, 4},
+ [DVBT_MPEG_IO_OPT_2_2] = {0x0, 0x6, 7, 7},
+ [DVBT_MPEG_IO_OPT_1_0] = {0x0, 0x7, 7, 6},
+ [DVBT_CKOUTPAR_PIP] = {0x0, 0xb7, 4, 4},
+ [DVBT_CKOUT_PWR_PIP] = {0x0, 0xb7, 3, 3},
+ [DVBT_SYNC_LVL_PIP] = {0x0, 0xb7, 2, 2},
+ [DVBT_ERR_LVL_PIP] = {0x0, 0xb7, 1, 1},
+ [DVBT_VAL_LVL_PIP] = {0x0, 0xb7, 0, 0},
+ [DVBT_CKOUTPAR_PID] = {0x0, 0xb9, 4, 4},
+ [DVBT_CKOUT_PWR_PID] = {0x0, 0xb9, 3, 3},
+ [DVBT_SYNC_LVL_PID] = {0x0, 0xb9, 2, 2},
+ [DVBT_ERR_LVL_PID] = {0x0, 0xb9, 1, 1},
+ [DVBT_VAL_LVL_PID] = {0x0, 0xb9, 0, 0},
+ [DVBT_SM_PASS] = {0x1, 0x93, 11, 0},
+ [DVBT_AD7_SETTING] = {0x0, 0x11, 15, 0},
+ [DVBT_RSSI_R] = {0x3, 0x1, 6, 0},
+ [DVBT_ACI_DET_IND] = {0x3, 0x12, 0, 0},
+ [DVBT_REG_MON] = {0x0, 0xd, 1, 0},
+ [DVBT_REG_MONSEL] = {0x0, 0xd, 2, 2},
+ [DVBT_REG_GPE] = {0x0, 0xd, 7, 7},
+ [DVBT_REG_GPO] = {0x0, 0x10, 0, 0},
+ [DVBT_REG_4MSEL] = {0x0, 0x13, 0, 0},
+};
+
+/* write multiple hardware registers */
+static int rtl2832_wr(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
+{
+ int ret;
+ u8 buf[1+len];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = 1+len,
+ .buf = buf,
+ }
+ };
+
+ buf[0] = reg;
+ memcpy(&buf[1], val, len);
+
+ ret = i2c_transfer(priv->i2c, msg, 1);
+ if (ret == 1) {
+ ret = 0;
+ } else {
+ warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* read multiple hardware registers */
+static int rtl2832_rd(struct rtl2832_priv *priv, u8 reg, u8 *val, int len)
+{
+ int ret;
+ struct i2c_msg msg[2] = {
+ {
+ .addr = priv->cfg.i2c_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &reg,
+ }, {
+ .addr = priv->cfg.i2c_addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret == 2) {
+ ret = 0;
+ } else {
+ warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
+ ret = -EREMOTEIO;
+ }
+ return ret;
+}
+
+/* write multiple registers */
+static int rtl2832_wr_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
+ int len)
+{
+ int ret;
+
+
+ /* switch bank if needed */
+ if (page != priv->page) {
+ ret = rtl2832_wr(priv, 0x00, &page, 1);
+ if (ret)
+ return ret;
+
+ priv->page = page;
+ }
+
+ return rtl2832_wr(priv, reg, val, len);
+}
+
+/* read multiple registers */
+static int rtl2832_rd_regs(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val,
+ int len)
+{
+ int ret;
+
+ /* switch bank if needed */
+ if (page != priv->page) {
+ ret = rtl2832_wr(priv, 0x00, &page, 1);
+ if (ret)
+ return ret;
+
+ priv->page = page;
+ }
+
+ return rtl2832_rd(priv, reg, val, len);
+}
+
+#if 0 /* currently not used */
+/* write single register */
+static int rtl2832_wr_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 val)
+{
+ return rtl2832_wr_regs(priv, reg, page, &val, 1);
+}
+#endif
+
+/* read single register */
+static int rtl2832_rd_reg(struct rtl2832_priv *priv, u8 reg, u8 page, u8 *val)
+{
+ return rtl2832_rd_regs(priv, reg, page, val, 1);
+}
+
+int rtl2832_rd_demod_reg(struct rtl2832_priv *priv, int reg, u32 *val)
+{
+ int ret;
+
+ u8 reg_start_addr;
+ u8 msb, lsb;
+ u8 page;
+ u8 reading[4];
+ u32 reading_tmp;
+ int i;
+
+ u8 len;
+ u32 mask;
+
+ reg_start_addr = registers[reg].start_address;
+ msb = registers[reg].msb;
+ lsb = registers[reg].lsb;
+ page = registers[reg].page;
+
+ len = (msb >> 3) + 1;
+ mask = REG_MASK(msb - lsb);
+
+ ret = rtl2832_rd_regs(priv, reg_start_addr, page, &reading[0], len);
+ if (ret)
+ goto err;
+
+ reading_tmp = 0;
+ for (i = 0; i < len; i++)
+ reading_tmp |= reading[i] << ((len - 1 - i) * 8);
+
+ *val = (reading_tmp >> lsb) & mask;
+
+ return ret;
+
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+
+}
+
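A worked example of this bit-field mapping, using the DVBT_PSET_IFFREQ entry {0x1, 0x19, 21, 0} from the register table above:

/*
 * DVBT_PSET_IFFREQ: page 0x1, start address 0x19, msb 21, lsb 0
 *   len  = (21 >> 3) + 1    = 3 bytes read, registers 0x19..0x1b
 *   mask = REG_MASK(21 - 0) = BIT(22) - 1 = 0x3fffff
 * The three bytes are assembled MSB-first into reading_tmp and the
 * 22-bit field is extracted with (reading_tmp >> 0) & 0x3fffff.
 */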
+int rtl2832_wr_demod_reg(struct rtl2832_priv *priv, int reg, u32 val)
+{
+ int ret, i;
+ u8 len;
+ u8 reg_start_addr;
+ u8 msb, lsb;
+ u8 page;
+ u32 mask;
+
+
+ u8 reading[4];
+ u8 writing[4];
+ u32 reading_tmp;
+ u32 writing_tmp;
+
+
+ reg_start_addr = registers[reg].start_address;
+ msb = registers[reg].msb;
+ lsb = registers[reg].lsb;
+ page = registers[reg].page;
+
+ len = (msb >> 3) + 1;
+ mask = REG_MASK(msb - lsb);
+
+
+ ret = rtl2832_rd_regs(priv, reg_start_addr, page, &reading[0], len);
+ if (ret)
+ goto err;
+
+ reading_tmp = 0;
+ for (i = 0; i < len; i++)
+ reading_tmp |= reading[i] << ((len - 1 - i) * 8);
+
+ writing_tmp = reading_tmp & ~(mask << lsb);
+ writing_tmp |= ((val & mask) << lsb);
+
+
+ for (i = 0; i < len; i++)
+ writing[i] = (writing_tmp >> ((len - 1 - i) * 8)) & 0xff;
+
+ ret = rtl2832_wr_regs(priv, reg_start_addr, page, &writing[0], len);
+ if (ret)
+ goto err;
+
+ return ret;
+
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+
+}
+
+
+static int rtl2832_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+ int ret;
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+
+ dbg("%s: enable=%d", __func__, enable);
+
+ /* gate already open or close */
+ if (priv->i2c_gate_state == enable)
+ return 0;
+
+ ret = rtl2832_wr_demod_reg(priv, DVBT_IIC_REPEAT, (enable ? 0x1 : 0x0));
+ if (ret)
+ goto err;
+
+ priv->i2c_gate_state = enable;
+
+ return ret;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+
+
+static int rtl2832_init(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ int i, ret;
+
+ u8 en_bbin;
+ u64 pset_iffreq;
+
+ /* initialization values for the demodulator registers */
+ struct rtl2832_reg_value rtl2832_initial_regs[] = {
+ {DVBT_AD_EN_REG, 0x1},
+ {DVBT_AD_EN_REG1, 0x1},
+ {DVBT_RSD_BER_FAIL_VAL, 0x2800},
+ {DVBT_MGD_THD0, 0x10},
+ {DVBT_MGD_THD1, 0x20},
+ {DVBT_MGD_THD2, 0x20},
+ {DVBT_MGD_THD3, 0x40},
+ {DVBT_MGD_THD4, 0x22},
+ {DVBT_MGD_THD5, 0x32},
+ {DVBT_MGD_THD6, 0x37},
+ {DVBT_MGD_THD7, 0x39},
+ {DVBT_EN_BK_TRK, 0x0},
+ {DVBT_EN_CACQ_NOTCH, 0x0},
+ {DVBT_AD_AV_REF, 0x2a},
+ {DVBT_REG_PI, 0x6},
+ {DVBT_PIP_ON, 0x0},
+ {DVBT_CDIV_PH0, 0x8},
+ {DVBT_CDIV_PH1, 0x8},
+ {DVBT_SCALE1_B92, 0x4},
+ {DVBT_SCALE1_B93, 0xb0},
+ {DVBT_SCALE1_BA7, 0x78},
+ {DVBT_SCALE1_BA9, 0x28},
+ {DVBT_SCALE1_BAA, 0x59},
+ {DVBT_SCALE1_BAB, 0x83},
+ {DVBT_SCALE1_BAC, 0xd4},
+ {DVBT_SCALE1_BB0, 0x65},
+ {DVBT_SCALE1_BB1, 0x43},
+ {DVBT_KB_P1, 0x1},
+ {DVBT_KB_P2, 0x4},
+ {DVBT_KB_P3, 0x7},
+ {DVBT_K1_CR_STEP12, 0xa},
+ {DVBT_REG_GPE, 0x1},
+ {DVBT_SERIAL, 0x0},
+ {DVBT_CDIV_PH0, 0x9},
+ {DVBT_CDIV_PH1, 0x9},
+ {DVBT_MPEG_IO_OPT_2_2, 0x0},
+ {DVBT_MPEG_IO_OPT_1_0, 0x0},
+ {DVBT_TRK_KS_P2, 0x4},
+ {DVBT_TRK_KS_I2, 0x7},
+ {DVBT_TR_THD_SET2, 0x6},
+ {DVBT_TRK_KC_I2, 0x5},
+ {DVBT_CR_THD_SET2, 0x1},
+ {DVBT_SPEC_INV, 0x0},
+ {DVBT_DAGC_TRG_VAL, 0x5a},
+ {DVBT_AGC_TARG_VAL_0, 0x0},
+ {DVBT_AGC_TARG_VAL_8_1, 0x5a},
+ {DVBT_AAGC_LOOP_GAIN, 0x16},
+ {DVBT_LOOP_GAIN2_3_0, 0x6},
+ {DVBT_LOOP_GAIN2_4, 0x1},
+ {DVBT_LOOP_GAIN3, 0x16},
+ {DVBT_VTOP1, 0x35},
+ {DVBT_VTOP2, 0x21},
+ {DVBT_VTOP3, 0x21},
+ {DVBT_KRF1, 0x0},
+ {DVBT_KRF2, 0x40},
+ {DVBT_KRF3, 0x10},
+ {DVBT_KRF4, 0x10},
+ {DVBT_IF_AGC_MIN, 0x80},
+ {DVBT_IF_AGC_MAX, 0x7f},
+ {DVBT_RF_AGC_MIN, 0x80},
+ {DVBT_RF_AGC_MAX, 0x7f},
+ {DVBT_POLAR_RF_AGC, 0x0},
+ {DVBT_POLAR_IF_AGC, 0x0},
+ {DVBT_AD7_SETTING, 0xe9bf},
+ {DVBT_EN_GI_PGA, 0x0},
+ {DVBT_THD_LOCK_UP, 0x0},
+ {DVBT_THD_LOCK_DW, 0x0},
+ {DVBT_THD_UP1, 0x11},
+ {DVBT_THD_DW1, 0xef},
+ {DVBT_INTER_CNT_LEN, 0xc},
+ {DVBT_GI_PGA_STATE, 0x0},
+ {DVBT_EN_AGC_PGA, 0x1},
+ {DVBT_IF_AGC_MAN, 0x0},
+ };
+
+
+ dbg("%s", __func__);
+
+ en_bbin = (priv->cfg.if_dvbt == 0 ? 0x1 : 0x0);
+
+ /*
+ * PSET_IFFREQ = - floor((IfFreqHz % CrystalFreqHz) * pow(2, 22)
+ * / CrystalFreqHz)
+ */
+ pset_iffreq = priv->cfg.if_dvbt % priv->cfg.xtal;
+ pset_iffreq *= 0x400000;
+ pset_iffreq = div_u64(pset_iffreq, priv->cfg.xtal);
+ pset_iffreq = pset_iffreq & 0x3fffff;
+
+
+
+ for (i = 0; i < ARRAY_SIZE(rtl2832_initial_regs); i++) {
+ ret = rtl2832_wr_demod_reg(priv, rtl2832_initial_regs[i].reg,
+ rtl2832_initial_regs[i].value);
+ if (ret)
+ goto err;
+ }
+
+ /* if frequency settings */
+ ret = rtl2832_wr_demod_reg(priv, DVBT_EN_BBIN, en_bbin);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_demod_reg(priv, DVBT_PSET_IFFREQ, pset_iffreq);
+ if (ret)
+ goto err;
+
+ priv->sleeping = false;
+
+ return ret;
+
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ return ret;
+}
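A worked example of the fixed-point IF scaling performed in rtl2832_init(), assuming a 36 MHz IF and the 28.8 MHz crystal (both values illustrative):

/*
 * if_dvbt = 36000000, xtal = 28800000:
 *   36000000 % 28800000               = 7200000
 *   7200000 * 0x400000 (2^22)         = 30198988800000
 *   div_u64(30198988800000, 28800000) = 1048576
 *   1048576 & 0x3fffff                = 0x100000  -> DVBT_PSET_IFFREQ
 * A zero IF (if_dvbt == 0) yields 0 here and enables the direct
 * baseband input instead (en_bbin = 1).
 */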
+
+static int rtl2832_sleep(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+
+ dbg("%s", __func__);
+ priv->sleeping = true;
+ return 0;
+}
+
+int rtl2832_get_tune_settings(struct dvb_frontend *fe,
+ struct dvb_frontend_tune_settings *s)
+{
+ dbg("%s", __func__);
+ s->min_delay_ms = 1000;
+ s->step_size = fe->ops.info.frequency_stepsize * 2;
+ s->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1;
+ return 0;
+}
+
+static int rtl2832_set_frontend(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ int ret, i, j;
+ u64 bw_mode, num, num2;
+ u32 resamp_ratio, cfreq_off_ratio;
+
+
+ static u8 bw_params[3][32] = {
+ /* 6 MHz bandwidth */
+ {
+ 0xf5, 0xff, 0x15, 0x38, 0x5d, 0x6d, 0x52, 0x07, 0xfa, 0x2f,
+ 0x53, 0xf5, 0x3f, 0xca, 0x0b, 0x91, 0xea, 0x30, 0x63, 0xb2,
+ 0x13, 0xda, 0x0b, 0xc4, 0x18, 0x7e, 0x16, 0x66, 0x08, 0x67,
+ 0x19, 0xe0,
+ },
+
+ /* 7 MHz bandwidth */
+ {
+ 0xe7, 0xcc, 0xb5, 0xba, 0xe8, 0x2f, 0x67, 0x61, 0x00, 0xaf,
+ 0x86, 0xf2, 0xbf, 0x59, 0x04, 0x11, 0xb6, 0x33, 0xa4, 0x30,
+ 0x15, 0x10, 0x0a, 0x42, 0x18, 0xf8, 0x17, 0xd9, 0x07, 0x22,
+ 0x19, 0x10,
+ },
+
+ /* 8 MHz bandwidth */
+ {
+ 0x09, 0xf6, 0xd2, 0xa7, 0x9a, 0xc9, 0x27, 0x77, 0x06, 0xbf,
+ 0xec, 0xf4, 0x4f, 0x0b, 0xfc, 0x01, 0x63, 0x35, 0x54, 0xa7,
+ 0x16, 0x66, 0x08, 0xb4, 0x19, 0x6e, 0x19, 0x65, 0x05, 0xc8,
+ 0x19, 0xe0,
+ },
+ };
+
+
+ dbg("%s: frequency=%d bandwidth_hz=%d inversion=%d", __func__,
+ c->frequency, c->bandwidth_hz, c->inversion);
+
+
+ /* program tuner */
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+
+
+ switch (c->bandwidth_hz) {
+ case 6000000:
+ i = 0;
+ bw_mode = 48000000;
+ break;
+ case 7000000:
+ i = 1;
+ bw_mode = 56000000;
+ break;
+ case 8000000:
+ i = 2;
+ bw_mode = 64000000;
+ break;
+ default:
+ dbg("invalid bandwidth");
+ return -EINVAL;
+ }
+
+ for (j = 0; j < sizeof(bw_params[0]); j++) {
+ ret = rtl2832_wr_regs(priv, 0x1c+j, 1, &bw_params[i][j], 1);
+ if (ret)
+ goto err;
+ }
+
+ /* calculate and set resample ratio
+ * RSAMP_RATIO = floor(CrystalFreqHz * 7 * pow(2, 22)
+ * / ConstWithBandwidthMode)
+ */
+ num = priv->cfg.xtal * 7;
+ num *= 0x400000;
+ num = div_u64(num, bw_mode);
+ resamp_ratio = num & 0x3ffffff;
+ ret = rtl2832_wr_demod_reg(priv, DVBT_RSAMP_RATIO, resamp_ratio);
+ if (ret)
+ goto err;
+
+ /* calculate and set cfreq off ratio
+ * CFREQ_OFF_RATIO = - floor(ConstWithBandwidthMode * pow(2, 20)
+ * / (CrystalFreqHz * 7))
+ */
+ num = bw_mode << 20;
+ num2 = priv->cfg.xtal * 7;
+ num = div_u64(num, num2);
+ num = -num;
+ cfreq_off_ratio = num & 0xfffff;
+ ret = rtl2832_wr_demod_reg(priv, DVBT_CFREQ_OFF_RATIO, cfreq_off_ratio);
+ if (ret)
+ goto err;
+
+
+ /* soft reset */
+ ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x1);
+ if (ret)
+ goto err;
+
+ ret = rtl2832_wr_demod_reg(priv, DVBT_SOFT_RST, 0x0);
+ if (ret)
+ goto err;
+
+ return ret;
+err:
+ info("%s: failed=%d", __func__, ret);
+ return ret;
+}
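A worked example of the two ratios computed in rtl2832_set_frontend(), assuming the 28.8 MHz crystal and an 8 MHz channel (bw_mode = 64000000):

/*
 * xtal = 28800000, bw_mode = 64000000:
 *   RSAMP_RATIO     = floor(28800000 * 7 * 2^22 / 64000000)
 *                   = floor(845571686400000 / 64000000)
 *                   = 13212057 (0x0c99999, fits in the 26-bit field)
 *   CFREQ_OFF_RATIO = -floor(64000000 * 2^20 / (28800000 * 7))
 *                   = -floor(67108864000000 / 201600000)
 *                   = -332881, masked to 20 bits = 0xaebaf
 */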
+
+static int rtl2832_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+ int ret;
+ u32 tmp;
+ *status = 0;
+
+
+ dbg("%s", __func__);
+ if (priv->sleeping)
+ return 0;
+
+ ret = rtl2832_rd_demod_reg(priv, DVBT_FSM_STAGE, &tmp);
+ if (ret)
+ goto err;
+
+ if (tmp == 11) {
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK;
+ }
+ /* TODO find out if this is also true for rtl2832? */
+ /*else if (tmp == 10) {
+ *status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
+ FE_HAS_VITERBI;
+ }*/
+
+ return ret;
+err:
+ info("%s: failed=%d", __func__, ret);
+ return ret;
+}
+
+static int rtl2832_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ *snr = 0;
+ return 0;
+}
+
+static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ *ber = 0;
+ return 0;
+}
+
+static int rtl2832_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ *ucblocks = 0;
+ return 0;
+}
+
+
+static int rtl2832_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ *strength = 0;
+ return 0;
+}
+
+static struct dvb_frontend_ops rtl2832_ops;
+
+static void rtl2832_release(struct dvb_frontend *fe)
+{
+ struct rtl2832_priv *priv = fe->demodulator_priv;
+
+ dbg("%s", __func__);
+ kfree(priv);
+}
+
+struct dvb_frontend *rtl2832_attach(const struct rtl2832_config *cfg,
+ struct i2c_adapter *i2c)
+{
+ struct rtl2832_priv *priv = NULL;
+ int ret = 0;
+ u8 tmp;
+
+ dbg("%s", __func__);
+
+ /* allocate memory for the internal state */
+ priv = kzalloc(sizeof(struct rtl2832_priv), GFP_KERNEL);
+ if (priv == NULL)
+ goto err;
+
+ /* setup the priv */
+ priv->i2c = i2c;
+ priv->tuner = cfg->tuner;
+ memcpy(&priv->cfg, cfg, sizeof(struct rtl2832_config));
+
+ /* check if the demod is there */
+ ret = rtl2832_rd_reg(priv, 0x00, 0x0, &tmp);
+ if (ret)
+ goto err;
+
+ /* create dvb_frontend */
+ memcpy(&priv->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
+ priv->fe.demodulator_priv = priv;
+
+ /* TODO implement sleep mode */
+ priv->sleeping = true;
+
+ return &priv->fe;
+err:
+ dbg("%s: failed=%d", __func__, ret);
+ kfree(priv);
+ return NULL;
+}
+EXPORT_SYMBOL(rtl2832_attach);
+
+static struct dvb_frontend_ops rtl2832_ops = {
+ .delsys = { SYS_DVBT },
+ .info = {
+ .name = "Realtek RTL2832 (DVB-T)",
+ .frequency_min = 174000000,
+ .frequency_max = 862000000,
+ .frequency_stepsize = 166667,
+ .caps = FE_CAN_FEC_1_2 |
+ FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 |
+ FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK |
+ FE_CAN_QAM_16 |
+ FE_CAN_QAM_64 |
+ FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO |
+ FE_CAN_GUARD_INTERVAL_AUTO |
+ FE_CAN_HIERARCHY_AUTO |
+ FE_CAN_RECOVER |
+ FE_CAN_MUTE_TS
+ },
+
+ .release = rtl2832_release,
+
+ .init = rtl2832_init,
+ .sleep = rtl2832_sleep,
+
+ .get_tune_settings = rtl2832_get_tune_settings,
+
+ .set_frontend = rtl2832_set_frontend,
+
+ .read_status = rtl2832_read_status,
+ .read_snr = rtl2832_read_snr,
+ .read_ber = rtl2832_read_ber,
+ .read_ucblocks = rtl2832_read_ucblocks,
+ .read_signal_strength = rtl2832_read_signal_strength,
+ .i2c_gate_ctrl = rtl2832_i2c_gate_ctrl,
+};
+
+MODULE_AUTHOR("Thomas Mair <mair.thomas86@gmail.com>");
+MODULE_DESCRIPTION("Realtek RTL2832 DVB-T demodulator driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.5");
diff --git a/drivers/media/dvb/frontends/rtl2832.h b/drivers/media/dvb/frontends/rtl2832.h
new file mode 100644
index 000000000000..d94dc9a3fa62
--- /dev/null
+++ b/drivers/media/dvb/frontends/rtl2832.h
@@ -0,0 +1,74 @@
+/*
+ * Realtek RTL2832 DVB-T demodulator driver
+ *
+ * Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef RTL2832_H
+#define RTL2832_H
+
+#include <linux/dvb/frontend.h>
+
+struct rtl2832_config {
+ /*
+ * Demodulator I2C address.
+ */
+ u8 i2c_addr;
+
+ /*
+ * Xtal frequency.
+ * Hz
+ * 4000000, 16000000, 25000000, 28800000
+ */
+ u32 xtal;
+
+ /*
+ * IFs for all used modes.
+ * Hz
+ * 4570000, 4571429, 36000000, 36125000, 36166667, 44000000
+ */
+ u32 if_dvbt;
+
+ /*
+ * Tuner identifier, filled in by the bridge driver that calls
+ * rtl2832_attach().
+ */
+ u8 tuner;
+};
+
+
+#if defined(CONFIG_DVB_RTL2832) || \
+ (defined(CONFIG_DVB_RTL2832_MODULE) && defined(MODULE))
+extern struct dvb_frontend *rtl2832_attach(
+ const struct rtl2832_config *cfg,
+ struct i2c_adapter *i2c
+);
+
+extern struct i2c_adapter *rtl2832_get_tuner_i2c_adapter(
+ struct dvb_frontend *fe
+);
+#else
+static inline struct dvb_frontend *rtl2832_attach(
+ const struct rtl2832_config *config,
+ struct i2c_adapter *i2c
+)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+#endif
+
+
+#endif /* RTL2832_H */
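A minimal, hypothetical attach helper for a bridge driver using this header; the I2C address, crystal and IF values are illustrative only:

/* Hypothetical bridge-side helper; "i2c" comes from the bridge driver. */
static struct dvb_frontend *example_attach_rtl2832(struct i2c_adapter *i2c)
{
	static const struct rtl2832_config cfg = {
		.i2c_addr = 0x10,	/* illustrative demod I2C address */
		.xtal     = 28800000,	/* 28.8 MHz crystal */
		.if_dvbt  = 0,		/* zero-IF / direct baseband input */
		.tuner    = 0,		/* bridge-specific tuner identifier */
	};

	return rtl2832_attach(&cfg, i2c);
}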
diff --git a/drivers/media/dvb/frontends/rtl2832_priv.h b/drivers/media/dvb/frontends/rtl2832_priv.h
new file mode 100644
index 000000000000..0ce9502da8ba
--- /dev/null
+++ b/drivers/media/dvb/frontends/rtl2832_priv.h
@@ -0,0 +1,260 @@
+/*
+ * Realtek RTL2832 DVB-T demodulator driver
+ *
+ * Copyright (C) 2012 Thomas Mair <thomas.mair86@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef RTL2832_PRIV_H
+#define RTL2832_PRIV_H
+
+#include "dvb_frontend.h"
+#include "rtl2832.h"
+
+#define LOG_PREFIX "rtl2832"
+
+#undef dbg
+#define dbg(f, arg...) \
+do { \
+ if (rtl2832_debug) \
+ printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg); \
+} while (0)
+#undef err
+#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
+#undef info
+#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
+#undef warn
+#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
+
+struct rtl2832_priv {
+ struct i2c_adapter *i2c;
+ struct dvb_frontend fe;
+ struct rtl2832_config cfg;
+
+ bool i2c_gate_state;
+ bool sleeping;
+
+ u8 tuner;
+ u8 page; /* active register page */
+};
+
+struct rtl2832_reg_entry {
+ u8 page;
+ u8 start_address;
+ u8 msb;
+ u8 lsb;
+};
+
+struct rtl2832_reg_value {
+ int reg;
+ u32 value;
+};
+
+
+/* Demod register bit names */
+enum DVBT_REG_BIT_NAME {
+ DVBT_SOFT_RST,
+ DVBT_IIC_REPEAT,
+ DVBT_TR_WAIT_MIN_8K,
+ DVBT_RSD_BER_FAIL_VAL,
+ DVBT_EN_BK_TRK,
+ DVBT_REG_PI,
+ DVBT_REG_PFREQ_1_0,
+ DVBT_PD_DA8,
+ DVBT_LOCK_TH,
+ DVBT_BER_PASS_SCAL,
+ DVBT_CE_FFSM_BYPASS,
+ DVBT_ALPHAIIR_N,
+ DVBT_ALPHAIIR_DIF,
+ DVBT_EN_TRK_SPAN,
+ DVBT_LOCK_TH_LEN,
+ DVBT_CCI_THRE,
+ DVBT_CCI_MON_SCAL,
+ DVBT_CCI_M0,
+ DVBT_CCI_M1,
+ DVBT_CCI_M2,
+ DVBT_CCI_M3,
+ DVBT_SPEC_INIT_0,
+ DVBT_SPEC_INIT_1,
+ DVBT_SPEC_INIT_2,
+ DVBT_AD_EN_REG,
+ DVBT_AD_EN_REG1,
+ DVBT_EN_BBIN,
+ DVBT_MGD_THD0,
+ DVBT_MGD_THD1,
+ DVBT_MGD_THD2,
+ DVBT_MGD_THD3,
+ DVBT_MGD_THD4,
+ DVBT_MGD_THD5,
+ DVBT_MGD_THD6,
+ DVBT_MGD_THD7,
+ DVBT_EN_CACQ_NOTCH,
+ DVBT_AD_AV_REF,
+ DVBT_PIP_ON,
+ DVBT_SCALE1_B92,
+ DVBT_SCALE1_B93,
+ DVBT_SCALE1_BA7,
+ DVBT_SCALE1_BA9,
+ DVBT_SCALE1_BAA,
+ DVBT_SCALE1_BAB,
+ DVBT_SCALE1_BAC,
+ DVBT_SCALE1_BB0,
+ DVBT_SCALE1_BB1,
+ DVBT_KB_P1,
+ DVBT_KB_P2,
+ DVBT_KB_P3,
+ DVBT_OPT_ADC_IQ,
+ DVBT_AD_AVI,
+ DVBT_AD_AVQ,
+ DVBT_K1_CR_STEP12,
+ DVBT_TRK_KS_P2,
+ DVBT_TRK_KS_I2,
+ DVBT_TR_THD_SET2,
+ DVBT_TRK_KC_P2,
+ DVBT_TRK_KC_I2,
+ DVBT_CR_THD_SET2,
+ DVBT_PSET_IFFREQ,
+ DVBT_SPEC_INV,
+ DVBT_BW_INDEX,
+ DVBT_RSAMP_RATIO,
+ DVBT_CFREQ_OFF_RATIO,
+ DVBT_FSM_STAGE,
+ DVBT_RX_CONSTEL,
+ DVBT_RX_HIER,
+ DVBT_RX_C_RATE_LP,
+ DVBT_RX_C_RATE_HP,
+ DVBT_GI_IDX,
+ DVBT_FFT_MODE_IDX,
+ DVBT_RSD_BER_EST,
+ DVBT_CE_EST_EVM,
+ DVBT_RF_AGC_VAL,
+ DVBT_IF_AGC_VAL,
+ DVBT_DAGC_VAL,
+ DVBT_SFREQ_OFF,
+ DVBT_CFREQ_OFF,
+ DVBT_POLAR_RF_AGC,
+ DVBT_POLAR_IF_AGC,
+ DVBT_AAGC_HOLD,
+ DVBT_EN_RF_AGC,
+ DVBT_EN_IF_AGC,
+ DVBT_IF_AGC_MIN,
+ DVBT_IF_AGC_MAX,
+ DVBT_RF_AGC_MIN,
+ DVBT_RF_AGC_MAX,
+ DVBT_IF_AGC_MAN,
+ DVBT_IF_AGC_MAN_VAL,
+ DVBT_RF_AGC_MAN,
+ DVBT_RF_AGC_MAN_VAL,
+ DVBT_DAGC_TRG_VAL,
+ DVBT_AGC_TARG_VAL,
+ DVBT_LOOP_GAIN_3_0,
+ DVBT_LOOP_GAIN_4,
+ DVBT_VTOP,
+ DVBT_KRF,
+ DVBT_AGC_TARG_VAL_0,
+ DVBT_AGC_TARG_VAL_8_1,
+ DVBT_AAGC_LOOP_GAIN,
+ DVBT_LOOP_GAIN2_3_0,
+ DVBT_LOOP_GAIN2_4,
+ DVBT_LOOP_GAIN3,
+ DVBT_VTOP1,
+ DVBT_VTOP2,
+ DVBT_VTOP3,
+ DVBT_KRF1,
+ DVBT_KRF2,
+ DVBT_KRF3,
+ DVBT_KRF4,
+ DVBT_EN_GI_PGA,
+ DVBT_THD_LOCK_UP,
+ DVBT_THD_LOCK_DW,
+ DVBT_THD_UP1,
+ DVBT_THD_DW1,
+ DVBT_INTER_CNT_LEN,
+ DVBT_GI_PGA_STATE,
+ DVBT_EN_AGC_PGA,
+ DVBT_CKOUTPAR,
+ DVBT_CKOUT_PWR,
+ DVBT_SYNC_DUR,
+ DVBT_ERR_DUR,
+ DVBT_SYNC_LVL,
+ DVBT_ERR_LVL,
+ DVBT_VAL_LVL,
+ DVBT_SERIAL,
+ DVBT_SER_LSB,
+ DVBT_CDIV_PH0,
+ DVBT_CDIV_PH1,
+ DVBT_MPEG_IO_OPT_2_2,
+ DVBT_MPEG_IO_OPT_1_0,
+ DVBT_CKOUTPAR_PIP,
+ DVBT_CKOUT_PWR_PIP,
+ DVBT_SYNC_LVL_PIP,
+ DVBT_ERR_LVL_PIP,
+ DVBT_VAL_LVL_PIP,
+ DVBT_CKOUTPAR_PID,
+ DVBT_CKOUT_PWR_PID,
+ DVBT_SYNC_LVL_PID,
+ DVBT_ERR_LVL_PID,
+ DVBT_VAL_LVL_PID,
+ DVBT_SM_PASS,
+ DVBT_UPDATE_REG_2,
+ DVBT_BTHD_P3,
+ DVBT_BTHD_D3,
+ DVBT_FUNC4_REG0,
+ DVBT_FUNC4_REG1,
+ DVBT_FUNC4_REG2,
+ DVBT_FUNC4_REG3,
+ DVBT_FUNC4_REG4,
+ DVBT_FUNC4_REG5,
+ DVBT_FUNC4_REG6,
+ DVBT_FUNC4_REG7,
+ DVBT_FUNC4_REG8,
+ DVBT_FUNC4_REG9,
+ DVBT_FUNC4_REG10,
+ DVBT_FUNC5_REG0,
+ DVBT_FUNC5_REG1,
+ DVBT_FUNC5_REG2,
+ DVBT_FUNC5_REG3,
+ DVBT_FUNC5_REG4,
+ DVBT_FUNC5_REG5,
+ DVBT_FUNC5_REG6,
+ DVBT_FUNC5_REG7,
+ DVBT_FUNC5_REG8,
+ DVBT_FUNC5_REG9,
+ DVBT_FUNC5_REG10,
+ DVBT_FUNC5_REG11,
+ DVBT_FUNC5_REG12,
+ DVBT_FUNC5_REG13,
+ DVBT_FUNC5_REG14,
+ DVBT_FUNC5_REG15,
+ DVBT_FUNC5_REG16,
+ DVBT_FUNC5_REG17,
+ DVBT_FUNC5_REG18,
+ DVBT_AD7_SETTING,
+ DVBT_RSSI_R,
+ DVBT_ACI_DET_IND,
+ DVBT_REG_MON,
+ DVBT_REG_MONSEL,
+ DVBT_REG_GPE,
+ DVBT_REG_GPO,
+ DVBT_REG_4MSEL,
+ DVBT_TEST_REG_1,
+ DVBT_TEST_REG_2,
+ DVBT_TEST_REG_3,
+ DVBT_TEST_REG_4,
+ DVBT_REG_BIT_NAME_ITEM_TERMINATOR,
+};
+
+#endif /* RTL2832_PRIV_H */
diff --git a/drivers/media/dvb/frontends/s5h1420.c b/drivers/media/dvb/frontends/s5h1420.c
index 2322257c69ae..e2fec9ebf947 100644
--- a/drivers/media/dvb/frontends/s5h1420.c
+++ b/drivers/media/dvb/frontends/s5h1420.c
@@ -634,7 +634,6 @@ static int s5h1420_set_frontend(struct dvb_frontend *fe)
struct s5h1420_state* state = fe->demodulator_priv;
int frequency_delta;
struct dvb_frontend_tune_settings fesettings;
- uint8_t clock_setting;
dprintk("enter %s\n", __func__);
@@ -679,25 +678,6 @@ static int s5h1420_set_frontend(struct dvb_frontend *fe)
else
state->fclk = 44000000;
- /* Clock */
- switch (state->fclk) {
- default:
- case 88000000:
- clock_setting = 80;
- break;
- case 86000000:
- clock_setting = 78;
- break;
- case 80000000:
- clock_setting = 72;
- break;
- case 59000000:
- clock_setting = 51;
- break;
- case 44000000:
- clock_setting = 36;
- break;
- }
dprintk("pll01: %d, ToneFreq: %d\n", state->fclk/1000000 - 8, (state->fclk + (TONE_FREQ * 32) - 1) / (TONE_FREQ * 32));
s5h1420_writereg(state, PLL01, state->fclk/1000000 - 8);
s5h1420_writereg(state, PLL02, 0x40);
diff --git a/drivers/media/dvb/frontends/stb0899_drv.c b/drivers/media/dvb/frontends/stb0899_drv.c
index 8b0dc74a3298..5d7f8a9b451b 100644
--- a/drivers/media/dvb/frontends/stb0899_drv.c
+++ b/drivers/media/dvb/frontends/stb0899_drv.c
@@ -1129,7 +1129,6 @@ static int stb0899_read_ber(struct dvb_frontend *fe, u32 *ber)
struct stb0899_internal *internal = &state->internal;
u8 lsb, msb;
- u32 i;
*ber = 0;
@@ -1137,14 +1136,9 @@ static int stb0899_read_ber(struct dvb_frontend *fe, u32 *ber)
case SYS_DVBS:
case SYS_DSS:
if (internal->lock) {
- /* average 5 BER values */
- for (i = 0; i < 5; i++) {
- msleep(100);
- lsb = stb0899_read_reg(state, STB0899_ECNT1L);
- msb = stb0899_read_reg(state, STB0899_ECNT1M);
- *ber += MAKEWORD16(msb, lsb);
- }
- *ber /= 5;
+ lsb = stb0899_read_reg(state, STB0899_ECNT1L);
+ msb = stb0899_read_reg(state, STB0899_ECNT1M);
+ *ber = MAKEWORD16(msb, lsb);
/* Viterbi Check */
if (STB0899_GETFIELD(VSTATUS_PRFVIT, internal->v_status)) {
/* Error Rate */
@@ -1157,13 +1151,9 @@ static int stb0899_read_ber(struct dvb_frontend *fe, u32 *ber)
break;
case SYS_DVBS2:
if (internal->lock) {
- /* Average 5 PER values */
- for (i = 0; i < 5; i++) {
- msleep(100);
- lsb = stb0899_read_reg(state, STB0899_ECNT1L);
- msb = stb0899_read_reg(state, STB0899_ECNT1M);
- *ber += MAKEWORD16(msb, lsb);
- }
+ lsb = stb0899_read_reg(state, STB0899_ECNT1L);
+ msb = stb0899_read_reg(state, STB0899_ECNT1M);
+ *ber = MAKEWORD16(msb, lsb);
/* ber = ber * 10 ^ 7 */
*ber *= 10000000;
*ber /= (-1 + (1 << (4 + 2 * STB0899_GETFIELD(NOE, internal->err_ctrl))));
diff --git a/drivers/media/dvb/frontends/stv0367.c b/drivers/media/dvb/frontends/stv0367.c
index fdd20c7737b5..2a8aaeb1112d 100644
--- a/drivers/media/dvb/frontends/stv0367.c
+++ b/drivers/media/dvb/frontends/stv0367.c
@@ -1584,7 +1584,7 @@ static int stv0367ter_algo(struct dvb_frontend *fe)
struct stv0367ter_state *ter_state = state->ter_state;
int offset = 0, tempo = 0;
u8 u_var;
- u8 /*constell,*/ counter, tps_rcvd[2];
+ u8 /*constell,*/ counter;
s8 step;
s32 timing_offset = 0;
u32 trl_nomrate = 0, InternalFreq = 0, temp = 0;
@@ -1709,9 +1709,6 @@ static int stv0367ter_algo(struct dvb_frontend *fe)
return 0;
ter_state->state = FE_TER_LOCKOK;
- /* update results */
- tps_rcvd[0] = stv0367_readreg(state, R367TER_TPS_RCVD2);
- tps_rcvd[1] = stv0367_readreg(state, R367TER_TPS_RCVD3);
ter_state->mode = stv0367_readbits(state, F367TER_SYR_MODE);
ter_state->guard = stv0367_readbits(state, F367TER_SYR_GUARD);
diff --git a/drivers/media/dvb/frontends/stv090x.c b/drivers/media/dvb/frontends/stv090x.c
index d79e69f65cbb..ea86a5603e57 100644
--- a/drivers/media/dvb/frontends/stv090x.c
+++ b/drivers/media/dvb/frontends/stv090x.c
@@ -3172,7 +3172,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
enum stv090x_signal_state signal_state = STV090x_NOCARRIER;
u32 reg;
s32 agc1_power, power_iq = 0, i;
- int lock = 0, low_sr = 0, no_signal = 0;
+ int lock = 0, low_sr = 0;
reg = STV090x_READ_DEMOD(state, TSCFGH);
STV090x_SETFIELD_Px(reg, RST_HWARE_FIELD, 1); /* Stop path 1 stream merger */
@@ -3413,7 +3413,7 @@ static enum stv090x_signal_state stv090x_algo(struct stv090x_state *state)
goto err;
} else {
signal_state = STV090x_NODATA;
- no_signal = stv090x_chk_signal(state);
+ stv090x_chk_signal(state);
}
}
return signal_state;
diff --git a/drivers/media/dvb/frontends/tda10071.c b/drivers/media/dvb/frontends/tda10071.c
index c21bc92d2811..703c3d05f9f4 100644
--- a/drivers/media/dvb/frontends/tda10071.c
+++ b/drivers/media/dvb/frontends/tda10071.c
@@ -20,10 +20,6 @@
#include "tda10071_priv.h"
-int tda10071_debug;
-module_param_named(debug, tda10071_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
-
static struct dvb_frontend_ops tda10071_ops;
/* write multiple registers */
@@ -48,7 +44,8 @@ static int tda10071_wr_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
if (ret == 1) {
ret = 0;
} else {
- warn("i2c wr failed=%d reg=%02x len=%d", ret, reg, len);
+ dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
+ "len=%d\n", KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -79,7 +76,8 @@ static int tda10071_rd_regs(struct tda10071_priv *priv, u8 reg, u8 *val,
memcpy(val, buf, len);
ret = 0;
} else {
- warn("i2c rd failed=%d reg=%02x len=%d", ret, reg, len);
+ dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
+ "len=%d\n", KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -170,7 +168,7 @@ static int tda10071_cmd_execute(struct tda10071_priv *priv,
usleep_range(200, 5000);
}
- dbg("%s: loop=%d", __func__, i);
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -179,7 +177,7 @@ static int tda10071_cmd_execute(struct tda10071_priv *priv,
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -196,7 +194,8 @@ static int tda10071_set_tone(struct dvb_frontend *fe,
goto error;
}
- dbg("%s: tone_mode=%d", __func__, fe_sec_tone_mode);
+ dev_dbg(&priv->i2c->dev, "%s: tone_mode=%d\n", __func__,
+ fe_sec_tone_mode);
switch (fe_sec_tone_mode) {
case SEC_TONE_ON:
@@ -206,24 +205,25 @@ static int tda10071_set_tone(struct dvb_frontend *fe,
tone = 0;
break;
default:
- dbg("%s: invalid fe_sec_tone_mode", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_tone_mode\n",
+ __func__);
ret = -EINVAL;
goto error;
}
- cmd.args[0x00] = CMD_LNB_PCB_CONFIG;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 0x00;
- cmd.args[0x03] = 0x00;
- cmd.args[0x04] = tone;
- cmd.len = 0x05;
+ cmd.args[0] = CMD_LNB_PCB_CONFIG;
+ cmd.args[1] = 0;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = tone;
+ cmd.len = 5;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -240,7 +240,7 @@ static int tda10071_set_voltage(struct dvb_frontend *fe,
goto error;
}
- dbg("%s: voltage=%d", __func__, fe_sec_voltage);
+ dev_dbg(&priv->i2c->dev, "%s: voltage=%d\n", __func__, fe_sec_voltage);
switch (fe_sec_voltage) {
case SEC_VOLTAGE_13:
@@ -253,22 +253,23 @@ static int tda10071_set_voltage(struct dvb_frontend *fe,
voltage = 0;
break;
default:
- dbg("%s: invalid fe_sec_voltage", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_voltage\n",
+ __func__);
ret = -EINVAL;
goto error;
};
- cmd.args[0x00] = CMD_LNB_SET_DC_LEVEL;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = voltage;
- cmd.len = 0x03;
+ cmd.args[0] = CMD_LNB_SET_DC_LEVEL;
+ cmd.args[1] = 0;
+ cmd.args[2] = voltage;
+ cmd.len = 3;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -285,9 +286,10 @@ static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
goto error;
}
- dbg("%s: msg_len=%d", __func__, diseqc_cmd->msg_len);
+ dev_dbg(&priv->i2c->dev, "%s: msg_len=%d\n", __func__,
+ diseqc_cmd->msg_len);
- if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 16) {
+ if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
ret = -EINVAL;
goto error;
}
@@ -301,7 +303,7 @@ static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
usleep_range(10000, 20000);
}
- dbg("%s: loop=%d", __func__, i);
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -312,22 +314,22 @@ static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
if (ret)
goto error;
- cmd.args[0x00] = CMD_LNB_SEND_DISEQC;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 0;
- cmd.args[0x03] = 0;
- cmd.args[0x04] = 2;
- cmd.args[0x05] = 0;
- cmd.args[0x06] = diseqc_cmd->msg_len;
- memcpy(&cmd.args[0x07], diseqc_cmd->msg, diseqc_cmd->msg_len);
- cmd.len = 0x07 + diseqc_cmd->msg_len;
+ cmd.args[0] = CMD_LNB_SEND_DISEQC;
+ cmd.args[1] = 0;
+ cmd.args[2] = 0;
+ cmd.args[3] = 0;
+ cmd.args[4] = 2;
+ cmd.args[5] = 0;
+ cmd.args[6] = diseqc_cmd->msg_len;
+ memcpy(&cmd.args[7], diseqc_cmd->msg, diseqc_cmd->msg_len);
+ cmd.len = 7 + diseqc_cmd->msg_len;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -344,7 +346,7 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
goto error;
}
- dbg("%s:", __func__);
+ dev_dbg(&priv->i2c->dev, "%s:\n", __func__);
/* wait LNB RX */
for (i = 500, tmp = 0; i && !tmp; i--) {
@@ -355,7 +357,7 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
usleep_range(10000, 20000);
}
- dbg("%s: loop=%d", __func__, i);
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -372,9 +374,9 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
reply->msg_len = sizeof(reply->msg); /* truncate API max */
/* read reply */
- cmd.args[0x00] = CMD_LNB_UPDATE_REPLY;
- cmd.args[0x01] = 0;
- cmd.len = 0x02;
+ cmd.args[0] = CMD_LNB_UPDATE_REPLY;
+ cmd.args[1] = 0;
+ cmd.len = 2;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -385,7 +387,7 @@ static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -402,7 +404,8 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
goto error;
}
- dbg("%s: fe_sec_mini_cmd=%d", __func__, fe_sec_mini_cmd);
+ dev_dbg(&priv->i2c->dev, "%s: fe_sec_mini_cmd=%d\n", __func__,
+ fe_sec_mini_cmd);
switch (fe_sec_mini_cmd) {
case SEC_MINI_A:
@@ -412,7 +415,8 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
burst = 1;
break;
default:
- dbg("%s: invalid fe_sec_mini_cmd", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid fe_sec_mini_cmd\n",
+ __func__);
ret = -EINVAL;
goto error;
}
@@ -426,7 +430,7 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
usleep_range(10000, 20000);
}
- dbg("%s: loop=%d", __func__, i);
+ dev_dbg(&priv->i2c->dev, "%s: loop=%d\n", __func__, i);
if (i == 0) {
ret = -ETIMEDOUT;
@@ -437,17 +441,17 @@ static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
if (ret)
goto error;
- cmd.args[0x00] = CMD_LNB_SEND_TONEBURST;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = burst;
- cmd.len = 0x03;
+ cmd.args[0] = CMD_LNB_SEND_TONEBURST;
+ cmd.args[1] = 0;
+ cmd.args[2] = burst;
+ cmd.len = 3;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -481,7 +485,7 @@ static int tda10071_read_status(struct dvb_frontend *fe, fe_status_t *status)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -506,7 +510,7 @@ static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -523,9 +527,9 @@ static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
goto error;
}
- cmd.args[0x00] = CMD_GET_AGCACC;
- cmd.args[0x01] = 0;
- cmd.len = 0x02;
+ cmd.args[0] = CMD_GET_AGCACC;
+ cmd.args[1] = 0;
+ cmd.len = 2;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -545,7 +549,7 @@ static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -583,17 +587,18 @@ static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
goto error;
if (priv->meas_count[i] == tmp) {
- dbg("%s: meas not ready=%02x", __func__, tmp);
+ dev_dbg(&priv->i2c->dev, "%s: meas not ready=%02x\n", __func__,
+ tmp);
*ber = priv->ber;
return 0;
} else {
priv->meas_count[i] = tmp;
}
- cmd.args[0x00] = CMD_BER_UPDATE_COUNTERS;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = i;
- cmd.len = 0x03;
+ cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
+ cmd.args[1] = 0;
+ cmd.args[2] = i;
+ cmd.len = 3;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -612,7 +617,7 @@ static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -632,7 +637,7 @@ static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -644,10 +649,11 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
- dbg("%s: delivery_system=%d modulation=%d frequency=%d " \
- "symbol_rate=%d inversion=%d pilot=%d rolloff=%d", __func__,
- c->delivery_system, c->modulation, c->frequency,
- c->symbol_rate, c->inversion, c->pilot, c->rolloff);
+ dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d modulation=%d " \
+ "frequency=%d symbol_rate=%d inversion=%d pilot=%d " \
+ "rolloff=%d\n", __func__, c->delivery_system, c->modulation,
+ c->frequency, c->symbol_rate, c->inversion, c->pilot,
+ c->rolloff);
priv->delivery_system = SYS_UNDEFINED;
@@ -669,7 +675,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
inversion = 3;
break;
default:
- dbg("%s: invalid inversion", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid inversion\n", __func__);
ret = -EINVAL;
goto error;
}
@@ -692,7 +698,8 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
break;
case ROLLOFF_AUTO:
default:
- dbg("%s: invalid rolloff", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid rolloff\n",
+ __func__);
ret = -EINVAL;
goto error;
}
@@ -708,13 +715,15 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
pilot = 2;
break;
default:
- dbg("%s: invalid pilot", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid pilot\n",
+ __func__);
ret = -EINVAL;
goto error;
}
break;
default:
- dbg("%s: invalid delivery_system", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid delivery_system\n",
+ __func__);
ret = -EINVAL;
goto error;
}
@@ -724,13 +733,15 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
c->modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
- dbg("%s: mode found=%02x", __func__, mode);
+ dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
+ __func__, mode);
break;
}
}
if (mode == 0xff) {
- dbg("%s: invalid parameter combination", __func__);
+ dev_dbg(&priv->i2c->dev, "%s: invalid parameter combination\n",
+ __func__);
ret = -EINVAL;
goto error;
}
@@ -748,22 +759,22 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
if (ret)
goto error;
- cmd.args[0x00] = CMD_CHANGE_CHANNEL;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = mode;
- cmd.args[0x03] = (c->frequency >> 16) & 0xff;
- cmd.args[0x04] = (c->frequency >> 8) & 0xff;
- cmd.args[0x05] = (c->frequency >> 0) & 0xff;
- cmd.args[0x06] = ((c->symbol_rate / 1000) >> 8) & 0xff;
- cmd.args[0x07] = ((c->symbol_rate / 1000) >> 0) & 0xff;
- cmd.args[0x08] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
- cmd.args[0x09] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
- cmd.args[0x0a] = rolloff;
- cmd.args[0x0b] = inversion;
- cmd.args[0x0c] = pilot;
- cmd.args[0x0d] = 0x00;
- cmd.args[0x0e] = 0x00;
- cmd.len = 0x0f;
+ cmd.args[0] = CMD_CHANGE_CHANNEL;
+ cmd.args[1] = 0;
+ cmd.args[2] = mode;
+ cmd.args[3] = (c->frequency >> 16) & 0xff;
+ cmd.args[4] = (c->frequency >> 8) & 0xff;
+ cmd.args[5] = (c->frequency >> 0) & 0xff;
+ cmd.args[6] = ((c->symbol_rate / 1000) >> 8) & 0xff;
+ cmd.args[7] = ((c->symbol_rate / 1000) >> 0) & 0xff;
+ cmd.args[8] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
+ cmd.args[9] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
+ cmd.args[10] = rolloff;
+ cmd.args[11] = inversion;
+ cmd.args[12] = pilot;
+ cmd.args[13] = 0x00;
+ cmd.args[14] = 0x00;
+ cmd.len = 15;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -772,7 +783,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -829,7 +840,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -915,10 +926,10 @@ static int tda10071_init(struct dvb_frontend *fe)
goto error;
}
- cmd.args[0x00] = CMD_SET_SLEEP_MODE;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 0;
- cmd.len = 0x03;
+ cmd.args[0] = CMD_SET_SLEEP_MODE;
+ cmd.args[1] = 0;
+ cmd.args[2] = 0;
+ cmd.len = 3;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -929,10 +940,11 @@ static int tda10071_init(struct dvb_frontend *fe)
/* request the firmware, this will block and timeout */
ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
if (ret) {
- err("did not find the firmware file. (%s) "
- "Please see linux/Documentation/dvb/ for more" \
- " details on firmware-problems. (%d)",
- fw_file, ret);
+ dev_err(&priv->i2c->dev, "%s: did not find the " \
+ "firmware file. (%s) Please see " \
+ "linux/Documentation/dvb/ for more " \
+ "details on firmware-problems. (%d)\n",
+ KBUILD_MODNAME, fw_file, ret);
goto error;
}
@@ -961,10 +973,11 @@ static int tda10071_init(struct dvb_frontend *fe)
if (ret)
goto error_release_firmware;
- info("found a '%s' in cold state, will try to load a firmware",
- tda10071_ops.info.name);
-
- info("downloading firmware from file '%s'", fw_file);
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in cold state, " \
+ "will try to load a firmware\n", KBUILD_MODNAME,
+ tda10071_ops.info.name);
+ dev_info(&priv->i2c->dev, "%s: downloading firmware from " \
+ "file '%s'\n", KBUILD_MODNAME, fw_file);
/* do not download last byte */
fw_size = fw->size - 1;
@@ -978,7 +991,9 @@ static int tda10071_init(struct dvb_frontend *fe)
ret = tda10071_wr_regs(priv, 0xfa,
(u8 *) &fw->data[fw_size - remaining], len);
if (ret) {
- err("firmware download failed=%d", ret);
+ dev_err(&priv->i2c->dev, "%s: firmware " \
+ "download failed=%d\n",
+ KBUILD_MODNAME, ret);
if (ret)
goto error_release_firmware;
}
@@ -1002,15 +1017,16 @@ static int tda10071_init(struct dvb_frontend *fe)
goto error;
if (tmp) {
- info("firmware did not run");
+ dev_info(&priv->i2c->dev, "%s: firmware did not run\n",
+ KBUILD_MODNAME);
ret = -EFAULT;
goto error;
} else {
priv->warm = 1;
}
- cmd.args[0x00] = CMD_GET_FW_VERSION;
- cmd.len = 0x01;
+ cmd.args[0] = CMD_GET_FW_VERSION;
+ cmd.len = 1;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -1019,54 +1035,55 @@ static int tda10071_init(struct dvb_frontend *fe)
if (ret)
goto error;
- info("firmware version %d.%d.%d.%d",
- buf[0], buf[1], buf[2], buf[3]);
- info("found a '%s' in warm state.", tda10071_ops.info.name);
+ dev_info(&priv->i2c->dev, "%s: firmware version %d.%d.%d.%d\n",
+ KBUILD_MODNAME, buf[0], buf[1], buf[2], buf[3]);
+ dev_info(&priv->i2c->dev, "%s: found a '%s' in warm state\n",
+ KBUILD_MODNAME, tda10071_ops.info.name);
ret = tda10071_rd_regs(priv, 0x81, buf, 2);
if (ret)
goto error;
- cmd.args[0x00] = CMD_DEMOD_INIT;
- cmd.args[0x01] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
- cmd.args[0x02] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
- cmd.args[0x03] = buf[0];
- cmd.args[0x04] = buf[1];
- cmd.args[0x05] = priv->cfg.pll_multiplier;
- cmd.args[0x06] = priv->cfg.spec_inv;
- cmd.args[0x07] = 0x00;
- cmd.len = 0x08;
+ cmd.args[0] = CMD_DEMOD_INIT;
+ cmd.args[1] = ((priv->cfg.xtal / 1000) >> 8) & 0xff;
+ cmd.args[2] = ((priv->cfg.xtal / 1000) >> 0) & 0xff;
+ cmd.args[3] = buf[0];
+ cmd.args[4] = buf[1];
+ cmd.args[5] = priv->cfg.pll_multiplier;
+ cmd.args[6] = priv->cfg.spec_inv;
+ cmd.args[7] = 0x00;
+ cmd.len = 8;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
- cmd.args[0x00] = CMD_TUNER_INIT;
- cmd.args[0x01] = 0x00;
- cmd.args[0x02] = 0x00;
- cmd.args[0x03] = 0x00;
- cmd.args[0x04] = 0x00;
- cmd.args[0x05] = 0x14;
- cmd.args[0x06] = 0x00;
- cmd.args[0x07] = 0x03;
- cmd.args[0x08] = 0x02;
- cmd.args[0x09] = 0x02;
- cmd.args[0x0a] = 0x00;
- cmd.args[0x0b] = 0x00;
- cmd.args[0x0c] = 0x00;
- cmd.args[0x0d] = 0x00;
- cmd.args[0x0e] = 0x00;
- cmd.len = 0x0f;
+ cmd.args[0] = CMD_TUNER_INIT;
+ cmd.args[1] = 0x00;
+ cmd.args[2] = 0x00;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x00;
+ cmd.args[5] = 0x14;
+ cmd.args[6] = 0x00;
+ cmd.args[7] = 0x03;
+ cmd.args[8] = 0x02;
+ cmd.args[9] = 0x02;
+ cmd.args[10] = 0x00;
+ cmd.args[11] = 0x00;
+ cmd.args[12] = 0x00;
+ cmd.args[13] = 0x00;
+ cmd.args[14] = 0x00;
+ cmd.len = 15;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
- cmd.args[0x00] = CMD_MPEG_CONFIG;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = priv->cfg.ts_mode;
- cmd.args[0x03] = 0x00;
- cmd.args[0x04] = 0x04;
- cmd.args[0x05] = 0x00;
- cmd.len = 0x06;
+ cmd.args[0] = CMD_MPEG_CONFIG;
+ cmd.args[1] = 0;
+ cmd.args[2] = priv->cfg.ts_mode;
+ cmd.args[3] = 0x00;
+ cmd.args[4] = 0x04;
+ cmd.args[5] = 0x00;
+ cmd.len = 6;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -1075,27 +1092,27 @@ static int tda10071_init(struct dvb_frontend *fe)
if (ret)
goto error;
- cmd.args[0x00] = CMD_LNB_CONFIG;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 150;
- cmd.args[0x03] = 3;
- cmd.args[0x04] = 22;
- cmd.args[0x05] = 1;
- cmd.args[0x06] = 1;
- cmd.args[0x07] = 30;
- cmd.args[0x08] = 30;
- cmd.args[0x09] = 30;
- cmd.args[0x0a] = 30;
- cmd.len = 0x0b;
+ cmd.args[0] = CMD_LNB_CONFIG;
+ cmd.args[1] = 0;
+ cmd.args[2] = 150;
+ cmd.args[3] = 3;
+ cmd.args[4] = 22;
+ cmd.args[5] = 1;
+ cmd.args[6] = 1;
+ cmd.args[7] = 30;
+ cmd.args[8] = 30;
+ cmd.args[9] = 30;
+ cmd.args[10] = 30;
+ cmd.len = 11;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
- cmd.args[0x00] = CMD_BER_CONTROL;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 14;
- cmd.args[0x03] = 14;
- cmd.len = 0x04;
+ cmd.args[0] = CMD_BER_CONTROL;
+ cmd.args[1] = 0;
+ cmd.args[2] = 14;
+ cmd.args[3] = 14;
+ cmd.len = 4;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -1105,7 +1122,7 @@ static int tda10071_init(struct dvb_frontend *fe)
error_release_firmware:
release_firmware(fw);
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -1132,10 +1149,10 @@ static int tda10071_sleep(struct dvb_frontend *fe)
goto error;
}
- cmd.args[0x00] = CMD_SET_SLEEP_MODE;
- cmd.args[0x01] = 0;
- cmd.args[0x02] = 1;
- cmd.len = 0x03;
+ cmd.args[0] = CMD_SET_SLEEP_MODE;
+ cmd.args[1] = 0;
+ cmd.args[2] = 1;
+ cmd.len = 3;
ret = tda10071_cmd_execute(priv, &cmd);
if (ret)
goto error;
@@ -1149,7 +1166,7 @@ static int tda10071_sleep(struct dvb_frontend *fe)
return ret;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&priv->i2c->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
@@ -1208,7 +1225,7 @@ struct dvb_frontend *tda10071_attach(const struct tda10071_config *config,
return &priv->fe;
error:
- dbg("%s: failed=%d", __func__, ret);
+ dev_dbg(&i2c->dev, "%s: failed=%d\n", __func__, ret);
kfree(priv);
return NULL;
}
diff --git a/drivers/media/dvb/frontends/tda10071_priv.h b/drivers/media/dvb/frontends/tda10071_priv.h
index 93c5e6317f07..0fa85cfa70c2 100644
--- a/drivers/media/dvb/frontends/tda10071_priv.h
+++ b/drivers/media/dvb/frontends/tda10071_priv.h
@@ -25,19 +25,6 @@
#include "tda10071.h"
#include <linux/firmware.h>
-#define LOG_PREFIX "tda10071"
-
-#undef dbg
-#define dbg(f, arg...) \
- if (tda10071_debug) \
- printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef err
-#define err(f, arg...) printk(KERN_ERR LOG_PREFIX": " f "\n" , ## arg)
-#undef info
-#define info(f, arg...) printk(KERN_INFO LOG_PREFIX": " f "\n" , ## arg)
-#undef warn
-#define warn(f, arg...) printk(KERN_WARNING LOG_PREFIX": " f "\n" , ## arg)
-
struct tda10071_priv {
struct i2c_adapter *i2c;
struct dvb_frontend fe;
@@ -112,7 +99,7 @@ struct tda10071_reg_val_mask {
#define CMD_BER_UPDATE_COUNTERS 0x3f
/* firmware command struct */
-#define TDA10071_ARGLEN 0x1e
+#define TDA10071_ARGLEN 30
struct tda10071_cmd {
u8 args[TDA10071_ARGLEN];
u8 len;
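Editor's note: the hunks above retire the driver-private dbg()/err()/info()/warn() printk wrappers in favour of the generic dev_dbg()/dev_err()/dev_info() helpers, which take the I2C client's struct device and so prefix every message with the actual bus id instead of a fixed "tda10071" string. A minimal sketch of the pattern, with an invented function name (illustrative only, not part of the patch):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>

static int example_step(struct i2c_client *client, int val)
{
	/* was: dbg("%s: val=%d", __func__, val); */
	dev_dbg(&client->dev, "%s: val=%d\n", __func__, val);

	if (val < 0) {
		/* was: err("invalid val=%d", val); */
		dev_err(&client->dev, "invalid val=%d\n", val);
		return -EINVAL;
	}
	return 0;
}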
diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
index 7539a5d71029..72ee8de02260 100644
--- a/drivers/media/dvb/ngene/ngene-cards.c
+++ b/drivers/media/dvb/ngene/ngene-cards.c
@@ -217,6 +217,7 @@ static int demod_attach_drxk(struct ngene_channel *chan,
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
+ config.qam_demod_parameter_count = 4;
config.adr = 0x29 + (chan->number ^ 2);
chan->fe = dvb_attach(drxk_attach, &config, i2c);
diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c
index 7331e8450d1a..9cc55546cc30 100644
--- a/drivers/media/dvb/siano/smscoreapi.c
+++ b/drivers/media/dvb/siano/smscoreapi.c
@@ -276,16 +276,13 @@ static void smscore_notify_clients(struct smscore_device_t *coredev)
static int smscore_notify_callbacks(struct smscore_device_t *coredev,
struct device *device, int arrival)
{
- struct list_head *next, *first;
+ struct smscore_device_notifyee_t *elem;
int rc = 0;
/* note: must be called under g_deviceslock */
- first = &g_smscore_notifyees;
-
- for (next = first->next; next != first; next = next->next) {
- rc = ((struct smscore_device_notifyee_t *) next)->
- hotplug(coredev, device, arrival);
+ list_for_each_entry(elem, &g_smscore_notifyees, entry) {
+ rc = elem->hotplug(coredev, device, arrival);
if (rc < 0)
break;
}
@@ -940,29 +937,25 @@ static struct
smscore_client_t *smscore_find_client(struct smscore_device_t *coredev,
int data_type, int id)
{
- struct smscore_client_t *client = NULL;
- struct list_head *next, *first;
+ struct list_head *first;
+ struct smscore_client_t *client;
unsigned long flags;
- struct list_head *firstid, *nextid;
-
+ struct list_head *firstid;
+ struct smscore_idlist_t *client_id;
spin_lock_irqsave(&coredev->clientslock, flags);
first = &coredev->clients;
- for (next = first->next;
- (next != first) && !client;
- next = next->next) {
- firstid = &((struct smscore_client_t *)next)->idlist;
- for (nextid = firstid->next;
- nextid != firstid;
- nextid = nextid->next) {
- if ((((struct smscore_idlist_t *)nextid)->id == id) &&
- (((struct smscore_idlist_t *)nextid)->data_type == data_type ||
- (((struct smscore_idlist_t *)nextid)->data_type == 0))) {
- client = (struct smscore_client_t *) next;
- break;
- }
+ list_for_each_entry(client, first, entry) {
+ firstid = &client->idlist;
+ list_for_each_entry(client_id, firstid, entry) {
+ if ((client_id->id == id) &&
+ (client_id->data_type == data_type ||
+ (client_id->data_type == 0)))
+ goto found;
}
}
+ client = NULL;
+found:
spin_unlock_irqrestore(&coredev->clientslock, flags);
return client;
}
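Editor's note: the two smscoreapi.c hunks above replace open-coded list_head walks with explicit casts by list_for_each_entry(), which resolves the containing structure via container_of() and keeps the iteration type-safe. A small, self-contained sketch of the idiom (struct and list names invented):

#include <linux/list.h>

struct notifyee {
	struct list_head entry;		/* links into the notifyee list */
	int (*hotplug)(int arrival);
};

static LIST_HEAD(example_notifyees);

static int example_notify_all(int arrival)
{
	struct notifyee *elem;
	int rc = 0;

	/* walks every 'struct notifyee' linked through ->entry */
	list_for_each_entry(elem, &example_notifyees, entry) {
		rc = elem->hotplug(arrival);
		if (rc < 0)
			break;
	}
	return rc;
}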
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 664e460f247b..aac622200e99 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf)
return 0;
}
-static const struct usb_device_id smsusb_id_table[] __devinitconst = {
+static const struct usb_device_id smsusb_id_table[] = {
{ USB_DEVICE(0x187f, 0x0010),
.driver_info = SMS1XXX_BOARD_SIANO_STELLAR },
{ USB_DEVICE(0x187f, 0x0100),
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index c257da13d766..8090b87b3066 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -5,6 +5,7 @@
menuconfig RADIO_ADAPTERS
bool "Radio Adapters"
depends on VIDEO_V4L2
+ depends on MEDIA_RADIO_SUPPORT
default y
---help---
Say Y here to enable selecting AM/FM radio adapters.
@@ -56,6 +57,39 @@ config RADIO_MAXIRADIO
To compile this driver as a module, choose M here: the
module will be called radio-maxiradio.
+config RADIO_SHARK
+ tristate "Griffin radioSHARK USB radio receiver"
+ depends on USB && SND
+ ---help---
+ Choose Y here if you have this radio receiver.
+
+ There are 2 versions of this device, this driver is for version 1,
+ which is white.
+
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux API. Information on
+ this API and pointers to "v4l" programs may be found at
+ <file:Documentation/video4linux/API.html>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-shark.
+
+config RADIO_SHARK2
+ tristate "Griffin radioSHARK2 USB radio receiver"
+ depends on USB
+ ---help---
+ Choose Y here if you have this radio receiver.
+
+ There are 2 versions of this device, this driver is for version 2,
+ which is black.
+
+ In order to control your radio card, you will need to use programs
+ that are compatible with the Video For Linux API. Information on
+ this API and pointers to "v4l" programs may be found at
+ <file:Documentation/video4linux/API.html>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called radio-shark2.
config I2C_SI4713
tristate "I2C driver for Silicon Labs Si4713 device"
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index ca8c7d134b95..c03ce4fe74e9 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_RADIO_CADET) += radio-cadet.o
obj-$(CONFIG_RADIO_TYPHOON) += radio-typhoon.o
obj-$(CONFIG_RADIO_TERRATEC) += radio-terratec.o
obj-$(CONFIG_RADIO_MAXIRADIO) += radio-maxiradio.o
+obj-$(CONFIG_RADIO_SHARK) += radio-shark.o
+obj-$(CONFIG_RADIO_SHARK2) += shark2.o
obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
@@ -29,4 +31,6 @@ obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
+shark2-objs := radio-shark2.o radio-tea5777.o
+
ccflags-y += -Isound
diff --git a/drivers/media/radio/lm7000.h b/drivers/media/radio/lm7000.h
new file mode 100644
index 000000000000..139cd6b68824
--- /dev/null
+++ b/drivers/media/radio/lm7000.h
@@ -0,0 +1,43 @@
+#ifndef __LM7000_H
+#define __LM7000_H
+
+/* Sanyo LM7000 tuner chip control
+ *
+ * Copyright 2012 Ondrej Zary <linux@rainbow-software.org>
+ * based on radio-aimslab.c by M. Kirkwood
+ * and radio-sf16fmi.c by M. Kirkwood and Petr Vandrovec
+ */
+
+#define LM7000_DATA (1 << 0)
+#define LM7000_CLK (1 << 1)
+#define LM7000_CE (1 << 2)
+
+#define LM7000_FM_100 (0 << 20)
+#define LM7000_FM_50 (1 << 20)
+#define LM7000_FM_25 (2 << 20)
+#define LM7000_BIT_FM (1 << 23)
+
+static inline void lm7000_set_freq(u32 freq, void *handle,
+ void (*set_pins)(void *handle, u8 pins))
+{
+ int i;
+ u8 data;
+ u32 val;
+
+ freq += 171200; /* Add 10.7 MHz IF */
+ freq /= 400; /* Convert to 25 kHz units */
+ val = freq | LM7000_FM_25 | LM7000_BIT_FM;
+ /* write the 24-bit register, starting with LSB */
+ for (i = 0; i < 24; i++) {
+ data = val & (1 << i) ? LM7000_DATA : 0;
+ set_pins(handle, data | LM7000_CE);
+ udelay(2);
+ set_pins(handle, data | LM7000_CE | LM7000_CLK);
+ udelay(2);
+ set_pins(handle, data | LM7000_CE);
+ udelay(2);
+ }
+ set_pins(handle, 0);
+}
+
+#endif /* __LM7000_H */
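Editor's note: the new lm7000.h factors the Sanyo LM7000 serial-register bit-banging into lm7000_set_freq(); a board driver only supplies a set_pins() callback that maps the generic DATA/CLK/CE lines onto its own I/O port bits, as radio-aimslab.c and radio-sf16fmi.c do in the hunks below. A hedged sketch of a hypothetical caller (port address and bit layout invented):

#include <linux/types.h>
#include <linux/io.h>		/* outb_p */
#include <linux/delay.h>	/* udelay, used by lm7000_set_freq() */
#include "lm7000.h"

struct mycard {
	int io;			/* base I/O port of the card */
};

static void mycard_set_pins(void *handle, u8 pins)
{
	struct mycard *card = handle;
	u8 bits = 0;

	/* translate the generic LM7000 lines to this card's port bits */
	if (pins & LM7000_DATA)
		bits |= 1 << 0;
	if (pins & LM7000_CLK)
		bits |= 1 << 1;
	if (pins & LM7000_CE)
		bits |= 1 << 2;

	outb_p(bits, card->io);
}

static void mycard_tune(struct mycard *card, u32 freq)
{
	/* freq is in 1/16 kHz units, as used by the V4L2 radio API */
	lm7000_set_freq(freq, card, mycard_set_pins);
}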
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 98e0c8c20312..12c70e876f58 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -37,6 +37,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include "radio-isa.h"
+#include "lm7000.h"
MODULE_AUTHOR("M. Kirkwood");
MODULE_DESCRIPTION("A driver for the RadioTrack/RadioReveal radio card.");
@@ -72,55 +73,38 @@ static struct radio_isa_card *rtrack_alloc(void)
return rt ? &rt->isa : NULL;
}
-/* The 128+64 on these outb's is to keep the volume stable while tuning.
- * Without them, the volume _will_ creep up with each frequency change
- * and bit 4 (+16) is to keep the signal strength meter enabled.
- */
+#define AIMS_BIT_TUN_CE (1 << 0)
+#define AIMS_BIT_TUN_CLK (1 << 1)
+#define AIMS_BIT_TUN_DATA (1 << 2)
+#define AIMS_BIT_VOL_CE (1 << 3)
+#define AIMS_BIT_TUN_STRQ (1 << 4)
+/* bit 5 is not connected */
+#define AIMS_BIT_VOL_UP (1 << 6) /* active low */
+#define AIMS_BIT_VOL_DN (1 << 7) /* active low */
-static void send_0_byte(struct radio_isa_card *isa, int on)
+void rtrack_set_pins(void *handle, u8 pins)
{
- outb_p(128+64+16+on+1, isa->io); /* wr-enable + data low */
- outb_p(128+64+16+on+2+1, isa->io); /* clock */
- msleep(1);
-}
+ struct radio_isa_card *isa = handle;
+ struct rtrack *rt = container_of(isa, struct rtrack, isa);
+ u8 bits = AIMS_BIT_VOL_DN | AIMS_BIT_VOL_UP | AIMS_BIT_TUN_STRQ;
-static void send_1_byte(struct radio_isa_card *isa, int on)
-{
- outb_p(128+64+16+on+4+1, isa->io); /* wr-enable+data high */
- outb_p(128+64+16+on+4+2+1, isa->io); /* clock */
- msleep(1);
+ if (!v4l2_ctrl_g_ctrl(rt->isa.mute))
+ bits |= AIMS_BIT_VOL_CE;
+
+ if (pins & LM7000_DATA)
+ bits |= AIMS_BIT_TUN_DATA;
+ if (pins & LM7000_CLK)
+ bits |= AIMS_BIT_TUN_CLK;
+ if (pins & LM7000_CE)
+ bits |= AIMS_BIT_TUN_CE;
+
+ outb_p(bits, rt->isa.io);
}
static int rtrack_s_frequency(struct radio_isa_card *isa, u32 freq)
{
- int on = v4l2_ctrl_g_ctrl(isa->mute) ? 0 : 8;
- int i;
-
- freq += 171200; /* Add 10.7 MHz IF */
- freq /= 800; /* Convert to 50 kHz units */
-
- send_0_byte(isa, on); /* 0: LSB of frequency */
-
- for (i = 0; i < 13; i++) /* : frequency bits (1-13) */
- if (freq & (1 << i))
- send_1_byte(isa, on);
- else
- send_0_byte(isa, on);
-
- send_0_byte(isa, on); /* 14: test bit - always 0 */
- send_0_byte(isa, on); /* 15: test bit - always 0 */
-
- send_0_byte(isa, on); /* 16: band data 0 - always 0 */
- send_0_byte(isa, on); /* 17: band data 1 - always 0 */
- send_0_byte(isa, on); /* 18: band data 2 - always 0 */
- send_0_byte(isa, on); /* 19: time base - always 0 */
-
- send_0_byte(isa, on); /* 20: spacing (0 = 25 kHz) */
- send_1_byte(isa, on); /* 21: spacing (1 = 25 kHz) */
- send_0_byte(isa, on); /* 22: spacing (0 = 25 kHz) */
- send_1_byte(isa, on); /* 23: AM/FM (FM = 1, always) */
+ lm7000_set_freq(freq, isa, rtrack_set_pins);
- outb(0xd0 + on, isa->io); /* volume steady + sigstr */
return 0;
}
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 16a089fad909..697a421c9940 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -41,6 +41,9 @@
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
MODULE_AUTHOR("Fred Gleason, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the ADS Cadet AM/FM/RDS radio card.");
@@ -61,14 +64,15 @@ module_param(radio_nr, int, 0);
struct cadet {
struct v4l2_device v4l2_dev;
struct video_device vdev;
+ struct v4l2_ctrl_handler ctrl_handler;
int io;
- int users;
- int curtuner;
+ bool is_fm_band;
+ u32 curfreq;
int tunestat;
int sigstrength;
wait_queue_head_t read_queue;
struct timer_list readtimer;
- __u8 rdsin, rdsout, rdsstat;
+ u8 rdsin, rdsout, rdsstat;
unsigned char rdsbuf[RDS_BUFFER];
struct mutex lock;
int reading;
@@ -81,9 +85,9 @@ static struct cadet cadet_card;
* The V4L API spec does not define any particular unit for the signal
* strength value. These values are in microvolts of RF at the tuner's input.
*/
-static __u16 sigtable[2][4] = {
- { 5, 10, 30, 150 },
- { 28, 40, 63, 1000 }
+static u16 sigtable[2][4] = {
+ { 1835, 2621, 4128, 65535 },
+ { 2185, 4369, 13107, 65535 },
};
@@ -91,14 +95,12 @@ static int cadet_getstereo(struct cadet *dev)
{
int ret = V4L2_TUNER_SUB_MONO;
- if (dev->curtuner != 0) /* Only FM has stereo capability! */
+ if (!dev->is_fm_band) /* Only FM has stereo capability! */
return V4L2_TUNER_SUB_MONO;
- mutex_lock(&dev->lock);
outb(7, dev->io); /* Select tuner control */
if ((inb(dev->io + 1) & 0x40) == 0)
ret = V4L2_TUNER_SUB_STEREO;
- mutex_unlock(&dev->lock);
return ret;
}
@@ -111,8 +113,6 @@ static unsigned cadet_gettune(struct cadet *dev)
* Prepare for read
*/
- mutex_lock(&dev->lock);
-
outb(7, dev->io); /* Select tuner control */
curvol = inb(dev->io + 1); /* Save current volume/mute setting */
outb(0x00, dev->io + 1); /* Ensure WRITE-ENABLE is LOW */
@@ -134,8 +134,6 @@ static unsigned cadet_gettune(struct cadet *dev)
* Restore volume/mute setting
*/
outb(curvol, dev->io + 1);
- mutex_unlock(&dev->lock);
-
return fifo;
}
@@ -152,20 +150,18 @@ static unsigned cadet_getfreq(struct cadet *dev)
/*
* Convert to actual frequency
*/
- if (dev->curtuner == 0) { /* FM */
- test = 12500;
- for (i = 0; i < 14; i++) {
- if ((fifo & 0x01) != 0)
- freq += test;
- test = test << 1;
- fifo = fifo >> 1;
- }
- freq -= 10700000; /* IF frequency is 10.7 MHz */
- freq = (freq * 16) / 1000000; /* Make it 1/16 MHz */
+ if (!dev->is_fm_band) /* AM */
+ return ((fifo & 0x7fff) - 450) * 16;
+
+ test = 12500;
+ for (i = 0; i < 14; i++) {
+ if ((fifo & 0x01) != 0)
+ freq += test;
+ test = test << 1;
+ fifo = fifo >> 1;
}
- if (dev->curtuner == 1) /* AM */
- freq = ((fifo & 0x7fff) - 2010) * 16;
-
+ freq -= 10700000; /* IF frequency is 10.7 MHz */
+ freq = (freq * 16) / 1000; /* Make it 1/16 kHz */
return freq;
}
@@ -174,8 +170,6 @@ static void cadet_settune(struct cadet *dev, unsigned fifo)
int i;
unsigned test;
- mutex_lock(&dev->lock);
-
outb(7, dev->io); /* Select tuner control */
/*
* Write the shift register
@@ -194,7 +188,6 @@ static void cadet_settune(struct cadet *dev, unsigned fifo)
test = 0x1c | ((fifo >> 23) & 0x02);
outb(test, dev->io + 1);
}
- mutex_unlock(&dev->lock);
}
static void cadet_setfreq(struct cadet *dev, unsigned freq)
@@ -203,13 +196,14 @@ static void cadet_setfreq(struct cadet *dev, unsigned freq)
int i, j, test;
int curvol;
+ dev->curfreq = freq;
/*
* Formulate a fifo command
*/
fifo = 0;
- if (dev->curtuner == 0) { /* FM */
+ if (dev->is_fm_band) { /* FM */
test = 102400;
- freq = (freq * 1000) / 16; /* Make it kHz */
+ freq = freq / 16; /* Make it kHz */
freq += 10700; /* IF is 10700 kHz */
for (i = 0; i < 14; i++) {
fifo = fifo << 1;
@@ -219,20 +213,17 @@ static void cadet_setfreq(struct cadet *dev, unsigned freq)
}
test = test >> 1;
}
- }
- if (dev->curtuner == 1) { /* AM */
- fifo = (freq / 16) + 2010; /* Make it kHz */
- fifo |= 0x100000; /* Select AM Band */
+ } else { /* AM */
+ fifo = (freq / 16) + 450; /* Make it kHz */
+ fifo |= 0x100000; /* Select AM Band */
}
/*
* Save current volume/mute setting
*/
- mutex_lock(&dev->lock);
outb(7, dev->io); /* Select tuner control */
curvol = inb(dev->io + 1);
- mutex_unlock(&dev->lock);
/*
* Tune the card
@@ -240,49 +231,24 @@ static void cadet_setfreq(struct cadet *dev, unsigned freq)
for (j = 3; j > -1; j--) {
cadet_settune(dev, fifo | (j << 16));
- mutex_lock(&dev->lock);
outb(7, dev->io); /* Select tuner control */
outb(curvol, dev->io + 1);
- mutex_unlock(&dev->lock);
msleep(100);
cadet_gettune(dev);
if ((dev->tunestat & 0x40) == 0) { /* Tuned */
- dev->sigstrength = sigtable[dev->curtuner][j];
- return;
+ dev->sigstrength = sigtable[dev->is_fm_band][j];
+ goto reset_rds;
}
}
dev->sigstrength = 0;
+reset_rds:
+ outb(3, dev->io);
+ outb(inb(dev->io + 1) & 0x7f, dev->io + 1);
}
-static int cadet_getvol(struct cadet *dev)
-{
- int ret = 0;
-
- mutex_lock(&dev->lock);
-
- outb(7, dev->io); /* Select tuner control */
- if ((inb(dev->io + 1) & 0x20) != 0)
- ret = 0xffff;
-
- mutex_unlock(&dev->lock);
- return ret;
-}
-
-
-static void cadet_setvol(struct cadet *dev, int vol)
-{
- mutex_lock(&dev->lock);
- outb(7, dev->io); /* Select tuner control */
- if (vol > 0)
- outb(0x20, dev->io + 1);
- else
- outb(0x00, dev->io + 1);
- mutex_unlock(&dev->lock);
-}
-
static void cadet_handler(unsigned long data)
{
struct cadet *dev = (void *)data;
@@ -295,7 +261,7 @@ static void cadet_handler(unsigned long data)
outb(0x80, dev->io); /* Select RDS fifo */
while ((inb(dev->io) & 0x80) != 0) {
dev->rdsbuf[dev->rdsin] = inb(dev->io + 1);
- if (dev->rdsin == dev->rdsout)
+ if (dev->rdsin + 1 == dev->rdsout)
printk(KERN_WARNING "cadet: RDS buffer overflow\n");
else
dev->rdsin++;
@@ -314,11 +280,21 @@ static void cadet_handler(unsigned long data)
*/
init_timer(&dev->readtimer);
dev->readtimer.function = cadet_handler;
- dev->readtimer.data = (unsigned long)0;
+ dev->readtimer.data = data;
dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
add_timer(&dev->readtimer);
}
+static void cadet_start_rds(struct cadet *dev)
+{
+ dev->rdsstat = 1;
+ outb(0x80, dev->io); /* Select RDS fifo */
+ init_timer(&dev->readtimer);
+ dev->readtimer.function = cadet_handler;
+ dev->readtimer.data = (unsigned long)dev;
+ dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
+ add_timer(&dev->readtimer);
+}
static ssize_t cadet_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
@@ -327,28 +303,24 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
int i = 0;
mutex_lock(&dev->lock);
- if (dev->rdsstat == 0) {
- dev->rdsstat = 1;
- outb(0x80, dev->io); /* Select RDS fifo */
- init_timer(&dev->readtimer);
- dev->readtimer.function = cadet_handler;
- dev->readtimer.data = (unsigned long)dev;
- dev->readtimer.expires = jiffies + msecs_to_jiffies(50);
- add_timer(&dev->readtimer);
- }
+ if (dev->rdsstat == 0)
+ cadet_start_rds(dev);
if (dev->rdsin == dev->rdsout) {
+ if (file->f_flags & O_NONBLOCK) {
+ i = -EWOULDBLOCK;
+ goto unlock;
+ }
mutex_unlock(&dev->lock);
- if (file->f_flags & O_NONBLOCK)
- return -EWOULDBLOCK;
interruptible_sleep_on(&dev->read_queue);
mutex_lock(&dev->lock);
}
while (i < count && dev->rdsin != dev->rdsout)
readbuf[i++] = dev->rdsbuf[dev->rdsout++];
- mutex_unlock(&dev->lock);
- if (copy_to_user(data, readbuf, i))
- return -EFAULT;
+ if (i && copy_to_user(data, readbuf, i))
+ i = -EFAULT;
+unlock:
+ mutex_unlock(&dev->lock);
return i;
}
@@ -359,48 +331,58 @@ static int vidioc_querycap(struct file *file, void *priv,
strlcpy(v->driver, "ADS Cadet", sizeof(v->driver));
strlcpy(v->card, "ADS Cadet", sizeof(v->card));
strlcpy(v->bus_info, "ISA", sizeof(v->bus_info));
- v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO |
V4L2_CAP_READWRITE | V4L2_CAP_RDS_CAPTURE;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
+static const struct v4l2_frequency_band bands[] = {
+ {
+ .index = 0,
+ .type = V4L2_TUNER_RADIO,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 8320, /* 520 kHz */
+ .rangehigh = 26400, /* 1650 kHz */
+ .modulation = V4L2_BAND_MODULATION_AM,
+ }, {
+ .index = 1,
+ .type = V4L2_TUNER_RADIO,
+ .capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
+ V4L2_TUNER_CAP_RDS_BLOCK_IO | V4L2_TUNER_CAP_LOW |
+ V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 1400000, /* 87.5 MHz */
+ .rangehigh = 1728000, /* 108.0 MHz */
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+};
+
static int vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
struct cadet *dev = video_drvdata(file);
+ if (v->index)
+ return -EINVAL;
v->type = V4L2_TUNER_RADIO;
- switch (v->index) {
- case 0:
- strlcpy(v->name, "FM", sizeof(v->name));
- v->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_RDS_BLOCK_IO;
- v->rangelow = 1400; /* 87.5 MHz */
- v->rangehigh = 1728; /* 108.0 MHz */
+ strlcpy(v->name, "Radio", sizeof(v->name));
+ v->capability = bands[0].capability | bands[1].capability;
+ v->rangelow = bands[0].rangelow; /* 520 kHz (start of AM band) */
+ v->rangehigh = bands[1].rangehigh; /* 108.0 MHz (end of FM band) */
+ if (dev->is_fm_band) {
v->rxsubchans = cadet_getstereo(dev);
- switch (v->rxsubchans) {
- case V4L2_TUNER_SUB_MONO:
- v->audmode = V4L2_TUNER_MODE_MONO;
- break;
- case V4L2_TUNER_SUB_STEREO:
- v->audmode = V4L2_TUNER_MODE_STEREO;
- break;
- default:
- break;
- }
- v->rxsubchans |= V4L2_TUNER_SUB_RDS;
- break;
- case 1:
- strlcpy(v->name, "AM", sizeof(v->name));
- v->capability = V4L2_TUNER_CAP_LOW;
+ outb(3, dev->io);
+ outb(inb(dev->io + 1) & 0x7f, dev->io + 1);
+ mdelay(100);
+ outb(3, dev->io);
+ if (inb(dev->io + 1) & 0x80)
+ v->rxsubchans |= V4L2_TUNER_SUB_RDS;
+ } else {
v->rangelow = 8320; /* 520 kHz */
v->rangehigh = 26400; /* 1650 kHz */
v->rxsubchans = V4L2_TUNER_SUB_MONO;
- v->audmode = V4L2_TUNER_MODE_MONO;
- break;
- default:
- return -EINVAL;
}
+ v->audmode = V4L2_TUNER_MODE_STEREO;
v->signal = dev->sigstrength; /* We might need to modify scaling of this */
return 0;
}
@@ -408,11 +390,17 @@ static int vidioc_g_tuner(struct file *file, void *priv,
static int vidioc_s_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
- struct cadet *dev = video_drvdata(file);
+ return v->index ? -EINVAL : 0;
+}
- if (v->index != 0 && v->index != 1)
+static int vidioc_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ if (band->tuner)
+ return -EINVAL;
+ if (band->index >= ARRAY_SIZE(bands))
return -EINVAL;
- dev->curtuner = v->index;
+ *band = bands[band->index];
return 0;
}
@@ -421,9 +409,10 @@ static int vidioc_g_frequency(struct file *file, void *priv,
{
struct cadet *dev = video_drvdata(file);
- f->tuner = dev->curtuner;
+ if (f->tuner)
+ return -EINVAL;
f->type = V4L2_TUNER_RADIO;
- f->frequency = cadet_getfreq(dev);
+ f->frequency = dev->curfreq;
return 0;
}
@@ -433,103 +422,46 @@ static int vidioc_s_frequency(struct file *file, void *priv,
{
struct cadet *dev = video_drvdata(file);
- if (f->type != V4L2_TUNER_RADIO)
- return -EINVAL;
- if (dev->curtuner == 0 && (f->frequency < 1400 || f->frequency > 1728))
- return -EINVAL;
- if (dev->curtuner == 1 && (f->frequency < 8320 || f->frequency > 26400))
+ if (f->tuner)
return -EINVAL;
+ dev->is_fm_band =
+ f->frequency >= (bands[0].rangehigh + bands[1].rangelow) / 2;
+ clamp(f->frequency, bands[dev->is_fm_band].rangelow,
+ bands[dev->is_fm_band].rangehigh);
cadet_setfreq(dev, f->frequency);
return 0;
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int cadet_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (qc->id) {
- case V4L2_CID_AUDIO_MUTE:
- return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
- case V4L2_CID_AUDIO_VOLUME:
- return v4l2_ctrl_query_fill(qc, 0, 0xff, 1, 0xff);
- }
- return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cadet *dev = video_drvdata(file);
+ struct cadet *dev = container_of(ctrl->handler, struct cadet, ctrl_handler);
switch (ctrl->id) {
- case V4L2_CID_AUDIO_MUTE: /* TODO: Handle this correctly */
- ctrl->value = (cadet_getvol(dev) == 0);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- ctrl->value = cadet_getvol(dev);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct cadet *dev = video_drvdata(file);
-
- switch (ctrl->id){
- case V4L2_CID_AUDIO_MUTE: /* TODO: Handle this correctly */
- if (ctrl->value)
- cadet_setvol(dev, 0);
+ case V4L2_CID_AUDIO_MUTE:
+ outb(7, dev->io); /* Select tuner control */
+ if (ctrl->val)
+ outb(0x00, dev->io + 1);
else
- cadet_setvol(dev, 0xffff);
- break;
- case V4L2_CID_AUDIO_VOLUME:
- cadet_setvol(dev, ctrl->value);
- break;
- default:
- return -EINVAL;
+ outb(0x20, dev->io + 1);
+ return 0;
}
- return 0;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
- return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- a->index = 0;
- strlcpy(a->name, "Radio", sizeof(a->name));
- a->capability = V4L2_AUDCAP_STEREO;
- return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
- struct v4l2_audio *a)
-{
- return a->index ? -EINVAL : 0;
+ return -EINVAL;
}
static int cadet_open(struct file *file)
{
struct cadet *dev = video_drvdata(file);
+ int err;
mutex_lock(&dev->lock);
- dev->users++;
- if (1 == dev->users)
+ err = v4l2_fh_open(file);
+ if (err)
+ goto fail;
+ if (v4l2_fh_is_singular_file(file))
init_waitqueue_head(&dev->read_queue);
+fail:
mutex_unlock(&dev->lock);
- return 0;
+ return err;
}
static int cadet_release(struct file *file)
@@ -537,11 +469,11 @@ static int cadet_release(struct file *file)
struct cadet *dev = video_drvdata(file);
mutex_lock(&dev->lock);
- dev->users--;
- if (0 == dev->users) {
+ if (v4l2_fh_is_singular_file(file) && dev->rdsstat) {
del_timer_sync(&dev->readtimer);
dev->rdsstat = 0;
}
+ v4l2_fh_release(file);
mutex_unlock(&dev->lock);
return 0;
}
@@ -549,11 +481,19 @@ static int cadet_release(struct file *file)
static unsigned int cadet_poll(struct file *file, struct poll_table_struct *wait)
{
struct cadet *dev = video_drvdata(file);
+ unsigned long req_events = poll_requested_events(wait);
+ unsigned int res = v4l2_ctrl_poll(file, wait);
poll_wait(file, &dev->read_queue, wait);
+ if (dev->rdsstat == 0 && (req_events & (POLLIN | POLLRDNORM))) {
+ mutex_lock(&dev->lock);
+ if (dev->rdsstat == 0)
+ cadet_start_rds(dev);
+ mutex_unlock(&dev->lock);
+ }
if (dev->rdsin != dev->rdsout)
- return POLLIN | POLLRDNORM;
- return 0;
+ res |= POLLIN | POLLRDNORM;
+ return res;
}
@@ -572,13 +512,14 @@ static const struct v4l2_ioctl_ops cadet_ioctl_ops = {
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_g_audio = vidioc_g_audio,
- .vidioc_s_audio = vidioc_s_audio,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
+ .vidioc_enum_freq_bands = vidioc_enum_freq_bands,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_ctrl_ops cadet_ctrl_ops = {
+ .s_ctrl = cadet_s_ctrl,
};
#ifdef CONFIG_PNP
@@ -628,8 +569,8 @@ static void cadet_probe(struct cadet *dev)
for (i = 0; i < 8; i++) {
dev->io = iovals[i];
if (request_region(dev->io, 2, "cadet-probe")) {
- cadet_setfreq(dev, 1410);
- if (cadet_getfreq(dev) == 1410) {
+ cadet_setfreq(dev, bands[1].rangelow);
+ if (cadet_getfreq(dev) == bands[1].rangelow) {
release_region(dev->io, 2);
return;
}
@@ -648,7 +589,8 @@ static int __init cadet_init(void)
{
struct cadet *dev = &cadet_card;
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
- int res;
+ struct v4l2_ctrl_handler *hdl;
+ int res = -ENODEV;
strlcpy(v4l2_dev->name, "cadet", sizeof(v4l2_dev->name));
mutex_init(&dev->lock);
@@ -680,23 +622,40 @@ static int __init cadet_init(void)
goto fail;
}
+ hdl = &dev->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_new_std(hdl, &cadet_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ v4l2_dev->ctrl_handler = hdl;
+ if (hdl->error) {
+ res = hdl->error;
+ v4l2_err(v4l2_dev, "Could not register controls\n");
+ goto err_hdl;
+ }
+
+ dev->is_fm_band = true;
+ dev->curfreq = bands[dev->is_fm_band].rangelow;
+ cadet_setfreq(dev, dev->curfreq);
strlcpy(dev->vdev.name, v4l2_dev->name, sizeof(dev->vdev.name));
dev->vdev.v4l2_dev = v4l2_dev;
dev->vdev.fops = &cadet_fops;
dev->vdev.ioctl_ops = &cadet_ioctl_ops;
dev->vdev.release = video_device_release_empty;
+ dev->vdev.lock = &dev->lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &dev->vdev.flags);
video_set_drvdata(&dev->vdev, dev);
- if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) {
- v4l2_device_unregister(v4l2_dev);
- release_region(dev->io, 2);
- goto fail;
- }
+ if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0)
+ goto err_hdl;
v4l2_info(v4l2_dev, "ADS Cadet Radio Card at 0x%x\n", dev->io);
return 0;
+err_hdl:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(v4l2_dev);
+ release_region(dev->io, 2);
fail:
pnp_unregister_driver(&cadet_pnp_driver);
- return -ENODEV;
+ return res;
}
static void __exit cadet_exit(void)
@@ -704,7 +663,10 @@ static void __exit cadet_exit(void)
struct cadet *dev = &cadet_card;
video_unregister_device(&dev->vdev);
+ v4l2_ctrl_handler_free(&dev->ctrl_handler);
v4l2_device_unregister(&dev->v4l2_dev);
+ outb(7, dev->io); /* Mute */
+ outb(0x00, dev->io + 1);
release_region(dev->io, 2);
pnp_unregister_driver(&cadet_pnp_driver);
}
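Editor's note: much of the radio-cadet.c diff above is a conversion from the old vidioc_queryctrl/g_ctrl/s_ctrl ioctls to the V4L2 control framework: a v4l2_ctrl_handler is set up at init time, the mute control is created with v4l2_ctrl_new_std(), and a single s_ctrl op programs the hardware. A minimal sketch of that shape, with an invented device structure (not the cadet code itself):

#include <linux/kernel.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>

struct mydev {
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler ctrl_handler;
	bool muted;
};

static int mydev_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct mydev *dev = container_of(ctrl->handler, struct mydev,
					 ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		dev->muted = ctrl->val;	/* program the hardware mute here */
		return 0;
	}
	return -EINVAL;
}

static const struct v4l2_ctrl_ops mydev_ctrl_ops = {
	.s_ctrl = mydev_s_ctrl,
};

static int mydev_setup_controls(struct mydev *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrl_handler;

	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_std(hdl, &mydev_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
	if (hdl->error) {
		v4l2_ctrl_handler_free(hdl);
		return hdl->error;
	}
	dev->v4l2_dev.ctrl_handler = hdl;
	return 0;
}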
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 94cb6bc690f5..3182b26d6efa 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -295,7 +295,8 @@ static int vidioc_g_tuner(struct file *file, void *priv,
v->type = V4L2_TUNER_RADIO;
v->rangelow = FREQ_MIN * FREQ_MUL;
v->rangehigh = FREQ_MAX * FREQ_MUL;
- v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_HWSEEK_WRAP;
v->rxsubchans = is_stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
v->audmode = radio->stereo ?
V4L2_TUNER_MODE_STEREO : V4L2_TUNER_MODE_MONO;
@@ -372,7 +373,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
timeout = jiffies + msecs_to_jiffies(30000);
for (;;) {
if (time_after(jiffies, timeout)) {
- retval = -EAGAIN;
+ retval = -ENODATA;
break;
}
if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c
index a81d723b8c77..8185d5fbfa89 100644
--- a/drivers/media/radio/radio-sf16fmi.c
+++ b/drivers/media/radio/radio-sf16fmi.c
@@ -27,6 +27,7 @@
#include <linux/io.h> /* outb, outb_p */
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include "lm7000.h"
MODULE_AUTHOR("Petr Vandrovec, vandrove@vc.cvut.cz and M. Kirkwood");
MODULE_DESCRIPTION("A driver for the SF16-FMI, SF16-FMP and SF16-FMD radio.");
@@ -54,31 +55,33 @@ static struct fmi fmi_card;
static struct pnp_dev *dev;
bool pnp_attached;
-/* freq is in 1/16 kHz to internal number, hw precision is 50 kHz */
-/* It is only useful to give freq in interval of 800 (=0.05Mhz),
- * other bits will be truncated, e.g 92.7400016 -> 92.7, but
- * 92.7400017 -> 92.75
- */
-#define RSF16_ENCODE(x) ((x) / 800 + 214)
#define RSF16_MINFREQ (87 * 16000)
#define RSF16_MAXFREQ (108 * 16000)
-static void outbits(int bits, unsigned int data, int io)
+#define FMI_BIT_TUN_CE (1 << 0)
+#define FMI_BIT_TUN_CLK (1 << 1)
+#define FMI_BIT_TUN_DATA (1 << 2)
+#define FMI_BIT_VOL_SW (1 << 3)
+#define FMI_BIT_TUN_STRQ (1 << 4)
+
+void fmi_set_pins(void *handle, u8 pins)
{
- while (bits--) {
- if (data & 1) {
- outb(5, io);
- udelay(6);
- outb(7, io);
- udelay(6);
- } else {
- outb(1, io);
- udelay(6);
- outb(3, io);
- udelay(6);
- }
- data >>= 1;
- }
+ struct fmi *fmi = handle;
+ u8 bits = FMI_BIT_TUN_STRQ;
+
+ if (!fmi->mute)
+ bits |= FMI_BIT_VOL_SW;
+
+ if (pins & LM7000_DATA)
+ bits |= FMI_BIT_TUN_DATA;
+ if (pins & LM7000_CLK)
+ bits |= FMI_BIT_TUN_CLK;
+ if (pins & LM7000_CE)
+ bits |= FMI_BIT_TUN_CE;
+
+ mutex_lock(&fmi->lock);
+ outb_p(bits, fmi->io);
+ mutex_unlock(&fmi->lock);
}
static inline void fmi_mute(struct fmi *fmi)
@@ -95,20 +98,6 @@ static inline void fmi_unmute(struct fmi *fmi)
mutex_unlock(&fmi->lock);
}
-static inline int fmi_setfreq(struct fmi *fmi, unsigned long freq)
-{
- mutex_lock(&fmi->lock);
- fmi->curfreq = freq;
-
- outbits(16, RSF16_ENCODE(freq), fmi->io);
- outbits(8, 0xC0, fmi->io);
- msleep(143); /* was schedule_timeout(HZ/7) */
- mutex_unlock(&fmi->lock);
- if (!fmi->mute)
- fmi_unmute(fmi);
- return 0;
-}
-
static inline int fmi_getsigstr(struct fmi *fmi)
{
int val;
@@ -173,7 +162,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
return -EINVAL;
/* rounding in steps of 800 to match the freq
that will be used */
- fmi_setfreq(fmi, (f->frequency / 800) * 800);
+ lm7000_set_freq((f->frequency / 800) * 800, fmi, fmi_set_pins);
return 0;
}
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
new file mode 100644
index 000000000000..72ded29728bb
--- /dev/null
+++ b/drivers/media/radio/radio-shark.c
@@ -0,0 +1,381 @@
+/*
+ * Linux V4L2 radio driver for the Griffin radioSHARK USB radio receiver
+ *
+ * Note the radioSHARK offers the audio through a regular USB audio device,
+ * this driver only handles the tuning.
+ *
+ * The info necessary to drive the shark was taken from the small userspace
+ * shark.c program by Michael Rolig, which he kindly placed in the Public
+ * Domain.
+ *
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-device.h>
+#include <sound/tea575x-tuner.h>
+
+#if defined(CONFIG_LEDS_CLASS) || \
+ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE))
+#define SHARK_USE_LEDS 1
+#endif
+
+/*
+ * Version Information
+ */
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Griffin radioSHARK, USB radio receiver driver");
+MODULE_LICENSE("GPL");
+
+#define SHARK_IN_EP 0x83
+#define SHARK_OUT_EP 0x05
+
+#define TEA575X_BIT_MONO (1<<22) /* 0 = stereo, 1 = mono */
+#define TEA575X_BIT_BAND_MASK (3<<20)
+#define TEA575X_BIT_BAND_FM (0<<20)
+
+#define TB_LEN 6
+#define DRV_NAME "radioshark"
+
+#define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev)
+
+enum { BLUE_LED, BLUE_PULSE_LED, RED_LED, NO_LEDS };
+
+struct shark_device {
+ struct usb_device *usbdev;
+ struct v4l2_device v4l2_dev;
+ struct snd_tea575x tea;
+
+#ifdef SHARK_USE_LEDS
+ struct work_struct led_work;
+ struct led_classdev leds[NO_LEDS];
+ char led_names[NO_LEDS][32];
+ atomic_t brightness[NO_LEDS];
+ unsigned long brightness_new;
+#endif
+
+ u8 *transfer_buffer;
+ u32 last_val;
+};
+
+static atomic_t shark_instance = ATOMIC_INIT(0);
+
+static void shark_write_val(struct snd_tea575x *tea, u32 val)
+{
+ struct shark_device *shark = tea->private_data;
+ int i, res, actual_len;
+
+ /* Avoid unnecessary (slow) USB transfers */
+ if (shark->last_val == val)
+ return;
+
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ shark->transfer_buffer[0] = 0xc0; /* Write shift register command */
+ for (i = 0; i < 4; i++)
+ shark->transfer_buffer[i] |= (val >> (24 - i * 8)) & 0xff;
+
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res >= 0)
+ shark->last_val = val;
+ else
+ v4l2_err(&shark->v4l2_dev, "set-freq error: %d\n", res);
+}
+
+static u32 shark_read_val(struct snd_tea575x *tea)
+{
+ struct shark_device *shark = tea->private_data;
+ int i, res, actual_len;
+ u32 val = 0;
+
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ shark->transfer_buffer[0] = 0x80;
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0) {
+ v4l2_err(&shark->v4l2_dev, "request-status error: %d\n", res);
+ return shark->last_val;
+ }
+
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_rcvintpipe(shark->usbdev, SHARK_IN_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0) {
+ v4l2_err(&shark->v4l2_dev, "get-status error: %d\n", res);
+ return shark->last_val;
+ }
+
+ for (i = 0; i < 4; i++)
+ val |= shark->transfer_buffer[i] << (24 - i * 8);
+
+ shark->last_val = val;
+
+ /*
+ * The shark does not allow actually reading the stereo / mono pin :(
+ * So assume that when we're tuned to an FM station and mono has not
+ * been requested, that we're receiving stereo.
+ */
+ if (((val & TEA575X_BIT_BAND_MASK) == TEA575X_BIT_BAND_FM) &&
+ !(val & TEA575X_BIT_MONO))
+ shark->tea.stereo = true;
+ else
+ shark->tea.stereo = false;
+
+ return val;
+}
+
+static struct snd_tea575x_ops shark_tea_ops = {
+ .write_val = shark_write_val,
+ .read_val = shark_read_val,
+};
+
+#ifdef SHARK_USE_LEDS
+static void shark_led_work(struct work_struct *work)
+{
+ struct shark_device *shark =
+ container_of(work, struct shark_device, led_work);
+ int i, res, brightness, actual_len;
+
+ for (i = 0; i < 3; i++) {
+ if (!test_and_clear_bit(i, &shark->brightness_new))
+ continue;
+
+ brightness = atomic_read(&shark->brightness[i]);
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ if (i != RED_LED) {
+ shark->transfer_buffer[0] = 0xA0 + i;
+ shark->transfer_buffer[1] = brightness;
+ } else
+ shark->transfer_buffer[0] = brightness ? 0xA9 : 0xA8;
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev, 0x05),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0)
+ v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
+ shark->led_names[i], res);
+ }
+}
+
+static void shark_led_set_blue(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct shark_device *shark =
+ container_of(led_cdev, struct shark_device, leds[BLUE_LED]);
+
+ atomic_set(&shark->brightness[BLUE_LED], value);
+ set_bit(BLUE_LED, &shark->brightness_new);
+ schedule_work(&shark->led_work);
+}
+
+static void shark_led_set_blue_pulse(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct shark_device *shark = container_of(led_cdev,
+ struct shark_device, leds[BLUE_PULSE_LED]);
+
+ atomic_set(&shark->brightness[BLUE_PULSE_LED], 256 - value);
+ set_bit(BLUE_PULSE_LED, &shark->brightness_new);
+ schedule_work(&shark->led_work);
+}
+
+static void shark_led_set_red(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct shark_device *shark =
+ container_of(led_cdev, struct shark_device, leds[RED_LED]);
+
+ atomic_set(&shark->brightness[RED_LED], value);
+ set_bit(RED_LED, &shark->brightness_new);
+ schedule_work(&shark->led_work);
+}
+
+static const struct led_classdev shark_led_templates[NO_LEDS] = {
+ [BLUE_LED] = {
+ .name = "%s:blue:",
+ .brightness = LED_OFF,
+ .max_brightness = 127,
+ .brightness_set = shark_led_set_blue,
+ },
+ [BLUE_PULSE_LED] = {
+ .name = "%s:blue-pulse:",
+ .brightness = LED_OFF,
+ .max_brightness = 255,
+ .brightness_set = shark_led_set_blue_pulse,
+ },
+ [RED_LED] = {
+ .name = "%s:red:",
+ .brightness = LED_OFF,
+ .max_brightness = 1,
+ .brightness_set = shark_led_set_red,
+ },
+};
+
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+ int i, retval;
+
+ INIT_WORK(&shark->led_work, shark_led_work);
+ for (i = 0; i < NO_LEDS; i++) {
+ shark->leds[i] = shark_led_templates[i];
+ snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
+ shark->leds[i].name, shark->v4l2_dev.name);
+ shark->leds[i].name = shark->led_names[i];
+ retval = led_classdev_register(dev, &shark->leds[i]);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev,
+ "couldn't register led: %s\n",
+ shark->led_names[i]);
+ return retval;
+ }
+ }
+ return 0;
+}
+
+static void shark_unregister_leds(struct shark_device *shark)
+{
+ int i;
+
+ for (i = 0; i < NO_LEDS; i++)
+ led_classdev_unregister(&shark->leds[i]);
+
+ cancel_work_sync(&shark->led_work);
+}
+#else
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+ v4l2_warn(&shark->v4l2_dev,
+ "CONFIG_LED_CLASS not enabled, LED support disabled\n");
+ return 0;
+}
+static inline void shark_unregister_leds(struct shark_device *shark) { }
+#endif
+
+static void usb_shark_disconnect(struct usb_interface *intf)
+{
+ struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
+ struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
+
+ mutex_lock(&shark->tea.mutex);
+ v4l2_device_disconnect(&shark->v4l2_dev);
+ snd_tea575x_exit(&shark->tea);
+ mutex_unlock(&shark->tea.mutex);
+
+ shark_unregister_leds(shark);
+
+ v4l2_device_put(&shark->v4l2_dev);
+}
+
+static void usb_shark_release(struct v4l2_device *v4l2_dev)
+{
+ struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
+
+ v4l2_device_unregister(&shark->v4l2_dev);
+ kfree(shark->transfer_buffer);
+ kfree(shark);
+}
+
+static int usb_shark_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct shark_device *shark;
+ int retval = -ENOMEM;
+
+ shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ if (!shark)
+ return retval;
+
+ shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
+ if (!shark->transfer_buffer)
+ goto err_alloc_buffer;
+
+ v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
+
+ retval = shark_register_leds(shark, &intf->dev);
+ if (retval)
+ goto err_reg_leds;
+
+ shark->v4l2_dev.release = usb_shark_release;
+ retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
+ goto err_reg_dev;
+ }
+
+ shark->usbdev = interface_to_usbdev(intf);
+ shark->tea.v4l2_dev = &shark->v4l2_dev;
+ shark->tea.private_data = shark;
+ shark->tea.radio_nr = -1;
+ shark->tea.ops = &shark_tea_ops;
+ shark->tea.cannot_mute = true;
+ strlcpy(shark->tea.card, "Griffin radioSHARK",
+ sizeof(shark->tea.card));
+ usb_make_path(shark->usbdev, shark->tea.bus_info,
+ sizeof(shark->tea.bus_info));
+
+ retval = snd_tea575x_init(&shark->tea, THIS_MODULE);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev, "couldn't init tea5757\n");
+ goto err_init_tea;
+ }
+
+ return 0;
+
+err_init_tea:
+ v4l2_device_unregister(&shark->v4l2_dev);
+err_reg_dev:
+ shark_unregister_leds(shark);
+err_reg_leds:
+ kfree(shark->transfer_buffer);
+err_alloc_buffer:
+ kfree(shark);
+
+ return retval;
+}
+
+/* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */
+static struct usb_device_id usb_shark_device_table[] = {
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
+ USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = 0x077d,
+ .idProduct = 0x627a,
+ .bcdDevice_lo = 0x0001,
+ .bcdDevice_hi = 0x0001,
+ .bInterfaceClass = 3,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, usb_shark_device_table);
+
+static struct usb_driver usb_shark_driver = {
+ .name = DRV_NAME,
+ .probe = usb_shark_probe,
+ .disconnect = usb_shark_disconnect,
+ .id_table = usb_shark_device_table,
+};
+module_usb_driver(usb_shark_driver);
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
new file mode 100644
index 000000000000..7b4efdfaae28
--- /dev/null
+++ b/drivers/media/radio/radio-shark2.c
@@ -0,0 +1,355 @@
+/*
+ * Linux V4L2 radio driver for the Griffin radioSHARK2 USB radio receiver
+ *
+ * Note the radioSHARK2 offers the audio through a regular USB audio device;
+ * this driver only handles the tuning.
+ *
+ * The info necessary to drive the shark2 was taken from the small userspace
+ * shark2.c program by Hisaaki Shibata, which he kindly placed in the Public
+ * Domain.
+ *
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-device.h>
+#include "radio-tea5777.h"
+
+#if defined(CONFIG_LEDS_CLASS) || \
+ (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE))
+#define SHARK_USE_LEDS 1
+#endif
+
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver");
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0-1)");
+
+#define SHARK_IN_EP 0x83
+#define SHARK_OUT_EP 0x05
+
+#define TB_LEN 7
+#define DRV_NAME "radioshark2"
+
+#define v4l2_dev_to_shark(d) container_of(d, struct shark_device, v4l2_dev)
+
+enum { BLUE_LED, RED_LED, NO_LEDS };
+
+struct shark_device {
+ struct usb_device *usbdev;
+ struct v4l2_device v4l2_dev;
+ struct radio_tea5777 tea;
+
+#ifdef SHARK_USE_LEDS
+ struct work_struct led_work;
+ struct led_classdev leds[NO_LEDS];
+ char led_names[NO_LEDS][32];
+ atomic_t brightness[NO_LEDS];
+ unsigned long brightness_new;
+#endif
+
+ u8 *transfer_buffer;
+};
+
+static atomic_t shark_instance = ATOMIC_INIT(0);
+
+static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
+{
+ struct shark_device *shark = tea->private_data;
+ int i, res, actual_len;
+
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ shark->transfer_buffer[0] = 0x81; /* Write register command */
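+ /*
+ * Pack the 48-bit write value MSB first: datasheet byte 1 lands in
+ * transfer_buffer[1] (bits 47-40), byte 6 in transfer_buffer[6] (bits 7-0).
+ */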
+ for (i = 0; i < 6; i++)
+ shark->transfer_buffer[i + 1] = (reg >> (40 - i * 8)) & 0xff;
+
+ v4l2_dbg(1, debug, tea->v4l2_dev,
+ "shark2-write: %02x %02x %02x %02x %02x %02x %02x\n",
+ shark->transfer_buffer[0], shark->transfer_buffer[1],
+ shark->transfer_buffer[2], shark->transfer_buffer[3],
+ shark->transfer_buffer[4], shark->transfer_buffer[5],
+ shark->transfer_buffer[6]);
+
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0) {
+ v4l2_err(tea->v4l2_dev, "write error: %d\n", res);
+ return res;
+ }
+
+ return 0;
+}
+
+static int shark_read_reg(struct radio_tea5777 *tea, u32 *reg_ret)
+{
+ struct shark_device *shark = tea->private_data;
+ int i, res, actual_len;
+ u32 reg = 0;
+
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ shark->transfer_buffer[0] = 0x82;
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev, SHARK_OUT_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0) {
+ v4l2_err(tea->v4l2_dev, "request-read error: %d\n", res);
+ return res;
+ }
+
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_rcvintpipe(shark->usbdev, SHARK_IN_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0) {
+ v4l2_err(tea->v4l2_dev, "read error: %d\n", res);
+ return res;
+ }
+
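+ /* Assemble the 24-bit read register, transfer_buffer[0] being the MSB */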
+ for (i = 0; i < 3; i++)
+ reg |= shark->transfer_buffer[i] << (16 - i * 8);
+
+ v4l2_dbg(1, debug, tea->v4l2_dev, "shark2-read: %02x %02x %02x\n",
+ shark->transfer_buffer[0], shark->transfer_buffer[1],
+ shark->transfer_buffer[2]);
+
+ *reg_ret = reg;
+ return 0;
+}
+
+static struct radio_tea5777_ops shark_tea_ops = {
+ .write_reg = shark_write_reg,
+ .read_reg = shark_read_reg,
+};
+
+#ifdef SHARK_USE_LEDS
+static void shark_led_work(struct work_struct *work)
+{
+ struct shark_device *shark =
+ container_of(work, struct shark_device, led_work);
+ int i, res, brightness, actual_len;
+
+ for (i = 0; i < 2; i++) {
+ if (!test_and_clear_bit(i, &shark->brightness_new))
+ continue;
+
+ brightness = atomic_read(&shark->brightness[i]);
+ memset(shark->transfer_buffer, 0, TB_LEN);
+ shark->transfer_buffer[0] = 0x83 + i;
+ shark->transfer_buffer[1] = brightness;
+ res = usb_interrupt_msg(shark->usbdev,
+ usb_sndintpipe(shark->usbdev,
+ SHARK_OUT_EP),
+ shark->transfer_buffer, TB_LEN,
+ &actual_len, 1000);
+ if (res < 0)
+ v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n",
+ shark->led_names[i], res);
+ }
+}
+
+static void shark_led_set_blue(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct shark_device *shark =
+ container_of(led_cdev, struct shark_device, leds[BLUE_LED]);
+
+ atomic_set(&shark->brightness[BLUE_LED], value);
+ set_bit(BLUE_LED, &shark->brightness_new);
+ schedule_work(&shark->led_work);
+}
+
+static void shark_led_set_red(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct shark_device *shark =
+ container_of(led_cdev, struct shark_device, leds[RED_LED]);
+
+ atomic_set(&shark->brightness[RED_LED], value);
+ set_bit(RED_LED, &shark->brightness_new);
+ schedule_work(&shark->led_work);
+}
+
+static const struct led_classdev shark_led_templates[NO_LEDS] = {
+ [BLUE_LED] = {
+ .name = "%s:blue:",
+ .brightness = LED_OFF,
+ .max_brightness = 127,
+ .brightness_set = shark_led_set_blue,
+ },
+ [RED_LED] = {
+ .name = "%s:red:",
+ .brightness = LED_OFF,
+ .max_brightness = 1,
+ .brightness_set = shark_led_set_red,
+ },
+};
+
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+ int i, retval;
+
+ INIT_WORK(&shark->led_work, shark_led_work);
+ for (i = 0; i < NO_LEDS; i++) {
+ shark->leds[i] = shark_led_templates[i];
+ snprintf(shark->led_names[i], sizeof(shark->led_names[0]),
+ shark->leds[i].name, shark->v4l2_dev.name);
+ shark->leds[i].name = shark->led_names[i];
+ retval = led_classdev_register(dev, &shark->leds[i]);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev,
+ "couldn't register led: %s\n",
+ shark->led_names[i]);
+ return retval;
+ }
+ }
+ return 0;
+}
+
+static void shark_unregister_leds(struct shark_device *shark)
+{
+ int i;
+
+ for (i = 0; i < NO_LEDS; i++)
+ led_classdev_unregister(&shark->leds[i]);
+
+ cancel_work_sync(&shark->led_work);
+}
+#else
+static int shark_register_leds(struct shark_device *shark, struct device *dev)
+{
+ v4l2_warn(&shark->v4l2_dev,
+ "CONFIG_LED_CLASS not enabled, LED support disabled\n");
+ return 0;
+}
+static inline void shark_unregister_leds(struct shark_device *shark) { }
+#endif
+
+static void usb_shark_disconnect(struct usb_interface *intf)
+{
+ struct v4l2_device *v4l2_dev = usb_get_intfdata(intf);
+ struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
+
+ mutex_lock(&shark->tea.mutex);
+ v4l2_device_disconnect(&shark->v4l2_dev);
+ radio_tea5777_exit(&shark->tea);
+ mutex_unlock(&shark->tea.mutex);
+
+ shark_unregister_leds(shark);
+
+ v4l2_device_put(&shark->v4l2_dev);
+}
+
+static void usb_shark_release(struct v4l2_device *v4l2_dev)
+{
+ struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev);
+
+ v4l2_device_unregister(&shark->v4l2_dev);
+ kfree(shark->transfer_buffer);
+ kfree(shark);
+}
+
+static int usb_shark_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct shark_device *shark;
+ int retval = -ENOMEM;
+
+ shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL);
+ if (!shark)
+ return retval;
+
+ shark->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
+ if (!shark->transfer_buffer)
+ goto err_alloc_buffer;
+
+ v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance);
+
+ retval = shark_register_leds(shark, &intf->dev);
+ if (retval)
+ goto err_reg_leds;
+
+ shark->v4l2_dev.release = usb_shark_release;
+ retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n");
+ goto err_reg_dev;
+ }
+
+ shark->usbdev = interface_to_usbdev(intf);
+ shark->tea.v4l2_dev = &shark->v4l2_dev;
+ shark->tea.private_data = shark;
+ shark->tea.ops = &shark_tea_ops;
+ shark->tea.has_am = true;
+ shark->tea.write_before_read = true;
+ strlcpy(shark->tea.card, "Griffin radioSHARK2",
+ sizeof(shark->tea.card));
+ usb_make_path(shark->usbdev, shark->tea.bus_info,
+ sizeof(shark->tea.bus_info));
+
+ retval = radio_tea5777_init(&shark->tea, THIS_MODULE);
+ if (retval) {
+ v4l2_err(&shark->v4l2_dev, "couldn't init tea5777\n");
+ goto err_init_tea;
+ }
+
+ return 0;
+
+err_init_tea:
+ v4l2_device_unregister(&shark->v4l2_dev);
+err_reg_dev:
+ shark_unregister_leds(shark);
+err_reg_leds:
+ kfree(shark->transfer_buffer);
+err_alloc_buffer:
+ kfree(shark);
+
+ return retval;
+}
+
+/* Specify the bcdDevice value, as the radioSHARK and radioSHARK2 share ids */
+static struct usb_device_id usb_shark_device_table[] = {
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
+ USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = 0x077d,
+ .idProduct = 0x627a,
+ .bcdDevice_lo = 0x0010,
+ .bcdDevice_hi = 0x0010,
+ .bInterfaceClass = 3,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, usb_shark_device_table);
+
+static struct usb_driver usb_shark_driver = {
+ .name = DRV_NAME,
+ .probe = usb_shark_probe,
+ .disconnect = usb_shark_disconnect,
+ .id_table = usb_shark_device_table,
+};
+module_usb_driver(usb_shark_driver);
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
new file mode 100644
index 000000000000..5bc9fa62720b
--- /dev/null
+++ b/drivers/media/radio/radio-tea5777.c
@@ -0,0 +1,491 @@
+/*
+ * v4l2 driver for TEA5777 Philips AM/FM radio tuner chips
+ *
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the ALSA driver for TEA5757/5759 Philips AM/FM radio tuner chips:
+ *
+ * Copyright (c) 2004 Jaroslav Kysela <perex@perex.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <asm/div64.h>
+#include "radio-tea5777.h"
+
+MODULE_AUTHOR("Hans de Goede <perex@perex.cz>");
+MODULE_DESCRIPTION("Routines for control of TEA5777 Philips AM/FM radio tuner chips");
+MODULE_LICENSE("GPL");
+
+/* Fixed FM only band for now, will implement multi-band support when the
+ VIDIOC_ENUM_FREQ_BANDS API is upstream */
+#define TEA5777_FM_RANGELOW (76000 * 16)
+#define TEA5777_FM_RANGEHIGH (108000 * 16)
+
+#define TEA5777_FM_IF 150 /* kHz */
+#define TEA5777_FM_FREQ_STEP 50 /* kHz */
+
+/* Write reg, common bits */
+#define TEA5777_W_MUTE_MASK (1LL << 47)
+#define TEA5777_W_MUTE_SHIFT 47
+#define TEA5777_W_AM_FM_MASK (1LL << 46)
+#define TEA5777_W_AM_FM_SHIFT 46
+#define TEA5777_W_STB_MASK (1LL << 45)
+#define TEA5777_W_STB_SHIFT 45
+
+#define TEA5777_W_IFCE_MASK (1LL << 29)
+#define TEA5777_W_IFCE_SHIFT 29
+#define TEA5777_W_IFW_MASK (1LL << 28)
+#define TEA5777_W_IFW_SHIFT 28
+#define TEA5777_W_HILO_MASK (1LL << 27)
+#define TEA5777_W_HILO_SHIFT 27
+#define TEA5777_W_DBUS_MASK (1LL << 26)
+#define TEA5777_W_DBUS_SHIFT 26
+
+#define TEA5777_W_INTEXT_MASK (1LL << 24)
+#define TEA5777_W_INTEXT_SHIFT 24
+#define TEA5777_W_P1_MASK (1LL << 23)
+#define TEA5777_W_P1_SHIFT 23
+#define TEA5777_W_P0_MASK (1LL << 22)
+#define TEA5777_W_P0_SHIFT 22
+#define TEA5777_W_PEN1_MASK (1LL << 21)
+#define TEA5777_W_PEN1_SHIFT 21
+#define TEA5777_W_PEN0_MASK (1LL << 20)
+#define TEA5777_W_PEN0_SHIFT 20
+
+#define TEA5777_W_CHP0_MASK (1LL << 18)
+#define TEA5777_W_CHP0_SHIFT 18
+#define TEA5777_W_DEEM_MASK (1LL << 17)
+#define TEA5777_W_DEEM_SHIFT 17
+
+#define TEA5777_W_SEARCH_MASK (1LL << 7)
+#define TEA5777_W_SEARCH_SHIFT 7
+#define TEA5777_W_PROGBLIM_MASK (1LL << 6)
+#define TEA5777_W_PROGBLIM_SHIFT 6
+#define TEA5777_W_UPDWN_MASK (1LL << 5)
+#define TEA5777_W_UPDWN_SHIFT 5
+#define TEA5777_W_SLEV_MASK (3LL << 3)
+#define TEA5777_W_SLEV_SHIFT 3
+
+/* Write reg, FM specific bits */
+#define TEA5777_W_FM_PLL_MASK (0x1fffLL << 32)
+#define TEA5777_W_FM_PLL_SHIFT 32
+#define TEA5777_W_FM_FREF_MASK (0x03LL << 30)
+#define TEA5777_W_FM_FREF_SHIFT 30
+#define TEA5777_W_FM_FREF_VALUE 0 /* 50 kHz tune steps, 150 kHz IF */
+
+#define TEA5777_W_FM_FORCEMONO_MASK (1LL << 15)
+#define TEA5777_W_FM_FORCEMONO_SHIFT 15
+#define TEA5777_W_FM_SDSOFF_MASK (1LL << 14)
+#define TEA5777_W_FM_SDSOFF_SHIFT 14
+#define TEA5777_W_FM_DOFF_MASK (1LL << 13)
+#define TEA5777_W_FM_DOFF_SHIFT 13
+
+#define TEA5777_W_FM_STEP_MASK (3LL << 1)
+#define TEA5777_W_FM_STEP_SHIFT 1
+
+/* Write reg, AM specific bits */
+#define TEA5777_W_AM_PLL_MASK (0x7ffLL << 34)
+#define TEA5777_W_AM_PLL_SHIFT 34
+#define TEA5777_W_AM_AGCRF_MASK (1LL << 33)
+#define TEA5777_W_AM_AGCRF_SHIFT 33
+#define TEA5777_W_AM_AGCIF_MASK (1LL << 32)
+#define TEA5777_W_AM_AGCIF_SHIFT 32
+#define TEA5777_W_AM_MWLW_MASK (1LL << 31)
+#define TEA5777_W_AM_MWLW_SHIFT 31
+#define TEA5777_W_AM_LNA_MASK (1LL << 30)
+#define TEA5777_W_AM_LNA_SHIFT 30
+
+#define TEA5777_W_AM_PEAK_MASK (1LL << 25)
+#define TEA5777_W_AM_PEAK_SHIFT 25
+
+#define TEA5777_W_AM_RFB_MASK (1LL << 16)
+#define TEA5777_W_AM_RFB_SHIFT 16
+#define TEA5777_W_AM_CALLIGN_MASK (1LL << 15)
+#define TEA5777_W_AM_CALLIGN_SHIFT 15
+#define TEA5777_W_AM_CBANK_MASK (0x7fLL << 8)
+#define TEA5777_W_AM_CBANK_SHIFT 8
+
+#define TEA5777_W_AM_DELAY_MASK (1LL << 2)
+#define TEA5777_W_AM_DELAY_SHIFT 2
+#define TEA5777_W_AM_STEP_MASK (1LL << 1)
+#define TEA5777_W_AM_STEP_SHIFT 1
+
+/* Read reg, common bits */
+#define TEA5777_R_LEVEL_MASK (0x0f << 17)
+#define TEA5777_R_LEVEL_SHIFT 17
+#define TEA5777_R_SFOUND_MASK (0x01 << 16)
+#define TEA5777_R_SFOUND_SHIFT 16
+#define TEA5777_R_BLIM_MASK (0x01 << 15)
+#define TEA5777_R_BLIM_SHIFT 15
+
+/* Read reg, FM specific bits */
+#define TEA5777_R_FM_STEREO_MASK (0x01 << 21)
+#define TEA5777_R_FM_STEREO_SHIFT 21
+#define TEA5777_R_FM_PLL_MASK 0x1fff
+#define TEA5777_R_FM_PLL_SHIFT 0
+
+static u32 tea5777_freq_to_v4l2_freq(struct radio_tea5777 *tea, u32 freq)
+{
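+ /*
+ * freq is in 50 kHz PLL steps; convert to kHz, add the 150 kHz IF
+ * offset and scale to V4L2 units of 1/16th kHz.
+ */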
+ return (freq * TEA5777_FM_FREQ_STEP + TEA5777_FM_IF) * 16;
+}
+
+static int radio_tea5777_set_freq(struct radio_tea5777 *tea)
+{
+ u64 freq;
+ int res;
+
+ freq = clamp_t(u32, tea->freq,
+ TEA5777_FM_RANGELOW, TEA5777_FM_RANGEHIGH) + 8;
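+ /* the + 8 above rounds to the nearest kHz in the divide by 16 below */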
+ do_div(freq, 16); /* to kHz */
+
+ freq -= TEA5777_FM_IF;
+ do_div(freq, TEA5777_FM_FREQ_STEP);
+
+ tea->write_reg &= ~(TEA5777_W_FM_PLL_MASK | TEA5777_W_FM_FREF_MASK);
+ tea->write_reg |= freq << TEA5777_W_FM_PLL_SHIFT;
+ tea->write_reg |= TEA5777_W_FM_FREF_VALUE << TEA5777_W_FM_FREF_SHIFT;
+
+ res = tea->ops->write_reg(tea, tea->write_reg);
+ if (res)
+ return res;
+
+ tea->needs_write = false;
+ tea->read_reg = -1;
+ tea->freq = tea5777_freq_to_v4l2_freq(tea, freq);
+
+ return 0;
+}
+
+static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
+{
+ int res;
+
+ if (tea->read_reg != -1)
+ return 0;
+
+ if (tea->write_before_read && tea->needs_write) {
+ res = radio_tea5777_set_freq(tea);
+ if (res)
+ return res;
+ }
+
+ if (wait) {
+ if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
+ return -ERESTARTSYS;
+ }
+
+ res = tea->ops->read_reg(tea, &tea->read_reg);
+ if (res)
+ return res;
+
+ tea->needs_write = true;
+ return 0;
+}
+
+/*
+ * Linux Video interface
+ */
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *v)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+
+ strlcpy(v->driver, tea->v4l2_dev->name, sizeof(v->driver));
+ strlcpy(v->card, tea->card, sizeof(v->card));
+ strlcat(v->card, " TEA5777", sizeof(v->card));
+ strlcpy(v->bus_info, tea->bus_info, sizeof(v->bus_info));
+ v->device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ v->device_caps |= V4L2_CAP_HW_FREQ_SEEK;
+ v->capabilities = v->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+ int res;
+
+ if (v->index > 0)
+ return -EINVAL;
+
+ res = radio_tea5777_update_read_reg(tea, 0);
+ if (res)
+ return res;
+
+ memset(v, 0, sizeof(*v));
+ if (tea->has_am)
+ strlcpy(v->name, "AM/FM", sizeof(v->name));
+ else
+ strlcpy(v->name, "FM", sizeof(v->name));
+ v->type = V4L2_TUNER_RADIO;
+ v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED;
+ v->rangelow = TEA5777_FM_RANGELOW;
+ v->rangehigh = TEA5777_FM_RANGEHIGH;
+ v->rxsubchans = (tea->read_reg & TEA5777_R_FM_STEREO_MASK) ?
+ V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
+ v->audmode = (tea->write_reg & TEA5777_W_FM_FORCEMONO_MASK) ?
+ V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO;
+ /* shift by (LEVEL_SHIFT - 12) to scale the 4-bit level (0-15) up to a 16-bit range */
+ v->signal = (tea->read_reg & TEA5777_R_LEVEL_MASK) >>
+ (TEA5777_R_LEVEL_SHIFT - 12);
+
+ /* Invalidate read_reg, so that next call we return up2date signal */
+ tea->read_reg = -1;
+
+ return 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *v)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+
+ if (v->index)
+ return -EINVAL;
+
+ if (v->audmode == V4L2_TUNER_MODE_MONO)
+ tea->write_reg |= TEA5777_W_FM_FORCEMONO_MASK;
+ else
+ tea->write_reg &= ~TEA5777_W_FM_FORCEMONO_MASK;
+
+ return radio_tea5777_set_freq(tea);
+}
+
+static int vidioc_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+
+ if (f->tuner != 0)
+ return -EINVAL;
+ f->type = V4L2_TUNER_RADIO;
+ f->frequency = tea->freq;
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+
+ if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+
+ tea->freq = f->frequency;
+ return radio_tea5777_set_freq(tea);
+}
+
+static int vidioc_s_hw_freq_seek(struct file *file, void *fh,
+ struct v4l2_hw_freq_seek *a)
+{
+ struct radio_tea5777 *tea = video_drvdata(file);
+ u32 orig_freq = tea->freq;
+ unsigned long timeout;
+ int res, spacing = 200 * 16; /* 200 kHz */
+ /* These are fixed *for now* */
+ const u32 seek_rangelow = TEA5777_FM_RANGELOW;
+ const u32 seek_rangehigh = TEA5777_FM_RANGEHIGH;
+
+ if (a->tuner || a->wrap_around)
+ return -EINVAL;
+
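+ /*
+ * Seek sequence: program the band limits (with PROGBLIM set), then set
+ * SEARCH, step one channel spacing away from the current frequency and
+ * poll the read register until SFOUND or BLIM is reported.
+ */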
+ tea->write_reg |= TEA5777_W_PROGBLIM_MASK;
+ if (seek_rangelow != tea->seek_rangelow) {
+ tea->write_reg &= ~TEA5777_W_UPDWN_MASK;
+ tea->freq = seek_rangelow;
+ res = radio_tea5777_set_freq(tea);
+ if (res)
+ goto leave;
+ tea->seek_rangelow = tea->freq;
+ }
+ if (seek_rangehigh != tea->seek_rangehigh) {
+ tea->write_reg |= TEA5777_W_UPDWN_MASK;
+ tea->freq = seek_rangehigh;
+ res = radio_tea5777_set_freq(tea);
+ if (res)
+ goto leave;
+ tea->seek_rangehigh = tea->freq;
+ }
+ tea->write_reg &= ~TEA5777_W_PROGBLIM_MASK;
+
+ tea->write_reg |= TEA5777_W_SEARCH_MASK;
+ if (a->seek_upward) {
+ tea->write_reg |= TEA5777_W_UPDWN_MASK;
+ tea->freq = orig_freq + spacing;
+ } else {
+ tea->write_reg &= ~TEA5777_W_UPDWN_MASK;
+ tea->freq = orig_freq - spacing;
+ }
+ res = radio_tea5777_set_freq(tea);
+ if (res)
+ goto leave;
+
+ timeout = jiffies + msecs_to_jiffies(5000);
+ for (;;) {
+ if (time_after(jiffies, timeout)) {
+ res = -ENODATA;
+ break;
+ }
+
+ res = radio_tea5777_update_read_reg(tea, 100);
+ if (res)
+ break;
+
+ /*
+ * Note we use tea->freq to track how far we've searched so far;
+ * this is necessary to ensure we continue seeking at the right
+ * point in the write_before_read case.
+ */
+ tea->freq = (tea->read_reg & TEA5777_R_FM_PLL_MASK);
+ tea->freq = tea5777_freq_to_v4l2_freq(tea, tea->freq);
+
+ if ((tea->read_reg & TEA5777_R_SFOUND_MASK)) {
+ tea->write_reg &= ~TEA5777_W_SEARCH_MASK;
+ return 0;
+ }
+
+ if (tea->read_reg & TEA5777_R_BLIM_MASK) {
+ res = -ENODATA;
+ break;
+ }
+
+ /* Force read_reg update */
+ tea->read_reg = -1;
+ }
+leave:
+ tea->write_reg &= ~TEA5777_W_PROGBLIM_MASK;
+ tea->write_reg &= ~TEA5777_W_SEARCH_MASK;
+ tea->freq = orig_freq;
+ radio_tea5777_set_freq(tea);
+ return res;
+}
+
+static int tea575x_s_ctrl(struct v4l2_ctrl *c)
+{
+ struct radio_tea5777 *tea =
+ container_of(c->handler, struct radio_tea5777, ctrl_handler);
+
+ switch (c->id) {
+ case V4L2_CID_AUDIO_MUTE:
+ if (c->val)
+ tea->write_reg |= TEA5777_W_MUTE_MASK;
+ else
+ tea->write_reg &= ~TEA5777_W_MUTE_MASK;
+
+ return radio_tea5777_set_freq(tea);
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_file_operations tea575x_fops = {
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = v4l2_fh_release,
+ .poll = v4l2_ctrl_poll,
+};
+
+static const struct v4l2_ioctl_ops tea575x_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_s_hw_freq_seek = vidioc_s_hw_freq_seek,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct video_device tea575x_radio = {
+ .ioctl_ops = &tea575x_ioctl_ops,
+ .release = video_device_release_empty,
+};
+
+static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
+ .s_ctrl = tea575x_s_ctrl,
+};
+
+int radio_tea5777_init(struct radio_tea5777 *tea, struct module *owner)
+{
+ int res;
+
+ tea->write_reg = (1LL << TEA5777_W_IFCE_SHIFT) |
+ (1LL << TEA5777_W_IFW_SHIFT) |
+ (1LL << TEA5777_W_INTEXT_SHIFT) |
+ (1LL << TEA5777_W_CHP0_SHIFT) |
+ (2LL << TEA5777_W_SLEV_SHIFT);
+ tea->freq = 90500 * 16; /* 90.5 MHz default */
+ res = radio_tea5777_set_freq(tea);
+ if (res) {
+ v4l2_err(tea->v4l2_dev, "can't set initial freq (%d)\n", res);
+ return res;
+ }
+
+ tea->vd = tea575x_radio;
+ video_set_drvdata(&tea->vd, tea);
+ mutex_init(&tea->mutex);
+ strlcpy(tea->vd.name, tea->v4l2_dev->name, sizeof(tea->vd.name));
+ tea->vd.lock = &tea->mutex;
+ tea->vd.v4l2_dev = tea->v4l2_dev;
+ tea->fops = tea575x_fops;
+ tea->fops.owner = owner;
+ tea->vd.fops = &tea->fops;
+ set_bit(V4L2_FL_USE_FH_PRIO, &tea->vd.flags);
+
+ tea->vd.ctrl_handler = &tea->ctrl_handler;
+ v4l2_ctrl_handler_init(&tea->ctrl_handler, 1);
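+ /* Single boolean mute control; default value 1 means start muted */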
+ v4l2_ctrl_new_std(&tea->ctrl_handler, &tea575x_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ res = tea->ctrl_handler.error;
+ if (res) {
+ v4l2_err(tea->v4l2_dev, "can't initialize controls\n");
+ v4l2_ctrl_handler_free(&tea->ctrl_handler);
+ return res;
+ }
+ v4l2_ctrl_handler_setup(&tea->ctrl_handler);
+
+ res = video_register_device(&tea->vd, VFL_TYPE_RADIO, -1);
+ if (res) {
+ v4l2_err(tea->v4l2_dev, "can't register video device!\n");
+ v4l2_ctrl_handler_free(tea->vd.ctrl_handler);
+ return res;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(radio_tea5777_init);
+
+void radio_tea5777_exit(struct radio_tea5777 *tea)
+{
+ video_unregister_device(&tea->vd);
+ v4l2_ctrl_handler_free(tea->vd.ctrl_handler);
+}
+EXPORT_SYMBOL_GPL(radio_tea5777_exit);
diff --git a/drivers/media/radio/radio-tea5777.h b/drivers/media/radio/radio-tea5777.h
new file mode 100644
index 000000000000..55cbd78df5ed
--- /dev/null
+++ b/drivers/media/radio/radio-tea5777.h
@@ -0,0 +1,87 @@
+#ifndef __RADIO_TEA5777_H
+#define __RADIO_TEA5777_H
+
+/*
+ * v4l2 driver for TEA5777 Philips AM/FM radio tuner chips
+ *
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on the ALSA driver for TEA5757/5759 Philips AM/FM radio tuner chips:
+ *
+ * Copyright (c) 2004 Jaroslav Kysela <perex@perex.cz>
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+
+#define TEA575X_FMIF 10700
+#define TEA575X_AMIF 450
+
+struct radio_tea5777;
+
+struct radio_tea5777_ops {
+ /*
+ * Write the 6-byte write register of the tea5777
+ *
+ * val represents the 6 write registers, with byte 1 from the
+ * datasheet being the most significant byte (so byte 5 of the u64),
+ * and byte 6 from the datasheet being the least significant byte.
+ *
+ * returns 0 on success.
+ */
+ int (*write_reg)(struct radio_tea5777 *tea, u64 val);
+ /*
+ * Read the 3-byte read register of the tea5777
+ *
+ * The read value gets returned in val, akin to write_reg, byte 1 from
+ * the datasheet is stored as the most significant byte (so byte 2 of
+ * the u32), and byte 3 from the datasheet gets stored as the least
+ * significant byte (iow byte 0 of the u32).
+ *
+ * returns 0 on success.
+ */
+ int (*read_reg)(struct radio_tea5777 *tea, u32 *val);
+};
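+
+/*
+ * Example (illustrative sketch only): with the MSB-first convention
+ * described above, a write_reg implementation sends (val >> 40) & 0xff
+ * as datasheet byte 1 and val & 0xff as byte 6, while a read_reg
+ * implementation returns (byte1 << 16) | (byte2 << 8) | byte3 in *val.
+ */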
+
+struct radio_tea5777 {
+ struct v4l2_device *v4l2_dev;
+ struct v4l2_file_operations fops;
+ struct video_device vd; /* video device */
+ bool has_am; /* Device can tune to AM freqs */
+ bool write_before_read; /* must write before read quirk */
+ bool needs_write; /* for write before read quirk */
+ u32 freq; /* current frequency */
+ u32 seek_rangelow; /* current hwseek limits */
+ u32 seek_rangehigh;
+ u32 read_reg;
+ u64 write_reg;
+ struct mutex mutex;
+ struct radio_tea5777_ops *ops;
+ void *private_data;
+ u8 card[32];
+ u8 bus_info[32];
+ struct v4l2_ctrl_handler ctrl_handler;
+};
+
+int radio_tea5777_init(struct radio_tea5777 *tea, struct module *owner);
+void radio_tea5777_exit(struct radio_tea5777 *tea);
+
+#endif /* __RADIO_TEA5777_H */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index f1b607099b6c..e8428f573ccd 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1514,7 +1514,8 @@ static int wl1273_fm_vidioc_g_tuner(struct file *file, void *priv,
tuner->rangehigh = WL1273_FREQ(WL1273_BAND_OTHER_HIGH);
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO;
+ V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP;
if (radio->stereo)
tuner->audmode = V4L2_TUNER_MODE_STEREO;
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index 969cf494d85b..9bb65e170d99 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -4,6 +4,7 @@
* Driver for radios with Silicon Labs Si470x FM Radio Receivers
*
* Copyright (c) 2009 Tobias Lorenz <tobias.lorenz@gmx.net>
+ * Copyright (c) 2012 Hans de Goede <hdegoede@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -127,14 +128,6 @@ static unsigned short space = 2;
module_param(space, ushort, 0444);
MODULE_PARM_DESC(space, "Spacing: 0=200kHz 1=100kHz *2=50kHz*");
-/* Bottom of Band (MHz) */
-/* 0: 87.5 - 108 MHz (USA, Europe)*/
-/* 1: 76 - 108 MHz (Japan wide band) */
-/* 2: 76 - 90 MHz (Japan) */
-static unsigned short band = 1;
-module_param(band, ushort, 0444);
-MODULE_PARM_DESC(band, "Band: 0=87.5..108MHz *1=76..108MHz* 2=76..90MHz");
-
/* De-emphasis */
/* 0: 75 us (USA) */
/* 1: 50 us (Europe, Australia, Japan) */
@@ -152,19 +145,69 @@ static unsigned int seek_timeout = 5000;
module_param(seek_timeout, uint, 0644);
MODULE_PARM_DESC(seek_timeout, "Seek timeout: *5000*");
-
+static const struct v4l2_frequency_band bands[] = {
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+ V4L2_TUNER_CAP_FREQ_BANDS |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED |
+ V4L2_TUNER_CAP_HWSEEK_WRAP,
+ .rangelow = 87500 * 16,
+ .rangehigh = 108000 * 16,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 1,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+ V4L2_TUNER_CAP_FREQ_BANDS |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED |
+ V4L2_TUNER_CAP_HWSEEK_WRAP,
+ .rangelow = 76000 * 16,
+ .rangehigh = 108000 * 16,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+ {
+ .type = V4L2_TUNER_RADIO,
+ .index = 2,
+ .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+ V4L2_TUNER_CAP_FREQ_BANDS |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED |
+ V4L2_TUNER_CAP_HWSEEK_WRAP,
+ .rangelow = 76000 * 16,
+ .rangehigh = 90000 * 16,
+ .modulation = V4L2_BAND_MODULATION_FM,
+ },
+};
/**************************************************************************
* Generic Functions
**************************************************************************/
/*
+ * si470x_set_band - set the band
+ */
+static int si470x_set_band(struct si470x_device *radio, int band)
+{
+ if (radio->band == band)
+ return 0;
+
+ radio->band = band;
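+ /* BAND occupies SYSCONFIG2 bits 7:6 */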
+ radio->registers[SYSCONFIG2] &= ~SYSCONFIG2_BAND;
+ radio->registers[SYSCONFIG2] |= radio->band << 6;
+ return si470x_set_register(radio, SYSCONFIG2);
+}
+
+/*
* si470x_set_chan - set the channel
*/
static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
{
int retval;
- unsigned long timeout;
bool timed_out = 0;
/* start tuning */
@@ -174,26 +217,12 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
if (retval < 0)
goto done;
- /* currently I2C driver only uses interrupt way to tune */
- if (radio->stci_enabled) {
- INIT_COMPLETION(radio->completion);
-
- /* wait till tune operation has completed */
- retval = wait_for_completion_timeout(&radio->completion,
- msecs_to_jiffies(tune_timeout));
- if (!retval)
- timed_out = true;
- } else {
- /* wait till tune operation has completed */
- timeout = jiffies + msecs_to_jiffies(tune_timeout);
- do {
- retval = si470x_get_register(radio, STATUSRSSI);
- if (retval < 0)
- goto stop;
- timed_out = time_after(jiffies, timeout);
- } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- && (!timed_out));
- }
+ /* wait till tune operation has completed */
+ INIT_COMPLETION(radio->completion);
+ retval = wait_for_completion_timeout(&radio->completion,
+ msecs_to_jiffies(tune_timeout));
+ if (!retval)
+ timed_out = true;
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
dev_warn(&radio->videodev.dev, "tune does not complete\n");
@@ -201,7 +230,6 @@ static int si470x_set_chan(struct si470x_device *radio, unsigned short chan)
dev_warn(&radio->videodev.dev,
"tune timed out after %u ms\n", tune_timeout);
-stop:
/* stop tuning */
radio->registers[CHANNEL] &= ~CHANNEL_TUNE;
retval = si470x_set_register(radio, CHANNEL);
@@ -210,48 +238,39 @@ done:
return retval;
}
-
/*
- * si470x_get_freq - get the frequency
+ * si470x_get_step - get channel spacing
*/
-static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
+static unsigned int si470x_get_step(struct si470x_device *radio)
{
- unsigned int spacing, band_bottom;
- unsigned short chan;
- int retval;
-
/* Spacing (kHz) */
switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) {
/* 0: 200 kHz (USA, Australia) */
case 0:
- spacing = 0.200 * FREQ_MUL; break;
+ return 200 * 16;
/* 1: 100 kHz (Europe, Japan) */
case 1:
- spacing = 0.100 * FREQ_MUL; break;
+ return 100 * 16;
/* 2: 50 kHz */
default:
- spacing = 0.050 * FREQ_MUL; break;
+ return 50 * 16;
};
+}
- /* Bottom of Band (MHz) */
- switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
- /* 0: 87.5 - 108 MHz (USA, Europe) */
- case 0:
- band_bottom = 87.5 * FREQ_MUL; break;
- /* 1: 76 - 108 MHz (Japan wide band) */
- default:
- band_bottom = 76 * FREQ_MUL; break;
- /* 2: 76 - 90 MHz (Japan) */
- case 2:
- band_bottom = 76 * FREQ_MUL; break;
- };
+
+/*
+ * si470x_get_freq - get the frequency
+ */
+static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
+{
+ int chan, retval;
/* read channel */
retval = si470x_get_register(radio, READCHAN);
chan = radio->registers[READCHAN] & READCHAN_READCHAN;
/* Frequency (MHz) = Spacing (kHz) x Channel + Bottom of Band (MHz) */
- *freq = chan * spacing + band_bottom;
+ *freq = chan * si470x_get_step(radio) + bands[radio->band].rangelow;
return retval;
}
@@ -262,44 +281,12 @@ static int si470x_get_freq(struct si470x_device *radio, unsigned int *freq)
*/
int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
{
- unsigned int spacing, band_bottom, band_top;
unsigned short chan;
- /* Spacing (kHz) */
- switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_SPACE) >> 4) {
- /* 0: 200 kHz (USA, Australia) */
- case 0:
- spacing = 0.200 * FREQ_MUL; break;
- /* 1: 100 kHz (Europe, Japan) */
- case 1:
- spacing = 0.100 * FREQ_MUL; break;
- /* 2: 50 kHz */
- default:
- spacing = 0.050 * FREQ_MUL; break;
- };
-
- /* Bottom/Top of Band (MHz) */
- switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
- /* 0: 87.5 - 108 MHz (USA, Europe) */
- case 0:
- band_bottom = 87.5 * FREQ_MUL;
- band_top = 108 * FREQ_MUL;
- break;
- /* 1: 76 - 108 MHz (Japan wide band) */
- default:
- band_bottom = 76 * FREQ_MUL;
- band_top = 108 * FREQ_MUL;
- break;
- /* 2: 76 - 90 MHz (Japan) */
- case 2:
- band_bottom = 76 * FREQ_MUL;
- band_top = 90 * FREQ_MUL;
- break;
- };
-
- freq = clamp(freq, band_bottom, band_top);
+ freq = clamp(freq, bands[radio->band].rangelow,
+ bands[radio->band].rangehigh);
/* Chan = [ Freq (Mhz) - Bottom of Band (MHz) ] / Spacing (kHz) */
- chan = (freq - band_bottom) / spacing;
+ chan = (freq - bands[radio->band].rangelow) / si470x_get_step(radio);
return si470x_set_chan(radio, chan);
}
@@ -309,19 +296,43 @@ int si470x_set_freq(struct si470x_device *radio, unsigned int freq)
* si470x_set_seek - set seek
*/
static int si470x_set_seek(struct si470x_device *radio,
- unsigned int wrap_around, unsigned int seek_upward)
+ struct v4l2_hw_freq_seek *seek)
{
- int retval = 0;
- unsigned long timeout;
+ int band, retval;
+ unsigned int freq;
bool timed_out = 0;
+ /* set band */
+ if (seek->rangelow || seek->rangehigh) {
+ for (band = 0; band < ARRAY_SIZE(bands); band++) {
+ if (bands[band].rangelow == seek->rangelow &&
+ bands[band].rangehigh == seek->rangehigh)
+ break;
+ }
+ if (band == ARRAY_SIZE(bands))
+ return -EINVAL; /* No matching band found */
+ } else
+ band = 1; /* If nothing is specified, seek 76 - 108 MHz */
+
+ if (radio->band != band) {
+ retval = si470x_get_freq(radio, &freq);
+ if (retval)
+ return retval;
+ retval = si470x_set_band(radio, band);
+ if (retval)
+ return retval;
+ retval = si470x_set_freq(radio, freq);
+ if (retval)
+ return retval;
+ }
+
/* start seeking */
radio->registers[POWERCFG] |= POWERCFG_SEEK;
- if (wrap_around == 1)
+ if (seek->wrap_around)
radio->registers[POWERCFG] &= ~POWERCFG_SKMODE;
else
radio->registers[POWERCFG] |= POWERCFG_SKMODE;
- if (seek_upward == 1)
+ if (seek->seek_upward)
radio->registers[POWERCFG] |= POWERCFG_SEEKUP;
else
radio->registers[POWERCFG] &= ~POWERCFG_SEEKUP;
@@ -329,26 +340,12 @@ static int si470x_set_seek(struct si470x_device *radio,
if (retval < 0)
return retval;
- /* currently I2C driver only uses interrupt way to seek */
- if (radio->stci_enabled) {
- INIT_COMPLETION(radio->completion);
-
- /* wait till seek operation has completed */
- retval = wait_for_completion_timeout(&radio->completion,
- msecs_to_jiffies(seek_timeout));
- if (!retval)
- timed_out = true;
- } else {
- /* wait till seek operation has completed */
- timeout = jiffies + msecs_to_jiffies(seek_timeout);
- do {
- retval = si470x_get_register(radio, STATUSRSSI);
- if (retval < 0)
- goto stop;
- timed_out = time_after(jiffies, timeout);
- } while (((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
- && (!timed_out));
- }
+ /* wait till tune operation has completed */
+ INIT_COMPLETION(radio->completion);
+ retval = wait_for_completion_timeout(&radio->completion,
+ msecs_to_jiffies(seek_timeout));
+ if (!retval)
+ timed_out = true;
if ((radio->registers[STATUSRSSI] & STATUSRSSI_STC) == 0)
dev_warn(&radio->videodev.dev, "seek does not complete\n");
@@ -356,14 +353,13 @@ static int si470x_set_seek(struct si470x_device *radio,
dev_warn(&radio->videodev.dev,
"seek failed / band limit reached\n");
-stop:
/* stop seeking */
radio->registers[POWERCFG] &= ~POWERCFG_SEEK;
retval = si470x_set_register(radio, POWERCFG);
/* try again, if timed out */
if (retval == 0 && timed_out)
- return -EAGAIN;
+ return -ENODATA;
return retval;
}
@@ -391,8 +387,8 @@ int si470x_start(struct si470x_device *radio)
/* sysconfig 2 */
radio->registers[SYSCONFIG2] =
- (0x3f << 8) | /* SEEKTH */
- ((band << 6) & SYSCONFIG2_BAND) | /* BAND */
+ (0x1f << 8) | /* SEEKTH */
+ ((radio->band << 6) & SYSCONFIG2_BAND) | /* BAND */
((space << 4) & SYSCONFIG2_SPACE) | /* SPACE */
15; /* VOLUME (max) */
retval = si470x_set_register(radio, SYSCONFIG2);
@@ -583,39 +579,26 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *tuner)
{
struct si470x_device *radio = video_drvdata(file);
- int retval;
+ int retval = 0;
if (tuner->index != 0)
return -EINVAL;
- retval = si470x_get_register(radio, STATUSRSSI);
- if (retval < 0)
- return retval;
+ if (!radio->status_rssi_auto_update) {
+ retval = si470x_get_register(radio, STATUSRSSI);
+ if (retval < 0)
+ return retval;
+ }
/* driver constants */
strcpy(tuner->name, "FM");
tuner->type = V4L2_TUNER_RADIO;
tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
- V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-
- /* range limits */
- switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
- /* 0: 87.5 - 108 MHz (USA, Europe, default) */
- default:
- tuner->rangelow = 87.5 * FREQ_MUL;
- tuner->rangehigh = 108 * FREQ_MUL;
- break;
- /* 1: 76 - 108 MHz (Japan wide band) */
- case 1:
- tuner->rangelow = 76 * FREQ_MUL;
- tuner->rangehigh = 108 * FREQ_MUL;
- break;
- /* 2: 76 - 90 MHz (Japan) */
- case 2:
- tuner->rangelow = 76 * FREQ_MUL;
- tuner->rangehigh = 90 * FREQ_MUL;
- break;
- };
+ V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED |
+ V4L2_TUNER_CAP_HWSEEK_WRAP;
+ tuner->rangelow = 76 * FREQ_MUL;
+ tuner->rangehigh = 108 * FREQ_MUL;
/* stereo indicator == stereo (instead of mono) */
if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0)
@@ -698,10 +681,18 @@ static int si470x_vidioc_s_frequency(struct file *file, void *priv,
struct v4l2_frequency *freq)
{
struct si470x_device *radio = video_drvdata(file);
+ int retval;
if (freq->tuner != 0)
return -EINVAL;
+ if (freq->frequency < bands[radio->band].rangelow ||
+ freq->frequency > bands[radio->band].rangehigh) {
+ /* Switch to band 1 which covers everything we support */
+ retval = si470x_set_band(radio, 1);
+ if (retval)
+ return retval;
+ }
return si470x_set_freq(radio, freq->frequency);
}
@@ -717,7 +708,21 @@ static int si470x_vidioc_s_hw_freq_seek(struct file *file, void *priv,
if (seek->tuner != 0)
return -EINVAL;
- return si470x_set_seek(radio, seek->wrap_around, seek->seek_upward);
+ return si470x_set_seek(radio, seek);
+}
+
+/*
+ * si470x_vidioc_enum_freq_bands - enumerate supported bands
+ */
+static int si470x_vidioc_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ if (band->tuner != 0)
+ return -EINVAL;
+ if (band->index >= ARRAY_SIZE(bands))
+ return -EINVAL;
+ *band = bands[band->index];
+ return 0;
}
const struct v4l2_ctrl_ops si470x_ctrl_ops = {
@@ -734,6 +739,7 @@ static const struct v4l2_ioctl_ops si470x_ioctl_ops = {
.vidioc_g_frequency = si470x_vidioc_g_frequency,
.vidioc_s_frequency = si470x_vidioc_s_frequency,
.vidioc_s_hw_freq_seek = si470x_vidioc_s_hw_freq_seek,
+ .vidioc_enum_freq_bands = si470x_vidioc_enum_freq_bands,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index a80044c5874e..f867f04cccc9 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -225,8 +225,9 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
{
strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
- capability->capabilities = V4L2_CAP_HW_FREQ_SEEK |
- V4L2_CAP_TUNER | V4L2_CAP_RADIO;
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
+ V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
+ capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -350,7 +351,9 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
}
radio->client = client;
+ radio->band = 1; /* Default to 76 - 108 MHz */
mutex_init(&radio->lock);
+ init_completion(&radio->completion);
/* video device initialization */
radio->videodev = si470x_viddev_template;
@@ -406,10 +409,6 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client,
radio->rd_index = 0;
init_waitqueue_head(&radio->read_queue);
- /* mark Seek/Tune Complete Interrupt enabled */
- radio->stci_enabled = true;
- init_completion(&radio->completion);
-
retval = request_threaded_irq(client->irq, NULL, si470x_i2c_interrupt,
IRQF_TRIGGER_FALLING, DRIVER_NAME, radio);
if (retval) {
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index f412f7ab270b..be076f7181e7 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -143,7 +143,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
* Software/Hardware Versions from Scratch Page
**************************************************************************/
#define RADIO_SW_VERSION_NOT_BOOTLOADABLE 6
-#define RADIO_SW_VERSION 7
+#define RADIO_SW_VERSION 1
#define RADIO_HW_VERSION 1
@@ -399,12 +399,19 @@ static void si470x_int_in_callback(struct urb *urb)
}
}
- if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
+ /* Sometimes the device returns len 0 packets */
+ if (urb->actual_length != RDS_REPORT_SIZE)
goto resubmit;
- if (urb->actual_length > 0) {
+ radio->registers[STATUSRSSI] =
+ get_unaligned_be16(&radio->int_in_buffer[1]);
+
+ if (radio->registers[STATUSRSSI] & STATUSRSSI_STC)
+ complete(&radio->completion);
+
+ if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS)) {
/* Update RDS registers with URB data */
- for (regnr = 0; regnr < RDS_REGISTER_NUM; regnr++)
+ for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++)
radio->registers[STATUSRSSI + regnr] =
get_unaligned_be16(&radio->int_in_buffer[
regnr * RADIO_REGISTER_SIZE + 1]);
@@ -480,6 +487,7 @@ resubmit:
radio->int_in_running = 0;
}
}
+ radio->status_rssi_auto_update = radio->int_in_running;
}
@@ -523,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv,
strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
usb_make_path(radio->usbdev, capability->bus_info,
sizeof(capability->bus_info));
- capability->device_caps = V4L2_CAP_HW_FREQ_SEEK |
+ capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
@@ -534,13 +542,6 @@ static int si470x_start_usb(struct si470x_device *radio)
{
int retval;
- /* start radio */
- retval = si470x_start(radio);
- if (retval < 0)
- return retval;
-
- v4l2_ctrl_handler_setup(&radio->hdl);
-
/* initialize interrupt urb */
usb_fill_int_urb(radio->int_in_urb, radio->usbdev,
usb_rcvintpipe(radio->usbdev,
@@ -560,6 +561,15 @@ static int si470x_start_usb(struct si470x_device *radio)
"submitting int urb failed (%d)\n", retval);
radio->int_in_running = 0;
}
+ radio->status_rssi_auto_update = radio->int_in_running;
+
+ /* start radio */
+ retval = si470x_start(radio);
+ if (retval < 0)
+ return retval;
+
+ v4l2_ctrl_handler_setup(&radio->hdl);
+
return retval;
}
@@ -587,7 +597,9 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
}
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
+ radio->band = 1; /* Default to 76 - 108 MHz */
mutex_init(&radio->lock);
+ init_completion(&radio->completion);
iface_desc = intf->cur_altsetting;
@@ -698,9 +710,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
"linux-media@vger.kernel.org\n");
}
- /* set initial frequency */
- si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
-
/* set led to connect state */
si470x_set_led_state(radio, BLINK_GREEN_LED);
@@ -723,6 +732,9 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
if (retval < 0)
goto err_all;
+ /* set initial frequency */
+ si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
+
/* register video device */
retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
radio_nr);
@@ -781,11 +793,16 @@ static int si470x_usb_driver_suspend(struct usb_interface *intf,
static int si470x_usb_driver_resume(struct usb_interface *intf)
{
struct si470x_device *radio = usb_get_intfdata(intf);
+ int ret;
dev_info(&intf->dev, "resuming now...\n");
/* start radio */
- return si470x_start_usb(radio);
+ ret = si470x_start_usb(radio);
+ if (ret == 0)
+ v4l2_ctrl_handler_setup(&radio->hdl);
+
+ return ret;
}
diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h
index 4921cab8e0fa..2f089b4252df 100644
--- a/drivers/media/radio/si470x/radio-si470x.h
+++ b/drivers/media/radio/si470x/radio-si470x.h
@@ -87,7 +87,7 @@
#define SYSCONFIG2 5 /* System Configuration 2 */
#define SYSCONFIG2_SEEKTH 0xff00 /* bits 15..08: RSSI Seek Threshold */
-#define SYSCONFIG2_BAND 0x0080 /* bits 07..06: Band Select */
+#define SYSCONFIG2_BAND 0x00c0 /* bits 07..06: Band Select */
#define SYSCONFIG2_SPACE 0x0030 /* bits 05..04: Channel Spacing */
#define SYSCONFIG2_VOLUME 0x000f /* bits 03..00: Volume */
@@ -147,6 +147,7 @@ struct si470x_device {
struct v4l2_device v4l2_dev;
struct video_device videodev;
struct v4l2_ctrl_handler hdl;
+ int band;
/* Silabs internal registers (0..15) */
unsigned short registers[RADIO_REGISTER_NUM];
@@ -160,7 +161,7 @@ struct si470x_device {
unsigned int wr_index;
struct completion completion;
- bool stci_enabled; /* Seek/Tune Complete Interrupt */
+ bool status_rssi_auto_update; /* Is RSSI updated automatically? */
#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
/* reference to USB and video device */
@@ -189,7 +190,7 @@ struct si470x_device {
* Firmware Versions
**************************************************************************/
-#define RADIO_FW_VERSION 15
+#define RADIO_FW_VERSION 12
diff --git a/drivers/media/radio/wl128x/fmdrv_rx.c b/drivers/media/radio/wl128x/fmdrv_rx.c
index 43fb72291bea..3dd9fc097c47 100644
--- a/drivers/media/radio/wl128x/fmdrv_rx.c
+++ b/drivers/media/radio/wl128x/fmdrv_rx.c
@@ -251,7 +251,7 @@ again:
if (!timeleft) {
fmerr("Timeout(%d sec),didn't get tune ended int\n",
jiffies_to_msecs(FM_DRV_RX_SEEK_TIMEOUT) / 1000);
- return -ETIMEDOUT;
+ return -ENODATA;
}
int_reason = fmdev->irq_info.flag & (FM_TUNE_COMPLETE | FM_BAND_LIMIT);
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
index 080b96a61f1a..49a11ec1f449 100644
--- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
+++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
@@ -285,7 +285,9 @@ static int fm_v4l2_vidioc_g_tuner(struct file *file, void *priv,
tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO |
((fmdev->rx.rds.flag == FM_RDS_ENABLE) ? V4L2_TUNER_SUB_RDS : 0);
tuner->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS |
- V4L2_TUNER_CAP_LOW;
+ V4L2_TUNER_CAP_LOW |
+ V4L2_TUNER_CAP_HWSEEK_BOUNDED |
+ V4L2_TUNER_CAP_HWSEEK_WRAP;
tuner->audmode = (stereo_mono_mode ?
V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index f97eeb870455..8be57634ba60 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -1,21 +1,20 @@
-menuconfig RC_CORE
- tristate "Remote Controller adapters"
+config RC_CORE
+ tristate
+ depends on MEDIA_RC_SUPPORT
depends on INPUT
- default INPUT
- ---help---
- Enable support for Remote Controllers on Linux. This is
- needed in order to support several video capture adapters,
- standalone IR receivers/transmitters, and RF receivers.
+ default y
- Enable this option if you have a video capture board even
- if you don't need IR, as otherwise, you may not be able to
- compile the driver for your adapter.
+source "drivers/media/rc/keymaps/Kconfig"
-if RC_CORE
+menuconfig RC_DECODERS
+ bool "Remote controller decoders"
+ depends on RC_CORE
+ default y
+if RC_DECODERS
config LIRC
- tristate
- default y
+ tristate "LIRC interface driver"
+ depends on RC_CORE
---help---
Enable this option to build the Linux Infrared Remote
@@ -24,7 +23,16 @@ config LIRC
LIRC daemon handles protocol decoding for IR reception and
encoding for IR transmitting (aka "blasting").
-source "drivers/media/rc/keymaps/Kconfig"
+config IR_LIRC_CODEC
+ tristate "Enable IR to LIRC bridge"
+ depends on RC_CORE
+ depends on LIRC
+ default y
+
+ ---help---
+ Enable this option to pass raw IR to and from userspace via
+ the LIRC interface.
+
config IR_NEC_DECODER
tristate "Enable IR raw decoder for the NEC protocol"
@@ -108,16 +116,13 @@ config IR_MCE_KBD_DECODER
Enable this option if you have a Microsoft Remote Keyboard for
Windows Media Center Edition, which you would like to use with
a raw IR receiver in your system.
+endif #RC_DECODERS
-config IR_LIRC_CODEC
- tristate "Enable IR to LIRC bridge"
+menuconfig RC_DEVICES
+ bool "Remote Controller devices"
depends on RC_CORE
- depends on LIRC
- default y
- ---help---
- Enable this option to pass raw IR to and from userspace via
- the LIRC interface.
+if RC_DEVICES
config RC_ATI_REMOTE
tristate "ATI / X10 based USB RF remote controls"
@@ -254,6 +259,18 @@ config IR_WINBOND_CIR
To compile this driver as a module, choose M here: the module will
be called winbond_cir.
+config IR_IGUANA
+ tristate "IguanaWorks USB IR Transceiver"
+ depends on USB_ARCH_HAS_HCD
+ depends on RC_CORE
+ select USB
+ ---help---
+ Say Y here if you want to use the IguanaWorks USB IR Transceiver.
+ Both infrared receive and send are supported.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iguanair.
+
config RC_LOOPBACK
tristate "Remote Control Loopback Driver"
depends on RC_CORE
@@ -276,4 +293,4 @@ config IR_GPIO_CIR
To compile this driver as a module, choose M here: the module will
be called gpio-ir-recv.
-endif #RC_CORE
+endif #RC_DEVICES
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 29f364f88a94..f871d1986c21 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
obj-$(CONFIG_IR_WINBOND_CIR) += winbond-cir.o
obj-$(CONFIG_RC_LOOPBACK) += rc-loopback.o
obj-$(CONFIG_IR_GPIO_CIR) += gpio-ir-recv.o
+obj-$(CONFIG_IR_IGUANA) += iguanair.o
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 7be377fc1be8..8fa72e2dacb1 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -147,7 +147,8 @@ static bool mouse = true;
module_param(mouse, bool, 0444);
MODULE_PARM_DESC(mouse, "Enable mouse device, default = yes");
-#define dbginfo(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0)
+#define dbginfo(dev, format, arg...) \
+ do { if (debug) dev_info(dev , format , ## arg); } while (0)
#undef err
#define err(format, arg...) printk(KERN_ERR format , ## arg)
@@ -191,17 +192,41 @@ static const char *get_medion_keymap(struct usb_interface *interface)
return RC_MAP_MEDION_X10;
}
-static const struct ati_receiver_type type_ati = { .default_keymap = RC_MAP_ATI_X10 };
-static const struct ati_receiver_type type_medion = { .get_default_keymap = get_medion_keymap };
-static const struct ati_receiver_type type_firefly = { .default_keymap = RC_MAP_SNAPSTREAM_FIREFLY };
+static const struct ati_receiver_type type_ati = {
+ .default_keymap = RC_MAP_ATI_X10
+};
+static const struct ati_receiver_type type_medion = {
+ .get_default_keymap = get_medion_keymap
+};
+static const struct ati_receiver_type type_firefly = {
+ .default_keymap = RC_MAP_SNAPSTREAM_FIREFLY
+};
static struct usb_device_id ati_remote_table[] = {
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_ati },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_medion },
- { USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID), .driver_info = (unsigned long)&type_firefly },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_ati
+ },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, LOLA2_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_ati
+ },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, ATI_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_ati
+ },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, NVIDIA_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_ati
+ },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, MEDION_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_medion
+ },
+ {
+ USB_DEVICE(ATI_REMOTE_VENDOR_ID, FIREFLY_REMOTE_PRODUCT_ID),
+ .driver_info = (unsigned long)&type_firefly
+ },
{} /* Terminating entry */
};
@@ -296,25 +321,8 @@ static const struct {
{KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
-/* Local function prototypes */
-static int ati_remote_sendpacket (struct ati_remote *ati_remote, u16 cmd, unsigned char *data);
-static void ati_remote_irq_out (struct urb *urb);
-static void ati_remote_irq_in (struct urb *urb);
-static void ati_remote_input_report (struct urb *urb);
-static int ati_remote_initialize (struct ati_remote *ati_remote);
-static int ati_remote_probe (struct usb_interface *interface, const struct usb_device_id *id);
-static void ati_remote_disconnect (struct usb_interface *interface);
-
-/* usb specific object to register with the usb subsystem */
-static struct usb_driver ati_remote_driver = {
- .name = "ati_remote",
- .probe = ati_remote_probe,
- .disconnect = ati_remote_disconnect,
- .id_table = ati_remote_table,
-};
-
/*
- * ati_remote_dump_input
+ * ati_remote_dump_input
*/
static void ati_remote_dump(struct device *dev, unsigned char *data,
unsigned int len)
@@ -326,12 +334,14 @@ static void ati_remote_dump(struct device *dev, unsigned char *data,
dev_warn(dev, "Weird key %02x %02x %02x %02x\n",
data[0], data[1], data[2], data[3]);
else
- dev_warn(dev, "Weird data, len=%d %02x %02x %02x %02x %02x %02x ...\n",
- len, data[0], data[1], data[2], data[3], data[4], data[5]);
+ dev_warn(dev,
+ "Weird data, len=%d %02x %02x %02x %02x %02x %02x ...\n",
+ len, data[0], data[1], data[2], data[3], data[4],
+ data[5]);
}
/*
- * ati_remote_open
+ * ati_remote_open
*/
static int ati_remote_open(struct ati_remote *ati_remote)
{
@@ -355,7 +365,7 @@ out: mutex_unlock(&ati_remote->open_mutex);
}
/*
- * ati_remote_close
+ * ati_remote_close
*/
static void ati_remote_close(struct ati_remote *ati_remote)
{
@@ -390,7 +400,7 @@ static void ati_remote_rc_close(struct rc_dev *rdev)
}
/*
- * ati_remote_irq_out
+ * ati_remote_irq_out
*/
static void ati_remote_irq_out(struct urb *urb)
{
@@ -408,11 +418,12 @@ static void ati_remote_irq_out(struct urb *urb)
}
/*
- * ati_remote_sendpacket
+ * ati_remote_sendpacket
*
- * Used to send device initialization strings
+ * Used to send device initialization strings
*/
-static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigned char *data)
+static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd,
+ unsigned char *data)
{
int retval = 0;
@@ -441,7 +452,7 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
}
/*
- * ati_remote_compute_accel
+ * ati_remote_compute_accel
*
* Implements acceleration curve for directional control pad
* If elapsed time since last event is > 1/4 second, user "stopped",
@@ -478,7 +489,7 @@ static int ati_remote_compute_accel(struct ati_remote *ati_remote)
}
/*
- * ati_remote_report_input
+ * ati_remote_report_input
*/
static void ati_remote_input_report(struct urb *urb)
{
@@ -518,7 +529,8 @@ static void ati_remote_input_report(struct urb *urb)
remote_num = (data[3] >> 4) & 0x0f;
if (channel_mask & (1 << (remote_num + 1))) {
dbginfo(&ati_remote->interface->dev,
- "Masked input from channel 0x%02x: data %02x,%02x, mask= 0x%02lx\n",
+ "Masked input from channel 0x%02x: data %02x,%02x, "
+ "mask= 0x%02lx\n",
remote_num, data[1], data[2], channel_mask);
return;
}
@@ -546,7 +558,9 @@ static void ati_remote_input_report(struct urb *urb)
if (wheel_keycode == KEY_RESERVED) {
/* scrollwheel was not mapped, assume mouse */
- /* Look up event code index in the mouse translation table. */
+ /* Look up event code index in the mouse translation
+ * table.
+ */
for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
if (scancode == ati_remote_tbl[i].data) {
index = i;
@@ -630,9 +644,9 @@ static void ati_remote_input_report(struct urb *urb)
} else {
/*
- * Other event kinds are from the directional control pad, and have an
- * acceleration factor applied to them. Without this acceleration, the
- * control pad is mostly unusable.
+ * Other event kinds are from the directional control pad, and
+ * have an acceleration factor applied to them. Without this
+ * acceleration, the control pad is mostly unusable.
*/
acc = ati_remote_compute_accel(ati_remote);
@@ -659,7 +673,8 @@ static void ati_remote_input_report(struct urb *urb)
input_report_rel(dev, REL_Y, acc);
break;
default:
- dev_dbg(&ati_remote->interface->dev, "ati_remote kind=%d\n",
+ dev_dbg(&ati_remote->interface->dev,
+ "ati_remote kind=%d\n",
ati_remote_tbl[index].kind);
}
input_sync(dev);
@@ -670,7 +685,7 @@ static void ati_remote_input_report(struct urb *urb)
}
/*
- * ati_remote_irq_in
+ * ati_remote_irq_in
*/
static void ati_remote_irq_in(struct urb *urb)
{
@@ -684,22 +699,25 @@ static void ati_remote_irq_in(struct urb *urb)
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
- dev_dbg(&ati_remote->interface->dev, "%s: urb error status, unlink? \n",
+ dev_dbg(&ati_remote->interface->dev,
+ "%s: urb error status, unlink?\n",
__func__);
return;
default: /* error */
- dev_dbg(&ati_remote->interface->dev, "%s: Nonzero urb status %d\n",
+ dev_dbg(&ati_remote->interface->dev,
+ "%s: Nonzero urb status %d\n",
__func__, urb->status);
}
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
- dev_err(&ati_remote->interface->dev, "%s: usb_submit_urb()=%d\n",
+ dev_err(&ati_remote->interface->dev,
+ "%s: usb_submit_urb()=%d\n",
__func__, retval);
}
/*
- * ati_remote_alloc_buffers
+ * ati_remote_alloc_buffers
*/
static int ati_remote_alloc_buffers(struct usb_device *udev,
struct ati_remote *ati_remote)
@@ -726,7 +744,7 @@ static int ati_remote_alloc_buffers(struct usb_device *udev,
}
/*
- * ati_remote_free_buffers
+ * ati_remote_free_buffers
*/
static void ati_remote_free_buffers(struct ati_remote *ati_remote)
{
@@ -825,9 +843,10 @@ static int ati_remote_initialize(struct ati_remote *ati_remote)
}
/*
- * ati_remote_probe
+ * ati_remote_probe
*/
-static int ati_remote_probe(struct usb_interface *interface, const struct usb_device_id *id)
+static int ati_remote_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct usb_host_interface *iface_host = interface->cur_altsetting;
@@ -949,7 +968,7 @@ static int ati_remote_probe(struct usb_interface *interface, const struct usb_de
}
/*
- * ati_remote_disconnect
+ * ati_remote_disconnect
*/
static void ati_remote_disconnect(struct usb_interface *interface)
{
@@ -971,6 +990,14 @@ static void ati_remote_disconnect(struct usb_interface *interface)
kfree(ati_remote);
}
+/* usb specific object to register with the usb subsystem */
+static struct usb_driver ati_remote_driver = {
+ .name = "ati_remote",
+ .probe = ati_remote_probe,
+ .disconnect = ati_remote_disconnect,
+ .id_table = ati_remote_table,
+};
+
module_usb_driver(ati_remote_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index bef5296173c9..647dd951b0e8 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -1018,6 +1018,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
spin_lock_init(&dev->hw_lock);
+ dev->hw_io = pnp_port_start(pnp_dev, 0);
+
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
@@ -1072,7 +1074,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
/* claim the resources */
error = -EBUSY;
- dev->hw_io = pnp_port_start(pnp_dev, 0);
if (!request_region(dev->hw_io, ENE_IO_SIZE, ENE_DRIVER_NAME)) {
dev->hw_io = -1;
dev->irq = -1;
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 6aabf7ae3a31..ab30c64f8124 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -23,6 +23,8 @@
* USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
@@ -110,30 +112,32 @@ static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
return val;
}
-#define pr_reg(text, ...) \
- printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
-
/* dump current cir register contents */
static void cir_dump_regs(struct fintek_dev *fintek)
{
fintek_config_mode_enable(fintek);
fintek_select_logical_dev(fintek, fintek->logical_dev_cir);
- pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
- pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
- (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
+ pr_info("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
+ pr_info(" * CR CIR BASE ADDR: 0x%x\n",
+ (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
- pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
- fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
+ pr_info(" * CR CIR IRQ NUM: 0x%x\n",
+ fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
fintek_config_mode_disable(fintek);
- pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
- pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
- pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
- pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
- pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
- pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
+ pr_info("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
+ pr_info(" * STATUS: 0x%x\n",
+ fintek_cir_reg_read(fintek, CIR_STATUS));
+ pr_info(" * CONTROL: 0x%x\n",
+ fintek_cir_reg_read(fintek, CIR_CONTROL));
+ pr_info(" * RX_DATA: 0x%x\n",
+ fintek_cir_reg_read(fintek, CIR_RX_DATA));
+ pr_info(" * TX_CONTROL: 0x%x\n",
+ fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
+ pr_info(" * TX_DATA: 0x%x\n",
+ fintek_cir_reg_read(fintek, CIR_TX_DATA));
}
/* detect hardware features */
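The pr_reg() macro removed above was only re-adding the module-name prefix by hand; with pr_fmt() defined before the first include, every pr_info()/pr_cont() call already gets that prefix from the printk core. A minimal sketch of the pattern, assuming a hypothetical example_dump() helper purely for illustration:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>

static void example_dump(void)
{
	/* printed as "<module name>: STATUS: 0x7f" */
	pr_info("STATUS: 0x%x\n", 0x7f);
}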
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 0d875450c5ce..04cb272db16a 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -82,12 +82,21 @@ static int __devinit gpio_ir_recv_probe(struct platform_device *pdev)
goto err_allocate_device;
}
+ rcdev->priv = gpio_dev;
rcdev->driver_type = RC_DRIVER_IR_RAW;
- rcdev->allowed_protos = RC_TYPE_ALL;
rcdev->input_name = GPIO_IR_DEVICE_NAME;
+ rcdev->input_phys = GPIO_IR_DEVICE_NAME "/input0";
rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->dev.parent = &pdev->dev;
rcdev->driver_name = GPIO_IR_DRIVER_NAME;
- rcdev->map_name = RC_MAP_EMPTY;
+ if (pdata->allowed_protos)
+ rcdev->allowed_protos = pdata->allowed_protos;
+ else
+ rcdev->allowed_protos = RC_TYPE_ALL;
+ rcdev->map_name = pdata->map_name ?: RC_MAP_EMPTY;
gpio_dev->rcdev = rcdev;
gpio_dev->gpio_nr = pdata->gpio_nr;
@@ -188,18 +197,7 @@ static struct platform_driver gpio_ir_recv_driver = {
#endif
},
};
-
-static int __init gpio_ir_recv_init(void)
-{
- return platform_driver_register(&gpio_ir_recv_driver);
-}
-module_init(gpio_ir_recv_init);
-
-static void __exit gpio_ir_recv_exit(void)
-{
- platform_driver_unregister(&gpio_ir_recv_driver);
-}
-module_exit(gpio_ir_recv_exit);
+module_platform_driver(gpio_ir_recv_driver);
MODULE_DESCRIPTION("GPIO IR Receiver driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
new file mode 100644
index 000000000000..5e2eaf8ba73e
--- /dev/null
+++ b/drivers/media/rc/iguanair.c
@@ -0,0 +1,639 @@
+/*
+ * IguanaWorks USB IR Transceiver support
+ *
+ * Copyright (C) 2012 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/input.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <media/rc-core.h>
+
+#define DRIVER_NAME "iguanair"
+
+struct iguanair {
+ struct rc_dev *rc;
+
+ struct device *dev;
+ struct usb_device *udev;
+
+ int pipe_in, pipe_out;
+ uint8_t bufsize;
+ uint8_t version[2];
+
+ struct mutex lock;
+
+ /* receiver support */
+ bool receiver_on;
+ dma_addr_t dma_in;
+ uint8_t *buf_in;
+ struct urb *urb_in;
+ struct completion completion;
+
+ /* transmit support */
+ bool tx_overflow;
+ uint32_t carrier;
+ uint8_t cycle_overhead;
+ uint8_t channels;
+ uint8_t busy4;
+ uint8_t busy7;
+
+ char name[64];
+ char phys[64];
+};
+
+#define CMD_GET_VERSION 0x01
+#define CMD_GET_BUFSIZE 0x11
+#define CMD_GET_FEATURES 0x10
+#define CMD_SEND 0x15
+#define CMD_EXECUTE 0x1f
+#define CMD_RX_OVERFLOW 0x31
+#define CMD_TX_OVERFLOW 0x32
+#define CMD_RECEIVER_ON 0x12
+#define CMD_RECEIVER_OFF 0x14
+
+#define DIR_IN 0xdc
+#define DIR_OUT 0xcd
+
+#define MAX_PACKET_SIZE 8u
+#define TIMEOUT 1000
+
+struct packet {
+ uint16_t start;
+ uint8_t direction;
+ uint8_t cmd;
+};
+
+struct response_packet {
+ struct packet header;
+ uint8_t data[4];
+};
+
+struct send_packet {
+ struct packet header;
+ uint8_t length;
+ uint8_t channels;
+ uint8_t busy7;
+ uint8_t busy4;
+ uint8_t payload[0];
+};
+
+static void process_ir_data(struct iguanair *ir, unsigned len)
+{
+ if (len >= 4 && ir->buf_in[0] == 0 && ir->buf_in[1] == 0) {
+ switch (ir->buf_in[3]) {
+ case CMD_TX_OVERFLOW:
+ ir->tx_overflow = true;
+ case CMD_RECEIVER_OFF:
+ case CMD_RECEIVER_ON:
+ case CMD_SEND:
+ complete(&ir->completion);
+ break;
+ case CMD_RX_OVERFLOW:
+ dev_warn(ir->dev, "receive overflow\n");
+ break;
+ default:
+ dev_warn(ir->dev, "control code %02x received\n",
+ ir->buf_in[3]);
+ break;
+ }
+ } else if (len >= 7) {
+ DEFINE_IR_RAW_EVENT(rawir);
+ unsigned i;
+
+ init_ir_raw_event(&rawir);
+
+ for (i = 0; i < 7; i++) {
+ if (ir->buf_in[i] == 0x80) {
+ rawir.pulse = false;
+ rawir.duration = US_TO_NS(21845);
+ } else {
+ rawir.pulse = (ir->buf_in[i] & 0x80) == 0;
+ rawir.duration = ((ir->buf_in[i] & 0x7f) + 1) *
+ 21330;
+ }
+
+ ir_raw_event_store_with_filter(ir->rc, &rawir);
+ }
+
+ ir_raw_event_handle(ir->rc);
+ }
+}
+
+static void iguanair_rx(struct urb *urb)
+{
+ struct iguanair *ir;
+
+ if (!urb)
+ return;
+
+ ir = urb->context;
+ if (!ir) {
+ usb_unlink_urb(urb);
+ return;
+ }
+
+ switch (urb->status) {
+ case 0:
+ process_ir_data(ir, urb->actual_length);
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ usb_unlink_urb(urb);
+ return;
+ case -EPIPE:
+ default:
+ dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+ break;
+ }
+
+ usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static int iguanair_send(struct iguanair *ir, void *data, unsigned size,
+ struct response_packet *response, unsigned *res_len)
+{
+ unsigned offset, len;
+ int rc, transferred;
+
+ for (offset = 0; offset < size; offset += MAX_PACKET_SIZE) {
+ len = min(size - offset, MAX_PACKET_SIZE);
+
+ if (ir->tx_overflow)
+ return -EOVERFLOW;
+
+ rc = usb_interrupt_msg(ir->udev, ir->pipe_out, data + offset,
+ len, &transferred, TIMEOUT);
+ if (rc)
+ return rc;
+
+ if (transferred != len)
+ return -EIO;
+ }
+
+ if (response) {
+ rc = usb_interrupt_msg(ir->udev, ir->pipe_in, response,
+ sizeof(*response), res_len, TIMEOUT);
+ }
+
+ return rc;
+}
+
+static int iguanair_get_features(struct iguanair *ir)
+{
+ struct packet packet;
+ struct response_packet response;
+ int rc, len;
+
+ packet.start = 0;
+ packet.direction = DIR_OUT;
+ packet.cmd = CMD_GET_VERSION;
+
+ rc = iguanair_send(ir, &packet, sizeof(packet), &response, &len);
+ if (rc) {
+ dev_info(ir->dev, "failed to get version\n");
+ goto out;
+ }
+
+ if (len != 6) {
+ dev_info(ir->dev, "failed to get version\n");
+ rc = -EIO;
+ goto out;
+ }
+
+ ir->version[0] = response.data[0];
+ ir->version[1] = response.data[1];
+ ir->bufsize = 150;
+ ir->cycle_overhead = 65;
+
+ packet.cmd = CMD_GET_BUFSIZE;
+
+ rc = iguanair_send(ir, &packet, sizeof(packet), &response, &len);
+ if (rc) {
+ dev_info(ir->dev, "failed to get buffer size\n");
+ goto out;
+ }
+
+ if (len != 5) {
+ dev_info(ir->dev, "failed to get buffer size\n");
+ rc = -EIO;
+ goto out;
+ }
+
+ ir->bufsize = response.data[0];
+
+ if (ir->version[0] == 0 || ir->version[1] == 0)
+ goto out;
+
+ packet.cmd = CMD_GET_FEATURES;
+
+ rc = iguanair_send(ir, &packet, sizeof(packet), &response, &len);
+ if (rc) {
+ dev_info(ir->dev, "failed to get features\n");
+ goto out;
+ }
+
+ if (len < 5) {
+ dev_info(ir->dev, "failed to get features\n");
+ rc = -EIO;
+ goto out;
+ }
+
+ if (len > 5 && ir->version[0] >= 4)
+ ir->cycle_overhead = response.data[1];
+
+out:
+ return rc;
+}
+
+static int iguanair_receiver(struct iguanair *ir, bool enable)
+{
+ struct packet packet = { 0, DIR_OUT, enable ?
+ CMD_RECEIVER_ON : CMD_RECEIVER_OFF };
+ int rc;
+
+ INIT_COMPLETION(ir->completion);
+
+ rc = iguanair_send(ir, &packet, sizeof(packet), NULL, NULL);
+ if (rc)
+ return rc;
+
+ wait_for_completion_timeout(&ir->completion, TIMEOUT);
+
+ return 0;
+}
+
+/*
+ * The iguana ir creates the carrier by busy spinning after each pulse or
+ * space. This is counted in CPU cycles, with the CPU running at 24MHz. It is
+ * broken down into 7-cycle and 4-cycle delays, with a preference for
+ * 4-cycle delays.
+ */
+static int iguanair_set_tx_carrier(struct rc_dev *dev, uint32_t carrier)
+{
+ struct iguanair *ir = dev->priv;
+
+ if (carrier < 25000 || carrier > 150000)
+ return -EINVAL;
+
+ mutex_lock(&ir->lock);
+
+ if (carrier != ir->carrier) {
+ uint32_t cycles, fours, sevens;
+
+ ir->carrier = carrier;
+
+ cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) -
+ ir->cycle_overhead;
+
+ /* make up the remainder of 4-cycle blocks */
+ switch (cycles & 3) {
+ case 0:
+ sevens = 0;
+ break;
+ case 1:
+ sevens = 3;
+ break;
+ case 2:
+ sevens = 2;
+ break;
+ case 3:
+ sevens = 1;
+ break;
+ }
+
+ fours = (cycles - sevens * 7) / 4;
+
+ /* magic happens here */
+ ir->busy7 = (4 - sevens) * 2;
+ ir->busy4 = 110 - fours;
+ }
+
+ mutex_unlock(&ir->lock);
+
+ return carrier;
+}
+
+static int iguanair_set_tx_mask(struct rc_dev *dev, uint32_t mask)
+{
+ struct iguanair *ir = dev->priv;
+
+ if (mask > 15)
+ return 4;
+
+ mutex_lock(&ir->lock);
+ ir->channels = mask;
+ mutex_unlock(&ir->lock);
+
+ return 0;
+}
+
+static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
+{
+ struct iguanair *ir = dev->priv;
+ uint8_t space, *payload;
+ unsigned i, size;
+ int rc;
+ struct send_packet *packet;
+
+ mutex_lock(&ir->lock);
+
+ /* convert from us to carrier periods */
+ for (i = size = 0; i < count; i++) {
+ txbuf[i] = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
+ size += (txbuf[i] + 126) / 127;
+ }
+
+ packet = kmalloc(sizeof(*packet) + size, GFP_KERNEL);
+ if (!packet) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (size > ir->bufsize) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ packet->header.start = 0;
+ packet->header.direction = DIR_OUT;
+ packet->header.cmd = CMD_SEND;
+ packet->length = size;
+ packet->channels = ir->channels << 4;
+ packet->busy7 = ir->busy7;
+ packet->busy4 = ir->busy4;
+
+ space = 0;
+ payload = packet->payload;
+
+ for (i = 0; i < count; i++) {
+ unsigned periods = txbuf[i];
+
+ while (periods > 127) {
+ *payload++ = 127 | space;
+ periods -= 127;
+ }
+
+ *payload++ = periods | space;
+ space ^= 0x80;
+ }
+
+ if (ir->receiver_on) {
+ rc = iguanair_receiver(ir, false);
+ if (rc) {
+ dev_warn(ir->dev, "disable receiver before transmit failed\n");
+ goto out;
+ }
+ }
+
+ ir->tx_overflow = false;
+
+ INIT_COMPLETION(ir->completion);
+
+ rc = iguanair_send(ir, packet, size + 8, NULL, NULL);
+
+ if (rc == 0) {
+ wait_for_completion_timeout(&ir->completion, TIMEOUT);
+ if (ir->tx_overflow)
+ rc = -EOVERFLOW;
+ }
+
+ ir->tx_overflow = false;
+
+ if (ir->receiver_on) {
+ if (iguanair_receiver(ir, true))
+ dev_warn(ir->dev, "re-enable receiver after transmit failed\n");
+ }
+
+out:
+ mutex_unlock(&ir->lock);
+ kfree(packet);
+
+ return rc;
+}
+
+static int iguanair_open(struct rc_dev *rdev)
+{
+ struct iguanair *ir = rdev->priv;
+ int rc;
+
+ mutex_lock(&ir->lock);
+
+ usb_submit_urb(ir->urb_in, GFP_KERNEL);
+
+ BUG_ON(ir->receiver_on);
+
+ rc = iguanair_receiver(ir, true);
+ if (rc == 0)
+ ir->receiver_on = true;
+
+ mutex_unlock(&ir->lock);
+
+ return rc;
+}
+
+static void iguanair_close(struct rc_dev *rdev)
+{
+ struct iguanair *ir = rdev->priv;
+ int rc;
+
+ mutex_lock(&ir->lock);
+
+ rc = iguanair_receiver(ir, false);
+ ir->receiver_on = false;
+ if (rc)
+ dev_warn(ir->dev, "failed to disable receiver: %d\n", rc);
+
+ usb_kill_urb(ir->urb_in);
+
+ mutex_unlock(&ir->lock);
+}
+
+static int __devinit iguanair_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct iguanair *ir;
+ struct rc_dev *rc;
+ int ret;
+ struct usb_host_interface *idesc;
+
+ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ rc = rc_allocate_device();
+ if (!ir || !rc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ir->buf_in = usb_alloc_coherent(udev, MAX_PACKET_SIZE, GFP_ATOMIC,
+ &ir->dma_in);
+ ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!ir->buf_in || !ir->urb_in) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ idesc = intf->altsetting;
+
+ if (idesc->desc.bNumEndpoints < 2) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ir->rc = rc;
+ ir->dev = &intf->dev;
+ ir->udev = udev;
+ ir->pipe_in = usb_rcvintpipe(udev,
+ idesc->endpoint[0].desc.bEndpointAddress);
+ ir->pipe_out = usb_sndintpipe(udev,
+ idesc->endpoint[1].desc.bEndpointAddress);
+ mutex_init(&ir->lock);
+ init_completion(&ir->completion);
+
+ ret = iguanair_get_features(ir);
+ if (ret) {
+ dev_warn(&intf->dev, "failed to get device features\n");
+ goto out;
+ }
+
+ usb_fill_int_urb(ir->urb_in, ir->udev, ir->pipe_in, ir->buf_in,
+ MAX_PACKET_SIZE, iguanair_rx, ir,
+ idesc->endpoint[0].desc.bInterval);
+ ir->urb_in->transfer_dma = ir->dma_in;
+ ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ snprintf(ir->name, sizeof(ir->name),
+ "IguanaWorks USB IR Transceiver version %d.%d",
+ ir->version[0], ir->version[1]);
+
+ usb_make_path(ir->udev, ir->phys, sizeof(ir->phys));
+
+ rc->input_name = ir->name;
+ rc->input_phys = ir->phys;
+ usb_to_input_id(ir->udev, &rc->input_id);
+ rc->dev.parent = &intf->dev;
+ rc->driver_type = RC_DRIVER_IR_RAW;
+ rc->allowed_protos = RC_TYPE_ALL;
+ rc->priv = ir;
+ rc->open = iguanair_open;
+ rc->close = iguanair_close;
+ rc->s_tx_mask = iguanair_set_tx_mask;
+ rc->s_tx_carrier = iguanair_set_tx_carrier;
+ rc->tx_ir = iguanair_tx;
+ rc->driver_name = DRIVER_NAME;
+ rc->map_name = RC_MAP_EMPTY;
+
+ iguanair_set_tx_carrier(rc, 38000);
+
+ ret = rc_register_device(rc);
+ if (ret < 0) {
+ dev_err(&intf->dev, "failed to register rc device %d\n", ret);
+ goto out;
+ }
+
+ usb_set_intfdata(intf, ir);
+
+ dev_info(&intf->dev, "Registered %s\n", ir->name);
+
+ return 0;
+out:
+ if (ir) {
+ usb_free_urb(ir->urb_in);
+ usb_free_coherent(udev, MAX_PACKET_SIZE, ir->buf_in,
+ ir->dma_in);
+ }
+ rc_free_device(rc);
+ kfree(ir);
+ return ret;
+}
+
+static void __devexit iguanair_disconnect(struct usb_interface *intf)
+{
+ struct iguanair *ir = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ usb_kill_urb(ir->urb_in);
+ usb_free_urb(ir->urb_in);
+ usb_free_coherent(ir->udev, MAX_PACKET_SIZE, ir->buf_in, ir->dma_in);
+ rc_unregister_device(ir->rc);
+ kfree(ir);
+}
+
+static int iguanair_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct iguanair *ir = usb_get_intfdata(intf);
+ int rc = 0;
+
+ mutex_lock(&ir->lock);
+
+ if (ir->receiver_on) {
+ rc = iguanair_receiver(ir, false);
+ if (rc)
+ dev_warn(ir->dev, "failed to disable receiver for suspend\n");
+ }
+
+ mutex_unlock(&ir->lock);
+
+ return rc;
+}
+
+static int iguanair_resume(struct usb_interface *intf)
+{
+ struct iguanair *ir = usb_get_intfdata(intf);
+ int rc = 0;
+
+ mutex_lock(&ir->lock);
+
+ if (ir->receiver_on) {
+ rc = iguanair_receiver(ir, true);
+ if (rc)
+ dev_warn(ir->dev, "failed to enable receiver after resume\n");
+ }
+
+ mutex_unlock(&ir->lock);
+
+ return rc;
+}
+
+static const struct usb_device_id iguanair_table[] = {
+ { USB_DEVICE(0x1781, 0x0938) },
+ { }
+};
+
+static struct usb_driver iguanair_driver = {
+ .name = DRIVER_NAME,
+ .probe = iguanair_probe,
+ .disconnect = __devexit_p(iguanair_disconnect),
+ .suspend = iguanair_suspend,
+ .resume = iguanair_resume,
+ .reset_resume = iguanair_resume,
+ .id_table = iguanair_table
+};
+
+module_usb_driver(iguanair_driver);
+
+MODULE_DESCRIPTION("IguanaWorks USB IR Transceiver");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(usb, iguanair_table);
+
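The busy-wait tuning done in iguanair_set_tx_carrier() above boils down to a few integer steps: round the 24 MHz CPU cycles per half-period, subtract the cycle overhead reported by the firmware (65 by default, see iguanair_get_features()), spend just enough 7-cycle delays to make the remainder divisible by 4, and encode the result in the busy7/busy4 fields. A standalone sketch of that arithmetic for a 38 kHz carrier (the main() wrapper and the simplified DIV_ROUND_CLOSEST are illustrative only, not part of the patch):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* positive args only */

int main(void)
{
	unsigned carrier = 38000;		/* Hz */
	unsigned cycle_overhead = 65;		/* default from iguanair_get_features() */
	unsigned cycles, sevens, fours;

	/* 24 MHz CPU cycles per half-period, minus the fixed overhead */
	cycles = DIV_ROUND_CLOSEST(24000000, carrier * 2) - cycle_overhead;

	/* same mapping as the driver's switch on (cycles & 3) */
	sevens = (4 - (cycles & 3)) & 3;
	fours = (cycles - sevens * 7) / 4;

	/* the values the driver loads into busy7 and busy4 */
	printf("cycles=%u sevens=%u fours=%u busy7=%u busy4=%u\n",
	       cycles, sevens, fours, (4 - sevens) * 2, 110 - fours);
	/* prints: cycles=251 sevens=1 fours=61 busy7=6 busy4=49 */
	return 0;
}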
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 84e06d3aa696..f38d9a8c6880 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -199,6 +199,7 @@ static bool debug;
#define VENDOR_REALTEK 0x0bda
#define VENDOR_TIVO 0x105a
#define VENDOR_CONEXANT 0x0572
+#define VENDOR_TWISTEDMELON 0x2596
enum mceusb_model_type {
MCE_GEN2 = 0, /* Most boards */
@@ -391,6 +392,12 @@ static struct usb_device_id mceusb_dev_table[] = {
/* Conexant Hybrid TV RDU253S Polaris */
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a5),
.driver_info = CX_HYBRID_TV },
+ /* Twisted Melon Inc. - Manta Mini Receiver */
+ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8008) },
+ /* Twisted Melon Inc. - Manta Pico Receiver */
+ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) },
+ /* Twisted Melon Inc. - Manta Transceiver */
+ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) },
/* Terminating entry */
{ }
};
@@ -410,14 +417,12 @@ struct mceusb_dev {
/* usb */
struct usb_device *usbdev;
struct urb *urb_in;
- struct usb_endpoint_descriptor *usb_ep_in;
struct usb_endpoint_descriptor *usb_ep_out;
/* buffers and dma */
unsigned char *buf_in;
unsigned int len_in;
dma_addr_t dma_in;
- dma_addr_t dma_out;
enum {
CMD_HEADER = 0,
@@ -686,7 +691,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
dev_info(dev, "Raw IR data, %d pulse/space samples\n", ir->rem);
}
-static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
+static void mce_async_callback(struct urb *urb)
{
struct mceusb_dev *ir;
int len;
@@ -733,7 +738,7 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
pipe = usb_sndintpipe(ir->usbdev,
ir->usb_ep_out->bEndpointAddress);
usb_fill_int_urb(async_urb, ir->usbdev, pipe,
- async_buf, size, (usb_complete_t)mce_async_callback,
+ async_buf, size, mce_async_callback,
ir, ir->usb_ep_out->bInterval);
memcpy(async_buf, data, size);
@@ -1031,7 +1036,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
ir_raw_event_handle(ir->rc);
}
-static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
+static void mceusb_dev_recv(struct urb *urb)
{
struct mceusb_dev *ir;
int buf_len;
@@ -1331,7 +1336,6 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ir->model = model;
/* Saving usb interface data for use by the transmitter routine */
- ir->usb_ep_in = ep_in;
ir->usb_ep_out = ep_out;
if (dev->descriptor.iManufacturer
@@ -1349,8 +1353,8 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
goto rc_dev_fail;
/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
- maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dc8a7dddccd4..699eef39128b 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -25,6 +25,8 @@
* USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
@@ -123,43 +125,40 @@ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
return val;
}
-#define pr_reg(text, ...) \
- printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
-
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
- pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
- pr_reg(" * CR CIR ACTIVE : 0x%x\n",
- nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
- pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
- (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ pr_info("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
+ pr_info(" * CR CIR ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_info(" * CR CIR BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
- pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
- nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+ pr_info(" * CR CIR IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
nvt_efm_disable(nvt);
- pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
- pr_reg(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
- pr_reg(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
- pr_reg(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
- pr_reg(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
- pr_reg(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
- pr_reg(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
- pr_reg(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
- pr_reg(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
- pr_reg(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
- pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
- pr_reg(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
- pr_reg(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
- pr_reg(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
- pr_reg(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
- pr_reg(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
- pr_reg(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
+ pr_info("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
+ pr_info(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
+ pr_info(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
+ pr_info(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
+ pr_info(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
+ pr_info(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
+ pr_info(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
+ pr_info(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
+ pr_info(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
+ pr_info(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
+ pr_info(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
+ pr_info(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
+ pr_info(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
+ pr_info(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
+ pr_info(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
+ pr_info(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
+ pr_info(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}
/* dump current cir wake register contents */
@@ -170,59 +169,59 @@ static void cir_wake_dump_regs(struct nvt_dev *nvt)
nvt_efm_enable(nvt);
nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
- pr_reg("%s: Dump CIR WAKE logical device registers:\n",
- NVT_DRIVER_NAME);
- pr_reg(" * CR CIR WAKE ACTIVE : 0x%x\n",
- nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
- pr_reg(" * CR CIR WAKE BASE ADDR: 0x%x\n",
- (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
+ pr_info("%s: Dump CIR WAKE logical device registers:\n",
+ NVT_DRIVER_NAME);
+ pr_info(" * CR CIR WAKE ACTIVE : 0x%x\n",
+ nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
+ pr_info(" * CR CIR WAKE BASE ADDR: 0x%x\n",
+ (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
- pr_reg(" * CR CIR WAKE IRQ NUM: 0x%x\n",
- nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
+ pr_info(" * CR CIR WAKE IRQ NUM: 0x%x\n",
+ nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));
nvt_efm_disable(nvt);
- pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
- pr_reg(" * IRCON: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
- pr_reg(" * IRSTS: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
- pr_reg(" * IREN: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
- pr_reg(" * FIFO CMP DEEP: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
- pr_reg(" * FIFO CMP TOL: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
- pr_reg(" * FIFO COUNT: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
- pr_reg(" * SLCH: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
- pr_reg(" * SLCL: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
- pr_reg(" * FIFOCON: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
- pr_reg(" * SRXFSTS: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
- pr_reg(" * SAMPLE RX FIFO: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
- pr_reg(" * WR FIFO DATA: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
- pr_reg(" * RD FIFO ONLY: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
- pr_reg(" * RD FIFO ONLY IDX: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
- pr_reg(" * FIFO IGNORE: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
- pr_reg(" * IRFSM: 0x%x\n",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
+ pr_info("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
+ pr_info(" * IRCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
+ pr_info(" * IRSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
+ pr_info(" * IREN: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
+ pr_info(" * FIFO CMP DEEP: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
+ pr_info(" * FIFO CMP TOL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
+ pr_info(" * FIFO COUNT: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
+ pr_info(" * SLCH: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
+ pr_info(" * SLCL: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
+ pr_info(" * FIFOCON: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
+ pr_info(" * SRXFSTS: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
+ pr_info(" * SAMPLE RX FIFO: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
+ pr_info(" * WR FIFO DATA: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
+ pr_info(" * RD FIFO ONLY: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ pr_info(" * RD FIFO ONLY IDX: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
+ pr_info(" * FIFO IGNORE: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
+ pr_info(" * IRFSM: 0x%x\n",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));
fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
- pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
- pr_reg("* Contents = ");
+ pr_info("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
+ pr_info("* Contents =");
for (i = 0; i < fifo_len; i++)
- printk(KERN_CONT "%02x ",
- nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
- printk(KERN_CONT "\n");
+ pr_cont(" %02x",
+ nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
+ pr_cont("\n");
}
/* detect hardware features */
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 6e16b09c24a9..cabc19c10515 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -775,10 +775,11 @@ static ssize_t show_protocols(struct device *device,
if (dev->driver_type == RC_DRIVER_SCANCODE) {
enabled = dev->rc_map.rc_type;
allowed = dev->allowed_protos;
- } else {
+ } else if (dev->raw) {
enabled = dev->raw->enabled_protocols;
allowed = ir_raw_get_allowed_protocols();
- }
+ } else
+ return -ENODEV;
IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n",
(long long)allowed,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 99937c94d7df..c128fac0ce2c 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -5,7 +5,7 @@
config VIDEO_V4L2
tristate
depends on VIDEO_DEV && VIDEO_V4L2_COMMON
- default VIDEO_DEV && VIDEO_V4L2_COMMON
+ default y
config VIDEOBUF_GEN
tristate
@@ -73,6 +73,7 @@ config VIDEOBUF2_DMA_SG
menuconfig VIDEO_CAPTURE_DRIVERS
bool "Video capture adapters"
depends on VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT || MEDIA_ANALOG_TV_SUPPORT
default y
---help---
Say Y here to enable selecting the video adapters for
@@ -461,6 +462,15 @@ config VIDEO_ADV7343
To compile this driver as a module, choose M here: the
module will be called adv7343.
+config VIDEO_ADV7393
+ tristate "ADV7393 video encoder"
+ depends on I2C
+ help
+ Support for Analog Devices I2C bus based ADV7393 encoder.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7393.
+
config VIDEO_AK881X
tristate "AK8813/AK8814 video encoders"
depends on I2C
@@ -478,6 +488,7 @@ config VIDEO_SMIAPP_PLL
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a Video4Linux2 sensor-level driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
@@ -486,6 +497,7 @@ config VIDEO_OV7670
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a Video4Linux2 sensor-level driver for the ST VS6624
camera.
@@ -496,6 +508,7 @@ config VIDEO_VS6624
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
---help---
This driver supports MT9M032 camera sensors from Aptina, monochrome
@@ -504,6 +517,7 @@ config VIDEO_MT9M032
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
select VIDEO_APTINA_PLL
---help---
This is a Video4Linux2 sensor-level driver for the Aptina
@@ -512,6 +526,7 @@ config VIDEO_MT9P031
config VIDEO_MT9T001
tristate "Aptina MT9T001 support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a Video4Linux2 sensor-level driver for the Aptina
(Micron) mt0t001 3 Mpixel camera.
@@ -519,6 +534,7 @@ config VIDEO_MT9T001
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a Video4Linux2 sensor-level driver for the Micron
mt0v011 1.3 Mpixel camera. It currently only works with the
@@ -527,6 +543,7 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a Video4Linux2 sensor-level driver for the Micron
MT9V032 752x480 CMOS sensor.
@@ -534,6 +551,7 @@ config VIDEO_MT9V032
config VIDEO_TCM825X
tristate "TCM825x camera sensor support"
depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a driver for the Toshiba TCM825x VGA camera sensor.
It is used for example in Nokia N800.
@@ -541,12 +559,14 @@ config VIDEO_TCM825X
config VIDEO_SR030PC30
tristate "Siliconfile SR030PC30 sensor support"
depends on I2C && VIDEO_V4L2
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This driver supports SR030PC30 VGA camera from Siliconfile
config VIDEO_NOON010PC30
tristate "Siliconfile NOON010PC30 sensor support"
depends on I2C && VIDEO_V4L2 && EXPERIMENTAL && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This driver supports NOON010PC30 CIF camera from Siliconfile
@@ -554,6 +574,7 @@ source "drivers/media/video/m5mols/Kconfig"
config VIDEO_S5K6AA
tristate "Samsung S5K6AAFX sensor support"
+ depends on MEDIA_CAMERA_SUPPORT
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
---help---
This is a V4L2 sensor-level driver for Samsung S5K6AA(FX) 1.3M
@@ -566,6 +587,7 @@ comment "Flash devices"
config VIDEO_ADP1653
tristate "ADP1653 flash support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a driver for the ADP1653 flash controller. It is used for
example in Nokia N900.
@@ -573,6 +595,7 @@ config VIDEO_ADP1653
config VIDEO_AS3645A
tristate "AS3645A flash driver support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This is a driver for the AS3645A and LM3555 flash controllers. It has
build in control for flash, torch and indicator LEDs.
@@ -647,30 +670,14 @@ menuconfig V4L_USB_DRIVERS
depends on USB
default y
-if V4L_USB_DRIVERS
+if V4L_USB_DRIVERS && MEDIA_CAMERA_SUPPORT
-source "drivers/media/video/au0828/Kconfig"
+ comment "Webcam devices"
source "drivers/media/video/uvc/Kconfig"
source "drivers/media/video/gspca/Kconfig"
-source "drivers/media/video/pvrusb2/Kconfig"
-
-source "drivers/media/video/hdpvr/Kconfig"
-
-source "drivers/media/video/em28xx/Kconfig"
-
-source "drivers/media/video/tlg2300/Kconfig"
-
-source "drivers/media/video/cx231xx/Kconfig"
-
-source "drivers/media/video/tm6000/Kconfig"
-
-source "drivers/media/video/usbvision/Kconfig"
-
-source "drivers/media/video/sn9c102/Kconfig"
-
source "drivers/media/video/pwc/Kconfig"
source "drivers/media/video/cpia2/Kconfig"
@@ -711,15 +718,46 @@ config USB_S2255
Say Y here if you want support for the Sensoray 2255 USB device.
This driver can be compiled as a module, called s2255drv.
+source "drivers/media/video/sn9c102/Kconfig"
+
+endif # V4L_USB_DRIVERS && MEDIA_CAMERA_SUPPORT
+
+if V4L_USB_DRIVERS
+
+ comment "Webcam and/or TV USB devices"
+
+source "drivers/media/video/em28xx/Kconfig"
+
+endif
+
+if V4L_USB_DRIVERS && MEDIA_ANALOG_TV_SUPPORT
+
+ comment "TV USB devices"
+
+source "drivers/media/video/au0828/Kconfig"
+
+source "drivers/media/video/pvrusb2/Kconfig"
+
+source "drivers/media/video/hdpvr/Kconfig"
+
+source "drivers/media/video/tlg2300/Kconfig"
+
+source "drivers/media/video/cx231xx/Kconfig"
+
+source "drivers/media/video/tm6000/Kconfig"
+
+source "drivers/media/video/usbvision/Kconfig"
+
endif # V4L_USB_DRIVERS
#
-# PCI drivers configuration
+# PCI drivers configuration - No devices here are for webcams
#
menuconfig V4L_PCI_DRIVERS
bool "V4L PCI(e) devices"
depends on PCI
+ depends on MEDIA_ANALOG_TV_SUPPORT
default y
---help---
Say Y here to enable support for these PCI(e) drivers.
@@ -814,11 +852,13 @@ endif # V4L_PCI_DRIVERS
#
# ISA & parallel port drivers configuration
+# All devices here are webcam or grabber devices
#
menuconfig V4L_ISA_PARPORT_DRIVERS
bool "V4L ISA and parallel port devices"
depends on ISA || PARPORT
+ depends on MEDIA_CAMERA_SUPPORT
default n
---help---
Say Y here to enable support for these ISA and parallel port drivers.
@@ -871,8 +911,13 @@ config VIDEO_W9966
endif # V4L_ISA_PARPORT_DRIVERS
+#
+# Platform drivers
+# All drivers here are currently for webcam support
+
menuconfig V4L_PLATFORM_DRIVERS
bool "V4L platform devices"
+ depends on MEDIA_CAMERA_SUPPORT
default n
---help---
Say Y here to enable support for platform-specific V4L drivers.
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index d209de0e0ca8..b7da9faa3b0a 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
obj-$(CONFIG_VIDEO_ADV7183) += adv7183.o
obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
+obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
diff --git a/drivers/media/video/adv7180.c b/drivers/media/video/adv7180.c
index 174bffacf117..45ecf8db1eae 100644
--- a/drivers/media/video/adv7180.c
+++ b/drivers/media/video/adv7180.c
@@ -26,11 +26,10 @@
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-chip-ident.h>
#include <linux/mutex.h>
-#define DRIVER_NAME "adv7180"
-
#define ADV7180_INPUT_CONTROL_REG 0x00
#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM 0x00
#define ADV7180_INPUT_CONTROL_AD_PAL_BG_NTSC_J_SECAM_PED 0x10
@@ -55,21 +54,21 @@
#define ADV7180_AUTODETECT_ENABLE_REG 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
-
+/* Contrast */
#define ADV7180_CON_REG 0x08 /*Unsigned */
-#define CON_REG_MIN 0
-#define CON_REG_DEF 128
-#define CON_REG_MAX 255
-
+#define ADV7180_CON_MIN 0
+#define ADV7180_CON_DEF 128
+#define ADV7180_CON_MAX 255
+/* Brightness*/
#define ADV7180_BRI_REG 0x0a /*Signed */
-#define BRI_REG_MIN -128
-#define BRI_REG_DEF 0
-#define BRI_REG_MAX 127
-
+#define ADV7180_BRI_MIN -128
+#define ADV7180_BRI_DEF 0
+#define ADV7180_BRI_MAX 127
+/* Hue */
#define ADV7180_HUE_REG 0x0b /*Signed, inverted */
-#define HUE_REG_MIN -127
-#define HUE_REG_DEF 0
-#define HUE_REG_MAX 128
+#define ADV7180_HUE_MIN -127
+#define ADV7180_HUE_DEF 0
+#define ADV7180_HUE_MAX 128
#define ADV7180_ADI_CTRL_REG 0x0e
#define ADV7180_ADI_CTRL_IRQ_SPACE 0x20
@@ -98,12 +97,12 @@
#define ADV7180_ICONF1_ACTIVE_LOW 0x01
#define ADV7180_ICONF1_PSYNC_ONLY 0x10
#define ADV7180_ICONF1_ACTIVE_TO_CLR 0xC0
-
+/* Saturation */
#define ADV7180_SD_SAT_CB_REG 0xe3 /*Unsigned */
#define ADV7180_SD_SAT_CR_REG 0xe4 /*Unsigned */
-#define SAT_REG_MIN 0
-#define SAT_REG_DEF 128
-#define SAT_REG_MAX 255
+#define ADV7180_SAT_MIN 0
+#define ADV7180_SAT_DEF 128
+#define ADV7180_SAT_MAX 255
#define ADV7180_IRQ1_LOCK 0x01
#define ADV7180_IRQ1_UNLOCK 0x02
@@ -121,18 +120,18 @@
#define ADV7180_NTSC_V_BIT_END_MANUAL_NVEND 0x4F
struct adv7180_state {
+ struct v4l2_ctrl_handler ctrl_hdl;
struct v4l2_subdev sd;
struct work_struct work;
struct mutex mutex; /* mutual excl. when accessing chip */
int irq;
v4l2_std_id curr_norm;
bool autodetect;
- s8 brightness;
- s16 hue;
- u8 contrast;
- u8 saturation;
u8 input;
};
+#define to_adv7180_sd(_ctrl) (&container_of(_ctrl->handler, \
+ struct adv7180_state, \
+ ctrl_hdl)->sd)
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
@@ -237,7 +236,7 @@ static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
if (ret)
return ret;
- /*We cannot discriminate between LQFP and 40-pin LFCSP, so accept
+ /* We cannot discriminate between LQFP and 40-pin LFCSP, so accept
* all inputs and let the card driver take care of validation
*/
if ((input & ADV7180_INPUT_CONTROL_INSEL_MASK) != input)
@@ -316,117 +315,39 @@ out:
return ret;
}
-static int adv7180_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
- switch (qc->id) {
- case V4L2_CID_BRIGHTNESS:
- return v4l2_ctrl_query_fill(qc, BRI_REG_MIN, BRI_REG_MAX,
- 1, BRI_REG_DEF);
- case V4L2_CID_HUE:
- return v4l2_ctrl_query_fill(qc, HUE_REG_MIN, HUE_REG_MAX,
- 1, HUE_REG_DEF);
- case V4L2_CID_CONTRAST:
- return v4l2_ctrl_query_fill(qc, CON_REG_MIN, CON_REG_MAX,
- 1, CON_REG_DEF);
- case V4L2_CID_SATURATION:
- return v4l2_ctrl_query_fill(qc, SAT_REG_MIN, SAT_REG_MAX,
- 1, SAT_REG_DEF);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int adv7180_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
- struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
- if (ret)
- return ret;
-
- switch (ctrl->id) {
- case V4L2_CID_BRIGHTNESS:
- ctrl->value = state->brightness;
- break;
- case V4L2_CID_HUE:
- ctrl->value = state->hue;
- break;
- case V4L2_CID_CONTRAST:
- ctrl->value = state->contrast;
- break;
- case V4L2_CID_SATURATION:
- ctrl->value = state->saturation;
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&state->mutex);
- return ret;
-}
-
-static int adv7180_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
+static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
struct adv7180_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = mutex_lock_interruptible(&state->mutex);
+ int val;
+
if (ret)
return ret;
-
+ val = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- if ((ctrl->value > BRI_REG_MAX)
- || (ctrl->value < BRI_REG_MIN)) {
- ret = -ERANGE;
- break;
- }
- state->brightness = ctrl->value;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_BRI_REG,
- state->brightness);
+ ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG, val);
break;
case V4L2_CID_HUE:
- if ((ctrl->value > HUE_REG_MAX)
- || (ctrl->value < HUE_REG_MIN)) {
- ret = -ERANGE;
- break;
- }
- state->hue = ctrl->value;
/*Hue is inverted according to HSL chart */
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_HUE_REG, -state->hue);
+ ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG, -val);
break;
case V4L2_CID_CONTRAST:
- if ((ctrl->value > CON_REG_MAX)
- || (ctrl->value < CON_REG_MIN)) {
- ret = -ERANGE;
- break;
- }
- state->contrast = ctrl->value;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_CON_REG,
- state->contrast);
+ ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG, val);
break;
case V4L2_CID_SATURATION:
- if ((ctrl->value > SAT_REG_MAX)
- || (ctrl->value < SAT_REG_MIN)) {
- ret = -ERANGE;
- break;
- }
/*
*This could be V4L2_CID_BLUE_BALANCE/V4L2_CID_RED_BALANCE
*Let's not confuse the user, everybody understands saturation
*/
- state->saturation = ctrl->value;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_SD_SAT_CB_REG,
- state->saturation);
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
+ val);
if (ret < 0)
break;
- ret = i2c_smbus_write_byte_data(client,
- ADV7180_SD_SAT_CR_REG,
- state->saturation);
+ ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CR_REG,
+ val);
break;
default:
ret = -EINVAL;
@@ -436,6 +357,42 @@ static int adv7180_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
return ret;
}
+static const struct v4l2_ctrl_ops adv7180_ctrl_ops = {
+ .s_ctrl = adv7180_s_ctrl,
+};
+
+static int adv7180_init_controls(struct adv7180_state *state)
+{
+ v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+
+ v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, ADV7180_BRI_MIN,
+ ADV7180_BRI_MAX, 1, ADV7180_BRI_DEF);
+ v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
+ V4L2_CID_CONTRAST, ADV7180_CON_MIN,
+ ADV7180_CON_MAX, 1, ADV7180_CON_DEF);
+ v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
+ V4L2_CID_SATURATION, ADV7180_SAT_MIN,
+ ADV7180_SAT_MAX, 1, ADV7180_SAT_DEF);
+ v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
+ V4L2_CID_HUE, ADV7180_HUE_MIN,
+ ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF);
+ state->sd.ctrl_handler = &state->ctrl_hdl;
+ if (state->ctrl_hdl.error) {
+ int err = state->ctrl_hdl.error;
+
+ v4l2_ctrl_handler_free(&state->ctrl_hdl);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+
+ return 0;
+}
+static void adv7180_exit_controls(struct adv7180_state *state)
+{
+ v4l2_ctrl_handler_free(&state->ctrl_hdl);
+}
+
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
@@ -445,9 +402,9 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
.s_std = adv7180_s_std,
- .queryctrl = adv7180_queryctrl,
- .g_ctrl = adv7180_g_ctrl,
- .s_ctrl = adv7180_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
};
static const struct v4l2_subdev_ops adv7180_ops = {
@@ -539,7 +496,7 @@ static int init_device(struct i2c_client *client, struct adv7180_state *state)
/* register for interrupts */
if (state->irq > 0) {
- ret = request_irq(state->irq, adv7180_irq, 0, DRIVER_NAME,
+ ret = request_irq(state->irq, adv7180_irq, 0, KBUILD_MODNAME,
state);
if (ret)
return ret;
@@ -580,31 +537,6 @@ static int init_device(struct i2c_client *client, struct adv7180_state *state)
return ret;
}
- /*Set default value for controls */
- ret = i2c_smbus_write_byte_data(client, ADV7180_BRI_REG,
- state->brightness);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, ADV7180_HUE_REG, state->hue);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, ADV7180_CON_REG,
- state->contrast);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CB_REG,
- state->saturation);
- if (ret < 0)
- return ret;
-
- ret = i2c_smbus_write_byte_data(client, ADV7180_SD_SAT_CR_REG,
- state->saturation);
- if (ret < 0)
- return ret;
-
return 0;
}
@@ -632,25 +564,26 @@ static __devinit int adv7180_probe(struct i2c_client *client,
INIT_WORK(&state->work, adv7180_work);
mutex_init(&state->mutex);
state->autodetect = true;
- state->brightness = BRI_REG_DEF;
- state->hue = HUE_REG_DEF;
- state->contrast = CON_REG_DEF;
- state->saturation = SAT_REG_DEF;
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
- ret = init_device(client, state);
- if (0 != ret)
+ ret = adv7180_init_controls(state);
+ if (ret)
goto err_unreg_subdev;
+ ret = init_device(client, state);
+ if (ret)
+ goto err_free_ctrl;
return 0;
+err_free_ctrl:
+ adv7180_exit_controls(state);
err_unreg_subdev:
mutex_destroy(&state->mutex);
v4l2_device_unregister_subdev(sd);
kfree(state);
err:
- printk(KERN_ERR DRIVER_NAME ": Failed to probe: %d\n", ret);
+ printk(KERN_ERR KBUILD_MODNAME ": Failed to probe: %d\n", ret);
return ret;
}
@@ -678,7 +611,7 @@ static __devexit int adv7180_remove(struct i2c_client *client)
}
static const struct i2c_device_id adv7180_id[] = {
- {DRIVER_NAME, 0},
+ {KBUILD_MODNAME, 0},
{},
};
@@ -716,7 +649,7 @@ MODULE_DEVICE_TABLE(i2c, adv7180_id);
static struct i2c_driver adv7180_driver = {
.driver = {
.owner = THIS_MODULE,
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
},
.probe = adv7180_probe,
.remove = __devexit_p(adv7180_remove),
diff --git a/drivers/media/video/adv7393.c b/drivers/media/video/adv7393.c
new file mode 100644
index 000000000000..3dc6098c7267
--- /dev/null
+++ b/drivers/media/video/adv7393.c
@@ -0,0 +1,487 @@
+/*
+ * adv7393 - ADV7393 Video Encoder Driver
+ *
+ * The encoder hardware does not support SECAM.
+ *
+ * Copyright (C) 2010-2012 ADVANSEE - http://www.advansee.com/
+ * Benoît Thébaudeau <benoit.thebaudeau@advansee.com>
+ *
+ * Based on ADV7343 driver,
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include <media/adv7393.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-chip-ident.h>
+#include <media/v4l2-ctrls.h>
+
+#include "adv7393_regs.h"
+
+MODULE_DESCRIPTION("ADV7393 video encoder driver");
+MODULE_LICENSE("GPL");
+
+static bool debug;
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-1");
+
+struct adv7393_state {
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl_handler hdl;
+ u8 reg00;
+ u8 reg01;
+ u8 reg02;
+ u8 reg35;
+ u8 reg80;
+ u8 reg82;
+ u32 output;
+ v4l2_std_id std;
+};
+
+static inline struct adv7393_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7393_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7393_state, hdl)->sd;
+}
+
+static inline int adv7393_write(struct v4l2_subdev *sd, u8 reg, u8 value)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static const u8 adv7393_init_reg_val[] = {
+ ADV7393_SOFT_RESET, ADV7393_SOFT_RESET_DEFAULT,
+ ADV7393_POWER_MODE_REG, ADV7393_POWER_MODE_REG_DEFAULT,
+
+ ADV7393_HD_MODE_REG1, ADV7393_HD_MODE_REG1_DEFAULT,
+ ADV7393_HD_MODE_REG2, ADV7393_HD_MODE_REG2_DEFAULT,
+ ADV7393_HD_MODE_REG3, ADV7393_HD_MODE_REG3_DEFAULT,
+ ADV7393_HD_MODE_REG4, ADV7393_HD_MODE_REG4_DEFAULT,
+ ADV7393_HD_MODE_REG5, ADV7393_HD_MODE_REG5_DEFAULT,
+ ADV7393_HD_MODE_REG6, ADV7393_HD_MODE_REG6_DEFAULT,
+ ADV7393_HD_MODE_REG7, ADV7393_HD_MODE_REG7_DEFAULT,
+
+ ADV7393_SD_MODE_REG1, ADV7393_SD_MODE_REG1_DEFAULT,
+ ADV7393_SD_MODE_REG2, ADV7393_SD_MODE_REG2_DEFAULT,
+ ADV7393_SD_MODE_REG3, ADV7393_SD_MODE_REG3_DEFAULT,
+ ADV7393_SD_MODE_REG4, ADV7393_SD_MODE_REG4_DEFAULT,
+ ADV7393_SD_MODE_REG5, ADV7393_SD_MODE_REG5_DEFAULT,
+ ADV7393_SD_MODE_REG6, ADV7393_SD_MODE_REG6_DEFAULT,
+ ADV7393_SD_MODE_REG7, ADV7393_SD_MODE_REG7_DEFAULT,
+ ADV7393_SD_MODE_REG8, ADV7393_SD_MODE_REG8_DEFAULT,
+
+ ADV7393_SD_TIMING_REG0, ADV7393_SD_TIMING_REG0_DEFAULT,
+
+ ADV7393_SD_HUE_ADJUST, ADV7393_SD_HUE_ADJUST_DEFAULT,
+ ADV7393_SD_CGMS_WSS0, ADV7393_SD_CGMS_WSS0_DEFAULT,
+ ADV7393_SD_BRIGHTNESS_WSS, ADV7393_SD_BRIGHTNESS_WSS_DEFAULT,
+};
+
+/*
+ * Subcarrier frequency register value:
+ *
+ * FSC(reg) = FSC(Hz) * 2^32 / 27000000
+ */
+static const struct adv7393_std_info stdinfo[] = {
+ {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_NTSC, 705268427, V4L2_STD_NTSC_443,
+ }, {
+ /* FSC(Hz) = 3,579,545.45 Hz */
+ SD_STD_NTSC, 569408542, V4L2_STD_NTSC,
+ }, {
+ /* FSC(Hz) = 3,575,611.00 Hz */
+ SD_STD_PAL_M, 568782678, V4L2_STD_PAL_M,
+ }, {
+ /* FSC(Hz) = 3,582,056.00 Hz */
+ SD_STD_PAL_N, 569807903, V4L2_STD_PAL_Nc,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_N, 705268427, V4L2_STD_PAL_N,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_M, 705268427, V4L2_STD_PAL_60,
+ }, {
+ /* FSC(Hz) = 4,433,618.75 Hz */
+ SD_STD_PAL_BDGHI, 705268427, V4L2_STD_PAL,
+ },
+};
+
+static int adv7393_setstd(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv7393_state *state = to_state(sd);
+ const struct adv7393_std_info *std_info;
+ int num_std;
+ u8 reg;
+ u32 val;
+ int err = 0;
+ int i;
+
+ num_std = ARRAY_SIZE(stdinfo);
+
+ for (i = 0; i < num_std; i++) {
+ if (stdinfo[i].stdid & std)
+ break;
+ }
+
+ if (i == num_std) {
+ v4l2_dbg(1, debug, sd,
+ "Invalid std or std is not supported: %llx\n",
+ (unsigned long long)std);
+ return -EINVAL;
+ }
+
+ std_info = &stdinfo[i];
+
+ /* Set the standard */
+ val = state->reg80 & ~SD_STD_MASK;
+ val |= std_info->standard_val3;
+ err = adv7393_write(sd, ADV7393_SD_MODE_REG1, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg80 = val;
+
+ /* Configure the input mode register */
+ val = state->reg01 & ~INPUT_MODE_MASK;
+ val |= SD_INPUT_MODE;
+ err = adv7393_write(sd, ADV7393_MODE_SELECT_REG, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg01 = val;
+
+ /* Program the sub carrier frequency registers */
+ val = std_info->fsc_val;
+ for (reg = ADV7393_FSC_REG0; reg <= ADV7393_FSC_REG3; reg++) {
+ err = adv7393_write(sd, reg, val);
+ if (err < 0)
+ goto setstd_exit;
+ val >>= 8;
+ }
+
+ val = state->reg82;
+
+ /* Pedestal settings */
+ if (std & (V4L2_STD_NTSC | V4L2_STD_NTSC_443))
+ val |= SD_PEDESTAL_EN;
+ else
+ val &= SD_PEDESTAL_DI;
+
+ err = adv7393_write(sd, ADV7393_SD_MODE_REG2, val);
+ if (err < 0)
+ goto setstd_exit;
+
+ state->reg82 = val;
+
+setstd_exit:
+ if (err != 0)
+ v4l2_err(sd, "Error setting std, write failed\n");
+
+ return err;
+}
+
+static int adv7393_setoutput(struct v4l2_subdev *sd, u32 output_type)
+{
+ struct adv7393_state *state = to_state(sd);
+ u8 val;
+ int err = 0;
+
+ if (output_type > ADV7393_SVIDEO_ID) {
+ v4l2_dbg(1, debug, sd,
+ "Invalid output type or output type not supported:%d\n",
+ output_type);
+ return -EINVAL;
+ }
+
+ /* Enable Appropriate DAC */
+ val = state->reg00 & 0x03;
+
+ if (output_type == ADV7393_COMPOSITE_ID)
+ val |= ADV7393_COMPOSITE_POWER_VALUE;
+ else if (output_type == ADV7393_COMPONENT_ID)
+ val |= ADV7393_COMPONENT_POWER_VALUE;
+ else
+ val |= ADV7393_SVIDEO_POWER_VALUE;
+
+ err = adv7393_write(sd, ADV7393_POWER_MODE_REG, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg00 = val;
+
+ /* Enable YUV output */
+ val = state->reg02 | YUV_OUTPUT_SELECT;
+ err = adv7393_write(sd, ADV7393_MODE_REG0, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg02 = val;
+
+ /* configure SD DAC Output 1 bit */
+ val = state->reg82;
+ if (output_type == ADV7393_COMPONENT_ID)
+ val &= SD_DAC_OUT1_DI;
+ else
+ val |= SD_DAC_OUT1_EN;
+ err = adv7393_write(sd, ADV7393_SD_MODE_REG2, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg82 = val;
+
+ /* configure ED/HD Color DAC Swap bit to zero */
+ val = state->reg35 & HD_DAC_SWAP_DI;
+ err = adv7393_write(sd, ADV7393_HD_MODE_REG6, val);
+ if (err < 0)
+ goto setoutput_exit;
+
+ state->reg35 = val;
+
+setoutput_exit:
+ if (err != 0)
+ v4l2_err(sd, "Error setting output, write failed\n");
+
+ return err;
+}
+
+static int adv7393_log_status(struct v4l2_subdev *sd)
+{
+ struct adv7393_state *state = to_state(sd);
+
+ v4l2_info(sd, "Standard: %llx\n", (unsigned long long)state->std);
+ v4l2_info(sd, "Output: %s\n", (state->output == 0) ? "Composite" :
+ ((state->output == 1) ? "Component" : "S-Video"));
+ return 0;
+}
+
+static int adv7393_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ return adv7393_write(sd, ADV7393_SD_BRIGHTNESS_WSS,
+ ctrl->val & SD_BRIGHTNESS_VALUE_MASK);
+
+ case V4L2_CID_HUE:
+ return adv7393_write(sd, ADV7393_SD_HUE_ADJUST,
+ ctrl->val - ADV7393_HUE_MIN);
+
+ case V4L2_CID_GAIN:
+ return adv7393_write(sd, ADV7393_DAC123_OUTPUT_LEVEL,
+ ctrl->val);
+ }
+ return -EINVAL;
+}
+
+static int adv7393_g_chip_ident(struct v4l2_subdev *sd,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7393, 0);
+}
+
+static const struct v4l2_ctrl_ops adv7393_ctrl_ops = {
+ .s_ctrl = adv7393_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops adv7393_core_ops = {
+ .log_status = adv7393_log_status,
+ .g_chip_ident = adv7393_g_chip_ident,
+ .g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
+ .try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
+ .s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
+ .g_ctrl = v4l2_subdev_g_ctrl,
+ .s_ctrl = v4l2_subdev_s_ctrl,
+ .queryctrl = v4l2_subdev_queryctrl,
+ .querymenu = v4l2_subdev_querymenu,
+};
+
+static int adv7393_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
+{
+ struct adv7393_state *state = to_state(sd);
+ int err = 0;
+
+ if (state->std == std)
+ return 0;
+
+ err = adv7393_setstd(sd, std);
+ if (!err)
+ state->std = std;
+
+ return err;
+}
+
+static int adv7393_s_routing(struct v4l2_subdev *sd,
+ u32 input, u32 output, u32 config)
+{
+ struct adv7393_state *state = to_state(sd);
+ int err = 0;
+
+ if (state->output == output)
+ return 0;
+
+ err = adv7393_setoutput(sd, output);
+ if (!err)
+ state->output = output;
+
+ return err;
+}
+
+static const struct v4l2_subdev_video_ops adv7393_video_ops = {
+ .s_std_output = adv7393_s_std_output,
+ .s_routing = adv7393_s_routing,
+};
+
+static const struct v4l2_subdev_ops adv7393_ops = {
+ .core = &adv7393_core_ops,
+ .video = &adv7393_video_ops,
+};
+
+static int adv7393_initialize(struct v4l2_subdev *sd)
+{
+ struct adv7393_state *state = to_state(sd);
+ int err = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adv7393_init_reg_val); i += 2) {
+
+ err = adv7393_write(sd, adv7393_init_reg_val[i],
+ adv7393_init_reg_val[i+1]);
+ if (err) {
+ v4l2_err(sd, "Error initializing\n");
+ return err;
+ }
+ }
+
+ /* Configure for default video standard */
+ err = adv7393_setoutput(sd, state->output);
+ if (err < 0) {
+ v4l2_err(sd, "Error setting output during init\n");
+ return -EINVAL;
+ }
+
+ err = adv7393_setstd(sd, state->std);
+ if (err < 0) {
+ v4l2_err(sd, "Error setting std during init\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int adv7393_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7393_state *state;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ v4l_info(client, "chip found @ 0x%x (%s)\n",
+ client->addr << 1, client->adapter->name);
+
+ state = kzalloc(sizeof(struct adv7393_state), GFP_KERNEL);
+ if (state == NULL)
+ return -ENOMEM;
+
+ state->reg00 = ADV7393_POWER_MODE_REG_DEFAULT;
+ state->reg01 = 0x00;
+ state->reg02 = 0x20;
+ state->reg35 = ADV7393_HD_MODE_REG6_DEFAULT;
+ state->reg80 = ADV7393_SD_MODE_REG1_DEFAULT;
+ state->reg82 = ADV7393_SD_MODE_REG2_DEFAULT;
+
+ state->output = ADV7393_COMPOSITE_ID;
+ state->std = V4L2_STD_NTSC;
+
+ v4l2_i2c_subdev_init(&state->sd, client, &adv7393_ops);
+
+ v4l2_ctrl_handler_init(&state->hdl, 3);
+ v4l2_ctrl_new_std(&state->hdl, &adv7393_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, ADV7393_BRIGHTNESS_MIN,
+ ADV7393_BRIGHTNESS_MAX, 1,
+ ADV7393_BRIGHTNESS_DEF);
+ v4l2_ctrl_new_std(&state->hdl, &adv7393_ctrl_ops,
+ V4L2_CID_HUE, ADV7393_HUE_MIN,
+ ADV7393_HUE_MAX, 1,
+ ADV7393_HUE_DEF);
+ v4l2_ctrl_new_std(&state->hdl, &adv7393_ctrl_ops,
+ V4L2_CID_GAIN, ADV7393_GAIN_MIN,
+ ADV7393_GAIN_MAX, 1,
+ ADV7393_GAIN_DEF);
+ state->sd.ctrl_handler = &state->hdl;
+ if (state->hdl.error) {
+ int err = state->hdl.error;
+
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ return err;
+ }
+ v4l2_ctrl_handler_setup(&state->hdl);
+
+ err = adv7393_initialize(&state->sd);
+ if (err) {
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+ }
+ return err;
+}
+
+static int adv7393_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7393_state *state = to_state(sd);
+
+ v4l2_device_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&state->hdl);
+ kfree(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id adv7393_id[] = {
+ {"adv7393", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, adv7393_id);
+
+static struct i2c_driver adv7393_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7393",
+ },
+ .probe = adv7393_probe,
+ .remove = adv7393_remove,
+ .id_table = adv7393_id,
+};
+module_i2c_driver(adv7393_driver);
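The stdinfo[] table near the top of adv7393.c encodes each subcarrier frequency with the register formula noted in the comment there, FSC(reg) = FSC(Hz) * 2^32 / 27000000. Here is a standalone userspace check of two table entries, not driver code; rounding to the nearest integer is an assumption that happens to reproduce the table values.

  #include <stdio.h>
  #include <stdint.h>

  /* FSC(reg) = FSC(Hz) * 2^32 / 27 MHz, rounded to the nearest integer. */
  static uint32_t fsc_to_reg(double fsc_hz)
  {
          return (uint32_t)(fsc_hz * 4294967296.0 / 27000000.0 + 0.5);
  }

  int main(void)
  {
          /* 4,433,618.75 Hz (PAL / NTSC-443) -> 705268427 in the table */
          printf("4.433619 MHz -> %u\n", fsc_to_reg(4433618.75));
          /* 3,579,545.45 Hz (NTSC-M) -> 569408542 in the table */
          printf("3.579545 MHz -> %u\n", fsc_to_reg(3579545.45));
          return 0;
  }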
diff --git a/drivers/media/video/adv7393_regs.h b/drivers/media/video/adv7393_regs.h
new file mode 100644
index 000000000000..78968330f0be
--- /dev/null
+++ b/drivers/media/video/adv7393_regs.h
@@ -0,0 +1,188 @@
+/*
+ * ADV7393 encoder related structure and register definitions
+ *
+ * Copyright (C) 2010-2012 ADVANSEE - http://www.advansee.com/
+ * Benoît Thébaudeau <benoit.thebaudeau@advansee.com>
+ *
+ * Based on ADV7343 driver,
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ADV7393_REGS_H
+#define ADV7393_REGS_H
+
+struct adv7393_std_info {
+ u32 standard_val3;
+ u32 fsc_val;
+ v4l2_std_id stdid;
+};
+
+/* Register offset macros */
+#define ADV7393_POWER_MODE_REG (0x00)
+#define ADV7393_MODE_SELECT_REG (0x01)
+#define ADV7393_MODE_REG0 (0x02)
+
+#define ADV7393_DAC123_OUTPUT_LEVEL (0x0B)
+
+#define ADV7393_SOFT_RESET (0x17)
+
+#define ADV7393_HD_MODE_REG1 (0x30)
+#define ADV7393_HD_MODE_REG2 (0x31)
+#define ADV7393_HD_MODE_REG3 (0x32)
+#define ADV7393_HD_MODE_REG4 (0x33)
+#define ADV7393_HD_MODE_REG5 (0x34)
+#define ADV7393_HD_MODE_REG6 (0x35)
+
+#define ADV7393_HD_MODE_REG7 (0x39)
+
+#define ADV7393_SD_MODE_REG1 (0x80)
+#define ADV7393_SD_MODE_REG2 (0x82)
+#define ADV7393_SD_MODE_REG3 (0x83)
+#define ADV7393_SD_MODE_REG4 (0x84)
+#define ADV7393_SD_MODE_REG5 (0x86)
+#define ADV7393_SD_MODE_REG6 (0x87)
+#define ADV7393_SD_MODE_REG7 (0x88)
+#define ADV7393_SD_MODE_REG8 (0x89)
+
+#define ADV7393_SD_TIMING_REG0 (0x8A)
+
+#define ADV7393_FSC_REG0 (0x8C)
+#define ADV7393_FSC_REG1 (0x8D)
+#define ADV7393_FSC_REG2 (0x8E)
+#define ADV7393_FSC_REG3 (0x8F)
+
+#define ADV7393_SD_CGMS_WSS0 (0x99)
+
+#define ADV7393_SD_HUE_ADJUST (0xA0)
+#define ADV7393_SD_BRIGHTNESS_WSS (0xA1)
+
+/* Default values for the registers */
+#define ADV7393_POWER_MODE_REG_DEFAULT (0x10)
+#define ADV7393_HD_MODE_REG1_DEFAULT (0x3C) /* Changed default: 720p EAV/SAV code */
+#define ADV7393_HD_MODE_REG2_DEFAULT (0x01) /* Changed: pixel data valid */
+#define ADV7393_HD_MODE_REG3_DEFAULT (0x00) /* Color delay 0 clks */
+#define ADV7393_HD_MODE_REG4_DEFAULT (0xEC) /* Changed */
+#define ADV7393_HD_MODE_REG5_DEFAULT (0x08)
+#define ADV7393_HD_MODE_REG6_DEFAULT (0x00)
+#define ADV7393_HD_MODE_REG7_DEFAULT (0x00)
+#define ADV7393_SOFT_RESET_DEFAULT (0x02)
+#define ADV7393_COMPOSITE_POWER_VALUE (0x10)
+#define ADV7393_COMPONENT_POWER_VALUE (0x1C)
+#define ADV7393_SVIDEO_POWER_VALUE (0x0C)
+#define ADV7393_SD_HUE_ADJUST_DEFAULT (0x80)
+#define ADV7393_SD_BRIGHTNESS_WSS_DEFAULT (0x00)
+
+#define ADV7393_SD_CGMS_WSS0_DEFAULT (0x10)
+
+#define ADV7393_SD_MODE_REG1_DEFAULT (0x10)
+#define ADV7393_SD_MODE_REG2_DEFAULT (0xC9)
+#define ADV7393_SD_MODE_REG3_DEFAULT (0x00)
+#define ADV7393_SD_MODE_REG4_DEFAULT (0x00)
+#define ADV7393_SD_MODE_REG5_DEFAULT (0x02)
+#define ADV7393_SD_MODE_REG6_DEFAULT (0x8C)
+#define ADV7393_SD_MODE_REG7_DEFAULT (0x14)
+#define ADV7393_SD_MODE_REG8_DEFAULT (0x00)
+
+#define ADV7393_SD_TIMING_REG0_DEFAULT (0x0C)
+
+/* Bit masks for Mode Select Register */
+#define INPUT_MODE_MASK (0x70)
+#define SD_INPUT_MODE (0x00)
+#define HD_720P_INPUT_MODE (0x10)
+#define HD_1080I_INPUT_MODE (0x10)
+
+/* Bit masks for Mode Register 0 */
+#define TEST_PATTERN_BLACK_BAR_EN (0x04)
+#define YUV_OUTPUT_SELECT (0x20)
+#define RGB_OUTPUT_SELECT (0xDF)
+
+/* Bit masks for SD brightness/WSS */
+#define SD_BRIGHTNESS_VALUE_MASK (0x7F)
+#define SD_BLANK_WSS_DATA_MASK (0x80)
+
+/* Bit masks for soft reset register */
+#define SOFT_RESET (0x02)
+
+/* Bit masks for HD Mode Register 1 */
+#define OUTPUT_STD_MASK (0x03)
+#define OUTPUT_STD_SHIFT (0)
+#define OUTPUT_STD_EIA0_2 (0x00)
+#define OUTPUT_STD_EIA0_1 (0x01)
+#define OUTPUT_STD_FULL (0x02)
+#define EMBEDDED_SYNC (0x04)
+#define EXTERNAL_SYNC (0xFB)
+#define STD_MODE_MASK (0x1F)
+#define STD_MODE_SHIFT (3)
+#define STD_MODE_720P (0x05)
+#define STD_MODE_720P_25 (0x08)
+#define STD_MODE_720P_30 (0x07)
+#define STD_MODE_720P_50 (0x06)
+#define STD_MODE_1080I (0x0D)
+#define STD_MODE_1080I_25 (0x0E)
+#define STD_MODE_1080P_24 (0x11)
+#define STD_MODE_1080P_25 (0x10)
+#define STD_MODE_1080P_30 (0x0F)
+#define STD_MODE_525P (0x00)
+#define STD_MODE_625P (0x03)
+
+/* Bit masks for SD Mode Register 1 */
+#define SD_STD_MASK (0x03)
+#define SD_STD_NTSC (0x00)
+#define SD_STD_PAL_BDGHI (0x01)
+#define SD_STD_PAL_M (0x02)
+#define SD_STD_PAL_N (0x03)
+#define SD_LUMA_FLTR_MASK (0x07)
+#define SD_LUMA_FLTR_SHIFT (2)
+#define SD_CHROMA_FLTR_MASK (0x07)
+#define SD_CHROMA_FLTR_SHIFT (5)
+
+/* Bit masks for SD Mode Register 2 */
+#define SD_PRPB_SSAF_EN (0x01)
+#define SD_PRPB_SSAF_DI (0xFE)
+#define SD_DAC_OUT1_EN (0x02)
+#define SD_DAC_OUT1_DI (0xFD)
+#define SD_PEDESTAL_EN (0x08)
+#define SD_PEDESTAL_DI (0xF7)
+#define SD_SQUARE_PIXEL_EN (0x10)
+#define SD_SQUARE_PIXEL_DI (0xEF)
+#define SD_PIXEL_DATA_VALID (0x40)
+#define SD_ACTIVE_EDGE_EN (0x80)
+#define SD_ACTIVE_EDGE_DI (0x7F)
+
+/* Bit masks for HD Mode Register 6 */
+#define HD_PRPB_SYNC_EN (0x04)
+#define HD_PRPB_SYNC_DI (0xFB)
+#define HD_DAC_SWAP_EN (0x08)
+#define HD_DAC_SWAP_DI (0xF7)
+#define HD_GAMMA_CURVE_A (0xEF)
+#define HD_GAMMA_CURVE_B (0x10)
+#define HD_GAMMA_EN (0x20)
+#define HD_GAMMA_DI (0xDF)
+#define HD_ADPT_FLTR_MODEA (0xBF)
+#define HD_ADPT_FLTR_MODEB (0x40)
+#define HD_ADPT_FLTR_EN (0x80)
+#define HD_ADPT_FLTR_DI (0x7F)
+
+#define ADV7393_BRIGHTNESS_MAX (63)
+#define ADV7393_BRIGHTNESS_MIN (-64)
+#define ADV7393_BRIGHTNESS_DEF (0)
+#define ADV7393_HUE_MAX (127)
+#define ADV7393_HUE_MIN (-128)
+#define ADV7393_HUE_DEF (0)
+#define ADV7393_GAIN_MAX (64)
+#define ADV7393_GAIN_MIN (-64)
+#define ADV7393_GAIN_DEF (0)
+
+#endif
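adv7393_regs.h pairs most register bits with an *_EN constant (the bit itself, OR'ed in to set it) and an *_DI constant (its byte-wide complement, AND'ed in to clear it), which is how adv7393_setstd() and adv7393_setoutput() flip the pedestal and DAC bits above. A small sketch of that convention using the SD_MODE_REG2 masks from this header; the helper name and its bool parameters are illustrative only.

  #include <linux/types.h>

  /* Masks copied from adv7393_regs.h above; the helper itself is made up. */
  #define SD_DAC_OUT1_EN  (0x02)
  #define SD_DAC_OUT1_DI  (0xFD)  /* one-byte complement of SD_DAC_OUT1_EN */
  #define SD_PEDESTAL_EN  (0x08)
  #define SD_PEDESTAL_DI  (0xF7)

  static u8 sd_mode2_update(u8 reg, bool pedestal, bool dac_out1)
  {
          if (pedestal)
                  reg |= SD_PEDESTAL_EN;  /* OR in the *_EN bit to set it */
          else
                  reg &= SD_PEDESTAL_DI;  /* AND with *_DI to clear it */

          if (dac_out1)
                  reg |= SD_DAC_OUT1_EN;
          else
                  reg &= SD_DAC_OUT1_DI;

          return reg;
  }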
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 856ab962cd63..38952faaffda 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -345,7 +345,7 @@ static struct CARD {
{ 0x15401836, BTTV_BOARD_PV183, "Provideo PV183-7" },
{ 0x15401837, BTTV_BOARD_PV183, "Provideo PV183-8" },
{ 0x3116f200, BTTV_BOARD_TVT_TD3116, "Tongwei Video Technology TD-3116" },
-
+ { 0x02280279, BTTV_BOARD_APOSONIC_WDVR, "Aposonic W-DVR" },
{ 0, -1, NULL }
};
@@ -676,6 +676,7 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.has_remote = 1,
+ .has_radio = 1, /* not every card has radio */
},
[BTTV_BOARD_VOBIS_BOOSTAR] = {
.name = "Terratec TerraTV+ Version 1.0 (Bt848)/ Terra TValue Version 1.0/ Vobis TV-Boostar",
@@ -2817,6 +2818,14 @@ struct tvcard bttv_tvcards[] = {
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
},
+ [BTTV_BOARD_APOSONIC_WDVR] = {
+ .name = "Aposonic W-DVR",
+ .video_inputs = 4,
+ .svhs = NO_SVHS,
+ .muxsel = MUXSEL(2, 3, 1, 0),
+ .tuner_type = TUNER_ABSENT,
+ },
+
};
static const unsigned int bttv_num_tvcards = ARRAY_SIZE(bttv_tvcards);
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index ff7a589d8e0f..b58ff87db771 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -558,12 +558,6 @@ static const struct bttv_format formats[] = {
.depth = 16,
.flags = FORMAT_FLAGS_PACKED,
},{
- .name = "4:2:2, packed, YUYV",
- .fourcc = V4L2_PIX_FMT_YUYV,
- .btformat = BT848_COLOR_FMT_YUY2,
- .depth = 16,
- .flags = FORMAT_FLAGS_PACKED,
- },{
.name = "4:2:2, packed, UYVY",
.fourcc = V4L2_PIX_FMT_UYVY,
.btformat = BT848_COLOR_FMT_YUY2,
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index acfe2f3b92d9..79a11240a590 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -184,7 +184,7 @@
#define BTTV_BOARD_GEOVISION_GV800S_SL 0x9e
#define BTTV_BOARD_PV183 0x9f
#define BTTV_BOARD_TVT_TD3116 0xa0
-
+#define BTTV_BOARD_APOSONIC_WDVR 0xa1
/* more card-specific defines */
#define PT2254_L_CHANNEL 0x10
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 55e92902a76c..a62a7b739991 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -932,7 +932,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
buf->sequence = cam->buffers[buf->index].seq;
buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
buf->length = cam->frame_size;
- buf->input = 0;
+ buf->reserved2 = 0;
buf->reserved = 0;
memset(&buf->timecode, 0, sizeof(buf->timecode));
diff --git a/drivers/media/video/cs8420.h b/drivers/media/video/cs8420.h
deleted file mode 100644
index 621c0c6678ea..000000000000
--- a/drivers/media/video/cs8420.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* cs8420.h - cs8420 initializations
- Copyright (C) 1999 Nathan Laredo (laredo@gnu.org)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- */
-#ifndef __CS8420_H__
-#define __CS8420_H__
-
-/* Initialization Sequence */
-
-static __u8 init8420[] = {
- 1, 0x01, 2, 0x02, 3, 0x00, 4, 0x46,
- 5, 0x24, 6, 0x84, 18, 0x18, 19, 0x13,
-};
-
-#define INIT8420LEN (sizeof(init8420)/2)
-
-static __u8 mode8420pro[] = { /* professional output mode */
- 32, 0xa1, 33, 0x00, 34, 0x00, 35, 0x00,
- 36, 0x00, 37, 0x00, 38, 0x00, 39, 0x00,
- 40, 0x00, 41, 0x00, 42, 0x00, 43, 0x00,
- 44, 0x00, 45, 0x00, 46, 0x00, 47, 0x00,
- 48, 0x00, 49, 0x00, 50, 0x00, 51, 0x00,
- 52, 0x00, 53, 0x00, 54, 0x00, 55, 0x00,
-};
-#define MODE8420LEN (sizeof(mode8420pro)/2)
-
-static __u8 mode8420con[] = { /* consumer output mode */
- 32, 0x20, 33, 0x00, 34, 0x00, 35, 0x48,
- 36, 0x00, 37, 0x00, 38, 0x00, 39, 0x00,
- 40, 0x00, 41, 0x00, 42, 0x00, 43, 0x00,
- 44, 0x00, 45, 0x00, 46, 0x00, 47, 0x00,
- 48, 0x00, 49, 0x00, 50, 0x00, 51, 0x00,
- 52, 0x00, 53, 0x00, 54, 0x00, 55, 0x00,
-};
-
-#endif
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index 35fde4e931f5..e9912db3b496 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -1142,24 +1142,6 @@ static long cx18_default(struct file *file, void *fh, bool valid_prio,
return 0;
}
-long cx18_v4l2_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct video_device *vfd = video_devdata(filp);
- struct cx18_open_id *id = file2id(filp);
- struct cx18 *cx = id->cx;
- long res;
-
- mutex_lock(&cx->serialize_lock);
-
- if (cx18_debug & CX18_DBGFLG_IOCTL)
- vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
- res = video_ioctl2(filp, cmd, arg);
- vfd->debug = 0;
- mutex_unlock(&cx->serialize_lock);
- return res;
-}
-
static const struct v4l2_ioctl_ops cx18_ioctl_ops = {
.vidioc_querycap = cx18_querycap,
.vidioc_s_audio = cx18_s_audio,
diff --git a/drivers/media/video/cx18/cx18-ioctl.h b/drivers/media/video/cx18/cx18-ioctl.h
index dcb2559ad520..2f9dd591ee0f 100644
--- a/drivers/media/video/cx18/cx18-ioctl.h
+++ b/drivers/media/video/cx18/cx18-ioctl.h
@@ -29,5 +29,3 @@ void cx18_set_funcs(struct video_device *vdev);
int cx18_s_std(struct file *file, void *fh, v4l2_std_id *std);
int cx18_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
int cx18_s_input(struct file *file, void *fh, unsigned int inp);
-long cx18_v4l2_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 4185bcb80ca3..9d598ab88615 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -40,8 +40,7 @@ static struct v4l2_file_operations cx18_v4l2_enc_fops = {
.owner = THIS_MODULE,
.read = cx18_v4l2_read,
.open = cx18_v4l2_open,
- /* FIXME change to video_ioctl2 if serialization lock can be removed */
- .unlocked_ioctl = cx18_v4l2_ioctl,
+ .unlocked_ioctl = video_ioctl2,
.release = cx18_v4l2_close,
.poll = cx18_v4l2_enc_poll,
.mmap = cx18_v4l2_mmap,
@@ -376,6 +375,7 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
s->video_dev->fops = &cx18_v4l2_enc_fops;
s->video_dev->release = video_device_release;
s->video_dev->tvnorms = V4L2_STD_ALL;
+ s->video_dev->lock = &cx->serialize_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &s->video_dev->flags);
cx18_set_funcs(s->video_dev);
return 0;
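The cx18-streams.c change above drops the driver's serializing ioctl wrapper: with video_dev->lock pointing at cx->serialize_lock, the V4L2 core takes that mutex around every ioctl itself, so plain video_ioctl2 can be used as unlocked_ioctl. A minimal sketch of that core-locking pattern for a hypothetical driver; only video_ioctl2, the video_device fields and video_register_device() are real APIs here, the foo_* names are placeholders.

  #include <linux/module.h>
  #include <linux/mutex.h>
  #include <media/v4l2-dev.h>
  #include <media/v4l2-ioctl.h>

  struct foo_dev {
          struct mutex lock;              /* serializes every ioctl */
          struct video_device *vdev;
  };

  static const struct v4l2_file_operations foo_fops = {
          .owner          = THIS_MODULE,
          .unlocked_ioctl = video_ioctl2, /* no per-driver wrapper needed */
  };

  static int foo_register(struct foo_dev *dev)
  {
          mutex_init(&dev->lock);
          dev->vdev->fops = &foo_fops;
          /* With ->lock set, the V4L2 core takes the mutex around ioctls. */
          dev->vdev->lock = &dev->lock;
          return video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
  }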
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c
index b085a3c6dc04..447148eff958 100644
--- a/drivers/media/video/cx231xx/cx231xx-avcore.c
+++ b/drivers/media/video/cx231xx/cx231xx-avcore.c
@@ -89,7 +89,7 @@ void initGPIO(struct cx231xx *dev)
verve_read_byte(dev, 0x07, &val);
cx231xx_info(" verve_read_byte address0x07=0x%x\n", val);
- cx231xx_capture_start(dev, 1, 2);
+ cx231xx_capture_start(dev, 1, Vbi);
cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00);
cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF);
@@ -99,7 +99,7 @@ void uninitGPIO(struct cx231xx *dev)
{
u8 value[4] = { 0, 0, 0, 0 };
- cx231xx_capture_start(dev, 0, 2);
+ cx231xx_capture_start(dev, 0, Vbi);
verve_write_byte(dev, 0x07, 0x14);
cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER,
0x68, value, 4);
@@ -2516,29 +2516,29 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
if (dev->udev->speed == USB_SPEED_HIGH) {
switch (media_type) {
- case 81: /* audio */
+ case Audio:
cx231xx_info("%s: Audio enter HANC\n", __func__);
status =
cx231xx_mode_register(dev, TS_MODE_REG, 0x9300);
break;
- case 2: /* vbi */
+ case Vbi:
cx231xx_info("%s: set vanc registers\n", __func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300);
break;
- case 3: /* sliced cc */
+ case Sliced_cc:
cx231xx_info("%s: set hanc registers\n", __func__);
status =
cx231xx_mode_register(dev, TS_MODE_REG, 0x1300);
break;
- case 0: /* video */
+ case Raw_Video:
cx231xx_info("%s: set video registers\n", __func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
break;
- case 4: /* ts1 */
+ case TS1_serial_mode:
cx231xx_info("%s: set ts1 registers", __func__);
if (dev->board.has_417) {
@@ -2569,7 +2569,7 @@ int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type)
}
break;
- case 6: /* ts1 parallel mode */
+ case TS1_parallel_mode:
cx231xx_info("%s: set ts1 parallel mode registers\n",
__func__);
status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100);
@@ -2592,52 +2592,28 @@ int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type)
/* get EP for media type */
pcb_config = (struct pcb_config *)&dev->current_pcb_config;
- if (pcb_config->config_num == 1) {
+ if (pcb_config->config_num) {
switch (media_type) {
- case 0: /* Video */
+ case Raw_Video:
ep_mask = ENABLE_EP4; /* ep4 [00:1000] */
break;
- case 1: /* Audio */
+ case Audio:
ep_mask = ENABLE_EP3; /* ep3 [00:0100] */
break;
- case 2: /* Vbi */
+ case Vbi:
ep_mask = ENABLE_EP5; /* ep5 [01:0000] */
break;
- case 3: /* Sliced_cc */
+ case Sliced_cc:
ep_mask = ENABLE_EP6; /* ep6 [10:0000] */
break;
- case 4: /* ts1 */
- case 6: /* ts1 parallel mode */
+ case TS1_serial_mode:
+ case TS1_parallel_mode:
ep_mask = ENABLE_EP1; /* ep1 [00:0001] */
break;
- case 5: /* ts2 */
+ case TS2:
ep_mask = ENABLE_EP2; /* ep2 [00:0010] */
break;
}
-
- } else if (pcb_config->config_num > 1) {
- switch (media_type) {
- case 0: /* Video */
- ep_mask = ENABLE_EP4; /* ep4 [00:1000] */
- break;
- case 1: /* Audio */
- ep_mask = ENABLE_EP3; /* ep3 [00:0100] */
- break;
- case 2: /* Vbi */
- ep_mask = ENABLE_EP5; /* ep5 [01:0000] */
- break;
- case 3: /* Sliced_cc */
- ep_mask = ENABLE_EP6; /* ep6 [10:0000] */
- break;
- case 4: /* ts1 */
- case 6: /* ts1 parallel mode */
- ep_mask = ENABLE_EP1; /* ep1 [00:0001] */
- break;
- case 5: /* ts2 */
- ep_mask = ENABLE_EP2; /* ep2 [00:0010] */
- break;
- }
-
}
if (start) {
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 8ed460d692e0..02d4d36735d3 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1023,7 +1023,6 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
int nr = 0, ifnum;
int i, isoc_pipe = 0;
char *speed;
- char descr[255] = "";
struct usb_interface_assoc_descriptor *assoc_desc;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -1098,20 +1097,10 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
speed = "unknown";
}
- if (udev->manufacturer)
- strlcpy(descr, udev->manufacturer, sizeof(descr));
-
- if (udev->product) {
- if (*descr)
- strlcat(descr, " ", sizeof(descr));
- strlcat(descr, udev->product, sizeof(descr));
- }
- if (*descr)
- strlcat(descr, " ", sizeof(descr));
-
- cx231xx_info("New device %s@ %s Mbps "
+ cx231xx_info("New device %s %s @ %s Mbps "
"(%04x:%04x) with %d interfaces\n",
- descr,
+ udev->manufacturer ? udev->manufacturer : "",
+ udev->product ? udev->product : "",
speed,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
diff --git a/drivers/media/video/cx231xx/cx231xx-i2c.c b/drivers/media/video/cx231xx/cx231xx-i2c.c
index 925f3a04e53c..781feed406f7 100644
--- a/drivers/media/video/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/video/cx231xx/cx231xx-i2c.c
@@ -499,16 +499,12 @@ int cx231xx_i2c_register(struct cx231xx_i2c *bus)
BUG_ON(!dev->cx231xx_send_usb_command);
- memcpy(&bus->i2c_adap, &cx231xx_adap_template, sizeof(bus->i2c_adap));
- memcpy(&bus->i2c_algo, &cx231xx_algo, sizeof(bus->i2c_algo));
- memcpy(&bus->i2c_client, &cx231xx_client_template,
- sizeof(bus->i2c_client));
-
+ bus->i2c_adap = cx231xx_adap_template;
+ bus->i2c_client = cx231xx_client_template;
bus->i2c_adap.dev.parent = &dev->udev->dev;
strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name));
- bus->i2c_algo.data = bus;
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
i2c_add_adapter(&bus->i2c_adap);
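Here and in the cx23885/cx25821 hunks below, memcpy() of the adapter/client templates is replaced by plain struct assignment, which lets the compiler check that source and destination really have the same type and reads more directly. A tiny standalone illustration of the equivalence; the struct and names are invented for the example.

  #include <string.h>

  struct adap_template {
          const char *name;
          int flags;
  };

  static const struct adap_template the_template = { "template", 0 };

  void copy_template(struct adap_template *dst)
  {
          /* Old style: size and type agreement rest on the programmer. */
          memcpy(dst, &the_template, sizeof(*dst));

          /* New style: the compiler checks that both sides have one type. */
          *dst = the_template;
  }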
diff --git a/drivers/media/video/cx231xx/cx231xx.h b/drivers/media/video/cx231xx/cx231xx.h
index e17447554a0d..a89d020de948 100644
--- a/drivers/media/video/cx231xx/cx231xx.h
+++ b/drivers/media/video/cx231xx/cx231xx.h
@@ -26,7 +26,6 @@
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
@@ -481,7 +480,6 @@ struct cx231xx_i2c {
/* i2c i/o */
struct i2c_adapter i2c_adap;
- struct i2c_algo_bit_data i2c_algo;
struct i2c_client i2c_client;
u32 i2c_rc;
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index be1e21d8295c..4887314339cb 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -316,19 +316,13 @@ int cx23885_i2c_register(struct cx23885_i2c *bus)
dprintk(1, "%s(bus = %d)\n", __func__, bus->nr);
- memcpy(&bus->i2c_adap, &cx23885_i2c_adap_template,
- sizeof(bus->i2c_adap));
- memcpy(&bus->i2c_algo, &cx23885_i2c_algo_template,
- sizeof(bus->i2c_algo));
- memcpy(&bus->i2c_client, &cx23885_i2c_client_template,
- sizeof(bus->i2c_client));
-
+ bus->i2c_adap = cx23885_i2c_adap_template;
+ bus->i2c_client = cx23885_i2c_client_template;
bus->i2c_adap.dev.parent = &dev->pci->dev;
strlcpy(bus->i2c_adap.name, bus->dev->name,
sizeof(bus->i2c_adap.name));
- bus->i2c_algo.data = bus;
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
i2c_add_adapter(&bus->i2c_adap);
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index 13c37ec07ae7..5d560c747e09 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -21,7 +21,6 @@
#include <linux/pci.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
@@ -247,7 +246,6 @@ struct cx23885_i2c {
/* i2c i/o */
struct i2c_adapter i2c_adap;
- struct i2c_algo_bit_data i2c_algo;
struct i2c_client i2c_client;
u32 i2c_rc;
diff --git a/drivers/media/video/cx25821/cx25821-i2c.c b/drivers/media/video/cx25821/cx25821-i2c.c
index 6311180f430c..9844549764c9 100644
--- a/drivers/media/video/cx25821/cx25821-i2c.c
+++ b/drivers/media/video/cx25821/cx25821-i2c.c
@@ -305,18 +305,12 @@ int cx25821_i2c_register(struct cx25821_i2c *bus)
dprintk(1, "%s(bus = %d)\n", __func__, bus->nr);
- memcpy(&bus->i2c_adap, &cx25821_i2c_adap_template,
- sizeof(bus->i2c_adap));
- memcpy(&bus->i2c_algo, &cx25821_i2c_algo_template,
- sizeof(bus->i2c_algo));
- memcpy(&bus->i2c_client, &cx25821_i2c_client_template,
- sizeof(bus->i2c_client));
-
+ bus->i2c_adap = cx25821_i2c_adap_template;
+ bus->i2c_client = cx25821_i2c_client_template;
bus->i2c_adap.dev.parent = &dev->pci->dev;
strlcpy(bus->i2c_adap.name, bus->dev->name, sizeof(bus->i2c_adap.name));
- bus->i2c_algo.data = bus;
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, &dev->v4l2_dev);
i2c_add_adapter(&bus->i2c_adap);
diff --git a/drivers/media/video/cx25821/cx25821-medusa-video.c b/drivers/media/video/cx25821/cx25821-medusa-video.c
index 313fb20a0b47..6a92e5c70c2a 100644
--- a/drivers/media/video/cx25821/cx25821-medusa-video.c
+++ b/drivers/media/video/cx25821/cx25821-medusa-video.c
@@ -499,7 +499,7 @@ static void medusa_set_decoderduration(struct cx25821_dev *dev, int decoder,
mutex_lock(&dev->lock);
/* no support */
- if (decoder < VDEC_A && decoder > VDEC_H) {
+ if (decoder < VDEC_A || decoder > VDEC_H) {
mutex_unlock(&dev->lock);
return;
}
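The one-character medusa-video fix above is worth spelling out: the old guard "decoder < VDEC_A && decoder > VDEC_H" can never be true, since no value is simultaneously below the lowest and above the highest decoder ID, so out-of-range decoders were never rejected; with "||" anything outside [VDEC_A, VDEC_H] is refused. A standalone check of the corrected logic, with placeholder values for VDEC_A and VDEC_H.

  #include <assert.h>

  /* Placeholder IDs; the real values come from the cx25821 headers. */
  enum { VDEC_A = 0, VDEC_H = 7 };

  static int decoder_in_range(int decoder)
  {
          /* The fixed test: reject anything outside [VDEC_A, VDEC_H]. */
          return !(decoder < VDEC_A || decoder > VDEC_H);
  }

  int main(void)
  {
          assert(decoder_in_range(VDEC_A));
          assert(decoder_in_range(VDEC_H));
          assert(!decoder_in_range(VDEC_A - 1));
          assert(!decoder_in_range(VDEC_H + 1));
          return 0;
  }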
diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index 029f2934a6d8..8a9c0c869412 100644
--- a/drivers/media/video/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -26,7 +26,6 @@
#include <linux/pci.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
@@ -213,7 +212,6 @@ struct cx25821_i2c {
/* i2c i/o */
struct i2c_adapter i2c_adap;
- struct i2c_algo_bit_data i2c_algo;
struct i2c_client i2c_client;
u32 i2c_rc;
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 04bf6627d362..dfac6e34859f 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -585,13 +585,10 @@ static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
- struct v4l2_control client_ctl;
int left = value->value.integer.value[0];
int right = value->value.integer.value[1];
int v, b;
- memset(&client_ctl, 0, sizeof(client_ctl));
-
/* Pass volume & balance onto any WM8775 */
if (left >= right) {
v = left << 10;
@@ -600,13 +597,8 @@ static void snd_cx88_wm8775_volume_put(struct snd_kcontrol *kcontrol,
v = right << 10;
b = right ? 0xffff - (0x8000 * left) / right : 0x8000;
}
- client_ctl.value = v;
- client_ctl.id = V4L2_CID_AUDIO_VOLUME;
- call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
-
- client_ctl.value = b;
- client_ctl.id = V4L2_CID_AUDIO_BALANCE;
- call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
+ wm8775_s_ctrl(core, V4L2_CID_AUDIO_VOLUME, v);
+ wm8775_s_ctrl(core, V4L2_CID_AUDIO_BALANCE, b);
}
/* OK - TODO: test it */
@@ -687,14 +679,8 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
- ((1<<6) == bit)) {
- struct v4l2_control client_ctl;
-
- memset(&client_ctl, 0, sizeof(client_ctl));
- client_ctl.value = 0 != (vol & bit);
- client_ctl.id = V4L2_CID_AUDIO_MUTE;
- call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
- }
+ ((1<<6) == bit))
+ wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
@@ -724,13 +710,10 @@ static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
- struct v4l2_control client_ctl;
-
- memset(&client_ctl, 0, sizeof(client_ctl));
- client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
- call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl);
- value->value.integer.value[0] = client_ctl.value ? 1 : 0;
+ s32 val;
+ val = wm8775_g_ctrl(core, V4L2_CID_AUDIO_LOUDNESS);
+ value->value.integer.value[0] = val ? 1 : 0;
return 0;
}
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index ed7b2aa1ed83..843ffd9e533b 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -35,6 +35,7 @@
#include <linux/firmware.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/cx2341x.h>
#include "cx88.h"
@@ -523,11 +524,10 @@ static void blackbird_codec_settings(struct cx8802_dev *dev)
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
dev->height, dev->width);
- dev->params.width = dev->width;
- dev->params.height = dev->height;
- dev->params.is_50hz = (dev->core->tvnorm & V4L2_STD_625_50) != 0;
-
- cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
+ dev->cxhdl.width = dev->width;
+ dev->cxhdl.height = dev->height;
+ cx2341x_handler_set_50hz(&dev->cxhdl, dev->core->tvnorm & V4L2_STD_625_50);
+ cx2341x_handler_setup(&dev->cxhdl);
}
static int blackbird_initialize_codec(struct cx8802_dev *dev)
@@ -618,6 +618,8 @@ static int blackbird_start_codec(struct file *file, void *priv)
/* initialize the video input */
blackbird_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
+ cx2341x_handler_set_busy(&dev->cxhdl, 1);
+
/* start capturing to the host interface */
blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
BLACKBIRD_MPEG_CAPTURE,
@@ -636,6 +638,8 @@ static int blackbird_stop_codec(struct cx8802_dev *dev)
BLACKBIRD_RAW_BITS_NONE
);
+ cx2341x_handler_set_busy(&dev->cxhdl, 0);
+
dev->mpeg_active = 0;
return 0;
}
@@ -685,58 +689,15 @@ static struct videobuf_queue_ops blackbird_qops = {
/* ------------------------------------------------------------------ */
-static const u32 *ctrl_classes[] = {
- cx88_user_ctrls,
- cx2341x_mpeg_ctrls,
- NULL
-};
-
-static int blackbird_queryctrl(struct cx8802_dev *dev, struct v4l2_queryctrl *qctrl)
-{
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (qctrl->id == 0)
- return -EINVAL;
-
- /* Standard V4L2 controls */
- if (cx8800_ctrl_query(dev->core, qctrl) == 0)
- return 0;
-
- /* MPEG V4L2 controls */
- if (cx2341x_ctrl_query(&dev->params, qctrl))
- qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
- return 0;
-}
-
-/* ------------------------------------------------------------------ */
-/* IOCTL Handlers */
-
-static int vidioc_querymenu (struct file *file, void *priv,
- struct v4l2_querymenu *qmenu)
-{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
- struct v4l2_queryctrl qctrl;
-
- qctrl.id = qmenu->id;
- blackbird_queryctrl(dev, &qctrl);
- return v4l2_ctrl_query_menu(qmenu, &qctrl,
- cx2341x_ctrl_get_menu(&dev->params, qmenu->id));
-}
-
-static int vidioc_querycap (struct file *file, void *priv,
+static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx88_core *core = dev->core;
strcpy(cap->driver, "cx88_blackbird");
- strlcpy(cap->card, core->board.name, sizeof(cap->card));
- sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING;
- if (UNSET != core->board.tuner_type)
- cap->capabilities |= V4L2_CAP_TUNER;
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+ cx88_querycap(file, core, cap);
return 0;
}
@@ -748,6 +709,7 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
strlcpy(f->description, "MPEG", sizeof(f->description));
f->pixelformat = V4L2_PIX_FMT_MPEG;
+ f->flags = V4L2_FMT_FLAG_COMPRESSED;
return 0;
}
@@ -759,12 +721,12 @@ static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024 */
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.field = fh->mpegq.field;
- dprintk(0,"VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
+ dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
@@ -777,9 +739,9 @@ static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */;
- f->fmt.pix.colorspace = 0;
- dprintk(0,"VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
+ f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024 */
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
@@ -793,15 +755,15 @@ static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
- f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */;
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.sizeimage = 188 * 4 * mpegbufs; /* 188 * 4 * 1024 */
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
fh->mpegq.field = f->fmt.pix.field;
cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
f->fmt.pix.height, f->fmt.pix.width);
- dprintk(0,"VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
+ dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field );
return 0;
}
@@ -834,60 +796,21 @@ static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8802_fh *fh = priv;
+ struct cx8802_dev *dev = fh->dev;
+
+ if (!dev->mpeg_active)
+ blackbird_start_codec(file, fh);
return videobuf_streamon(&fh->mpegq);
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8802_fh *fh = priv;
- return videobuf_streamoff(&fh->mpegq);
-}
-
-static int vidioc_g_ext_ctrls (struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- return cx2341x_ext_ctrls(&dev->params, 0, f, VIDIOC_G_EXT_CTRLS);
-}
-
-static int vidioc_s_ext_ctrls (struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
- struct cx2341x_mpeg_params p;
- int err;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
+ struct cx8802_dev *dev = fh->dev;
if (dev->mpeg_active)
blackbird_stop_codec(dev);
-
- p = dev->params;
- err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS);
- if (!err) {
- err = cx2341x_update(dev, blackbird_mbox_func, &dev->params, &p);
- dev->params = p;
- }
- return err;
-}
-
-static int vidioc_try_ext_ctrls (struct file *file, void *priv,
- struct v4l2_ext_controls *f)
-{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
- struct cx2341x_mpeg_params p;
- int err;
-
- if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
- return -EINVAL;
- p = dev->params;
- err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
-
- return err;
+ return videobuf_streamoff(&fh->mpegq);
}
static int vidioc_s_frequency (struct file *file, void *priv,
@@ -897,6 +820,10 @@ static int vidioc_s_frequency (struct file *file, void *priv,
struct cx8802_dev *dev = fh->dev;
struct cx88_core *core = dev->core;
+ if (unlikely(UNSET == core->board.tuner_type))
+ return -EINVAL;
+ if (unlikely(f->tuner != 0))
+ return -EINVAL;
if (dev->mpeg_active)
blackbird_stop_codec(dev);
@@ -914,29 +841,11 @@ static int vidioc_log_status (struct file *file, void *priv)
char name[32 + 2];
snprintf(name, sizeof(name), "%s/2", core->name);
- printk("%s/2: ============ START LOG STATUS ============\n",
- core->name);
call_all(core, core, log_status);
- cx2341x_log_status(&dev->params, name);
- printk("%s/2: ============= END LOG STATUS =============\n",
- core->name);
+ v4l2_ctrl_handler_log_status(&dev->cxhdl.hdl, name);
return 0;
}
-static int vidioc_queryctrl (struct file *file, void *priv,
- struct v4l2_queryctrl *qctrl)
-{
- struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
-
- if (blackbird_queryctrl(dev, qctrl) == 0)
- return 0;
-
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (unlikely(qctrl->id == 0))
- return -EINVAL;
- return cx8800_ctrl_query(dev->core, qctrl);
-}
-
static int vidioc_enum_input (struct file *file, void *priv,
struct v4l2_input *i)
{
@@ -944,22 +853,6 @@ static int vidioc_enum_input (struct file *file, void *priv,
return cx88_enum_input (core,i);
}
-static int vidioc_g_ctrl (struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
- return
- cx88_get_control(core,ctl);
-}
-
-static int vidioc_s_ctrl (struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
- return
- cx88_set_control(core,ctl);
-}
-
static int vidioc_g_frequency (struct file *file, void *priv,
struct v4l2_frequency *f)
{
@@ -968,8 +861,9 @@ static int vidioc_g_frequency (struct file *file, void *priv,
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
+ if (unlikely(f->tuner != 0))
+ return -EINVAL;
- f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = core->freq;
call_all(core, tuner, g_frequency, f);
@@ -990,6 +884,8 @@ static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
if (i >= 4)
return -EINVAL;
+ if (0 == INPUT(i).type)
+ return -EINVAL;
mutex_lock(&core->lock);
cx88_newstation(core);
@@ -1010,9 +906,9 @@ static int vidioc_g_tuner (struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Television");
- t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh = 0xffffffffUL;
+ call_all(core, tuner, g_tuner, t);
cx88_get_stereo(core ,t);
reg = cx_read(MO_DEVICE_STATUS);
@@ -1034,6 +930,14 @@ static int vidioc_s_tuner (struct file *file, void *priv,
return 0;
}
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
+{
+ struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
+
+ *tvnorm = core->tvnorm;
+ return 0;
+}
+
static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
{
struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
@@ -1087,6 +991,7 @@ static int mpeg_open(struct file *file)
mutex_unlock(&dev->core->lock);
return -ENOMEM;
}
+ v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
fh->dev = dev;
@@ -1103,6 +1008,7 @@ static int mpeg_open(struct file *file)
dev->core->mpeg_users++;
mutex_unlock(&dev->core->lock);
+ v4l2_fh_add(&fh->fh);
return 0;
}
@@ -1123,6 +1029,8 @@ static int mpeg_release(struct file *file)
videobuf_mmap_free(&fh->mpegq);
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
file->private_data = NULL;
kfree(fh);
@@ -1155,13 +1063,14 @@ mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
mpeg_poll(struct file *file, struct poll_table_struct *wait)
{
+ unsigned long req_events = poll_requested_events(wait);
struct cx8802_fh *fh = file->private_data;
struct cx8802_dev *dev = fh->dev;
- if (!dev->mpeg_active)
+ if (!dev->mpeg_active && (req_events & (POLLIN | POLLRDNORM)))
blackbird_start_codec(file, fh);
- return videobuf_poll_stream(file, &fh->mpegq, wait);
+ return v4l2_ctrl_poll(file, wait) | videobuf_poll_stream(file, &fh->mpegq, wait);
}
static int
@@ -1180,11 +1089,10 @@ static const struct v4l2_file_operations mpeg_fops =
.read = mpeg_read,
.poll = mpeg_poll,
.mmap = mpeg_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
- .vidioc_querymenu = vidioc_querymenu,
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
@@ -1196,21 +1104,18 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
- .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
- .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
- .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
.vidioc_s_frequency = vidioc_s_frequency,
.vidioc_log_status = vidioc_log_status,
- .vidioc_queryctrl = vidioc_queryctrl,
.vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device cx8802_mpeg_template = {
@@ -1218,7 +1123,6 @@ static struct video_device cx8802_mpeg_template = {
.fops = &mpeg_fops,
.ioctl_ops = &mpeg_ioctl_ops,
.tvnorms = CX88_NORMS,
- .current_norm = V4L2_STD_NTSC_M,
};
/* ------------------------------------------------------------------ */
@@ -1286,6 +1190,7 @@ static int blackbird_register_video(struct cx8802_dev *dev)
dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
&cx8802_mpeg_template,"mpeg");
+ dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl;
video_set_drvdata(dev->mpeg_dev, dev);
err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
if (err < 0) {
@@ -1318,17 +1223,20 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
goto fail_core;
dev->width = 720;
- dev->height = 576;
- cx2341x_fill_defaults(&dev->params);
- dev->params.port = CX2341X_PORT_STREAMING;
-
- cx8802_mpeg_template.current_norm = core->tvnorm;
-
if (core->tvnorm & V4L2_STD_525_60) {
dev->height = 480;
} else {
dev->height = 576;
}
+ dev->cxhdl.port = CX2341X_PORT_STREAMING;
+ dev->cxhdl.width = dev->width;
+ dev->cxhdl.height = dev->height;
+ dev->cxhdl.func = blackbird_mbox_func;
+ dev->cxhdl.priv = dev;
+ err = cx2341x_handler_init(&dev->cxhdl, 36);
+ if (err)
+ goto fail_core;
+ v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl);
/* blackbird stuff */
printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
@@ -1336,12 +1244,14 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
host_setup(dev->core);
blackbird_initialize_codec(dev);
- blackbird_register_video(dev);
/* initial device configuration: needed ? */
// init_controls(core);
cx88_set_tvnorm(core,core->tvnorm);
cx88_video_mux(core,0);
+ cx2341x_handler_set_50hz(&dev->cxhdl, dev->height == 576);
+ cx2341x_handler_setup(&dev->cxhdl);
+ blackbird_register_video(dev);
return 0;
@@ -1351,8 +1261,12 @@ static int cx8802_blackbird_probe(struct cx8802_driver *drv)
static int cx8802_blackbird_remove(struct cx8802_driver *drv)
{
+ struct cx88_core *core = drv->core;
+ struct cx8802_dev *dev = core->dvbdev;
+
/* blackbird */
blackbird_unregister_video(drv->core->dvbdev);
+ v4l2_ctrl_handler_free(&dev->cxhdl.hdl);
return 0;
}
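The blackbird conversion above removes the driver's private cx2341x_mpeg_params/queryctrl plumbing and drives the encoder through the cx2341x control-handler helpers instead. The setup sequence is spread over cx8802_blackbird_probe() and blackbird_register_video(); a condensed sketch of it, with error handling trimmed, follows. Every call shown also appears in the hunks above; the function name is made up.

  /* Condensed from the probe/register paths above; not buildable on its own. */
  static int blackbird_ctrl_setup_sketch(struct cx8802_dev *dev,
                                         struct cx88_core *core)
  {
          int err;

          dev->cxhdl.port   = CX2341X_PORT_STREAMING;
          dev->cxhdl.width  = dev->width;
          dev->cxhdl.height = dev->height;
          dev->cxhdl.func   = blackbird_mbox_func;   /* encoder mailbox I/O */
          dev->cxhdl.priv   = dev;

          err = cx2341x_handler_init(&dev->cxhdl, 36); /* 36: nr-of-controls hint */
          if (err)
                  return err;

          /* Expose the core video controls next to the MPEG controls. */
          v4l2_ctrl_add_handler(&dev->cxhdl.hdl, &core->video_hdl);

          cx2341x_handler_set_50hz(&dev->cxhdl, dev->height == 576);
          cx2341x_handler_setup(&dev->cxhdl);

          dev->mpeg_dev->ctrl_handler = &dev->cxhdl.hdl;
          return 0;
  }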
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index cbd5d119a2c6..4e9d4f722960 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3693,7 +3693,22 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
return NULL;
}
+ if (v4l2_ctrl_handler_init(&core->video_hdl, 13)) {
+ v4l2_device_unregister(&core->v4l2_dev);
+ kfree(core);
+ return NULL;
+ }
+
+ if (v4l2_ctrl_handler_init(&core->audio_hdl, 13)) {
+ v4l2_ctrl_handler_free(&core->video_hdl);
+ v4l2_device_unregister(&core->v4l2_dev);
+ kfree(core);
+ return NULL;
+ }
+
if (0 != cx88_get_resources(core, pci)) {
+ v4l2_ctrl_handler_free(&core->video_hdl);
+ v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
kfree(core);
return NULL;
@@ -3706,6 +3721,11 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->bmmio = (u8 __iomem *)core->lmmio;
if (core->lmmio == NULL) {
+ release_mem_region(pci_resource_start(pci, 0),
+ pci_resource_len(pci, 0));
+ v4l2_ctrl_handler_free(&core->video_hdl);
+ v4l2_ctrl_handler_free(&core->audio_hdl);
+ v4l2_device_unregister(&core->v4l2_dev);
kfree(core);
return NULL;
}
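The cx88_core_create() hunk above adds two control handlers and therefore has to free them on every later failure path, so each error branch repeats the same unwind calls. Kernel code often expresses such ladders with goto-based cleanup instead; a sketch of that alternative layout of the same steps follows. It is illustrative only and is not what this patch does; the v4l2_device_register() and kzalloc() calls are assumed from context.

  /* Illustrative alternative layout of the same unwind steps; not the patch. */
  static struct cx88_core *cx88_core_create_sketch(struct pci_dev *pci, int nr)
  {
          struct cx88_core *core = kzalloc(sizeof(*core), GFP_KERNEL);

          if (!core)
                  return NULL;
          if (v4l2_device_register(&pci->dev, &core->v4l2_dev))
                  goto err_free;
          if (v4l2_ctrl_handler_init(&core->video_hdl, 13))
                  goto err_unreg;
          if (v4l2_ctrl_handler_init(&core->audio_hdl, 13))
                  goto err_video_hdl;
          if (cx88_get_resources(core, pci) != 0)
                  goto err_audio_hdl;
          return core;

  err_audio_hdl:
          v4l2_ctrl_handler_free(&core->audio_hdl);
  err_video_hdl:
          v4l2_ctrl_handler_free(&core->video_hdl);
  err_unreg:
          v4l2_device_unregister(&core->v4l2_dev);
  err_free:
          kfree(core);
          return NULL;
  }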
diff --git a/drivers/media/video/cx88/cx88-core.c b/drivers/media/video/cx88/cx88-core.c
index fbfdd8067937..e81c735f012a 100644
--- a/drivers/media/video/cx88/cx88-core.c
+++ b/drivers/media/video/cx88/cx88-core.c
@@ -1012,6 +1012,9 @@ int cx88_set_tvnorm(struct cx88_core *core, v4l2_std_id norm)
// tell i2c chips
call_all(core, core, s_std, norm);
+ /* The chroma_agc control should be inaccessible if the video format is SECAM */
+ v4l2_ctrl_grab(core->chroma_agc, cxiformat == VideoFormatSECAM);
+
// done
return 0;
}
@@ -1030,10 +1033,10 @@ struct video_device *cx88_vdev_init(struct cx88_core *core,
return NULL;
*vfd = *template_;
vfd->v4l2_dev = &core->v4l2_dev;
- vfd->parent = &pci->dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)",
core->name, type, core->board.name);
+ set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
return vfd;
}
@@ -1086,6 +1089,8 @@ void cx88_core_put(struct cx88_core *core, struct pci_dev *pci)
iounmap(core->lmmio);
cx88_devcount--;
mutex_unlock(&devlist);
+ v4l2_ctrl_handler_free(&core->video_hdl);
+ v4l2_ctrl_handler_free(&core->audio_hdl);
v4l2_device_unregister(&core->v4l2_dev);
kfree(core);
}
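The cx88_set_tvnorm() hunk above gates the chroma AGC control with v4l2_ctrl_grab() whenever the selected format is SECAM, replacing the old V4L2_CTRL_FLAG_INACTIVE reporting that the cx88-video.c changes below delete. A minimal sketch of the grab pattern, assuming a driver that kept the pointer returned by v4l2_ctrl_new_std(); the foo_* names are placeholders.

  #include <linux/videodev2.h>
  #include <media/v4l2-ctrls.h>

  struct foo_state {
          struct v4l2_ctrl_handler hdl;
          struct v4l2_ctrl *chroma_agc;   /* saved from v4l2_ctrl_new_std() */
  };

  static void foo_set_std(struct foo_state *state, v4l2_std_id norm)
  {
          /*
           * A grabbed control keeps its value but refuses changes from
           * userspace; grab it whenever a SECAM norm is selected.
           */
          v4l2_ctrl_grab(state->chroma_agc, norm & V4L2_STD_SECAM);
  }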
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 921c56d115d6..f6fcc7e763ab 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -40,6 +40,7 @@
#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/wm8775.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
@@ -155,219 +156,147 @@ static const struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
/* ------------------------------------------------------------------- */
-static const struct v4l2_queryctrl no_ctl = {
- .name = "42",
- .flags = V4L2_CTRL_FLAG_DISABLED,
+struct cx88_ctrl {
+ /* control information */
+ u32 id;
+ s32 minimum;
+ s32 maximum;
+ u32 step;
+ s32 default_value;
+
+ /* control register information */
+ u32 off;
+ u32 reg;
+ u32 sreg;
+ u32 mask;
+ u32 shift;
};
-static const struct cx88_ctrl cx8800_ctls[] = {
+static const struct cx88_ctrl cx8800_vid_ctls[] = {
/* --- video --- */
{
- .v = {
- .id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
- .minimum = 0x00,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 128,
- .reg = MO_CONTR_BRIGHT,
- .mask = 0x00ff,
- .shift = 0,
+ .id = V4L2_CID_BRIGHTNESS,
+ .minimum = 0x00,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x7f,
+ .off = 128,
+ .reg = MO_CONTR_BRIGHT,
+ .mask = 0x00ff,
+ .shift = 0,
},{
- .v = {
- .id = V4L2_CID_CONTRAST,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x3f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
- .reg = MO_CONTR_BRIGHT,
- .mask = 0xff00,
- .shift = 8,
+ .id = V4L2_CID_CONTRAST,
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x3f,
+ .off = 0,
+ .reg = MO_CONTR_BRIGHT,
+ .mask = 0xff00,
+ .shift = 8,
},{
- .v = {
- .id = V4L2_CID_HUE,
- .name = "Hue",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 128,
- .reg = MO_HUE,
- .mask = 0x00ff,
- .shift = 0,
+ .id = V4L2_CID_HUE,
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x7f,
+ .off = 128,
+ .reg = MO_HUE,
+ .mask = 0x00ff,
+ .shift = 0,
},{
/* strictly, this only describes only U saturation.
* V saturation is handled specially through code.
*/
- .v = {
- .id = V4L2_CID_SATURATION,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x7f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
- .reg = MO_UV_SATURATION,
- .mask = 0x00ff,
- .shift = 0,
+ .id = V4L2_CID_SATURATION,
+ .minimum = 0,
+ .maximum = 0xff,
+ .step = 1,
+ .default_value = 0x7f,
+ .off = 0,
+ .reg = MO_UV_SATURATION,
+ .mask = 0x00ff,
+ .shift = 0,
}, {
- .v = {
- .id = V4L2_CID_SHARPNESS,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 4,
- .step = 1,
- .default_value = 0x0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
+ .id = V4L2_CID_SHARPNESS,
+ .minimum = 0,
+ .maximum = 4,
+ .step = 1,
+ .default_value = 0x0,
+ .off = 0,
/* NOTE: the value is converted and written to both even
and odd registers in the code */
- .reg = MO_FILTER_ODD,
- .mask = 7 << 7,
- .shift = 7,
+ .reg = MO_FILTER_ODD,
+ .mask = 7 << 7,
+ .shift = 7,
}, {
- .v = {
- .id = V4L2_CID_CHROMA_AGC,
- .name = "Chroma AGC",
- .minimum = 0,
- .maximum = 1,
- .default_value = 0x1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- .reg = MO_INPUT_FORMAT,
- .mask = 1 << 10,
- .shift = 10,
+ .id = V4L2_CID_CHROMA_AGC,
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0x1,
+ .reg = MO_INPUT_FORMAT,
+ .mask = 1 << 10,
+ .shift = 10,
}, {
- .v = {
- .id = V4L2_CID_COLOR_KILLER,
- .name = "Color killer",
- .minimum = 0,
- .maximum = 1,
- .default_value = 0x1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- .reg = MO_INPUT_FORMAT,
- .mask = 1 << 9,
- .shift = 9,
+ .id = V4L2_CID_COLOR_KILLER,
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 0x1,
+ .reg = MO_INPUT_FORMAT,
+ .mask = 1 << 9,
+ .shift = 9,
}, {
- .v = {
- .id = V4L2_CID_BAND_STOP_FILTER,
- .name = "Notch filter",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0x0,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .off = 0,
- .reg = MO_HTOTAL,
- .mask = 3 << 11,
- .shift = 11,
- }, {
- /* --- audio --- */
- .v = {
- .id = V4L2_CID_AUDIO_MUTE,
- .name = "Mute",
- .minimum = 0,
- .maximum = 1,
- .default_value = 1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- },
- .reg = AUD_VOL_CTL,
- .sreg = SHADOW_AUD_VOL_CTL,
- .mask = (1 << 6),
- .shift = 6,
+ .id = V4L2_CID_BAND_STOP_FILTER,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0x0,
+ .off = 0,
+ .reg = MO_HTOTAL,
+ .mask = 3 << 11,
+ .shift = 11,
+ }
+};
+
+static const struct cx88_ctrl cx8800_aud_ctls[] = {
+ {
+ /* --- audio --- */
+ .id = V4L2_CID_AUDIO_MUTE,
+ .minimum = 0,
+ .maximum = 1,
+ .default_value = 1,
+ .reg = AUD_VOL_CTL,
+ .sreg = SHADOW_AUD_VOL_CTL,
+ .mask = (1 << 6),
+ .shift = 6,
},{
- .v = {
- .id = V4L2_CID_AUDIO_VOLUME,
- .name = "Volume",
- .minimum = 0,
- .maximum = 0x3f,
- .step = 1,
- .default_value = 0x3f,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .reg = AUD_VOL_CTL,
- .sreg = SHADOW_AUD_VOL_CTL,
- .mask = 0x3f,
- .shift = 0,
+ .id = V4L2_CID_AUDIO_VOLUME,
+ .minimum = 0,
+ .maximum = 0x3f,
+ .step = 1,
+ .default_value = 0x3f,
+ .reg = AUD_VOL_CTL,
+ .sreg = SHADOW_AUD_VOL_CTL,
+ .mask = 0x3f,
+ .shift = 0,
},{
- .v = {
- .id = V4L2_CID_AUDIO_BALANCE,
- .name = "Balance",
- .minimum = 0,
- .maximum = 0x7f,
- .step = 1,
- .default_value = 0x40,
- .type = V4L2_CTRL_TYPE_INTEGER,
- },
- .reg = AUD_BAL_CTL,
- .sreg = SHADOW_AUD_BAL_CTL,
- .mask = 0x7f,
- .shift = 0,
+ .id = V4L2_CID_AUDIO_BALANCE,
+ .minimum = 0,
+ .maximum = 0x7f,
+ .step = 1,
+ .default_value = 0x40,
+ .reg = AUD_BAL_CTL,
+ .sreg = SHADOW_AUD_BAL_CTL,
+ .mask = 0x7f,
+ .shift = 0,
}
};
-enum { CX8800_CTLS = ARRAY_SIZE(cx8800_ctls) };
-
-/* Must be sorted from low to high control ID! */
-const u32 cx88_user_ctrls[] = {
- V4L2_CID_USER_CLASS,
- V4L2_CID_BRIGHTNESS,
- V4L2_CID_CONTRAST,
- V4L2_CID_SATURATION,
- V4L2_CID_HUE,
- V4L2_CID_AUDIO_VOLUME,
- V4L2_CID_AUDIO_BALANCE,
- V4L2_CID_AUDIO_MUTE,
- V4L2_CID_SHARPNESS,
- V4L2_CID_CHROMA_AGC,
- V4L2_CID_COLOR_KILLER,
- V4L2_CID_BAND_STOP_FILTER,
- 0
-};
-EXPORT_SYMBOL(cx88_user_ctrls);
-static const u32 * const ctrl_classes[] = {
- cx88_user_ctrls,
- NULL
+enum {
+ CX8800_VID_CTLS = ARRAY_SIZE(cx8800_vid_ctls),
+ CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls),
};
-int cx8800_ctrl_query(struct cx88_core *core, struct v4l2_queryctrl *qctrl)
-{
- int i;
-
- if (qctrl->id < V4L2_CID_BASE ||
- qctrl->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- for (i = 0; i < CX8800_CTLS; i++)
- if (cx8800_ctls[i].v.id == qctrl->id)
- break;
- if (i == CX8800_CTLS) {
- *qctrl = no_ctl;
- return 0;
- }
- *qctrl = cx8800_ctls[i].v;
- /* Report chroma AGC as inactive when SECAM is selected */
- if (cx8800_ctls[i].v.id == V4L2_CID_CHROMA_AGC &&
- core->tvnorm & V4L2_STD_SECAM)
- qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
-
- return 0;
-}
-EXPORT_SYMBOL(cx8800_ctrl_query);
-
/* ------------------------------------------------------------------- */
/* resource management */
@@ -591,8 +520,9 @@ static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
struct cx8800_fh *fh = q->priv_data;
+ struct cx8800_dev *dev = fh->dev;
- *size = fh->fmt->depth*fh->width*fh->height >> 3;
+ *size = dev->fmt->depth * dev->width * dev->height >> 3;
if (0 == *count)
*count = 32;
if (*size * *count > vid_limit * 1024 * 1024)
@@ -611,21 +541,21 @@ buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
int rc, init_buffer = 0;
- BUG_ON(NULL == fh->fmt);
- if (fh->width < 48 || fh->width > norm_maxw(core->tvnorm) ||
- fh->height < 32 || fh->height > norm_maxh(core->tvnorm))
+ BUG_ON(NULL == dev->fmt);
+ if (dev->width < 48 || dev->width > norm_maxw(core->tvnorm) ||
+ dev->height < 32 || dev->height > norm_maxh(core->tvnorm))
return -EINVAL;
- buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
+ buf->vb.size = (dev->width * dev->height * dev->fmt->depth) >> 3;
if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
return -EINVAL;
- if (buf->fmt != fh->fmt ||
- buf->vb.width != fh->width ||
- buf->vb.height != fh->height ||
+ if (buf->fmt != dev->fmt ||
+ buf->vb.width != dev->width ||
+ buf->vb.height != dev->height ||
buf->vb.field != field) {
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
+ buf->fmt = dev->fmt;
+ buf->vb.width = dev->width;
+ buf->vb.height = dev->height;
buf->vb.field = field;
init_buffer = 1;
}
@@ -675,7 +605,7 @@ buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
}
dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
buf, buf->vb.i,
- fh->width, fh->height, fh->fmt->depth, fh->fmt->name,
+ dev->width, dev->height, dev->fmt->depth, dev->fmt->name,
(unsigned long)buf->risc.dma);
buf->vb.state = VIDEOBUF_PREPARED;
@@ -755,12 +685,15 @@ static const struct videobuf_queue_ops cx8800_video_qops = {
/* ------------------------------------------------------------------ */
-static struct videobuf_queue* get_queue(struct cx8800_fh *fh)
+static struct videobuf_queue *get_queue(struct file *file)
{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ struct video_device *vdev = video_devdata(file);
+ struct cx8800_fh *fh = file->private_data;
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
return &fh->vidq;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
return &fh->vbiq;
default:
BUG();
@@ -768,12 +701,14 @@ static struct videobuf_queue* get_queue(struct cx8800_fh *fh)
}
}
-static int get_ressource(struct cx8800_fh *fh)
+static int get_resource(struct file *file)
{
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ struct video_device *vdev = video_devdata(file);
+
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
return RESOURCE_VIDEO;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
return RESOURCE_VBI;
default:
BUG();
@@ -810,13 +745,9 @@ static int video_open(struct file *file)
if (unlikely(!fh))
return -ENOMEM;
+ v4l2_fh_init(&fh->fh, vdev);
file->private_data = fh;
fh->dev = dev;
- fh->radio = radio;
- fh->type = type;
- fh->width = 320;
- fh->height = 240;
- fh->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
mutex_lock(&core->lock);
@@ -833,7 +764,7 @@ static int video_open(struct file *file)
sizeof(struct cx88_buffer),
fh, NULL);
- if (fh->radio) {
+ if (vdev->vfl_type == VFL_TYPE_RADIO) {
dprintk(1,"video_open: setting radio device\n");
cx_write(MO_GP3_IO, core->board.radio.gpio3);
cx_write(MO_GP0_IO, core->board.radio.gpio0);
@@ -859,6 +790,7 @@ static int video_open(struct file *file)
core->users++;
mutex_unlock(&core->lock);
+ v4l2_fh_add(&fh->fh);
return 0;
}
@@ -866,15 +798,16 @@ static int video_open(struct file *file)
static ssize_t
video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
+ struct video_device *vdev = video_devdata(file);
struct cx8800_fh *fh = file->private_data;
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_GRABBER:
if (res_locked(fh->dev,RESOURCE_VIDEO))
return -EBUSY;
return videobuf_read_one(&fh->vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
- case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case VFL_TYPE_VBI:
if (!res_get(fh->dev,fh,RESOURCE_VBI))
return -EBUSY;
return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
@@ -888,16 +821,16 @@ video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
static unsigned int
video_poll(struct file *file, struct poll_table_struct *wait)
{
+ struct video_device *vdev = video_devdata(file);
struct cx8800_fh *fh = file->private_data;
struct cx88_buffer *buf;
- unsigned int rc = POLLERR;
+ unsigned int rc = v4l2_ctrl_poll(file, wait);
- if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
+ if (vdev->vfl_type == VFL_TYPE_VBI) {
if (!res_get(fh->dev,fh,RESOURCE_VBI))
- return POLLERR;
- return videobuf_poll_stream(file, &fh->vbiq, wait);
+ return rc | POLLERR;
+ return rc | videobuf_poll_stream(file, &fh->vbiq, wait);
}
-
mutex_lock(&fh->vidq.vb_lock);
if (res_check(fh,RESOURCE_VIDEO)) {
/* streaming capture */
@@ -913,9 +846,7 @@ video_poll(struct file *file, struct poll_table_struct *wait)
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
- rc = POLLIN|POLLRDNORM;
- else
- rc = 0;
+ rc |= POLLIN|POLLRDNORM;
done:
mutex_unlock(&fh->vidq.vb_lock);
return rc;
@@ -952,6 +883,8 @@ static int video_release(struct file *file)
videobuf_mmap_free(&fh->vbiq);
mutex_lock(&dev->core->lock);
+ v4l2_fh_del(&fh->fh);
+ v4l2_fh_exit(&fh->fh);
file->private_data = NULL;
kfree(fh);
@@ -966,156 +899,104 @@ static int video_release(struct file *file)
static int
video_mmap(struct file *file, struct vm_area_struct * vma)
{
- struct cx8800_fh *fh = file->private_data;
-
- return videobuf_mmap_mapper(get_queue(fh), vma);
+ return videobuf_mmap_mapper(get_queue(file), vma);
}
/* ------------------------------------------------------------------ */
/* VIDEO CTRL IOCTLS */
-int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
+static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct cx88_ctrl *c = NULL;
- u32 value;
- int i;
+ struct cx88_core *core =
+ container_of(ctrl->handler, struct cx88_core, video_hdl);
+ const struct cx88_ctrl *cc = ctrl->priv;
+ u32 value, mask;
- for (i = 0; i < CX8800_CTLS; i++)
- if (cx8800_ctls[i].v.id == ctl->id)
- c = &cx8800_ctls[i];
- if (unlikely(NULL == c))
- return -EINVAL;
+ mask = cc->mask;
+ switch (ctrl->id) {
+ case V4L2_CID_SATURATION:
+ /* special v_sat handling */
- value = c->sreg ? cx_sread(c->sreg) : cx_read(c->reg);
- switch (ctl->id) {
- case V4L2_CID_AUDIO_BALANCE:
- ctl->value = ((value & 0x7f) < 0x40) ? ((value & 0x7f) + 0x40)
- : (0x7f - (value & 0x7f));
- break;
- case V4L2_CID_AUDIO_VOLUME:
- ctl->value = 0x3f - (value & 0x3f);
+ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
+
+ if (core->tvnorm & V4L2_STD_SECAM) {
+ /* For SECAM, both U and V sat should be equal */
+ value = value << 8 | value;
+ } else {
+ /* Keeps U Saturation proportional to V Sat */
+ value = (value * 0x5a) / 0x7f << 8 | value;
+ }
+ mask = 0xffff;
break;
case V4L2_CID_SHARPNESS:
- ctl->value = ((value & 0x0200) ? (((value & 0x0180) >> 7) + 1)
- : 0);
+ /* 0b000, 0b100, 0b101, 0b110, or 0b111 */
+ value = (ctrl->val < 1 ? 0 : ((ctrl->val + 3) << 7));
+ /* needs to be set for both fields */
+ cx_andor(MO_FILTER_EVEN, mask, value);
+ break;
+ case V4L2_CID_CHROMA_AGC:
+ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
default:
- ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
+ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
- dprintk(1,"get_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctl->id, c->v.name, ctl->value, c->reg,
- value,c->mask, c->sreg ? " [shadowed]" : "");
+ dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
+ if (cc->sreg)
+ cx_sandor(cc->sreg, cc->reg, mask, value);
+ else
+ cx_andor(cc->reg, mask, value);
return 0;
}
-EXPORT_SYMBOL(cx88_get_control);
-int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
+static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
{
- const struct cx88_ctrl *c = NULL;
+ struct cx88_core *core =
+ container_of(ctrl->handler, struct cx88_core, audio_hdl);
+ const struct cx88_ctrl *cc = ctrl->priv;
u32 value,mask;
- int i;
-
- for (i = 0; i < CX8800_CTLS; i++) {
- if (cx8800_ctls[i].v.id == ctl->id) {
- c = &cx8800_ctls[i];
- }
- }
- if (unlikely(NULL == c))
- return -EINVAL;
-
- if (ctl->value < c->v.minimum)
- ctl->value = c->v.minimum;
- if (ctl->value > c->v.maximum)
- ctl->value = c->v.maximum;
/* Pass changes onto any WM8775 */
if (core->board.audio_chip == V4L2_IDENT_WM8775) {
- struct v4l2_control client_ctl;
- memset(&client_ctl, 0, sizeof(client_ctl));
- client_ctl.id = ctl->id;
-
- switch (ctl->id) {
+ switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
- client_ctl.value = ctl->value;
+ wm8775_s_ctrl(core, ctrl->id, ctrl->val);
break;
case V4L2_CID_AUDIO_VOLUME:
- client_ctl.value = (ctl->value) ?
- (0x90 + ctl->value) << 8 : 0;
+ wm8775_s_ctrl(core, ctrl->id, (ctrl->val) ?
+ (0x90 + ctrl->val) << 8 : 0);
break;
case V4L2_CID_AUDIO_BALANCE:
- client_ctl.value = ctl->value << 9;
+ wm8775_s_ctrl(core, ctrl->id, ctrl->val << 9);
break;
default:
- client_ctl.id = 0;
break;
}
- if (client_ctl.id)
- call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
}
- mask=c->mask;
- switch (ctl->id) {
+ mask = cc->mask;
+ switch (ctrl->id) {
case V4L2_CID_AUDIO_BALANCE:
- value = (ctl->value < 0x40) ? (0x7f - ctl->value) : (ctl->value - 0x40);
+ value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40);
break;
case V4L2_CID_AUDIO_VOLUME:
- value = 0x3f - (ctl->value & 0x3f);
- break;
- case V4L2_CID_SATURATION:
- /* special v_sat handling */
-
- value = ((ctl->value - c->off) << c->shift) & c->mask;
-
- if (core->tvnorm & V4L2_STD_SECAM) {
- /* For SECAM, both U and V sat should be equal */
- value=value<<8|value;
- } else {
- /* Keeps U Saturation proportional to V Sat */
- value=(value*0x5a)/0x7f<<8|value;
- }
- mask=0xffff;
- break;
- case V4L2_CID_SHARPNESS:
- /* 0b000, 0b100, 0b101, 0b110, or 0b111 */
- value = (ctl->value < 1 ? 0 : ((ctl->value + 3) << 7));
- /* needs to be set for both fields */
- cx_andor(MO_FILTER_EVEN, mask, value);
- break;
- case V4L2_CID_CHROMA_AGC:
- /* Do not allow chroma AGC to be enabled for SECAM */
- value = ((ctl->value - c->off) << c->shift) & c->mask;
- if (core->tvnorm & V4L2_STD_SECAM && value)
- return -EINVAL;
+ value = 0x3f - (ctrl->val & 0x3f);
break;
default:
- value = ((ctl->value - c->off) << c->shift) & c->mask;
+ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask;
break;
}
dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
- ctl->id, c->v.name, ctl->value, c->reg, value,
- mask, c->sreg ? " [shadowed]" : "");
- if (c->sreg) {
- cx_sandor(c->sreg, c->reg, mask, value);
- } else {
- cx_andor(c->reg, mask, value);
- }
+ ctrl->id, ctrl->name, ctrl->val, cc->reg, value,
+ mask, cc->sreg ? " [shadowed]" : "");
+ if (cc->sreg)
+ cx_sandor(cc->sreg, cc->reg, mask, value);
+ else
+ cx_andor(cc->reg, mask, value);
return 0;
}
-EXPORT_SYMBOL(cx88_set_control);
-
-static void init_controls(struct cx88_core *core)
-{
- struct v4l2_control ctrl;
- int i;
-
- for (i = 0; i < CX8800_CTLS; i++) {
- ctrl.id=cx8800_ctls[i].v.id;
- ctrl.value=cx8800_ctls[i].v.default_value;
-
- cx88_set_control(core, &ctrl);
- }
-}
/* ------------------------------------------------------------------ */
/* VIDEO IOCTLS */
@@ -1124,15 +1005,17 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8800_fh *fh = priv;
+ struct cx8800_dev *dev = fh->dev;
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
f->fmt.pix.field = fh->vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ (f->fmt.pix.width * dev->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
}
@@ -1184,33 +1067,54 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8800_fh *fh = priv;
+ struct cx8800_dev *dev = fh->dev;
int err = vidioc_try_fmt_vid_cap (file,priv,f);
if (0 != err)
return err;
- fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
+ dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ dev->width = f->fmt.pix.width;
+ dev->height = f->fmt.pix.height;
fh->vidq.field = f->fmt.pix.field;
return 0;
}
-static int vidioc_querycap (struct file *file, void *priv,
+void cx88_querycap(struct file *file, struct cx88_core *core,
+ struct v4l2_capability *cap)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ strlcpy(cap->card, core->board.name, sizeof(cap->card));
+ cap->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING;
+ if (UNSET != core->board.tuner_type)
+ cap->device_caps |= V4L2_CAP_TUNER;
+ switch (vdev->vfl_type) {
+ case VFL_TYPE_RADIO:
+ cap->device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER;
+ break;
+ case VFL_TYPE_GRABBER:
+ cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
+ break;
+ case VFL_TYPE_VBI:
+ cap->device_caps |= V4L2_CAP_VBI_CAPTURE;
+ break;
+ }
+ cap->capabilities = cap->device_caps | V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_VBI_CAPTURE | V4L2_CAP_DEVICE_CAPS;
+ if (core->board.radio.type == CX88_RADIO)
+ cap->capabilities |= V4L2_CAP_RADIO;
+}
+EXPORT_SYMBOL(cx88_querycap);
+
+static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev;
struct cx88_core *core = dev->core;
strcpy(cap->driver, "cx8800");
- strlcpy(cap->card, core->board.name, sizeof(cap->card));
- sprintf(cap->bus_info,"PCI:%s",pci_name(dev->pci));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_READWRITE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_VBI_CAPTURE;
- if (UNSET != core->board.tuner_type)
- cap->capabilities |= V4L2_CAP_TUNER;
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+ cx88_querycap(file, core, cap);
return 0;
}
@@ -1228,69 +1132,67 @@ static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
- struct cx8800_fh *fh = priv;
- return (videobuf_reqbufs(get_queue(fh), p));
+ return videobuf_reqbufs(get_queue(file), p);
}
static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct cx8800_fh *fh = priv;
- return (videobuf_querybuf(get_queue(fh), p));
+ return videobuf_querybuf(get_queue(file), p);
}
static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct cx8800_fh *fh = priv;
- return (videobuf_qbuf(get_queue(fh), p));
+ return videobuf_qbuf(get_queue(file), p);
}
static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
- struct cx8800_fh *fh = priv;
- return (videobuf_dqbuf(get_queue(fh), p,
- file->f_flags & O_NONBLOCK));
+ return videobuf_dqbuf(get_queue(file), p,
+ file->f_flags & O_NONBLOCK);
}
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
+ struct video_device *vdev = video_devdata(file);
struct cx8800_fh *fh = priv;
struct cx8800_dev *dev = fh->dev;
- /* We should remember that this driver also supports teletext, */
- /* so we have to test if the v4l2_buf_type is VBI capture data. */
- if (unlikely((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
- (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)))
- return -EINVAL;
-
- if (unlikely(i != fh->type))
+ if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
- if (unlikely(!res_get(dev,fh,get_ressource(fh))))
+ if (unlikely(!res_get(dev, fh, get_resource(file))))
return -EBUSY;
- return videobuf_streamon(get_queue(fh));
+ return videobuf_streamon(get_queue(file));
}
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
+ struct video_device *vdev = video_devdata(file);
struct cx8800_fh *fh = priv;
struct cx8800_dev *dev = fh->dev;
int err, res;
- if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
- (fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
+ if ((vdev->vfl_type == VFL_TYPE_GRABBER && i != V4L2_BUF_TYPE_VIDEO_CAPTURE) ||
+ (vdev->vfl_type == VFL_TYPE_VBI && i != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
- if (i != fh->type)
- return -EINVAL;
-
- res = get_ressource(fh);
- err = videobuf_streamoff(get_queue(fh));
+ res = get_resource(file);
+ err = videobuf_streamoff(get_queue(file));
if (err < 0)
return err;
res_free(dev,fh,res);
return 0;
}
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm)
+{
+ struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
+
+ *tvnorm = core->tvnorm;
+ return 0;
+}
+
static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *tvnorms)
{
struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
@@ -1327,8 +1229,8 @@ int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i)
if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
(CX88_VMUX_CABLE == INPUT(n).type)) {
i->type = V4L2_INPUT_TYPE_TUNER;
- i->std = CX88_NORMS;
}
+ i->std = CX88_NORMS;
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
@@ -1354,6 +1256,8 @@ static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
if (i >= 4)
return -EINVAL;
+ if (0 == INPUT(i).type)
+ return -EINVAL;
mutex_lock(&core->lock);
cx88_newstation(core);
@@ -1362,35 +1266,6 @@ static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
return 0;
}
-
-
-static int vidioc_queryctrl (struct file *file, void *priv,
- struct v4l2_queryctrl *qctrl)
-{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
-
- qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
- if (unlikely(qctrl->id == 0))
- return -EINVAL;
- return cx8800_ctrl_query(core, qctrl);
-}
-
-static int vidioc_g_ctrl (struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
- return
- cx88_get_control(core,ctl);
-}
-
-static int vidioc_s_ctrl (struct file *file, void *priv,
- struct v4l2_control *ctl)
-{
- struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
- return
- cx88_set_control(core,ctl);
-}
-
static int vidioc_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1403,9 +1278,9 @@ static int vidioc_g_tuner (struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Television");
- t->type = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh = 0xffffffffUL;
+ call_all(core, tuner, g_tuner, t);
cx88_get_stereo(core ,t);
reg = cx_read(MO_DEVICE_STATUS);
@@ -1435,9 +1310,9 @@ static int vidioc_g_frequency (struct file *file, void *priv,
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
+ if (f->tuner)
+ return -EINVAL;
- /* f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; */
- f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
f->frequency = core->freq;
call_all(core, tuner, g_frequency, f);
@@ -1454,9 +1329,10 @@ int cx88_set_freq (struct cx88_core *core,
return -EINVAL;
mutex_lock(&core->lock);
- core->freq = f->frequency;
cx88_newstation(core);
call_all(core, tuner, s_frequency, f);
+ call_all(core, tuner, g_frequency, f);
+ core->freq = f->frequency;
/* When changing channels it is required to reset TVAUDIO */
msleep (10);
@@ -1474,13 +1350,17 @@ static int vidioc_s_frequency (struct file *file, void *priv,
struct cx8800_fh *fh = priv;
struct cx88_core *core = fh->dev->core;
- if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
- return -EINVAL;
- if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
- return -EINVAL;
+ return cx88_set_freq(core, f);
+}
- return
- cx88_set_freq (core,f);
+static int vidioc_g_chip_ident(struct file *file, void *priv,
+ struct v4l2_dbg_chip_ident *chip)
+{
+ if (!v4l2_chip_match_host(&chip->match))
+ return -EINVAL;
+ chip->revision = 0;
+ chip->ident = V4L2_IDENT_UNKNOWN;
+ return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1513,19 +1393,6 @@ static int vidioc_s_register (struct file *file, void *fh,
/* RADIO SPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
-static int radio_querycap (struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct cx8800_dev *dev = ((struct cx8800_fh *)priv)->dev;
- struct cx88_core *core = dev->core;
-
- strcpy(cap->driver, "cx8800");
- strlcpy(cap->card, core->board.name, sizeof(cap->card));
- sprintf(cap->bus_info,"PCI:%s", pci_name(dev->pci));
- cap->capabilities = V4L2_CAP_TUNER;
- return 0;
-}
-
static int radio_g_tuner (struct file *file, void *priv,
struct v4l2_tuner *t)
{
@@ -1535,32 +1402,11 @@ static int radio_g_tuner (struct file *file, void *priv,
return -EINVAL;
strcpy(t->name, "Radio");
- t->type = V4L2_TUNER_RADIO;
call_all(core, tuner, g_tuner, t);
return 0;
}
-static int radio_enum_input (struct file *file, void *priv,
- struct v4l2_input *i)
-{
- if (i->index != 0)
- return -EINVAL;
- strcpy(i->name,"Radio");
- i->type = V4L2_INPUT_TYPE_TUNER;
-
- return 0;
-}
-
-static int radio_g_audio (struct file *file, void *priv, struct v4l2_audio *a)
-{
- if (unlikely(a->index))
- return -EINVAL;
-
- strcpy(a->name,"Radio");
- return 0;
-}
-
/* FIXME: Should add a standard for radio */
static int radio_s_tuner (struct file *file, void *priv,
@@ -1570,46 +1416,14 @@ static int radio_s_tuner (struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
+ if (t->audmode > V4L2_TUNER_MODE_STEREO)
+ t->audmode = V4L2_TUNER_MODE_STEREO;
call_all(core, tuner, s_tuner, t);
return 0;
}
-static int radio_s_audio (struct file *file, void *fh,
- struct v4l2_audio *a)
-{
- return 0;
-}
-
-static int radio_s_input (struct file *file, void *fh, unsigned int i)
-{
- return 0;
-}
-
-static int radio_queryctrl (struct file *file, void *priv,
- struct v4l2_queryctrl *c)
-{
- int i;
-
- if (c->id < V4L2_CID_BASE ||
- c->id >= V4L2_CID_LASTP1)
- return -EINVAL;
- if (c->id == V4L2_CID_AUDIO_MUTE ||
- c->id == V4L2_CID_AUDIO_VOLUME ||
- c->id == V4L2_CID_AUDIO_BALANCE) {
- for (i = 0; i < CX8800_CTLS; i++) {
- if (cx8800_ctls[i].v.id == c->id)
- break;
- }
- if (i == CX8800_CTLS)
- return -EINVAL;
- *c = cx8800_ctls[i].v;
- } else
- *c = no_ctl;
- return 0;
-}
-
/* ----------------------------------------------------------- */
static void cx8800_vid_timeout(unsigned long data)
@@ -1752,63 +1566,89 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt,
- .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt,
- .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
.vidioc_s_std = vidioc_s_std,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_g_tuner = vidioc_g_tuner,
.vidioc_s_tuner = vidioc_s_tuner,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_chip_ident = vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
#endif
};
-static struct video_device cx8800_vbi_template;
-
static const struct video_device cx8800_video_template = {
.name = "cx8800-video",
.fops = &video_fops,
.ioctl_ops = &video_ioctl_ops,
.tvnorms = CX88_NORMS,
- .current_norm = V4L2_STD_NTSC_M,
+};
+
+static const struct v4l2_ioctl_ops vbi_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt,
+ .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt,
+ .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+ .vidioc_g_std = vidioc_g_std,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_g_chip_ident = vidioc_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .vidioc_g_register = vidioc_g_register,
+ .vidioc_s_register = vidioc_s_register,
+#endif
+};
+
+static const struct video_device cx8800_vbi_template = {
+ .name = "cx8800-vbi",
+ .fops = &video_fops,
+ .ioctl_ops = &vbi_ioctl_ops,
+ .tvnorms = CX88_NORMS,
};
static const struct v4l2_file_operations radio_fops =
{
.owner = THIS_MODULE,
.open = video_open,
+ .poll = v4l2_ctrl_poll,
.release = video_release,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
- .vidioc_querycap = radio_querycap,
+ .vidioc_querycap = vidioc_querycap,
.vidioc_g_tuner = radio_g_tuner,
- .vidioc_enum_input = radio_enum_input,
- .vidioc_g_audio = radio_g_audio,
.vidioc_s_tuner = radio_s_tuner,
- .vidioc_s_audio = radio_s_audio,
- .vidioc_s_input = radio_s_input,
- .vidioc_queryctrl = radio_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
.vidioc_g_frequency = vidioc_g_frequency,
.vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_g_chip_ident = vidioc_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = vidioc_g_register,
.vidioc_s_register = vidioc_s_register,
@@ -1821,6 +1661,14 @@ static const struct video_device cx8800_radio_template = {
.ioctl_ops = &radio_ioctl_ops,
};
+static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = {
+ .s_ctrl = cx8800_s_vid_ctrl,
+};
+
+static const struct v4l2_ctrl_ops cx8800_ctrl_aud_ops = {
+ .s_ctrl = cx8800_s_aud_ctrl,
+};
+
/* ----------------------------------------------------------- */
static void cx8800_unregister_video(struct cx8800_dev *dev)
@@ -1853,8 +1701,8 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
{
struct cx8800_dev *dev;
struct cx88_core *core;
-
int err;
+ int i;
dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
@@ -1888,14 +1736,9 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
goto fail_core;
}
- /* Initialize VBI template */
- memcpy( &cx8800_vbi_template, &cx8800_video_template,
- sizeof(cx8800_vbi_template) );
- strcpy(cx8800_vbi_template.name,"cx8800-vbi");
-
/* initialize driver struct */
spin_lock_init(&dev->slock);
- core->tvnorm = cx8800_video_template.current_norm;
+ core->tvnorm = V4L2_STD_NTSC_M;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
@@ -1925,6 +1768,35 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
+ for (i = 0; i < CX8800_AUD_CTLS; i++) {
+ const struct cx88_ctrl *cc = &cx8800_aud_ctls[i];
+ struct v4l2_ctrl *vc;
+
+ vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops,
+ cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
+ if (vc == NULL) {
+ err = core->audio_hdl.error;
+ goto fail_core;
+ }
+ vc->priv = (void *)cc;
+ }
+
+ for (i = 0; i < CX8800_VID_CTLS; i++) {
+ const struct cx88_ctrl *cc = &cx8800_vid_ctls[i];
+ struct v4l2_ctrl *vc;
+
+ vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops,
+ cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value);
+ if (vc == NULL) {
+ err = core->video_hdl.error;
+ goto fail_core;
+ }
+ vc->priv = (void *)cc;
+ if (vc->id == V4L2_CID_CHROMA_AGC)
+ core->chroma_agc = vc;
+ }
+ v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl);
+
/* load and configure helper modules */
if (core->board.audio_chip == V4L2_IDENT_WM8775) {
@@ -1942,8 +1814,10 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap,
&wm8775_info, NULL);
- if (sd != NULL)
+ if (sd != NULL) {
+ core->sd_wm8775 = sd;
sd->grp_id = WM8775_GID;
+ }
}
if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) {
@@ -1971,16 +1845,22 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
/* Sets device info at pci_dev */
pci_set_drvdata(pci_dev, dev);
+ dev->width = 320;
+ dev->height = 240;
+ dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+
/* initial device configuration */
mutex_lock(&core->lock);
cx88_set_tvnorm(core, core->tvnorm);
- init_controls(core);
+ v4l2_ctrl_handler_setup(&core->video_hdl);
+ v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
/* register v4l devices */
dev->video_dev = cx88_vdev_init(core,dev->pci,
&cx8800_video_template,"video");
video_set_drvdata(dev->video_dev, dev);
+ dev->video_dev->ctrl_handler = &core->video_hdl;
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
@@ -2007,6 +1887,7 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
dev->radio_dev = cx88_vdev_init(core,dev->pci,
&cx8800_radio_template,"radio");
video_set_drvdata(dev->radio_dev, dev);
+ dev->radio_dev->ctrl_handler = &core->audio_hdl;
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
diff --git a/drivers/media/video/cx88/cx88.h b/drivers/media/video/cx88/cx88.h
index c9659def2a78..0cae0fd9e164 100644
--- a/drivers/media/video/cx88/cx88.h
+++ b/drivers/media/video/cx88/cx88.h
@@ -26,6 +26,7 @@
#include <linux/kdev_t.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
#include <media/tuner.h>
#include <media/tveeprom.h>
#include <media/videobuf-dma-sg.h>
@@ -115,15 +116,6 @@ struct cx8800_fmt {
u32 cxformat;
};
-struct cx88_ctrl {
- struct v4l2_queryctrl v;
- u32 off;
- u32 reg;
- u32 sreg;
- u32 mask;
- u32 shift;
-};
-
/* ----------------------------------------------------------- */
/* SRAM memory management data (see cx88-core.c) */
@@ -359,6 +351,10 @@ struct cx88_core {
/* config info -- analog */
struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler video_hdl;
+ struct v4l2_ctrl *chroma_agc;
+ struct v4l2_ctrl_handler audio_hdl;
+ struct v4l2_subdev *sd_wm8775;
struct i2c_client *i2c_rtc;
unsigned int boardnr;
struct cx88_board board;
@@ -409,8 +405,6 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
return container_of(v4l2_dev, struct cx88_core, v4l2_dev);
}
-#define WM8775_GID (1 << 0)
-
#define call_hw(core, grpid, o, f, args...) \
do { \
if (!core->i2c_rc) { \
@@ -424,6 +418,36 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
#define call_all(core, o, f, args...) call_hw(core, 0, o, f, ##args)
+#define WM8775_GID (1 << 0)
+
+#define wm8775_s_ctrl(core, id, val) \
+ do { \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ v4l2_ctrl_s_ctrl(ctrl_, val); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
+ } while (0)
+
+#define wm8775_g_ctrl(core, id) \
+ ({ \
+ struct v4l2_ctrl *ctrl_ = \
+ v4l2_ctrl_find(core->sd_wm8775->ctrl_handler, id); \
+ s32 val = 0; \
+ if (ctrl_ && !core->i2c_rc) { \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 1); \
+ val = v4l2_ctrl_g_ctrl(ctrl_); \
+ if (core->gate_ctrl) \
+ core->gate_ctrl(core, 0); \
+ } \
+ val; \
+ })
+
struct cx8800_dev;
struct cx8802_dev;
@@ -431,19 +455,11 @@ struct cx8802_dev;
/* function 0: video stuff */
struct cx8800_fh {
+ struct v4l2_fh fh;
struct cx8800_dev *dev;
- enum v4l2_buf_type type;
- int radio;
unsigned int resources;
- /* video overlay */
- struct v4l2_window win;
- struct v4l2_clip *clips;
- unsigned int nclips;
-
/* video capture */
- const struct cx8800_fmt *fmt;
- unsigned int width,height;
struct videobuf_queue vidq;
/* vbi capture */
@@ -468,6 +484,8 @@ struct cx8800_dev {
struct pci_dev *pci;
unsigned char pci_rev,pci_lat;
+ const struct cx8800_fmt *fmt;
+ unsigned int width, height;
/* capture queues */
struct cx88_dmaqueue vidq;
@@ -488,6 +506,7 @@ struct cx8800_dev {
/* function 2: mpeg stuff */
struct cx8802_fh {
+ struct v4l2_fh fh;
struct cx8802_dev *dev;
struct videobuf_queue mpegq;
};
@@ -552,7 +571,7 @@ struct cx8802_dev {
unsigned char mpeg_active; /* nonzero if mpeg encoder is active */
/* mpeg params */
- struct cx2341x_mpeg_params params;
+ struct cx2341x_handler cxhdl;
#endif
#if defined(CONFIG_VIDEO_CX88_DVB) || defined(CONFIG_VIDEO_CX88_DVB_MODULE)
@@ -722,11 +741,8 @@ void cx8802_cancel_buffers(struct cx8802_dev *dev);
/* ----------------------------------------------------------- */
/* cx88-video.c*/
-extern const u32 cx88_user_ctrls[];
-extern int cx8800_ctrl_query(struct cx88_core *core,
- struct v4l2_queryctrl *qctrl);
int cx88_enum_input (struct cx88_core *core,struct v4l2_input *i);
int cx88_set_freq (struct cx88_core *core,struct v4l2_frequency *f);
-int cx88_get_control(struct cx88_core *core, struct v4l2_control *ctl);
-int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl);
int cx88_video_mux(struct cx88_core *core, unsigned int input);
+void cx88_querycap(struct file *file, struct cx88_core *core,
+ struct v4l2_capability *cap);
diff --git a/drivers/media/video/davinci/Kconfig b/drivers/media/video/davinci/Kconfig
index 9337b5605c90..52c5ca68cb3d 100644
--- a/drivers/media/video/davinci/Kconfig
+++ b/drivers/media/video/davinci/Kconfig
@@ -1,30 +1,34 @@
-config DISPLAY_DAVINCI_DM646X_EVM
- tristate "DM646x EVM Video Display"
- depends on VIDEO_DEV && MACH_DAVINCI_DM6467_EVM
- select VIDEOBUF_DMA_CONTIG
+config VIDEO_DAVINCI_VPIF_DISPLAY
+ tristate "DM646x/DA850/OMAPL138 EVM Video Display"
+ depends on VIDEO_DEV && (MACH_DAVINCI_DM6467_EVM || MACH_DAVINCI_DA850_EVM)
+ select VIDEOBUF2_DMA_CONTIG
select VIDEO_DAVINCI_VPIF
- select VIDEO_ADV7343
- select VIDEO_THS7303
+ select VIDEO_ADV7343 if VIDEO_HELPER_CHIPS_AUTO
+ select VIDEO_THS7303 if VIDEO_HELPER_CHIPS_AUTO
help
- Support for DM6467 based display device.
+	  Enables the DaVinci VPIF module used for display devices.
+	  This module is common to DM6467/DA850/OMAPL138 based
+	  display devices.
To compile this driver as a module, choose M here: the
module will be called vpif_display.
-config CAPTURE_DAVINCI_DM646X_EVM
- tristate "DM646x EVM Video Capture"
- depends on VIDEO_DEV && MACH_DAVINCI_DM6467_EVM
- select VIDEOBUF_DMA_CONTIG
+config VIDEO_DAVINCI_VPIF_CAPTURE
+ tristate "DM646x/DA850/OMAPL138 EVM Video Capture"
+ depends on VIDEO_DEV && (MACH_DAVINCI_DM6467_EVM || MACH_DAVINCI_DA850_EVM)
+ select VIDEOBUF2_DMA_CONTIG
select VIDEO_DAVINCI_VPIF
help
- Support for DM6467 based capture device.
+	  Enables the DaVinci VPIF module used for capture devices.
+	  This module is common to DM6467/DA850/OMAPL138 based
+	  capture devices.
To compile this driver as a module, choose M here: the
module will be called vpif_capture.
config VIDEO_DAVINCI_VPIF
tristate "DaVinci VPIF Driver"
- depends on DISPLAY_DAVINCI_DM646X_EVM
+ depends on VIDEO_DAVINCI_VPIF_DISPLAY || VIDEO_DAVINCI_VPIF_CAPTURE
help
Support for DaVinci VPIF Driver.
diff --git a/drivers/media/video/davinci/Makefile b/drivers/media/video/davinci/Makefile
index ae7dafb689ab..74ed92d09257 100644
--- a/drivers/media/video/davinci/Makefile
+++ b/drivers/media/video/davinci/Makefile
@@ -5,10 +5,10 @@
# VPIF
obj-$(CONFIG_VIDEO_DAVINCI_VPIF) += vpif.o
-#DM646x EVM Display driver
-obj-$(CONFIG_DISPLAY_DAVINCI_DM646X_EVM) += vpif_display.o
-#DM646x EVM Capture driver
-obj-$(CONFIG_CAPTURE_DAVINCI_DM646X_EVM) += vpif_capture.o
+#VPIF Display driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif_display.o
+#VPIF Capture driver
+obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif_capture.o
# Capture: DM6446 and DM355
obj-$(CONFIG_VIDEO_VPSS_SYSTEM) += vpss.o
diff --git a/drivers/media/video/davinci/vpbe_display.c b/drivers/media/video/davinci/vpbe_display.c
index e106b72810a9..6fe7034bea7c 100644
--- a/drivers/media/video/davinci/vpbe_display.c
+++ b/drivers/media/video/davinci/vpbe_display.c
@@ -1083,7 +1083,7 @@ vpbe_display_s_dv_preset(struct file *file, void *priv,
}
/* Set the given standard in the encoder */
- if (NULL != vpbe_dev->ops.s_dv_preset)
+ if (!vpbe_dev->ops.s_dv_preset)
return -EINVAL;
ret = vpbe_dev->ops.s_dv_preset(vpbe_dev, preset);
@@ -1517,6 +1517,8 @@ static int vpbe_display_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct v4l2_dbg_match *match = &reg->match;
+ struct vpbe_fh *fh = file->private_data;
+ struct vpbe_device *vpbe_dev = fh->disp_dev->vpbe_dev;
if (match->type >= 2) {
v4l2_subdev_call(vpbe_dev->venc,
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index af9680273ff9..b3637aff8fee 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -1,5 +1,5 @@
/*
- * vpif - DM646x Video Port Interface driver
+ * vpif - Video Port Interface driver
* VPIF is a receiver and transmitter for video data. It has two channels(0, 1)
* that receive the video byte stream and two channels(2, 3) for video output.
* The hardware supports SDTV, HDTV formats, raw data capture.
@@ -23,6 +23,8 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include "vpif.h"
@@ -40,6 +42,7 @@ static struct resource *res;
spinlock_t vpif_lock;
void __iomem *vpif_base;
+struct clk *vpif_clk;
/**
* ch_params: video standard configuration parameters for vpif
@@ -346,7 +349,7 @@ static void config_vpif_params(struct vpif_params *vpifparams,
value = regr(reg);
/* Set data width */
- value &= ((~(unsigned int)(0x3)) <<
+ value &= ~(0x3u <<
VPIF_CH_DATA_WIDTH_BIT);
value |= ((vpifparams->params.data_sz) <<
VPIF_CH_DATA_WIDTH_BIT);
@@ -434,10 +437,19 @@ static int __init vpif_probe(struct platform_device *pdev)
goto fail;
}
+ vpif_clk = clk_get(&pdev->dev, "vpif");
+ if (IS_ERR(vpif_clk)) {
+ status = PTR_ERR(vpif_clk);
+ goto clk_fail;
+ }
+ clk_enable(vpif_clk);
+
spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
return 0;
+clk_fail:
+ iounmap(vpif_base);
fail:
release_mem_region(res->start, res_len);
return status;
@@ -445,15 +457,44 @@ fail:
static int __devexit vpif_remove(struct platform_device *pdev)
{
+ if (vpif_clk) {
+ clk_disable(vpif_clk);
+ clk_put(vpif_clk);
+ }
+
iounmap(vpif_base);
release_mem_region(res->start, res_len);
return 0;
}
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+ clk_disable(vpif_clk);
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+ clk_enable(vpif_clk);
+ return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+ .suspend = vpif_suspend,
+ .resume = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
static struct platform_driver vpif_driver = {
.driver = {
.name = "vpif",
.owner = THIS_MODULE,
+ .pm = vpif_pm_ops,
},
.remove = __devexit_p(vpif_remove),
.probe = vpif_probe,
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 8bcac65f9294..c2ce4d97c279 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -211,6 +211,12 @@ static inline void vpif_clr_bit(u32 reg, u32 bit)
#define VPIF_CH3_INT_CTRL_SHIFT (6)
#define VPIF_CH_INT_CTRL_SHIFT (6)
+#define VPIF_CH2_CLIP_ANC_EN 14
+#define VPIF_CH2_CLIP_ACTIVE_EN 13
+
+#define VPIF_CH3_CLIP_ANC_EN 14
+#define VPIF_CH3_CLIP_ACTIVE_EN 13
+
/* enabled interrupt on both the fields on vpid_ch0_ctrl register */
#define channel0_intr_assert() (regw((regr(VPIF_CH0_CTRL)|\
(VPIF_INT_BOTH << VPIF_CH0_INT_CTRL_SHIFT)), VPIF_CH0_CTRL))
@@ -515,6 +521,30 @@ static inline void channel3_raw_enable(int enable, u8 index)
vpif_clr_bit(VPIF_CH3_CTRL, mask);
}
+/* function to enable clipping (for both active and blanking regions) on ch 2 */
+static inline void channel2_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH2_CTRL, VPIF_CH2_CLIP_ACTIVE_EN);
+ }
+}
+
+/* function to enable clipping (for both active and blanking regions) on ch 3 */
+static inline void channel3_clipping_enable(int enable)
+{
+ if (enable) {
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_set_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ } else {
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ANC_EN);
+ vpif_clr_bit(VPIF_CH3_CTRL, VPIF_CH3_CLIP_ACTIVE_EN);
+ }
+}
+
/* inline function to set buffer addresses in case of Y/C non mux mode */
static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
unsigned long btm_strt_luma,
@@ -569,6 +599,21 @@ static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
}
+static inline int vpif_intr_status(int channel)
+{
+ int status = 0;
+ int mask;
+
+ if (channel < 0 || channel > 3)
+ return 0;
+
+ mask = 1 << channel;
+ status = regr(VPIF_STATUS) & mask;
+ regw(status, VPIF_STATUS_CLR);
+
+ return status;
+}
+
#define VPIF_MAX_NAME (30)
/* This structure will store size parameters as per the mode selected by user */
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 96046957bf21..266025e5d81d 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -80,108 +80,45 @@ static struct vpif_config_params config_params = {
/* global variables */
static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
-
-/**
- * vpif_uservirt_to_phys : translate user/virtual address to phy address
- * @virtp: user/virtual address
- *
- * This inline function is used to convert user space virtual address to
- * physical address.
- */
-static inline u32 vpif_uservirt_to_phys(u32 virtp)
-{
- unsigned long physp = 0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, virtp);
-
- /* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET)
- physp = virt_to_phys((void *)virtp);
- else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff))
- /**
- * this will catch, kernel-allocated, mmaped-to-usermode
- * addresses
- */
- physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
- else {
- /* otherwise, use get_user_pages() for general userland pages */
- int res, nr_pages = 1;
- struct page *pages;
-
- down_read(&current->mm->mmap_sem);
-
- res = get_user_pages(current, current->mm,
- virtp, nr_pages, 1, 0, &pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (res == nr_pages)
- physp = __pa(page_address(&pages[0]) +
- (virtp & ~PAGE_MASK));
- else {
- vpif_err("get_user_pages failed\n");
- return 0;
- }
- }
- return physp;
-}
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
/**
* buffer_prepare : callback function for buffer prepare
- * @q : buffer queue ptr
- * @vb: ptr to video buffer
- * @field: field info
+ * @vb: ptr to vb2_buffer
*
- * This is the callback function for buffer prepare when videobuf_qbuf()
+ * This is the callback function for buffer prepare when vb2_qbuf()
* function is called. The buffer is prepared and user space virtual address
* or user address is converted into physical address
*/
-static int vpif_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
/* Get the file handle object and channel object */
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
struct channel_obj *ch = fh->channel;
struct common_obj *common;
unsigned long addr;
-
vpif_dbg(2, debug, "vpif_buffer_prepare\n");
common = &ch->common[VPIF_VIDEO_INDEX];
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = common->width;
- vb->height = common->height;
- vb->size = vb->width * vb->height;
- vb->field = field;
- }
- vb->state = VIDEOBUF_PREPARED;
- /**
- * if user pointer memory mechanism is used, get the physical
- * address of the buffer
- */
- if (V4L2_MEMORY_USERPTR == common->memory) {
- if (0 == vb->baddr) {
- vpif_dbg(1, debug, "buffer address is 0\n");
- return -EINVAL;
-
- }
- vb->boff = vpif_uservirt_to_phys(vb->baddr);
- if (!IS_ALIGNED(vb->boff, 8))
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
goto exit;
- }
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- addr = vb->boff;
- if (q->streaming) {
- if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
- !IS_ALIGNED((addr + common->ybtm_off), 8) ||
- !IS_ALIGNED((addr + common->ctop_off), 8) ||
- !IS_ALIGNED((addr + common->cbtm_off), 8))
- goto exit;
+ if (q->streaming) {
+ if (!IS_ALIGNED((addr + common->ytop_off), 8) ||
+ !IS_ALIGNED((addr + common->ybtm_off), 8) ||
+ !IS_ALIGNED((addr + common->ctop_off), 8) ||
+ !IS_ALIGNED((addr + common->cbtm_off), 8))
+ goto exit;
+ }
}
return 0;
exit:
@@ -190,49 +127,79 @@ exit:
}
/**
- * vpif_buffer_setup : Callback function for buffer setup.
- * @q: buffer queue ptr
- * @count: number of buffers
- * @size: size of the buffer
+ * vpif_buffer_queue_setup : Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains the number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
*
* This callback function is called when reqbuf() is called to adjust
* the buffer count and buffer size
*/
-static int vpif_buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
/* Get the file handle object and channel object */
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long size;
common = &ch->common[VPIF_VIDEO_INDEX];
vpif_dbg(2, debug, "vpif_buffer_setup\n");
/* If memory type is not mmap, return */
- if (V4L2_MEMORY_MMAP != common->memory)
- return 0;
+ if (V4L2_MEMORY_MMAP == common->memory) {
+ /* Calculate the size of the buffer */
+ size = config_params.channel_bufsize[ch->channel_id];
+ /*
+	 * Check whether the requested buffers exceed the available video memory.
+	 * ycmux_mode = 0 means 1 channel (HD) mode and
+	 * ycmux_mode = 1 means 2 channel (SD) mode
+ */
+ if (ch->vpifparams.std_info.ycmux_mode == 0) {
+ if (config_params.video_limit[ch->channel_id])
+ while (size * *nbuffers >
+ (config_params.video_limit[0]
+ + config_params.video_limit[1]))
+ (*nbuffers)--;
+ } else {
+ if (config_params.video_limit[ch->channel_id])
+ while (size * *nbuffers >
+ config_params.video_limit[ch->channel_id])
+ (*nbuffers)--;
+ }
+
+ } else {
+ size = common->fmt.fmt.pix.sizeimage;
+ }
+
+ if (*nbuffers < config_params.min_numbuffers)
+ *nbuffers = config_params.min_numbuffers;
- /* Calculate the size of the buffer */
- *size = config_params.channel_bufsize[ch->channel_id];
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = common->alloc_ctx;
- if (*count < config_params.min_numbuffers)
- *count = config_params.min_numbuffers;
return 0;
}
/**
* vpif_buffer_queue : Callback function to add buffer to DMA queue
- * @q: ptr to videobuf_queue
- * @vb: ptr to videobuf_buffer
+ * @vb: ptr to vb2_buffer
*/
-static void vpif_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpif_buffer_queue(struct vb2_buffer *vb)
{
/* Get the file handle object and channel object */
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
struct channel_obj *ch = fh->channel;
+ struct vpif_cap_buffer *buf = container_of(vb,
+ struct vpif_cap_buffer, vb);
struct common_obj *common;
common = &ch->common[VPIF_VIDEO_INDEX];
@@ -240,43 +207,189 @@ static void vpif_buffer_queue(struct videobuf_queue *q,
vpif_dbg(2, debug, "vpif_buffer_queue\n");
/* add the buffer to the DMA queue */
- list_add_tail(&vb->queue, &common->dma_queue);
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->list, &common->dma_queue);
}
/**
- * vpif_buffer_release : Callback function to free buffer
- * @q: buffer queue ptr
- * @vb: ptr to video buffer
+ * vpif_buf_cleanup : Callback function to free buffer
+ * @vb: ptr to vb2_buffer
*
- * This function is called from the videobuf layer to free memory
+ * This function is called from the videobuf2 layer to free memory
* allocated to the buffers
*/
-static void vpif_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpif_buf_cleanup(struct vb2_buffer *vb)
{
/* Get the file handle object and channel object */
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpif_cap_buffer *buf = container_of(vb,
+ struct vpif_cap_buffer, vb);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
+ unsigned long flags;
common = &ch->common[VPIF_VIDEO_INDEX];
- videobuf_dma_contig_free(q, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+ spin_lock_irqsave(&common->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+
}
-static struct videobuf_queue_ops video_qops = {
- .buf_setup = vpif_buffer_setup,
- .buf_prepare = vpif_buffer_prepare,
- .buf_queue = vpif_buffer_queue,
- .buf_release = vpif_buffer_release,
-};
+static void vpif_wait_prepare(struct vb2_queue *vq)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_unlock(&common->lock);
+}
+
+static void vpif_wait_finish(struct vb2_queue *vq)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+}
+
+static int vpif_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpif_cap_buffer *buf = container_of(vb,
+ struct vpif_cap_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] =
{ {1, 1} };
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_capture_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ unsigned long addr = 0;
+ int ret;
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&common->dma_queue)) {
+ vpif_dbg(1, debug, "buffer queue is empty\n");
+ return -EIO;
+ }
+
+ /* Get the next frame from the buffer queue */
+ common->cur_frm = common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ /* Remove buffer from the buffer queue */
+ list_del(&common->cur_frm->list);
+ /* Mark state of the current frame to active */
+ common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ /* Initialize field_id and started member */
+ ch->field_id = 0;
+ common->started = 1;
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ if ((vpif->std_info.frm_fmt &&
+ ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
+ (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
+ (!vpif->std_info.frm_fmt &&
+ (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
+ vpif_dbg(1, debug, "conflict in field format and std format\n");
+ return -EINVAL;
+ }
+
+ /* configure 1 or 2 channel mode */
+ ret = vpif_config_data->setup_input_channel_mode
+ (vpif->std_info.ycmux_mode);
+
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set vpif channel mode\n");
+ return ret;
+ }
+
+	/* Call vpif_set_video_params to set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id);
+
+ if (ret < 0) {
+ vpif_dbg(1, debug, "can't set video params\n");
+ return ret;
+ }
+
+ common->started = ret;
+ vpif_config_addr(ch, ret);
+
+ common->set_addr(addr + common->ytop_off,
+ addr + common->ybtm_off,
+ addr + common->ctop_off,
+ addr + common->cbtm_off);
+
+ /**
+	 * Enable interrupts for both fields and enable the channel in the
+	 * VPIF registers
+ */
+ if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
+ channel0_intr_assert();
+ channel0_intr_enable(1);
+ enable_channel0(1);
+ }
+ if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
+ (common->started == 2)) {
+ channel1_intr_assert();
+ channel1_intr_enable(1);
+ enable_channel1(1);
+ }
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* release all active buffers */
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_cap_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ return 0;
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .wait_prepare = vpif_wait_prepare,
+ .wait_finish = vpif_wait_finish,
+ .buf_init = vpif_buffer_init,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_cleanup = vpif_buf_cleanup,
+ .buf_queue = vpif_buffer_queue,
+};
+
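
For illustration only (not part of this patch): the vb2 core invokes the ops above from the ordinary V4L2 ioctl path, so a minimal user-space capture loop exercises them directly — VIDIOC_REQBUFS drives .queue_setup, VIDIOC_QBUF drives .buf_prepare/.buf_queue, and VIDIOC_STREAMON drives .start_streaming. The device node name below is an assumption; error handling is omitted for brevity.

/* Minimal user-space sketch, not part of the commit. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	unsigned int i;

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.count = 3;				/* ends up in .queue_setup */
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	for (i = 0; i < req.count; i++) {
		memset(&buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		ioctl(fd, VIDIOC_QBUF, &buf);	/* .buf_prepare + .buf_queue */
	}

	ioctl(fd, VIDIOC_STREAMON, &type);	/* .start_streaming */
	ioctl(fd, VIDIOC_DQBUF, &buf);		/* completed by the channel ISR */
	ioctl(fd, VIDIOC_STREAMOFF, &type);	/* .stop_streaming */
	close(fd);
	return 0;
}
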
/**
* vpif_process_buffer_complete: process a completed buffer
* @common: ptr to common channel object
@@ -287,9 +400,9 @@ static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] =
*/
static void vpif_process_buffer_complete(struct common_obj *common)
{
- do_gettimeofday(&common->cur_frm->ts);
- common->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&common->cur_frm->done);
+ do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
/* Make curFrm pointing to nextFrm */
common->cur_frm = common->next_frm;
}
@@ -307,14 +420,11 @@ static void vpif_schedule_next_buffer(struct common_obj *common)
unsigned long addr = 0;
common->next_frm = list_entry(common->dma_queue.next,
- struct videobuf_buffer, queue);
+ struct vpif_cap_buffer, list);
/* Remove that buffer from the buffer queue */
- list_del(&common->next_frm->queue);
- common->next_frm->state = VIDEOBUF_ACTIVE;
- if (V4L2_MEMORY_USERPTR == common->memory)
- addr = common->next_frm->boff;
- else
- addr = videobuf_to_dma_contig(common->next_frm);
+ list_del(&common->next_frm->list);
+ common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
/* Set top and bottom field addresses in VPIF registers */
common->set_addr(addr + common->ytop_off,
@@ -341,6 +451,9 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
int fid = -1, i;
channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id))
+ return IRQ_NONE;
+
ch = dev->dev[channel_id];
field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
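
For illustration only (not part of this patch): the early return added above follows the usual shared-interrupt contract — since request_irq() now passes IRQF_SHARED (see the probe hunk further down), the handler must check whether its own device raised the line and return IRQ_NONE otherwise. A generic sketch with hypothetical type and helper names:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_dev {			/* hypothetical device type */
	void __iomem *regs;
};

static bool example_irq_pending(struct example_dev *d)	/* hypothetical helper */
{
	return readl(d->regs) & 0x1;	/* analogous to vpif_intr_status() */
}

static irqreturn_t example_shared_isr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	if (!example_irq_pending(dev))
		return IRQ_NONE;	/* not ours: let the other sharers run */

	/* ... acknowledge and service the interrupt here ... */
	return IRQ_HANDLED;
}
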
@@ -485,10 +598,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
} else
vid_ch->buf_field = common->fmt.fmt.pix.field;
- if (V4L2_MEMORY_USERPTR == common->memory)
- sizeimage = common->fmt.fmt.pix.sizeimage;
- else
- sizeimage = config_params.channel_bufsize[ch->channel_id];
+ sizeimage = common->fmt.fmt.pix.sizeimage;
hpitch = common->fmt.fmt.pix.bytesperline;
vpitch = sizeimage / (hpitch * 2);
@@ -640,10 +750,7 @@ static int vpif_check_format(struct channel_obj *ch,
hpitch = vpif_params->std_info.width;
}
- if (V4L2_MEMORY_USERPTR == common->memory)
- sizeimage = pixfmt->sizeimage;
- else
- sizeimage = config_params.channel_bufsize[ch->channel_id];
+ sizeimage = pixfmt->sizeimage;
vpitch = sizeimage / (hpitch * 2);
@@ -703,7 +810,7 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
}
/**
- * vpfe_mmap : It is used to map kernel space buffers into user spaces
+ * vpif_mmap: It is used to map kernel space buffers into user space
* @filep: file pointer
* @vma: ptr to vm_area_struct
*/
@@ -716,7 +823,7 @@ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
vpif_dbg(2, debug, "vpif_mmap\n");
- return videobuf_mmap_mapper(&common->buffer_queue, vma);
+ return vb2_mmap(&common->buffer_queue, vma);
}
/**
@@ -733,7 +840,7 @@ static unsigned int vpif_poll(struct file *filep, poll_table * wait)
vpif_dbg(2, debug, "vpif_poll\n");
if (common->started)
- return videobuf_poll_stream(filep, &common->buffer_queue, wait);
+ return vb2_poll(&common->buffer_queue, filep, wait);
return 0;
}
@@ -812,7 +919,7 @@ static int vpif_open(struct file *filep)
* vpif_release : function to clean up file close
* @filep: file pointer
*
- * This function deletes buffer queue, frees the buffers and the vpfe file
+ * This function deletes buffer queue, frees the buffers and the vpif file
* handle
*/
static int vpif_release(struct file *filep)
@@ -841,8 +948,8 @@ static int vpif_release(struct file *filep)
}
common->started = 0;
/* Free buffers allocated */
- videobuf_queue_cancel(&common->buffer_queue);
- videobuf_mmap_free(&common->buffer_queue);
+ vb2_queue_release(&common->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
}
/* Decrement channel usrs counter */
@@ -872,6 +979,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct channel_obj *ch = fh->channel;
struct common_obj *common;
u8 index = 0;
+ struct vb2_queue *q;
vpif_dbg(2, debug, "vpif_reqbufs\n");
@@ -887,7 +995,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
}
}
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type)
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != reqbuf->type || !vpif_dev)
return -EINVAL;
index = VPIF_VIDEO_INDEX;
@@ -897,14 +1005,21 @@ static int vpif_reqbufs(struct file *file, void *priv,
if (0 != common->io_usrs)
return -EBUSY;
- /* Initialize videobuf queue as per the buffer type */
- videobuf_queue_dma_contig_init(&common->buffer_queue,
- &video_qops, NULL,
- &common->irqlock,
- reqbuf->type,
- common->fmt.fmt.pix.field,
- sizeof(struct videobuf_buffer), fh,
- &common->lock);
+ /* Initialize videobuf2 queue as per the buffer type */
+ common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+ if (!common->alloc_ctx) {
+ vpif_err("Failed to get the context\n");
+ return -EINVAL;
+ }
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_cap_buffer);
+
+ vb2_queue_init(q);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -915,7 +1030,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
INIT_LIST_HEAD(&common->dma_queue);
/* Allocate buffers */
- return videobuf_reqbufs(&common->buffer_queue, reqbuf);
+ return vb2_reqbufs(&common->buffer_queue, reqbuf);
}
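
One caveat worth noting: vb2_queue_init() returns an int, and the call above discards it. A hypothetical hardened variant (not part of the commit; it assumes an extra local `int ret` in this function) could look like:

	ret = vb2_queue_init(q);
	if (ret) {
		vpif_err("vb2_queue_init() failed\n");
		vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
		return ret;
	}
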
/**
@@ -941,7 +1056,7 @@ static int vpif_querybuf(struct file *file, void *priv,
return -EINVAL;
}
- return videobuf_querybuf(&common->buffer_queue, buf);
+ return vb2_querybuf(&common->buffer_queue, buf);
}
/**
@@ -957,10 +1072,6 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct v4l2_buffer tbuf = *buf;
- struct videobuf_buffer *buf1;
- unsigned long addr = 0;
- unsigned long flags;
- int ret = 0;
vpif_dbg(2, debug, "vpif_qbuf\n");
@@ -970,76 +1081,11 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
}
if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
- vpif_err("fh io not allowed \n");
+ vpif_err("fh io not allowed\n");
return -EACCES;
}
- if (!(list_empty(&common->dma_queue)) ||
- (common->cur_frm != common->next_frm) ||
- !common->started ||
- (common->started && (0 == ch->field_id)))
- return videobuf_qbuf(&common->buffer_queue, buf);
-
- /* bufferqueue is empty store buffer address in VPIF registers */
- mutex_lock(&common->buffer_queue.vb_lock);
- buf1 = common->buffer_queue.bufs[tbuf.index];
-
- if ((buf1->state == VIDEOBUF_QUEUED) ||
- (buf1->state == VIDEOBUF_ACTIVE)) {
- vpif_err("invalid state\n");
- goto qbuf_exit;
- }
-
- switch (buf1->memory) {
- case V4L2_MEMORY_MMAP:
- if (buf1->baddr == 0)
- goto qbuf_exit;
- break;
-
- case V4L2_MEMORY_USERPTR:
- if (tbuf.length < buf1->bsize)
- goto qbuf_exit;
-
- if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr)) {
- vpif_buffer_release(&common->buffer_queue, buf1);
- buf1->baddr = tbuf.m.userptr;
- }
- break;
-
- default:
- goto qbuf_exit;
- }
-
- local_irq_save(flags);
- ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
- common->buffer_queue.field);
- if (ret < 0) {
- local_irq_restore(flags);
- goto qbuf_exit;
- }
-
- buf1->state = VIDEOBUF_ACTIVE;
-
- if (V4L2_MEMORY_USERPTR == common->memory)
- addr = buf1->boff;
- else
- addr = videobuf_to_dma_contig(buf1);
-
- common->next_frm = buf1;
- common->set_addr(addr + common->ytop_off,
- addr + common->ybtm_off,
- addr + common->ctop_off,
- addr + common->cbtm_off);
-
- local_irq_restore(flags);
- list_add_tail(&buf1->stream, &common->buffer_queue.stream);
- mutex_unlock(&common->buffer_queue.vb_lock);
- return 0;
-
-qbuf_exit:
- mutex_unlock(&common->buffer_queue.vb_lock);
- return -EINVAL;
+ return vb2_qbuf(&common->buffer_queue, buf);
}
/**
@@ -1056,8 +1102,8 @@ static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
vpif_dbg(2, debug, "vpif_dqbuf\n");
- return videobuf_dqbuf(&common->buffer_queue, buf,
- file->f_flags & O_NONBLOCK);
+ return vb2_dqbuf(&common->buffer_queue, buf,
+ (file->f_flags & O_NONBLOCK));
}
/**
@@ -1070,13 +1116,11 @@ static int vpif_streamon(struct file *file, void *priv,
enum v4l2_buf_type buftype)
{
- struct vpif_capture_config *config = vpif_dev->platform_data;
struct vpif_fh *fh = priv;
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
struct vpif_params *vpif;
- unsigned long addr = 0;
int ret = 0;
vpif_dbg(2, debug, "vpif_streamon\n");
@@ -1122,95 +1166,13 @@ static int vpif_streamon(struct file *file, void *priv,
return ret;
}
- /* Call videobuf_streamon to start streaming in videobuf */
- ret = videobuf_streamon(&common->buffer_queue);
+ /* Call vb2_streamon to start streaming in videobuf2 */
+ ret = vb2_streamon(&common->buffer_queue, buftype);
if (ret) {
- vpif_dbg(1, debug, "videobuf_streamon\n");
+ vpif_dbg(1, debug, "vb2_streamon\n");
return ret;
}
- /* If buffer queue is empty, return error */
- if (list_empty(&common->dma_queue)) {
- vpif_dbg(1, debug, "buffer queue is empty\n");
- ret = -EIO;
- goto exit;
- }
-
- /* Get the next frame from the buffer queue */
- common->cur_frm = list_entry(common->dma_queue.next,
- struct videobuf_buffer, queue);
- common->next_frm = common->cur_frm;
-
- /* Remove buffer from the buffer queue */
- list_del(&common->cur_frm->queue);
- /* Mark state of the current frame to active */
- common->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- ch->field_id = 0;
- common->started = 1;
-
- if (V4L2_MEMORY_USERPTR == common->memory)
- addr = common->cur_frm->boff;
- else
- addr = videobuf_to_dma_contig(common->cur_frm);
-
- /* Calculate the offset for Y and C data in the buffer */
- vpif_calculate_offsets(ch);
-
- if ((vpif->std_info.frm_fmt &&
- ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
- (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
- (!vpif->std_info.frm_fmt &&
- (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
- vpif_dbg(1, debug, "conflict in field format and std format\n");
- ret = -EINVAL;
- goto exit;
- }
-
- /* configure 1 or 2 channel mode */
- ret = config->setup_input_channel_mode(vpif->std_info.ycmux_mode);
-
- if (ret < 0) {
- vpif_dbg(1, debug, "can't set vpif channel mode\n");
- goto exit;
- }
-
- /* Call vpif_set_params function to set the parameters and addresses */
- ret = vpif_set_video_params(vpif, ch->channel_id);
-
- if (ret < 0) {
- vpif_dbg(1, debug, "can't set video params\n");
- goto exit;
- }
-
- common->started = ret;
- vpif_config_addr(ch, ret);
-
- common->set_addr(addr + common->ytop_off,
- addr + common->ybtm_off,
- addr + common->ctop_off,
- addr + common->cbtm_off);
-
- /**
- * Set interrupt for both the fields in VPIF Register enable channel in
- * VPIF register
- */
- if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
- channel0_intr_assert();
- channel0_intr_enable(1);
- enable_channel0(1);
- }
- if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
- (common->started == 2)) {
- channel1_intr_assert();
- channel1_intr_enable(1);
- enable_channel1(1);
- }
- channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
- return ret;
-
-exit:
- videobuf_streamoff(&common->buffer_queue);
return ret;
}
@@ -1265,7 +1227,7 @@ static int vpif_streamoff(struct file *file, void *priv,
if (ret && (ret != -ENOIOCTLCMD))
vpif_dbg(1, debug, "stream off failed in subdev\n");
- return videobuf_streamoff(&common->buffer_queue);
+ return vb2_streamoff(&common->buffer_queue, buftype);
}
/**
@@ -1679,7 +1641,7 @@ static int vpif_querycap(struct file *file, void *priv,
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
strlcpy(cap->driver, "vpif capture", sizeof(cap->driver));
- strlcpy(cap->bus_info, "DM646x Platform", sizeof(cap->bus_info));
+ strlcpy(cap->bus_info, "VPIF Platform", sizeof(cap->bus_info));
strlcpy(cap->card, config->card_name, sizeof(cap->card));
return 0;
@@ -2168,6 +2130,7 @@ static __init int vpif_probe(struct platform_device *pdev)
struct video_device *vfd;
struct resource *res;
int subdev_count;
+ size_t size;
vpif_dev = &pdev->dev;
@@ -2186,8 +2149,8 @@ static __init int vpif_probe(struct platform_device *pdev)
k = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, vpif_channel_isr, IRQF_DISABLED,
- "DM646x_Capture",
+ if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
+ "VPIF_Capture",
(void *)(&vpif_obj.dev[k]->channel_id))) {
err = -EBUSY;
i--;
@@ -2216,12 +2179,29 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFCapture_DRIVER_V%s",
+ "VPIF_Capture_DRIVER_V%s",
VPIF_CAPTURE_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ size = resource_size(res);
+ /* The memory resource is divided into two equal halves; when we
+ * have HD output we can add them together
+ */
+ for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ ch->channel_id = j;
+ /* only enabled if second resource exists */
+ config_params.video_limit[ch->channel_id] = 0;
+ if (size)
+ config_params.video_limit[ch->channel_id] =
+ size/2;
+ }
+ }
+
for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
ch = vpif_obj.dev[j];
ch->channel_id = j;
@@ -2275,8 +2255,7 @@ static __init int vpif_probe(struct platform_device *pdev)
vpif_obj.sd[i]->grp_id = 1 << i;
}
- v4l2_info(&vpif_obj.v4l2_dev,
- "DM646x VPIF capture driver initialized\n");
+ v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
return 0;
probe_subdev_out:
@@ -2333,26 +2312,70 @@ static int vpif_remove(struct platform_device *device)
return 0;
}
+#ifdef CONFIG_PM
/**
* vpif_suspend: vpif device suspend
- *
- * TODO: Add suspend code here
*/
-static int
-vpif_suspend(struct device *dev)
+static int vpif_suspend(struct device *dev)
{
- return -1;
+
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+ if (ch->usrs && common->io_usrs) {
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(0);
+ channel0_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ common->started == 2) {
+ enable_channel1(0);
+ channel1_intr_enable(0);
+ }
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
}
-/**
- * vpif_resume: vpif device suspend
- *
- * TODO: Add resume code here
+/*
+ * vpif_resume: vpif device resume
*/
-static int
-vpif_resume(struct device *dev)
+static int vpif_resume(struct device *dev)
{
- return -1;
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+ if (ch->usrs && common->io_usrs) {
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) {
+ enable_channel0(1);
+ channel0_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL1_VIDEO ||
+ common->started == 2) {
+ enable_channel1(1);
+ channel1_intr_enable(1);
+ }
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
}
static const struct dev_pm_ops vpif_dev_pm_ops = {
@@ -2360,11 +2383,16 @@ static const struct dev_pm_ops vpif_dev_pm_ops = {
.resume = vpif_resume,
};
+#define vpif_pm_ops (&vpif_dev_pm_ops)
+#else
+#define vpif_pm_ops NULL
+#endif
+
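
For illustration only (not part of this patch): an equivalent way to express the CONFIG_PM plumbing above is the SIMPLE_DEV_PM_OPS() helper from <linux/pm.h>; with it, vpif_suspend()/vpif_resume() would need a CONFIG_PM_SLEEP guard or __maybe_unused to avoid unused-function warnings when power management is disabled. The name vpif_capture_pm below is an assumption.

#include <linux/pm.h>

static SIMPLE_DEV_PM_OPS(vpif_capture_pm, vpif_suspend, vpif_resume);

/* ...and in the platform_driver definition: .pm = &vpif_capture_pm, */
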
static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_capture",
.owner = THIS_MODULE,
- .pm = &vpif_dev_pm_ops,
+ .pm = vpif_pm_ops,
},
.probe = vpif_probe,
.remove = vpif_remove,
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index a693d4ebda55..3511510f43ee 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -26,7 +26,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/davinci/vpif_types.h>
#include "vpif.h"
@@ -60,11 +60,16 @@ struct video_obj {
u32 input_idx;
};
+struct vpif_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
struct common_obj {
/* Pointer pointing to current v4l2_buffer */
- struct videobuf_buffer *cur_frm;
+ struct vpif_cap_buffer *cur_frm;
/* Pointer pointing to current v4l2_buffer */
- struct videobuf_buffer *next_frm;
+ struct vpif_cap_buffer *next_frm;
/*
* This field keeps track of type of buffer exchange mechanism
* user has selected
@@ -73,7 +78,9 @@ struct common_obj {
/* Used to store pixel format */
struct v4l2_format fmt;
/* Buffer queue used in video-buf */
- struct videobuf_queue buffer_queue;
+ struct vb2_queue buffer_queue;
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
/* Queue of filled frames */
struct list_head dma_queue;
/* Used in video-buf */
@@ -151,6 +158,7 @@ struct vpif_config_params {
u32 min_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
u32 channel_bufsize[VPIF_CAPTURE_NUM_CHANNELS];
u8 default_device[VPIF_CAPTURE_NUM_CHANNELS];
+ u32 video_limit[VPIF_CAPTURE_NUM_CHANNELS];
u8 max_device_type;
};
/* Struct which keeps track of the line numbers for the sliced vbi service */
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index e6488ee7db18..e129c98921ad 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -46,7 +46,7 @@ MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VPIF_DISPLAY_VERSION);
-#define DM646X_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
+#define VPIF_V4L2_STD (V4L2_STD_525_60 | V4L2_STD_625_50)
#define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
#define vpif_dbg(level, debug, fmt, arg...) \
@@ -82,89 +82,38 @@ static struct vpif_config_params config_params = {
static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
+static void vpif_calculate_offsets(struct channel_obj *ch);
+static void vpif_config_addr(struct channel_obj *ch, int muxmode);
/*
- * vpif_uservirt_to_phys: This function is used to convert user
- * space virtual address to physical address.
- */
-static u32 vpif_uservirt_to_phys(u32 virtp)
-{
- struct mm_struct *mm = current->mm;
- unsigned long physp = 0;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, virtp);
-
- /* For kernel direct-mapped memory, take the easy way */
- if (virtp >= PAGE_OFFSET) {
- physp = virt_to_phys((void *)virtp);
- } else if (vma && (vma->vm_flags & VM_IO) && (vma->vm_pgoff)) {
- /* this will catch, kernel-allocated, mmaped-to-usermode addr */
- physp = (vma->vm_pgoff << PAGE_SHIFT) + (virtp - vma->vm_start);
- } else {
- /* otherwise, use get_user_pages() for general userland pages */
- int res, nr_pages = 1;
- struct page *pages;
- down_read(&current->mm->mmap_sem);
-
- res = get_user_pages(current, current->mm,
- virtp, nr_pages, 1, 0, &pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (res == nr_pages) {
- physp = __pa(page_address(&pages[0]) +
- (virtp & ~PAGE_MASK));
- } else {
- vpif_err("get_user_pages failed\n");
- return 0;
- }
- }
-
- return physp;
-}
-
-/*
- * buffer_prepare: This is the callback function called from videobuf_qbuf()
- * function the buffer is prepared and user space virtual address is converted
- * into physical address
+ * buffer_prepare: This is the callback function called from vb2_qbuf();
+ * the buffer payload is set up and its DMA address is checked for proper
+ * alignment
*/
-static int vpif_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *q = vb->vb2_queue;
struct common_obj *common;
unsigned long addr;
common = &fh->channel->common[VPIF_VIDEO_INDEX];
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = common->width;
- vb->height = common->height;
- vb->size = vb->width * vb->height;
- vb->field = field;
- }
- vb->state = VIDEOBUF_PREPARED;
-
- /* if user pointer memory mechanism is used, get the physical
- * address of the buffer */
- if (V4L2_MEMORY_USERPTR == common->memory) {
- if (!vb->baddr) {
- vpif_err("buffer_address is 0\n");
- return -EINVAL;
- }
-
- vb->boff = vpif_uservirt_to_phys(vb->baddr);
- if (!ISALIGNED(vb->boff))
+ if (vb->state != VB2_BUF_STATE_ACTIVE &&
+ vb->state != VB2_BUF_STATE_PREPARED) {
+ vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
+ if (vb2_plane_vaddr(vb, 0) &&
+ vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
goto buf_align_exit;
- }
- addr = vb->boff;
- if (q->streaming && (V4L2_BUF_TYPE_SLICED_VBI_OUTPUT != q->type)) {
- if (!ISALIGNED(addr + common->ytop_off) ||
- !ISALIGNED(addr + common->ybtm_off) ||
- !ISALIGNED(addr + common->ctop_off) ||
- !ISALIGNED(addr + common->cbtm_off))
- goto buf_align_exit;
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (q->streaming &&
+ (V4L2_BUF_TYPE_SLICED_VBI_OUTPUT != q->type)) {
+ if (!ISALIGNED(addr + common->ytop_off) ||
+ !ISALIGNED(addr + common->ybtm_off) ||
+ !ISALIGNED(addr + common->ctop_off) ||
+ !ISALIGNED(addr + common->cbtm_off))
+ goto buf_align_exit;
+ }
}
return 0;
@@ -174,86 +123,255 @@ buf_align_exit:
}
/*
- * vpif_buffer_setup: This function allocates memory for the buffers
+ * vpif_buffer_queue_setup: This function allocates memory for the buffers
*/
-static int vpif_buffer_setup(struct videobuf_queue *q, unsigned int *count,
- unsigned int *size)
+static int vpif_buffer_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
{
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ unsigned long size;
+
+ if (V4L2_MEMORY_MMAP == common->memory) {
+ size = config_params.channel_bufsize[ch->channel_id];
+ /*
+ * Check whether the requested buffers exceed the available memory:
+ * ycmux_mode = 0 means 1 channel (HD) mode and
+ * ycmux_mode = 1 means 2 channel (SD) mode
+ */
+ if (ch->vpifparams.std_info.ycmux_mode == 0) {
+ if (config_params.video_limit[ch->channel_id])
+ while (size * *nbuffers >
+ (config_params.video_limit[0]
+ + config_params.video_limit[1]))
+ (*nbuffers)--;
+ } else {
+ if (config_params.video_limit[ch->channel_id])
+ while (size * *nbuffers >
+ config_params.video_limit[ch->channel_id])
+ (*nbuffers)--;
+ }
+ } else {
+ size = common->fmt.fmt.pix.sizeimage;
+ }
- if (V4L2_MEMORY_MMAP != common->memory)
- return 0;
-
- *size = config_params.channel_bufsize[ch->channel_id];
- if (*count < config_params.min_numbuffers)
- *count = config_params.min_numbuffers;
+ if (*nbuffers < config_params.min_numbuffers)
+ *nbuffers = config_params.min_numbuffers;
+ *nplanes = 1;
+ sizes[0] = size;
+ alloc_ctxs[0] = common->alloc_ctx;
return 0;
}
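
To make the clamp above concrete, here is a small self-contained illustration with arbitrary example values (not taken from the driver): a 1 MiB buffer size against a 4 MiB video_limit trims a request for 8 buffers down to 4 before vb2 allocates anything.

#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 20;		/* 1 MiB per buffer (example) */
	unsigned long limit = 4UL << 20;	/* 4 MiB video_limit (example) */
	unsigned int nbuffers = 8;		/* caller asked for 8 buffers */

	while (size * nbuffers > limit)
		nbuffers--;

	printf("%u buffers fit\n", nbuffers);	/* prints "4 buffers fit" */
	return 0;
}
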
/*
* vpif_buffer_queue: This function adds the buffer to DMA queue
*/
-static void vpif_buffer_queue(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpif_buffer_queue(struct vb2_buffer *vb)
{
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpif_disp_buffer *buf = container_of(vb,
+ struct vpif_disp_buffer, vb);
+ struct channel_obj *ch = fh->channel;
struct common_obj *common;
- common = &fh->channel->common[VPIF_VIDEO_INDEX];
+ common = &ch->common[VPIF_VIDEO_INDEX];
/* add the buffer to the DMA queue */
- list_add_tail(&vb->queue, &common->dma_queue);
- vb->state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->list, &common->dma_queue);
}
/*
- * vpif_buffer_release: This function is called from the videobuf layer to
+ * vpif_buf_cleanup: This function is called from the videobuf2 layer to
* free memory allocated to the buffers
*/
-static void vpif_buffer_release(struct videobuf_queue *q,
- struct videobuf_buffer *vb)
+static void vpif_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpif_disp_buffer *buf = container_of(vb,
+ struct vpif_disp_buffer, vb);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
+ unsigned long flags;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ spin_lock_irqsave(&common->irqlock, flags);
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ list_del_init(&buf->list);
+ spin_unlock_irqrestore(&common->irqlock, flags);
+}
+
+static void vpif_wait_prepare(struct vb2_queue *vq)
{
- struct vpif_fh *fh = q->priv_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
struct channel_obj *ch = fh->channel;
struct common_obj *common;
- unsigned int buf_size = 0;
common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_unlock(&common->lock);
+}
- videobuf_dma_contig_free(q, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
+static void vpif_wait_finish(struct vb2_queue *vq)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
- if (V4L2_MEMORY_MMAP != common->memory)
- return;
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+}
- buf_size = config_params.channel_bufsize[ch->channel_id];
+static int vpif_buffer_init(struct vb2_buffer *vb)
+{
+ struct vpif_disp_buffer *buf = container_of(vb,
+ struct vpif_disp_buffer, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
}
-static struct videobuf_queue_ops video_qops = {
- .buf_setup = vpif_buffer_setup,
- .buf_prepare = vpif_buffer_prepare,
- .buf_queue = vpif_buffer_queue,
- .buf_release = vpif_buffer_release,
-};
static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} };
+static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpif_display_config *vpif_config_data =
+ vpif_dev->platform_data;
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_params *vpif = &ch->vpifparams;
+ unsigned long addr = 0;
+ int ret;
+
+ /* If buffer queue is empty, return error */
+ if (list_empty(&common->dma_queue)) {
+ vpif_err("buffer queue is empty\n");
+ return -EIO;
+ }
+
+ /* Get the next frame from the buffer queue */
+ common->next_frm = common->cur_frm =
+ list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+
+ list_del(&common->cur_frm->list);
+ /* Mark state of the current frame to active */
+ common->cur_frm->vb.state = VB2_BUF_STATE_ACTIVE;
+
+ /* Initialize field_id and started member */
+ ch->field_id = 0;
+ common->started = 1;
+ addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb, 0);
+ /* Calculate the offset for Y and C data in the buffer */
+ vpif_calculate_offsets(ch);
+
+ if ((ch->vpifparams.std_info.frm_fmt &&
+ ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE)
+ && (common->fmt.fmt.pix.field != V4L2_FIELD_ANY)))
+ || (!ch->vpifparams.std_info.frm_fmt
+ && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
+ vpif_err("conflict in field format and std format\n");
+ return -EINVAL;
+ }
+
+ /* clock settings */
+ ret =
+ vpif_config_data->set_clock(ch->vpifparams.std_info.ycmux_mode,
+ ch->vpifparams.std_info.hd_sd);
+ if (ret < 0) {
+ vpif_err("can't set clock\n");
+ return ret;
+ }
+
+ /* set the parameters and addresses */
+ ret = vpif_set_video_params(vpif, ch->channel_id + 2);
+ if (ret < 0)
+ return ret;
+
+ common->started = ret;
+ vpif_config_addr(ch, ret);
+ common->set_addr((addr + common->ytop_off),
+ (addr + common->ybtm_off),
+ (addr + common->ctop_off),
+ (addr + common->cbtm_off));
+
+ /* Set interrupts for both the fields and enable
+ the channel in the VPIF registers */
+ if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ channel2_intr_assert();
+ channel2_intr_enable(1);
+ enable_channel2(1);
+ if (vpif_config_data->ch2_clip_en)
+ channel2_clipping_enable(1);
+ }
+
+ if ((VPIF_CHANNEL3_VIDEO == ch->channel_id)
+ || (common->started == 2)) {
+ channel3_intr_assert();
+ channel3_intr_enable(1);
+ enable_channel3(1);
+ if (vpif_config_data->ch3_clip_en)
+ channel3_clipping_enable(1);
+ }
+ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
+
+ return 0;
+}
+
+/* abort streaming and wait for last buffer */
+static int vpif_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpif_fh *fh = vb2_get_drv_priv(vq);
+ struct channel_obj *ch = fh->channel;
+ struct common_obj *common;
+
+ if (!vb2_is_streaming(vq))
+ return 0;
+
+ common = &ch->common[VPIF_VIDEO_INDEX];
+
+ /* release all active buffers */
+ while (!list_empty(&common->dma_queue)) {
+ common->next_frm = list_entry(common->dma_queue.next,
+ struct vpif_disp_buffer, list);
+ list_del(&common->next_frm->list);
+ vb2_buffer_done(&common->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+
+ return 0;
+}
+
+static struct vb2_ops video_qops = {
+ .queue_setup = vpif_buffer_queue_setup,
+ .wait_prepare = vpif_wait_prepare,
+ .wait_finish = vpif_wait_finish,
+ .buf_init = vpif_buffer_init,
+ .buf_prepare = vpif_buffer_prepare,
+ .start_streaming = vpif_start_streaming,
+ .stop_streaming = vpif_stop_streaming,
+ .buf_cleanup = vpif_buf_cleanup,
+ .buf_queue = vpif_buffer_queue,
+};
+
static void process_progressive_mode(struct common_obj *common)
{
unsigned long addr = 0;
/* Get the next buffer from buffer queue */
common->next_frm = list_entry(common->dma_queue.next,
- struct videobuf_buffer, queue);
+ struct vpif_disp_buffer, list);
/* Remove that buffer from the buffer queue */
- list_del(&common->next_frm->queue);
+ list_del(&common->next_frm->list);
/* Mark status of the buffer as active */
- common->next_frm->state = VIDEOBUF_ACTIVE;
+ common->next_frm->vb.state = VB2_BUF_STATE_ACTIVE;
/* Set top and bottom field addrs in VPIF registers */
- addr = videobuf_to_dma_contig(common->next_frm);
+ addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb, 0);
common->set_addr(addr + common->ytop_off,
addr + common->ybtm_off,
addr + common->ctop_off,
@@ -271,11 +389,10 @@ static void process_interlaced_mode(int fid, struct common_obj *common)
/* one frame is displayed If next frame is
* available, release cur_frm and move on */
/* Copy frame display time */
- do_gettimeofday(&common->cur_frm->ts);
+ do_gettimeofday(&common->cur_frm->vb.v4l2_buf.timestamp);
/* Change status of the cur_frm */
- common->cur_frm->state = VIDEOBUF_DONE;
- /* unlock semaphore on cur_frm */
- wake_up_interruptible(&common->cur_frm->done);
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
common->cur_frm = common->next_frm;
@@ -307,6 +424,9 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
int channel_id = 0;
channel_id = *(int *)(dev_id);
+ if (!vpif_intr_status(channel_id + 2))
+ return IRQ_NONE;
+
ch = dev->dev[channel_id];
field = ch->common[VPIF_VIDEO_INDEX].fmt.fmt.pix.field;
for (i = 0; i < VPIF_NUMOBJECTS; i++) {
@@ -323,9 +443,10 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
if (!channel_first_int[i][channel_id]) {
/* Mark status of the cur_frm to
* done and unlock semaphore on it */
- do_gettimeofday(&common->cur_frm->ts);
- common->cur_frm->state = VIDEOBUF_DONE;
- wake_up_interruptible(&common->cur_frm->done);
+ do_gettimeofday(&common->cur_frm->vb.
+ v4l2_buf.timestamp);
+ vb2_buffer_done(&common->cur_frm->vb,
+ VB2_BUF_STATE_DONE);
/* Make cur_frm pointing to next_frm */
common->cur_frm = common->next_frm;
}
@@ -443,10 +564,7 @@ static void vpif_calculate_offsets(struct channel_obj *ch)
vid_ch->buf_field = common->fmt.fmt.pix.field;
}
- if (V4L2_MEMORY_USERPTR == common->memory)
- sizeimage = common->fmt.fmt.pix.sizeimage;
- else
- sizeimage = config_params.channel_bufsize[ch->channel_id];
+ sizeimage = common->fmt.fmt.pix.sizeimage;
hpitch = common->fmt.fmt.pix.bytesperline;
vpitch = sizeimage / (hpitch * 2);
@@ -523,10 +641,7 @@ static int vpif_check_format(struct channel_obj *ch,
if (pixfmt->bytesperline <= 0)
goto invalid_pitch_exit;
- if (V4L2_MEMORY_USERPTR == common->memory)
- sizeimage = pixfmt->sizeimage;
- else
- sizeimage = config_params.channel_bufsize[ch->channel_id];
+ sizeimage = pixfmt->sizeimage;
if (vpif_update_resolution(ch))
return -EINVAL;
@@ -583,7 +698,7 @@ static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
vpif_dbg(2, debug, "vpif_mmap\n");
- return videobuf_mmap_mapper(&common->buffer_queue, vma);
+ return vb2_mmap(&common->buffer_queue, vma);
}
/*
@@ -596,7 +711,7 @@ static unsigned int vpif_poll(struct file *filep, poll_table *wait)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
if (common->started)
- return videobuf_poll_stream(filep, &common->buffer_queue, wait);
+ return vb2_poll(&common->buffer_queue, filep, wait);
return 0;
}
@@ -665,9 +780,11 @@ static int vpif_release(struct file *filep)
channel3_intr_enable(0);
}
common->started = 0;
+
/* Free buffers allocated */
- videobuf_queue_cancel(&common->buffer_queue);
- videobuf_mmap_free(&common->buffer_queue);
+ vb2_queue_release(&common->buffer_queue);
+ vb2_dma_contig_cleanup_ctx(common->alloc_ctx);
+
common->numbuffers =
config_params.numbuffers[ch->channel_id];
}
@@ -806,6 +923,7 @@ static int vpif_reqbufs(struct file *file, void *priv,
struct channel_obj *ch = fh->channel;
struct common_obj *common;
enum v4l2_field field;
+ struct vb2_queue *q;
u8 index = 0;
/* This file handle has not initialized the channel,
@@ -825,9 +943,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
common = &ch->common[index];
- if (common->fmt.type != reqbuf->type)
+ if (common->fmt.type != reqbuf->type || !vpif_dev)
return -EINVAL;
-
if (0 != common->io_usrs)
return -EBUSY;
@@ -839,14 +956,21 @@ static int vpif_reqbufs(struct file *file, void *priv,
} else {
field = V4L2_VBI_INTERLACED;
}
+ /* Initialize videobuf2 queue as per the buffer type */
+ common->alloc_ctx = vb2_dma_contig_init_ctx(vpif_dev);
+ if (!common->alloc_ctx) {
+ vpif_err("Failed to get the context\n");
+ return -EINVAL;
+ }
+ q = &common->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ q->io_modes = VB2_MMAP | VB2_USERPTR;
+ q->drv_priv = fh;
+ q->ops = &video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpif_disp_buffer);
- /* Initialize videobuf queue as per the buffer type */
- videobuf_queue_dma_contig_init(&common->buffer_queue,
- &video_qops, NULL,
- &common->irqlock,
- reqbuf->type, field,
- sizeof(struct videobuf_buffer), fh,
- &common->lock);
+ vb2_queue_init(q);
/* Set io allowed member of file handle to TRUE */
fh->io_allowed[index] = 1;
@@ -855,9 +979,8 @@ static int vpif_reqbufs(struct file *file, void *priv,
/* Store type of memory requested in channel object */
common->memory = reqbuf->memory;
INIT_LIST_HEAD(&common->dma_queue);
-
/* Allocate buffers */
- return videobuf_reqbufs(&common->buffer_queue, reqbuf);
+ return vb2_reqbufs(&common->buffer_queue, reqbuf);
}
static int vpif_querybuf(struct file *file, void *priv,
@@ -870,22 +993,25 @@ static int vpif_querybuf(struct file *file, void *priv,
if (common->fmt.type != tbuf->type)
return -EINVAL;
- return videobuf_querybuf(&common->buffer_queue, tbuf);
+ return vb2_querybuf(&common->buffer_queue, tbuf);
}
static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
+ struct vpif_fh *fh = NULL;
+ struct channel_obj *ch = NULL;
+ struct common_obj *common = NULL;
- struct vpif_fh *fh = priv;
- struct channel_obj *ch = fh->channel;
- struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
- struct v4l2_buffer tbuf = *buf;
- struct videobuf_buffer *buf1;
- unsigned long addr = 0;
- unsigned long flags;
- int ret = 0;
+ if (!buf || !priv)
+ return -EINVAL;
+
+ fh = priv;
+ ch = fh->channel;
+ if (!ch)
+ return -EINVAL;
- if (common->fmt.type != tbuf.type)
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+ if (common->fmt.type != buf->type)
return -EINVAL;
if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
@@ -893,73 +1019,7 @@ static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
return -EACCES;
}
- if (!(list_empty(&common->dma_queue)) ||
- (common->cur_frm != common->next_frm) ||
- !(common->started) ||
- (common->started && (0 == ch->field_id)))
- return videobuf_qbuf(&common->buffer_queue, buf);
-
- /* bufferqueue is empty store buffer address in VPIF registers */
- mutex_lock(&common->buffer_queue.vb_lock);
- buf1 = common->buffer_queue.bufs[tbuf.index];
- if (buf1->memory != tbuf.memory) {
- vpif_err("invalid buffer type\n");
- goto qbuf_exit;
- }
-
- if ((buf1->state == VIDEOBUF_QUEUED) ||
- (buf1->state == VIDEOBUF_ACTIVE)) {
- vpif_err("invalid state\n");
- goto qbuf_exit;
- }
-
- switch (buf1->memory) {
- case V4L2_MEMORY_MMAP:
- if (buf1->baddr == 0)
- goto qbuf_exit;
- break;
-
- case V4L2_MEMORY_USERPTR:
- if (tbuf.length < buf1->bsize)
- goto qbuf_exit;
-
- if ((VIDEOBUF_NEEDS_INIT != buf1->state)
- && (buf1->baddr != tbuf.m.userptr)) {
- vpif_buffer_release(&common->buffer_queue, buf1);
- buf1->baddr = tbuf.m.userptr;
- }
- break;
-
- default:
- goto qbuf_exit;
- }
-
- local_irq_save(flags);
- ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
- common->buffer_queue.field);
- if (ret < 0) {
- local_irq_restore(flags);
- goto qbuf_exit;
- }
-
- buf1->state = VIDEOBUF_ACTIVE;
- addr = buf1->boff;
- common->next_frm = buf1;
- if (tbuf.type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
- common->set_addr((addr + common->ytop_off),
- (addr + common->ybtm_off),
- (addr + common->ctop_off),
- (addr + common->cbtm_off));
- }
-
- local_irq_restore(flags);
- list_add_tail(&buf1->stream, &common->buffer_queue.stream);
- mutex_unlock(&common->buffer_queue.vb_lock);
- return 0;
-
-qbuf_exit:
- mutex_unlock(&common->buffer_queue.vb_lock);
- return -EINVAL;
+ return vb2_qbuf(&common->buffer_queue, buf);
}
static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
@@ -969,7 +1029,7 @@ static int vpif_s_std(struct file *file, void *priv, v4l2_std_id *std_id)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
int ret = 0;
- if (!(*std_id & DM646X_V4L2_STD))
+ if (!(*std_id & VPIF_V4L2_STD))
return -EINVAL;
if (common->started) {
@@ -1026,7 +1086,7 @@ static int vpif_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
- return videobuf_dqbuf(&common->buffer_queue, p,
+ return vb2_dqbuf(&common->buffer_queue, p,
(file->f_flags & O_NONBLOCK));
}
@@ -1037,10 +1097,6 @@ static int vpif_streamon(struct file *file, void *priv,
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
- struct vpif_params *vpif = &ch->vpifparams;
- struct vpif_display_config *vpif_config_data =
- vpif_dev->platform_data;
- unsigned long addr = 0;
int ret = 0;
if (buftype != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
@@ -1072,82 +1128,13 @@ static int vpif_streamon(struct file *file, void *priv,
if (ret < 0)
return ret;
- /* Call videobuf_streamon to start streaming in videobuf */
- ret = videobuf_streamon(&common->buffer_queue);
+ /* Call vb2_streamon to start streaming in videobuf2 */
+ ret = vb2_streamon(&common->buffer_queue, buftype);
if (ret < 0) {
- vpif_err("videobuf_streamon\n");
+ vpif_err("vb2_streamon\n");
return ret;
}
- /* If buffer queue is empty, return error */
- if (list_empty(&common->dma_queue)) {
- vpif_err("buffer queue is empty\n");
- return -EIO;
- }
-
- /* Get the next frame from the buffer queue */
- common->next_frm = common->cur_frm =
- list_entry(common->dma_queue.next,
- struct videobuf_buffer, queue);
-
- list_del(&common->cur_frm->queue);
- /* Mark state of the current frame to active */
- common->cur_frm->state = VIDEOBUF_ACTIVE;
-
- /* Initialize field_id and started member */
- ch->field_id = 0;
- common->started = 1;
- if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- addr = common->cur_frm->boff;
- /* Calculate the offset for Y and C data in the buffer */
- vpif_calculate_offsets(ch);
-
- if ((ch->vpifparams.std_info.frm_fmt &&
- ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE)
- && (common->fmt.fmt.pix.field != V4L2_FIELD_ANY)))
- || (!ch->vpifparams.std_info.frm_fmt
- && (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
- vpif_err("conflict in field format and std format\n");
- return -EINVAL;
- }
-
- /* clock settings */
- ret =
- vpif_config_data->set_clock(ch->vpifparams.std_info.ycmux_mode,
- ch->vpifparams.std_info.hd_sd);
- if (ret < 0) {
- vpif_err("can't set clock\n");
- return ret;
- }
-
- /* set the parameters and addresses */
- ret = vpif_set_video_params(vpif, ch->channel_id + 2);
- if (ret < 0)
- return ret;
-
- common->started = ret;
- vpif_config_addr(ch, ret);
- common->set_addr((addr + common->ytop_off),
- (addr + common->ybtm_off),
- (addr + common->ctop_off),
- (addr + common->cbtm_off));
-
- /* Set interrupt for both the fields in VPIF
- Register enable channel in VPIF register */
- if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
- channel2_intr_assert();
- channel2_intr_enable(1);
- enable_channel2(1);
- }
-
- if ((VPIF_CHANNEL3_VIDEO == ch->channel_id)
- || (common->started == 2)) {
- channel3_intr_assert();
- channel3_intr_enable(1);
- enable_channel3(1);
- }
- channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
- }
return ret;
}
@@ -1157,6 +1144,8 @@ static int vpif_streamoff(struct file *file, void *priv,
struct vpif_fh *fh = priv;
struct channel_obj *ch = fh->channel;
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct vpif_display_config *vpif_config_data =
+ vpif_dev->platform_data;
if (buftype != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
vpif_err("buffer type not supported\n");
@@ -1176,18 +1165,22 @@ static int vpif_streamoff(struct file *file, void *priv,
if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/* disable channel */
if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
+ if (vpif_config_data->ch2_clip_en)
+ channel2_clipping_enable(0);
enable_channel2(0);
channel2_intr_enable(0);
}
if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) ||
(2 == common->started)) {
+ if (vpif_config_data->ch3_clip_en)
+ channel3_clipping_enable(0);
enable_channel3(0);
channel3_intr_enable(0);
}
}
common->started = 0;
- return videobuf_streamoff(&common->buffer_queue);
+ return vb2_streamoff(&common->buffer_queue, buftype);
}
static int vpif_cropcap(struct file *file, void *priv,
@@ -1220,7 +1213,7 @@ static int vpif_enum_output(struct file *file, void *fh,
strcpy(output->name, config->output[output->index]);
output->type = V4L2_OUTPUT_TYPE_ANALOG;
- output->std = DM646X_V4L2_STD;
+ output->std = VPIF_V4L2_STD;
return 0;
}
@@ -1605,7 +1598,7 @@ static struct video_device vpif_video_template = {
.name = "vpif",
.fops = &vpif_fops,
.ioctl_ops = &vpif_ioctl_ops,
- .tvnorms = DM646X_V4L2_STD,
+ .tvnorms = VPIF_V4L2_STD,
.current_norm = V4L2_STD_625_50,
};
@@ -1687,9 +1680,9 @@ static __init int vpif_probe(struct platform_device *pdev)
struct video_device *vfd;
struct resource *res;
int subdev_count;
+ size_t size;
vpif_dev = &pdev->dev;
-
err = initialize_vpif();
if (err) {
@@ -1706,8 +1699,8 @@ static __init int vpif_probe(struct platform_device *pdev)
k = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, vpif_channel_isr, IRQF_DISABLED,
- "DM646x_Display",
+ if (request_irq(i, vpif_channel_isr, IRQF_SHARED,
+ "VPIF_Display",
(void *)(&vpif_obj.dev[k]->channel_id))) {
err = -EBUSY;
goto vpif_int_err;
@@ -1737,13 +1730,31 @@ static __init int vpif_probe(struct platform_device *pdev)
vfd->v4l2_dev = &vpif_obj.v4l2_dev;
vfd->release = video_device_release;
snprintf(vfd->name, sizeof(vfd->name),
- "DM646x_VPIFDisplay_DRIVER_V%s",
+ "VPIF_Display_DRIVER_V%s",
VPIF_DISPLAY_VERSION);
/* Set video_dev to the video device */
ch->video_dev = vfd;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res) {
+ size = resource_size(res);
+ /* The memory resource is divided into two equal halves; when
+ * we have HD output we can add them together
+ */
+ for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ ch->channel_id = j;
+
+ /* only enabled if second resource exists */
+ config_params.video_limit[ch->channel_id] = 0;
+ if (size)
+ config_params.video_limit[ch->channel_id] =
+ size/2;
+ }
+ }
+
for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
ch = vpif_obj.dev[j];
/* Initialize field of the channel objects */
@@ -1823,7 +1834,7 @@ static __init int vpif_probe(struct platform_device *pdev)
}
v4l2_info(&vpif_obj.v4l2_dev,
- "DM646x VPIF display driver initialized\n");
+ " VPIF display driver initialized\n");
return 0;
probe_subdev_out:
@@ -1871,10 +1882,81 @@ static int vpif_remove(struct platform_device *device)
return 0;
}
+#ifdef CONFIG_PM
+static int vpif_suspend(struct device *dev)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+ if (atomic_read(&ch->usrs) && common->io_usrs) {
+ /* Disable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(0);
+ channel2_intr_enable(0);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ common->started == 2) {
+ enable_channel3(0);
+ channel3_intr_enable(0);
+ }
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+static int vpif_resume(struct device *dev)
+{
+
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i;
+
+ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[i];
+ common = &ch->common[VPIF_VIDEO_INDEX];
+ mutex_lock(&common->lock);
+ if (atomic_read(&ch->usrs) && common->io_usrs) {
+ /* Enable channel */
+ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) {
+ enable_channel2(1);
+ channel2_intr_enable(1);
+ }
+ if (ch->channel_id == VPIF_CHANNEL3_VIDEO ||
+ common->started == 2) {
+ enable_channel3(1);
+ channel3_intr_enable(1);
+ }
+ }
+ mutex_unlock(&common->lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops vpif_pm = {
+ .suspend = vpif_suspend,
+ .resume = vpif_resume,
+};
+
+#define vpif_pm_ops (&vpif_pm)
+#else
+#define vpif_pm_ops NULL
+#endif
+
static __refdata struct platform_driver vpif_driver = {
.driver = {
.name = "vpif_display",
.owner = THIS_MODULE,
+ .pm = vpif_pm_ops,
},
.probe = vpif_probe,
.remove = vpif_remove,
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index 56879d1a0684..8967ffb44058 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -1,5 +1,5 @@
/*
- * DM646x display header file
+ * VPIF display header file
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
*
@@ -21,7 +21,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
-#include <media/videobuf-dma-contig.h>
+#include <media/videobuf2-dma-contig.h>
#include <media/davinci/vpif_types.h>
#include "vpif.h"
@@ -73,21 +73,29 @@ struct vbi_obj {
* vbi data */
};
+struct vpif_disp_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
struct common_obj {
/* Buffer specific parameters */
u8 *fbuffers[VIDEO_MAX_FRAME]; /* List of buffer pointers for
* storing frames */
u32 numbuffers; /* number of buffers */
- struct videobuf_buffer *cur_frm; /* Pointer pointing to current
- * videobuf_buffer */
- struct videobuf_buffer *next_frm; /* Pointer pointing to next
- * videobuf_buffer */
+ struct vpif_disp_buffer *cur_frm; /* Pointer pointing to current
+ * vb2_buffer */
+ struct vpif_disp_buffer *next_frm; /* Pointer pointing to next
+ * vb2_buffer */
enum v4l2_memory memory; /* This field keeps track of
* type of buffer exchange
* method user has selected */
struct v4l2_format fmt; /* Used to store the format */
- struct videobuf_queue buffer_queue; /* Buffer queue used in
+ struct vb2_queue buffer_queue; /* Buffer queue used in
* video-buf */
+ /* allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+
struct list_head dma_queue; /* Queue of filled frames */
spinlock_t irqlock; /* Used in video-buf */
@@ -158,6 +166,7 @@ struct vpif_config_params {
u32 min_bufsize[VPIF_DISPLAY_NUM_CHANNELS];
u32 channel_bufsize[VPIF_DISPLAY_NUM_CHANNELS];
u8 numbuffers[VPIF_DISPLAY_NUM_CHANNELS];
+ u32 video_limit[VPIF_DISPLAY_NUM_CHANNELS];
u8 min_numbuffers;
};
diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
index d7e2a3dc5525..07dc594e79f0 100644
--- a/drivers/media/video/em28xx/em28xx-audio.c
+++ b/drivers/media/video/em28xx/em28xx-audio.c
@@ -42,6 +42,7 @@
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/tlv.h>
+#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -679,19 +680,19 @@ static int em28xx_audio_init(struct em28xx *dev)
INIT_WORK(&dev->wq_trigger, audio_trigger);
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
- em28xx_cvol_new(card, dev, "Video", AC97_VIDEO_VOL);
- em28xx_cvol_new(card, dev, "Line In", AC97_LINEIN_VOL);
- em28xx_cvol_new(card, dev, "Phone", AC97_PHONE_VOL);
- em28xx_cvol_new(card, dev, "Microphone", AC97_PHONE_VOL);
- em28xx_cvol_new(card, dev, "CD", AC97_CD_VOL);
- em28xx_cvol_new(card, dev, "AUX", AC97_AUX_VOL);
- em28xx_cvol_new(card, dev, "PCM", AC97_PCM_OUT_VOL);
-
- em28xx_cvol_new(card, dev, "Master", AC97_MASTER_VOL);
- em28xx_cvol_new(card, dev, "Line", AC97_LINE_LEVEL_VOL);
- em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO_VOL);
- em28xx_cvol_new(card, dev, "LFE", AC97_LFE_MASTER_VOL);
- em28xx_cvol_new(card, dev, "Surround", AC97_SURR_MASTER_VOL);
+ em28xx_cvol_new(card, dev, "Video", AC97_VIDEO);
+ em28xx_cvol_new(card, dev, "Line In", AC97_LINE);
+ em28xx_cvol_new(card, dev, "Phone", AC97_PHONE);
+ em28xx_cvol_new(card, dev, "Microphone", AC97_MIC);
+ em28xx_cvol_new(card, dev, "CD", AC97_CD);
+ em28xx_cvol_new(card, dev, "AUX", AC97_AUX);
+ em28xx_cvol_new(card, dev, "PCM", AC97_PCM);
+
+ em28xx_cvol_new(card, dev, "Master", AC97_MASTER);
+ em28xx_cvol_new(card, dev, "Line", AC97_HEADPHONE);
+ em28xx_cvol_new(card, dev, "Mono", AC97_MASTER_MONO);
+ em28xx_cvol_new(card, dev, "LFE", AC97_CENTER_LFE_MASTER);
+ em28xx_cvol_new(card, dev, "Surround", AC97_SURROUND_MASTER);
}
err = snd_card_register(card);
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 862c6575c557..ca62b9981380 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -975,12 +975,7 @@ struct em28xx_board em28xx_boards[] = {
.name = "Terratec Cinergy HTC Stick",
.has_dvb = 1,
.ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
-#if 0
- .tuner_type = TUNER_PHILIPS_TDA8290,
- .tuner_addr = 0x41,
- .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
- .tuner_gpio = terratec_h5_gpio,
-#endif
+ .tuner_type = TUNER_ABSENT,
.i2c_speed = EM2874_I2C_SECONDARY_BUS_SELECT |
EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
diff --git a/drivers/media/video/em28xx/em28xx-core.c b/drivers/media/video/em28xx/em28xx-core.c
index 5717bdee8f1b..de2cb20ad2cc 100644
--- a/drivers/media/video/em28xx/em28xx-core.c
+++ b/drivers/media/video/em28xx/em28xx-core.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
+#include <sound/ac97_codec.h>
#include <media/v4l2-common.h>
#include "em28xx.h"
@@ -326,13 +327,13 @@ struct em28xx_vol_itable {
};
static struct em28xx_vol_itable inputs[] = {
- { EM28XX_AMUX_VIDEO, AC97_VIDEO_VOL },
- { EM28XX_AMUX_LINE_IN, AC97_LINEIN_VOL },
- { EM28XX_AMUX_PHONE, AC97_PHONE_VOL },
- { EM28XX_AMUX_MIC, AC97_MIC_VOL },
- { EM28XX_AMUX_CD, AC97_CD_VOL },
- { EM28XX_AMUX_AUX, AC97_AUX_VOL },
- { EM28XX_AMUX_PCM_OUT, AC97_PCM_OUT_VOL },
+ { EM28XX_AMUX_VIDEO, AC97_VIDEO },
+ { EM28XX_AMUX_LINE_IN, AC97_LINE },
+ { EM28XX_AMUX_PHONE, AC97_PHONE },
+ { EM28XX_AMUX_MIC, AC97_MIC },
+ { EM28XX_AMUX_CD, AC97_CD },
+ { EM28XX_AMUX_AUX, AC97_AUX },
+ { EM28XX_AMUX_PCM_OUT, AC97_PCM },
};
static int set_ac97_input(struct em28xx *dev)
@@ -415,11 +416,11 @@ struct em28xx_vol_otable {
};
static const struct em28xx_vol_otable outputs[] = {
- { EM28XX_AOUT_MASTER, AC97_MASTER_VOL },
- { EM28XX_AOUT_LINE, AC97_LINE_LEVEL_VOL },
- { EM28XX_AOUT_MONO, AC97_MASTER_MONO_VOL },
- { EM28XX_AOUT_LFE, AC97_LFE_MASTER_VOL },
- { EM28XX_AOUT_SURR, AC97_SURR_MASTER_VOL },
+ { EM28XX_AOUT_MASTER, AC97_MASTER },
+ { EM28XX_AOUT_LINE, AC97_HEADPHONE },
+ { EM28XX_AOUT_MONO, AC97_MASTER_MONO },
+ { EM28XX_AOUT_LFE, AC97_CENTER_LFE_MASTER },
+ { EM28XX_AOUT_SURR, AC97_SURROUND_MASTER },
};
int em28xx_audio_analog_set(struct em28xx *dev)
@@ -459,9 +460,9 @@ int em28xx_audio_analog_set(struct em28xx *dev)
if (dev->audio_mode.ac97 != EM28XX_NO_AC97) {
int vol;
- em28xx_write_ac97(dev, AC97_POWER_DOWN_CTRL, 0x4200);
- em28xx_write_ac97(dev, AC97_EXT_AUD_CTRL, 0x0031);
- em28xx_write_ac97(dev, AC97_PCM_IN_SRATE, 0xbb80);
+ em28xx_write_ac97(dev, AC97_POWERDOWN, 0x4200);
+ em28xx_write_ac97(dev, AC97_EXTENDED_STATUS, 0x0031);
+ em28xx_write_ac97(dev, AC97_PCM_LR_ADC_RATE, 0xbb80);
/* LSB: left channel - both channels with the same level */
vol = (0x1f - dev->volume) | ((0x1f - dev->volume) << 8);
@@ -487,7 +488,7 @@ int em28xx_audio_analog_set(struct em28xx *dev)
channels */
sel |= (sel << 8);
- em28xx_write_ac97(dev, AC97_RECORD_SELECT, sel);
+ em28xx_write_ac97(dev, AC97_REC_SEL, sel);
}
}
diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c
index 16410ac20092..a16531fa937a 100644
--- a/drivers/media/video/em28xx/em28xx-dvb.c
+++ b/drivers/media/video/em28xx/em28xx-dvb.c
@@ -310,31 +310,47 @@ static struct drxd_config em28xx_drxd = {
.disable_i2c_gate_ctrl = 1,
};
-struct drxk_config terratec_h5_drxk = {
+static struct drxk_config terratec_h5_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
.microcode_name = "dvb-usb-terratec-h5-drxk.fw",
+ .qam_demod_parameter_count = 2,
};
-struct drxk_config hauppauge_930c_drxk = {
+static struct drxk_config hauppauge_930c_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
.microcode_name = "dvb-usb-hauppauge-hvr930c-drxk.fw",
.chunk_size = 56,
+ .qam_demod_parameter_count = 2,
};
-struct drxk_config maxmedia_ub425_tc_drxk = {
+struct drxk_config terratec_htc_stick_drxk = {
.adr = 0x29,
.single_master = 1,
.no_i2c_bridge = 1,
+ .microcode_name = "dvb-usb-terratec-htc-stick-drxk.fw",
+ .chunk_size = 54,
+ .qam_demod_parameter_count = 2,
+ /* Required for the antenna_gpio to disable LNA. */
+ .antenna_dvbt = true,
+ /* The windows driver uses the same. This will disable LNA. */
+ .antenna_gpio = 0x6,
};
-struct drxk_config pctv_520e_drxk = {
+static struct drxk_config maxmedia_ub425_tc_drxk = {
+ .adr = 0x29,
+ .single_master = 1,
+ .no_i2c_bridge = 1,
+};
+
+static struct drxk_config pctv_520e_drxk = {
.adr = 0x29,
.single_master = 1,
.microcode_name = "dvb-demod-drxk-pctv.fw",
+ .qam_demod_parameter_count = 2,
.chunk_size = 58,
.antenna_dvbt = true, /* disable LNA */
.antenna_gpio = (1 << 2), /* disable LNA */
@@ -473,6 +489,57 @@ static void terratec_h5_init(struct em28xx *dev)
em28xx_gpio_set(dev, terratec_h5_end);
};
+static void terratec_htc_stick_init(struct em28xx *dev)
+{
+ int i;
+
+ /*
+ * GPIO configuration:
+ * 0xff: unknown (does not affect DVB-T).
+ * 0xf6: DRX-K (demodulator).
+ * 0xe6: unknown (does not affect DVB-T).
+ * 0xb6: unknown (does not affect DVB-T).
+ */
+ struct em28xx_reg_seq terratec_htc_stick_init[] = {
+ {EM28XX_R08_GPIO, 0xff, 0xff, 10},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xe6, 0xff, 50},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 100},
+ { -1, -1, -1, -1},
+ };
+ struct em28xx_reg_seq terratec_htc_stick_end[] = {
+ {EM2874_R80_GPIO, 0xb6, 0xff, 100},
+ {EM2874_R80_GPIO, 0xf6, 0xff, 50},
+ { -1, -1, -1, -1},
+ };
+
+ /* Init the analog decoder? */
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs[] = {
+ {{ 0x06, 0x02, 0x00, 0x31 }, 4},
+ {{ 0x01, 0x02 }, 2},
+ {{ 0x01, 0x02, 0x00, 0xc6 }, 4},
+ {{ 0x01, 0x00 }, 2},
+ {{ 0x01, 0x00, 0xff, 0xaf }, 4},
+ };
+
+ em28xx_gpio_set(dev, terratec_htc_stick_init);
+
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x40);
+ msleep(10);
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x44);
+ msleep(10);
+
+ dev->i2c_client.addr = 0x82 >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++)
+ i2c_master_send(&dev->i2c_client, regs[i].r, regs[i].len);
+
+ em28xx_gpio_set(dev, terratec_htc_stick_end);
+};
+
static void pctv_520e_init(struct em28xx *dev)
{
/*
@@ -944,7 +1011,6 @@ static int em28xx_dvb_init(struct em28xx *dev)
break;
}
case EM2884_BOARD_TERRATEC_H5:
- case EM2884_BOARD_CINERGY_HTC_STICK:
terratec_h5_init(dev);
dvb->fe[0] = dvb_attach(drxk_attach, &terratec_h5_drxk, &dev->i2c_adap);
@@ -1021,6 +1087,25 @@ static int em28xx_dvb_init(struct em28xx *dev)
}
}
break;
+ case EM2884_BOARD_CINERGY_HTC_STICK:
+ terratec_htc_stick_init(dev);
+
+ /* attach demodulator */
+ dvb->fe[0] = dvb_attach(drxk_attach, &terratec_htc_stick_drxk,
+ &dev->i2c_adap);
+ if (!dvb->fe[0]) {
+ result = -EINVAL;
+ goto out_free;
+ }
+
+ /* Attach the tuner. */
+ if (!dvb_attach(tda18271_attach, dvb->fe[0], 0x60,
+ &dev->i2c_adap,
+ &em28xx_cxd2820r_tda18271_config)) {
+ result = -EINVAL;
+ goto out_free;
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/video/em28xx/em28xx-i2c.c b/drivers/media/video/em28xx/em28xx-i2c.c
index 185db65b766e..1683bd9d51ee 100644
--- a/drivers/media/video/em28xx/em28xx-i2c.c
+++ b/drivers/media/video/em28xx/em28xx-i2c.c
@@ -475,6 +475,7 @@ static struct i2c_client em28xx_client_template = {
*/
static char *i2c_devs[128] = {
[0x4a >> 1] = "saa7113h",
+ [0x52 >> 1] = "drxk",
[0x60 >> 1] = "remote IR sensor",
[0x8e >> 1] = "remote IR sensor",
[0x86 >> 1] = "tda9887",
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index 5e30c4f3f248..97d36b4f19db 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -345,7 +345,7 @@ static void em28xx_ir_stop(struct rc_dev *rc)
cancel_delayed_work_sync(&ir->work);
}
-int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
+static int em28xx_ir_change_protocol(struct rc_dev *rc_dev, u64 rc_type)
{
int rc = 0;
struct em28xx_IR *ir = rc_dev->priv;
diff --git a/drivers/media/video/em28xx/em28xx-reg.h b/drivers/media/video/em28xx/em28xx-reg.h
index 2f6268505726..6ff368297f6e 100644
--- a/drivers/media/video/em28xx/em28xx-reg.h
+++ b/drivers/media/video/em28xx/em28xx-reg.h
@@ -211,58 +211,9 @@ enum em28xx_chip_id {
};
/*
- * Registers used by em202 and other AC97 chips
+ * Registers used by em202
*/
-/* Standard AC97 registers */
-#define AC97_RESET 0x00
-
- /* Output volumes */
-#define AC97_MASTER_VOL 0x02
-#define AC97_LINE_LEVEL_VOL 0x04 /* Some devices use for headphones */
-#define AC97_MASTER_MONO_VOL 0x06
-
- /* Input volumes */
-#define AC97_PC_BEEP_VOL 0x0a
-#define AC97_PHONE_VOL 0x0c
-#define AC97_MIC_VOL 0x0e
-#define AC97_LINEIN_VOL 0x10
-#define AC97_CD_VOL 0x12
-#define AC97_VIDEO_VOL 0x14
-#define AC97_AUX_VOL 0x16
-#define AC97_PCM_OUT_VOL 0x18
-
- /* capture registers */
-#define AC97_RECORD_SELECT 0x1a
-#define AC97_RECORD_GAIN 0x1c
-
- /* control registers */
-#define AC97_GENERAL_PURPOSE 0x20
-#define AC97_3D_CTRL 0x22
-#define AC97_AUD_INT_AND_PAG 0x24
-#define AC97_POWER_DOWN_CTRL 0x26
-#define AC97_EXT_AUD_ID 0x28
-#define AC97_EXT_AUD_CTRL 0x2a
-
-/* Supported rate varies for each AC97 device
- if write an unsupported value, it will return the closest one
- */
-#define AC97_PCM_OUT_FRONT_SRATE 0x2c
-#define AC97_PCM_OUT_SURR_SRATE 0x2e
-#define AC97_PCM_OUT_LFE_SRATE 0x30
-#define AC97_PCM_IN_SRATE 0x32
-
- /* For devices with more than 2 channels, extra output volumes */
-#define AC97_LFE_MASTER_VOL 0x36
-#define AC97_SURR_MASTER_VOL 0x38
-
- /* Digital SPDIF output control */
-#define AC97_SPDIF_OUT_CTRL 0x3a
-
- /* Vendor ID identifier */
-#define AC97_VENDOR_ID1 0x7c
-#define AC97_VENDOR_ID2 0x7e
-
/* EMP202 vendor registers */
#define EM202_EXT_MODEM_CTRL 0x3e
#define EM202_GPIO_CONF 0x4c
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 9769f17915c0..352f32190e68 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -33,10 +33,6 @@ struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
};
-/* V4L2 controls supported by the driver */
-static const struct ctrl sd_ctrls[] = {
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -256,8 +252,6 @@ static void sd_isoc_irq(struct urb *urb)
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -288,6 +282,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
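
The .reset_resume = gspca_resume line added here (and repeated for the other gspca subdrivers in this series) matters because a driver without a reset_resume method is unbound and re-probed by the USB core when the device had to be reset while suspended; since gspca_resume() reinitialises the camera anyway, the same callback can serve both paths. A minimal sketch of the pattern, with the probe hook and id table standing in for whatever the subdriver defines:

static struct usb_driver example_sd_driver = {
        .name           = MODULE_NAME,
        .id_table       = device_table,         /* subdriver's table, assumed */
        .probe          = sd_probe,             /* subdriver's probe, assumed */
        .disconnect     = gspca_disconnect,
#ifdef CONFIG_PM
        .suspend        = gspca_suspend,
        .resume         = gspca_resume,
        /* A post-reset resume goes through the same full re-init. */
        .reset_resume   = gspca_resume,
#endif
};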
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index f39fee0fd10f..c9052f20435e 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -31,74 +31,18 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA USB Conexant Camera Driver");
MODULE_LICENSE("GPL");
+#define QUALITY 50
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
-
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
- u8 quality;
-#define QUALITY_MIN 30
-#define QUALITY_MAX 60
-#define QUALITY_DEF 40
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *sat;
u8 jpeg_hdr[JPEG_HDR_SZ];
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 0xd4
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0x0a,
- .maximum = 0x1f,
- .step = 1,
-#define CONTRAST_DEF 0x0c
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 7,
- .step = 1,
-#define COLOR_DEF 3
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
@@ -817,17 +761,11 @@ static void cx11646_init1(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
-
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->quality = QUALITY_DEF;
return 0;
}
@@ -849,7 +787,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x22); /* JPEG 411 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, QUALITY);
cx11646_initsize(gspca_dev);
cx11646_fw(gspca_dev);
@@ -903,142 +841,99 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val, s32 sat)
{
- struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5cbx[] = { 0x88, 0x00, 0xd4, 0x01, 0x88, 0x01, 0x01, 0x01 };
__u8 reg51c[2];
- __u8 bright;
- __u8 colors;
- bright = sd->brightness;
- regE5cbx[2] = bright;
+ regE5cbx[2] = val;
reg_w(gspca_dev, 0x00e5, regE5cbx, 8);
reg_r(gspca_dev, 0x00e8, 8);
reg_w(gspca_dev, 0x00e5, regE5c, 4);
reg_r(gspca_dev, 0x00e8, 1); /* 0x00 */
- colors = sd->colors;
reg51c[0] = 0x77;
- reg51c[1] = colors;
+ reg51c[1] = sat;
reg_w(gspca_dev, 0x0051, reg51c, 2);
reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0070, reg70);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val, s32 sat)
{
- struct sd *sd = (struct sd *) gspca_dev;
__u8 regE5acx[] = { 0x88, 0x0a, 0x0c, 0x01 }; /* seem MSB */
/* __u8 regE5bcx[] = { 0x88, 0x0b, 0x12, 0x01}; * LSB */
__u8 reg51c[2];
- regE5acx[2] = sd->contrast;
+ regE5acx[2] = val;
reg_w(gspca_dev, 0x00e5, regE5acx, 4);
reg_r(gspca_dev, 0x00e8, 1); /* 0x00 */
reg51c[0] = 0x77;
- reg51c[1] = sd->colors;
+ reg51c[1] = sat;
reg_w(gspca_dev, 0x0051, reg51c, 2);
reg_w(gspca_dev, 0x0010, reg10, 2);
reg_w_val(gspca_dev, 0x0070, reg70);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- *val = sd->contrast;
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- sd->colors = val;
- if (gspca_dev->streaming) {
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val, sd->sat->cur.val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val, sd->sat->cur.val);
+ break;
+ case V4L2_CID_SATURATION:
+ setbrightness(gspca_dev, sd->brightness->cur.val, ctrl->val);
+ setcontrast(gspca_dev, sd->contrast->cur.val, ctrl->val);
+ break;
}
- return 0;
+ return gspca_dev->usb_err;
}
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 0xd4);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0x0a, 0x1f, 1, 0x0c);
+ sd->sat = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 7, 1, 3);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* -- module initialisation -- */
@@ -1064,6 +959,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
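
The conex.c changes are the template for the rest of this series: the old gspca ctrl array plus per-control get/set callbacks is replaced by a v4l2_ctrl_handler with a single .s_ctrl op, resolved back to the device with container_of(). Stripped to one control, and with the register I/O left as a comment, the pattern looks roughly like this:

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct gspca_dev *gspca_dev =
                container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

        gspca_dev->usb_err = 0;
        if (!gspca_dev->streaming)
                return 0;       /* cached value is applied at stream start */

        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:
                /* write ctrl->val to the sensor here (subdriver register I/O) */
                break;
        }
        return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
        .s_ctrl = sd_s_ctrl,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
        struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

        gspca_dev->vdev.ctrl_handler = hdl;
        v4l2_ctrl_handler_init(hdl, 1);
        v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
        return hdl->error;
}

Once registered, the cached control values are pushed to the hardware by v4l2_ctrl_handler_setup(), which the gspca.c hunk further down now calls from gspca_init_transfer() once streaming is flagged, instead of from gspca_resume().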
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index 8f33bbd091ad..2499a881d9a3 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -225,6 +225,15 @@ MODULE_LICENSE("GPL");
#define FIRMWARE_VERSION(x, y) (sd->params.version.firmwareVersion == (x) && \
sd->params.version.firmwareRevision == (y))
+#define CPIA1_CID_COMP_TARGET (V4L2_CTRL_CLASS_USER + 0x1000)
+#define BRIGHTNESS_DEF 50
+#define CONTRAST_DEF 48
+#define SATURATION_DEF 50
+#define FREQ_DEF V4L2_CID_POWER_LINE_FREQUENCY_50HZ
+#define ILLUMINATORS_1_DEF 0
+#define ILLUMINATORS_2_DEF 0
+#define COMP_TARGET_DEF CPIA_COMPRESSION_TARGET_QUALITY
+
/* Developer's Guide Table 5 p 3-34
* indexed by [mains][sensorFps.baserate][sensorFps.divisor]*/
static u8 flicker_jumps[2][2][4] =
@@ -360,135 +369,9 @@ struct sd {
atomic_t fps;
int exposure_count;
u8 exposure_status;
+ struct v4l2_ctrl *freq;
u8 mainsFreq; /* 0 = 50hz, 1 = 60hz */
u8 first_frame;
- u8 freq;
-};
-
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
-#define BRIGHTNESS_IDX 0
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 100,
- .step = 1,
-#define BRIGHTNESS_DEF 50
- .default_value = BRIGHTNESS_DEF,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define CONTRAST_IDX 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 96,
- .step = 8,
-#define CONTRAST_DEF 48
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define SATURATION_IDX 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 100,
- .step = 1,
-#define SATURATION_DEF 50
- .default_value = SATURATION_DEF,
- },
- .set = sd_setsaturation,
- .get = sd_getsaturation,
- },
-#define POWER_LINE_FREQUENCY_IDX 3
- {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
- },
- .set = sd_setfreq,
- .get = sd_getfreq,
- },
-#define ILLUMINATORS_1_IDX 4
- {
- {
- .id = V4L2_CID_ILLUMINATORS_1,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Illuminator 1",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define ILLUMINATORS_1_DEF 0
- .default_value = ILLUMINATORS_1_DEF,
- },
- .set = sd_setilluminator1,
- .get = sd_getilluminator1,
- },
-#define ILLUMINATORS_2_IDX 5
- {
- {
- .id = V4L2_CID_ILLUMINATORS_2,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Illuminator 2",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define ILLUMINATORS_2_DEF 0
- .default_value = ILLUMINATORS_2_DEF,
- },
- .set = sd_setilluminator2,
- .get = sd_getilluminator2,
- },
-#define COMP_TARGET_IDX 6
- {
- {
-#define V4L2_CID_COMP_TARGET V4L2_CID_PRIVATE_BASE
- .id = V4L2_CID_COMP_TARGET,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Compression Target",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define COMP_TARGET_DEF CPIA_COMPRESSION_TARGET_QUALITY
- .default_value = COMP_TARGET_DEF,
- },
- .set = sd_setcomptarget,
- .get = sd_getcomptarget,
- },
};
static const struct v4l2_pix_format mode[] = {
@@ -770,15 +653,6 @@ static void reset_camera_params(struct gspca_dev *gspca_dev)
params->apcor.gain2 = 0x16;
params->apcor.gain4 = 0x24;
params->apcor.gain8 = 0x34;
- params->flickerControl.flickerMode = 0;
- params->flickerControl.disabled = 1;
-
- params->flickerControl.coarseJump =
- flicker_jumps[sd->mainsFreq]
- [params->sensorFps.baserate]
- [params->sensorFps.divisor];
- params->flickerControl.allowableOverExposure =
- find_over_exposure(params->colourParams.brightness);
params->vlOffset.gain1 = 20;
params->vlOffset.gain2 = 24;
params->vlOffset.gain4 = 26;
@@ -798,6 +672,15 @@ static void reset_camera_params(struct gspca_dev *gspca_dev)
params->sensorFps.divisor = 1;
params->sensorFps.baserate = 1;
+ params->flickerControl.flickerMode = 0;
+ params->flickerControl.disabled = 1;
+ params->flickerControl.coarseJump =
+ flicker_jumps[sd->mainsFreq]
+ [params->sensorFps.baserate]
+ [params->sensorFps.divisor];
+ params->flickerControl.allowableOverExposure =
+ find_over_exposure(params->colourParams.brightness);
+
params->yuvThreshold.yThreshold = 6; /* From windows driver */
params->yuvThreshold.uvThreshold = 6; /* From windows driver */
@@ -1110,9 +993,6 @@ static int command_setlights(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int ret, p1, p2;
- if (!sd->params.qx3.qx3_detected)
- return 0;
-
p1 = (sd->params.qx3.bottomlight == 0) << 1;
p2 = (sd->params.qx3.toplight == 0) << 3;
@@ -1551,8 +1431,10 @@ static void restart_flicker(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
+ struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
+ sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
PDEBUG(D_PROBE, "cpia CPiA camera detected (vid/pid 0x%04X:0x%04X)",
@@ -1562,8 +1444,25 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
- sd_setfreq(gspca_dev, FREQ_DEF);
+ goto_low_power(gspca_dev);
+ /* Check the firmware version. */
+ sd->params.version.firmwareVersion = 0;
+ get_version_information(gspca_dev);
+ if (sd->params.version.firmwareVersion != 1) {
+ PDEBUG(D_ERR, "only firmware version 1 is supported (got: %d)",
+ sd->params.version.firmwareVersion);
+ return -ENODEV;
+ }
+ /* A bug in firmware 1-02 limits gainMode to 2 */
+ if (sd->params.version.firmwareRevision <= 2 &&
+ sd->params.exposure.gainMode > 2) {
+ sd->params.exposure.gainMode = 2;
+ }
+
+ /* set QX3 detected flag */
+ sd->params.qx3.qx3_detected = (sd->params.pnpID.vendor == 0x0813 &&
+ sd->params.pnpID.product == 0x0001);
return 0;
}
@@ -1602,21 +1501,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);
- if (sd->params.version.firmwareVersion != 1) {
- PDEBUG(D_ERR, "only firmware version 1 is supported (got: %d)",
- sd->params.version.firmwareVersion);
- return -ENODEV;
- }
-
- /* A bug in firmware 1-02 limits gainMode to 2 */
- if (sd->params.version.firmwareRevision <= 2 &&
- sd->params.exposure.gainMode > 2) {
- sd->params.exposure.gainMode = 2;
- }
-
- /* set QX3 detected flag */
- sd->params.qx3.qx3_detected = (sd->params.pnpID.vendor == 0x0813 &&
- sd->params.pnpID.product == 0x0001);
/* The fatal error checking should be done after
* the camera powers up (developer's guide p 3-38) */
@@ -1785,9 +1669,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
or disable the illuminator controls, if this isn't a QX3 */
if (sd->params.qx3.qx3_detected)
command_setlights(gspca_dev);
- else
- gspca_dev->ctrl_dis |=
- ((1 << ILLUMINATORS_1_IDX) | (1 << ILLUMINATORS_2_IDX));
sd_stopN(gspca_dev);
@@ -1871,235 +1752,123 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
do_command(gspca_dev, CPIA_COMMAND_ReadMCPorts, 0, 0, 0, 0);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int ret;
-
- sd->params.colourParams.brightness = val;
- sd->params.flickerControl.allowableOverExposure =
- find_over_exposure(sd->params.colourParams.brightness);
- if (gspca_dev->streaming) {
- ret = command_setcolourparams(gspca_dev);
- if (ret)
- return ret;
- return command_setflickerctrl(gspca_dev);
- }
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->params.colourParams.brightness;
- return 0;
-}
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->usb_err = 0;
- sd->params.colourParams.contrast = val;
- if (gspca_dev->streaming)
- return command_setcolourparams(gspca_dev);
-
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->params.colourParams.contrast;
- return 0;
-}
-
-static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->params.colourParams.saturation = val;
- if (gspca_dev->streaming)
- return command_setcolourparams(gspca_dev);
-
- return 0;
-}
-
-static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->params.colourParams.saturation;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int on;
+ if (!gspca_dev->streaming && ctrl->id != V4L2_CID_POWER_LINE_FREQUENCY)
+ return 0;
- switch (val) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- on = 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ sd->params.colourParams.brightness = ctrl->val;
+ sd->params.flickerControl.allowableOverExposure =
+ find_over_exposure(sd->params.colourParams.brightness);
+ gspca_dev->usb_err = command_setcolourparams(gspca_dev);
+ if (!gspca_dev->usb_err)
+ gspca_dev->usb_err = command_setflickerctrl(gspca_dev);
break;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- on = 1;
- sd->mainsFreq = 0;
+ case V4L2_CID_CONTRAST:
+ sd->params.colourParams.contrast = ctrl->val;
+ gspca_dev->usb_err = command_setcolourparams(gspca_dev);
break;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- on = 1;
- sd->mainsFreq = 1;
+ case V4L2_CID_SATURATION:
+ sd->params.colourParams.saturation = ctrl->val;
+ gspca_dev->usb_err = command_setcolourparams(gspca_dev);
break;
- default:
- return -EINVAL;
- }
-
- sd->freq = val;
- sd->params.flickerControl.coarseJump =
- flicker_jumps[sd->mainsFreq]
- [sd->params.sensorFps.baserate]
- [sd->params.sensorFps.divisor];
-
- return set_flicker(gspca_dev, on, gspca_dev->streaming);
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->freq;
- return 0;
-}
-
-static int sd_setcomptarget(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->params.compressionTarget.frTargeting = val;
- if (gspca_dev->streaming)
- return command_setcompressiontarget(gspca_dev);
-
- return 0;
-}
-
-static int sd_getcomptarget(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->params.compressionTarget.frTargeting;
- return 0;
-}
-
-static int sd_setilluminator(struct gspca_dev *gspca_dev, __s32 val, int n)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int ret;
-
- if (!sd->params.qx3.qx3_detected)
- return -EINVAL;
-
- switch (n) {
- case 1:
- sd->params.qx3.bottomlight = val ? 1 : 0;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ sd->mainsFreq = ctrl->val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
+ sd->params.flickerControl.coarseJump =
+ flicker_jumps[sd->mainsFreq]
+ [sd->params.sensorFps.baserate]
+ [sd->params.sensorFps.divisor];
+
+ gspca_dev->usb_err = set_flicker(gspca_dev,
+ ctrl->val != V4L2_CID_POWER_LINE_FREQUENCY_DISABLED,
+ gspca_dev->streaming);
break;
- case 2:
- sd->params.qx3.toplight = val ? 1 : 0;
+ case V4L2_CID_ILLUMINATORS_1:
+ sd->params.qx3.bottomlight = ctrl->val;
+ gspca_dev->usb_err = command_setlights(gspca_dev);
break;
- default:
- return -EINVAL;
- }
-
- ret = command_setlights(gspca_dev);
- if (ret && ret != -EINVAL)
- ret = -EBUSY;
-
- return ret;
-}
-
-static int sd_setilluminator1(struct gspca_dev *gspca_dev, __s32 val)
-{
- return sd_setilluminator(gspca_dev, val, 1);
-}
-
-static int sd_setilluminator2(struct gspca_dev *gspca_dev, __s32 val)
-{
- return sd_setilluminator(gspca_dev, val, 2);
-}
-
-static int sd_getilluminator(struct gspca_dev *gspca_dev, __s32 *val, int n)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (!sd->params.qx3.qx3_detected)
- return -EINVAL;
-
- switch (n) {
- case 1:
- *val = sd->params.qx3.bottomlight;
+ case V4L2_CID_ILLUMINATORS_2:
+ sd->params.qx3.toplight = ctrl->val;
+ gspca_dev->usb_err = command_setlights(gspca_dev);
break;
- case 2:
- *val = sd->params.qx3.toplight;
+ case CPIA1_CID_COMP_TARGET:
+ sd->params.compressionTarget.frTargeting = ctrl->val;
+ gspca_dev->usb_err = command_setcompressiontarget(gspca_dev);
break;
- default:
- return -EINVAL;
}
- return 0;
+ return gspca_dev->usb_err;
}
-static int sd_getilluminator1(struct gspca_dev *gspca_dev, __s32 *val)
-{
- return sd_getilluminator(gspca_dev, val, 1);
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getilluminator2(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- return sd_getilluminator(gspca_dev, val, 2);
-}
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+ static const char * const comp_target_menu[] = {
+ "Quality",
+ "Framerate",
+ NULL
+ };
+ static const struct v4l2_ctrl_config comp_target = {
+ .ops = &sd_ctrl_ops,
+ .id = CPIA1_CID_COMP_TARGET,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .name = "Compression Target",
+ .qmenu = comp_target_menu,
+ .max = 1,
+ .def = COMP_TARGET_DEF,
+ };
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 7);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 100, 1, BRIGHTNESS_DEF);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 96, 8, CONTRAST_DEF);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 100, 1, SATURATION_DEF);
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ FREQ_DEF);
+ if (sd->params.qx3.qx3_detected) {
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_1, 0, 1, 1,
+ ILLUMINATORS_1_DEF);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_ILLUMINATORS_2, 0, 1, 1,
+ ILLUMINATORS_2_DEF);
+ }
+ v4l2_ctrl_new_custom(hdl, &comp_target, NULL);
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
- break;
- case V4L2_CID_COMP_TARGET:
- switch (menu->index) {
- case CPIA_COMPRESSION_TARGET_QUALITY:
- strcpy((char *) menu->name, "Quality");
- return 0;
- case CPIA_COMPRESSION_TARGET_FRAMERATE:
- strcpy((char *) menu->name, "Framerate");
- return 0;
- }
- break;
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
}
- return -EINVAL;
+ return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
@@ -2129,6 +1898,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
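
For menu controls the cpia1.c conversion differs slightly: the standard power-line-frequency menu comes from v4l2_ctrl_new_std_menu(), where the V4L2 core supplies the item names that sd_querymenu() used to copy by hand, while the driver-specific compression target keeps its own strings via a v4l2_ctrl_config passed to v4l2_ctrl_new_custom(). A sketch of the two calls with their arguments spelled out; the helper name is made up, the ids and defaults follow the hunk above:

static const char * const comp_target_menu[] = { "Quality", "Framerate", NULL };

static const struct v4l2_ctrl_config comp_target = {
        .ops    = &sd_ctrl_ops,
        .id     = CPIA1_CID_COMP_TARGET,        /* V4L2_CTRL_CLASS_USER + 0x1000 */
        .type   = V4L2_CTRL_TYPE_MENU,
        .name   = "Compression Target",
        .qmenu  = comp_target_menu,             /* driver-supplied item names */
        .max    = 1,
        .def    = COMP_TARGET_DEF,
};

static void add_menu_controls(struct sd *sd, struct v4l2_ctrl_handler *hdl)
{
        /* Standard menu: (handler, ops, id, last item, skip mask, default);
         * a zero skip mask exposes all items, names come from the V4L2 core. */
        sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
                        V4L2_CID_POWER_LINE_FREQUENCY,
                        V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, FREQ_DEF);

        /* Custom menu: the driver supplies id, type, name and qmenu itself. */
        v4l2_ctrl_new_custom(hdl, &comp_target, NULL);
}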
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 81a4adbd9f7c..38f68e11c3a2 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -32,9 +32,6 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
unsigned char autogain;
char sensor;
@@ -44,76 +41,6 @@ struct sd {
#define AG_CNT_START 13
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 1,
- .maximum = 127,
- .step = 1,
-#define BRIGHTNESS_DEF 63
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define COLOR_IDX 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
-#define COLOR_DEF 7
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -464,36 +391,31 @@ static void Et_init2(struct gspca_dev *gspca_dev)
reg_w_val(gspca_dev, 0x80, 0x20); /* 0x20; */
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
int i;
- __u8 brightness = sd->brightness;
for (i = 0; i < 4; i++)
- reg_w_val(gspca_dev, ET_O_RED + i, brightness);
+ reg_w_val(gspca_dev, ET_O_RED + i, val);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
__u8 RGBG[] = { 0x80, 0x80, 0x80, 0x80, 0x00, 0x00 };
- __u8 contrast = sd->contrast;
- memset(RGBG, contrast, sizeof(RGBG) - 2);
+ memset(RGBG, val, sizeof(RGBG) - 2);
reg_w(gspca_dev, ET_G_RED, RGBG, 6);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
__u8 I2cc[] = { 0x05, 0x02, 0x02, 0x05, 0x0d };
__u8 i2cflags = 0x01;
/* __u8 green = 0; */
- __u8 colors = sd->colors;
- I2cc[3] = colors; /* red */
- I2cc[0] = 15 - colors; /* blue */
+ I2cc[3] = val; /* red */
+ I2cc[0] = 15 - val; /* blue */
/* green = 15 - ((((7*I2cc[0]) >> 2 ) + I2cc[3]) >> 1); */
/* I2cc[1] = I2cc[2] = green; */
if (sd->sensor == SENSOR_PAS106) {
@@ -504,15 +426,16 @@ static void setcolors(struct gspca_dev *gspca_dev)
I2cc[3], I2cc[0], green); */
}
-static void getcolors(struct gspca_dev *gspca_dev)
+static s32 getcolors(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
if (sd->sensor == SENSOR_PAS106) {
/* i2c_r(gspca_dev, PAS106_REG9); * blue */
i2c_r(gspca_dev, PAS106_REG9 + 3); /* red */
- sd->colors = gspca_dev->usb_buf[0] & 0x0f;
+ return gspca_dev->usb_buf[0] & 0x0f;
}
+ return 0;
}
static void setautogain(struct gspca_dev *gspca_dev)
@@ -622,8 +545,7 @@ static void Et_init1(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, PAS106_REG7, I2c4, sizeof I2c4, 1);
/* now set by fifo the whole colors setting */
reg_w(gspca_dev, ET_G_RED, GainRGBG, 6);
- getcolors(gspca_dev);
- setcolors(gspca_dev);
+ setcolors(gspca_dev, getcolors(gspca_dev));
}
/* this function is called at probe time */
@@ -641,12 +563,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
} else {
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- gspca_dev->ctrl_dis = (1 << COLOR_IDX);
}
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->autogain = AUTOGAIN_DEF;
sd->ag_cnt = -1;
return 0;
}
@@ -780,85 +697,68 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- if (gspca_dev->streaming)
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ sd->autogain = ctrl->val;
setautogain(gspca_dev);
- return 0;
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
- *val = sd->autogain;
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 1, 127, 1, 63);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+ if (sd->sensor == SENSOR_PAS106)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 15, 1, 7);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -892,6 +792,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index 6e26c93b4656..c8f2201cc35a 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -299,6 +299,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index c549574c1c7e..ced3b71f14e5 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -521,6 +521,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 31721eadc597..d4e8343f5b10 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -930,6 +930,7 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
goto out;
}
gspca_dev->streaming = 1;
+ v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
/* some bulk transfers are started by the subdriver */
if (gspca_dev->cam.bulk && gspca_dev->cam.bulk_nurbs == 0)
@@ -1049,12 +1050,6 @@ static int vidioc_g_register(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- if (!gspca_dev->sd_desc->get_chip_ident)
- return -ENOTTY;
-
- if (!gspca_dev->sd_desc->get_register)
- return -ENOTTY;
-
gspca_dev->usb_err = 0;
return gspca_dev->sd_desc->get_register(gspca_dev, reg);
}
@@ -1064,12 +1059,6 @@ static int vidioc_s_register(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- if (!gspca_dev->sd_desc->get_chip_ident)
- return -ENOTTY;
-
- if (!gspca_dev->sd_desc->set_register)
- return -ENOTTY;
-
gspca_dev->usb_err = 0;
return gspca_dev->sd_desc->set_register(gspca_dev, reg);
}
@@ -1080,9 +1069,6 @@ static int vidioc_g_chip_ident(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- if (!gspca_dev->sd_desc->get_chip_ident)
- return -ENOTTY;
-
gspca_dev->usb_err = 0;
return gspca_dev->sd_desc->get_chip_ident(gspca_dev, chip);
}
@@ -1136,8 +1122,10 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
int mode;
mode = gspca_dev->curr_mode;
- memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode],
- sizeof fmt->fmt.pix);
+ fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ /* some drivers use priv internally, zero it before giving it to
+ userspace */
+ fmt->fmt.pix.priv = 0;
return 0;
}
@@ -1168,8 +1156,10 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
/* else
; * no chance, return this mode */
}
- memcpy(&fmt->fmt.pix, &gspca_dev->cam.cam_mode[mode],
- sizeof fmt->fmt.pix);
+ fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+ /* some drivers use priv internally, zero it before giving it to
+ userspace */
+ fmt->fmt.pix.priv = 0;
return mode; /* used when s_fmt */
}
@@ -1284,9 +1274,6 @@ static void gspca_release(struct v4l2_device *v4l2_device)
struct gspca_dev *gspca_dev =
container_of(v4l2_device, struct gspca_dev, v4l2_dev);
- PDEBUG(D_PROBE, "%s released",
- video_device_node_name(&gspca_dev->vdev));
-
v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
v4l2_device_unregister(&gspca_dev->v4l2_dev);
kfree(gspca_dev->usb_buf);
@@ -1694,8 +1681,6 @@ static int vidioc_g_jpegcomp(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- if (!gspca_dev->sd_desc->get_jcomp)
- return -ENOTTY;
gspca_dev->usb_err = 0;
return gspca_dev->sd_desc->get_jcomp(gspca_dev, jpegcomp);
}
@@ -1705,8 +1690,6 @@ static int vidioc_s_jpegcomp(struct file *file, void *priv,
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- if (!gspca_dev->sd_desc->set_jcomp)
- return -ENOTTY;
gspca_dev->usb_err = 0;
return gspca_dev->sd_desc->set_jcomp(gspca_dev, jpegcomp);
}
@@ -2290,6 +2273,20 @@ int gspca_dev_probe2(struct usb_interface *intf,
v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_DQBUF);
v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QBUF);
v4l2_disable_ioctl_locking(&gspca_dev->vdev, VIDIOC_QUERYBUF);
+ if (!gspca_dev->sd_desc->get_chip_ident)
+ v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_DBG_G_CHIP_IDENT);
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ if (!gspca_dev->sd_desc->get_chip_ident ||
+ !gspca_dev->sd_desc->get_register)
+ v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_DBG_G_REGISTER);
+ if (!gspca_dev->sd_desc->get_chip_ident ||
+ !gspca_dev->sd_desc->set_register)
+ v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_DBG_S_REGISTER);
+#endif
+ if (!gspca_dev->sd_desc->get_jcomp)
+ v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_G_JPEGCOMP);
+ if (!gspca_dev->sd_desc->set_jcomp)
+ v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_S_JPEGCOMP);
/* init video stuff */
ret = video_register_device(&gspca_dev->vdev,
@@ -2429,7 +2426,6 @@ int gspca_resume(struct usb_interface *intf)
*/
streaming = gspca_dev->streaming;
gspca_dev->streaming = 0;
- v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
if (streaming)
ret = gspca_init_transfer(gspca_dev);
mutex_unlock(&gspca_dev->usb_lock);
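
The gspca.c hunks replace the per-call NULL checks returning -ENOTTY with v4l2_disable_ioctl() calls made once at registration time, so the V4L2 core rejects the unsupported ioctls before the driver handler is entered and the handlers can call the op unconditionally. A condensed sketch of the before/after shape, using the optional get_jcomp op from the sub-driver descriptor:

/* At probe time, before video_register_device(): */
if (!gspca_dev->sd_desc->get_jcomp)
        v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_G_JPEGCOMP);
if (!gspca_dev->sd_desc->set_jcomp)
        v4l2_disable_ioctl(&gspca_dev->vdev, VIDIOC_S_JPEGCOMP);

/* The ioctl handler then no longer needs its own NULL check: */
static int vidioc_g_jpegcomp(struct file *file, void *priv,
                             struct v4l2_jpegcompression *jcomp)
{
        struct gspca_dev *gspca_dev = video_drvdata(file);

        gspca_dev->usb_err = 0;
        return gspca_dev->sd_desc->get_jcomp(gspca_dev, jcomp);
}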
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 5ab3f7e12760..26b99310d628 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -54,21 +54,13 @@ enum {
#define CAMQUALITY_MIN 0 /* highest cam quality */
#define CAMQUALITY_MAX 97 /* lowest cam quality */
-enum e_ctrl {
- LIGHTFREQ,
- AUTOGAIN,
- RED,
- GREEN,
- BLUE,
- NCTRLS /* number of controls */
-};
-
/* Structure to hold all of our device specific stuff */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
int blocks_left;
const struct v4l2_pix_format *cap_mode;
+ struct v4l2_ctrl *freq;
+ struct v4l2_ctrl *jpegqual;
/* Driver stuff */
u8 type;
u8 quality; /* image quality */
@@ -139,23 +131,21 @@ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char response)
}
}
-static void setfreq(struct gspca_dev *gspca_dev)
+static void setfreq(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 freq_commands[][2] = {
{0x71, 0x80},
{0x70, 0x07}
};
- freq_commands[0][1] |= (sd->ctrls[LIGHTFREQ].val >> 1);
+ freq_commands[0][1] |= val >> 1;
jlj_write2(gspca_dev, freq_commands[0]);
jlj_write2(gspca_dev, freq_commands[1]);
}
-static void setcamquality(struct gspca_dev *gspca_dev)
+static void setcamquality(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 quality_commands[][2] = {
{0x71, 0x1E},
{0x70, 0x06}
@@ -163,7 +153,7 @@ static void setcamquality(struct gspca_dev *gspca_dev)
u8 camquality;
/* adapt camera quality from jpeg quality */
- camquality = ((QUALITY_MAX - sd->quality) * CAMQUALITY_MAX)
+ camquality = ((QUALITY_MAX - val) * CAMQUALITY_MAX)
/ (QUALITY_MAX - QUALITY_MIN);
quality_commands[0][1] += camquality;
@@ -171,130 +161,58 @@ static void setcamquality(struct gspca_dev *gspca_dev)
jlj_write2(gspca_dev, quality_commands[1]);
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 autogain_commands[][2] = {
{0x94, 0x02},
{0xcf, 0x00}
};
- autogain_commands[1][1] = (sd->ctrls[AUTOGAIN].val << 4);
+ autogain_commands[1][1] = val << 4;
jlj_write2(gspca_dev, autogain_commands[0]);
jlj_write2(gspca_dev, autogain_commands[1]);
}
-static void setred(struct gspca_dev *gspca_dev)
+static void setred(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 setred_commands[][2] = {
{0x94, 0x02},
{0xe6, 0x00}
};
- setred_commands[1][1] = sd->ctrls[RED].val;
+ setred_commands[1][1] = val;
jlj_write2(gspca_dev, setred_commands[0]);
jlj_write2(gspca_dev, setred_commands[1]);
}
-static void setgreen(struct gspca_dev *gspca_dev)
+static void setgreen(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 setgreen_commands[][2] = {
{0x94, 0x02},
{0xe7, 0x00}
};
- setgreen_commands[1][1] = sd->ctrls[GREEN].val;
+ setgreen_commands[1][1] = val;
jlj_write2(gspca_dev, setgreen_commands[0]);
jlj_write2(gspca_dev, setgreen_commands[1]);
}
-static void setblue(struct gspca_dev *gspca_dev)
+static void setblue(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 setblue_commands[][2] = {
{0x94, 0x02},
{0xe9, 0x00}
};
- setblue_commands[1][1] = sd->ctrls[BLUE].val;
+ setblue_commands[1][1] = val;
jlj_write2(gspca_dev, setblue_commands[0]);
jlj_write2(gspca_dev, setblue_commands[1]);
}
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = V4L2_CID_POWER_LINE_FREQUENCY_DISABLED, /* 1 */
- .maximum = V4L2_CID_POWER_LINE_FREQUENCY_60HZ, /* 2 */
- .step = 1,
- .default_value = V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
- },
- .set_control = setfreq
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Automatic Gain (and Exposure)",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
-#define AUTOGAIN_DEF 0
- .default_value = AUTOGAIN_DEF,
- },
- .set_control = setautogain
- },
-[RED] = {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "red balance",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
-#define RED_BALANCE_DEF 2
- .default_value = RED_BALANCE_DEF,
- },
- .set_control = setred
- },
-
-[GREEN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "green balance",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
-#define GREEN_BALANCE_DEF 2
- .default_value = GREEN_BALANCE_DEF,
- },
- .set_control = setgreen
- },
-[BLUE] = {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "blue balance",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
-#define BLUE_BALANCE_DEF 2
- .default_value = BLUE_BALANCE_DEF,
- },
- .set_control = setblue
- },
-};
-
static int jlj_start(struct gspca_dev *gspca_dev)
{
int i;
@@ -344,9 +262,9 @@ static int jlj_start(struct gspca_dev *gspca_dev)
if (start_commands[i].ack_wanted)
jlj_read1(gspca_dev, response);
}
- setcamquality(gspca_dev);
+ setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
msleep(2);
- setfreq(gspca_dev);
+ setfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->freq));
if (gspca_dev->usb_err < 0)
PDEBUG(D_ERR, "Start streaming command failed");
return gspca_dev->usb_err;
@@ -403,7 +321,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct sd *dev = (struct sd *) gspca_dev;
dev->type = id->driver_info;
- gspca_dev->cam.ctrls = dev->ctrls;
dev->quality = QUALITY_DEF;
cam->cam_mode = jlj_mode;
@@ -479,25 +396,81 @@ static const struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (menu->id) {
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "disable");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
+ setfreq(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ setred(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ setgreen(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ setblue(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ setautogain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ jpeg_set_qual(sd->jpeg_hdr, ctrl->val);
+ setcamquality(gspca_dev, ctrl->val);
break;
}
- return -EINVAL;
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+ static const struct v4l2_ctrl_config custom_autogain = {
+ .ops = &sd_ctrl_ops,
+ .id = V4L2_CID_AUTOGAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Automatic Gain (and Exposure)",
+ .max = 3,
+ .step = 1,
+ .def = 0,
+ };
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 6);
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ);
+ v4l2_ctrl_new_custom(hdl, &custom_autogain, NULL);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 3, 1, 2);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 3, 1, 2);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 3, 1, 2);
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
}
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
@@ -505,16 +478,7 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming) {
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- setcamquality(gspca_dev);
- }
+ v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
return 0;
}
@@ -524,7 +488,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
@@ -546,12 +510,10 @@ static const struct sd_desc sd_desc_sportscam_dv15 = {
.name = MODULE_NAME,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
- .querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
};
@@ -579,6 +541,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
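
jeilinj.c keeps its VIDIOC_G/S_JPEGCOMP handlers but turns them into thin wrappers around the new V4L2_CID_JPEG_COMPRESSION_QUALITY control, so the quality value lives in one place; setcamquality() then maps that user-visible JPEG quality onto the camera's own scale, which runs the other way (CAMQUALITY_MIN 0 is the best setting, CAMQUALITY_MAX 97 the worst). A worked sketch of the linear inversion, with the QUALITY_MIN/QUALITY_MAX bounds left symbolic because they are defined outside this hunk; the helper name is illustrative:

/* Linear map from [QUALITY_MIN, QUALITY_MAX] (JPEG quality, higher = better)
 * onto [0, CAMQUALITY_MAX] (camera register value, higher = worse):
 *   val == QUALITY_MAX  ->  camquality == 0               (best)
 *   val == QUALITY_MIN  ->  camquality == CAMQUALITY_MAX  (worst)
 */
static u8 jpeg_to_camquality(s32 val)
{
        return ((QUALITY_MAX - val) * CAMQUALITY_MAX)
                        / (QUALITY_MAX - QUALITY_MIN);
}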
diff --git a/drivers/media/video/gspca/jl2005bcd.c b/drivers/media/video/gspca/jl2005bcd.c
index 9c591c7c6f54..234777116e5f 100644
--- a/drivers/media/video/gspca/jl2005bcd.c
+++ b/drivers/media/video/gspca/jl2005bcd.c
@@ -505,8 +505,6 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- /* .ctrls = none have been detected */
- /* .nctrls = ARRAY_SIZE(sd_ctrls), */
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -514,7 +512,7 @@ static const struct sd_desc sd_desc = {
};
/* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0979, 0x0227)},
{}
};
@@ -536,6 +534,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index e8e8f2fe9166..40ad6687ee5d 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -63,12 +63,6 @@ struct sd {
uint8_t ibuf[0x200]; /* input buffer for control commands */
};
-/* V4L2 controls supported by the driver */
-/* controls prototypes here */
-
-static const struct ctrl sd_ctrls[] = {
-};
-
#define MODE_640x480 0x0001
#define MODE_640x488 0x0002
#define MODE_1280x1024 0x0004
@@ -373,15 +367,12 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *__data, int len)
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
/*
- .querymenu = sd_querymenu,
.get_streamparm = sd_get_streamparm,
.set_streamparm = sd_set_streamparm,
*/
@@ -410,6 +401,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index f0c0d74dfe92..bbf91e07e38b 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -50,107 +50,8 @@ struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct urb *last_data_urb;
u8 snapshot_pressed;
- u8 brightness;
- u8 contrast;
- u8 saturation;
- u8 whitebal;
- u8 sharpness;
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define SD_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 9,
- .step = 1,
-#define BRIGHTNESS_DEFAULT 4
- .default_value = BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define SD_CONTRAST 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 9,
- .step = 4,
-#define CONTRAST_DEFAULT 10
- .default_value = CONTRAST_DEFAULT,
- .flags = 0,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define SD_SATURATION 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 9,
- .step = 1,
-#define SATURATION_DEFAULT 4
- .default_value = SATURATION_DEFAULT,
- .flags = 0,
- },
- .set = sd_setsaturation,
- .get = sd_getsaturation,
- },
-#define SD_WHITEBAL 3
- {
- {
- .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "White Balance",
- .minimum = 0,
- .maximum = 33,
- .step = 1,
-#define WHITEBAL_DEFAULT 25
- .default_value = WHITEBAL_DEFAULT,
- .flags = 0,
- },
- .set = sd_setwhitebal,
- .get = sd_getwhitebal,
- },
-#define SD_SHARPNESS 4
- {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 9,
- .step = 1,
-#define SHARPNESS_DEFAULT 4
- .default_value = SHARPNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
-};
/* .priv is what goes to register 8 for this mode, known working values:
0x00 -> 176x144, cropped
@@ -202,7 +103,8 @@ static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index)
0,
1000);
if (ret < 0) {
- pr_err("reg_w err %d\n", ret);
+ pr_err("reg_w err writing %02x to %02x: %d\n",
+ value, index, ret);
gspca_dev->usb_err = ret;
}
}
@@ -223,7 +125,7 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
2,
1000);
if (ret < 0) {
- pr_err("reg_w err %d\n", ret);
+ pr_err("reg_r err %d\n", ret);
gspca_dev->usb_err = ret;
}
}
@@ -242,34 +144,33 @@ static void konica_stream_off(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
gspca_dev->cam.no_urb_create = 1;
- sd->brightness = BRIGHTNESS_DEFAULT;
- sd->contrast = CONTRAST_DEFAULT;
- sd->saturation = SATURATION_DEFAULT;
- sd->whitebal = WHITEBAL_DEFAULT;
- sd->sharpness = SHARPNESS_DEFAULT;
-
return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- /* HDG not sure if these 2 reads are needed */
- reg_r(gspca_dev, 0, 0x10);
- PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
- gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
- reg_r(gspca_dev, 0, 0x10);
- PDEBUG(D_PROBE, "Reg 0x10 reads: %02x %02x",
- gspca_dev->usb_buf[0], gspca_dev->usb_buf[1]);
+ int i;
+
+ /*
+ * The konica needs a freaking large time to "boot" (approx 6.5 sec.),
+ * and does not want to be bothered while doing so :|
+ * Register 0x10 counts from 1 - 3, with 3 being "ready"
+ */
+ msleep(6000);
+ for (i = 0; i < 20; i++) {
+ reg_r(gspca_dev, 0, 0x10);
+ if (gspca_dev->usb_buf[0] == 3)
+ break;
+ msleep(100);
+ }
reg_w(gspca_dev, 0, 0x0d);
- return 0;
+ return gspca_dev->usb_err;
}
static int sd_start(struct gspca_dev *gspca_dev)
@@ -289,12 +190,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
- reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
- reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
- reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
- reg_w(gspca_dev, sd->saturation, SATURATION_REG);
- reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
-
n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
reg_w(gspca_dev, n, 0x08);
@@ -479,125 +374,82 @@ resubmit:
pr_err("usb_submit_urb(status_urb) ret %d\n", st);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming) {
- konica_stream_off(gspca_dev);
- reg_w(gspca_dev, sd->brightness, BRIGHTNESS_REG);
- konica_stream_on(gspca_dev);
- }
-
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- sd->contrast = val;
- if (gspca_dev->streaming) {
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
konica_stream_off(gspca_dev);
- reg_w(gspca_dev, sd->contrast, CONTRAST_REG);
+ reg_w(gspca_dev, ctrl->val, BRIGHTNESS_REG);
konica_stream_on(gspca_dev);
- }
-
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
-
- return 0;
-}
-
-static int sd_setsaturation(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->saturation = val;
- if (gspca_dev->streaming) {
+ break;
+ case V4L2_CID_CONTRAST:
konica_stream_off(gspca_dev);
- reg_w(gspca_dev, sd->saturation, SATURATION_REG);
+ reg_w(gspca_dev, ctrl->val, CONTRAST_REG);
konica_stream_on(gspca_dev);
- }
- return 0;
-}
-
-static int sd_getsaturation(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->saturation;
-
- return 0;
-}
-
-static int sd_setwhitebal(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->whitebal = val;
- if (gspca_dev->streaming) {
+ break;
+ case V4L2_CID_SATURATION:
konica_stream_off(gspca_dev);
- reg_w(gspca_dev, sd->whitebal, WHITEBAL_REG);
+ reg_w(gspca_dev, ctrl->val, SATURATION_REG);
konica_stream_on(gspca_dev);
- }
- return 0;
-}
-
-static int sd_getwhitebal(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->whitebal;
-
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming) {
+ break;
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
konica_stream_off(gspca_dev);
- reg_w(gspca_dev, sd->sharpness, SHARPNESS_REG);
+ reg_w(gspca_dev, ctrl->val, WHITEBAL_REG);
konica_stream_on(gspca_dev);
+ break;
+ case V4L2_CID_SHARPNESS:
+ konica_stream_off(gspca_dev);
+ reg_w(gspca_dev, ctrl->val, SHARPNESS_REG);
+ konica_stream_on(gspca_dev);
+ break;
}
- return 0;
+ return gspca_dev->usb_err;
}
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 9, 1, 4);
+ /* Needs to be verified */
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 9, 1, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 9, 1, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ 0, 33, 1, 25);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 9, 1, 4);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -628,6 +480,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
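
The konica.c hunk above shows the conversion pattern that repeats through the rest of this patch: the hand-rolled struct ctrl table, the per-control get/set callbacks and the cached values in struct sd are replaced by the v4l2 control framework. The subdriver registers its controls from a new init_controls hook into the handler embedded in struct gspca_dev, and a single s_ctrl op recovers the device with container_of() and programs the hardware. A stripped-down sketch of that pattern (the register write and the control range are illustrative, not taken from a particular camera):

#include <media/v4l2-ctrls.h>
#include "gspca.h"

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

	gspca_dev->usb_err = 0;
	if (!gspca_dev->streaming)	/* applied when streaming starts */
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		/* push ctrl->val to the sensor here */
		break;
	}
	return gspca_dev->usb_err;	/* USB errors propagate to userspace */
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
	.s_ctrl = sd_s_ctrl,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 9, 1, 4);
	return hdl->error;	/* 0, or the first registration error */
}

Because the framework caches the current value of every control and the gspca core (not shown in this patch) applies the whole handler when streaming starts, the explicit set*() calls disappear from sd_start() and the per-control fields disappear from struct sd in each converted driver.
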
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 0c4493675438..ed22638978ce 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -400,6 +400,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
.disconnect = m5602_disconnect
};
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index ec7b21ee79fb..ff2c5abf115b 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -30,6 +30,8 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/Mars USB Camera Driver");
MODULE_LICENSE("GPL");
+#define QUALITY 50
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
@@ -42,13 +44,6 @@ struct sd {
struct v4l2_ctrl *illum_top;
struct v4l2_ctrl *illum_bottom;
};
- struct v4l2_ctrl *jpegqual;
-
- u8 quality;
-#define QUALITY_MIN 40
-#define QUALITY_MAX 70
-#define QUALITY_DEF 50
-
u8 jpeg_hdr[JPEG_HDR_SZ];
};
@@ -194,9 +189,6 @@ static int mars_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_SHARPNESS:
setsharpness(gspca_dev, ctrl->val);
break;
- case V4L2_CID_JPEG_COMPRESSION_QUALITY:
- jpeg_set_qual(sd->jpeg_hdr, ctrl->val);
- break;
default:
return -EINVAL;
}
@@ -214,7 +206,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
- v4l2_ctrl_handler_init(hdl, 7);
+ v4l2_ctrl_handler_init(hdl, 6);
sd->brightness = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 30, 1, 15);
sd->saturation = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
@@ -229,9 +221,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
sd->illum_bottom = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
V4L2_CID_ILLUMINATORS_2, 0, 1, 1, 0);
sd->illum_bottom->flags |= V4L2_CTRL_FLAG_UPDATE;
- sd->jpegqual = v4l2_ctrl_new_std(hdl, &mars_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY,
- QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
@@ -244,13 +233,11 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- sd->quality = QUALITY_DEF;
return 0;
}
@@ -269,7 +256,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x21); /* JPEG 422 */
- jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
+ jpeg_set_qual(sd->jpeg_hdr, QUALITY);
data = gspca_dev->usb_buf;
@@ -411,31 +398,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- int ret;
-
- ret = v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
- if (ret)
- return ret;
- jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
- return 0;
-}
-
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
- return 0;
-}
-
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
@@ -445,8 +407,6 @@ static const struct sd_desc sd_desc = {
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* -- module initialisation -- */
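
mars.c loses its run-time JPEG quality control entirely: the quality is now fixed at the new QUALITY define (50) and baked into the JPEG header in sd_start(), so the jpegqual control, the quality field and the get/set_jcomp hooks can all go. Drivers that keep the control, such as the w9968cf case in ov519.c further down, bridge the legacy VIDIOC_G/S_JPEGCOMP ioctls onto it, which also gives range clamping for free. A sketch of that bridge, essentially the shape of the ov519.c code below, with sd->jpegqual being the control pointer stored at registration time:

static int sd_set_jcomp(struct gspca_dev *gspca_dev,
			struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* the control framework clamps jcomp->quality to the control range */
	return v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
}

static int sd_get_jcomp(struct gspca_dev *gspca_dev,
			struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	memset(jcomp, 0, sizeof(*jcomp));
	jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
	jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT;
	return 0;
}
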
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index d73e5bd3dbf7..8f4714df5990 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -67,6 +67,7 @@
#define MR97310A_CS_GAIN_MAX 0x7ff
#define MR97310A_CS_GAIN_DEFAULT 0x110
+#define MR97310A_CID_CLOCKDIV (V4L2_CTRL_CLASS_USER + 0x1000)
#define MR97310A_MIN_CLOCKDIV_MIN 3
#define MR97310A_MIN_CLOCKDIV_MAX 8
#define MR97310A_MIN_CLOCKDIV_DEFAULT 3
@@ -84,17 +85,15 @@ MODULE_PARM_DESC(force_sensor_type, "Force sensor type (-1 (auto), 0 or 1)");
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct { /* exposure/min_clockdiv control cluster */
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *min_clockdiv;
+ };
u8 sof_read;
u8 cam_type; /* 0 is CIF and 1 is VGA */
u8 sensor_type; /* We use 0 and 1 here, too. */
u8 do_lcd_stop;
u8 adj_colors;
-
- int brightness;
- u16 exposure;
- u32 gain;
- u8 contrast;
- u8 min_clockdiv;
};
struct sensor_w_data {
@@ -105,132 +104,6 @@ struct sensor_w_data {
};
static void sd_stopN(struct gspca_dev *gspca_dev);
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getmin_clockdiv(struct gspca_dev *gspca_dev, __s32 *val);
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static void setgain(struct gspca_dev *gspca_dev);
-static void setcontrast(struct gspca_dev *gspca_dev);
-
-/* V4L2 controls supported by the driver */
-static const struct ctrl sd_ctrls[] = {
-/* Separate brightness control description for Argus QuickClix as it has
- * different limits from the other mr97310a cameras, and separate gain
- * control for Sakar CyberPix camera. */
- {
-#define NORM_BRIGHTNESS_IDX 0
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = -254,
- .maximum = 255,
- .step = 1,
- .default_value = MR97310A_BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
-#define ARGUS_QC_BRIGHTNESS_IDX 1
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = MR97310A_BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
-#define EXPOSURE_IDX 2
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = MR97310A_EXPOSURE_MIN,
- .maximum = MR97310A_EXPOSURE_MAX,
- .step = 1,
- .default_value = MR97310A_EXPOSURE_DEFAULT,
- .flags = 0,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- {
-#define GAIN_IDX 3
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = MR97310A_GAIN_MIN,
- .maximum = MR97310A_GAIN_MAX,
- .step = 1,
- .default_value = MR97310A_GAIN_DEFAULT,
- .flags = 0,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
- {
-#define SAKAR_CS_GAIN_IDX 4
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = MR97310A_CS_GAIN_MIN,
- .maximum = MR97310A_CS_GAIN_MAX,
- .step = 1,
- .default_value = MR97310A_CS_GAIN_DEFAULT,
- .flags = 0,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
- {
-#define CONTRAST_IDX 5
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = MR97310A_CONTRAST_MIN,
- .maximum = MR97310A_CONTRAST_MAX,
- .step = 1,
- .default_value = MR97310A_CONTRAST_DEFAULT,
- .flags = 0,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
-#define MIN_CLOCKDIV_IDX 6
- {
- .id = V4L2_CID_PRIVATE_BASE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Minimum Clock Divider",
- .minimum = MR97310A_MIN_CLOCKDIV_MIN,
- .maximum = MR97310A_MIN_CLOCKDIV_MAX,
- .step = 1,
- .default_value = MR97310A_MIN_CLOCKDIV_DEFAULT,
- .flags = 0,
- },
- .set = sd_setmin_clockdiv,
- .get = sd_getmin_clockdiv,
- },
-};
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_MR97310A, V4L2_FIELD_NONE,
@@ -481,7 +354,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
- int gain_default = MR97310A_GAIN_DEFAULT;
int err_code;
cam = &gspca_dev->cam;
@@ -615,52 +487,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor_type);
}
- /* Setup controls depending on camera type */
- if (sd->cam_type == CAM_TYPE_CIF) {
- /* No brightness for sensor_type 0 */
- if (sd->sensor_type == 0)
- gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) |
- (1 << ARGUS_QC_BRIGHTNESS_IDX) |
- (1 << CONTRAST_IDX) |
- (1 << SAKAR_CS_GAIN_IDX);
- else
- gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) |
- (1 << CONTRAST_IDX) |
- (1 << SAKAR_CS_GAIN_IDX) |
- (1 << MIN_CLOCKDIV_IDX);
- } else {
- /* All controls need to be disabled if VGA sensor_type is 0 */
- if (sd->sensor_type == 0)
- gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) |
- (1 << ARGUS_QC_BRIGHTNESS_IDX) |
- (1 << EXPOSURE_IDX) |
- (1 << GAIN_IDX) |
- (1 << CONTRAST_IDX) |
- (1 << SAKAR_CS_GAIN_IDX) |
- (1 << MIN_CLOCKDIV_IDX);
- else if (sd->sensor_type == 2) {
- gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) |
- (1 << ARGUS_QC_BRIGHTNESS_IDX) |
- (1 << GAIN_IDX) |
- (1 << MIN_CLOCKDIV_IDX);
- gain_default = MR97310A_CS_GAIN_DEFAULT;
- } else if (sd->do_lcd_stop)
- /* Argus QuickClix has different brightness limits */
- gspca_dev->ctrl_dis = (1 << NORM_BRIGHTNESS_IDX) |
- (1 << CONTRAST_IDX) |
- (1 << SAKAR_CS_GAIN_IDX);
- else
- gspca_dev->ctrl_dis = (1 << ARGUS_QC_BRIGHTNESS_IDX) |
- (1 << CONTRAST_IDX) |
- (1 << SAKAR_CS_GAIN_IDX);
- }
-
- sd->brightness = MR97310A_BRIGHTNESS_DEFAULT;
- sd->exposure = MR97310A_EXPOSURE_DEFAULT;
- sd->gain = gain_default;
- sd->contrast = MR97310A_CONTRAST_DEFAULT;
- sd->min_clockdiv = MR97310A_MIN_CLOCKDIV_DEFAULT;
-
return 0;
}
@@ -952,11 +778,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (err_code < 0)
return err_code;
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setexposure(gspca_dev);
- setgain(gspca_dev);
-
return isoc_enable(gspca_dev);
}
@@ -971,37 +792,25 @@ static void sd_stopN(struct gspca_dev *gspca_dev)
lcd_stop(gspca_dev);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
u8 sign_reg = 7; /* This reg and the next one used on CIF cams. */
u8 value_reg = 8; /* VGA cams seem to use regs 0x0b and 0x0c */
static const u8 quick_clix_table[] =
/* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
{ 0, 4, 8, 12, 1, 2, 3, 5, 6, 9, 7, 10, 13, 11, 14, 15};
- /*
- * This control is disabled for CIF type 1 and VGA type 0 cameras.
- * It does not quite act linearly for the Argus QuickClix camera,
- * but it does control brightness. The values are 0 - 15 only, and
- * the table above makes them act consecutively.
- */
- if ((gspca_dev->ctrl_dis & (1 << NORM_BRIGHTNESS_IDX)) &&
- (gspca_dev->ctrl_dis & (1 << ARGUS_QC_BRIGHTNESS_IDX)))
- return;
-
if (sd->cam_type == CAM_TYPE_VGA) {
sign_reg += 4;
value_reg += 4;
}
/* Note register 7 is also seen as 0x8x or 0xCx in some dumps */
- if (sd->brightness > 0) {
+ if (val > 0) {
sensor_write1(gspca_dev, sign_reg, 0x00);
- val = sd->brightness;
} else {
sensor_write1(gspca_dev, sign_reg, 0x01);
- val = (257 - sd->brightness);
+ val = 257 - val;
}
/* Use lookup table for funky Argus QuickClix brightness */
if (sd->do_lcd_stop)
@@ -1010,23 +819,20 @@ static void setbrightness(struct gspca_dev *gspca_dev)
sensor_write1(gspca_dev, value_reg, val);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv)
{
struct sd *sd = (struct sd *) gspca_dev;
int exposure = MR97310A_EXPOSURE_DEFAULT;
u8 buf[2];
- if (gspca_dev->ctrl_dis & (1 << EXPOSURE_IDX))
- return;
-
if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1) {
/* This cam does not like exposure settings < 300,
so scale 0 - 4095 to 300 - 4095 */
- exposure = (sd->exposure * 9267) / 10000 + 300;
+ exposure = (expo * 9267) / 10000 + 300;
sensor_write1(gspca_dev, 3, exposure >> 4);
sensor_write1(gspca_dev, 4, exposure & 0x0f);
} else if (sd->sensor_type == 2) {
- exposure = sd->exposure;
+ exposure = expo;
exposure >>= 3;
sensor_write1(gspca_dev, 3, exposure >> 8);
sensor_write1(gspca_dev, 4, exposure & 0xff);
@@ -1038,11 +844,11 @@ static void setexposure(struct gspca_dev *gspca_dev)
Note our 0 - 4095 exposure is mapped to 0 - 511
milliseconds exposure time */
- u8 clockdiv = (60 * sd->exposure + 7999) / 8000;
+ u8 clockdiv = (60 * expo + 7999) / 8000;
/* Limit framerate to not exceed usb bandwidth */
- if (clockdiv < sd->min_clockdiv && gspca_dev->width >= 320)
- clockdiv = sd->min_clockdiv;
+ if (clockdiv < min_clockdiv && gspca_dev->width >= 320)
+ clockdiv = min_clockdiv;
else if (clockdiv < 2)
clockdiv = 2;
@@ -1051,7 +857,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* Frame exposure time in ms = 1000 * clockdiv / 60 ->
exposure = (sd->exposure / 8) * 511 / (1000 * clockdiv / 60) */
- exposure = (60 * 511 * sd->exposure) / (8000 * clockdiv);
+ exposure = (60 * 511 * expo) / (8000 * clockdiv);
if (exposure > 511)
exposure = 511;
@@ -1065,125 +871,148 @@ static void setexposure(struct gspca_dev *gspca_dev)
}
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 gainreg;
- if ((gspca_dev->ctrl_dis & (1 << GAIN_IDX)) &&
- (gspca_dev->ctrl_dis & (1 << SAKAR_CS_GAIN_IDX)))
- return;
-
if (sd->cam_type == CAM_TYPE_CIF && sd->sensor_type == 1)
- sensor_write1(gspca_dev, 0x0e, sd->gain);
+ sensor_write1(gspca_dev, 0x0e, val);
else if (sd->cam_type == CAM_TYPE_VGA && sd->sensor_type == 2)
for (gainreg = 0x0a; gainreg < 0x11; gainreg += 2) {
- sensor_write1(gspca_dev, gainreg, sd->gain >> 8);
- sensor_write1(gspca_dev, gainreg + 1, sd->gain & 0xff);
+ sensor_write1(gspca_dev, gainreg, val >> 8);
+ sensor_write1(gspca_dev, gainreg + 1, val & 0xff);
}
else
- sensor_write1(gspca_dev, 0x10, sd->gain);
-}
-
-static void setcontrast(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (gspca_dev->ctrl_dis & (1 << CONTRAST_IDX))
- return;
-
- sensor_write1(gspca_dev, 0x1c, sd->contrast);
-}
-
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
+ sensor_write1(gspca_dev, 0x10, val);
}
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
+ sensor_write1(gspca_dev, 0x1c, val);
}
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
+ if (!gspca_dev->streaming)
+ return 0;
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, sd->exposure->val,
+ sd->min_clockdiv ? sd->min_clockdiv->val : 0);
+ break;
+ case V4L2_CID_GAIN:
+ setgain(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setmin_clockdiv(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+ static const struct v4l2_ctrl_config clockdiv = {
+ .ops = &sd_ctrl_ops,
+ .id = MR97310A_CID_CLOCKDIV,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Minimum Clock Divider",
+ .min = MR97310A_MIN_CLOCKDIV_MIN,
+ .max = MR97310A_MIN_CLOCKDIV_MAX,
+ .step = 1,
+ .def = MR97310A_MIN_CLOCKDIV_DEFAULT,
+ };
+ bool has_brightness = false;
+ bool has_argus_brightness = false;
+ bool has_contrast = false;
+ bool has_gain = false;
+ bool has_cs_gain = false;
+ bool has_exposure = false;
+ bool has_clockdiv = false;
- sd->min_clockdiv = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
-}
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
-static int sd_getmin_clockdiv(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ /* Setup controls depending on camera type */
+ if (sd->cam_type == CAM_TYPE_CIF) {
+ /* No brightness for sensor_type 0 */
+ if (sd->sensor_type == 0)
+ has_exposure = has_gain = has_clockdiv = true;
+ else
+ has_exposure = has_gain = has_brightness = true;
+ } else {
+ /* All controls need to be disabled if VGA sensor_type is 0 */
+ if (sd->sensor_type == 0)
+ ; /* no controls! */
+ else if (sd->sensor_type == 2)
+ has_exposure = has_cs_gain = has_contrast = true;
+ else if (sd->do_lcd_stop)
+ has_exposure = has_gain = has_argus_brightness =
+ has_clockdiv = true;
+ else
+ has_exposure = has_gain = has_brightness =
+ has_clockdiv = true;
+ }
- *val = sd->min_clockdiv;
+ /* Separate brightness control description for Argus QuickClix as it has
+ * different limits from the other mr97310a cameras, and separate gain
+ * control for Sakar CyberPix camera. */
+ /*
+ * This control is disabled for CIF type 1 and VGA type 0 cameras.
+ * It does not quite act linearly for the Argus QuickClix camera,
+ * but it does control brightness. The values are 0 - 15 only, and
+ * the table above makes them act consecutively.
+ */
+ if (has_brightness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -254, 255, 1,
+ MR97310A_BRIGHTNESS_DEFAULT);
+ else if (has_argus_brightness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 15, 1,
+ MR97310A_BRIGHTNESS_DEFAULT);
+ if (has_contrast)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, MR97310A_CONTRAST_MIN,
+ MR97310A_CONTRAST_MAX, 1, MR97310A_CONTRAST_DEFAULT);
+ if (has_gain)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, MR97310A_GAIN_MIN, MR97310A_GAIN_MAX,
+ 1, MR97310A_GAIN_DEFAULT);
+ else if (has_cs_gain)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN,
+ MR97310A_CS_GAIN_MIN, MR97310A_CS_GAIN_MAX,
+ 1, MR97310A_CS_GAIN_DEFAULT);
+ if (has_exposure)
+ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, MR97310A_EXPOSURE_MIN,
+ MR97310A_EXPOSURE_MAX, 1, MR97310A_EXPOSURE_DEFAULT);
+ if (has_clockdiv)
+ sd->min_clockdiv = v4l2_ctrl_new_custom(hdl, &clockdiv, NULL);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ if (has_exposure && has_clockdiv)
+ v4l2_ctrl_cluster(2, &sd->exposure);
return 0;
}
@@ -1221,10 +1050,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -1256,6 +1084,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
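
Two details of the mr97310a.c conversion are worth noting. First, the old V4L2_CID_PRIVATE_BASE control becomes a proper custom control: a driver-private CID is defined in the user control class and registered through v4l2_ctrl_new_custom() with a v4l2_ctrl_config. Second, exposure and the minimum clock divider are programmed together, so their two control pointers sit in an anonymous struct inside struct sd and are clustered; a change to either control then triggers one s_ctrl call with both ->val fields current. Condensed sketch (the clock-divider numbers come from the hunk above, the exposure range is illustrative, and sd_ctrl_ops follows the pattern sketched earlier):

#include <media/v4l2-ctrls.h>

#define EXAMPLE_CID_CLOCKDIV (V4L2_CTRL_CLASS_USER + 0x1000)

static const struct v4l2_ctrl_config clockdiv_cfg = {
	.ops	= &sd_ctrl_ops,
	.id	= EXAMPLE_CID_CLOCKDIV,
	.type	= V4L2_CTRL_TYPE_INTEGER,
	.name	= "Minimum Clock Divider",
	.min	= 3,
	.max	= 8,
	.step	= 1,
	.def	= 3,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 2);
	/* sd->exposure and sd->min_clockdiv must be adjacent members */
	sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_EXPOSURE, 0, 4095, 1, 1000);
	sd->min_clockdiv = v4l2_ctrl_new_custom(hdl, &clockdiv_cfg, NULL);
	if (hdl->error)
		return hdl->error;
	/* one s_ctrl call per change, with both values up to date */
	v4l2_ctrl_cluster(2, &sd->exposure);
	return 0;
}
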
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 42e021931e60..44c9964b1b3e 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -32,22 +32,10 @@ MODULE_LICENSE("GPL");
static int webcam;
-/* controls */
-enum e_ctrl {
- GAIN,
- EXPOSURE,
- AUTOGAIN,
- NCTRLS /* number of controls */
-};
-
-#define AUTOGAIN_DEF 1
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
-
u32 ae_res;
s8 ag_cnt;
#define AG_CNT_START 13
@@ -1667,17 +1655,13 @@ static int swap_bits(int v)
return r;
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, u8 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val, v[2];
+ u8 v[2];
- val = sd->ctrls[GAIN].val;
switch (sd->webcam) {
case P35u:
- /* Note the control goes from 0-255 not 0-127, but anything
- above 127 just means amplifying noise */
- val >>= 1; /* 0 - 255 -> 0 - 127 */
reg_w(gspca_dev, 0x1026, &val, 1);
break;
case Kr651us:
@@ -1690,13 +1674,11 @@ static void setgain(struct gspca_dev *gspca_dev)
}
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- s16 val;
u8 v[2];
- val = sd->ctrls[EXPOSURE].val;
switch (sd->webcam) {
case P35u:
v[0] = ((9 - val) << 3) | 0x01;
@@ -1713,14 +1695,12 @@ static void setexposure(struct gspca_dev *gspca_dev)
}
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
int w, h;
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
- return;
- if (!sd->ctrls[AUTOGAIN].val) {
+ if (!val) {
sd->ag_cnt = -1;
return;
}
@@ -1763,7 +1743,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
if ((unsigned) webcam >= NWEBCAMS)
webcam = 0;
sd->webcam = webcam;
- gspca_dev->cam.ctrls = sd->ctrls;
gspca_dev->cam.needs_full_bandwidth = 1;
sd->ag_cnt = -1;
@@ -1834,33 +1813,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
}
}
- switch (sd->webcam) {
- case P35u:
-/* sd->ctrls[EXPOSURE].max = 9;
- * sd->ctrls[EXPOSURE].def = 9; */
- /* coarse expo auto gain function gain minimum, to avoid
- * a large settings jump the first auto adjustment */
- sd->ctrls[GAIN].def = 255 / 5 * 2;
- break;
- case Cvideopro:
- case DvcV6:
- case Kritter:
- gspca_dev->ctrl_dis = (1 << GAIN) | (1 << AUTOGAIN);
- /* fall thru */
- case Kr651us:
- sd->ctrls[EXPOSURE].max = 315;
- sd->ctrls[EXPOSURE].def = 150;
- break;
- default:
- gspca_dev->ctrl_dis = (1 << GAIN) | (1 << EXPOSURE)
- | (1 << AUTOGAIN);
- break;
- }
-#if AUTOGAIN_DEF
- if (!(gspca_dev->ctrl_dis & (1 << AUTOGAIN)))
- gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE);
-#endif
return gspca_dev->usb_err;
}
@@ -1925,9 +1878,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
- setgain(gspca_dev);
- setexposure(gspca_dev);
- setautogain(gspca_dev);
sd->exp_too_high_cnt = 0;
sd->exp_too_low_cnt = 0;
return gspca_dev->usb_err;
@@ -1987,24 +1937,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AUTOGAIN].val = val;
- if (val)
- gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE);
- else
- gspca_dev->ctrl_inac = 0;
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-#define WANT_REGULAR_AUTOGAIN
-#define WANT_COARSE_EXPO_AUTOGAIN
-#include "autogain_functions.h"
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -2024,62 +1956,100 @@ static void do_autogain(struct gspca_dev *gspca_dev)
switch (sd->webcam) {
case P35u:
- coarse_grained_expo_autogain(gspca_dev, luma, 100, 5);
+ gspca_coarse_grained_expo_autogain(gspca_dev, luma, 100, 5);
break;
default:
- auto_gain_n_exposure(gspca_dev, luma, 100, 5, 230, 0);
+ gspca_expo_autogain(gspca_dev, luma, 100, 5, 230, 0);
break;
}
}
-/* V4L2 controls supported by the driver */
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 253,
- .step = 1,
- .default_value = 128
- },
- .set_control = setgain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 9,
- .step = 1,
- .default_value = 9
- },
- .set_control = setexposure
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = AUTOGAIN_DEF,
- .flags = V4L2_CTRL_FLAG_UPDATE
- },
- .set = sd_setautogain
- },
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ /* autogain/gain/exposure control cluster */
+ case V4L2_CID_AUTOGAIN:
+ if (ctrl->is_new)
+ setautogain(gspca_dev, ctrl->val);
+ if (!ctrl->val) {
+ if (gspca_dev->gain->is_new)
+ setgain(gspca_dev, gspca_dev->gain->val);
+ if (gspca_dev->exposure->is_new)
+ setexposure(gspca_dev,
+ gspca_dev->exposure->val);
+ }
+ break;
+ /* Some webcams only have exposure, so handle that separately from the
+ autogain/gain/exposure cluster in the previous case. */
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, gspca_dev->exposure->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
};
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+ switch (sd->webcam) {
+ case P35u:
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ /* For P35u choose coarse expo auto gain function gain minimum,
+ * to avoid a large settings jump the first auto adjustment */
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 127 / 5 * 2);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 9, 1, 9);
+ break;
+ case Kr651us:
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 253, 1, 128);
+ /* fall through */
+ case Cvideopro:
+ case DvcV6:
+ case Kritter:
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 315, 1, 150);
+ break;
+ default:
+ break;
+ }
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ if (gspca_dev->autogain)
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
+ return 0;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -2117,6 +2087,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
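
nw80x.c introduces the autogain case: autogain, gain and exposure are registered into the gspca_dev->autogain/gain/exposure pointers and then tied together with v4l2_ctrl_auto_cluster(). With the last argument false the manual controls are not volatile; the framework keeps the user's cached gain and exposure and marks them inactive while autogain is on, and s_ctrl re-applies them, guarded by is_new as in the added sd_s_ctrl above, when autogain is switched off. Inside sd_init_controls() this reduces to the following calls, shown here with the Kr651us numbers from the hunk:

	gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_GAIN, 0, 253, 1, 128);
	gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_EXPOSURE, 0, 315, 1, 150);
	/* 3 controls in the cluster; the manual ones are valid when
	 * autogain == 0 (third argument); false: not volatile, so the
	 * cached user values are restored when autogain is disabled */
	v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
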
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 183457c5cfdb..bfc7cefa59f8 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -60,25 +60,20 @@ static int frame_rate;
* are getting "Failed to read sensor ID..." */
static int i2c_detect_tries = 10;
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- EXPOSURE,
- COLORS,
- HFLIP,
- VFLIP,
- AUTOBRIGHT,
- AUTOGAIN,
- FREQ,
- NCTRL /* number of controls */
-};
-
/* ov519 device descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRL];
+ struct v4l2_ctrl *jpegqual;
+ struct v4l2_ctrl *freq;
+ struct { /* h/vflip control cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
+ struct { /* autobrightness/brightness control cluster */
+ struct v4l2_ctrl *autobright;
+ struct v4l2_ctrl *brightness;
+ };
u8 packet_nr;
@@ -101,7 +96,6 @@ struct sd {
/* Determined by sensor type */
u8 sif;
- u8 quality;
#define QUALITY_MIN 50
#define QUALITY_MAX 70
#define QUALITY_DEF 50
@@ -145,209 +139,112 @@ enum sensors {
really should move the sensor drivers to v4l2 sub drivers. */
#include "w996Xcf.c"
-/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
-static void sethvflip(struct gspca_dev *gspca_dev);
-static void setautobright(struct gspca_dev *gspca_dev);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static void setfreq(struct gspca_dev *gspca_dev);
-static void setfreq_i(struct sd *sd);
-
-static const struct ctrl sd_ctrls[] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setbrightness,
+/* table of the disabled controls */
+struct ctrl_valid {
+ int has_brightness:1;
+ int has_contrast:1;
+ int has_exposure:1;
+ int has_autogain:1;
+ int has_sat:1;
+ int has_hvflip:1;
+ int has_autobright:1;
+ int has_freq:1;
+};
+
+static const struct ctrl_valid valid_controls[] = {
+ [SEN_OV2610] = {
+ .has_exposure = 1,
+ .has_autogain = 1,
},
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setcontrast,
+ [SEN_OV2610AE] = {
+ .has_exposure = 1,
+ .has_autogain = 1,
},
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setexposure,
+ [SEN_OV3610] = {
+ /* No controls */
},
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setcolors,
+ [SEN_OV6620] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
},
-/* The flip controls work for sensors ov7660 and ov7670 only */
-[HFLIP] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip,
+ [SEN_OV6630] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
},
-[VFLIP] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip,
+ [SEN_OV66308AF] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
},
-[AUTOBRIGHT] = {
- {
- .id = V4L2_CID_AUTOBRIGHTNESS,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Brightness",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setautobright,
+ [SEN_OV7610] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
},
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- .flags = V4L2_CTRL_FLAG_UPDATE
- },
- .set = sd_setautogain,
+ [SEN_OV7620] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
},
-[FREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: no flicker, 1: 50Hz, 2:60Hz, 3: auto */
- .step = 1,
- .default_value = 0,
- },
- .set_control = setfreq,
+ [SEN_OV7620AE] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV7640] = {
+ .has_brightness = 1,
+ .has_sat = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV7648] = {
+ .has_brightness = 1,
+ .has_sat = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV7660] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_hvflip = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV7670] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_hvflip = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV76BE] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ .has_freq = 1,
+ },
+ [SEN_OV8610] = {
+ .has_brightness = 1,
+ .has_contrast = 1,
+ .has_sat = 1,
+ .has_autobright = 1,
+ },
+ [SEN_OV9600] = {
+ .has_exposure = 1,
+ .has_autogain = 1,
},
-};
-
-/* table of the disabled controls */
-static const unsigned ctrl_dis[] = {
-[SEN_OV2610] = ((1 << NCTRL) - 1) /* no control */
- ^ ((1 << EXPOSURE) /* but exposure */
- | (1 << AUTOGAIN)), /* and autogain */
-
-[SEN_OV2610AE] = ((1 << NCTRL) - 1) /* no control */
- ^ ((1 << EXPOSURE) /* but exposure */
- | (1 << AUTOGAIN)), /* and autogain */
-
-[SEN_OV3610] = (1 << NCTRL) - 1, /* no control */
-
-[SEN_OV6620] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV6630] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV66308AF] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7610] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7620] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7620AE] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7640] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << AUTOBRIGHT) |
- (1 << CONTRAST) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7648] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << AUTOBRIGHT) |
- (1 << CONTRAST) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7660] = (1 << AUTOBRIGHT) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV7670] = (1 << COLORS) |
- (1 << AUTOBRIGHT) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV76BE] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN),
-
-[SEN_OV8610] = (1 << HFLIP) |
- (1 << VFLIP) |
- (1 << EXPOSURE) |
- (1 << AUTOGAIN) |
- (1 << FREQ),
-[SEN_OV9600] = ((1 << NCTRL) - 1) /* no control */
- ^ ((1 << EXPOSURE) /* but exposure */
- | (1 << AUTOGAIN)), /* and autogain */
-
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
@@ -3306,11 +3203,11 @@ static void ov519_set_fr(struct sd *sd)
ov518_i2c_w(sd, OV7670_R11_CLKRC, clock);
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- i2c_w_mask(sd, 0x13, sd->ctrls[AUTOGAIN].val ? 0x05 : 0x00, 0x05);
+ i2c_w_mask(sd, 0x13, val ? 0x05 : 0x00, 0x05);
}
/* this function is called at probe time */
@@ -3351,8 +3248,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
break;
}
- gspca_dev->cam.ctrls = sd->ctrls;
- sd->quality = QUALITY_DEF;
sd->frame_rate = 15;
return 0;
@@ -3467,8 +3362,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
}
- gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
-
/* initialize the sensor */
switch (sd->sensor) {
case SEN_OV2610:
@@ -3494,8 +3387,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
break;
case SEN_OV6630:
case SEN_OV66308AF:
- sd->ctrls[CONTRAST].def = 200;
- /* The default is too low for the ov6630 */
write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30));
break;
default:
@@ -3522,26 +3413,12 @@ static int sd_init(struct gspca_dev *gspca_dev)
sd->gspca_dev.curr_mode = 1; /* 640x480 */
ov519_set_mode(sd);
ov519_set_fr(sd);
- sd->ctrls[COLORS].max = 4; /* 0..4 */
- sd->ctrls[COLORS].val =
- sd->ctrls[COLORS].def = 2;
- setcolors(gspca_dev);
- sd->ctrls[CONTRAST].max = 6; /* 0..6 */
- sd->ctrls[CONTRAST].val =
- sd->ctrls[CONTRAST].def = 3;
- setcontrast(gspca_dev);
- sd->ctrls[BRIGHTNESS].max = 6; /* 0..6 */
- sd->ctrls[BRIGHTNESS].val =
- sd->ctrls[BRIGHTNESS].def = 3;
- setbrightness(gspca_dev);
sd_reset_snapshot(gspca_dev);
ov51x_restart(sd);
ov51x_stop(sd); /* not in win traces */
ov51x_led_control(sd, 0);
break;
case SEN_OV7670:
- sd->ctrls[FREQ].max = 3; /* auto */
- sd->ctrls[FREQ].def = 3;
write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670));
break;
case SEN_OV8610:
@@ -4177,15 +4054,14 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
}
/* this function works for bridge ov519 and sensors ov7660 and ov7670 only */
-static void sethvflip(struct gspca_dev *gspca_dev)
+static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
struct sd *sd = (struct sd *) gspca_dev;
if (sd->gspca_dev.streaming)
reg_w(sd, OV519_R51_RESET1, 0x0f); /* block stream */
i2c_w_mask(sd, OV7670_R1E_MVFP,
- OV7670_MVFP_MIRROR * sd->ctrls[HFLIP].val
- | OV7670_MVFP_VFLIP * sd->ctrls[VFLIP].val,
+ OV7670_MVFP_MIRROR * hflip | OV7670_MVFP_VFLIP * vflip,
OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP);
if (sd->gspca_dev.streaming)
reg_w(sd, OV519_R51_RESET1, 0x00); /* restart stream */
@@ -4333,23 +4209,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_ov_sensor_window(sd);
- if (!(sd->gspca_dev.ctrl_dis & (1 << CONTRAST)))
- setcontrast(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << BRIGHTNESS)))
- setbrightness(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << EXPOSURE)))
- setexposure(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << COLORS)))
- setcolors(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & ((1 << HFLIP) | (1 << VFLIP))))
- sethvflip(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << AUTOBRIGHT)))
- setautobright(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << AUTOGAIN)))
- setautogain(gspca_dev);
- if (!(sd->gspca_dev.ctrl_dis & (1 << FREQ)))
- setfreq_i(sd);
-
/* Force clear snapshot state in case the snapshot button was
pressed while we weren't streaming */
sd->snapshot_needs_reset = 1;
@@ -4605,10 +4464,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* -- management routines -- */
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
static const struct ov_i2c_regvals brit_7660[][7] = {
{{0x0f, 0x6a}, {0x24, 0x40}, {0x25, 0x2b}, {0x26, 0x90},
{0x27, 0xe0}, {0x28, 0xe0}, {0x2c, 0xe0}},
@@ -4626,7 +4484,6 @@ static void setbrightness(struct gspca_dev *gspca_dev)
{0x27, 0x60}, {0x28, 0x60}, {0x2c, 0x60}}
};
- val = sd->ctrls[BRIGHTNESS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4640,9 +4497,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
break;
case SEN_OV7620:
case SEN_OV7620AE:
- /* 7620 doesn't like manual changes when in auto mode */
- if (!sd->ctrls[AUTOBRIGHT].val)
- i2c_w(sd, OV7610_REG_BRT, val);
+ i2c_w(sd, OV7610_REG_BRT, val);
break;
case SEN_OV7660:
write_i2c_regvals(sd, brit_7660[val],
@@ -4656,10 +4511,9 @@ static void setbrightness(struct gspca_dev *gspca_dev)
}
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
static const struct ov_i2c_regvals contrast_7660[][31] = {
{{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf8}, {0x6f, 0xa0},
{0x70, 0x58}, {0x71, 0x38}, {0x72, 0x30}, {0x73, 0x30},
@@ -4719,7 +4573,6 @@ static void setcontrast(struct gspca_dev *gspca_dev)
{0x88, 0xf1}, {0x89, 0xf9}, {0x8a, 0xfd}},
};
- val = sd->ctrls[CONTRAST].val;
switch (sd->sensor) {
case SEN_OV7610:
case SEN_OV6620:
@@ -4760,18 +4613,16 @@ static void setcontrast(struct gspca_dev *gspca_dev)
}
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (!sd->ctrls[AUTOGAIN].val)
- i2c_w(sd, 0x10, sd->ctrls[EXPOSURE].val);
+ i2c_w(sd, 0x10, val);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
static const struct ov_i2c_regvals colors_7660[][6] = {
{{0x4f, 0x28}, {0x50, 0x2a}, {0x51, 0x02}, {0x52, 0x0a},
{0x53, 0x19}, {0x54, 0x23}},
@@ -4785,7 +4636,6 @@ static void setcolors(struct gspca_dev *gspca_dev)
{0x53, 0x66}, {0x54, 0x8e}},
};
- val = sd->ctrls[COLORS].val;
switch (sd->sensor) {
case SEN_OV8610:
case SEN_OV7610:
@@ -4819,34 +4669,18 @@ static void setcolors(struct gspca_dev *gspca_dev)
}
}
-static void setautobright(struct gspca_dev *gspca_dev)
+static void setautobright(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- i2c_w_mask(sd, 0x2d, sd->ctrls[AUTOBRIGHT].val ? 0x10 : 0x00, 0x10);
+ i2c_w_mask(sd, 0x2d, val ? 0x10 : 0x00, 0x10);
}
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AUTOGAIN].val = val;
- if (val) {
- gspca_dev->ctrl_inac |= (1 << EXPOSURE);
- } else {
- gspca_dev->ctrl_inac &= ~(1 << EXPOSURE);
- sd->ctrls[EXPOSURE].val = i2c_r(sd, 0x10);
- }
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static void setfreq_i(struct sd *sd)
+static void setfreq_i(struct sd *sd, s32 val)
{
if (sd->sensor == SEN_OV7660
|| sd->sensor == SEN_OV7670) {
- switch (sd->ctrls[FREQ].val) {
+ switch (val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, OV7670_R13_COM8, 0, OV7670_COM8_BFILT);
break;
@@ -4868,7 +4702,7 @@ static void setfreq_i(struct sd *sd)
break;
}
} else {
- switch (sd->ctrls[FREQ].val) {
+ switch (val) {
case 0: /* Banding filter disabled */
i2c_w_mask(sd, 0x2d, 0x00, 0x04);
i2c_w_mask(sd, 0x2a, 0x00, 0x80);
@@ -4900,56 +4734,28 @@ static void setfreq_i(struct sd *sd)
}
}
}
-static void setfreq(struct gspca_dev *gspca_dev)
+
+static void setfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- setfreq_i(sd);
+ setfreq_i(sd, val);
/* Ugly but necessary */
if (sd->bridge == BRIDGE_W9968CF)
w9968cf_set_crop_window(sd);
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- case 3:
- if (sd->sensor != SEN_OV7670)
- return -EINVAL;
-
- strcpy((char *) menu->name, "Automatic");
- return 0;
- }
- break;
- }
- return -EINVAL;
-}
-
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
struct v4l2_jpegcompression *jcomp)
{
struct sd *sd = (struct sd *) gspca_dev;
if (sd->bridge != BRIDGE_W9968CF)
- return -EINVAL;
+ return -ENOTTY;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT |
V4L2_JPEG_MARKER_DRI;
return 0;
@@ -4961,38 +4767,161 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
if (sd->bridge != BRIDGE_W9968CF)
- return -EINVAL;
+ return -ENOTTY;
+
+ v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ return 0;
+}
- if (gspca_dev->streaming)
- return -EBUSY;
+static int sd_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
+ gspca_dev->usb_err = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ gspca_dev->exposure->val = i2c_r(sd, 0x10);
+ break;
+ }
+ return 0;
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setfreq(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOBRIGHTNESS:
+ if (ctrl->is_new)
+ setautobright(gspca_dev, ctrl->val);
+ if (!ctrl->val && sd->brightness->is_new)
+ setbrightness(gspca_dev, sd->brightness->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev, ctrl->val, sd->vflip->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (ctrl->is_new)
+ setautogain(gspca_dev, ctrl->val);
+ if (!ctrl->val && gspca_dev->exposure->is_new)
+ setexposure(gspca_dev, gspca_dev->exposure->val);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ return -EBUSY; /* Should never happen, as we grab the ctrl */
+ }
+ return gspca_dev->usb_err;
+}
- /* Return resulting jcomp params to app */
- sd_get_jcomp(gspca_dev, jcomp);
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .g_volatile_ctrl = sd_g_volatile_ctrl,
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 10);
+ if (valid_controls[sd->sensor].has_brightness)
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0,
+ sd->sensor == SEN_OV7660 ? 6 : 255, 1,
+ sd->sensor == SEN_OV7660 ? 3 : 127);
+ if (valid_controls[sd->sensor].has_contrast) {
+ if (sd->sensor == SEN_OV7660)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 6, 1, 3);
+ else
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1,
+ (sd->sensor == SEN_OV6630 ||
+ sd->sensor == SEN_OV66308AF) ? 200 : 127);
+ }
+ if (valid_controls[sd->sensor].has_sat)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0,
+ sd->sensor == SEN_OV7660 ? 4 : 255, 1,
+ sd->sensor == SEN_OV7660 ? 2 : 127);
+ if (valid_controls[sd->sensor].has_exposure)
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 255, 1, 127);
+ if (valid_controls[sd->sensor].has_hvflip) {
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ }
+ if (valid_controls[sd->sensor].has_autobright)
+ sd->autobright = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOBRIGHTNESS, 0, 1, 1, 1);
+ if (valid_controls[sd->sensor].has_autogain)
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (valid_controls[sd->sensor].has_freq) {
+ if (sd->sensor == SEN_OV7670)
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
+ else
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+ }
+ if (sd->bridge == BRIDGE_W9968CF)
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ if (gspca_dev->autogain)
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, true);
+ if (sd->autobright)
+ v4l2_ctrl_auto_cluster(2, &sd->autobright, 0, false);
+ if (sd->hflip)
+ v4l2_ctrl_cluster(2, &sd->hflip);
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
.dq_callback = sd_reset_snapshot,
- .querymenu = sd_querymenu,
.get_jcomp = sd_get_jcomp,
.set_jcomp = sd_set_jcomp,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
@@ -5052,6 +4981,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
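
The ov51x hunks above show the shape that every conversion in this series follows: the static sd_ctrls[] table and the .nctrls/.querymenu hooks go away, and the sub-driver instead provides .init_controls, registering standard controls on the gspca control handler and reacting to changes in a v4l2_ctrl_ops::s_ctrl callback. A minimal sketch of that skeleton, reduced to a single brightness control; setbrightness() here is a stubbed stand-in for the real per-sensor register code:

#include <media/v4l2-ctrls.h>
#include "gspca.h"

/* stubbed for the sketch; a real driver writes the sensor register here */
static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct gspca_dev *gspca_dev =
                container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

        gspca_dev->usb_err = 0;

        /* do not touch the hardware while not streaming; the cached
         * values are applied again when the stream is started */
        if (!gspca_dev->streaming)
                return 0;

        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:
                setbrightness(gspca_dev, ctrl->val);
                break;
        }
        return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
        .s_ctrl = sd_s_ctrl,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
        struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

        gspca_dev->vdev.ctrl_handler = hdl;
        v4l2_ctrl_handler_init(hdl, 1);
        v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
        if (hdl->error) {
                pr_err("Could not initialize controls\n");
                return hdl->error;
        }
        return 0;
}

Returning gspca_dev->usb_err from s_ctrl propagates any USB error raised by the register writes back to the ioctl caller, which is why each handler clears it first.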
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 80c81dd6d68b..bb09d7884b89 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -35,6 +35,7 @@
#include "gspca.h"
#include <linux/fixp-arith.h>
+#include <media/v4l2-ctrls.h>
#define OV534_REG_ADDRESS 0xf1 /* sensor address */
#define OV534_REG_SUBADDR 0xf2
@@ -53,29 +54,28 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("GSPCA/OV534 USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- HUE,
- SATURATION,
- BRIGHTNESS,
- CONTRAST,
- GAIN,
- EXPOSURE,
- AGC,
- AWB,
- AEC,
- SHARPNESS,
- HFLIP,
- VFLIP,
- LIGHTFREQ,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *hue;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ struct { /* gain control cluster */
+ struct v4l2_ctrl *autogain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *autowhitebalance;
+ struct { /* exposure control cluster */
+ struct v4l2_ctrl *autoexposure;
+ struct v4l2_ctrl *exposure;
+ };
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *plfreq;
__u32 last_pts;
u16 last_fid;
@@ -89,181 +89,9 @@ enum sensors {
NSENSORS
};
-/* V4L2 controls supported by the driver */
-static void sethue(struct gspca_dev *gspca_dev);
-static void setsaturation(struct gspca_dev *gspca_dev);
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setgain(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static void setagc(struct gspca_dev *gspca_dev);
-static void setawb(struct gspca_dev *gspca_dev);
-static void setaec(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static void sethvflip(struct gspca_dev *gspca_dev);
-static void setlightfreq(struct gspca_dev *gspca_dev);
-
static int sd_start(struct gspca_dev *gspca_dev);
static void sd_stopN(struct gspca_dev *gspca_dev);
-static const struct ctrl sd_ctrls[] = {
-[HUE] = {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = -90,
- .maximum = 90,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethue
- },
-[SATURATION] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 64,
- },
- .set_control = setsaturation
- },
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 0,
- },
- .set_control = setbrightness
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 32,
- },
- .set_control = setcontrast
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Main Gain",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- .default_value = 20,
- },
- .set_control = setgain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 120,
- },
- .set_control = setexposure
- },
-[AGC] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setagc
- },
-[AWB] = {
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setawb
- },
-[AEC] = {
- {
- .id = V4L2_CID_EXPOSURE_AUTO,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setaec
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- .default_value = 0,
- },
- .set_control = setsharpness
- },
-[HFLIP] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "HFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip
- },
-[VFLIP] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "VFlip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip
- },
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light Frequency Filter",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = setlightfreq
- },
-};
static const struct v4l2_pix_format ov772x_mode[] = {
{320, 240, V4L2_PIX_FMT_YUYV, V4L2_FIELD_NONE,
@@ -972,12 +800,10 @@ static void set_frame_rate(struct gspca_dev *gspca_dev)
PDEBUG(D_PROBE, "frame_rate: %d", r->fps);
}
-static void sethue(struct gspca_dev *gspca_dev)
+static void sethue(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
- val = sd->ctrls[HUE].val;
if (sd->sensor == SENSOR_OV767x) {
/* TBD */
} else {
@@ -1014,12 +840,10 @@ static void sethue(struct gspca_dev *gspca_dev)
}
}
-static void setsaturation(struct gspca_dev *gspca_dev)
+static void setsaturation(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
- val = sd->ctrls[SATURATION].val;
if (sd->sensor == SENSOR_OV767x) {
int i;
static u8 color_tb[][6] = {
@@ -1040,12 +864,10 @@ static void setsaturation(struct gspca_dev *gspca_dev)
}
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- int val;
- val = sd->ctrls[BRIGHTNESS].val;
if (sd->sensor == SENSOR_OV767x) {
if (val < 0)
val = 0x80 - val;
@@ -1055,27 +877,18 @@ static void setbrightness(struct gspca_dev *gspca_dev)
}
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
- val = sd->ctrls[CONTRAST].val;
if (sd->sensor == SENSOR_OV767x)
sccb_reg_write(gspca_dev, 0x56, val); /* contras */
else
sccb_reg_write(gspca_dev, 0x9c, val);
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
-
- if (sd->ctrls[AGC].val)
- return;
-
- val = sd->ctrls[GAIN].val;
switch (val & 0x30) {
case 0x00:
val &= 0x0f;
@@ -1097,15 +910,15 @@ static void setgain(struct gspca_dev *gspca_dev)
sccb_reg_write(gspca_dev, 0x00, val);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static s32 getgain(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
+ return sccb_reg_read(gspca_dev, 0x00);
+}
- if (sd->ctrls[AEC].val)
- return;
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
- val = sd->ctrls[EXPOSURE].val;
if (sd->sensor == SENSOR_OV767x) {
/* set only aec[9:2] */
@@ -1123,11 +936,23 @@ static void setexposure(struct gspca_dev *gspca_dev)
}
}
-static void setagc(struct gspca_dev *gspca_dev)
+static s32 getexposure(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->ctrls[AGC].val) {
+ if (sd->sensor == SENSOR_OV767x) {
+ /* get only aec[9:2] */
+ return sccb_reg_read(gspca_dev, 0x10); /* aech */
+ } else {
+ u8 hi = sccb_reg_read(gspca_dev, 0x08);
+ u8 lo = sccb_reg_read(gspca_dev, 0x10);
+ return (hi << 8 | lo) >> 1;
+ }
+}
+
+static void setagc(struct gspca_dev *gspca_dev, s32 val)
+{
+ if (val) {
sccb_reg_write(gspca_dev, 0x13,
sccb_reg_read(gspca_dev, 0x13) | 0x04);
sccb_reg_write(gspca_dev, 0x64,
@@ -1137,16 +962,14 @@ static void setagc(struct gspca_dev *gspca_dev)
sccb_reg_read(gspca_dev, 0x13) & ~0x04);
sccb_reg_write(gspca_dev, 0x64,
sccb_reg_read(gspca_dev, 0x64) & ~0x03);
-
- setgain(gspca_dev);
}
}
-static void setawb(struct gspca_dev *gspca_dev)
+static void setawb(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->ctrls[AWB].val) {
+ if (val) {
sccb_reg_write(gspca_dev, 0x13,
sccb_reg_read(gspca_dev, 0x13) | 0x02);
if (sd->sensor == SENSOR_OV772x)
@@ -1161,7 +984,7 @@ static void setawb(struct gspca_dev *gspca_dev)
}
}
-static void setaec(struct gspca_dev *gspca_dev)
+static void setaec(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 data;
@@ -1169,31 +992,25 @@ static void setaec(struct gspca_dev *gspca_dev)
data = sd->sensor == SENSOR_OV767x ?
0x05 : /* agc + aec */
0x01; /* agc */
- if (sd->ctrls[AEC].val)
+ switch (val) {
+ case V4L2_EXPOSURE_AUTO:
sccb_reg_write(gspca_dev, 0x13,
sccb_reg_read(gspca_dev, 0x13) | data);
- else {
+ break;
+ case V4L2_EXPOSURE_MANUAL:
sccb_reg_write(gspca_dev, 0x13,
sccb_reg_read(gspca_dev, 0x13) & ~data);
- if (sd->sensor == SENSOR_OV767x)
- sd->ctrls[EXPOSURE].val =
- sccb_reg_read(gspca_dev, 10); /* aech */
- else
- setexposure(gspca_dev);
+ break;
}
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
-
- val = sd->ctrls[SHARPNESS].val;
sccb_reg_write(gspca_dev, 0x91, val); /* Auto de-noise threshold */
sccb_reg_write(gspca_dev, 0x8e, val); /* De-noise threshold */
}
-static void sethvflip(struct gspca_dev *gspca_dev)
+static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
@@ -1201,28 +1018,27 @@ static void sethvflip(struct gspca_dev *gspca_dev)
if (sd->sensor == SENSOR_OV767x) {
val = sccb_reg_read(gspca_dev, 0x1e); /* mvfp */
val &= ~0x30;
- if (sd->ctrls[HFLIP].val)
+ if (hflip)
val |= 0x20;
- if (sd->ctrls[VFLIP].val)
+ if (vflip)
val |= 0x10;
sccb_reg_write(gspca_dev, 0x1e, val);
} else {
val = sccb_reg_read(gspca_dev, 0x0c);
val &= ~0xc0;
- if (sd->ctrls[HFLIP].val == 0)
+ if (hflip == 0)
val |= 0x40;
- if (sd->ctrls[VFLIP].val == 0)
+ if (vflip == 0)
val |= 0x80;
sccb_reg_write(gspca_dev, 0x0c, val);
}
}
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
- val = sd->ctrls[LIGHTFREQ].val ? 0x9e : 0x00;
+ val = val ? 0x9e : 0x00;
if (sd->sensor == SENSOR_OV767x) {
sccb_reg_write(gspca_dev, 0x2a, 0x00);
if (val)
@@ -1241,8 +1057,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam = &gspca_dev->cam;
- cam->ctrls = sd->ctrls;
-
cam->cam_mode = ov772x_mode;
cam->nmodes = ARRAY_SIZE(ov772x_mode);
@@ -1251,6 +1065,195 @@ static int sd_config(struct gspca_dev *gspca_dev,
return 0;
}
+static int ov534_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler);
+ struct gspca_dev *gspca_dev = &sd->gspca_dev;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ gspca_dev->usb_err = 0;
+ if (ctrl->val && sd->gain && gspca_dev->streaming)
+ sd->gain->val = getgain(gspca_dev);
+ return gspca_dev->usb_err;
+
+ case V4L2_CID_EXPOSURE_AUTO:
+ gspca_dev->usb_err = 0;
+ if (ctrl->val == V4L2_EXPOSURE_AUTO && sd->exposure &&
+ gspca_dev->streaming)
+ sd->exposure->val = getexposure(gspca_dev);
+ return gspca_dev->usb_err;
+ }
+ return -EINVAL;
+}
+
+static int ov534_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler);
+ struct gspca_dev *gspca_dev = &sd->gspca_dev;
+
+ gspca_dev->usb_err = 0;
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HUE:
+ sethue(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setsaturation(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ /* case V4L2_CID_GAIN: */
+ setagc(gspca_dev, ctrl->val);
+ if (!gspca_dev->usb_err && !ctrl->val && sd->gain)
+ setgain(gspca_dev, sd->gain->val);
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ setawb(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ /* case V4L2_CID_EXPOSURE: */
+ setaec(gspca_dev, ctrl->val);
+ if (!gspca_dev->usb_err && ctrl->val == V4L2_EXPOSURE_MANUAL &&
+ sd->exposure)
+ setexposure(gspca_dev, sd->exposure->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev, ctrl->val, sd->vflip->val);
+ break;
+ case V4L2_CID_VFLIP:
+ sethvflip(gspca_dev, sd->hflip->val, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setlightfreq(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops ov534_ctrl_ops = {
+ .g_volatile_ctrl = ov534_g_volatile_ctrl,
+ .s_ctrl = ov534_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &sd->ctrl_handler;
+ /* parameters with different values between the supported sensors */
+ int saturation_min;
+ int saturation_max;
+ int saturation_def;
+ int brightness_min;
+ int brightness_max;
+ int brightness_def;
+ int contrast_max;
+ int contrast_def;
+ int exposure_min;
+ int exposure_max;
+ int exposure_def;
+ int hflip_def;
+
+ if (sd->sensor == SENSOR_OV767x) {
+ saturation_min = 0;
+ saturation_max = 6;
+ saturation_def = 3;
+ brightness_min = -127;
+ brightness_max = 127;
+ brightness_def = 0;
+ contrast_max = 0x80;
+ contrast_def = 0x40;
+ exposure_min = 0x08;
+ exposure_max = 0x60;
+ exposure_def = 0x13;
+ hflip_def = 1;
+ } else {
+ saturation_min = 0;
+ saturation_max = 255;
+ saturation_def = 64;
+ brightness_min = 0;
+ brightness_max = 255;
+ brightness_def = 0;
+ contrast_max = 255;
+ contrast_def = 32;
+ exposure_min = 0;
+ exposure_max = 255;
+ exposure_def = 120;
+ hflip_def = 0;
+ }
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+
+ v4l2_ctrl_handler_init(hdl, 13);
+
+ if (sd->sensor == SENSOR_OV772x)
+ sd->hue = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_HUE, -90, 90, 1, 0);
+
+ sd->saturation = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_SATURATION, saturation_min, saturation_max, 1,
+ saturation_def);
+ sd->brightness = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, brightness_min, brightness_max, 1,
+ brightness_def);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, contrast_max, 1, contrast_def);
+
+ if (sd->sensor == SENSOR_OV772x) {
+ sd->autogain = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ sd->gain = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_GAIN, 0, 63, 1, 20);
+ }
+
+ sd->autoexposure = v4l2_ctrl_new_std_menu(hdl, &ov534_ctrl_ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+ sd->exposure = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_EXPOSURE, exposure_min, exposure_max, 1,
+ exposure_def);
+
+ sd->autowhitebalance = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+
+ if (sd->sensor == SENSOR_OV772x)
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 63, 1, 0);
+
+ sd->hflip = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, hflip_def);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &ov534_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &ov534_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ if (sd->sensor == SENSOR_OV772x)
+ v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, true);
+
+ v4l2_ctrl_auto_cluster(2, &sd->autoexposure, V4L2_EXPOSURE_MANUAL,
+ true);
+
+ return 0;
+}
+
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
@@ -1286,24 +1289,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
if ((sensor_id & 0xfff0) == 0x7670) {
sd->sensor = SENSOR_OV767x;
- gspca_dev->ctrl_dis = (1 << HUE) |
- (1 << GAIN) |
- (1 << AGC) |
- (1 << SHARPNESS); /* auto */
- sd->ctrls[SATURATION].min = 0,
- sd->ctrls[SATURATION].max = 6,
- sd->ctrls[SATURATION].def = 3,
- sd->ctrls[BRIGHTNESS].min = -127;
- sd->ctrls[BRIGHTNESS].max = 127;
- sd->ctrls[BRIGHTNESS].def = 0;
- sd->ctrls[CONTRAST].max = 0x80;
- sd->ctrls[CONTRAST].def = 0x40;
- sd->ctrls[EXPOSURE].min = 0x08;
- sd->ctrls[EXPOSURE].max = 0x60;
- sd->ctrls[EXPOSURE].def = 0x13;
- sd->ctrls[SHARPNESS].max = 9;
- sd->ctrls[SHARPNESS].def = 4;
- sd->ctrls[HFLIP].def = 1;
gspca_dev->cam.cam_mode = ov767x_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(ov767x_mode);
} else {
@@ -1366,22 +1351,23 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_frame_rate(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << HUE)))
- sethue(gspca_dev);
- setsaturation(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << AGC)))
- setagc(gspca_dev);
- setawb(gspca_dev);
- setaec(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << GAIN)))
- setgain(gspca_dev);
- setexposure(gspca_dev);
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- if (!(gspca_dev->ctrl_dis & (1 << SHARPNESS)))
- setsharpness(gspca_dev);
- sethvflip(gspca_dev);
- setlightfreq(gspca_dev);
+ if (sd->hue)
+ sethue(gspca_dev, v4l2_ctrl_g_ctrl(sd->hue));
+ setsaturation(gspca_dev, v4l2_ctrl_g_ctrl(sd->saturation));
+ if (sd->autogain)
+ setagc(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain));
+ setawb(gspca_dev, v4l2_ctrl_g_ctrl(sd->autowhitebalance));
+ setaec(gspca_dev, v4l2_ctrl_g_ctrl(sd->autoexposure));
+ if (sd->gain)
+ setgain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+ setbrightness(gspca_dev, v4l2_ctrl_g_ctrl(sd->brightness));
+ setcontrast(gspca_dev, v4l2_ctrl_g_ctrl(sd->contrast));
+ if (sd->sharpness)
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
+ sethvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+ v4l2_ctrl_g_ctrl(sd->vflip));
+ setlightfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->plfreq));
ov534_set_led(gspca_dev, 1);
ov534_reg_write(gspca_dev, 0xe0, 0x00);
@@ -1483,25 +1469,6 @@ scan_next:
} while (remaining_len > 0);
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "Disabled");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- }
- break;
- }
-
- return -EINVAL;
-}
-
/* get stream parameters (framerate) */
static void sd_get_streamparm(struct gspca_dev *gspca_dev,
struct v4l2_streamparm *parm)
@@ -1536,14 +1503,12 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
.get_streamparm = sd_get_streamparm,
.set_streamparm = sd_set_streamparm,
};
@@ -1572,6 +1537,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
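
ov534.c also shows how the manual gain and exposure controls stay meaningful while their automatic counterparts are on: v4l2_ctrl_auto_cluster() with set_volatile set to true marks the manual control volatile in auto mode, so reads go through g_volatile_ctrl, which fetches the value the sensor's AGC actually chose. A reduced sketch of the gain half of that arrangement; getgain(), setagc() and setgain() are stubbed stand-ins for the driver's helpers of the same names:

#include <media/v4l2-ctrls.h>
#include "gspca.h"

/* stubbed stand-ins for the sensor access helpers */
static s32 getgain(struct gspca_dev *gspca_dev) { return 0; }
static void setagc(struct gspca_dev *gspca_dev, s32 on) { }
static void setgain(struct gspca_dev *gspca_dev, s32 val) { }

struct sd {
        struct gspca_dev gspca_dev;     /* !! must be the first item */
        struct v4l2_ctrl_handler ctrl_handler;
        struct {                        /* gain control cluster */
                struct v4l2_ctrl *autogain;
                struct v4l2_ctrl *gain;
        };
};

static int sd_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
        struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler);
        struct gspca_dev *gspca_dev = &sd->gspca_dev;

        gspca_dev->usb_err = 0;
        if (ctrl->id == V4L2_CID_AUTOGAIN &&
            ctrl->val && gspca_dev->streaming)
                sd->gain->val = getgain(gspca_dev);     /* report AGC result */
        return gspca_dev->usb_err;
}

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct sd *sd = container_of(ctrl->handler, struct sd, ctrl_handler);
        struct gspca_dev *gspca_dev = &sd->gspca_dev;

        gspca_dev->usb_err = 0;
        if (!gspca_dev->streaming)
                return 0;
        if (ctrl->id == V4L2_CID_AUTOGAIN) {    /* cluster master */
                setagc(gspca_dev, ctrl->val);
                if (!ctrl->val)                 /* back to manual gain */
                        setgain(gspca_dev, sd->gain->val);
        }
        return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops sd_ctrl_ops = {
        .g_volatile_ctrl = sd_g_volatile_ctrl,
        .s_ctrl = sd_s_ctrl,
};

static int sd_init_controls(struct gspca_dev *gspca_dev)
{
        struct sd *sd = (struct sd *)gspca_dev;
        struct v4l2_ctrl_handler *hdl = &sd->ctrl_handler;

        gspca_dev->vdev.ctrl_handler = hdl;
        v4l2_ctrl_handler_init(hdl, 2);
        sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
        sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
                        V4L2_CID_GAIN, 0, 63, 1, 20);
        if (hdl->error)
                return hdl->error;
        /* 0 is the autogain value that means manual; true makes the
         * manual control volatile while autogain is enabled */
        v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, true);
        return 0;
}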
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index 1fd41f0d2e95..c4cd028fe0b4 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -47,22 +47,9 @@ MODULE_AUTHOR("Jean-Francois Moine <moinejf@free.fr>");
MODULE_DESCRIPTION("GSPCA/OV534_9 USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- AUTOGAIN,
- EXPOSURE,
- SHARPNESS,
- SATUR,
- LIGHTFREQ,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
__u32 last_pts;
u8 last_fid;
@@ -75,103 +62,6 @@ enum sensors {
NSENSORS
};
-/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setautogain(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static void setsharpness(struct gspca_dev *gspca_dev);
-static void setsatur(struct gspca_dev *gspca_dev);
-static void setlightfreq(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 7
- },
- .set_control = setbrightness
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 3
- },
- .set_control = setcontrast
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Autogain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set_control = setautogain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0
- },
- .set_control = setexposure
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = -1, /* -1 = auto */
- .maximum = 4,
- .step = 1,
- .default_value = -1
- },
- .set_control = setsharpness
- },
-[SATUR] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 4,
- .step = 1,
- .default_value = 2
- },
- .set_control = setsatur
- },
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
- .default_value = 0
- },
- .set_control = setlightfreq
- },
-};
-
static const struct v4l2_pix_format ov965x_mode[] = {
#define QVGA_MODE 0
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -1104,16 +994,14 @@ static void set_led(struct gspca_dev *gspca_dev, int status)
}
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 val;
s8 sval;
- if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
- return;
if (sd->sensor == SENSOR_OV562x) {
- sval = sd->ctrls[BRIGHTNESS].val;
+ sval = brightness;
val = 0x76;
val += sval;
sccb_write(gspca_dev, 0x24, val);
@@ -1128,7 +1016,7 @@ static void setbrightness(struct gspca_dev *gspca_dev)
val = 0xe6;
sccb_write(gspca_dev, 0x26, val);
} else {
- val = sd->ctrls[BRIGHTNESS].val;
+ val = brightness;
if (val < 8)
val = 15 - val; /* f .. 8 */
else
@@ -1138,43 +1026,32 @@ static void setbrightness(struct gspca_dev *gspca_dev)
}
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (gspca_dev->ctrl_dis & (1 << CONTRAST))
- return;
sccb_write(gspca_dev, 0x56, /* cnst1 - contrast 1 ctrl coeff */
- sd->ctrls[CONTRAST].val << 4);
+ val << 4);
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 autogain)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 val;
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
- return;
/*fixme: should adjust agc/awb/aec by different controls */
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->ctrls[AUTOGAIN].val)
+ if (autogain)
val |= 0x05; /* agc & aec */
else
val &= 0xfa;
sccb_write(gspca_dev, 0x13, val);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 exposure)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
static const u8 expo[4] = {0x00, 0x25, 0x38, 0x5e};
+ u8 val;
- if (gspca_dev->ctrl_dis & (1 << EXPOSURE))
- return;
- sccb_write(gspca_dev, 0x10, /* aec[9:2] */
- expo[sd->ctrls[EXPOSURE].val]);
+ sccb_write(gspca_dev, 0x10, expo[exposure]); /* aec[9:2] */
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
@@ -1185,14 +1062,8 @@ static void setexposure(struct gspca_dev *gspca_dev)
sccb_write(gspca_dev, 0xa1, val & 0xe0); /* aec[15:10] = 0 */
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- s8 val;
-
- if (gspca_dev->ctrl_dis & (1 << SHARPNESS))
- return;
- val = sd->ctrls[SHARPNESS].val;
if (val < 0) { /* auto */
val = sccb_read(gspca_dev, 0x42); /* com17 */
sccb_write(gspca_dev, 0xff, 0x00);
@@ -1209,9 +1080,8 @@ static void setsharpness(struct gspca_dev *gspca_dev)
sccb_write(gspca_dev, 0x42, val & 0xbf);
}
-static void setsatur(struct gspca_dev *gspca_dev)
+static void setsatur(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 val1, val2, val3;
static const u8 matrix[5][2] = {
{0x14, 0x38},
@@ -1221,10 +1091,8 @@ static void setsatur(struct gspca_dev *gspca_dev)
{0x48, 0x90}
};
- if (gspca_dev->ctrl_dis & (1 << SATUR))
- return;
- val1 = matrix[sd->ctrls[SATUR].val][0];
- val2 = matrix[sd->ctrls[SATUR].val][1];
+ val1 = matrix[val][0];
+ val2 = matrix[val][1];
val3 = val1 + val2;
sccb_write(gspca_dev, 0x4f, val3); /* matrix coeff */
sccb_write(gspca_dev, 0x50, val3);
@@ -1239,16 +1107,13 @@ static void setsatur(struct gspca_dev *gspca_dev)
sccb_write(gspca_dev, 0x41, val1);
}
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 freq)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 val;
- if (gspca_dev->ctrl_dis & (1 << LIGHTFREQ))
- return;
val = sccb_read(gspca_dev, 0x13); /* com8 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->ctrls[LIGHTFREQ].val == 0) {
+ if (freq == 0) {
sccb_write(gspca_dev, 0x13, val & 0xdf);
return;
}
@@ -1256,7 +1121,7 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
val = sccb_read(gspca_dev, 0x42); /* com17 */
sccb_write(gspca_dev, 0xff, 0x00);
- if (sd->ctrls[LIGHTFREQ].val == 1)
+ if (freq == 1)
val |= 0x01;
else
val &= 0xfe;
@@ -1267,13 +1132,6 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- gspca_dev->cam.ctrls = sd->ctrls;
-
-#if AUTOGAIN_DEF != 0
- gspca_dev->ctrl_inac |= (1 << EXPOSURE);
-#endif
return 0;
}
@@ -1330,9 +1188,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
gspca_dev->cam.cam_mode = ov971x_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(ov971x_mode);
- /* no control yet */
- gspca_dev->ctrl_dis = (1 << NCTRLS) - 1;
-
gspca_dev->cam.bulk = 1;
gspca_dev->cam.bulk_size = 16384;
gspca_dev->cam.bulk_nurbs = 2;
@@ -1358,16 +1213,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x56, 0x17);
} else if ((sensor_id & 0xfff0) == 0x5620) {
sd->sensor = SENSOR_OV562x;
- gspca_dev->ctrl_dis = (1 << CONTRAST) |
- (1 << AUTOGAIN) |
- (1 << EXPOSURE) |
- (1 << SHARPNESS) |
- (1 << SATUR) |
- (1 << LIGHTFREQ);
-
- sd->ctrls[BRIGHTNESS].min = -90;
- sd->ctrls[BRIGHTNESS].max = 90;
- sd->ctrls[BRIGHTNESS].def = 0;
gspca_dev->cam.cam_mode = ov562x_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(ov562x_mode);
@@ -1390,10 +1235,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
if (sd->sensor == SENSOR_OV971x)
return gspca_dev->usb_err;
- else if (sd->sensor == SENSOR_OV562x) {
- setbrightness(gspca_dev);
+ if (sd->sensor == SENSOR_OV562x)
return gspca_dev->usb_err;
- }
+
switch (gspca_dev->curr_mode) {
case QVGA_MODE: /* 320x240 */
sccb_w_array(gspca_dev, ov965x_start_1_vga,
@@ -1437,13 +1281,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
ARRAY_SIZE(ov965x_start_2_sxga));
break;
}
- setlightfreq(gspca_dev);
- setautogain(gspca_dev);
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setexposure(gspca_dev);
- setsharpness(gspca_dev);
- setsatur(gspca_dev);
reg_w(gspca_dev, 0xe0, 0x00);
reg_w(gspca_dev, 0xe0, 0x00);
@@ -1541,38 +1378,94 @@ scan_next:
} while (remaining_len > 0);
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- switch (menu->id) {
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setsatur(gspca_dev, ctrl->val);
+ break;
case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
+ setlightfreq(gspca_dev, ctrl->val);
break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (ctrl->is_new)
+ setautogain(gspca_dev, ctrl->val);
+ if (!ctrl->val && gspca_dev->exposure->is_new)
+ setexposure(gspca_dev, gspca_dev->exposure->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ if (sd->sensor == SENSOR_OV971x)
+ return 0;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 7);
+ if (sd->sensor == SENSOR_OV562x) {
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -90, 90, 1, 0);
+ } else {
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 15, 1, 7);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 15, 1, 3);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 4, 1, 2);
+ /* -1 = auto */
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, -1, 4, 1, -1);
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 3, 1, 0);
+ v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
}
- return -EINVAL;
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -1600,6 +1493,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
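
The ov534_9 hunks drop another piece of boilerplate: the hand-rolled sd_querymenu() that strcpy()'d strings such as "50 Hz" is gone, because v4l2_ctrl_new_std_menu() already knows the item names of the standard menu controls. The driver only passes the last valid item, a bitmask of items to skip and the default. A small sketch of that call for the power-line-frequency menu; the wrapper function exists only for illustration, in the drivers above the call sits directly in sd_init_controls():

#include <media/v4l2-ctrls.h>

static struct v4l2_ctrl *add_plfreq_ctrl(struct v4l2_ctrl_handler *hdl,
                                         const struct v4l2_ctrl_ops *ops)
{
        /* items 0..2 = Disabled / 50 Hz / 60 Hz, no item skipped,
         * default Disabled; VIDIOC_QUERYMENU is then answered by the
         * control core from its own name table */
        return v4l2_ctrl_new_std_menu(hdl, ops,
                        V4L2_CID_POWER_LINE_FREQUENCY,
                        V4L2_CID_POWER_LINE_FREQUENCY_60HZ,      /* max */
                        0,                                       /* skip mask */
                        V4L2_CID_POWER_LINE_FREQUENCY_DISABLED); /* default */
}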
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index fa661c6d6d55..d236d1791f78 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -462,6 +462,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
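
The pac207 hunk, like the matching one-liners in the other drivers of this series, only wires up .reset_resume. gspca sub-drivers fully re-initialise the camera in their resume path anyway, so pointing reset_resume at gspca_resume lets a device coming back from a USB reset (rather than a clean resume) recover without being unbound and re-probed. The generic shape, for a hypothetical sub-driver whose sd_desc, struct sd and device_table are assumed to exist elsewhere in the file:

static int sd_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
{
        return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
                                THIS_MODULE);
}

static struct usb_driver sd_driver = {
        .name = KBUILD_MODNAME,
        .id_table = device_table,
        .probe = sd_probe,
        .disconnect = gspca_disconnect,
#ifdef CONFIG_PM
        .suspend = gspca_suspend,
        .resume = gspca_resume,
        /* a post-reset resume is treated like a normal resume: the
         * sub-driver init runs again and streaming is restarted */
        .reset_resume = gspca_resume,
#endif
};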
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index a0369a58c4bb..4877f7ab3d59 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -84,31 +84,31 @@
/* Include pac common sof detection functions */
#include "pac_common.h"
+#define PAC7302_GAIN_DEFAULT 15
+#define PAC7302_GAIN_KNEE 42
+#define PAC7302_EXPOSURE_DEFAULT 66 /* 33 ms / 30 fps */
+#define PAC7302_EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
+
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>, "
"Thomas Kaiser thomas@kaiser-linux.li");
MODULE_DESCRIPTION("Pixart PAC7302");
MODULE_LICENSE("GPL");
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- COLORS,
- WHITE_BALANCE,
- RED_BALANCE,
- BLUE_BALANCE,
- GAIN,
- AUTOGAIN,
- EXPOSURE,
- VFLIP,
- HFLIP,
- NCTRLS /* number of controls */
-};
-
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
-
+ struct { /* brightness / contrast cluster */
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *contrast;
+ };
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *white_balance;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+ struct { /* flip cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
u8 flags;
#define FL_HFLIP 0x01 /* mirrored by default */
#define FL_VFLIP 0x02 /* vertical flipped by default */
@@ -119,160 +119,6 @@ struct sd {
atomic_t avg_lum;
};
-/* V4L2 controls supported by the driver */
-static void setbrightcont(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
-static void setwhitebalance(struct gspca_dev *gspca_dev);
-static void setredbalance(struct gspca_dev *gspca_dev);
-static void setbluebalance(struct gspca_dev *gspca_dev);
-static void setgain(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static void setautogain(struct gspca_dev *gspca_dev);
-static void sethvflip(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
-#define BRIGHTNESS_MAX 0x20
- .maximum = BRIGHTNESS_MAX,
- .step = 1,
- .default_value = 0x10,
- },
- .set_control = setbrightcont
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
-#define CONTRAST_MAX 255
- .maximum = CONTRAST_MAX,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setbrightcont
- },
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
-#define COLOR_MAX 255
- .maximum = COLOR_MAX,
- .step = 1,
- .default_value = 127
- },
- .set_control = setcolors
- },
-[WHITE_BALANCE] = {
- {
- .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "White Balance",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 4,
- },
- .set_control = setwhitebalance
- },
-[RED_BALANCE] = {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setredbalance
- },
-[BLUE_BALANCE] = {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setbluebalance
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 62,
- .step = 1,
-#define GAIN_DEF 15
-#define GAIN_KNEE 46
- .default_value = GAIN_DEF,
- },
- .set_control = setgain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 1023,
- .step = 1,
-#define EXPOSURE_DEF 66 /* 33 ms / 30 fps */
-#define EXPOSURE_KNEE 133 /* 66 ms / 15 fps */
- .default_value = EXPOSURE_DEF,
- },
- .set_control = setexposure
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set_control = setautogain,
- },
-[HFLIP] = {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip,
- },
-[VFLIP] = {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = 0,
- },
- .set_control = sethvflip
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{640, 480, V4L2_PIX_FMT_PJPG, V4L2_FIELD_NONE,
.bytesperline = 640,
@@ -516,8 +362,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = vga_mode; /* only 640x480 */
cam->nmodes = ARRAY_SIZE(vga_mode);
- gspca_dev->cam.ctrls = sd->ctrls;
-
sd->flags = id->driver_info;
return 0;
}
@@ -536,9 +380,9 @@ static void setbrightcont(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
for (i = 0; i < 10; i++) {
v = max[i];
- v += (sd->ctrls[BRIGHTNESS].val - BRIGHTNESS_MAX)
- * 150 / BRIGHTNESS_MAX; /* 200 ? */
- v -= delta[i] * sd->ctrls[CONTRAST].val / CONTRAST_MAX;
+ v += (sd->brightness->val - sd->brightness->maximum)
+ * 150 / sd->brightness->maximum; /* 200 ? */
+ v -= delta[i] * sd->contrast->val / sd->contrast->maximum;
if (v < 0)
v = 0;
else if (v > 0xff)
@@ -561,7 +405,8 @@ static void setcolors(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x11, 0x01);
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
for (i = 0; i < 9; i++) {
- v = a[i] * sd->ctrls[COLORS].val / COLOR_MAX + b[i];
+ v = a[i] * sd->saturation->val / sd->saturation->maximum;
+ v += b[i];
reg_w(gspca_dev, 0x0f + 2 * i, (v >> 8) & 0x07);
reg_w(gspca_dev, 0x0f + 2 * i + 1, v);
}
@@ -573,7 +418,7 @@ static void setwhitebalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc6, sd->ctrls[WHITE_BALANCE].val);
+ reg_w(gspca_dev, 0xc6, sd->white_balance->val);
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -583,7 +428,7 @@ static void setredbalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc5, sd->ctrls[RED_BALANCE].val);
+ reg_w(gspca_dev, 0xc5, sd->red_balance->val);
reg_w(gspca_dev, 0xdc, 0x01);
}
@@ -593,22 +438,21 @@ static void setbluebalance(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
reg_w(gspca_dev, 0xff, 0x00); /* page 0 */
- reg_w(gspca_dev, 0xc7, sd->ctrls[BLUE_BALANCE].val);
+ reg_w(gspca_dev, 0xc7, sd->blue_balance->val);
reg_w(gspca_dev, 0xdc, 0x01);
}
static void setgain(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 reg10, reg12;
- if (sd->ctrls[GAIN].val < 32) {
- reg10 = sd->ctrls[GAIN].val;
+ if (gspca_dev->gain->val < 32) {
+ reg10 = gspca_dev->gain->val;
reg12 = 0;
} else {
reg10 = 31;
- reg12 = sd->ctrls[GAIN].val - 31;
+ reg12 = gspca_dev->gain->val - 31;
}
reg_w(gspca_dev, 0xff, 0x03); /* page 3 */
@@ -621,7 +465,6 @@ static void setgain(struct gspca_dev *gspca_dev)
static void setexposure(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 clockdiv;
u16 exposure;
@@ -630,7 +473,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
* no fps according to the formula: 90 / reg. sd->exposure is the
* desired exposure time in 0.5 ms.
*/
- clockdiv = (90 * sd->ctrls[EXPOSURE].val + 1999) / 2000;
+ clockdiv = (90 * gspca_dev->exposure->val + 1999) / 2000;
/*
* Note clockdiv = 3 also works, but when running at 30 fps, depending
@@ -655,7 +498,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
* frame exposure time in ms = 1000 * clockdiv / 90 ->
* exposure = (sd->exposure / 2) * 448 / (1000 * clockdiv / 90)
*/
- exposure = (sd->ctrls[EXPOSURE].val * 45 * 448) / (1000 * clockdiv);
+ exposure = (gspca_dev->exposure->val * 45 * 448) / (1000 * clockdiv);
/* 0 = use full frametime, 448 = no exposure, reverse it */
exposure = 448 - exposure;
@@ -668,37 +511,15 @@ static void setexposure(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x11, 0x01);
}
-static void setautogain(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- /*
- * When switching to autogain set defaults to make sure
- * we are on a valid point of the autogain gain /
- * exposure knee graph, and give this change time to
- * take effect before doing autogain.
- */
- if (sd->ctrls[AUTOGAIN].val) {
- sd->ctrls[EXPOSURE].val = EXPOSURE_DEF;
- sd->ctrls[GAIN].val = GAIN_DEF;
- sd->autogain_ignore_frames =
- PAC_AUTOGAIN_IGNORE_FRAMES;
- } else {
- sd->autogain_ignore_frames = -1;
- }
- setexposure(gspca_dev);
- setgain(gspca_dev);
-}
-
static void sethvflip(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 data, hflip, vflip;
- hflip = sd->ctrls[HFLIP].val;
+ hflip = sd->hflip->val;
if (sd->flags & FL_HFLIP)
hflip = !hflip;
- vflip = sd->ctrls[VFLIP].val;
+ vflip = sd->vflip->val;
if (sd->flags & FL_VFLIP)
vflip = !vflip;
@@ -717,6 +538,112 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+ /* when switching to autogain set defaults to make sure
+ we are on a valid point of the autogain gain /
+ exposure knee graph, and give this change time to
+ take effect before doing autogain. */
+ gspca_dev->exposure->val = PAC7302_EXPOSURE_DEFAULT;
+ gspca_dev->gain->val = PAC7302_GAIN_DEFAULT;
+ sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ }
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightcont(gspca_dev);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev);
+ break;
+ case V4L2_CID_WHITE_BALANCE_TEMPERATURE:
+ setwhitebalance(gspca_dev);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ setredbalance(gspca_dev);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ setbluebalance(gspca_dev);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setexposure(gspca_dev);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setgain(gspca_dev);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 11);
+
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 32, 1, 16);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+
+ sd->saturation = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ sd->white_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_WHITE_BALANCE_TEMPERATURE,
+ 0, 255, 1, 4);
+ sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 3, 1, 1);
+ sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 3, 1, 1);
+
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 1023, 1,
+ PAC7302_EXPOSURE_DEFAULT);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 62, 1,
+ PAC7302_GAIN_DEFAULT);
+
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_cluster(2, &sd->brightness);
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ return 0;
+}
+
+/* -- start the camera -- */
static int sd_start(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -728,11 +655,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
setwhitebalance(gspca_dev);
setredbalance(gspca_dev);
setbluebalance(gspca_dev);
- setautogain(gspca_dev);
+ setexposure(gspca_dev);
+ setgain(gspca_dev);
sethvflip(gspca_dev);
sd->sof_read = 0;
- atomic_set(&sd->avg_lum, 270 + sd->ctrls[BRIGHTNESS].val);
+ sd->autogain_ignore_frames = 0;
+ atomic_set(&sd->avg_lum, 270 + sd->brightness->val);
/* start stream */
reg_w(gspca_dev, 0xff, 0x01);
@@ -758,9 +687,6 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x78, 0x40);
}
-#define WANT_REGULAR_AUTOGAIN
-#include "autogain_functions.h"
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -774,11 +700,13 @@ static void do_autogain(struct gspca_dev *gspca_dev)
if (sd->autogain_ignore_frames > 0) {
sd->autogain_ignore_frames--;
} else {
- desired_lum = 270 + sd->ctrls[BRIGHTNESS].val;
+ desired_lum = 270 + sd->brightness->val;
- auto_gain_n_exposure(gspca_dev, avg_lum, desired_lum,
- deadzone, GAIN_KNEE, EXPOSURE_KNEE);
- sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
+ if (gspca_expo_autogain(gspca_dev, avg_lum, desired_lum,
+ deadzone, PAC7302_GAIN_KNEE,
+ PAC7302_EXPOSURE_KNEE))
+ sd->autogain_ignore_frames =
+ PAC_AUTOGAIN_IGNORE_FRAMES;
}
}
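
The do_autogain() hunk above swaps the driver-local auto_gain_n_exposure() (previously pulled in through autogain_functions.h) for the gspca core's exported gspca_expo_autogain() helper. It nudges the clustered gain/exposure controls toward the requested average luminance and returns non-zero only when it actually changed a control, so the ignore-frames counter is re-armed only after a real adjustment. A hedged sketch of a caller; the struct sd fields are assumed from the surrounding driver, and the luminance target, deadzone and knee numbers are purely illustrative:

static void do_autogain(struct gspca_dev *gspca_dev)
{
        struct sd *sd = (struct sd *)gspca_dev;
        int avg_lum = atomic_read(&sd->avg_lum);

        if (avg_lum == -1)              /* no luminance measured yet */
                return;

        if (sd->autogain_ignore_frames > 0) {
                sd->autogain_ignore_frames--;
                return;
        }

        /* target 270, deadzone 30, gain knee 42, exposure knee 133 */
        if (gspca_expo_autogain(gspca_dev, avg_lum, 270, 30, 42, 133))
                sd->autogain_ignore_frames = PAC_AUTOGAIN_IGNORE_FRAMES;
}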
@@ -944,10 +872,9 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description for pac7302 */
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
@@ -998,6 +925,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
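
pac7302 also demonstrates plain, non-auto clustering: brightness and contrast both program the same gamma-curve registers, so the two controls are tied together with v4l2_ctrl_cluster(2, &sd->brightness) and s_ctrl receives a single call, carrying the master's ID, with both ->val fields up to date. A stripped-down sketch, with a stubbed write_curve() in place of the driver's setbrightcont():

#include <media/v4l2-ctrls.h>
#include "gspca.h"

/* stubbed stand-in for the gamma-curve register writes */
static void write_curve(struct gspca_dev *gspca_dev, s32 bright, s32 cont) { }

struct sd {
        struct gspca_dev gspca_dev;     /* !! must be the first item */
        struct {                        /* brightness/contrast cluster */
                struct v4l2_ctrl *brightness;
                struct v4l2_ctrl *contrast;
        };
};

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct gspca_dev *gspca_dev =
                container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
        struct sd *sd = (struct sd *)gspca_dev;

        gspca_dev->usb_err = 0;
        if (!gspca_dev->streaming)
                return 0;

        switch (ctrl->id) {
        case V4L2_CID_BRIGHTNESS:       /* cluster master, covers contrast */
                write_curve(gspca_dev, sd->brightness->val, sd->contrast->val);
                break;
        }
        return gspca_dev->usb_err;
}

The v4l2_ctrl_cluster() call itself belongs at the end of sd_init_controls(), once the handler has reported no error, exactly as in the hunk above.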
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 115da169f32a..ba3558d3f017 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -694,6 +694,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index bb70092c2229..a33cb78a839c 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -45,15 +45,6 @@ MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Endpoints se401");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- GAIN,
- EXPOSURE,
- FREQ,
- NCTRL /* number of controls */
-};
-
/* exposure change state machine states */
enum {
EXPO_CHANGED,
@@ -64,7 +55,11 @@ enum {
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRL];
+ struct { /* exposure/freq control cluster */
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *freq;
+ };
+ bool has_brightness;
struct v4l2_pix_format fmts[MAX_MODES];
int pixels_read;
int packet_read;
@@ -77,60 +72,6 @@ struct sd {
int expo_change_state;
};
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setgain(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRL] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 15,
- },
- .set_control = setbrightness
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 50, /* Really 63 but > 50 is not pretty */
- .step = 1,
- .default_value = 25,
- },
- .set_control = setgain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 32767,
- .step = 1,
- .default_value = 15000,
- },
- .set_control = setexposure
- },
-[FREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2,
- .step = 1,
- .default_value = 0,
- },
- .set_control = setexposure
- },
-};
static void se401_write_req(struct gspca_dev *gspca_dev, u16 req, u16 value,
int silent)
@@ -224,22 +165,15 @@ static int se401_get_feature(struct gspca_dev *gspca_dev, u16 selector)
return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS))
- return;
-
/* HDG: this does not seem to do anything on my cam */
- se401_write_req(gspca_dev, SE401_REQ_SET_BRT,
- sd->ctrls[BRIGHTNESS].val, 0);
+ se401_write_req(gspca_dev, SE401_REQ_SET_BRT, val, 0);
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u16 gain = 63 - sd->ctrls[GAIN].val;
+ u16 gain = 63 - val;
/* red color gain */
se401_set_feature(gspca_dev, HV7131_REG_ARCG, gain);
@@ -249,10 +183,10 @@ static void setgain(struct gspca_dev *gspca_dev)
se401_set_feature(gspca_dev, HV7131_REG_ABCG, gain);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val, s32 freq)
{
struct sd *sd = (struct sd *) gspca_dev;
- int integration = sd->ctrls[EXPOSURE].val << 6;
+ int integration = val << 6;
u8 expose_h, expose_m, expose_l;
/* Do this before the set_feature calls, for proper timing wrt
@@ -262,9 +196,9 @@ static void setexposure(struct gspca_dev *gspca_dev)
through so be it */
sd->expo_change_state = EXPO_CHANGED;
- if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
+ if (freq == V4L2_CID_POWER_LINE_FREQUENCY_50HZ)
integration = integration - integration % 106667;
- if (sd->ctrls[FREQ].val == V4L2_CID_POWER_LINE_FREQUENCY_60HZ)
+ if (freq == V4L2_CID_POWER_LINE_FREQUENCY_60HZ)
integration = integration - integration % 88889;
expose_h = (integration >> 16);
@@ -375,15 +309,12 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->bulk = 1;
cam->bulk_size = BULK_SIZE;
cam->bulk_nurbs = 4;
- cam->ctrls = sd->ctrls;
sd->resetlevel = 0x2d; /* Set initial resetlevel */
/* See if the camera supports brightness */
se401_read_req(gspca_dev, SE401_REQ_GET_BRT, 1);
- if (gspca_dev->usb_err) {
- gspca_dev->ctrl_dis = (1 << BRIGHTNESS);
- gspca_dev->usb_err = 0;
- }
+ sd->has_brightness = !gspca_dev->usb_err;
+ gspca_dev->usb_err = 0;
return 0;
}
@@ -442,9 +373,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
se401_set_feature(gspca_dev, SE401_OPERATINGMODE, mode);
- setbrightness(gspca_dev);
- setgain(gspca_dev);
- setexposure(gspca_dev);
se401_set_feature(gspca_dev, HV7131_REG_ARLV, sd->resetlevel);
sd->packet_read = 0;
@@ -666,27 +594,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
sd_pkt_scan_janggu(gspca_dev, data, len);
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- switch (menu->index) {
- case V4L2_CID_POWER_LINE_FREQUENCY_DISABLED:
- strcpy((char *) menu->name, "NoFliker");
- return 0;
- case V4L2_CID_POWER_LINE_FREQUENCY_50HZ:
- strcpy((char *) menu->name, "50 Hz");
- return 0;
- case V4L2_CID_POWER_LINE_FREQUENCY_60HZ:
- strcpy((char *) menu->name, "60 Hz");
- return 0;
- }
- break;
- }
- return -EINVAL;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
{
@@ -714,19 +621,73 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len)
}
#endif
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ setgain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val, sd->freq->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ if (sd->has_brightness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 15);
+ /* max is really 63 but > 50 is not pretty */
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 50, 1, 25);
+ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 32767, 1, 15000);
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(2, &sd->exposure);
+ return 0;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
.dq_callback = sd_dq_callback,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.int_pkt_scan = sd_int_pkt_scan,
#endif
@@ -769,6 +730,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
.pre_reset = sd_pre_reset,
.post_reset = sd_post_reset,
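
se401.c clusters the power-line-frequency menu with exposure because the flicker filter quantises the integration time; thanks to the cluster, changing either control results in one s_ctrl call for the master (exposure) with both values available. A reduced sketch, with a stubbed apply_exposure() standing in for the driver's setexposure():

#include <media/v4l2-ctrls.h>
#include "gspca.h"

/* stubbed stand-in for the driver's setexposure() */
static void apply_exposure(struct gspca_dev *gspca_dev, s32 expo, s32 freq) { }

struct sd {
        struct gspca_dev gspca_dev;     /* !! must be the first item */
        struct {                        /* exposure/freq control cluster */
                struct v4l2_ctrl *exposure;
                struct v4l2_ctrl *freq;
        };
};

static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct gspca_dev *gspca_dev =
                container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
        struct sd *sd = (struct sd *)gspca_dev;

        gspca_dev->usb_err = 0;
        if (!gspca_dev->streaming)
                return 0;

        /* exposure is the cluster master, so a frequency change also
         * arrives here, with sd->freq->val already updated */
        if (ctrl->id == V4L2_CID_EXPOSURE)
                apply_exposure(gspca_dev, sd->exposure->val, sd->freq->val);
        return gspca_dev->usb_err;
}

The cluster is created with v4l2_ctrl_cluster(2, &sd->exposure) after both controls are registered, matching the hunk above.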
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 478533cb1152..03fa3fd940b4 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -40,10 +40,6 @@ struct init_command {
unsigned char to_read; /* length to read. 0 means no reply requested */
};
-/* V4L2 controls supported by the driver */
-static const struct ctrl sd_ctrls[] = {
-};
-
/* How to change the resolution of any of the VGA cams is unknown */
static const struct v4l2_pix_format vga_mode[] = {
{640, 480, V4L2_PIX_FMT_SN9C2028, V4L2_FIELD_NONE,
@@ -695,8 +691,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -734,6 +728,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
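
/*
 * The same conversion repeats for each sub-driver in the rest of this
 * series: the static control descriptor array and its per-control
 * get/set callbacks are dropped in favour of a v4l2_ctrl_handler with a
 * single .s_ctrl operation, and USB failures are recorded in
 * gspca_dev->usb_err and returned from .s_ctrl. Below is a minimal
 * sketch of that pattern; the "xxx" prefix, the single brightness
 * control and the setbrightness() helper are illustrative placeholders,
 * not code from any one driver. Assumes the usual gspca.h and
 * media/v4l2-ctrls.h includes.
 */
static void setbrightness(struct gspca_dev *gspca_dev, s32 val); /* placeholder helper */

static int xxx_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

	gspca_dev->usb_err = 0;

	/* while not streaming the new value is only cached; it is
	 * applied again by the core once streaming starts */
	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	}
	return gspca_dev->usb_err;
}

static const struct v4l2_ctrl_ops xxx_ctrl_ops = {
	.s_ctrl = xxx_s_ctrl,
};

static int xxx_init_controls(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 1);
	v4l2_ctrl_new_std(hdl, &xxx_ctrl_ops,
			V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
	if (hdl->error) {
		pr_err("Could not initialize controls\n");
		return hdl->error;
	}
	return 0;
}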
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index e2bdf8f632f4..fd1f8d2d3b0b 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -56,26 +56,16 @@ MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("GSPCA/SN9C102 USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- GAIN,
- EXPOSURE,
- AUTOGAIN,
- FREQ,
- NCTRLS /* number of controls */
-};
-
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- struct gspca_ctrl ctrls[NCTRLS];
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *plfreq;
atomic_t avg_lum;
int prev_avg_lum;
- int exp_too_low_cnt;
- int exp_too_high_cnt;
+ int exposure_knee;
int header_read;
u8 header[12]; /* Header without sof marker */
@@ -107,24 +97,16 @@ struct sensor_data {
sensor_init_t *sensor_init;
int sensor_init_size;
int flags;
- unsigned ctrl_dis;
__u8 sensor_addr;
};
/* sensor_data flags */
-#define F_GAIN 0x01 /* has gain */
-#define F_SIF 0x02 /* sif or vga */
-#define F_COARSE_EXPO 0x04 /* exposure control is coarse */
+#define F_SIF 0x01 /* sif or vga */
/* priv field of struct v4l2_pix_format flags (do not use low nibble!) */
#define MODE_RAW 0x10 /* raw bayer mode */
#define MODE_REDUCED_SIF 0x20 /* vga mode (320x240 / 160x120) on sif cam */
-/* ctrl_dis helper macros */
-#define NO_EXPO ((1 << EXPOSURE) | (1 << AUTOGAIN))
-#define NO_FREQ (1 << FREQ)
-#define NO_BRIGHTNESS (1 << BRIGHTNESS)
-
#define COMP 0xc7 /* 0x87 //0x07 */
#define COMP1 0xc9 /* 0x89 //0x09 */
@@ -133,12 +115,12 @@ struct sensor_data {
#define SYS_CLK 0x04
-#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
+#define SENS(bridge, sensor, _flags, _sensor_addr) \
{ \
.bridge_init = bridge, \
.sensor_init = sensor, \
.sensor_init_size = sizeof(sensor), \
- .flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
+ .flags = _flags, .sensor_addr = _sensor_addr \
}
/* We calculate the autogain at the end of the transfer of a frame, at this
@@ -147,87 +129,6 @@ struct sensor_data {
the new settings to come into effect before doing any other adjustments. */
#define AUTOGAIN_IGNORE_FRAMES 1
-/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setgain(struct gspca_dev *gspca_dev);
-static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static void setfreq(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setbrightness
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define GAIN_KNEE 230
- .default_value = 127,
- },
- .set_control = setgain
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 1023,
- .step = 1,
- .default_value = 66,
- /* 33 ms / 30 fps (except on PASXXX) */
-#define EXPOSURE_KNEE 200 /* 100 ms / 10 fps (except on PASXXX) */
- .flags = 0,
- },
- .set_control = setexposure
- },
-/* for coarse exposure */
-#define COARSE_EXPOSURE_MIN 2
-#define COARSE_EXPOSURE_MAX 15
-#define COARSE_EXPOSURE_DEF 2 /* 30 fps */
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain (and Exposure)",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- .flags = V4L2_CTRL_FLAG_UPDATE
- },
- .set = sd_setautogain,
- },
-[FREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
-#define FREQ_DEF 0
- .default_value = FREQ_DEF,
- },
- .set_control = setfreq
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -532,25 +433,27 @@ static const __u8 tas5130_sensor_init[][8] = {
};
static const struct sensor_data sensor_data[] = {
-SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
-SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
-SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
-SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
-SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
-SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
- NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
- NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5130, tas5130_sensor_init, F_GAIN,
- NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
+ SENS(initHv7131d, hv7131d_sensor_init, 0, 0),
+ SENS(initHv7131r, hv7131r_sensor_init, 0, 0),
+ SENS(initOv6650, ov6650_sensor_init, F_SIF, 0x60),
+ SENS(initOv7630, ov7630_sensor_init, 0, 0x21),
+ SENS(initPas106, pas106_sensor_init, F_SIF, 0),
+ SENS(initPas202, pas202_sensor_init, 0, 0),
+ SENS(initTas5110c, tas5110c_sensor_init, F_SIF, 0),
+ SENS(initTas5110d, tas5110d_sensor_init, F_SIF, 0),
+ SENS(initTas5130, tas5130_sensor_init, 0, 0),
};
/* get one byte in gspca_dev->usb_buf */
static void reg_r(struct gspca_dev *gspca_dev,
__u16 value)
{
- usb_control_msg(gspca_dev->dev,
+ int res;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+
+ res = usb_control_msg(gspca_dev->dev,
usb_rcvctrlpipe(gspca_dev->dev, 0),
0, /* request */
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -558,6 +461,12 @@ static void reg_r(struct gspca_dev *gspca_dev,
0, /* index */
gspca_dev->usb_buf, 1,
500);
+
+ if (res < 0) {
+ dev_err(gspca_dev->v4l2_dev.dev,
+ "Error reading register %02x: %d\n", value, res);
+ gspca_dev->usb_err = res;
+ }
}
static void reg_w(struct gspca_dev *gspca_dev,
@@ -565,14 +474,13 @@ static void reg_w(struct gspca_dev *gspca_dev,
const __u8 *buffer,
int len)
{
-#ifdef GSPCA_DEBUG
- if (len > USB_BUF_SZ) {
- PDEBUG(D_ERR|D_PACK, "reg_w: buffer overflow");
+ int res;
+
+ if (gspca_dev->usb_err < 0)
return;
- }
-#endif
+
memcpy(gspca_dev->usb_buf, buffer, len);
- usb_control_msg(gspca_dev->dev,
+ res = usb_control_msg(gspca_dev->dev,
usb_sndctrlpipe(gspca_dev->dev, 0),
0x08, /* request */
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
@@ -580,30 +488,48 @@ static void reg_w(struct gspca_dev *gspca_dev,
0, /* index */
gspca_dev->usb_buf, len,
500);
+
+ if (res < 0) {
+ dev_err(gspca_dev->v4l2_dev.dev,
+ "Error writing register %02x: %d\n", value, res);
+ gspca_dev->usb_err = res;
+ }
}
-static int i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
+static void i2c_w(struct gspca_dev *gspca_dev, const __u8 *buffer)
{
int retry = 60;
+ if (gspca_dev->usb_err < 0)
+ return;
+
/* is i2c ready */
reg_w(gspca_dev, 0x08, buffer, 8);
while (retry--) {
+ if (gspca_dev->usb_err < 0)
+ return;
msleep(10);
reg_r(gspca_dev, 0x08);
if (gspca_dev->usb_buf[0] & 0x04) {
- if (gspca_dev->usb_buf[0] & 0x08)
- return -1;
- return 0;
+ if (gspca_dev->usb_buf[0] & 0x08) {
+ dev_err(gspca_dev->v4l2_dev.dev,
+ "i2c write error\n");
+ gspca_dev->usb_err = -EIO;
+ }
+ return;
}
}
- return -1;
+
+ dev_err(gspca_dev->v4l2_dev.dev, "i2c write timeout\n");
+ gspca_dev->usb_err = -EIO;
}
static void i2c_w_vector(struct gspca_dev *gspca_dev,
const __u8 buffer[][8], int len)
{
for (;;) {
+ if (gspca_dev->usb_err < 0)
+ return;
reg_w(gspca_dev, 0x08, *buffer, 8);
len -= 8;
if (len <= 0)
@@ -624,11 +550,10 @@ static void setbrightness(struct gspca_dev *gspca_dev)
/* change reg 0x06 */
i2cOV[1] = sensor_data[sd->sensor].sensor_addr;
- i2cOV[3] = sd->ctrls[BRIGHTNESS].val;
- if (i2c_w(gspca_dev, i2cOV) < 0)
- goto err;
+ i2cOV[3] = sd->brightness->val;
+ i2c_w(gspca_dev, i2cOV);
break;
- }
+ }
case SENSOR_PAS106:
case SENSOR_PAS202: {
__u8 i2cpbright[] =
@@ -642,54 +567,49 @@ static void setbrightness(struct gspca_dev *gspca_dev)
i2cpdoit[2] = 0x13;
}
- if (sd->ctrls[BRIGHTNESS].val < 127) {
+ if (sd->brightness->val < 127) {
/* change reg 0x0b, signreg */
i2cpbright[3] = 0x01;
/* set reg 0x0c, offset */
- i2cpbright[4] = 127 - sd->ctrls[BRIGHTNESS].val;
+ i2cpbright[4] = 127 - sd->brightness->val;
} else
- i2cpbright[4] = sd->ctrls[BRIGHTNESS].val - 127;
+ i2cpbright[4] = sd->brightness->val - 127;
- if (i2c_w(gspca_dev, i2cpbright) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpdoit) < 0)
- goto err;
+ i2c_w(gspca_dev, i2cpbright);
+ i2c_w(gspca_dev, i2cpdoit);
+ break;
+ }
+ default:
break;
- }
}
- return;
-err:
- PDEBUG(D_ERR, "i2c error brightness");
}
-static void setsensorgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 gain = sd->ctrls[GAIN].val;
+ u8 gain = gspca_dev->gain->val;
switch (sd->sensor) {
case SENSOR_HV7131D: {
__u8 i2c[] =
{0xc0, 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x17};
- i2c[3] = 0x3f - (gain / 4);
- i2c[4] = 0x3f - (gain / 4);
- i2c[5] = 0x3f - (gain / 4);
+ i2c[3] = 0x3f - gain;
+ i2c[4] = 0x3f - gain;
+ i2c[5] = 0x3f - gain;
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
+ i2c_w(gspca_dev, i2c);
break;
- }
+ }
case SENSOR_TAS5110C:
case SENSOR_TAS5130CXX: {
__u8 i2c[] =
{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
i2c[4] = 255 - gain;
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
+ i2c_w(gspca_dev, i2c);
break;
- }
+ }
case SENSOR_TAS5110D: {
__u8 i2c[] = {
0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
@@ -703,23 +623,25 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
i2c[3] |= (gain & 0x04) << 3;
i2c[3] |= (gain & 0x02) << 5;
i2c[3] |= (gain & 0x01) << 7;
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
+ i2c_w(gspca_dev, i2c);
break;
- }
-
+ }
case SENSOR_OV6650:
- gain >>= 1;
- /* fall thru */
case SENSOR_OV7630: {
__u8 i2c[] = {0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10};
+ /*
+ * The ov7630's gain is weird, at 32 the gain drops to the
+ * same level as at 16, so skip 32-47 (of the 0-63 scale).
+ */
+ if (sd->sensor == SENSOR_OV7630 && gain >= 32)
+ gain += 16;
+
i2c[1] = sensor_data[sd->sensor].sensor_addr;
- i2c[3] = gain >> 2;
- if (i2c_w(gspca_dev, i2c) < 0)
- goto err;
+ i2c[3] = gain;
+ i2c_w(gspca_dev, i2c);
break;
- }
+ }
case SENSOR_PAS106:
case SENSOR_PAS202: {
__u8 i2cpgain[] =
@@ -737,49 +659,27 @@ static void setsensorgain(struct gspca_dev *gspca_dev)
i2cpdoit[2] = 0x13;
}
- i2cpgain[3] = gain >> 3;
- i2cpcolorgain[3] = gain >> 4;
- i2cpcolorgain[4] = gain >> 4;
- i2cpcolorgain[5] = gain >> 4;
- i2cpcolorgain[6] = gain >> 4;
-
- if (i2c_w(gspca_dev, i2cpgain) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpcolorgain) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpdoit) < 0)
- goto err;
- break;
- }
- }
- return;
-err:
- PDEBUG(D_ERR, "i2c error gain");
-}
-
-static void setgain(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- __u8 gain;
- __u8 buf[3] = { 0, 0, 0 };
+ i2cpgain[3] = gain;
+ i2cpcolorgain[3] = gain >> 1;
+ i2cpcolorgain[4] = gain >> 1;
+ i2cpcolorgain[5] = gain >> 1;
+ i2cpcolorgain[6] = gain >> 1;
- if (sensor_data[sd->sensor].flags & F_GAIN) {
- /* Use the sensor gain to do the actual gain */
- setsensorgain(gspca_dev);
- return;
+ i2c_w(gspca_dev, i2cpgain);
+ i2c_w(gspca_dev, i2cpcolorgain);
+ i2c_w(gspca_dev, i2cpdoit);
+ break;
}
-
- if (sd->bridge == BRIDGE_103) {
- gain = sd->ctrls[GAIN].val >> 1;
- buf[0] = gain; /* Red */
- buf[1] = gain; /* Green */
- buf[2] = gain; /* Blue */
- reg_w(gspca_dev, 0x05, buf, 3);
- } else {
- gain = sd->ctrls[GAIN].val >> 4;
- buf[0] = gain << 4 | gain; /* Red and blue */
- buf[1] = gain; /* Green */
- reg_w(gspca_dev, 0x10, buf, 2);
+ default:
+ if (sd->bridge == BRIDGE_103) {
+ u8 buf[3] = { gain, gain, gain }; /* R, G, B */
+ reg_w(gspca_dev, 0x05, buf, 3);
+ } else {
+ u8 buf[2];
+ buf[0] = gain << 4 | gain; /* Red and blue */
+ buf[1] = gain; /* Green */
+ reg_w(gspca_dev, 0x10, buf, 2);
+ }
}
}
@@ -792,31 +692,24 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* Note the datasheet wrongly says line mode exposure uses reg
0x26 and 0x27, testing has shown 0x25 + 0x26 */
__u8 i2c[] = {0xc0, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x17};
- /* The HV7131D's exposure goes from 0 - 65535, we scale our
- exposure of 0-1023 to 0-6138. There are 2 reasons for this:
- 1) This puts our exposure knee of 200 at approx the point
- where the framerate starts dropping
- 2) At 6138 the framerate has already dropped to 2 fps,
- going any lower makes little sense */
- u16 reg = sd->ctrls[EXPOSURE].val * 6;
+ u16 reg = gspca_dev->exposure->val;
i2c[3] = reg >> 8;
i2c[4] = reg & 0xff;
- if (i2c_w(gspca_dev, i2c) != 0)
- goto err;
+ i2c_w(gspca_dev, i2c);
break;
- }
+ }
case SENSOR_TAS5110C:
case SENSOR_TAS5110D: {
/* register 19's high nibble contains the sn9c10x clock divider
   The high nibble configures the number of fps according to the
formula: 60 / high_nibble. With a maximum of 30 fps */
- u8 reg = sd->ctrls[EXPOSURE].val;
+ u8 reg = gspca_dev->exposure->val;
reg = (reg << 4) | 0x0b;
reg_w(gspca_dev, 0x19, &reg, 1);
break;
- }
+ }
case SENSOR_OV6650:
case SENSOR_OV7630: {
/* The ov6650 / ov7630 have 2 registers which both influence
@@ -848,7 +741,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
} else
reg10_max = 0x41;
- reg11 = (15 * sd->ctrls[EXPOSURE].val + 999) / 1000;
+ reg11 = (15 * gspca_dev->exposure->val + 999) / 1000;
if (reg11 < 1)
reg11 = 1;
else if (reg11 > 16)
@@ -861,16 +754,16 @@ static void setexposure(struct gspca_dev *gspca_dev)
reg11 = 4;
/* frame exposure time in ms = 1000 * reg11 / 30 ->
- reg10 = (sd->ctrls[EXPOSURE].val / 2) * reg10_max
+ reg10 = (gspca_dev->exposure->val / 2) * reg10_max
/ (1000 * reg11 / 30) */
- reg10 = (sd->ctrls[EXPOSURE].val * 15 * reg10_max)
+ reg10 = (gspca_dev->exposure->val * 15 * reg10_max)
/ (1000 * reg11);
/* Don't allow this to get below 10 when using autogain, the
steps become very large (relatively) when below 10 causing
 the image to oscillate from much too dark, to much too bright
and back again. */
- if (sd->ctrls[AUTOGAIN].val && reg10 < 10)
+ if (gspca_dev->autogain->val && reg10 < 10)
reg10 = 10;
else if (reg10 > reg10_max)
reg10 = reg10_max;
@@ -884,12 +777,11 @@ static void setexposure(struct gspca_dev *gspca_dev)
if (sd->reg11 == reg11)
i2c[0] = 0xa0;
- if (i2c_w(gspca_dev, i2c) == 0)
+ i2c_w(gspca_dev, i2c);
+ if (gspca_dev->usb_err == 0)
sd->reg11 = reg11;
- else
- goto err;
break;
- }
+ }
case SENSOR_PAS202: {
__u8 i2cpframerate[] =
{0xb0, 0x40, 0x04, 0x00, 0x00, 0x00, 0x00, 0x16};
@@ -909,28 +801,25 @@ static void setexposure(struct gspca_dev *gspca_dev)
frame exposure times (like we are doing with the ov chips),
as that sometimes leads to jumps in the exposure control,
which are bad for auto exposure. */
- if (sd->ctrls[EXPOSURE].val < 200) {
- i2cpexpo[3] = 255 - (sd->ctrls[EXPOSURE].val * 255)
+ if (gspca_dev->exposure->val < 200) {
+ i2cpexpo[3] = 255 - (gspca_dev->exposure->val * 255)
/ 200;
framerate_ctrl = 500;
} else {
/* The PAS202's exposure control goes from 0 - 4095,
but anything below 500 causes vsync issues, so scale
our 200-1023 to 500-4095 */
- framerate_ctrl = (sd->ctrls[EXPOSURE].val - 200)
+ framerate_ctrl = (gspca_dev->exposure->val - 200)
* 1000 / 229 + 500;
}
i2cpframerate[3] = framerate_ctrl >> 6;
i2cpframerate[4] = framerate_ctrl & 0x3f;
- if (i2c_w(gspca_dev, i2cpframerate) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpexpo) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpdoit) < 0)
- goto err;
+ i2c_w(gspca_dev, i2cpframerate);
+ i2c_w(gspca_dev, i2cpexpo);
+ i2c_w(gspca_dev, i2cpdoit);
break;
- }
+ }
case SENSOR_PAS106: {
__u8 i2cpframerate[] =
{0xb1, 0x40, 0x03, 0x00, 0x00, 0x00, 0x00, 0x14};
@@ -942,46 +831,40 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* For values below 150 use partial frame exposure, above
that use framerate ctrl */
- if (sd->ctrls[EXPOSURE].val < 150) {
- i2cpexpo[3] = 150 - sd->ctrls[EXPOSURE].val;
+ if (gspca_dev->exposure->val < 150) {
+ i2cpexpo[3] = 150 - gspca_dev->exposure->val;
framerate_ctrl = 300;
} else {
/* The PAS106's exposure control goes from 0 - 4095,
but anything below 300 causes vsync issues, so scale
our 150-1023 to 300-4095 */
- framerate_ctrl = (sd->ctrls[EXPOSURE].val - 150)
+ framerate_ctrl = (gspca_dev->exposure->val - 150)
* 1000 / 230 + 300;
}
i2cpframerate[3] = framerate_ctrl >> 4;
i2cpframerate[4] = framerate_ctrl & 0x0f;
- if (i2c_w(gspca_dev, i2cpframerate) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpexpo) < 0)
- goto err;
- if (i2c_w(gspca_dev, i2cpdoit) < 0)
- goto err;
+ i2c_w(gspca_dev, i2cpframerate);
+ i2c_w(gspca_dev, i2cpexpo);
+ i2c_w(gspca_dev, i2cpdoit);
+ break;
+ }
+ default:
break;
- }
}
- return;
-err:
- PDEBUG(D_ERR, "i2c error exposure");
}
static void setfreq(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- switch (sd->sensor) {
- case SENSOR_OV6650:
- case SENSOR_OV7630: {
+ if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630) {
/* Framerate adjust register for artificial light 50 hz flicker
compensation, for the ov6650 this is identical to ov6630
0x2b register, see ov6630 datasheet.
0x4f / 0x8a -> (30 fps -> 25 fps), 0x00 -> no adjustment */
__u8 i2c[] = {0xa0, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10};
- switch (sd->ctrls[FREQ].val) {
+ switch (sd->plfreq->val) {
default:
/* case 0: * no filter*/
/* case 2: * 60 hz */
@@ -993,25 +876,17 @@ static void setfreq(struct gspca_dev *gspca_dev)
break;
}
i2c[1] = sensor_data[sd->sensor].sensor_addr;
- if (i2c_w(gspca_dev, i2c) < 0)
- PDEBUG(D_ERR, "i2c error setfreq");
- break;
- }
+ i2c_w(gspca_dev, i2c);
}
}
-#define WANT_REGULAR_AUTOGAIN
-#define WANT_COARSE_EXPO_AUTOGAIN
-#include "autogain_functions.h"
-
static void do_autogain(struct gspca_dev *gspca_dev)
{
- int deadzone, desired_avg_lum, result;
struct sd *sd = (struct sd *) gspca_dev;
- int avg_lum = atomic_read(&sd->avg_lum);
+ int deadzone, desired_avg_lum, avg_lum;
- if ((gspca_dev->ctrl_dis & (1 << AUTOGAIN)) ||
- avg_lum == -1 || !sd->ctrls[AUTOGAIN].val)
+ avg_lum = atomic_read(&sd->avg_lum);
+ if (avg_lum == -1)
return;
if (sd->autogain_ignore_frames > 0) {
@@ -1030,22 +905,18 @@ static void do_autogain(struct gspca_dev *gspca_dev)
desired_avg_lum = 13000;
}
- if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
- result = coarse_grained_expo_autogain(gspca_dev, avg_lum,
- sd->ctrls[BRIGHTNESS].val
- * desired_avg_lum / 127,
- deadzone);
- else
- result = auto_gain_n_exposure(gspca_dev, avg_lum,
- sd->ctrls[BRIGHTNESS].val
- * desired_avg_lum / 127,
- deadzone, GAIN_KNEE, EXPOSURE_KNEE);
-
- if (result) {
- PDEBUG(D_FRAM, "autogain: gain changed: gain: %d expo: %d",
- (int) sd->ctrls[GAIN].val,
- (int) sd->ctrls[EXPOSURE].val);
- sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
+ if (sd->brightness)
+ desired_avg_lum = sd->brightness->val * desired_avg_lum / 127;
+
+ if (gspca_dev->exposure->maximum < 500) {
+ if (gspca_coarse_grained_expo_autogain(gspca_dev, avg_lum,
+ desired_avg_lum, deadzone))
+ sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
+ } else {
+ int gain_knee = gspca_dev->gain->maximum * 9 / 10;
+ if (gspca_expo_autogain(gspca_dev, avg_lum, desired_avg_lum,
+ deadzone, gain_knee, sd->exposure_knee))
+ sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
}
}
@@ -1064,14 +935,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
sd->sensor = id->driver_info >> 8;
sd->bridge = id->driver_info & 0xff;
- gspca_dev->ctrl_dis = sensor_data[sd->sensor].ctrl_dis;
-#if AUTOGAIN_DEF
- if (!(gspca_dev->ctrl_dis & (1 << AUTOGAIN)))
- gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE);
-#endif
-
cam = &gspca_dev->cam;
- cam->ctrls = sd->ctrls;
if (!(sensor_data[sd->sensor].flags & F_SIF)) {
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
@@ -1087,18 +951,143 @@ static int sd_config(struct gspca_dev *gspca_dev,
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
 const __u8 stop = 0x09; /* Disable stream, turn off the LED */
- if (sensor_data[sd->sensor].flags & F_COARSE_EXPO) {
- sd->ctrls[EXPOSURE].min = COARSE_EXPOSURE_MIN;
- sd->ctrls[EXPOSURE].max = COARSE_EXPOSURE_MAX;
- sd->ctrls[EXPOSURE].def = COARSE_EXPOSURE_DEF;
- if (sd->ctrls[EXPOSURE].val > COARSE_EXPOSURE_MAX)
- sd->ctrls[EXPOSURE].val = COARSE_EXPOSURE_DEF;
+ reg_w(gspca_dev, 0x01, &stop, 1);
+
+ return gspca_dev->usb_err;
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (ctrl->id == V4L2_CID_AUTOGAIN && ctrl->is_new && ctrl->val) {
+ /* when switching to autogain set defaults to make sure
+ we are on a valid point of the autogain gain /
+ exposure knee graph, and give this change time to
+ take effect before doing autogain. */
+ gspca_dev->gain->val = gspca_dev->gain->default_value;
+ gspca_dev->exposure->val = gspca_dev->exposure->default_value;
+ sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
}
- reg_w(gspca_dev, 0x01, &stop, 1);
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (gspca_dev->exposure->is_new || (ctrl->is_new && ctrl->val))
+ setexposure(gspca_dev);
+ if (gspca_dev->gain->is_new || (ctrl->is_new && ctrl->val))
+ setgain(gspca_dev);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setfreq(gspca_dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+/* this function is called at probe time */
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+
+ if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630 ||
+ sd->sensor == SENSOR_PAS106 || sd->sensor == SENSOR_PAS202)
+ sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+
+ /* Gain range is sensor dependent */
+ switch (sd->sensor) {
+ case SENSOR_OV6650:
+ case SENSOR_PAS106:
+ case SENSOR_PAS202:
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 31, 1, 15);
+ break;
+ case SENSOR_OV7630:
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 47, 1, 31);
+ break;
+ case SENSOR_HV7131D:
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 63, 1, 31);
+ break;
+ case SENSOR_TAS5110C:
+ case SENSOR_TAS5110D:
+ case SENSOR_TAS5130CXX:
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 127);
+ break;
+ default:
+ if (sd->bridge == BRIDGE_103) {
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 127, 1, 63);
+ } else {
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 15, 1, 7);
+ }
+ }
+
+ /* Exposure range is sensor dependent, and not all have exposure */
+ switch (sd->sensor) {
+ case SENSOR_HV7131D:
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 8191, 1, 482);
+ sd->exposure_knee = 964;
+ break;
+ case SENSOR_OV6650:
+ case SENSOR_OV7630:
+ case SENSOR_PAS106:
+ case SENSOR_PAS202:
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 1023, 1, 66);
+ sd->exposure_knee = 200;
+ break;
+ case SENSOR_TAS5110C:
+ case SENSOR_TAS5110D:
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 2, 15, 1, 2);
+ break;
+ }
+
+ if (gspca_dev->exposure) {
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ }
+
+ if (sd->sensor == SENSOR_OV6650 || sd->sensor == SENSOR_OV7630)
+ sd->plfreq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ if (gspca_dev->autogain)
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
return 0;
}
@@ -1242,10 +1231,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->frames_to_drop = 0;
sd->autogain_ignore_frames = 0;
- sd->exp_too_high_cnt = 0;
- sd->exp_too_low_cnt = 0;
+ gspca_dev->exp_too_high_cnt = 0;
+ gspca_dev->exp_too_low_cnt = 0;
atomic_set(&sd->avg_lum, -1);
- return 0;
+ return gspca_dev->usb_err;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -1387,37 +1376,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AUTOGAIN].val = val;
- sd->exp_too_high_cnt = 0;
- sd->exp_too_low_cnt = 0;
-
- /* when switching to autogain set defaults to make sure
- we are on a valid point of the autogain gain /
- exposure knee graph, and give this change time to
- take effect before doing autogain. */
- if (sd->ctrls[AUTOGAIN].val
- && !(sensor_data[sd->sensor].flags & F_COARSE_EXPO)) {
- sd->ctrls[EXPOSURE].val = sd->ctrls[EXPOSURE].def;
- sd->ctrls[GAIN].val = sd->ctrls[GAIN].def;
- if (gspca_dev->streaming) {
- sd->autogain_ignore_frames = AUTOGAIN_IGNORE_FRAMES;
- setexposure(gspca_dev);
- setgain(gspca_dev);
- }
- }
-
- if (sd->ctrls[AUTOGAIN].val)
- gspca_dev->ctrl_inac = (1 << GAIN) | (1 << EXPOSURE);
- else
- gspca_dev->ctrl_inac = 0;
-
- return 0;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
@@ -1461,10 +1419,9 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -1529,6 +1486,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
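
/*
 * A note on the clustering calls in the sonixb conversion above:
 * v4l2_ctrl_cluster() and v4l2_ctrl_auto_cluster() take the address of
 * the first of N consecutive struct v4l2_ctrl pointers, cluster master
 * first, so those pointers must be adjacent members of the owning
 * structure. For an auto cluster the framework greys out the manual
 * controls while the master (autogain here) is on, and .s_ctrl is then
 * called once per cluster update with the master control, which is why
 * the V4L2_CID_AUTOGAIN case re-applies both exposure and gain. A
 * hedged sketch of the layout requirement; field names are illustrative
 * and not taken from the driver.
 */
struct my_sd {
	struct gspca_dev gspca_dev;	/* must be the first member */
	/* adjacent pointers: cluster master first, then the members */
	struct v4l2_ctrl *autogain;
	struct v4l2_ctrl *exposure;
	struct v4l2_ctrl *gain;
};

	/* after the three v4l2_ctrl_new_std() calls succeed:
	 * 0     -> master value that selects manual mode
	 * false -> manual controls are not volatile (no hardware readback) */
	v4l2_ctrl_auto_cluster(3, &sd->autogain, 0, false);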
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index f38faa9b37c3..150b2df40f7f 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3199,6 +3199,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 070b9c33b517..14d635277d71 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -33,102 +33,11 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
- u8 contrast;
- u8 hue;
- u8 color;
- u8 sharpness;
-
u8 pkt_seq;
u8 jpeg_hdr[JPEG_HDR_SZ];
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolor(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolor(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 8,
- .step = 1,
-#define CONTRAST_DEF 1
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define HUE_DEF 0
- .default_value = HUE_DEF,
- },
- .set = sd_sethue,
- .get = sd_gethue,
- },
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 8,
- .step = 1,
-#define COLOR_DEF 1
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolor,
- .get = sd_getcolor,
- },
- {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define SHARPNESS_DEF 0
- .default_value = SHARPNESS_DEF,
- },
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
/* (does not work correctly)
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
@@ -259,58 +168,40 @@ static void wait_status_1(struct gspca_dev *gspca_dev)
gspca_dev->usb_err = -ETIME;
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_wb(gspca_dev, 0xc0, 0x0000, 0x00c0, sd->brightness);
+ reg_wb(gspca_dev, 0xc0, 0x0000, 0x00c0, val);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_wb(gspca_dev, 0xc1, 0x0000, 0x00c1, sd->contrast);
+ reg_wb(gspca_dev, 0xc1, 0x0000, 0x00c1, val);
}
-static void sethue(struct gspca_dev *gspca_dev)
+static void sethue(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_wb(gspca_dev, 0xc2, 0x0000, 0x0000, sd->hue);
+ reg_wb(gspca_dev, 0xc2, 0x0000, 0x0000, val);
}
-static void setcolor(struct gspca_dev *gspca_dev)
+static void setcolor(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_wb(gspca_dev, 0xc3, 0x0000, 0x00c3, sd->color);
+ reg_wb(gspca_dev, 0xc3, 0x0000, 0x00c3, val);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_wb(gspca_dev, 0xc4, 0x0000, 0x00c4, sd->sharpness);
+ reg_wb(gspca_dev, 0xc4, 0x0000, 0x00c4, val);
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
gspca_dev->cam.npkt = 128; /* number of packets per ISOC message */
/*fixme: 256 in ms-win traces*/
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->hue = HUE_DEF;
- sd->color = COLOR_DEF;
- sd->sharpness = SHARPNESS_DEF;
-
return 0;
}
@@ -370,14 +261,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* the JPEG quality shall be 85% */
jpeg_set_qual(sd->jpeg_hdr, 85);
- /* set the controls */
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- sethue(gspca_dev);
- setcolor(gspca_dev);
- setsharpness(gspca_dev);
-
- msleep(5);
reg_r(gspca_dev, 0x00, 0x2520, 1);
msleep(8);
@@ -457,103 +340,70 @@ err:
gspca_dev->last_packet_type = DISCARD_PACKET;
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming)
- sethue(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
- return 0;
-}
-
-static int sd_setcolor(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->color = val;
- if (gspca_dev->streaming)
- setcolor(gspca_dev);
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ sethue(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolor(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ }
return gspca_dev->usb_err;
}
-static int sd_getcolor(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->color;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(gspca_dev);
- return gspca_dev->usb_err;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 8, 1, 1);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, 0, 255, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 8, 1, 1);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 255, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
@@ -587,6 +437,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 103984708c77..25cb68d0556d 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -30,18 +30,12 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA500 USB Camera Driver");
MODULE_LICENSE("GPL");
+#define QUALITY 85
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
- u8 quality;
-#define QUALITY_MIN 70
-#define QUALITY_MAX 95
-#define QUALITY_DEF 85
-
char subtype;
#define AgfaCl20 0
#define AiptekPocketDV 1
@@ -62,59 +56,6 @@ struct sd {
u8 jpeg_hdr[JPEG_HDR_SZ];
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
-#define CONTRAST_DEF 31
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
-#define COLOR_DEF 31
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -641,10 +582,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
}
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->quality = QUALITY_DEF;
return 0;
}
@@ -673,7 +610,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x22); /* JPEG 411 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, QUALITY);
if (sd->subtype == LogitechClickSmart310) {
xmult = 0x16;
@@ -934,122 +871,79 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
reg_w(gspca_dev, 0x00, 0x8167,
- (__u8) (sd->brightness - 128));
+ (__u8) (val - 128));
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w(gspca_dev, 0x00, 0x8168, sd->contrast);
+ reg_w(gspca_dev, 0x00, 0x8168, val);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w(gspca_dev, 0x00, 0x8169, sd->colors);
+ reg_w(gspca_dev, 0x00, 0x8169, val);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 63, 1, 31);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 63, 1, 31);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* -- module initialisation -- */
@@ -1089,6 +983,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 9c16821addd4..3b7f777785b4 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -49,91 +49,6 @@ struct sd {
#define ViewQuestM318B 6
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define MY_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define MY_CONTRAST 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 64725,
- .step = 1,
- .default_value = 64725,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define MY_COLOR 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
- .default_value = 20,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-#define MY_BLUE_BALANCE 3
- {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- },
- .set = sd_setblue_balance,
- .get = sd_getblue_balance,
- },
-#define MY_RED_BALANCE 4
- {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
- .default_value = 0,
- },
- .set = sd_setred_balance,
- .get = sd_getred_balance,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA501, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -1878,42 +1793,32 @@ static int write_vector(struct gspca_dev *gspca_dev,
return 0;
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x12, sd->brightness);
+ reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x12, val);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
reg_write(gspca_dev->dev, 0x00, 0x00,
- (sd->contrast >> 8) & 0xff);
+ (val >> 8) & 0xff);
reg_write(gspca_dev->dev, 0x00, 0x01,
- sd->contrast & 0xff);
+ val & 0xff);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x0c, sd->colors);
+ reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x0c, val);
}
-static void setblue_balance(struct gspca_dev *gspca_dev)
+static void setblue_balance(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x11, sd->blue_balance);
+ reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x11, val);
}
-static void setred_balance(struct gspca_dev *gspca_dev)
+static void setred_balance(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x13, sd->red_balance);
+ reg_write(gspca_dev->dev, SPCA501_REG_CCDSP, 0x13, val);
}
/* this function is called at probe time */
@@ -1927,9 +1832,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
sd->subtype = id->driver_info;
- sd->brightness = sd_ctrls[MY_BRIGHTNESS].qctrl.default_value;
- sd->contrast = sd_ctrls[MY_CONTRAST].qctrl.default_value;
- sd->colors = sd_ctrls[MY_COLOR].qctrl.default_value;
return 0;
}
@@ -2008,13 +1910,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
}
reg_write(dev, SPCA501_REG_CTLRL, 0x01, 0x02);
- /* HDG atleast the Intel CreateAndShare needs to have one of its
- * brightness / contrast / color set otherwise it assumes what seems
- * max contrast. Note that strange enough setting any of these is
- * enough to fix the max contrast problem, to be sure we set all 3 */
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setcolors(gspca_dev);
return 0;
}
@@ -2053,103 +1948,70 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->usb_err = 0;
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setblue_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blue_balance = val;
- if (gspca_dev->streaming)
- setblue_balance(gspca_dev);
- return 0;
-}
-
-static int sd_getblue_balance(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->blue_balance;
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ setblue_balance(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ setred_balance(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_setred_balance(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->red_balance = val;
- if (gspca_dev->streaming)
- setred_balance(gspca_dev);
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getred_balance(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->red_balance;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 64725, 1, 64725);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 63, 1, 20);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 127, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
@@ -2185,6 +2047,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 1320f35e39f2..bc7d67c3cb04 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -33,34 +33,11 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
-
u8 subtype;
#define IntelPCCameraPro 0
#define Nxultra 1
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 127
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -633,7 +610,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode);
else /* no 640x480 for IntelPCCameraPro */
cam->nmodes = ARRAY_SIZE(vga_mode) - 1;
- sd->brightness = BRIGHTNESS_DEF;
return 0;
}
@@ -651,11 +627,8 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 brightness = sd->brightness;
-
reg_write(gspca_dev->dev, 0x05, 0x00, (255 - brightness) >> 6);
reg_write(gspca_dev->dev, 0x05, 0x01, (255 - brightness) << 2);
}
@@ -706,13 +679,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_write(dev, SPCA50X_REG_COMPRESS, 0x06, mode_tb[mode][1]);
reg_write(dev, SPCA50X_REG_COMPRESS, 0x07, mode_tb[mode][2]);
- ret = reg_write(dev, SPCA50X_REG_USB,
+ return reg_write(dev, SPCA50X_REG_USB,
SPCA50X_USB_CTRL,
SPCA50X_CUSB_ENABLE);
-
- setbrightness(gspca_dev);
-
- return ret;
}
static void sd_stopN(struct gspca_dev *gspca_dev)
@@ -756,30 +725,49 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
- *val = sd->brightness;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
+ .init_controls = sd_init_controls,
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
@@ -812,6 +800,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 54eed87672d2..bab01c86c315 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -33,83 +33,10 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- unsigned char brightness;
- unsigned char contrast;
- unsigned char colors;
- unsigned char hue;
char norme;
char channel;
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define SD_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x80,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define SD_CONTRAST 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x47,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define SD_COLOR 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0x40,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-#define SD_HUE 3
- {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
- .default_value = 0,
- },
- .set = sd_sethue,
- .get = sd_gethue,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -281,16 +208,11 @@ static void spca506_Setsize(struct gspca_dev *gspca_dev, __u16 code,
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
cam->cam_mode = vga_mode;
cam->nmodes = ARRAY_SIZE(vga_mode);
- sd->brightness = sd_ctrls[SD_BRIGHTNESS].qctrl.default_value;
- sd->contrast = sd_ctrls[SD_CONTRAST].qctrl.default_value;
- sd->colors = sd_ctrls[SD_COLOR].qctrl.default_value;
- sd->hue = sd_ctrls[SD_HUE].qctrl.default_value;
return 0;
}
@@ -564,128 +486,100 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
spca506_Initi2c(gspca_dev);
- spca506_WriteI2c(gspca_dev, sd->brightness, SAA7113_bright);
+ spca506_WriteI2c(gspca_dev, val, SAA7113_bright);
spca506_WriteI2c(gspca_dev, 0x01, 0x09);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
spca506_Initi2c(gspca_dev);
- spca506_WriteI2c(gspca_dev, sd->contrast, SAA7113_contrast);
+ spca506_WriteI2c(gspca_dev, val, SAA7113_contrast);
spca506_WriteI2c(gspca_dev, 0x01, 0x09);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
spca506_Initi2c(gspca_dev);
- spca506_WriteI2c(gspca_dev, sd->colors, SAA7113_saturation);
+ spca506_WriteI2c(gspca_dev, val, SAA7113_saturation);
spca506_WriteI2c(gspca_dev, 0x01, 0x09);
}
-static void sethue(struct gspca_dev *gspca_dev)
+static void sethue(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
spca506_Initi2c(gspca_dev);
- spca506_WriteI2c(gspca_dev, sd->hue, SAA7113_hue);
+ spca506_WriteI2c(gspca_dev, val, SAA7113_hue);
spca506_WriteI2c(gspca_dev, 0x01, 0x09);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ sethue(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming)
- sethue(gspca_dev);
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 0x47);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 0x40);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, 0, 255, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x06e1, 0xa190)},
/*fixme: may be IntelPCCameraPro BRIDGE_SPCA505
{USB_DEVICE(0x0733, 0x0430)}, */
@@ -711,6 +605,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
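
The old per-control table fields carry over one-to-one into the v4l2_ctrl_new_std() arguments, in the order (handler, ops, id, minimum, maximum, step, default_value), which is why values such as the spca506 contrast default of 0x47 reappear unchanged above. A hedged sketch; example_add_contrast() is an illustrative wrapper, not part of the patch:

#include <media/v4l2-ctrls.h>

static int example_add_contrast(struct v4l2_ctrl_handler *hdl,
				const struct v4l2_ctrl_ops *ops)
{
	/* v4l2_ctrl_new_std(handler, ops, id, min, max, step, def) */
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, 0, 0xff, 1, 0x47);
	return hdl->error;	/* allocation/validation errors accumulate here */
}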
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index df4e16996461..1286b4170b88 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -32,8 +32,6 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u8 brightness;
-
u8 subtype;
#define CreativeVista 0
#define HamaUSBSightcam 1
@@ -43,27 +41,6 @@ struct sd {
#define ViewQuestVQ110 5
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-};
-
static const struct v4l2_pix_format sif_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
.bytesperline = 160,
@@ -1411,7 +1388,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(sif_mode);
sd->subtype = id->driver_info;
- sd->brightness = BRIGHTNESS_DEF;
init_data = init_data_tb[sd->subtype];
return write_vector(gspca_dev, init_data);
@@ -1471,11 +1447,8 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u8 brightness = sd->brightness;
-
/* MX seem contrast */
reg_write(gspca_dev->dev, 0x8651, brightness);
reg_write(gspca_dev->dev, 0x8652, brightness);
@@ -1483,31 +1456,50 @@ static void setbrightness(struct gspca_dev *gspca_dev)
reg_write(gspca_dev->dev, 0x8654, brightness);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
- *val = sd->brightness;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -1541,6 +1533,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
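
With .init_controls wired into struct sd_desc, the sub-drivers no longer call setbrightness() and friends from sd_start(); the assumption throughout this series is that the gspca core applies the cached control values when streaming begins, roughly equivalent to the call below. Illustration only; example_apply_defaults() is not a symbol from the patch:

#include <media/v4l2-ctrls.h>
#include "gspca.h"

static int example_apply_defaults(struct gspca_dev *gspca_dev)
{
	/* push every cached control value to the hardware in one pass */
	return v4l2_ctrl_handler_setup(&gspca_dev->ctrl_handler);
}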
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 4a5f209ce719..cfe71dd6747d 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -31,39 +31,17 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA561 USB Camera Driver");
MODULE_LICENSE("GPL");
+#define EXPOSURE_MAX (2047 + 325)
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- __u16 exposure; /* rev12a only */
-#define EXPOSURE_MIN 1
-#define EXPOSURE_DEF 700 /* == 10 fps */
-#define EXPOSURE_MAX (2047 + 325) /* see setexposure */
-
- __u8 contrast; /* rev72a only */
-#define CONTRAST_MIN 0x00
-#define CONTRAST_DEF 0x20
-#define CONTRAST_MAX 0x3f
-
- __u8 brightness; /* rev72a only */
-#define BRIGHTNESS_MIN 0
-#define BRIGHTNESS_DEF 0x20
-#define BRIGHTNESS_MAX 0x3f
-
- __u8 white;
-#define HUE_MIN 1
-#define HUE_DEF 0x40
-#define HUE_MAX 0x7f
-
- __u8 autogain;
-#define AUTOGAIN_MIN 0
-#define AUTOGAIN_DEF 1
-#define AUTOGAIN_MAX 1
-
- __u8 gain; /* rev12a only */
-#define GAIN_MIN 0
-#define GAIN_DEF 63
-#define GAIN_MAX 255
+ struct { /* hue/contrast control cluster */
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *hue;
+ };
+ struct v4l2_ctrl *autogain;
#define EXPO12A_DEF 3
__u8 expo12a; /* expo/gain? for rev 12a */
@@ -461,12 +439,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = sif_072a_mode;
cam->nmodes = ARRAY_SIZE(sif_072a_mode);
}
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->white = HUE_DEF;
- sd->exposure = EXPOSURE_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->gain = GAIN_DEF;
sd->expo12a = EXPO12A_DEF;
return 0;
}
@@ -491,66 +463,49 @@ static int sd_init_72a(struct gspca_dev *gspca_dev)
return 0;
}
-/* rev 72a only */
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
struct usb_device *dev = gspca_dev->dev;
- __u8 value;
+ __u16 reg;
- value = sd->brightness;
+ if (sd->chip_revision == Rev012A)
+ reg = 0x8610;
+ else
+ reg = 0x8611;
- /* offsets for white balance */
- reg_w_val(dev, 0x8611, value); /* R */
- reg_w_val(dev, 0x8612, value); /* Gr */
- reg_w_val(dev, 0x8613, value); /* B */
- reg_w_val(dev, 0x8614, value); /* Gb */
+ reg_w_val(dev, reg + 0, val); /* R */
+ reg_w_val(dev, reg + 1, val); /* Gr */
+ reg_w_val(dev, reg + 2, val); /* B */
+ reg_w_val(dev, reg + 3, val); /* Gb */
}
-static void setwhite(struct gspca_dev *gspca_dev)
+static void setwhite(struct gspca_dev *gspca_dev, s32 white, s32 contrast)
{
struct sd *sd = (struct sd *) gspca_dev;
- __u16 white;
+ struct usb_device *dev = gspca_dev->dev;
__u8 blue, red;
__u16 reg;
/* try to emulate MS-win as possible */
- white = sd->white;
red = 0x20 + white * 3 / 8;
blue = 0x90 - white * 5 / 8;
if (sd->chip_revision == Rev012A) {
reg = 0x8614;
} else {
reg = 0x8651;
- red += sd->contrast - 0x20;
- blue += sd->contrast - 0x20;
+ red += contrast - 0x20;
+ blue += contrast - 0x20;
+ reg_w_val(dev, 0x8652, contrast + 0x20); /* Gr */
+ reg_w_val(dev, 0x8654, contrast + 0x20); /* Gb */
}
- reg_w_val(gspca_dev->dev, reg, red);
- reg_w_val(gspca_dev->dev, reg + 2, blue);
-}
-
-static void setcontrast(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- struct usb_device *dev = gspca_dev->dev;
- __u8 value;
-
- if (sd->chip_revision != Rev072A)
- return;
- value = sd->contrast + 0x20;
-
- /* gains for white balance */
- setwhite(gspca_dev);
-/* reg_w_val(dev, 0x8651, value); * R - done by setwhite */
- reg_w_val(dev, 0x8652, value); /* Gr */
-/* reg_w_val(dev, 0x8653, value); * B - done by setwhite */
- reg_w_val(dev, 0x8654, value); /* Gb */
+ reg_w_val(dev, reg, red);
+ reg_w_val(dev, reg + 2, blue);
}
/* rev 12a only */
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
int i, expo = 0;
/* Register 0x8309 controls exposure for the spca561,
@@ -572,8 +527,8 @@ static void setexposure(struct gspca_dev *gspca_dev)
int table[] = { 0, 450, 550, 625, EXPOSURE_MAX };
for (i = 0; i < ARRAY_SIZE(table) - 1; i++) {
- if (sd->exposure <= table[i + 1]) {
- expo = sd->exposure - table[i];
+ if (val <= table[i + 1]) {
+ expo = val - table[i];
if (i)
expo += 300;
expo |= i << 11;
@@ -587,29 +542,27 @@ static void setexposure(struct gspca_dev *gspca_dev)
}
/* rev 12a only */
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
/* gain reg low 6 bits 0-63 gain, bit 6 and 7, both double the
sensitivity when set, so 31 + one of them set == 63, and 15
with both of them set == 63 */
- if (sd->gain < 64)
- gspca_dev->usb_buf[0] = sd->gain;
- else if (sd->gain < 128)
- gspca_dev->usb_buf[0] = (sd->gain / 2) | 0x40;
+ if (val < 64)
+ gspca_dev->usb_buf[0] = val;
+ else if (val < 128)
+ gspca_dev->usb_buf[0] = (val / 2) | 0x40;
else
- gspca_dev->usb_buf[0] = (sd->gain / 4) | 0xc0;
+ gspca_dev->usb_buf[0] = (val / 4) | 0xc0;
gspca_dev->usb_buf[1] = 0;
reg_w_buf(gspca_dev, 0x8335, 2);
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->autogain)
+ if (val)
sd->ag_cnt = AG_CNT_START;
else
sd->ag_cnt = -1;
@@ -644,9 +597,6 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
memcpy(gspca_dev->usb_buf, Reg8391, 8);
reg_w_buf(gspca_dev, 0x8391, 8);
reg_w_buf(gspca_dev, 0x8390, 8);
- setwhite(gspca_dev);
- setgain(gspca_dev);
- setexposure(gspca_dev);
/* Led ON (bit 3 -> 0 */
reg_w_val(gspca_dev->dev, 0x8114, 0x00);
@@ -654,6 +604,7 @@ static int sd_start_12a(struct gspca_dev *gspca_dev)
}
static int sd_start_72a(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
struct usb_device *dev = gspca_dev->dev;
int Clck;
int mode;
@@ -683,9 +634,10 @@ static int sd_start_72a(struct gspca_dev *gspca_dev)
reg_w_val(dev, 0x8702, 0x81);
reg_w_val(dev, 0x8500, mode); /* mode */
write_sensor_72a(gspca_dev, rev72a_init_sensor2);
- setcontrast(gspca_dev);
+ setwhite(gspca_dev, v4l2_ctrl_g_ctrl(sd->hue),
+ v4l2_ctrl_g_ctrl(sd->contrast));
/* setbrightness(gspca_dev); * fixme: bad values */
- setautogain(gspca_dev);
+ setautogain(gspca_dev, v4l2_ctrl_g_ctrl(sd->autogain));
reg_w_val(dev, 0x8112, 0x10 | 0x20);
return 0;
}
@@ -819,221 +771,96 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-/* rev 72a only */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-/* rev 72a only */
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->usb_err = 0;
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- sd->autogain = val;
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_setwhite(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->white = val;
- if (gspca_dev->streaming)
- setwhite(gspca_dev);
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ /* hue/contrast control cluster for 72a */
+ setwhite(gspca_dev, sd->hue->val, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ /* just plain hue control for 12a */
+ setwhite(gspca_dev, ctrl->val, 0);
+ break;
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ setgain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ setautogain(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_getwhite(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->white;
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-/* rev12a only */
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_init_controls_12a(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
-}
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 3);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, 1, 0x7f, 1, 0x40);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, EXPOSURE_MAX, 1, 700);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 255, 1, 63);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
-/* rev12a only */
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_init_controls_72a(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return 0;
-}
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 0x3f, 1, 0x20);
+ sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, 1, 0x7f, 1, 0x40);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 0x3f, 1, 0x20);
+ sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
- *val = sd->gain;
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(2, &sd->contrast);
return 0;
}
-/* control tables */
-static const struct ctrl sd_ctrls_12a[] = {
- {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = HUE_MIN,
- .maximum = HUE_MAX,
- .step = 1,
- .default_value = HUE_DEF,
- },
- .set = sd_setwhite,
- .get = sd_getwhite,
- },
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = EXPOSURE_MIN,
- .maximum = EXPOSURE_MAX,
- .step = 1,
- .default_value = EXPOSURE_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = GAIN_MIN,
- .maximum = GAIN_MAX,
- .step = 1,
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
-static const struct ctrl sd_ctrls_72a[] = {
- {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = HUE_MIN,
- .maximum = HUE_MAX,
- .step = 1,
- .default_value = HUE_DEF,
- },
- .set = sd_setwhite,
- .get = sd_getwhite,
- },
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = BRIGHTNESS_MIN,
- .maximum = BRIGHTNESS_MAX,
- .step = 1,
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = CONTRAST_MIN,
- .maximum = CONTRAST_MAX,
- .step = 1,
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = AUTOGAIN_MIN,
- .maximum = AUTOGAIN_MAX,
- .step = 1,
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-};
-
/* sub-driver description */
static const struct sd_desc sd_desc_12a = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls_12a,
- .nctrls = ARRAY_SIZE(sd_ctrls_12a),
+ .init_controls = sd_init_controls_12a,
.config = sd_config,
.init = sd_init_12a,
.start = sd_start_12a,
@@ -1045,8 +872,7 @@ static const struct sd_desc sd_desc_12a = {
};
static const struct sd_desc sd_desc_72a = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls_72a,
- .nctrls = ARRAY_SIZE(sd_ctrls_72a),
+ .init_controls = sd_init_controls_72a,
.config = sd_config,
.init = sd_init_72a,
.start = sd_start_72a,
@@ -1103,6 +929,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
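
spca561 also shows the control-cluster idiom: the rev72a contrast and hue pointers sit in an anonymous struct so they are adjacent in memory, and v4l2_ctrl_cluster(2, &sd->contrast) treats them as one array. Clustering is what makes it valid for sd_s_ctrl() to read sd->hue->val while handling V4L2_CID_CONTRAST, since the framework updates and locks clustered controls together. A sketch of the idiom with illustrative names (example_sd, example_cluster):

#include <media/v4l2-ctrls.h>
#include "gspca.h"

struct example_sd {			/* hypothetical driver state */
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct {			/* cluster members kept adjacent */
		struct v4l2_ctrl *contrast;
		struct v4l2_ctrl *hue;
	};
};

static void example_cluster(struct example_sd *sd,
			    struct v4l2_ctrl_handler *hdl,
			    const struct v4l2_ctrl_ops *ops)
{
	sd->contrast = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
					 0, 0x3f, 1, 0x20);
	sd->hue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HUE,
				    1, 0x7f, 1, 0x40);
	/* the two pointers are handed over as a single array of controls */
	v4l2_ctrl_cluster(2, &sd->contrast);
}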
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 04f54654a026..a8ac97931ad6 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -433,6 +433,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index f34ddb0570c8..2c2f3d2f357f 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -340,6 +340,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
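
Several hunks in this series only add .reset_resume = gspca_resume, so a USB reset during system resume takes the same recovery path as an ordinary resume (gspca_resume is expected to re-run the sub-driver init and restart streaming). For context, the surrounding usb_driver usually looks like the sketch below; MODULE_NAME, device_table and sd_probe stand in for the per-driver symbols:

#include <linux/usb.h>
#include "gspca.h"

#define MODULE_NAME "example"		/* placeholder; each driver defines its own */

static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x0000, 0x0000)},	/* placeholder VID:PID */
	{}
};

static int sd_probe(struct usb_interface *intf,
		    const struct usb_device_id *id)
{
	/* real drivers call gspca_dev_probe() with their sd_desc here */
	return -ENODEV;
}

static struct usb_driver example_sd_driver = {
	.name       = MODULE_NAME,
	.id_table   = device_table,
	.probe      = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend      = gspca_suspend,
	.resume       = gspca_resume,
	/* a device reset over suspend/resume takes the same recovery path */
	.reset_resume = gspca_resume,
#endif
};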
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index 1a8ba9b3550a..3e1e486af883 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -36,8 +36,10 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- u16 expo;
- u8 gain;
+ struct { /* exposure/gain control cluster */
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
+ };
u8 do_ctrl;
u8 gpio[2];
@@ -55,42 +57,6 @@ enum sensors {
SENSOR_OV9630,
};
-static int sd_setexpo(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexpo(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0x0001,
- .maximum = 0x0fff,
- .step = 1,
-#define EXPO_DEF 0x0356
- .default_value = EXPO_DEF,
- },
- .set = sd_setexpo,
- .get = sd_getexpo,
- },
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0x01,
- .maximum = 0xff,
- .step = 1,
-#define GAIN_DEF 0x8d
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
static struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -791,7 +757,7 @@ static void lz24bp_ppl(struct sd *sd, u16 ppl)
ucbus_write(&sd->gspca_dev, cmds, ARRAY_SIZE(cmds), 2);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 gain)
{
struct sd *sd = (struct sd *) gspca_dev;
int i, integclks, intstartclk, frameclks, min_frclk;
@@ -799,7 +765,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
u16 cmd;
u8 buf[15];
- integclks = sd->expo;
+ integclks = expo;
i = 0;
cmd = SQ930_CTRL_SET_EXPOSURE;
@@ -818,7 +784,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
buf[i++] = intstartclk;
buf[i++] = frameclks >> 8;
buf[i++] = frameclks;
- buf[i++] = sd->gain;
+ buf[i++] = gain;
break;
default: /* cmos */
/* case SENSOR_MI0360: */
@@ -834,7 +800,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
buf[i++] = 0x35; /* reg = global gain */
buf[i++] = 0x00; /* val H */
buf[i++] = sensor->i2c_dum;
- buf[i++] = 0x80 + sd->gain / 2; /* val L */
+ buf[i++] = 0x80 + gain / 2; /* val L */
buf[i++] = 0x00;
buf[i++] = 0x00;
buf[i++] = 0x00;
@@ -860,9 +826,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->bulk = 1;
- sd->gain = GAIN_DEF;
- sd->expo = EXPO_DEF;
-
return 0;
}
@@ -1089,7 +1052,8 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
return;
sd->do_ctrl = 0;
- setexposure(gspca_dev);
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure),
+ v4l2_ctrl_g_ctrl(sd->gain));
gspca_dev->cam.bulk_nurbs = 1;
ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC);
@@ -1113,48 +1077,55 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
}
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
struct sd *sd = (struct sd *) gspca_dev;
- sd->gain = val;
- if (gspca_dev->streaming)
- sd->do_ctrl = 1;
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- *val = sd->gain;
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val, sd->gain->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_setexpo(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- sd->expo = val;
- if (gspca_dev->streaming)
- sd->do_ctrl = 1;
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getexpo(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
struct sd *sd = (struct sd *) gspca_dev;
- *val = sd->expo;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 2);
+ sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, 0xfff, 1, 0x356);
+ sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 1, 255, 1, 0x8d);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ v4l2_ctrl_cluster(2, &sd->exposure);
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
@@ -1194,6 +1165,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
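
sq930x cannot write exposure and gain between arbitrary bulk transfers, so sd_s_ctrl() only notes that a change happened and the actual write is deferred to sd_dq_callback(), which fetches the current values with v4l2_ctrl_g_ctrl(). A sketch of that pattern; example_apply_deferred() is an illustrative name and the real driver passes the values to its setexposure() helper shown above:

#include <media/v4l2-ctrls.h>
#include "gspca.h"

static void example_apply_deferred(struct gspca_dev *gspca_dev,
				   struct v4l2_ctrl *exposure,
				   struct v4l2_ctrl *gain)
{
	s32 expo = v4l2_ctrl_g_ctrl(exposure);	/* reads under the handler lock */
	s32 g = v4l2_ctrl_g_ctrl(gain);

	/* ...program the sensor with expo and g here... */
	PDEBUG(D_CONF, "deferred exposure %d gain %d", expo, g);
}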
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 4ae7cc8f463a..8c0982607f25 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -29,86 +29,14 @@ MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("Syntek DV4000 (STK014) USB Camera Driver");
MODULE_LICENSE("GPL");
-/* controls */
-enum e_ctrl {
- BRIGHTNESS,
- CONTRAST,
- COLORS,
- LIGHTFREQ,
- NCTRLS /* number of controls */
-};
+#define QUALITY 50
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
-
- struct gspca_ctrl ctrls[NCTRLS];
-
- u8 quality;
-#define QUALITY_MIN 70
-#define QUALITY_MAX 95
-#define QUALITY_DEF 88
-
u8 jpeg_hdr[JPEG_HDR_SZ];
};
-/* V4L2 controls supported by the driver */
-static void setbrightness(struct gspca_dev *gspca_dev);
-static void setcontrast(struct gspca_dev *gspca_dev);
-static void setcolors(struct gspca_dev *gspca_dev);
-static void setlightfreq(struct gspca_dev *gspca_dev);
-
-static const struct ctrl sd_ctrls[NCTRLS] = {
-[BRIGHTNESS] = {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setbrightness
- },
-[CONTRAST] = {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setcontrast
- },
-[COLORS] = {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 127,
- },
- .set_control = setcolors
- },
-[LIGHTFREQ] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 1,
- .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */
- .step = 1,
- .default_value = 1,
- },
- .set_control = setlightfreq
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -255,41 +183,36 @@ static void set_par(struct gspca_dev *gspca_dev,
snd_val(gspca_dev, 0x003f08, parval);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
int parval;
parval = 0x06000000 /* whiteness */
- + (sd->ctrls[BRIGHTNESS].val << 16);
+ + (val << 16);
set_par(gspca_dev, parval);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
int parval;
parval = 0x07000000 /* contrast */
- + (sd->ctrls[CONTRAST].val << 16);
+ + (val << 16);
set_par(gspca_dev, parval);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
int parval;
parval = 0x08000000 /* saturation */
- + (sd->ctrls[COLORS].val << 16);
+ + (val << 16);
set_par(gspca_dev, parval);
}
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- set_par(gspca_dev, sd->ctrls[LIGHTFREQ].val == 1
+ set_par(gspca_dev, val == 1
? 0x33640000 /* 50 Hz */
: 0x33780000); /* 60 Hz */
}
@@ -298,12 +221,8 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
gspca_dev->cam.cam_mode = vga_mode;
gspca_dev->cam.nmodes = ARRAY_SIZE(vga_mode);
- gspca_dev->cam.ctrls = sd->ctrls;
- sd->quality = QUALITY_DEF;
return 0;
}
@@ -333,7 +252,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x22); /* JPEG 411 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, QUALITY);
/* work on alternate 1 */
usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1);
@@ -365,14 +284,10 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x0640, 0);
reg_w(gspca_dev, 0x0650, 0);
reg_w(gspca_dev, 0x0660, 0);
- setbrightness(gspca_dev); /* whiteness */
- setcontrast(gspca_dev); /* contrast */
- setcolors(gspca_dev); /* saturation */
set_par(gspca_dev, 0x09800000); /* Red ? */
set_par(gspca_dev, 0x0a800000); /* Green ? */
set_par(gspca_dev, 0x0b800000); /* Blue ? */
set_par(gspca_dev, 0x0d030000); /* Gamma ? */
- setlightfreq(gspca_dev);
/* start the video flow */
set_par(gspca_dev, 0x01000000);
@@ -435,62 +350,70 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- static const char *freq_nm[3] = {"NoFliker", "50 Hz", "60 Hz"};
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- if ((unsigned) menu->index >= ARRAY_SIZE(freq_nm))
- break;
- strcpy((char *) menu->name, freq_nm[menu->index]);
- return 0;
- }
- return -EINVAL;
-}
+ gspca_dev->usb_err = 0;
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setlightfreq(gspca_dev, ctrl->val);
+ break;
+ }
return gspca_dev->usb_err;
}
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 127);
+ v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* -- module initialisation -- */
@@ -516,6 +439,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
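
Menu controls lose their hand-written querymenu handlers: v4l2_ctrl_new_std_menu() takes the last valid item, a mask of items to skip, and the default, and the core supplies the standard item names. In the stk014 hunk above the mask of 1 hides item 0 ("Disabled"), mirroring the old table's minimum of 1. Sketch with an illustrative wrapper name:

#include <media/v4l2-ctrls.h>

static int example_add_freq(struct v4l2_ctrl_handler *hdl,
			    const struct v4l2_ctrl_ops *ops)
{
	/* v4l2_ctrl_new_std_menu(handler, ops, id, max, skip_mask, def) */
	v4l2_ctrl_new_std_menu(hdl, ops,
			V4L2_CID_POWER_LINE_FREQUENCY,
			V4L2_CID_POWER_LINE_FREQUENCY_60HZ,	/* last item */
			0x1,					/* skip item 0 */
			V4L2_CID_POWER_LINE_FREQUENCY_50HZ);	/* default */
	return hdl->error;
}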
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index 461ed645f309..67605272aaa8 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -46,10 +46,6 @@ struct sd {
u8 current_mode;
};
-/* V4L2 controls supported by the driver */
-static const struct ctrl sd_ctrls[] = {
-};
-
static int stv_sndctrl(struct gspca_dev *gspca_dev, int set, u8 req, u16 val,
int size)
{
@@ -318,8 +314,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
@@ -352,6 +346,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c80f0c0c75b6..9ccfcb1c6479 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -30,18 +30,13 @@ MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA5xx USB Camera Driver");
MODULE_LICENSE("GPL");
+#define QUALITY 85
+
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- s8 brightness;
- u8 contrast;
- u8 colors;
- u8 autogain;
- u8 quality;
-#define QUALITY_MIN 70
-#define QUALITY_MAX 95
-#define QUALITY_DEF 85
+ bool autogain;
u8 bridge;
#define BRIDGE_SPCA504 0
@@ -59,75 +54,6 @@ struct sd {
u8 jpeg_hdr[JPEG_HDR_SZ];
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = -128,
- .maximum = 127,
- .step = 1,
-#define BRIGHTNESS_DEF 0
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
-#define CONTRAST_DEF 0x20
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 0xff,
- .step = 1,
-#define COLOR_DEF 0x1a
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-};
-
static const struct v4l2_pix_format vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
@@ -597,31 +523,31 @@ static void spca504B_setQtable(struct gspca_dev *gspca_dev)
spca504B_PollingDataReady(gspca_dev);
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f0 : 0x21a7;
- reg_w_riv(gspca_dev, 0x00, reg, sd->brightness);
+ reg_w_riv(gspca_dev, 0x00, reg, val);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f1 : 0x21a8;
- reg_w_riv(gspca_dev, 0x00, reg, sd->contrast);
+ reg_w_riv(gspca_dev, 0x00, reg, val);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u16 reg;
reg = sd->bridge == BRIDGE_SPCA536 ? 0x20f6 : 0x21ae;
- reg_w_riv(gspca_dev, 0x00, reg, sd->colors);
+ reg_w_riv(gspca_dev, 0x00, reg, val);
}
static void init_ctl_reg(struct gspca_dev *gspca_dev)
@@ -629,10 +555,6 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int pollreg = 1;
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setcolors(gspca_dev);
-
switch (sd->bridge) {
case BRIDGE_SPCA504:
case BRIDGE_SPCA504C:
@@ -704,11 +626,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->nmodes = ARRAY_SIZE(vga_mode2);
break;
}
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->quality = QUALITY_DEF;
return 0;
}
@@ -807,7 +724,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* create the JPEG header */
jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
0x22); /* JPEG 411 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, QUALITY);
if (sd->bridge == BRIDGE_SPCA504B)
spca504B_setQtable(gspca_dev);
@@ -1012,116 +929,69 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ sd->autogain = ctrl->val;
+ break;
+ }
return gspca_dev->usb_err;
}
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- return 0;
-}
-
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_set_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (jcomp->quality < QUALITY_MIN)
- sd->quality = QUALITY_MIN;
- else if (jcomp->quality > QUALITY_MAX)
- sd->quality = QUALITY_MAX;
- else
- sd->quality = jcomp->quality;
- if (gspca_dev->streaming)
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
- return gspca_dev->usb_err;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_get_jcomp(struct gspca_dev *gspca_dev,
- struct v4l2_jpegcompression *jcomp)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = sd->quality;
- jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
- | V4L2_JPEG_MARKER_DQT;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 0x20);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 0x1a);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .get_jcomp = sd_get_jcomp,
- .set_jcomp = sd_set_jcomp,
};
/* -- module initialisation -- */
@@ -1208,6 +1078,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
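
sunplus also drops the VIDIOC_G/S_JPEGCOMP handlers: the JPEG header is now built once at stream start with the fixed QUALITY constant, and the autogain control is merely cached in sd_s_ctrl() for the existing autogain logic to consume. A sketch of the simplified header setup, assuming the gspca jpeg helpers from jpeg.h; example_sd and example_prepare_jpeg() are illustrative names:

#include "gspca.h"
#include "jpeg.h"

#define QUALITY 85			/* fixed quality; replaces QUALITY_DEF */

struct example_sd {			/* stand-in for the driver's struct sd */
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	u8 jpeg_hdr[JPEG_HDR_SZ];
};

static void example_prepare_jpeg(struct example_sd *sd)
{
	struct gspca_dev *gspca_dev = &sd->gspca_dev;

	jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
		    0x22);			/* JPEG 4:1:1 subsampling */
	jpeg_set_qual(sd->jpeg_hdr, QUALITY);	/* no runtime S_JPEGCOMP anymore */
}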
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 9b9f85a8e60e..8bc6c3ceec2c 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -34,28 +34,19 @@
#include <linux/slab.h>
#include "gspca.h"
-#define V4L2_CID_EFFECTS (V4L2_CID_PRIVATE_BASE + 0)
-
MODULE_AUTHOR("Leandro Costantino <le_costantino@pixartargentina.com.ar>");
MODULE_DESCRIPTION("GSPCA/T613 (JPEG Compliance) USB Camera Driver");
MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
-
- u8 brightness;
- u8 contrast;
- u8 colors;
- u8 autogain;
- u8 gamma;
- u8 sharpness;
- u8 freq;
- u8 red_gain;
- u8 blue_gain;
- u8 green_gain;
- u8 awb; /* set default r/g/b and activate */
- u8 mirror;
- u8 effect;
+ struct v4l2_ctrl *freq;
+ struct { /* awb / color gains control cluster */
+ struct v4l2_ctrl *awb;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *red_balance;
+ struct v4l2_ctrl *blue_balance;
+ };
u8 sensor;
u8 button_pressed;
@@ -67,245 +58,31 @@ enum sensors {
SENSOR_LT168G, /* must verify if this is the actual model */
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-
-static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setblue_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getblue_gain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setred_gain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getred_gain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static int sd_setmirror(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getmirror(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 14,
- .step = 1,
-#define BRIGHTNESS_DEF 8
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 0x0d,
- .step = 1,
-#define CONTRAST_DEF 0x07
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Color",
- .minimum = 0,
- .maximum = 0x0f,
- .step = 1,
-#define COLORS_DEF 0x05
- .default_value = COLORS_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-#define GAMMA_MAX 16
-#define GAMMA_DEF 10
- {
- {
- .id = V4L2_CID_GAMMA, /* (gamma on win) */
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = GAMMA_MAX - 1,
- .step = 1,
- .default_value = GAMMA_DEF,
- },
- .set = sd_setgamma,
- .get = sd_getgamma,
- },
- {
- {
- .id = V4L2_CID_BACKLIGHT_COMPENSATION, /* Activa lowlight,
- * some apps dont bring up the
- * backligth_compensation control) */
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Low Light",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 0x01
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setlowlight,
- .get = sd_getlowlight,
- },
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror Image",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define MIRROR_DEF 0
- .default_value = MIRROR_DEF,
- },
- .set = sd_setmirror,
- .get = sd_getmirror
- },
- {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light Frequency Filter",
- .minimum = 1, /* 1 -> 0x50, 2->0x60 */
- .maximum = 2,
- .step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
- },
- .set = sd_setfreq,
- .get = sd_getfreq},
-
- {
- {
- .id = V4L2_CID_AUTO_WHITE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Auto White Balance",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AWB_DEF 0
- .default_value = AWB_DEF,
- },
- .set = sd_setawb,
- .get = sd_getawb
- },
- {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
-#define SHARPNESS_DEF 0x06
- .default_value = SHARPNESS_DEF,
- },
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
- {
- {
- .id = V4L2_CID_EFFECTS,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Webcam Effects",
- .minimum = 0,
- .maximum = 4,
- .step = 1,
-#define EFFECTS_DEF 0
- .default_value = EFFECTS_DEF,
- },
- .set = sd_seteffect,
- .get = sd_geteffect
- },
- {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue Balance",
- .minimum = 0x10,
- .maximum = 0x40,
- .step = 1,
-#define BLUE_GAIN_DEF 0x20
- .default_value = BLUE_GAIN_DEF,
- },
- .set = sd_setblue_gain,
- .get = sd_getblue_gain,
- },
- {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red Balance",
- .minimum = 0x10,
- .maximum = 0x40,
- .step = 1,
-#define RED_GAIN_DEF 0x20
- .default_value = RED_GAIN_DEF,
- },
- .set = sd_setred_gain,
- .get = sd_getred_gain,
- },
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0x10,
- .maximum = 0x40,
- .step = 1,
-#define GAIN_DEF 0x20
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
static const struct v4l2_pix_format vga_mode_t16[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 4 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 4},
+#if 0 /* HDG: broken with my test cam, so lets disable it */
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 3},
+#endif
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2},
+#if 0 /* HDG: broken with my test cam, so lets disable it */
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
+#endif
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 8 + 590,
@@ -454,17 +231,6 @@ static const struct additional_sensor_data sensor_data[] = {
};
#define MAX_EFFECTS 7
-/* easily done by soft, this table could be removed,
- * i keep it here just in case */
-static char *effects_control[MAX_EFFECTS] = {
- "Normal",
- "Emboss", /* disabled */
- "Monochrome",
- "Sepia",
- "Sketch",
- "Sun Effect", /* disabled */
- "Negative",
-};
static const u8 effects_table[MAX_EFFECTS][6] = {
{0xa8, 0xe8, 0xc6, 0xd2, 0xc0, 0x00}, /* Normal */
{0xa8, 0xc8, 0xc6, 0x52, 0xc0, 0x04}, /* Repujar */
@@ -475,7 +241,8 @@ static const u8 effects_table[MAX_EFFECTS][6] = {
{0xa8, 0xc8, 0xc6, 0xd2, 0xc0, 0x40}, /* Negative */
};
-static const u8 gamma_table[GAMMA_MAX][17] = {
+#define GAMMA_MAX (15)
+static const u8 gamma_table[GAMMA_MAX+1][17] = {
/* gamma table from cam1690.ini */
{0x00, 0x00, 0x01, 0x04, 0x08, 0x0e, 0x16, 0x21, /* 0 */
0x2e, 0x3d, 0x50, 0x65, 0x7d, 0x99, 0xb8, 0xdb,
@@ -683,38 +450,18 @@ static void om6802_sensor_init(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
- struct cam *cam;
-
- cam = &gspca_dev->cam;
+ struct cam *cam = &gspca_dev->cam;
cam->cam_mode = vga_mode_t16;
cam->nmodes = ARRAY_SIZE(vga_mode_t16);
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLORS_DEF;
- sd->gamma = GAMMA_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->mirror = MIRROR_DEF;
- sd->freq = FREQ_DEF;
- sd->awb = AWB_DEF;
- sd->sharpness = SHARPNESS_DEF;
- sd->effect = EFFECTS_DEF;
- sd->red_gain = RED_GAIN_DEF;
- sd->blue_gain = BLUE_GAIN_DEF;
- sd->green_gain = GAIN_DEF * 3 - RED_GAIN_DEF - BLUE_GAIN_DEF;
-
return 0;
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 brightness)
{
- struct sd *sd = (struct sd *) gspca_dev;
- unsigned int brightness;
u8 set6[4] = { 0x8f, 0x24, 0xc3, 0x00 };
- brightness = sd->brightness;
if (brightness < 7) {
set6[1] = 0x26;
set6[3] = 0x70 - brightness * 0x10;
@@ -725,10 +472,8 @@ static void setbrightness(struct gspca_dev *gspca_dev)
reg_w_buf(gspca_dev, set6, sizeof set6);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, s32 contrast)
{
- struct sd *sd = (struct sd *) gspca_dev;
- unsigned int contrast = sd->contrast;
u16 reg_to_write;
if (contrast < 7)
@@ -739,89 +484,62 @@ static void setcontrast(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, reg_to_write);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u16 reg_to_write;
- reg_to_write = 0x80bb + sd->colors * 0x100; /* was 0xc0 */
+ reg_to_write = 0x80bb + val * 0x100; /* was 0xc0 */
reg_w(gspca_dev, reg_to_write);
}
-static void setgamma(struct gspca_dev *gspca_dev)
+static void setgamma(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
-	PDEBUG(D_CONF, "Gamma: %d", sd->gamma);
+	PDEBUG(D_CONF, "Gamma: %d", val);
reg_w_ixbuf(gspca_dev, 0x90,
- gamma_table[sd->gamma], sizeof gamma_table[0]);
+ gamma_table[val], sizeof gamma_table[0]);
}
-static void setRGB(struct gspca_dev *gspca_dev)
+static void setawb_n_RGB(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 all_gain_reg[6] =
- {0x87, 0x00, 0x88, 0x00, 0x89, 0x00};
+ u8 all_gain_reg[8] = {
+ 0x87, 0x00, 0x88, 0x00, 0x89, 0x00, 0x80, 0x00 };
+ s32 red_gain, blue_gain, green_gain;
+
+ green_gain = sd->gain->val;
+
+ red_gain = green_gain + sd->red_balance->val;
+ if (red_gain > 0x40)
+ red_gain = 0x40;
+ else if (red_gain < 0x10)
+ red_gain = 0x10;
+
+ blue_gain = green_gain + sd->blue_balance->val;
+ if (blue_gain > 0x40)
+ blue_gain = 0x40;
+ else if (blue_gain < 0x10)
+ blue_gain = 0x10;
+
+ all_gain_reg[1] = red_gain;
+ all_gain_reg[3] = blue_gain;
+ all_gain_reg[5] = green_gain;
+ all_gain_reg[7] = sensor_data[sd->sensor].reg80;
+ if (!sd->awb->val)
+ all_gain_reg[7] &= ~0x04; /* AWB off */
- all_gain_reg[1] = sd->red_gain;
- all_gain_reg[3] = sd->blue_gain;
- all_gain_reg[5] = sd->green_gain;
reg_w_buf(gspca_dev, all_gain_reg, sizeof all_gain_reg);
}
-/* Generic fnc for r/b balance, exposure and awb */
-static void setawb(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
- u16 reg80;
-
- reg80 = (sensor_data[sd->sensor].reg80 << 8) | 0x80;
-
- /* on awb leave defaults values */
- if (!sd->awb) {
- /* shoud we wait here.. */
- /* update and reset RGB gains with webcam values */
- sd->red_gain = reg_r(gspca_dev, 0x0087);
- sd->blue_gain = reg_r(gspca_dev, 0x0088);
- sd->green_gain = reg_r(gspca_dev, 0x0089);
- reg80 &= ~0x0400; /* AWB off */
- }
- reg_w(gspca_dev, reg80);
- reg_w(gspca_dev, reg80);
-}
-
-static void init_gains(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u16 reg80;
- u8 all_gain_reg[8] =
- {0x87, 0x00, 0x88, 0x00, 0x89, 0x00, 0x80, 0x00};
-
- all_gain_reg[1] = sd->red_gain;
- all_gain_reg[3] = sd->blue_gain;
- all_gain_reg[5] = sd->green_gain;
- reg80 = sensor_data[sd->sensor].reg80;
- if (!sd->awb)
- reg80 &= ~0x04;
- all_gain_reg[7] = reg80;
- reg_w_buf(gspca_dev, all_gain_reg, sizeof all_gain_reg);
-
- reg_w(gspca_dev, (sd->red_gain << 8) + 0x87);
- reg_w(gspca_dev, (sd->blue_gain << 8) + 0x88);
- reg_w(gspca_dev, (sd->green_gain << 8) + 0x89);
-}
-
-static void setsharpness(struct gspca_dev *gspca_dev)
-{
- struct sd *sd = (struct sd *) gspca_dev;
u16 reg_to_write;
- reg_to_write = 0x0aa6 + 0x1000 * sd->sharpness;
+ reg_to_write = 0x0aa6 + 0x1000 * val;
reg_w(gspca_dev, reg_to_write);
}
-static void setfreq(struct gspca_dev *gspca_dev)
+static void setfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 reg66;
@@ -829,7 +547,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
switch (sd->sensor) {
case SENSOR_LT168G:
- if (sd->freq != 0)
+ if (val != 0)
freq[3] = 0xa8;
reg66 = 0x41;
break;
@@ -840,7 +558,7 @@ static void setfreq(struct gspca_dev *gspca_dev)
reg66 = 0x40;
break;
}
- switch (sd->freq) {
+ switch (val) {
case 0: /* no flicker */
freq[3] = 0xf0;
break;
@@ -941,14 +659,9 @@ static int sd_init(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80);
reg_w(gspca_dev, (sensor->reg80 << 8) + 0x80);
reg_w(gspca_dev, (sensor->reg8e << 8) + 0x8e);
-
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setgamma(gspca_dev);
- setcolors(gspca_dev);
- setsharpness(gspca_dev);
- init_gains(gspca_dev);
- setfreq(gspca_dev);
+ reg_w(gspca_dev, (0x20 << 8) + 0x87);
+ reg_w(gspca_dev, (0x20 << 8) + 0x88);
+ reg_w(gspca_dev, (0x20 << 8) + 0x89);
reg_w_buf(gspca_dev, sensor->data5, sizeof sensor->data5);
reg_w_buf(gspca_dev, sensor->nset8, sizeof sensor->nset8);
@@ -968,31 +681,44 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
-static void setmirror(struct gspca_dev *gspca_dev)
+static void setmirror(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 hflipcmd[8] =
{0x62, 0x07, 0x63, 0x03, 0x64, 0x00, 0x60, 0x09};
- if (sd->mirror)
+ if (val)
hflipcmd[3] = 0x01;
reg_w_buf(gspca_dev, hflipcmd, sizeof hflipcmd);
}
-static void seteffect(struct gspca_dev *gspca_dev)
+static void seteffect(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ int idx = 0;
- reg_w_buf(gspca_dev, effects_table[sd->effect],
- sizeof effects_table[0]);
- if (sd->effect == 1 || sd->effect == 5) {
- PDEBUG(D_CONF,
- "This effect have been disabled for webcam \"safety\"");
- return;
+ switch (val) {
+ case V4L2_COLORFX_NONE:
+ break;
+ case V4L2_COLORFX_BW:
+ idx = 2;
+ break;
+ case V4L2_COLORFX_SEPIA:
+ idx = 3;
+ break;
+ case V4L2_COLORFX_SKETCH:
+ idx = 4;
+ break;
+ case V4L2_COLORFX_NEGATIVE:
+ idx = 6;
+ break;
+ default:
+ break;
}
- if (sd->effect == 1 || sd->effect == 4)
+ reg_w_buf(gspca_dev, effects_table[idx],
+ sizeof effects_table[0]);
+
+ if (val == V4L2_COLORFX_SKETCH)
reg_w(gspca_dev, 0x4aa6);
else
reg_w(gspca_dev, 0xfaa6);
@@ -1070,7 +796,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
sensor = &sensor_data[sd->sensor];
- setfreq(gspca_dev);
+ setfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->freq));
reg_r(gspca_dev, 0x0012);
reg_w_buf(gspca_dev, t2, sizeof t2);
reg_w_ixbuf(gspca_dev, 0xb3, t3, sizeof t3);
@@ -1142,296 +868,157 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, pkt_type, data, len);
}
-static int sd_setblue_gain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->blue_gain = val;
- if (gspca_dev->streaming)
- reg_w(gspca_dev, (val << 8) + 0x88);
- return 0;
-}
-
-static int sd_getblue_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->blue_gain;
- return 0;
-}
-
-static int sd_setred_gain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->red_gain = val;
- if (gspca_dev->streaming)
- reg_w(gspca_dev, (val << 8) + 0x87);
-
- return 0;
-}
-
-static int sd_getred_gain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->red_gain;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
- u16 psg, nsg;
-
- psg = sd->red_gain + sd->blue_gain + sd->green_gain;
- nsg = val * 3;
- sd->red_gain = sd->red_gain * nsg / psg;
- if (sd->red_gain > 0x40)
- sd->red_gain = 0x40;
- else if (sd->red_gain < 0x10)
- sd->red_gain = 0x10;
- sd->blue_gain = sd->blue_gain * nsg / psg;
- if (sd->blue_gain > 0x40)
- sd->blue_gain = 0x40;
- else if (sd->blue_gain < 0x10)
- sd->blue_gain = 0x10;
- sd->green_gain = sd->green_gain * nsg / psg;
- if (sd->green_gain > 0x40)
- sd->green_gain = 0x40;
- else if (sd->green_gain < 0x10)
- sd->green_gain = 0x10;
-
- if (gspca_dev->streaming)
- setRGB(gspca_dev);
- return 0;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = (sd->red_gain + sd->blue_gain + sd->green_gain) / 3;
- return 0;
-}
-
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return *val;
-}
-
-static int sd_setawb(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->awb = val;
- if (gspca_dev->streaming)
- setawb(gspca_dev);
- return 0;
-}
-
-static int sd_getawb(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->awb;
- return *val;
-}
-
-static int sd_setmirror(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->mirror = val;
- if (gspca_dev->streaming)
- setmirror(gspca_dev);
- return 0;
-}
-
-static int sd_getmirror(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->mirror;
- return *val;
-}
-
-static int sd_seteffect(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->effect = val;
- if (gspca_dev->streaming)
- seteffect(gspca_dev);
- return 0;
-}
-
-static int sd_geteffect(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->effect;
- return *val;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return *val;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return 0;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_setgamma(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gamma = val;
- if (gspca_dev->streaming)
- setgamma(gspca_dev);
- return 0;
-}
-
-static int sd_getgamma(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gamma;
- return 0;
-}
-
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->freq = val;
- if (gspca_dev->streaming)
- setfreq(gspca_dev);
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+ s32 red_gain, blue_gain, green_gain;
+
+ gspca_dev->usb_err = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ red_gain = reg_r(gspca_dev, 0x0087);
+ if (red_gain > 0x40)
+ red_gain = 0x40;
+ else if (red_gain < 0x10)
+ red_gain = 0x10;
+
+ blue_gain = reg_r(gspca_dev, 0x0088);
+ if (blue_gain > 0x40)
+ blue_gain = 0x40;
+ else if (blue_gain < 0x10)
+ blue_gain = 0x10;
+
+ green_gain = reg_r(gspca_dev, 0x0089);
+ if (green_gain > 0x40)
+ green_gain = 0x40;
+ else if (green_gain < 0x10)
+ green_gain = 0x10;
+
+ sd->gain->val = green_gain;
+ sd->red_balance->val = red_gain - green_gain;
+ sd->blue_balance->val = blue_gain - green_gain;
+ break;
+ }
return 0;
}
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- *val = sd->freq;
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(gspca_dev);
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAMMA:
+ setgamma(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ setmirror(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setfreq(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BACKLIGHT_COMPENSATION:
+ reg_w(gspca_dev, ctrl->val ? 0xf48e : 0xb48e);
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ setawb_n_RGB(gspca_dev);
+ break;
+ case V4L2_CID_COLORFX:
+ seteffect(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .g_volatile_ctrl = sd_g_volatile_ctrl,
+ .s_ctrl = sd_s_ctrl,
+};
-/* Low Light set here......*/
-static int sd_setlowlight(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 12);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 14, 1, 8);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 0x0d, 1, 7);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 0xf, 1, 5);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 10);
+ /* Activate lowlight, some apps don't bring up the
+ backlight_compensation control */
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BACKLIGHT_COMPENSATION, 0, 1, 1, 1);
+ if (sd->sensor == SENSOR_TAS5130A)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->awb = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
+ sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0x10, 0x40, 1, 0x20);
+ sd->blue_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, -0x30, 0x30, 1, 0);
+ sd->red_balance = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, -0x30, 0x30, 1, 0);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 15, 1, 6);
+ v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SKETCH,
+ ~((1 << V4L2_COLORFX_NONE) |
+ (1 << V4L2_COLORFX_BW) |
+ (1 << V4L2_COLORFX_SEPIA) |
+ (1 << V4L2_COLORFX_SKETCH) |
+ (1 << V4L2_COLORFX_NEGATIVE)),
+ V4L2_COLORFX_NONE);
+ sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
- sd->autogain = val;
- if (val != 0)
- reg_w(gspca_dev, 0xf48e);
- else
- reg_w(gspca_dev, 0xb48e);
- return 0;
-}
+ v4l2_ctrl_auto_cluster(4, &sd->awb, 0, true);
-static int sd_getlowlight(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
return 0;
}
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- static const char *freq_nm[3] = {"NoFliker", "50 Hz", "60 Hz"};
-
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- if ((unsigned) menu->index >= ARRAY_SIZE(freq_nm))
- break;
- strcpy((char *) menu->name, freq_nm[menu->index]);
- return 0;
- case V4L2_CID_EFFECTS:
- if ((unsigned) menu->index < ARRAY_SIZE(effects_control)) {
- strlcpy((char *) menu->name,
- effects_control[menu->index],
- sizeof menu->name);
- return 0;
- }
- break;
- }
- return -EINVAL;
-}
-
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
.other_input = 1,
#endif
@@ -1460,6 +1047,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
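
For readers following these conversions, the boilerplate each driver gains is essentially the same. A minimal sketch of the pattern, assuming a gspca-style driver (the mydrv_* names and the single brightness control are illustrative, not taken from the patch; setbrightness() stands in for a per-driver helper like the one above):

#include "gspca.h"			/* struct gspca_dev, usb_err, vdev */
#include <media/v4l2-ctrls.h>

static int mydrv_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* the handler is embedded in gspca_dev, so the device state is
	 * recovered with container_of() */
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);

	gspca_dev->usb_err = 0;
	if (!gspca_dev->streaming)
		return 0;		/* nothing to program while idle */

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	}
	return gspca_dev->usb_err;	/* report deferred USB errors */
}

static const struct v4l2_ctrl_ops mydrv_ctrl_ops = {
	.s_ctrl = mydrv_s_ctrl,
};

/* hooked up through sd_desc.init_controls instead of .ctrls/.nctrls */
static int mydrv_init_controls(struct gspca_dev *gspca_dev)
{
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 1);	/* hint: number of controls */
	v4l2_ctrl_new_std(hdl, &mydrv_ctrl_ops,
			V4L2_CID_BRIGHTNESS, 0, 14, 1, 8);
	if (hdl->error) {
		pr_err("Could not initialize controls\n");
		return hdl->error;
	}
	return 0;
}

Values set while the camera is idle stay cached in the control framework; the drivers read them back with v4l2_ctrl_g_ctrl() at stream start, as the sd_start() hunks above and below do.
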
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index c6326d177a3d..a6055246cb9d 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -120,24 +120,13 @@ static const u8 jpeg_head[] = {
#define JPEG_HDR_SZ 521
};
-enum e_ctrl {
- EXPOSURE,
- QUALITY,
- SHARPNESS,
- RGAIN,
- GAIN,
- BGAIN,
- GAMMA,
- AUTOGAIN,
- NCTRLS /* number of controls */
-};
-
-#define AUTOGAIN_DEF 1
-
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
-
- struct gspca_ctrl ctrls[NCTRLS];
+ struct v4l2_ctrl *jpegqual;
+ struct v4l2_ctrl *sharpness;
+ struct v4l2_ctrl *gamma;
+ struct v4l2_ctrl *blue;
+ struct v4l2_ctrl *red;
u8 framerate;
u8 quality; /* webcam current JPEG quality (0..16) */
@@ -1415,32 +1404,33 @@ static void soi763a_6810_init(struct gspca_dev *gspca_dev)
}
/* set the gain and exposure */
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 gain,
+ s32 blue, s32 red)
{
struct sd *sd = (struct sd *) gspca_dev;
if (sd->sensor == SENSOR_CX0342) {
- int expo;
-
- expo = (sd->ctrls[EXPOSURE].val << 2) - 1;
+ expo = (expo << 2) - 1;
i2c_w(gspca_dev, CX0342_EXPO_LINE_L, expo);
i2c_w(gspca_dev, CX0342_EXPO_LINE_H, expo >> 8);
if (sd->bridge == BRIDGE_TP6800)
i2c_w(gspca_dev, CX0342_RAW_GBGAIN_H,
- sd->ctrls[GAIN].val >> 8);
- i2c_w(gspca_dev, CX0342_RAW_GBGAIN_L, sd->ctrls[GAIN].val);
+ gain >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_GBGAIN_L, gain);
if (sd->bridge == BRIDGE_TP6800)
i2c_w(gspca_dev, CX0342_RAW_GRGAIN_H,
- sd->ctrls[GAIN].val >> 8);
- i2c_w(gspca_dev, CX0342_RAW_GRGAIN_L, sd->ctrls[GAIN].val);
- if (sd->bridge == BRIDGE_TP6800)
- i2c_w(gspca_dev, CX0342_RAW_BGAIN_H,
- sd->ctrls[BGAIN].val >> 8);
- i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, sd->ctrls[BGAIN].val);
- if (sd->bridge == BRIDGE_TP6800)
- i2c_w(gspca_dev, CX0342_RAW_RGAIN_H,
- sd->ctrls[RGAIN].val >> 8);
- i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, sd->ctrls[RGAIN].val);
+ gain >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_GRGAIN_L, gain);
+ if (sd->sensor == SENSOR_CX0342) {
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_H,
+ blue >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, blue);
+ if (sd->bridge == BRIDGE_TP6800)
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_H,
+ red >> 8);
+ i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, red);
+ }
i2c_w(gspca_dev, CX0342_SYS_CTRL_0,
sd->bridge == BRIDGE_TP6800 ? 0x80 : 0x81);
return;
@@ -1448,10 +1438,10 @@ static void setexposure(struct gspca_dev *gspca_dev)
/* soi763a */
i2c_w(gspca_dev, 0x10, /* AEC_H (exposure time) */
- sd->ctrls[EXPOSURE].val);
+ expo);
/* i2c_w(gspca_dev, 0x76, 0x02); * AEC_L ([1:0] */
i2c_w(gspca_dev, 0x00, /* gain */
- sd->ctrls[GAIN].val);
+ gain);
}
/* set the JPEG quantization tables */
@@ -1472,12 +1462,10 @@ static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
}
/* set the JPEG compression quality factor */
-static void setquality(struct gspca_dev *gspca_dev)
+static void setquality(struct gspca_dev *gspca_dev, s32 q)
{
struct sd *sd = (struct sd *) gspca_dev;
- u16 q;
- q = sd->ctrls[QUALITY].val;
if (q != 16)
q = 15 - q;
@@ -1508,10 +1496,9 @@ static const u8 color_gain[NSENSORS][18] = {
0xd5, 0x00, 0x46, 0x03, 0xdc, 0x03}, /* V R/G/B */
};
-static void setgamma(struct gspca_dev *gspca_dev)
+static void setgamma(struct gspca_dev *gspca_dev, s32 gamma)
{
struct sd *sd = (struct sd *) gspca_dev;
- int gamma;
#define NGAMMA 6
static const u8 gamma_tb[NGAMMA][3][1024] = {
{ /* gamma 0 - from tp6800 + soi763a */
@@ -3836,7 +3823,6 @@ static void setgamma(struct gspca_dev *gspca_dev)
if (sd->bridge == BRIDGE_TP6810)
reg_w(gspca_dev, 0x02, 0x28);
/* msleep(50); */
- gamma = sd->ctrls[GAMMA].val;
bulk_w(gspca_dev, 0x00, gamma_tb[gamma][0], 1024);
bulk_w(gspca_dev, 0x01, gamma_tb[gamma][1], 1024);
bulk_w(gspca_dev, 0x02, gamma_tb[gamma][2], 1024);
@@ -3864,43 +3850,35 @@ static void setgamma(struct gspca_dev *gspca_dev)
/* msleep(50); */
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
if (sd->bridge == BRIDGE_TP6800) {
- val = sd->ctrls[SHARPNESS].val
- | 0x08; /* grid compensation enable */
+ val |= 0x08; /* grid compensation enable */
if (gspca_dev->width == 640)
reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
else
val |= 0x04; /* scaling down enable */
reg_w(gspca_dev, TP6800_R5D_DEMOSAIC_CFG, val);
} else {
- val = (sd->ctrls[SHARPNESS].val << 5) | 0x08;
+ val = (val << 5) | 0x08;
reg_w(gspca_dev, 0x59, val);
}
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN))
- return;
- if (sd->ctrls[AUTOGAIN].val) {
- sd->ag_cnt = AG_CNT_START;
- gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
- } else {
- sd->ag_cnt = -1;
- gspca_dev->ctrl_inac &= ~((1 << EXPOSURE) | (1 << GAIN));
- }
+ sd->ag_cnt = val ? AG_CNT_START : -1;
}
/* set the resolution for sensor cx0342 */
static void set_resolution(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
+
reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
if (gspca_dev->width == 320) {
reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
@@ -3926,8 +3904,9 @@ static void set_resolution(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01);
bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342],
ARRAY_SIZE(color_gain[0]));
- setgamma(gspca_dev);
- setquality(gspca_dev);
+ setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
+ if (sd->sensor == SENSOR_SOI763A)
+ setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
}
/* convert the frame rate to a tp68x0 value */
@@ -3963,7 +3942,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
return i;
}
-static void setframerate(struct gspca_dev *gspca_dev)
+static void setframerate(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 fr_idx;
@@ -3974,7 +3953,7 @@ static void setframerate(struct gspca_dev *gspca_dev)
reg_r(gspca_dev, 0x7b);
reg_w(gspca_dev, 0x7b,
sd->sensor == SENSOR_CX0342 ? 0x10 : 0x90);
- if (sd->ctrls[EXPOSURE].val >= 128)
+ if (val >= 128)
fr_idx = 0xf0; /* lower frame rate */
}
@@ -3984,43 +3963,43 @@ static void setframerate(struct gspca_dev *gspca_dev)
i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
}
-static void setrgain(struct gspca_dev *gspca_dev)
+static void setrgain(struct gspca_dev *gspca_dev, s32 rgain)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int rgain;
-
- rgain = sd->ctrls[RGAIN].val;
i2c_w(gspca_dev, CX0342_RAW_RGAIN_H, rgain >> 8);
i2c_w(gspca_dev, CX0342_RAW_RGAIN_L, rgain);
i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80);
}
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_setgain(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
+ s32 val = gspca_dev->gain->val;
if (sd->sensor == SENSOR_CX0342) {
- sd->ctrls[BGAIN].val = sd->ctrls[BGAIN].val
- * val / sd->ctrls[GAIN].val;
- if (sd->ctrls[BGAIN].val > 4095)
- sd->ctrls[BGAIN].val = 4095;
- sd->ctrls[RGAIN].val = sd->ctrls[RGAIN].val
- * val / sd->ctrls[GAIN].val;
- if (sd->ctrls[RGAIN].val > 4095)
- sd->ctrls[RGAIN].val = 4095;
+ s32 old = gspca_dev->gain->cur.val ?
+ gspca_dev->gain->cur.val : 1;
+
+ sd->blue->val = sd->blue->val * val / old;
+ if (sd->blue->val > 4095)
+ sd->blue->val = 4095;
+ sd->red->val = sd->red->val * val / old;
+ if (sd->red->val > 4095)
+ sd->red->val = 4095;
+ }
+ if (gspca_dev->streaming) {
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev, gspca_dev->exposure->val,
+ gspca_dev->gain->val,
+ sd->blue->val, sd->red->val);
+ else
+ setexposure(gspca_dev, gspca_dev->exposure->val,
+ gspca_dev->gain->val, 0, 0);
}
- sd->ctrls[GAIN].val = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
return gspca_dev->usb_err;
}
-static void setbgain(struct gspca_dev *gspca_dev)
+static void setbgain(struct gspca_dev *gspca_dev, s32 bgain)
{
- struct sd *sd = (struct sd *) gspca_dev;
- int bgain;
-
- bgain = sd->ctrls[BGAIN].val;
i2c_w(gspca_dev, CX0342_RAW_BGAIN_H, bgain >> 8);
i2c_w(gspca_dev, CX0342_RAW_BGAIN_L, bgain);
i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x80);
@@ -4040,7 +4019,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
framerates : framerates_6810;
sd->framerate = 30; /* default: 30 fps */
- gspca_dev->cam.ctrls = sd->ctrls;
return 0;
}
@@ -4108,32 +4086,16 @@ static int sd_init(struct gspca_dev *gspca_dev)
}
if (sd->sensor == SENSOR_SOI763A) {
pr_info("Sensor soi763a\n");
- sd->ctrls[GAMMA].def = sd->bridge == BRIDGE_TP6800 ? 0 : 1;
- sd->ctrls[GAIN].max = 15;
- sd->ctrls[GAIN].def = 3;
- gspca_dev->ctrl_dis = (1 << RGAIN) | (1 << BGAIN);
if (sd->bridge == BRIDGE_TP6810) {
soi763a_6810_init(gspca_dev);
-#if AUTOGAIN_DEF
- gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
-#endif
- } else {
- gspca_dev->ctrl_dis |= (1 << AUTOGAIN);
}
} else {
pr_info("Sensor cx0342\n");
if (sd->bridge == BRIDGE_TP6810) {
cx0342_6810_init(gspca_dev);
-#if AUTOGAIN_DEF
- gspca_dev->ctrl_inac |= (1 << EXPOSURE) | (1 << GAIN);
-#endif
- } else {
- gspca_dev->ctrl_dis |= (1 << AUTOGAIN);
}
}
- if (sd->bridge == BRIDGE_TP6810)
- sd->ctrls[QUALITY].def = 0; /* auto quality */
set_dqt(gspca_dev, 0);
return 0;
}
@@ -4207,8 +4169,9 @@ static void set_led(struct gspca_dev *gspca_dev, int on)
static void cx0342_6800_start(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
static const struct cmd reg_init[] = {
-/*fixme: is this usefull?*/
+ /* fixme: is this useful? */
{TP6800_R17_GPIO_IO, 0x9f},
{TP6800_R16_GPIO_PD, 0x40},
{TP6800_R10_SIF_TYPE, 0x00}, /* i2c 8 bits */
@@ -4279,13 +4242,21 @@ static void cx0342_6800_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00);
i2c_w(gspca_dev, CX0342_EXPO_LINE_H, 0x00);
i2c_w(gspca_dev, CX0342_SYS_CTRL_0, 0x01);
- setexposure(gspca_dev);
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain),
+ v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ else
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0);
set_led(gspca_dev, 1);
set_resolution(gspca_dev);
}
static void cx0342_6810_start(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
static const struct cmd sensor_init_2[] = {
{CX0342_EXPO_LINE_L, 0x6f},
{CX0342_EXPO_LINE_H, 0x02},
@@ -4366,10 +4337,10 @@ static void cx0342_6810_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x07, 0x85);
reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */
}
- setgamma(gspca_dev);
+ setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
reg_w_buf(gspca_dev, tp6810_bridge_start,
ARRAY_SIZE(tp6810_bridge_start));
- setsharpness(gspca_dev);
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
bulk_w(gspca_dev, 0x03, color_gain[SENSOR_CX0342],
ARRAY_SIZE(color_gain[0]));
reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x87);
@@ -4380,11 +4351,12 @@ static void cx0342_6810_start(struct gspca_dev *gspca_dev)
i2c_w_buf(gspca_dev, sensor_init_5, ARRAY_SIZE(sensor_init_5));
set_led(gspca_dev, 1);
-/* setquality(gspca_dev); */
+/* setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); */
}
static void soi763a_6800_start(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
static const struct cmd reg_init[] = {
{TP6800_R79_QUALITY, 0x04},
{TP6800_R79_QUALITY, 0x01},
@@ -4484,19 +4456,28 @@ static void soi763a_6800_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, TP6800_R5C_EDGE_THRLD, 0x10);
reg_w(gspca_dev, TP6800_R54_DARK_CFG, 0x00);
- setsharpness(gspca_dev);
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A],
ARRAY_SIZE(color_gain[0]));
set_led(gspca_dev, 1);
- setexposure(gspca_dev);
- setquality(gspca_dev);
- setgamma(gspca_dev);
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain),
+ v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ else
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0);
+ if (sd->sensor == SENSOR_SOI763A)
+ setquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual));
+ setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
}
static void soi763a_6810_start(struct gspca_dev *gspca_dev)
{
+ struct sd *sd = (struct sd *) gspca_dev;
static const struct cmd bridge_init_2[] = {
{TP6800_R7A_BLK_THRLD, 0x00},
{TP6800_R79_QUALITY, 0x04},
@@ -4520,7 +4501,14 @@ static void soi763a_6810_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x22, gspca_dev->alt);
bulk_w(gspca_dev, 0x03, color_null, sizeof color_null);
reg_w(gspca_dev, 0x59, 0x40);
- setexposure(gspca_dev);
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain),
+ v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ else
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0);
reg_w_buf(gspca_dev, bridge_init_2, ARRAY_SIZE(bridge_init_2));
reg_w_buf(gspca_dev, tp6810_ov_init_common,
ARRAY_SIZE(tp6810_ov_init_common));
@@ -4534,7 +4522,7 @@ static void soi763a_6810_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x07, 0x85);
reg_w(gspca_dev, TP6800_R78_FORMAT, 0x01); /* qvga */
}
- setgamma(gspca_dev);
+ setgamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
reg_w_buf(gspca_dev, tp6810_bridge_start,
ARRAY_SIZE(tp6810_bridge_start));
@@ -4545,12 +4533,19 @@ static void soi763a_6810_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x00, 0x00);
- setsharpness(gspca_dev);
+ setsharpness(gspca_dev, v4l2_ctrl_g_ctrl(sd->sharpness));
bulk_w(gspca_dev, 0x03, color_gain[SENSOR_SOI763A],
ARRAY_SIZE(color_gain[0]));
set_led(gspca_dev, 1);
reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0xf0);
- setexposure(gspca_dev);
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain),
+ v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ else
+ setexposure(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0);
reg_w_buf(gspca_dev, bridge_init_6, ARRAY_SIZE(bridge_init_6));
}
@@ -4576,12 +4571,25 @@ static int sd_start(struct gspca_dev *gspca_dev)
reg_w(gspca_dev, 0x80, 0x03);
reg_w(gspca_dev, 0x82, gspca_dev->curr_mode ? 0x0a : 0x0e);
- setexposure(gspca_dev);
- setquality(gspca_dev);
- setautogain(gspca_dev);
+ if (sd->sensor == SENSOR_CX0342)
+ setexposure(gspca_dev,
+ v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain),
+ v4l2_ctrl_g_ctrl(sd->blue),
+ v4l2_ctrl_g_ctrl(sd->red));
+ else
+ setexposure(gspca_dev,
+ v4l2_ctrl_g_ctrl(gspca_dev->exposure),
+ v4l2_ctrl_g_ctrl(gspca_dev->gain), 0, 0);
+ if (sd->sensor == SENSOR_SOI763A)
+ setquality(gspca_dev,
+ v4l2_ctrl_g_ctrl(sd->jpegqual));
+ if (sd->bridge == BRIDGE_TP6810)
+ setautogain(gspca_dev,
+ v4l2_ctrl_g_ctrl(gspca_dev->autogain));
}
- setframerate(gspca_dev);
+ setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
return gspca_dev->usb_err;
}
@@ -4672,12 +4680,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
}
}
-/* -- do autogain -- */
-/* gain setting is done in setexposure() for tp6810 */
-static void setgain(struct gspca_dev *gspca_dev) {}
-#define WANT_REGULAR_AUTOGAIN
-#include "autogain_functions.h"
-
static void sd_dq_callback(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -4739,17 +4741,19 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
luma /= 4;
reg_w(gspca_dev, 0x7d, 0x00);
- expo = sd->ctrls[EXPOSURE].val;
- ret = auto_gain_n_exposure(gspca_dev, luma,
+ expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+ ret = gspca_expo_autogain(gspca_dev, luma,
60, /* desired luma */
6, /* dead zone */
2, /* gain knee */
70); /* expo knee */
sd->ag_cnt = AG_CNT_START;
if (sd->bridge == BRIDGE_TP6810) {
- if ((expo >= 128 && sd->ctrls[EXPOSURE].val < 128)
- || (expo < 128 && sd->ctrls[EXPOSURE].val >= 128))
- setframerate(gspca_dev);
+ int new_expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+
+ if ((expo >= 128 && new_expo < 128)
+ || (expo < 128 && new_expo >= 128))
+ setframerate(gspca_dev, new_expo);
}
break;
}
@@ -4789,7 +4793,7 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
sd->framerate = tpf->denominator / tpf->numerator;
if (gspca_dev->streaming)
- setframerate(gspca_dev);
+ setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
/* Return the actual framerate */
i = get_fr_idx(gspca_dev);
@@ -4806,12 +4810,10 @@ static int sd_set_jcomp(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
- if (sd->sensor == SENSOR_SOI763A)
- jpeg_set_qual(sd->jpeg_hdr, jcomp->quality);
-/* else
- fixme: TODO
-*/
- return gspca_dev->usb_err;
+ if (sd->sensor != SENSOR_SOI763A)
+ return -ENOTTY;
+ v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
+ return 0;
}
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
@@ -4819,118 +4821,109 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
+ if (sd->sensor != SENSOR_SOI763A)
+ return -ENOTTY;
memset(jcomp, 0, sizeof *jcomp);
- jcomp->quality = jpeg_q[sd->quality];
+ jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT
| V4L2_JPEG_MARKER_DQT;
return 0;
}
-static struct ctrl sd_ctrls[NCTRLS] = {
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0x01,
- .maximum = 0xdc,
- .step = 1,
- .default_value = 0x4e,
- },
- .set_control = setexposure
- },
-[QUALITY] = {
- {
- .id = V4L2_CID_PRIVATE_BASE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Compression quality",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
- .default_value = 13,
- },
- .set_control = setquality
- },
-[RGAIN] = {
- {
- .id = V4L2_CID_RED_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Red balance",
- .minimum = 0,
- .maximum = 4095,
- .step = 1,
- .default_value = 256,
- },
- .set_control = setrgain
- },
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 4095,
- .step = 1,
- .default_value = 256,
- },
- .set = sd_setgain
- },
-[BGAIN] = {
- {
- .id = V4L2_CID_BLUE_BALANCE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Blue balance",
- .minimum = 0,
- .maximum = 4095,
- .step = 1,
- .default_value = 256,
- },
- .set_control = setbgain
- },
-[SHARPNESS] = {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 2,
- },
- .set_control = setsharpness
- },
-[GAMMA] = {
- {
- .id = V4L2_CID_GAMMA,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gamma",
- .minimum = 0,
- .maximum = NGAMMA - 1,
- .step = 1,
- .default_value = 1,
- },
- .set_control = setgamma
- },
-[AUTOGAIN] = {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Auto Gain",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
- .default_value = AUTOGAIN_DEF
- },
- .set_control = setautogain
- },
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAMMA:
+ setgamma(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BLUE_BALANCE:
+ setbgain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_RED_BALANCE:
+ setrgain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ sd_setgain(gspca_dev);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ if (ctrl->val)
+ break;
+ sd_setgain(gspca_dev);
+ break;
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ jpeg_set_qual(sd->jpeg_hdr, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
};
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 1, 0xdc, 1, 0x4e);
+ if (sd->sensor == SENSOR_CX0342) {
+ sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_RED_BALANCE, 0, 4095, 1, 256);
+ sd->blue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BLUE_BALANCE, 0, 4095, 1, 256);
+ }
+ if (sd->sensor == SENSOR_SOI763A)
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 15, 1, 3);
+ else
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 4095, 1, 256);
+ sd->sharpness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 3, 1, 2);
+ sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAMMA, 0, NGAMMA - 1, 1,
+ (sd->sensor == SENSOR_SOI763A &&
+ sd->bridge == BRIDGE_TP6800) ? 0 : 1);
+ if (sd->bridge == BRIDGE_TP6810)
+ gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (sd->sensor == SENSOR_SOI763A)
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 0, 15, 1, (sd->bridge == BRIDGE_TP6810) ? 0 : 13);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ if (gspca_dev->autogain)
+ v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
+ else
+ v4l2_ctrl_cluster(2, &gspca_dev->exposure);
+ return 0;
+}
+
static const struct sd_desc sd_desc = {
.name = KBUILD_MODNAME,
- .ctrls = sd_ctrls,
- .nctrls = NCTRLS,
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.isoc_init = sd_isoc_init,
.start = sd_start,
.stopN = sd_stopN,
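
The topro conversion above ties the manual exposure/gain controls to the autogain control with v4l2_ctrl_auto_cluster(), and the t613 code earlier does the same for AWB with a volatile cluster. A brief sketch of what the arguments mean, under the assumption of the common gspca control pointers used above (example_cluster_controls() is a hypothetical helper, not part of the patch):

#include "gspca.h"
#include <media/v4l2-ctrls.h>

/* isolates the clustering step from topro's sd_init_controls() */
static void example_cluster_controls(struct gspca_dev *gspca_dev)
{
	/*
	 * v4l2_ctrl_auto_cluster(n, &first, manual_val, set_volatile):
	 * 'first' points at the auto control; the n - 1 v4l2_ctrl
	 * pointers stored directly after it are the manual controls of
	 * the cluster.  While the auto control's value differs from
	 * manual_val the manual controls are flagged inactive, and if
	 * set_volatile is true their values are read back from the
	 * hardware through g_volatile_ctrl() while in auto mode (that
	 * is what the t613 AWB handling above relies on).
	 */
	if (gspca_dev->autogain)
		v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, false);
	else
		v4l2_ctrl_cluster(2, &gspca_dev->exposure);
}
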
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index c8922c5ffbf5..8591324a53e1 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -30,49 +30,9 @@ MODULE_LICENSE("GPL");
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
- __u16 exposure;
- __u16 gain;
-
__u8 packet;
};
-/* V4L2 controls supported by the driver */
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 1,
- .maximum = 0x18f,
- .step = 1,
-#define EXPOSURE_DEF 0x18f
- .default_value = EXPOSURE_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 0x7ff,
- .step = 1,
-#define GAIN_DEF 0x100
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-};
-
static const struct v4l2_pix_format sif_mode[] = {
{176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 176,
@@ -202,15 +162,12 @@ static void tv_8532WriteEEprom(struct gspca_dev *gspca_dev)
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
- struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
- sd->exposure = EXPOSURE_DEF;
- sd->gain = GAIN_DEF;
return 0;
}
@@ -241,23 +198,19 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, sd->exposure);
+ reg_w2(gspca_dev, R1C_AD_EXPOSE_TIMEL, val);
reg_w1(gspca_dev, R00_PART_CONTROL, LATENT_CHANGE | EXPO_CHANGE);
/* 0x84 */
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- reg_w2(gspca_dev, R20_GAIN_G1L, sd->gain);
- reg_w2(gspca_dev, R22_GAIN_RL, sd->gain);
- reg_w2(gspca_dev, R24_GAIN_BL, sd->gain);
- reg_w2(gspca_dev, R26_GAIN_G2L, sd->gain);
+ reg_w2(gspca_dev, R20_GAIN_G1L, val);
+ reg_w2(gspca_dev, R22_GAIN_RL, val);
+ reg_w2(gspca_dev, R24_GAIN_BL, val);
+ reg_w2(gspca_dev, R26_GAIN_G2L, val);
}
/* -- start the camera -- */
@@ -289,9 +242,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
tv_8532_setReg(gspca_dev);
- setexposure(gspca_dev);
- setgain(gspca_dev);
-
/************************************************/
reg_w1(gspca_dev, R31_UPD, 0x01); /* update registers */
msleep(200);
@@ -339,49 +289,55 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
data + gspca_dev->width + 5, gspca_dev->width);
}
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ if (!gspca_dev->streaming)
+ return 0;
- *val = sd->exposure;
- return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ setgain(gspca_dev, ctrl->val);
+ break;
+ }
+ return gspca_dev->usb_err;
}
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return 0;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 2);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 0x18f, 1, 0x18f);
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 0x7ff, 1, 0x100);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
@@ -415,6 +371,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
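
The vc032x changes that follow group hflip and vflip into a single cluster through an anonymous struct, so that both values arrive together when the control handler runs. A compact sketch of how such a cluster is typically wired up (the surrounding init and handler code is elided; sethvflip() is the helper shown further down in this diff):

struct sd {
	struct gspca_dev gspca_dev;	/* !! must be the first item */
	struct {			/* hvflip cluster */
		struct v4l2_ctrl *hflip;
		struct v4l2_ctrl *vflip;
	};
};

/*
 * After both controls are created in sd_init_controls(), the two
 * consecutive pointers are tied together with:
 *	v4l2_ctrl_cluster(2, &sd->hflip);
 * From then on, changing either control invokes s_ctrl() once for the
 * cluster with both ->val fields up to date, so one call programs the
 * sensor:
 *	case V4L2_CID_HFLIP:
 *		sethvflip(gspca_dev, sd->hflip->val, sd->vflip->val);
 *		break;
 */
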
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 208f6b2d512a..f21fd1677c38 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -33,18 +33,10 @@ MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
-
- u8 brightness;
- u8 contrast;
- u8 colors;
- u8 hflip;
- u8 vflip;
- u8 lightfreq;
- s8 sharpness;
- u16 exposure;
- u8 gain;
- u8 autogain;
- u8 backlight;
+ struct { /* hvflip cluster */
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ };
u8 image_offset;
@@ -73,252 +65,6 @@ enum sensors {
NSENSORS
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setbacklight(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbacklight(struct gspca_dev *gspca_dev, __s32 *val);
-
-static const struct ctrl sd_ctrls[] = {
-#define BRIGHTNESS_IDX 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define BRIGHTNESS_DEF 128
- .default_value = BRIGHTNESS_DEF,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define CONTRAST_IDX 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Contrast",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
-#define CONTRAST_DEF 127
- .default_value = CONTRAST_DEF,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define COLORS_IDX 2
- {
- {
- .id = V4L2_CID_SATURATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Saturation",
- .minimum = 1,
- .maximum = 127,
- .step = 1,
-#define COLOR_DEF 63
- .default_value = COLOR_DEF,
- },
- .set = sd_setcolors,
- .get = sd_getcolors,
- },
-/* next 2 controls work with some sensors only */
-#define HFLIP_IDX 3
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define HFLIP_DEF 0
- .default_value = HFLIP_DEF,
- },
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
-#define VFLIP_IDX 4
- {
- {
- .id = V4L2_CID_VFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Vflip",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define VFLIP_DEF 0
- .default_value = VFLIP_DEF,
- },
- .set = sd_setvflip,
- .get = sd_getvflip,
- },
-#define LIGHTFREQ_IDX 5
- {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .type = V4L2_CTRL_TYPE_MENU,
- .name = "Light frequency filter",
- .minimum = 0,
- .maximum = 2, /* 0: No, 1: 50Hz, 2:60Hz */
- .step = 1,
-#define FREQ_DEF 1
- .default_value = FREQ_DEF,
- },
- .set = sd_setfreq,
- .get = sd_getfreq,
- },
-#define SHARPNESS_IDX 6
- {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = -1,
- .maximum = 2,
- .step = 1,
-#define SHARPNESS_DEF -1
- .default_value = SHARPNESS_DEF,
- },
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
-#define GAIN_IDX 7
- {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 78,
- .step = 1,
-#define GAIN_DEF 0
- .default_value = GAIN_DEF,
- },
- .set = sd_setgain,
- .get = sd_getgain,
- },
-#define EXPOSURE_IDX 8
- {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
-#define EXPOSURE_DEF 450
- .minimum = 0,
- .maximum = 4095,
- .step = 1,
- .default_value = EXPOSURE_DEF,
- },
- .set = sd_setexposure,
- .get = sd_getexposure,
- },
-#define AUTOGAIN_IDX 9
- {
- {
- .id = V4L2_CID_AUTOGAIN,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Automatic Gain and Exposure",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define AUTOGAIN_DEF 1
- .default_value = AUTOGAIN_DEF,
- },
- .set = sd_setautogain,
- .get = sd_getautogain,
- },
-#define BACKLIGHT_IDX 10
- {
- {
- .id = V4L2_CID_BACKLIGHT_COMPENSATION,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Backlight Compensation",
- .minimum = 0,
- .maximum = 15,
- .step = 1,
-#define BACKLIGHT_DEF 15
- .default_value = BACKLIGHT_DEF,
- },
- .set = sd_setbacklight,
- .get = sd_getbacklight,
- },
-};
-
-/* table of the disabled controls */
-static u32 ctrl_dis[NSENSORS] = {
- [SENSOR_HV7131R] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_MI0360] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_MI1310_SOC] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_MI1320] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_MI1320_SOC] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_OV7660] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX) | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_OV7670] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_PO1200] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << LIGHTFREQ_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_PO3130NC] =
- (1 << BRIGHTNESS_IDX) | (1 << CONTRAST_IDX) | (1 << COLORS_IDX)
- | (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX)
- | (1 << SHARPNESS_IDX)
- | (1 << GAIN_IDX) | (1 << EXPOSURE_IDX)
- | (1 << AUTOGAIN_IDX) | (1 << BACKLIGHT_IDX),
- [SENSOR_POxxxx] =
- (1 << HFLIP_IDX) | (1 << VFLIP_IDX) | (1 << LIGHTFREQ_IDX),
-};
static const struct v4l2_pix_format vc0321_mode[] = {
{320, 240, V4L2_PIX_FMT_YVYU, V4L2_FIELD_NONE,
@@ -3405,18 +3151,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
(id->idProduct == 0x0892 || id->idProduct == 0x0896))
sd->sensor = SENSOR_POxxxx; /* no probe */
- sd->brightness = BRIGHTNESS_DEF;
- sd->contrast = CONTRAST_DEF;
- sd->colors = COLOR_DEF;
- sd->hflip = HFLIP_DEF;
- sd->vflip = VFLIP_DEF;
- sd->lightfreq = FREQ_DEF;
- sd->sharpness = SHARPNESS_DEF;
- sd->gain = GAIN_DEF;
- sd->exposure = EXPOSURE_DEF;
- sd->autogain = AUTOGAIN_DEF;
- sd->backlight = BACKLIGHT_DEF;
-
return 0;
}
@@ -3512,7 +3246,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
}
}
cam->npkt = npkt[sd->sensor];
- gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
if (sd->sensor == SENSOR_OV7670)
sd->flags |= FL_HFLIP | FL_VFLIP;
@@ -3534,14 +3267,11 @@ static int sd_init(struct gspca_dev *gspca_dev)
return gspca_dev->usb_err;
}
-static void setbrightness(struct gspca_dev *gspca_dev)
+static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 data;
- if (gspca_dev->ctrl_dis & (1 << BRIGHTNESS_IDX))
- return;
- data = sd->brightness;
+ data = val;
if (data >= 0x80)
data &= 0x7f;
else
@@ -3549,36 +3279,27 @@ static void setbrightness(struct gspca_dev *gspca_dev)
i2c_write(gspca_dev, 0x98, &data, 1);
}
-static void setcontrast(struct gspca_dev *gspca_dev)
+static void setcontrast(struct gspca_dev *gspca_dev, u8 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (gspca_dev->ctrl_dis & (1 << CONTRAST_IDX))
- return;
- i2c_write(gspca_dev, 0x99, &sd->contrast, 1);
+ i2c_write(gspca_dev, 0x99, &val, 1);
}
-static void setcolors(struct gspca_dev *gspca_dev)
+static void setcolors(struct gspca_dev *gspca_dev, u8 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 data;
- if (gspca_dev->ctrl_dis & (1 << COLORS_IDX))
- return;
- data = sd->colors - (sd->colors >> 3) - 1;
+ data = val - (val >> 3) - 1;
i2c_write(gspca_dev, 0x94, &data, 1);
- i2c_write(gspca_dev, 0x95, &sd->colors, 1);
+ i2c_write(gspca_dev, 0x95, &val, 1);
}
-static void sethvflip(struct gspca_dev *gspca_dev)
+static void sethvflip(struct gspca_dev *gspca_dev, bool hflip, bool vflip)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 data[2], hflip, vflip;
+ u8 data[2];
- hflip = sd->hflip;
if (sd->flags & FL_HFLIP)
hflip = !hflip;
- vflip = sd->vflip;
if (sd->flags & FL_VFLIP)
vflip = !vflip;
switch (sd->sensor) {
@@ -3610,7 +3331,7 @@ static void sethvflip(struct gspca_dev *gspca_dev)
}
}
-static void setlightfreq(struct gspca_dev *gspca_dev)
+static void setlightfreq(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
static const u8 (*ov7660_freq_tb[3])[4] =
@@ -3618,10 +3339,10 @@ static void setlightfreq(struct gspca_dev *gspca_dev)
if (sd->sensor != SENSOR_OV7660)
return;
- usb_exchange(gspca_dev, ov7660_freq_tb[sd->lightfreq]);
+ usb_exchange(gspca_dev, ov7660_freq_tb[val]);
}
-static void setsharpness(struct gspca_dev *gspca_dev)
+static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 data;
@@ -3630,51 +3351,41 @@ static void setsharpness(struct gspca_dev *gspca_dev)
case SENSOR_PO1200:
data = 0;
i2c_write(gspca_dev, 0x03, &data, 1);
- if (sd->sharpness < 0)
+ if (val < 0)
data = 0x6a;
else
- data = 0xb5 + sd->sharpness * 3;
+ data = 0xb5 + val * 3;
i2c_write(gspca_dev, 0x61, &data, 1);
break;
case SENSOR_POxxxx:
- if (sd->sharpness < 0)
+ if (val < 0)
data = 0x7e; /* def = max */
else
- data = 0x60 + sd->sharpness * 0x0f;
+ data = 0x60 + val * 0x0f;
i2c_write(gspca_dev, 0x59, &data, 1);
break;
}
}
-static void setgain(struct gspca_dev *gspca_dev)
+static void setgain(struct gspca_dev *gspca_dev, u8 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- if (gspca_dev->ctrl_dis & (1 << GAIN_IDX))
- return;
- i2c_write(gspca_dev, 0x15, &sd->gain, 1);
+ i2c_write(gspca_dev, 0x15, &val, 1);
}
-static void setexposure(struct gspca_dev *gspca_dev)
+static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u8 data;
- if (gspca_dev->ctrl_dis & (1 << EXPOSURE_IDX))
- return;
- data = sd->exposure >> 8;
+ data = val >> 8;
i2c_write(gspca_dev, 0x1a, &data, 1);
- data = sd->exposure;
+ data = val;
i2c_write(gspca_dev, 0x1b, &data, 1);
}
-static void setautogain(struct gspca_dev *gspca_dev)
+static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
static const u8 data[2] = {0x28, 0x3c};
- if (gspca_dev->ctrl_dis & (1 << AUTOGAIN_IDX))
- return;
- i2c_write(gspca_dev, 0xd1, &data[sd->autogain], 1);
+ i2c_write(gspca_dev, 0xd1, &data[val], 1);
}
static void setgamma(struct gspca_dev *gspca_dev)
@@ -3683,30 +3394,29 @@ static void setgamma(struct gspca_dev *gspca_dev)
usb_exchange(gspca_dev, poxxxx_gamma);
}
-static void setbacklight(struct gspca_dev *gspca_dev)
+static void setbacklight(struct gspca_dev *gspca_dev, s32 val)
{
- struct sd *sd = (struct sd *) gspca_dev;
u16 v;
u8 data;
- data = (sd->backlight << 4) | 0x0f;
+ data = (val << 4) | 0x0f;
i2c_write(gspca_dev, 0xaa, &data, 1);
- v = 613 + 12 * sd->backlight;
+ v = 613 + 12 * val;
data = v >> 8;
i2c_write(gspca_dev, 0xc4, &data, 1);
data = v;
i2c_write(gspca_dev, 0xc5, &data, 1);
- v = 1093 - 12 * sd->backlight;
+ v = 1093 - 12 * val;
data = v >> 8;
i2c_write(gspca_dev, 0xc6, &data, 1);
data = v;
i2c_write(gspca_dev, 0xc7, &data, 1);
- v = 342 + 9 * sd->backlight;
+ v = 342 + 9 * val;
data = v >> 8;
i2c_write(gspca_dev, 0xc8, &data, 1);
data = v;
i2c_write(gspca_dev, 0xc9, &data, 1);
- v = 702 - 9 * sd->backlight;
+ v = 702 - 9 * val;
data = v >> 8;
i2c_write(gspca_dev, 0xca, &data, 1);
data = v;
@@ -3833,14 +3543,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
/* case SENSOR_POxxxx: */
usb_exchange(gspca_dev, poxxxx_init_common);
setgamma(gspca_dev);
- setbacklight(gspca_dev);
- setbrightness(gspca_dev);
- setcontrast(gspca_dev);
- setcolors(gspca_dev);
- setsharpness(gspca_dev);
- setautogain(gspca_dev);
- setexposure(gspca_dev);
- setgain(gspca_dev);
usb_exchange(gspca_dev, poxxxx_init_start_3);
if (mode)
init = poxxxx_initQVGA;
@@ -3873,8 +3575,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
msleep(100);
- sethvflip(gspca_dev);
- setlightfreq(gspca_dev);
}
switch (sd->sensor) {
case SENSOR_OV7670:
@@ -3960,233 +3660,152 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming)
- setbrightness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming)
- setcontrast(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
- return 0;
-}
-
-static int sd_setcolors(struct gspca_dev *gspca_dev, __s32 val)
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->colors = val;
- if (gspca_dev->streaming)
- setcolors(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getcolors(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->colors;
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
- return 0;
-}
-
-static int sd_setvflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->vflip = val;
- if (gspca_dev->streaming)
- sethvflip(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getvflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
- *val = sd->vflip;
- return 0;
-}
+ gspca_dev->usb_err = 0;
-static int sd_setfreq(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->lightfreq = val;
- if (gspca_dev->streaming)
- setlightfreq(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getfreq(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->lightfreq;
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming)
- setsharpness(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
- return 0;
-}
-
-static int sd_setgain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->gain = val;
- if (gspca_dev->streaming)
- setgain(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getgain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->gain;
- return 0;
-}
-
-static int sd_setexposure(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->exposure = val;
- if (gspca_dev->streaming)
- setexposure(gspca_dev);
- return gspca_dev->usb_err;
-}
-
-static int sd_getexposure(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->exposure;
- return 0;
-}
-
-static int sd_setautogain(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->autogain = val;
- if (gspca_dev->streaming)
- setautogain(gspca_dev);
+ if (!gspca_dev->streaming && ctrl->id != V4L2_CID_POWER_LINE_FREQUENCY)
+ return 0;
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ setbrightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ setcontrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ setcolors(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ sethvflip(gspca_dev, sd->hflip->val, sd->vflip->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ setsharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_AUTOGAIN:
+ setautogain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_GAIN:
+ setgain(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ setexposure(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BACKLIGHT_COMPENSATION:
+ setbacklight(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_POWER_LINE_FREQUENCY:
+ setlightfreq(gspca_dev, ctrl->val);
+ break;
+ }
return gspca_dev->usb_err;
}
-static int sd_getautogain(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->autogain;
- return 0;
-}
-
-static int sd_setbacklight(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->backlight = val;
- if (gspca_dev->streaming)
- setbacklight(gspca_dev);
-
- return gspca_dev->usb_err;
-}
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
-static int sd_getbacklight(struct gspca_dev *gspca_dev, __s32 *val)
+static int sd_init_controls(struct gspca_dev *gspca_dev)
{
- struct sd *sd = (struct sd *) gspca_dev;
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+ bool has_brightness = false;
+ bool has_contrast = false;
+ bool has_sat = false;
+ bool has_hvflip = false;
+ bool has_freq = false;
+ bool has_backlight = false;
+ bool has_exposure = false;
+ bool has_autogain = false;
+ bool has_gain = false;
+ bool has_sharpness = false;
- *val = sd->backlight;
- return 0;
-}
-
-static int sd_querymenu(struct gspca_dev *gspca_dev,
- struct v4l2_querymenu *menu)
-{
- static const char *freq_nm[3] = {"NoFliker", "50 Hz", "60 Hz"};
+ switch (sd->sensor) {
+ case SENSOR_HV7131R:
+ case SENSOR_MI0360:
+ case SENSOR_PO3130NC:
+ break;
+ case SENSOR_MI1310_SOC:
+ case SENSOR_MI1320:
+ case SENSOR_MI1320_SOC:
+ case SENSOR_OV7660:
+ has_hvflip = true;
+ break;
+ case SENSOR_OV7670:
+ has_hvflip = has_freq = true;
+ break;
+ case SENSOR_PO1200:
+ has_hvflip = has_sharpness = true;
+ break;
+ case SENSOR_POxxxx:
+ has_brightness = has_contrast = has_sat = has_backlight =
+ has_exposure = has_autogain = has_gain =
+ has_sharpness = true;
+ break;
+ }
- switch (menu->id) {
- case V4L2_CID_POWER_LINE_FREQUENCY:
- if (menu->index >= ARRAY_SIZE(freq_nm))
- break;
- strcpy((char *) menu->name, freq_nm[menu->index]);
- return 0;
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 8);
+ if (has_brightness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
+ if (has_contrast)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 127);
+ if (has_sat)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SATURATION, 1, 127, 1, 63);
+ if (has_hvflip) {
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
}
- return -EINVAL;
+ if (has_sharpness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, -1, 2, 1, -1);
+ if (has_freq)
+ v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_POWER_LINE_FREQUENCY,
+ V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
+ if (has_autogain)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ if (has_gain)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_GAIN, 0, 78, 1, 0);
+ if (has_exposure)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_EXPOSURE, 0, 4095, 1, 450);
+ if (has_backlight)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BACKLIGHT_COMPENSATION, 0, 15, 1, 15);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ if (sd->hflip)
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ return 0;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
+ .init_controls = sd_init_controls,
.config = sd_config,
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
- .querymenu = sd_querymenu,
};
/* -- module initialisation -- */
@@ -4227,6 +3846,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 15a30f7a4b2a..b1a64b912666 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -44,17 +44,10 @@ MODULE_DESCRIPTION("GSPCA ViCam USB Camera Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(VICAM_FIRMWARE);
-enum e_ctrl {
- GAIN,
- EXPOSURE,
- NCTRL /* number of controls */
-};
-
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct work_struct work_struct;
struct workqueue_struct *work_thread;
- struct gspca_ctrl ctrls[NCTRL];
};
/* The vicam sensor has a resolution of 512 x 244, with I believe square
@@ -86,31 +79,6 @@ static struct v4l2_pix_format vicam_mode[] = {
.colorspace = V4L2_COLORSPACE_SRGB,},
};
-static const struct ctrl sd_ctrls[] = {
-[GAIN] = {
- {
- .id = V4L2_CID_GAIN,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Gain",
- .minimum = 0,
- .maximum = 255,
- .step = 1,
- .default_value = 200,
- },
- },
-[EXPOSURE] = {
- {
- .id = V4L2_CID_EXPOSURE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Exposure",
- .minimum = 0,
- .maximum = 2047,
- .step = 1,
- .default_value = 256,
- },
- },
-};
-
static int vicam_control_msg(struct gspca_dev *gspca_dev, u8 request,
u16 value, u16 index, u8 *data, u16 len)
{
@@ -146,12 +114,13 @@ static int vicam_set_camera_power(struct gspca_dev *gspca_dev, int state)
*/
static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
{
- struct sd *sd = (struct sd *)gspca_dev;
int ret, unscaled_height, act_len = 0;
u8 *req_data = gspca_dev->usb_buf;
+ s32 expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
+ s32 gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);
memset(req_data, 0, 16);
- req_data[0] = sd->ctrls[GAIN].val;
+ req_data[0] = gain;
if (gspca_dev->width == 256)
req_data[1] |= 0x01; /* low nibble x-scale */
if (gspca_dev->height <= 122) {
@@ -167,9 +136,9 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
else /* Up to 244 lines with req_data[3] == 0x08 */
req_data[3] = 0x08; /* vend? */
- if (sd->ctrls[EXPOSURE].val < 256) {
+ if (expo < 256) {
/* Frame rate maxed out, use partial frame expo time */
- req_data[4] = 255 - sd->ctrls[EXPOSURE].val;
+ req_data[4] = 255 - expo;
req_data[5] = 0x00;
req_data[6] = 0x00;
req_data[7] = 0x01;
@@ -177,8 +146,8 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
/* Modify frame rate */
req_data[4] = 0x00;
req_data[5] = 0x00;
- req_data[6] = sd->ctrls[EXPOSURE].val & 0xFF;
- req_data[7] = sd->ctrls[EXPOSURE].val >> 8;
+ req_data[6] = expo & 0xFF;
+ req_data[7] = expo >> 8;
}
req_data[8] = ((244 - unscaled_height) / 2) & ~0x01; /* vstart */
/* bytes 9-15 do not seem to affect exposure or image quality */
@@ -260,7 +229,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->bulk_size = 64;
cam->cam_mode = vicam_mode;
cam->nmodes = ARRAY_SIZE(vicam_mode);
- cam->ctrls = sd->ctrls;
INIT_WORK(&sd->work_struct, vicam_dostream);
@@ -335,6 +303,24 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
vicam_set_camera_power(gspca_dev, 0);
}
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 2);
+ gspca_dev->exposure = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_EXPOSURE, 0, 2047, 1, 256);
+ gspca_dev->gain = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_GAIN, 0, 255, 1, 200);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
+}
+
/* Table of supported USB devices */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x04c1, 0x009d)},
@@ -347,10 +333,9 @@ MODULE_DEVICE_TABLE(usb, device_table);
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stop0 = sd_stop0,
};
@@ -373,6 +358,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
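
The vicam conversion above also shows the second half of the gspca control rework: instead of caching control values in struct sd, the subdriver keeps the struct v4l2_ctrl pointers that gspca_dev provides (gain, exposure) and the USB worker reads the current values with v4l2_ctrl_g_ctrl(), which takes the control lock so the read cannot race with a concurrent VIDIOC_S_CTRL. A minimal sketch of that read path follows; my_worker_read_controls() is a hypothetical helper, not part of the patch.

/* Sketch only -- my_worker_read_controls() is hypothetical; the
 * gspca_dev->exposure/gain pointers and v4l2_ctrl_g_ctrl() are the real
 * framework pieces used by vicam_read_frame() above.
 * Needs <media/v4l2-ctrls.h> and the gspca.h driver header. */
static void my_worker_read_controls(struct gspca_dev *gspca_dev, u8 req[16])
{
	/* v4l2_ctrl_g_ctrl() locks the handler, so these reads return a
	 * consistent value even while user space changes the controls. */
	s32 expo = v4l2_ctrl_g_ctrl(gspca_dev->exposure);
	s32 gain = v4l2_ctrl_g_ctrl(gspca_dev->gain);

	req[0] = gain;		/* overall gain byte */
	req[6] = expo & 0xff;	/* exposure low byte */
	req[7] = expo >> 8;	/* exposure high byte */
}
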
diff --git a/drivers/media/video/gspca/w996Xcf.c b/drivers/media/video/gspca/w996Xcf.c
index 27d2cef0692a..9e3a909e0a00 100644
--- a/drivers/media/video/gspca/w996Xcf.c
+++ b/drivers/media/video/gspca/w996Xcf.c
@@ -404,9 +404,14 @@ static void w9968cf_set_crop_window(struct sd *sd)
}
if (sd->sensor == SEN_OV7620) {
- /* Sigh, this is dependend on the clock / framerate changes
- made by the frequency control, sick. */
- if (sd->ctrls[FREQ].val == 1) {
+ /*
+	 * Sigh, this is dependent on the clock / framerate changes
+ * made by the frequency control, sick.
+ *
+ * Note we cannot use v4l2_ctrl_g_ctrl here, as we get called
+ * from ov519.c:setfreq() with the ctrl lock held!
+ */
+ if (sd->freq->val == 1) {
start_cropx = 277;
start_cropy = 37;
} else {
@@ -474,8 +479,9 @@ static void w9968cf_mode_init_regs(struct sd *sd)
/* We may get called multiple times (usb isoc bw negotiat.) */
jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
sd->gspca_dev.width, 0x22); /* JPEG 420 */
- jpeg_set_qual(sd->jpeg_hdr, sd->quality);
+ jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
w9968cf_upload_quantizationtables(sd);
+ v4l2_ctrl_grab(sd->jpegqual, true);
}
/* Video Capture Control Register */
@@ -514,6 +520,7 @@ static void w9968cf_mode_init_regs(struct sd *sd)
static void w9968cf_stop0(struct sd *sd)
{
+ v4l2_ctrl_grab(sd->jpegqual, false);
reg_w(sd, 0x39, 0x0000); /* disable JPEG encoder */
reg_w(sd, 0x16, 0x0000); /* stop video capture */
}
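
The jpegqual handling above relies on v4l2_ctrl_grab(): while the quantization tables are uploaded for streaming the control is grabbed, and w9968cf_stop0() releases it again. A grabbed control reports V4L2_CTRL_FLAG_GRABBED and attempts to set it fail with -EBUSY. A minimal sketch of the pairing, with hypothetical wrapper names (my_start/my_stop) used only for illustration:

/* Sketch only -- my_start()/my_stop() are hypothetical wrappers around
 * the v4l2_ctrl_grab() calls used above. Needs <media/v4l2-ctrls.h>. */
static void my_start(struct sd *sd)
{
	/* From here on, VIDIOC_S_CTRL on jpegqual returns -EBUSY and the
	 * control carries V4L2_CTRL_FLAG_GRABBED. */
	v4l2_ctrl_grab(sd->jpegqual, true);
}

static void my_stop(struct sd *sd)
{
	v4l2_ctrl_grab(sd->jpegqual, false);	/* writable again */
}
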
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index ecada178bceb..13b8d395d210 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -53,6 +53,7 @@ MODULE_PARM_DESC(rca_input,
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
+ struct v4l2_ctrl *lighting;
u8 model;
#define CIT_MODEL0 0 /* bcd version 0.01 cams ie the xvp-500 */
#define CIT_MODEL1 1 /* The model 1 - 4 nomenclature comes from the old */
@@ -65,127 +66,10 @@ struct sd {
u8 stop_on_control_change;
u8 sof_read;
u8 sof_len;
- u8 contrast;
- u8 brightness;
- u8 hue;
- u8 sharpness;
- u8 lighting;
- u8 hflip;
};
-/* V4L2 controls supported by the driver */
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val);
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val);
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val);
static void sd_stop0(struct gspca_dev *gspca_dev);
-static const struct ctrl sd_ctrls[] = {
-#define SD_BRIGHTNESS 0
- {
- {
- .id = V4L2_CID_BRIGHTNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Brightness",
- .minimum = 0,
- .maximum = 63,
- .step = 1,
-#define BRIGHTNESS_DEFAULT 32
- .default_value = BRIGHTNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setbrightness,
- .get = sd_getbrightness,
- },
-#define SD_CONTRAST 1
- {
- {
- .id = V4L2_CID_CONTRAST,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "contrast",
- .minimum = 0,
- .maximum = 20,
- .step = 1,
-#define CONTRAST_DEFAULT 10
- .default_value = CONTRAST_DEFAULT,
- .flags = 0,
- },
- .set = sd_setcontrast,
- .get = sd_getcontrast,
- },
-#define SD_HUE 2
- {
- {
- .id = V4L2_CID_HUE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Hue",
- .minimum = 0,
- .maximum = 127,
- .step = 1,
-#define HUE_DEFAULT 63
- .default_value = HUE_DEFAULT,
- .flags = 0,
- },
- .set = sd_sethue,
- .get = sd_gethue,
- },
-#define SD_SHARPNESS 3
- {
- {
- .id = V4L2_CID_SHARPNESS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Sharpness",
- .minimum = 0,
- .maximum = 6,
- .step = 1,
-#define SHARPNESS_DEFAULT 3
- .default_value = SHARPNESS_DEFAULT,
- .flags = 0,
- },
- .set = sd_setsharpness,
- .get = sd_getsharpness,
- },
-#define SD_LIGHTING 4
- {
- {
- .id = V4L2_CID_BACKLIGHT_COMPENSATION,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Lighting",
- .minimum = 0,
- .maximum = 2,
- .step = 1,
-#define LIGHTING_DEFAULT 1
- .default_value = LIGHTING_DEFAULT,
- .flags = 0,
- },
- .set = sd_setlighting,
- .get = sd_getlighting,
- },
-#define SD_HFLIP 5
- {
- {
- .id = V4L2_CID_HFLIP,
- .type = V4L2_CTRL_TYPE_BOOLEAN,
- .name = "Mirror",
- .minimum = 0,
- .maximum = 1,
- .step = 1,
-#define HFLIP_DEFAULT 0
- .default_value = HFLIP_DEFAULT,
- },
- .set = sd_sethflip,
- .get = sd_gethflip,
- },
-};
-
static const struct v4l2_pix_format cif_yuv_mode[] = {
{176, 144, V4L2_PIX_FMT_CIT_YYVYUY, V4L2_FIELD_NONE,
.bytesperline = 176,
@@ -995,56 +879,36 @@ static int sd_config(struct gspca_dev *gspca_dev,
case CIT_MODEL0:
cam->cam_mode = model0_mode;
cam->nmodes = ARRAY_SIZE(model0_mode);
- gspca_dev->ctrl_dis = ~((1 << SD_CONTRAST) | (1 << SD_HFLIP));
sd->sof_len = 4;
break;
case CIT_MODEL1:
cam->cam_mode = cif_yuv_mode;
cam->nmodes = ARRAY_SIZE(cif_yuv_mode);
- gspca_dev->ctrl_dis = (1 << SD_HUE) | (1 << SD_HFLIP);
sd->sof_len = 4;
break;
case CIT_MODEL2:
cam->cam_mode = model2_mode + 1; /* no 160x120 */
cam->nmodes = 3;
- gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
- (1 << SD_SHARPNESS) |
- (1 << SD_HFLIP);
break;
case CIT_MODEL3:
cam->cam_mode = vga_yuv_mode;
cam->nmodes = ARRAY_SIZE(vga_yuv_mode);
- gspca_dev->ctrl_dis = (1 << SD_HUE) |
- (1 << SD_LIGHTING) |
- (1 << SD_HFLIP);
sd->stop_on_control_change = 1;
sd->sof_len = 4;
break;
case CIT_MODEL4:
cam->cam_mode = model2_mode;
cam->nmodes = ARRAY_SIZE(model2_mode);
- gspca_dev->ctrl_dis = (1 << SD_CONTRAST) |
- (1 << SD_SHARPNESS) |
- (1 << SD_LIGHTING) |
- (1 << SD_HFLIP);
break;
case CIT_IBM_NETCAM_PRO:
cam->cam_mode = vga_yuv_mode;
cam->nmodes = 2; /* no 640 x 480 */
cam->input_flags = V4L2_IN_ST_VFLIP;
- gspca_dev->ctrl_dis = ~(1 << SD_CONTRAST);
sd->stop_on_control_change = 1;
sd->sof_len = 4;
break;
}
- sd->brightness = BRIGHTNESS_DEFAULT;
- sd->contrast = CONTRAST_DEFAULT;
- sd->hue = HUE_DEFAULT;
- sd->sharpness = SHARPNESS_DEFAULT;
- sd->lighting = LIGHTING_DEFAULT;
- sd->hflip = HFLIP_DEFAULT;
-
return 0;
}
@@ -1287,7 +1151,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
return 0;
}
-static int cit_set_brightness(struct gspca_dev *gspca_dev)
+static int cit_set_brightness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
int i;
@@ -1299,19 +1163,19 @@ static int cit_set_brightness(struct gspca_dev *gspca_dev)
break;
case CIT_MODEL1:
/* Model 1: Brightness range 0 - 63 */
- cit_Packet_Format1(gspca_dev, 0x0031, sd->brightness);
- cit_Packet_Format1(gspca_dev, 0x0032, sd->brightness);
- cit_Packet_Format1(gspca_dev, 0x0033, sd->brightness);
+ cit_Packet_Format1(gspca_dev, 0x0031, val);
+ cit_Packet_Format1(gspca_dev, 0x0032, val);
+ cit_Packet_Format1(gspca_dev, 0x0033, val);
break;
case CIT_MODEL2:
/* Model 2: Brightness range 0x60 - 0xee */
/* Scale 0 - 63 to 0x60 - 0xee */
- i = 0x60 + sd->brightness * 2254 / 1000;
+ i = 0x60 + val * 2254 / 1000;
cit_model2_Packet1(gspca_dev, 0x001a, i);
break;
case CIT_MODEL3:
/* Model 3: Brightness range 'i' in [0x0C..0x3F] */
- i = sd->brightness;
+ i = val;
if (i < 0x0c)
i = 0x0c;
cit_model3_Packet1(gspca_dev, 0x0036, i);
@@ -1319,7 +1183,7 @@ static int cit_set_brightness(struct gspca_dev *gspca_dev)
case CIT_MODEL4:
/* Model 4: Brightness range 'i' in [0x04..0xb4] */
/* Scale 0 - 63 to 0x04 - 0xb4 */
- i = 0x04 + sd->brightness * 2794 / 1000;
+ i = 0x04 + val * 2794 / 1000;
cit_model4_BrightnessPacket(gspca_dev, i);
break;
}
@@ -1327,7 +1191,7 @@ static int cit_set_brightness(struct gspca_dev *gspca_dev)
return 0;
}
-static int cit_set_contrast(struct gspca_dev *gspca_dev)
+static int cit_set_contrast(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1335,16 +1199,16 @@ static int cit_set_contrast(struct gspca_dev *gspca_dev)
case CIT_MODEL0: {
int i;
/* gain 0-15, 0-20 -> 0-15 */
- i = sd->contrast * 1000 / 1333;
+ i = val * 1000 / 1333;
cit_write_reg(gspca_dev, i, 0x0422);
/* gain 0-31, may not be lower then 0x0422, 0-20 -> 0-31 */
- i = sd->contrast * 2000 / 1333;
+ i = val * 2000 / 1333;
cit_write_reg(gspca_dev, i, 0x0423);
/* gain 0-127, may not be lower then 0x0423, 0-20 -> 0-63 */
- i = sd->contrast * 4000 / 1333;
+ i = val * 4000 / 1333;
cit_write_reg(gspca_dev, i, 0x0424);
/* gain 0-127, may not be lower then 0x0424, , 0-20 -> 0-127 */
- i = sd->contrast * 8000 / 1333;
+ i = val * 8000 / 1333;
cit_write_reg(gspca_dev, i, 0x0425);
break;
}
@@ -1355,7 +1219,7 @@ static int cit_set_contrast(struct gspca_dev *gspca_dev)
case CIT_MODEL1:
{
/* Scale 0 - 20 to 15 - 0 */
- int i, new_contrast = (20 - sd->contrast) * 1000 / 1333;
+ int i, new_contrast = (20 - val) * 1000 / 1333;
for (i = 0; i < cit_model1_ntries; i++) {
cit_Packet_Format1(gspca_dev, 0x0014, new_contrast);
cit_send_FF_04_02(gspca_dev);
@@ -1377,20 +1241,20 @@ static int cit_set_contrast(struct gspca_dev *gspca_dev)
{ 0x01, 0x0e, 0x16 },
{ 0x01, 0x10, 0x16 } /* Maximum */
};
- int i = sd->contrast / 3;
+ int i = val / 3;
cit_model3_Packet1(gspca_dev, 0x0067, cv[i].cv1);
cit_model3_Packet1(gspca_dev, 0x005b, cv[i].cv2);
cit_model3_Packet1(gspca_dev, 0x005c, cv[i].cv3);
break;
}
case CIT_IBM_NETCAM_PRO:
- cit_model3_Packet1(gspca_dev, 0x005b, sd->contrast + 1);
+ cit_model3_Packet1(gspca_dev, 0x005b, val + 1);
break;
}
return 0;
}
-static int cit_set_hue(struct gspca_dev *gspca_dev)
+static int cit_set_hue(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1401,7 +1265,7 @@ static int cit_set_hue(struct gspca_dev *gspca_dev)
/* No hue control for these models */
break;
case CIT_MODEL2:
- cit_model2_Packet1(gspca_dev, 0x0024, sd->hue);
+ cit_model2_Packet1(gspca_dev, 0x0024, val);
/* cit_model2_Packet1(gspca_dev, 0x0020, sat); */
break;
case CIT_MODEL3: {
@@ -1409,7 +1273,7 @@ static int cit_set_hue(struct gspca_dev *gspca_dev)
/* TESTME according to the ibmcam driver this does not work */
if (0) {
/* Scale 0 - 127 to 0x05 - 0x37 */
- int i = 0x05 + sd->hue * 1000 / 2540;
+ int i = 0x05 + val * 1000 / 2540;
cit_model3_Packet1(gspca_dev, 0x007e, i);
}
break;
@@ -1435,14 +1299,14 @@ static int cit_set_hue(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 160, 0x012e); /* Red gain */
cit_write_reg(gspca_dev, 160, 0x0130); /* Blue gain */
cit_write_reg(gspca_dev, 0x8a28, 0x0124);
- cit_write_reg(gspca_dev, sd->hue, 0x012d); /* Hue */
+ cit_write_reg(gspca_dev, val, 0x012d); /* Hue */
cit_write_reg(gspca_dev, 0xf545, 0x0124);
break;
}
return 0;
}
-static int cit_set_sharpness(struct gspca_dev *gspca_dev)
+static int cit_set_sharpness(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1459,7 +1323,7 @@ static int cit_set_sharpness(struct gspca_dev *gspca_dev)
0x11, 0x13, 0x16, 0x18, 0x1a, 0x8, 0x0a };
for (i = 0; i < cit_model1_ntries; i++)
- cit_PacketFormat2(gspca_dev, 0x0013, sa[sd->sharpness]);
+ cit_PacketFormat2(gspca_dev, 0x0013, sa[val]);
break;
}
case CIT_MODEL3:
@@ -1482,10 +1346,10 @@ static int cit_set_sharpness(struct gspca_dev *gspca_dev)
{ 0x03, 0x06, 0x05, 0x14 },
{ 0x03, 0x07, 0x05, 0x14 } /* Sharpest */
};
- cit_model3_Packet1(gspca_dev, 0x0060, sv[sd->sharpness].sv1);
- cit_model3_Packet1(gspca_dev, 0x0061, sv[sd->sharpness].sv2);
- cit_model3_Packet1(gspca_dev, 0x0062, sv[sd->sharpness].sv3);
- cit_model3_Packet1(gspca_dev, 0x0063, sv[sd->sharpness].sv4);
+ cit_model3_Packet1(gspca_dev, 0x0060, sv[val].sv1);
+ cit_model3_Packet1(gspca_dev, 0x0061, sv[val].sv2);
+ cit_model3_Packet1(gspca_dev, 0x0062, sv[val].sv3);
+ cit_model3_Packet1(gspca_dev, 0x0063, sv[val].sv4);
break;
}
}
@@ -1510,7 +1374,7 @@ static int cit_set_sharpness(struct gspca_dev *gspca_dev)
* 1/5/00 Created.
* 2/20/00 Added support for Model 2 cameras.
*/
-static void cit_set_lighting(struct gspca_dev *gspca_dev)
+static void cit_set_lighting(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1524,19 +1388,19 @@ static void cit_set_lighting(struct gspca_dev *gspca_dev)
case CIT_MODEL1: {
int i;
for (i = 0; i < cit_model1_ntries; i++)
- cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ cit_Packet_Format1(gspca_dev, 0x0027, val);
break;
}
}
}
-static void cit_set_hflip(struct gspca_dev *gspca_dev)
+static void cit_set_hflip(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
switch (sd->model) {
case CIT_MODEL0:
- if (sd->hflip)
+ if (val)
cit_write_reg(gspca_dev, 0x0020, 0x0115);
else
cit_write_reg(gspca_dev, 0x0040, 0x0115);
@@ -1831,7 +1695,8 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
cit_PacketFormat2(gspca_dev, 0x13, 0x1a);
/* Default lighting conditions */
- cit_Packet_Format1(gspca_dev, 0x0027, sd->lighting);
+ cit_Packet_Format1(gspca_dev, 0x0027,
+ v4l2_ctrl_g_ctrl(sd->lighting));
}
/* Assorted init */
@@ -2049,9 +1914,10 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
break;
}
- /* FIXME this cannot be changed while streaming, so we
- should report a grabbed flag for this control. */
- cit_model2_Packet1(gspca_dev, 0x0028, sd->lighting);
+ cit_model2_Packet1(gspca_dev, 0x0028, v4l2_ctrl_g_ctrl(sd->lighting));
+ /* model2 cannot change the backlight compensation while streaming */
+ v4l2_ctrl_grab(sd->lighting, true);
+
/* color balance rg2 */
cit_model2_Packet1(gspca_dev, 0x001e, 0x002f);
/* saturation */
@@ -2755,13 +2621,6 @@ static int sd_start(struct gspca_dev *gspca_dev)
break;
}
- cit_set_brightness(gspca_dev);
- cit_set_contrast(gspca_dev);
- cit_set_hue(gspca_dev);
- cit_set_sharpness(gspca_dev);
- cit_set_lighting(gspca_dev);
- cit_set_hflip(gspca_dev);
-
/* Program max isoc packet size */
cit_write_reg(gspca_dev, packet_size >> 8, 0x0106);
cit_write_reg(gspca_dev, packet_size & 0xff, 0x0107);
@@ -2857,6 +2716,8 @@ static void sd_stop0(struct gspca_dev *gspca_dev)
cit_write_reg(gspca_dev, 0x81, 0x0100); /* LED Off */
break;
case CIT_MODEL2:
+ v4l2_ctrl_grab(sd->lighting, false);
+ /* Fall through! */
case CIT_MODEL4:
cit_model2_Packet1(gspca_dev, 0x0030, 0x0004);
@@ -3055,152 +2916,6 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
-static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->brightness = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_brightness(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
-
- return 0;
-}
-
-static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->brightness;
-
- return 0;
-}
-
-static int sd_setcontrast(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->contrast = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_contrast(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
-
- return 0;
-}
-
-static int sd_getcontrast(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->contrast;
-
- return 0;
-}
-
-static int sd_sethue(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hue = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_hue(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
- return 0;
-}
-
-static int sd_gethue(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hue;
-
- return 0;
-}
-
-static int sd_setsharpness(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->sharpness = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_sharpness(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
- return 0;
-}
-
-static int sd_getsharpness(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->sharpness;
-
- return 0;
-}
-
-static int sd_setlighting(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->lighting = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_lighting(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
- return 0;
-}
-
-static int sd_getlighting(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->lighting;
-
- return 0;
-}
-
-static int sd_sethflip(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->hflip = val;
- if (gspca_dev->streaming) {
- if (sd->stop_on_control_change)
- sd_stopN(gspca_dev);
- cit_set_hflip(gspca_dev);
- if (sd->stop_on_control_change)
- cit_restart_stream(gspca_dev);
- }
- return 0;
-}
-
-static int sd_gethflip(struct gspca_dev *gspca_dev, __s32 *val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- *val = sd->hflip;
-
- return 0;
-}
-
#if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
static void cit_check_button(struct gspca_dev *gspca_dev)
{
@@ -3234,13 +2949,117 @@ static void cit_check_button(struct gspca_dev *gspca_dev)
}
#endif
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+ struct sd *sd = (struct sd *)gspca_dev;
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ if (sd->stop_on_control_change)
+ sd_stopN(gspca_dev);
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ cit_set_brightness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ cit_set_contrast(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ cit_set_hue(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ cit_set_hflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_SHARPNESS:
+ cit_set_sharpness(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_BACKLIGHT_COMPENSATION:
+ cit_set_lighting(gspca_dev, ctrl->val);
+ break;
+ }
+ if (sd->stop_on_control_change)
+ cit_restart_stream(gspca_dev);
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *)gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+ bool has_brightness;
+ bool has_contrast;
+ bool has_hue;
+ bool has_sharpness;
+ bool has_lighting;
+ bool has_hflip;
+
+ has_brightness = has_contrast = has_hue =
+ has_sharpness = has_hflip = has_lighting = false;
+ switch (sd->model) {
+ case CIT_MODEL0:
+ has_contrast = has_hflip = true;
+ break;
+ case CIT_MODEL1:
+ has_brightness = has_contrast =
+ has_sharpness = has_lighting = true;
+ break;
+ case CIT_MODEL2:
+ has_brightness = has_hue = has_lighting = true;
+ break;
+ case CIT_MODEL3:
+ has_brightness = has_contrast = has_sharpness = true;
+ break;
+ case CIT_MODEL4:
+ has_brightness = has_hue = true;
+ break;
+ case CIT_IBM_NETCAM_PRO:
+ has_brightness = has_hue =
+ has_sharpness = has_hflip = has_lighting = true;
+ break;
+ }
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 5);
+ if (has_brightness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 63, 1, 32);
+ if (has_contrast)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 20, 1, 10);
+ if (has_hue)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HUE, 0, 127, 1, 63);
+ if (has_sharpness)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_SHARPNESS, 0, 6, 1, 3);
+ if (has_lighting)
+ sd->lighting = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_BACKLIGHT_COMPENSATION, 0, 2, 1, 1);
+ if (has_hflip)
+ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
+}
+
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
@@ -3253,10 +3072,9 @@ static const struct sd_desc sd_desc = {
static const struct sd_desc sd_desc_isoc_nego = {
.name = MODULE_NAME,
- .ctrls = sd_ctrls,
- .nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
+ .init_controls = sd_init_controls,
.start = sd_start,
.isoc_init = sd_isoc_init,
.isoc_nego = sd_isoc_nego,
@@ -3320,6 +3138,7 @@ static struct usb_driver sd_driver = {
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
+ .reset_resume = gspca_resume,
#endif
};
diff --git a/drivers/media/video/ibmmpeg2.h b/drivers/media/video/ibmmpeg2.h
deleted file mode 100644
index 68e10387c498..000000000000
--- a/drivers/media/video/ibmmpeg2.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* ibmmpeg2.h - IBM MPEGCD21 definitions */
-
-#ifndef __IBM_MPEG2__
-#define __IBM_MPEG2__
-
-/* Define all MPEG Decoder registers */
-/* Chip Control and Status */
-#define IBM_MP2_CHIP_CONTROL 0x200*2
-#define IBM_MP2_CHIP_MODE 0x201*2
-/* Timer Control and Status */
-#define IBM_MP2_SYNC_STC2 0x202*2
-#define IBM_MP2_SYNC_STC1 0x203*2
-#define IBM_MP2_SYNC_STC0 0x204*2
-#define IBM_MP2_SYNC_PTS2 0x205*2
-#define IBM_MP2_SYNC_PTS1 0x206*2
-#define IBM_MP2_SYNC_PTS0 0x207*2
-/* Video FIFO Control */
-#define IBM_MP2_FIFO 0x208*2
-#define IBM_MP2_FIFOW 0x100*2
-#define IBM_MP2_FIFO_STAT 0x209*2
-#define IBM_MP2_RB_THRESHOLD 0x22b*2
-/* Command buffer */
-#define IBM_MP2_COMMAND 0x20a*2
-#define IBM_MP2_CMD_DATA 0x20b*2
-#define IBM_MP2_CMD_STAT 0x20c*2
-#define IBM_MP2_CMD_ADDR 0x20d*2
-/* Internal Processor Control and Status */
-#define IBM_MP2_PROC_IADDR 0x20e*2
-#define IBM_MP2_PROC_IDATA 0x20f*2
-#define IBM_MP2_WR_PROT 0x235*2
-/* DRAM Access */
-#define IBM_MP2_DRAM_ADDR 0x210*2
-#define IBM_MP2_DRAM_DATA 0x212*2
-#define IBM_MP2_DRAM_CMD_STAT 0x213*2
-#define IBM_MP2_BLOCK_SIZE 0x23b*2
-#define IBM_MP2_SRC_ADDR 0x23c*2
-/* Onscreen Display */
-#define IBM_MP2_OSD_ADDR 0x214*2
-#define IBM_MP2_OSD_DATA 0x215*2
-#define IBM_MP2_OSD_MODE 0x217*2
-#define IBM_MP2_OSD_LINK_ADDR 0x229*2
-#define IBM_MP2_OSD_SIZE 0x22a*2
-/* Interrupt Control */
-#define IBM_MP2_HOST_INT 0x218*2
-#define IBM_MP2_MASK0 0x219*2
-#define IBM_MP2_HOST_INT1 0x23e*2
-#define IBM_MP2_MASK1 0x23f*2
-/* Audio Control */
-#define IBM_MP2_AUD_IADDR 0x21a*2
-#define IBM_MP2_AUD_IDATA 0x21b*2
-#define IBM_MP2_AUD_FIFO 0x21c*2
-#define IBM_MP2_AUD_FIFOW 0x101*2
-#define IBM_MP2_AUD_CTL 0x21d*2
-#define IBM_MP2_BEEP_CTL 0x21e*2
-#define IBM_MP2_FRNT_ATTEN 0x22d*2
-/* Display Control */
-#define IBM_MP2_DISP_MODE 0x220*2
-#define IBM_MP2_DISP_DLY 0x221*2
-#define IBM_MP2_VBI_CTL 0x222*2
-#define IBM_MP2_DISP_LBOR 0x223*2
-#define IBM_MP2_DISP_TBOR 0x224*2
-/* Polarity Control */
-#define IBM_MP2_INFC_CTL 0x22c*2
-
-/* control commands */
-#define IBM_MP2_PLAY 0
-#define IBM_MP2_PAUSE 1
-#define IBM_MP2_SINGLE_FRAME 2
-#define IBM_MP2_FAST_FORWARD 3
-#define IBM_MP2_SLOW_MOTION 4
-#define IBM_MP2_IMED_NORM_PLAY 5
-#define IBM_MP2_RESET_WINDOW 6
-#define IBM_MP2_FREEZE_FRAME 7
-#define IBM_MP2_RESET_VID_RATE 8
-#define IBM_MP2_CONFIG_DECODER 9
-#define IBM_MP2_CHANNEL_SWITCH 10
-#define IBM_MP2_RESET_AUD_RATE 11
-#define IBM_MP2_PRE_OP_CHN_SW 12
-#define IBM_MP2_SET_STILL_MODE 14
-
-/* Define Xilinx FPGA Internal Registers */
-
-/* general control register 0 */
-#define XILINX_CTL0 0x600
-/* genlock delay resister 1 */
-#define XILINX_GLDELAY 0x602
-/* send 16 bits to CS3310 port */
-#define XILINX_CS3310 0x604
-/* send 16 bits to CS3310 and complete */
-#define XILINX_CS3310_CMPLT 0x60c
-/* pulse width modulator control */
-#define XILINX_PWM 0x606
-
-#endif
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index f7d57b3f2842..32a591062d0b 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1830,18 +1830,6 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
return 0;
}
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- struct video_device *vfd = video_devdata(filp);
- long ret;
-
- if (ivtv_debug & IVTV_DBGFLG_IOCTL)
- vfd->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
- ret = video_ioctl2(filp, cmd, arg);
- vfd->debug = 0;
- return ret;
-}
-
static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_querycap = ivtv_querycap,
.vidioc_s_audio = ivtv_s_audio,
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 89185caeafae..7c553d16579b 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -31,6 +31,5 @@ void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#endif
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 6738592aa35d..87990c5f0910 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -50,7 +50,7 @@ static const struct v4l2_file_operations ivtv_v4l2_enc_fops = {
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
- .unlocked_ioctl = ivtv_v4l2_ioctl,
+ .unlocked_ioctl = video_ioctl2,
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_enc_poll,
};
@@ -60,7 +60,7 @@ static const struct v4l2_file_operations ivtv_v4l2_dec_fops = {
.read = ivtv_v4l2_read,
.write = ivtv_v4l2_write,
.open = ivtv_v4l2_open,
- .unlocked_ioctl = ivtv_v4l2_ioctl,
+ .unlocked_ioctl = video_ioctl2,
.release = ivtv_v4l2_close,
.poll = ivtv_v4l2_dec_poll,
};
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
index 302dc3d70193..dc8c2505907e 100644
--- a/drivers/media/video/m5mols/Kconfig
+++ b/drivers/media/video/m5mols/Kconfig
@@ -1,5 +1,6 @@
config VIDEO_M5MOLS
tristate "Fujitsu M-5MOLS 8MP sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
---help---
This driver supports Fujitsu M-5MOLS camera sensor with ISP
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index 392a028730e2..fdbc205a2969 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -527,8 +527,8 @@ static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
/* Supported manual ISO values */
static const s64 iso_qmenu[] = {
- /* AE_ISO: 0x01...0x07 */
- 50, 100, 200, 400, 800, 1600, 3200
+ /* AE_ISO: 0x01...0x07 (ISO: 50...3200) */
+ 50000, 100000, 200000, 400000, 800000, 1600000, 3200000
};
/* Supported Exposure Bias values, -2.0EV...+2.0EV */
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index 3945556f5733..0b91a5cd38eb 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -27,6 +27,8 @@
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
@@ -60,6 +62,10 @@ MODULE_VERSION("0.1.1");
#define MEM2MEM_COLOR_STEP (0xff >> 4)
#define MEM2MEM_NUM_TILES 8
+/* Flags that indicate processing mode */
+#define MEM2MEM_HFLIP (1 << 0)
+#define MEM2MEM_VFLIP (1 << 1)
+
#define dprintk(dev, fmt, arg...) \
v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
@@ -97,6 +103,8 @@ static struct m2mtest_fmt formats[] = {
},
};
+#define NUM_FORMATS ARRAY_SIZE(formats)
+
/* Per-queue, driver-specific private data */
struct m2mtest_q_data {
unsigned int width;
@@ -110,32 +118,8 @@ enum {
V4L2_M2M_DST = 1,
};
-#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
-#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
-
-static struct v4l2_queryctrl m2mtest_ctrls[] = {
- {
- .id = V4L2_CID_TRANS_TIME_MSEC,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Transaction time (msec)",
- .minimum = 1,
- .maximum = 10000,
- .step = 100,
- .default_value = 1000,
- .flags = 0,
- }, {
- .id = V4L2_CID_TRANS_NUM_BUFS,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Buffers per transaction",
- .minimum = 1,
- .maximum = MEM2MEM_DEF_NUM_BUFS,
- .step = 1,
- .default_value = 1,
- .flags = 0,
- },
-};
-
-#define NUM_FORMATS ARRAY_SIZE(formats)
+#define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000)
+#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001)
static struct m2mtest_fmt *find_format(struct v4l2_format *f)
{
@@ -168,8 +152,11 @@ struct m2mtest_dev {
};
struct m2mtest_ctx {
+ struct v4l2_fh fh;
struct m2mtest_dev *dev;
+ struct v4l2_ctrl_handler hdl;
+
/* Processed buffers in this transaction */
u8 num_processed;
@@ -181,12 +168,22 @@ struct m2mtest_ctx {
/* Abort requested by m2m */
int aborting;
+ /* Processing mode */
+ int mode;
+
+ enum v4l2_colorspace colorspace;
+
struct v4l2_m2m_ctx *m2m_ctx;
/* Source and destination queue data */
struct m2mtest_q_data q_data[2];
};
+static inline struct m2mtest_ctx *file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct m2mtest_ctx, fh);
+}
+
static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
enum v4l2_buf_type type)
{
@@ -202,18 +199,6 @@ static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
}
-static struct v4l2_queryctrl *get_ctrl(int id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) {
- if (id == m2mtest_ctrls[i].id)
- return &m2mtest_ctrls[i];
- }
-
- return NULL;
-}
-
static int device_process(struct m2mtest_ctx *ctx,
struct vb2_buffer *in_vb,
struct vb2_buffer *out_vb)
@@ -249,19 +234,84 @@ static int device_process(struct m2mtest_ctx *ctx,
bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
w = 0;
- for (y = 0; y < height; ++y) {
- for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
- if (w & 0x1) {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ + MEM2MEM_COLOR_STEP;
- } else {
- for (x = 0; x < tile_w; ++x)
- *p_out++ = *p_in++ - MEM2MEM_COLOR_STEP;
+ switch (ctx->mode) {
+ case MEM2MEM_HFLIP | MEM2MEM_VFLIP:
+ p_out += bytesperline * height - bytes_left;
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
}
- ++w;
+ p_in += bytes_left;
+ p_out -= bytes_left;
+ }
+ break;
+
+ case MEM2MEM_HFLIP:
+ for (y = 0; y < height; ++y) {
+ p_out += MEM2MEM_NUM_TILES * tile_w;
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x01) {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *--p_out = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytesperline;
+ }
+ break;
+
+ case MEM2MEM_VFLIP:
+ p_out += bytesperline * (height - 1);
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left - 2 * bytesperline;
+ }
+ break;
+
+ default:
+ for (y = 0; y < height; ++y) {
+ for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
+ if (w & 0x1) {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ +
+ MEM2MEM_COLOR_STEP;
+ } else {
+ for (x = 0; x < tile_w; ++x)
+ *p_out++ = *p_in++ -
+ MEM2MEM_COLOR_STEP;
+ }
+ ++w;
+ }
+ p_in += bytes_left;
+ p_out += bytes_left;
}
- p_in += bytes_left;
- p_out += bytes_left;
}
return 0;
@@ -380,10 +430,9 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
- cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
-
+ strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -446,6 +495,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
f->fmt.pix.pixelformat = q_data->fmt->fourcc;
f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
f->fmt.pix.sizeimage = q_data->sizeimage;
+ f->fmt.pix.colorspace = ctx->colorspace;
return 0;
}
@@ -453,13 +503,13 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file2ctx(file), f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- return vidioc_g_fmt(priv, f);
+ return vidioc_g_fmt(file2ctx(file), f);
}
static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
@@ -498,7 +548,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct m2mtest_fmt *fmt;
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) {
@@ -507,6 +557,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.pixelformat);
return -EINVAL;
}
+ f->fmt.pix.colorspace = ctx->colorspace;
return vidioc_try_fmt(f, fmt);
}
@@ -515,7 +566,7 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct m2mtest_fmt *fmt;
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
fmt = find_format(f);
if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) {
@@ -524,6 +575,8 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
f->fmt.pix.pixelformat);
return -EINVAL;
}
+ if (!f->fmt.pix.colorspace)
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
return vidioc_try_fmt(f, fmt);
}
@@ -568,25 +621,29 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ return vidioc_s_fmt(file2ctx(file), f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
+ struct m2mtest_ctx *ctx = file2ctx(file);
int ret;
ret = vidioc_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
- return vidioc_s_fmt(priv, f);
+ ret = vidioc_s_fmt(file2ctx(file), f);
+ if (!ret)
+ ctx->colorspace = f->fmt.pix.colorspace;
+ return ret;
}
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
@@ -594,21 +651,21 @@ static int vidioc_reqbufs(struct file *file, void *priv,
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
@@ -616,7 +673,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
@@ -624,79 +681,37 @@ static int vidioc_streamon(struct file *file, void *priv,
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
-static int vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *qc)
+static int m2mtest_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct v4l2_queryctrl *c;
-
- c = get_ctrl(qc->id);
- if (!c)
- return -EINVAL;
-
- *qc = *c;
- return 0;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct m2mtest_ctx *ctx = priv;
+ struct m2mtest_ctx *ctx =
+ container_of(ctrl->handler, struct m2mtest_ctx, hdl);
switch (ctrl->id) {
- case V4L2_CID_TRANS_TIME_MSEC:
- ctrl->value = ctx->transtime;
+ case V4L2_CID_HFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_HFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_HFLIP;
break;
- case V4L2_CID_TRANS_NUM_BUFS:
- ctrl->value = ctx->translen;
+ case V4L2_CID_VFLIP:
+ if (ctrl->val)
+ ctx->mode |= MEM2MEM_VFLIP;
+ else
+ ctx->mode &= ~MEM2MEM_VFLIP;
break;
- default:
- v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl)
-{
- struct v4l2_queryctrl *c;
-
- c = get_ctrl(ctrl->id);
- if (!c)
- return -EINVAL;
-
- if (ctrl->value < c->minimum || ctrl->value > c->maximum) {
- v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n");
- return -ERANGE;
- }
-
- return 0;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *ctrl)
-{
- struct m2mtest_ctx *ctx = priv;
- int ret = 0;
-
- ret = check_ctrl_val(ctx, ctrl);
- if (ret != 0)
- return ret;
-
- switch (ctrl->id) {
case V4L2_CID_TRANS_TIME_MSEC:
- ctx->transtime = ctrl->value;
+ ctx->transtime = ctrl->val;
break;
case V4L2_CID_TRANS_NUM_BUFS:
- ctx->translen = ctrl->value;
+ ctx->translen = ctrl->val;
break;
default:
@@ -707,6 +722,10 @@ static int vidioc_s_ctrl(struct file *file, void *priv,
return 0;
}
+static const struct v4l2_ctrl_ops m2mtest_ctrl_ops = {
+ .s_ctrl = m2mtest_s_ctrl,
+};
+
static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
@@ -729,10 +748,8 @@ static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
-
- .vidioc_queryctrl = vidioc_queryctrl,
- .vidioc_g_ctrl = vidioc_g_ctrl,
- .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -844,6 +861,28 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds
return vb2_queue_init(dst_vq);
}
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_time_msec = {
+ .ops = &m2mtest_ctrl_ops,
+ .id = V4L2_CID_TRANS_TIME_MSEC,
+ .name = "Transaction Time (msec)",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 1001,
+ .min = 1,
+ .max = 10001,
+ .step = 100,
+};
+
+static const struct v4l2_ctrl_config m2mtest_ctrl_trans_num_bufs = {
+ .ops = &m2mtest_ctrl_ops,
+ .id = V4L2_CID_TRANS_NUM_BUFS,
+ .name = "Buffers Per Transaction",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 1,
+ .min = 1,
+ .max = MEM2MEM_DEF_NUM_BUFS,
+ .step = 1,
+};
+
/*
* File operations
*/
@@ -851,29 +890,51 @@ static int m2mtest_open(struct file *file)
{
struct m2mtest_dev *dev = video_drvdata(file);
struct m2mtest_ctx *ctx = NULL;
+ struct v4l2_ctrl_handler *hdl;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- file->private_data = ctx;
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
ctx->dev = dev;
- ctx->translen = MEM2MEM_DEF_TRANSLEN;
- ctx->transtime = MEM2MEM_DEF_TRANSTIME;
- ctx->num_processed = 0;
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdl, &m2mtest_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_time_msec, NULL);
+ v4l2_ctrl_new_custom(hdl, &m2mtest_ctrl_trans_num_bufs, NULL);
+ if (hdl->error) {
+ int err = hdl->error;
+
+ v4l2_ctrl_handler_free(hdl);
+ return err;
+ }
+ ctx->fh.ctrl_handler = hdl;
+ v4l2_ctrl_handler_setup(hdl);
ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
- ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_SRC].width = 640;
+ ctx->q_data[V4L2_M2M_SRC].height = 480;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage =
+ ctx->q_data[V4L2_M2M_SRC].width *
+ ctx->q_data[V4L2_M2M_SRC].height *
+ (ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3);
+ ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC];
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
int ret = PTR_ERR(ctx->m2m_ctx);
+ v4l2_ctrl_handler_free(hdl);
kfree(ctx);
return ret;
}
+ v4l2_fh_add(&ctx->fh);
atomic_inc(&dev->num_inst);
dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
@@ -884,10 +945,13 @@ static int m2mtest_open(struct file *file)
static int m2mtest_release(struct file *file)
{
struct m2mtest_dev *dev = video_drvdata(file);
- struct m2mtest_ctx *ctx = file->private_data;
+ struct m2mtest_ctx *ctx = file2ctx(file);
dprintk(dev, "Releasing instance %p\n", ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
kfree(ctx);
@@ -899,14 +963,14 @@ static int m2mtest_release(struct file *file)
static unsigned int m2mtest_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct m2mtest_ctx *ctx = file->private_data;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct m2mtest_ctx *ctx = file->private_data;
+ struct m2mtest_ctx *ctx = file2ctx(file);
return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
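
In mem2mem_testdev the control handler is attached to the file handle rather than to the device, so every open() gets its own HFLIP/VFLIP and transaction controls; v4l2_ctrl_handler_setup() then calls the handler's .s_ctrl for the registered controls so driver state matches the defaults, which is why ctx->transtime and ctx->translen no longer need explicit initialization in open(). A minimal sketch of that per-open wiring follows; struct my_ctx and my_open() are hypothetical and error unwinding is simplified.

/* Sketch only -- struct my_ctx and my_open() are hypothetical; the
 * v4l2_fh and v4l2_ctrl_handler calls are the framework API used by
 * m2mtest_open() above.
 * Needs <media/v4l2-fh.h>, <media/v4l2-ctrls.h>, <media/v4l2-dev.h>. */
struct my_ctx {
	struct v4l2_fh fh;
	struct v4l2_ctrl_handler hdl;
};

static int my_open(struct file *file, struct my_ctx *ctx,
		   const struct v4l2_ctrl_ops *ops)
{
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	v4l2_ctrl_handler_init(&ctx->hdl, 2);
	v4l2_ctrl_new_std(&ctx->hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(&ctx->hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
	if (ctx->hdl.error)
		return ctx->hdl.error;	/* handler free omitted for brevity */
	ctx->fh.ctrl_handler = &ctx->hdl;	/* controls are per open() */
	v4l2_ctrl_handler_setup(&ctx->hdl);	/* apply defaults via .s_ctrl */
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	return 0;
}
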
diff --git a/drivers/media/video/mt9m001.c b/drivers/media/video/mt9m001.c
index 7e648183f157..00583f5fd26b 100644
--- a/drivers/media/video/mt9m001.c
+++ b/drivers/media/video/mt9m001.c
@@ -22,7 +22,7 @@
/*
* mt9m001 i2c address 0x5d
- * The platform has to define ctruct i2c_board_info objects and link to them
+ * The platform has to define struct i2c_board_info objects and link to them
* from struct soc_camera_link
*/
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 3c1e626139b7..445359c96113 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -688,11 +688,17 @@ static const struct v4l2_subdev_ops mt9m032_ops = {
static int mt9m032_probe(struct i2c_client *client,
const struct i2c_device_id *devid)
{
+ struct mt9m032_platform_data *pdata = client->dev.platform_data;
struct i2c_adapter *adapter = client->adapter;
struct mt9m032 *sensor;
int chip_version;
int ret;
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
dev_warn(&client->dev,
"I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
@@ -708,7 +714,7 @@ static int mt9m032_probe(struct i2c_client *client,
mutex_init(&sensor->lock);
- sensor->pdata = client->dev.platform_data;
+ sensor->pdata = pdata;
v4l2_i2c_subdev_init(&sensor->subdev, client, &mt9m032_ops);
sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -738,7 +744,7 @@ static int mt9m032_probe(struct i2c_client *client,
sensor->format.field = V4L2_FIELD_NONE;
sensor->format.colorspace = V4L2_COLORSPACE_SRGB;
- v4l2_ctrl_handler_init(&sensor->ctrls, 4);
+ v4l2_ctrl_handler_init(&sensor->ctrls, 5);
v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops,
V4L2_CID_GAIN, 0, 127, 1, 64);
@@ -754,6 +760,9 @@ static int mt9m032_probe(struct i2c_client *client,
V4L2_CID_EXPOSURE, MT9M032_SHUTTER_WIDTH_MIN,
MT9M032_SHUTTER_WIDTH_MAX, 1,
MT9M032_SHUTTER_WIDTH_DEF);
+ v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, pdata->pix_clock,
+ pdata->pix_clock, 1, pdata->pix_clock);
if (sensor->ctrls.error) {
ret = sensor->ctrls.error;
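
The added control exposes the sensor's pixel rate to user space; with minimum, maximum and default all set to the platform-supplied clock it behaves as a read-only constant. A sketch of the idiom, with demo_add_pixel_rate as a made-up wrapper:

#include <linux/types.h>
#include <media/v4l2-ctrls.h>

/* Register the pixel rate as a constant: passing the same value for
 * minimum, maximum and default leaves user space nothing to change. */
static int demo_add_pixel_rate(struct v4l2_ctrl_handler *hdl,
                               const struct v4l2_ctrl_ops *ops,
                               u32 pix_clock)
{
        v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
                          pix_clock, pix_clock, 1, pix_clock);
        return hdl->error;
}
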
diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c
index b0c529964329..863d722dda06 100644
--- a/drivers/media/video/mt9m111.c
+++ b/drivers/media/video/mt9m111.c
@@ -214,7 +214,6 @@ struct mt9m111 {
int power_count;
const struct mt9m111_datafmt *fmt;
int lastpage; /* PageMap cache value */
- unsigned char datawidth;
};
/* Find a data format by a pixel code */
diff --git a/drivers/media/video/mt9p031.c b/drivers/media/video/mt9p031.c
index 8f061d9ac443..3be537ef22d2 100644
--- a/drivers/media/video/mt9p031.c
+++ b/drivers/media/video/mt9p031.c
@@ -950,7 +950,7 @@ static int mt9p031_probe(struct i2c_client *client,
mt9p031->model = did->driver_data;
mt9p031->reset = -1;
- v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 4);
+ v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 5);
v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
V4L2_CID_EXPOSURE, MT9P031_SHUTTER_WIDTH_MIN,
@@ -963,6 +963,9 @@ static int mt9p031_probe(struct i2c_client *client,
V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, pdata->target_freq,
+ pdata->target_freq, 1, pdata->target_freq);
for (i = 0; i < ARRAY_SIZE(mt9p031_ctrls); ++i)
v4l2_ctrl_new_custom(&mt9p031->ctrls, &mt9p031_ctrls[i], NULL);
diff --git a/drivers/media/video/mt9t001.c b/drivers/media/video/mt9t001.c
index 49ca3cbfc6f1..6d343adf891d 100644
--- a/drivers/media/video/mt9t001.c
+++ b/drivers/media/video/mt9t001.c
@@ -691,7 +691,7 @@ static int mt9t001_video_probe(struct i2c_client *client)
return ret;
/* Configure the pixel clock polarity */
- if (pdata && pdata->clk_pol) {
+ if (pdata->clk_pol) {
ret = mt9t001_write(client, MT9T001_PIXEL_CLOCK,
MT9T001_PIXEL_CLOCK_INVERT);
if (ret < 0)
@@ -715,10 +715,16 @@ static int mt9t001_video_probe(struct i2c_client *client)
static int mt9t001_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
+ struct mt9t001_platform_data *pdata = client->dev.platform_data;
struct mt9t001 *mt9t001;
unsigned int i;
int ret;
+ if (pdata == NULL) {
+ dev_err(&client->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_warn(&client->adapter->dev,
@@ -735,7 +741,7 @@ static int mt9t001_probe(struct i2c_client *client,
return -ENOMEM;
v4l2_ctrl_handler_init(&mt9t001->ctrls, ARRAY_SIZE(mt9t001_ctrls) +
- ARRAY_SIZE(mt9t001_gains) + 2);
+ ARRAY_SIZE(mt9t001_gains) + 3);
v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
V4L2_CID_EXPOSURE, MT9T001_SHUTTER_WIDTH_MIN,
@@ -743,6 +749,9 @@ static int mt9t001_probe(struct i2c_client *client,
MT9T001_SHUTTER_WIDTH_DEF);
v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
V4L2_CID_BLACK_LEVEL, 1, 1, 1, 1);
+ v4l2_ctrl_new_std(&mt9t001->ctrls, &mt9t001_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, pdata->ext_clk, pdata->ext_clk,
+ 1, pdata->ext_clk);
for (i = 0; i < ARRAY_SIZE(mt9t001_ctrls); ++i)
v4l2_ctrl_new_custom(&mt9t001->ctrls, &mt9t001_ctrls[i], NULL);
diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c
index bf63417adb8f..72479247522a 100644
--- a/drivers/media/video/mt9v022.c
+++ b/drivers/media/video/mt9v022.c
@@ -23,7 +23,7 @@
/*
* mt9v022 i2c address 0x48, 0x4c, 0x58, 0x5c
- * The platform has to define ctruct i2c_board_info objects and link to them
+ * The platform has to define struct i2c_board_info objects and link to them
* from struct soc_camera_link
*/
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index d2e6f82ecfac..560a65aa7038 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev)
dev_dbg(pcdev->icd->parent, "Activate device\n");
- clk_enable(pcdev->clk);
+ clk_prepare_enable(pcdev->clk);
/* enable CSI before doing anything else */
__raw_writel(csicr1, pcdev->base + CSICR1);
@@ -422,7 +422,7 @@ static void mx1_camera_deactivate(struct mx1_camera_dev *pcdev)
/* Disable all CSI interface */
__raw_writel(0x00, pcdev->base + CSICR1);
- clk_disable(pcdev->clk);
+ clk_disable_unprepare(pcdev->clk);
}
/*
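
The clock calls in this driver (and the mx2/mx3 ones below) move to the common-clock-framework helpers, which fold the prepare/enable and disable/unprepare pairs into single calls. A minimal sketch of the pattern, with demo_clk_on/demo_clk_off as illustrative wrappers:

#include <linux/clk.h>

/* clk_prepare_enable() == clk_prepare() (may sleep) + clk_enable();
 * clk_disable_unprepare() is the symmetric teardown. */
static int demo_clk_on(struct clk *clk)
{
        return clk_prepare_enable(clk);
}

static void demo_clk_off(struct clk *clk)
{
        clk_disable_unprepare(clk);
}
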
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 637bde8aca28..ac175406e582 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -272,7 +272,7 @@ struct mx2_camera_dev {
struct device *dev;
struct soc_camera_host soc_host;
struct soc_camera_device *icd;
- struct clk *clk_csi, *clk_emma;
+ struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg;
unsigned int irq_csi, irq_emma;
void __iomem *base_csi, *base_emma;
@@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev)
{
unsigned long flags;
- clk_disable(pcdev->clk_csi);
+ clk_disable_unprepare(pcdev->clk_csi);
writel(0, pcdev->base_csi + CSICR1);
if (cpu_is_mx27()) {
writel(0, pcdev->base_emma + PRP_CNTL);
@@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd)
if (pcdev->icd)
return -EBUSY;
- ret = clk_enable(pcdev->clk_csi);
+ ret = clk_prepare_enable(pcdev->clk_csi);
if (ret < 0)
return ret;
@@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev)
goto exit_iounmap;
}
- pcdev->clk_emma = clk_get(NULL, "emma");
- if (IS_ERR(pcdev->clk_emma)) {
- err = PTR_ERR(pcdev->clk_emma);
+ pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg");
+ if (IS_ERR(pcdev->clk_emma_ipg)) {
+ err = PTR_ERR(pcdev->clk_emma_ipg);
goto exit_free_irq;
}
- clk_enable(pcdev->clk_emma);
+ clk_prepare_enable(pcdev->clk_emma_ipg);
+
+ pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb");
+ if (IS_ERR(pcdev->clk_emma_ahb)) {
+ err = PTR_ERR(pcdev->clk_emma_ahb);
+ goto exit_clk_emma_ipg_put;
+ }
+
+ clk_prepare_enable(pcdev->clk_emma_ahb);
err = mx27_camera_emma_prp_reset(pcdev);
if (err)
- goto exit_clk_emma_put;
+ goto exit_clk_emma_ahb_put;
return err;
-exit_clk_emma_put:
- clk_disable(pcdev->clk_emma);
- clk_put(pcdev->clk_emma);
+exit_clk_emma_ahb_put:
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ clk_put(pcdev->clk_emma_ahb);
+exit_clk_emma_ipg_put:
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_put(pcdev->clk_emma_ipg);
exit_free_irq:
free_irq(pcdev->irq_emma, pcdev);
exit_iounmap:
@@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev)
goto exit;
}
- pcdev->clk_csi = clk_get(&pdev->dev, NULL);
+ pcdev->clk_csi = clk_get(&pdev->dev, "ahb");
if (IS_ERR(pcdev->clk_csi)) {
dev_err(&pdev->dev, "Could not get csi clock\n");
err = PTR_ERR(pcdev->clk_csi);
@@ -1785,8 +1796,10 @@ exit_free_emma:
eallocctx:
if (cpu_is_mx27()) {
free_irq(pcdev->irq_emma, pcdev);
- clk_disable(pcdev->clk_emma);
- clk_put(pcdev->clk_emma);
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_put(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ clk_put(pcdev->clk_emma_ahb);
iounmap(pcdev->base_emma);
release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma));
}
@@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev)
iounmap(pcdev->base_csi);
if (cpu_is_mx27()) {
- clk_disable(pcdev->clk_emma);
- clk_put(pcdev->clk_emma);
+ clk_disable_unprepare(pcdev->clk_emma_ipg);
+ clk_put(pcdev->clk_emma_ipg);
+ clk_disable_unprepare(pcdev->clk_emma_ahb);
+ clk_put(pcdev->clk_emma_ahb);
iounmap(pcdev->base_emma);
res = pcdev->res_emma;
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/media/video/mx2_emmaprp.c b/drivers/media/video/mx2_emmaprp.c
index 0bd5815de369..5f8a6f5b98f9 100644
--- a/drivers/media/video/mx2_emmaprp.c
+++ b/drivers/media/video/mx2_emmaprp.c
@@ -396,9 +396,13 @@ static int vidioc_querycap(struct file *file, void *priv,
{
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
-
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
return 0;
}
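
This and several later querycap changes (g2d, s5p-jpeg, s5p-mfc) add the mem-to-mem capability flag while keeping the old capture/output flags for existing applications. For the single-planar case the flag combination boils down to roughly:

#include <linux/videodev2.h>

/* A mem-to-mem node: V4L2_CAP_VIDEO_M2M is the real capability; the
 * CAPTURE/OUTPUT flags remain only for backward compatibility. */
static const __u32 demo_m2m_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING |
                                   V4L2_CAP_VIDEO_CAPTURE |
                                   V4L2_CAP_VIDEO_OUTPUT;
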
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index f13643d31353..af2297dd49c8 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -61,15 +61,9 @@
#define MAX_VIDEO_MEM 16
-enum csi_buffer_state {
- CSI_BUF_NEEDS_INIT,
- CSI_BUF_PREPARED,
-};
-
struct mx3_camera_buffer {
/* common v4l buffer stuff -- must be first */
struct vb2_buffer vb;
- enum csi_buffer_state state;
struct list_head queue;
/* One descriptor per scatterlist (per frame) */
@@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
goto error;
}
- if (buf->state == CSI_BUF_NEEDS_INIT) {
+ if (!buf->txd) {
sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0);
sg_dma_len(sg) = new_size;
@@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
txd->callback_param = txd;
txd->callback = mx3_cam_dma_done;
- buf->state = CSI_BUF_PREPARED;
buf->txd = txd;
} else {
txd = buf->txd;
@@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb)
/* Doesn't hurt also if the list is empty */
list_del_init(&buf->queue);
- buf->state = CSI_BUF_NEEDS_INIT;
if (txd) {
buf->txd = NULL;
@@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb)
struct mx3_camera_dev *mx3_cam = ici->priv;
struct mx3_camera_buffer *buf = to_mx3_vb(vb);
- /* This is for locking debugging only */
- INIT_LIST_HEAD(&buf->queue);
- sg_init_table(&buf->sg, 1);
+ if (!buf->txd) {
+ /* This is for locking debugging only */
+ INIT_LIST_HEAD(&buf->queue);
+ sg_init_table(&buf->sg, 1);
- buf->state = CSI_BUF_NEEDS_INIT;
-
- mx3_cam->buf_total += vb2_plane_size(vb, 0);
+ mx3_cam->buf_total += vb2_plane_size(vb, 0);
+ }
return 0;
}
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index 7e32331b60fb..f1220d3d4970 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -2014,7 +2014,7 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
return -EINVAL;
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = INT_MAX;
@@ -2024,7 +2024,7 @@ static int ccdc_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
ccdc_try_crop(ccdc, format, &sel->r);
break;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
break;
@@ -2052,7 +2052,7 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ if (sel->target != V4L2_SEL_TGT_CROP ||
sel->pad != CCDC_PAD_SOURCE_OF)
return -EINVAL;
@@ -2064,7 +2064,7 @@ static int ccdc_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
* pad. If the KEEP_CONFIG flag is set, just return the current crop
* rectangle.
*/
- if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
sel->r = *__ccdc_get_crop(ccdc, fh, sel->which);
return 0;
}
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index dd91da26f1b0..53f5a703e31a 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -1949,7 +1949,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
return -EINVAL;
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = INT_MAX;
@@ -1960,7 +1960,7 @@ static int preview_get_selection(struct v4l2_subdev *sd,
preview_try_crop(prev, format, &sel->r);
break;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
sel->r = *__preview_get_crop(prev, fh, sel->which);
break;
@@ -1988,7 +1988,7 @@ static int preview_set_selection(struct v4l2_subdev *sd,
struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
- if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ if (sel->target != V4L2_SEL_TGT_CROP ||
sel->pad != PREV_PAD_SINK)
return -EINVAL;
@@ -2000,7 +2000,7 @@ static int preview_set_selection(struct v4l2_subdev *sd,
* pad. If the KEEP_CONFIG flag is set, just return the current crop
* rectangle.
*/
- if (sel->flags & V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG) {
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
sel->r = *__preview_get_crop(prev, fh, sel->which);
return 0;
}
diff --git a/drivers/media/video/omap3isp/ispresizer.c b/drivers/media/video/omap3isp/ispresizer.c
index 14041c9c8643..ae17d917f77b 100644
--- a/drivers/media/video/omap3isp/ispresizer.c
+++ b/drivers/media/video/omap3isp/ispresizer.c
@@ -1249,7 +1249,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
sel->which);
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = INT_MAX;
@@ -1259,7 +1259,7 @@ static int resizer_get_selection(struct v4l2_subdev *sd,
resizer_calc_ratios(res, &sel->r, format_source, &ratio);
break;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
sel->r = *__resizer_get_crop(res, fh, sel->which);
resizer_calc_ratios(res, &sel->r, format_source, &ratio);
break;
@@ -1293,7 +1293,7 @@ static int resizer_set_selection(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *format_sink, *format_source;
struct resizer_ratio ratio;
- if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
+ if (sel->target != V4L2_SEL_TGT_CROP ||
sel->pad != RESZ_PAD_SINK)
return -EINVAL;
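
The omap3isp hunks above (and the fimc ones further down) track the rename of the subdev selection targets and flags to the shared V4L2_SEL_* names: V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL becomes V4L2_SEL_TGT_CROP, and so on. A sketch of the resulting get_selection switch, with demo_get_selection and the demo_crop rectangle as placeholders:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <media/v4l2-subdev.h>

/* Report either the crop bounds or the currently configured crop
 * rectangle, using the unified V4L2_SEL_TGT_* target names. */
static int demo_get_selection(struct v4l2_subdev_selection *sel,
                              const struct v4l2_rect *demo_crop)
{
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
                sel->r.left = 0;
                sel->r.top = 0;
                sel->r.width = INT_MAX;
                sel->r.height = INT_MAX;
                break;
        case V4L2_SEL_TGT_CROP:
                sel->r = *demo_crop;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
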
diff --git a/drivers/media/video/ov2640.c b/drivers/media/video/ov2640.c
index 3c2c5d3bcc6b..7c44d1fe3c87 100644
--- a/drivers/media/video/ov2640.c
+++ b/drivers/media/video/ov2640.c
@@ -837,10 +837,8 @@ static int ov2640_g_fmt(struct v4l2_subdev *sd,
if (!priv->win) {
u32 width = W_SVGA, height = H_SVGA;
- int ret = ov2640_set_params(client, &width, &height,
- V4L2_MBUS_FMT_UYVY8_2X8);
- if (ret < 0)
- return ret;
+ priv->win = ov2640_select_win(&width, &height);
+ priv->cfmt_code = V4L2_MBUS_FMT_UYVY8_2X8;
}
mf->width = priv->win->width;
diff --git a/drivers/media/video/ov772x.c b/drivers/media/video/ov772x.c
index 74e77d327ed8..6d79b89b8603 100644
--- a/drivers/media/video/ov772x.c
+++ b/drivers/media/video/ov772x.c
@@ -880,15 +880,11 @@ static int ov772x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
static int ov772x_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov772x_priv *priv = container_of(sd, struct ov772x_priv, subdev);
if (!priv->win || !priv->cfmt) {
- u32 width = VGA_WIDTH, height = VGA_HEIGHT;
- int ret = ov772x_set_params(client, &width, &height,
- V4L2_MBUS_FMT_YUYV8_2X8);
- if (ret < 0)
- return ret;
+ priv->cfmt = &ov772x_cfmts[0];
+ priv->win = ov772x_select_win(VGA_WIDTH, VGA_HEIGHT);
}
mf->width = priv->win->width;
diff --git a/drivers/media/video/ov9640.c b/drivers/media/video/ov9640.c
index 23412debb36b..9ed4ba4236c4 100644
--- a/drivers/media/video/ov9640.c
+++ b/drivers/media/video/ov9640.c
@@ -605,6 +605,7 @@ static int ov9640_video_probe(struct i2c_client *client)
devname = "ov9640";
priv->model = V4L2_IDENT_OV9640;
priv->revision = 2;
+ break;
case OV9640_V3:
devname = "ov9640";
priv->model = V4L2_IDENT_OV9640;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index b4c679b3fb0f..77f9c92186f4 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -30,7 +30,6 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/isa.h>
#include <asm/io.h>
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
index f9b6001e1dd7..25e412ecad2c 100644
--- a/drivers/media/video/pvrusb2/Kconfig
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -1,7 +1,6 @@
config VIDEO_PVRUSB2
tristate "Hauppauge WinTV-PVR USB2 support"
depends on VIDEO_V4L2 && I2C
- depends on VIDEO_MEDIA # Avoids pvrusb = Y / DVB = M
select VIDEO_TUNER
select VIDEO_TVEEPROM
select VIDEO_CX2341X
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
index 7bddfaeeafc3..f344aed32a93 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -226,13 +226,11 @@ static int pvr2_enum_input(struct file *file, void *priv, struct v4l2_input *vi)
struct v4l2_input tmp;
unsigned int cnt;
int val;
- int ret;
cptr = pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_INPUT);
memset(&tmp, 0, sizeof(tmp));
tmp.index = vi->index;
- ret = 0;
if (vi->index >= fh->input_cnt)
return -EINVAL;
val = fh->input_map[vi->index];
@@ -556,9 +554,7 @@ static int pvr2_queryctrl(struct file *file, void *priv,
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
struct pvr2_ctrl *cptr;
int val;
- int ret;
- ret = 0;
if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
cptr = pvr2_hdw_get_ctrl_nextv4l(
hdw, (vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
@@ -705,11 +701,9 @@ static int pvr2_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_control *ctrl;
struct pvr2_ctrl *pctl;
unsigned int idx;
- int ret;
/* For the moment just validate that the requested control
actually exists. */
- ret = 0;
for (idx = 0; idx < ctls->count; idx++) {
ctrl = ctls->controls + idx;
pctl = pvr2_hdw_get_ctrl_v4l(hdw, ctrl->id);
@@ -770,12 +764,10 @@ static int pvr2_s_crop(struct file *file, void *priv, struct v4l2_crop *crop)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- struct v4l2_cropcap cap;
int ret;
if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
ret = pvr2_ctrl_set_value(
pvr2_hdw_get_ctrl_by_id(hdw, PVR2_CID_CROPL),
crop->c.left);
@@ -965,7 +957,7 @@ static long pvr2_v4l2_ioctl(struct file *file,
long ret = -EINVAL;
if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL)
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
+ v4l_printk_ioctl(pvr2_hdw_get_driver_name(hdw), cmd);
if (!pvr2_hdw_dev_ok(hdw)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
@@ -998,7 +990,7 @@ static long pvr2_v4l2_ioctl(struct file *file,
pvr2_trace(PVR2_TRACE_V4LIOCTL,
"pvr2_v4l2_do_ioctl failure, ret=%ld"
" command was:", ret);
- v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
+ v4l_printk_ioctl(pvr2_hdw_get_driver_name(hdw),
cmd);
}
}
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index ec4e2ef54e65..de7c7ba99ef4 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -136,19 +136,13 @@ static int leds[2] = { 100, 0 };
/***/
-static int pwc_video_close(struct file *file);
-static ssize_t pwc_video_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos);
-static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
-static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
-
static const struct v4l2_file_operations pwc_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .release = pwc_video_close,
- .read = pwc_video_read,
- .poll = pwc_video_poll,
- .mmap = pwc_video_mmap,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
static struct video_device pwc_template = {
@@ -562,17 +556,6 @@ static const char *pwc_sensor_type_to_string(unsigned int sensor_type)
/***************************************************************************/
/* Video4Linux functions */
-int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
-{
- if (pdev->capt_file != NULL &&
- pdev->capt_file != file)
- return -EBUSY;
-
- pdev->capt_file = file;
-
- return 0;
-}
-
static void pwc_video_release(struct v4l2_device *v)
{
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
@@ -583,113 +566,6 @@ static void pwc_video_release(struct v4l2_device *v)
kfree(pdev);
}
-static int pwc_video_close(struct file *file)
-{
- struct pwc_device *pdev = video_drvdata(file);
-
- /*
- * If we're still streaming vb2_queue_release will call stream_stop
- * so we must take both the v4l2_lock and the vb_queue_lock.
- */
- if (mutex_lock_interruptible(&pdev->v4l2_lock))
- return -ERESTARTSYS;
- if (mutex_lock_interruptible(&pdev->vb_queue_lock)) {
- mutex_unlock(&pdev->v4l2_lock);
- return -ERESTARTSYS;
- }
-
- if (pdev->capt_file == file) {
- vb2_queue_release(&pdev->vb_queue);
- pdev->capt_file = NULL;
- }
-
- mutex_unlock(&pdev->vb_queue_lock);
- mutex_unlock(&pdev->v4l2_lock);
-
- return v4l2_fh_release(file);
-}
-
-static ssize_t pwc_video_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int lock_v4l2 = 0;
- ssize_t ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret)
- goto out;
-
- /* stream_start will get called so we must take the v4l2_lock */
- if (pdev->vb_queue.fileio == NULL)
- lock_v4l2 = 1;
-
- /* Use try_lock, since we're taking the locks in the *wrong* order! */
- if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock)) {
- ret = -ERESTARTSYS;
- goto out;
- }
- ret = vb2_read(&pdev->vb_queue, buf, count, ppos,
- file->f_flags & O_NONBLOCK);
- if (lock_v4l2)
- mutex_unlock(&pdev->v4l2_lock);
-out:
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static unsigned int pwc_video_poll(struct file *file, poll_table *wait)
-{
- struct pwc_device *pdev = video_drvdata(file);
- struct vb2_queue *q = &pdev->vb_queue;
- unsigned long req_events = poll_requested_events(wait);
- unsigned int ret = POLL_ERR;
- int lock_v4l2 = 0;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return POLL_ERR;
-
- /* Will this start fileio and thus call start_stream? */
- if ((req_events & (POLLIN | POLLRDNORM)) &&
- q->num_buffers == 0 && !q->streaming && q->fileio == NULL) {
- if (pwc_test_n_set_capt_file(pdev, file))
- goto out;
- lock_v4l2 = 1;
- }
-
- /* Use try_lock, since we're taking the locks in the *wrong* order! */
- if (lock_v4l2 && !mutex_trylock(&pdev->v4l2_lock))
- goto out;
- ret = vb2_poll(&pdev->vb_queue, file, wait);
- if (lock_v4l2)
- mutex_unlock(&pdev->v4l2_lock);
-
-out:
- if (!pdev->udev)
- ret |= POLLHUP;
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_mmap(&pdev->vb_queue, vma);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
/***************************************************************************/
/* Videobuf2 operations */
@@ -782,6 +658,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
if (!pdev->udev)
return -ENODEV;
+ if (mutex_lock_interruptible(&pdev->v4l2_lock))
+ return -ERESTARTSYS;
/* Turn on camera and set LEDS on */
pwc_camera_power(pdev, 1);
pwc_set_leds(pdev, leds[0], leds[1]);
@@ -794,6 +672,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
/* And cleanup any queued bufs!! */
pwc_cleanup_queued_bufs(pdev);
}
+ mutex_unlock(&pdev->v4l2_lock);
return r;
}
@@ -802,6 +681,8 @@ static int stop_streaming(struct vb2_queue *vq)
{
struct pwc_device *pdev = vb2_get_drv_priv(vq);
+ if (mutex_lock_interruptible(&pdev->v4l2_lock))
+ return -ERESTARTSYS;
if (pdev->udev) {
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
@@ -809,22 +690,11 @@ static int stop_streaming(struct vb2_queue *vq)
}
pwc_cleanup_queued_bufs(pdev);
+ mutex_unlock(&pdev->v4l2_lock);
return 0;
}
-static void wait_prepare(struct vb2_queue *vq)
-{
- struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_unlock(&pdev->vb_queue_lock);
-}
-
-static void wait_finish(struct vb2_queue *vq)
-{
- struct pwc_device *pdev = vb2_get_drv_priv(vq);
- mutex_lock(&pdev->vb_queue_lock);
-}
-
static struct vb2_ops pwc_vb_queue_ops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
@@ -834,8 +704,8 @@ static struct vb2_ops pwc_vb_queue_ops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = wait_prepare,
- .wait_finish = wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/***************************************************************************/
@@ -1136,6 +1006,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* Init video_device structure */
memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
strcpy(pdev->vdev.name, name);
+ pdev->vdev.queue = &pdev->vb_queue;
+ pdev->vdev.queue->lock = &pdev->vb_queue_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &pdev->vdev.flags);
video_set_drvdata(&pdev->vdev, pdev);
@@ -1190,15 +1062,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
pdev->vdev.lock = &pdev->v4l2_lock;
- /*
- * Don't take v4l2_lock for these ioctls. This improves latency if
- * v4l2_lock is taken for a long time, e.g. when changing a control
- * value, and a new frame is ready to be dequeued.
- */
- v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_DQBUF);
- v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QBUF);
- v4l2_disable_ioctl_locking(&pdev->vdev, VIDIOC_QUERYBUF);
-
rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
@@ -1253,20 +1116,18 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
struct v4l2_device *v = usb_get_intfdata(intf);
struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
- mutex_lock(&pdev->v4l2_lock);
-
mutex_lock(&pdev->vb_queue_lock);
+ mutex_lock(&pdev->v4l2_lock);
/* No need to keep the urbs around after disconnection */
if (pdev->vb_queue.streaming)
pwc_isoc_cleanup(pdev);
pdev->udev = NULL;
pwc_cleanup_queued_bufs(pdev);
- mutex_unlock(&pdev->vb_queue_lock);
v4l2_device_disconnect(&pdev->v4l2_dev);
video_unregister_device(&pdev->vdev);
-
mutex_unlock(&pdev->v4l2_lock);
+ mutex_unlock(pdev->vb_queue.lock);
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
if (pdev->button_dev)
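
With the ownership bookkeeping gone, the pwc file operations are reduced to the stock videobuf2 helpers; all they need is the queue pointer and a queue lock on the video_device, as set up in usb_pwc_probe() above. A sketch of that wiring (demo_fops and demo_wire_vb2 are placeholder names):

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>   /* vb2_fop_* declarations in this era */

static const struct v4l2_file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .read           = vb2_fop_read,
        .poll           = vb2_fop_poll,
        .mmap           = vb2_fop_mmap,
        .unlocked_ioctl = video_ioctl2,
};

/* Point the video_device at the queue and give the queue a lock so
 * the vb2 fop and ioctl helpers can serialize access for the driver. */
static void demo_wire_vb2(struct video_device *vdev,
                          struct vb2_queue *q, struct mutex *queue_lock)
{
        q->lock = queue_lock;
        vdev->queue = q;
}
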
diff --git a/drivers/media/video/pwc/pwc-v4l.c b/drivers/media/video/pwc/pwc-v4l.c
index c691e29cc36e..545e9bbdeede 100644
--- a/drivers/media/video/pwc/pwc-v4l.c
+++ b/drivers/media/video/pwc/pwc-v4l.c
@@ -405,6 +405,7 @@ static void pwc_vidioc_fill_fmt(struct v4l2_format *f,
f->fmt.pix.pixelformat = pixfmt;
f->fmt.pix.bytesperline = f->fmt.pix.width;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.width * 3 / 2;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
"width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
f->fmt.pix.width,
@@ -468,17 +469,8 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
if (ret < 0)
return ret;
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret)
- goto leave;
-
- if (pdev->vb_queue.streaming) {
- ret = -EBUSY;
- goto leave;
- }
+ if (vb2_is_busy(&pdev->vb_queue))
+ return -EBUSY;
pixelformat = f->fmt.pix.pixelformat;
@@ -496,8 +488,6 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
-leave:
- mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -508,10 +498,9 @@ static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap
strcpy(cap->driver, PWC_NAME);
strlcpy(cap->card, pdev->vdev.name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
- cap->capabilities =
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING |
- V4L2_CAP_READWRITE;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -520,7 +509,8 @@ static int pwc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
if (i->index) /* Only one INPUT is supported */
return -EINVAL;
- strcpy(i->name, "usb");
+ strlcpy(i->name, "Camera", sizeof(i->name));
+ i->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
@@ -933,104 +923,6 @@ static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *
return pwc_vidioc_try_fmt(pdev, f);
}
-static int pwc_reqbufs(struct file *file, void *fh,
- struct v4l2_requestbuffers *rb)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_reqbufs(&pdev->vb_queue, rb);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_querybuf(&pdev->vb_queue, buf);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_qbuf(&pdev->vb_queue, buf);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_dqbuf(&pdev->vb_queue, buf,
- file->f_flags & O_NONBLOCK);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_streamon(&pdev->vb_queue, i);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
-static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
-{
- struct pwc_device *pdev = video_drvdata(file);
- int ret;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
-
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret == 0)
- ret = vb2_streamoff(&pdev->vb_queue, i);
-
- mutex_unlock(&pdev->vb_queue_lock);
- return ret;
-}
-
static int pwc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
@@ -1112,32 +1004,27 @@ static int pwc_s_parm(struct file *file, void *fh,
int compression = 0;
int ret, fps;
- if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- parm->parm.capture.timeperframe.numerator == 0)
+ if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- fps = parm->parm.capture.timeperframe.denominator /
- parm->parm.capture.timeperframe.numerator;
-
- if (mutex_lock_interruptible(&pdev->vb_queue_lock))
- return -ERESTARTSYS;
+ /* If timeperframe == 0, then reset the framerate to the nominal value.
+ We pick a high framerate here, and let pwc_set_video_mode() figure
+ out the best match. */
+ if (parm->parm.capture.timeperframe.numerator == 0 ||
+ parm->parm.capture.timeperframe.denominator == 0)
+ fps = 30;
+ else
+ fps = parm->parm.capture.timeperframe.denominator /
+ parm->parm.capture.timeperframe.numerator;
- ret = pwc_test_n_set_capt_file(pdev, file);
- if (ret)
- goto leave;
-
- if (pdev->vb_queue.streaming) {
- ret = -EBUSY;
- goto leave;
- }
+ if (vb2_is_busy(&pdev->vb_queue))
+ return -EBUSY;
ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
fps, &compression, 0);
pwc_g_parm(file, fh, parm);
-leave:
- mutex_unlock(&pdev->vb_queue_lock);
return ret;
}
@@ -1150,12 +1037,12 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap,
- .vidioc_reqbufs = pwc_reqbufs,
- .vidioc_querybuf = pwc_querybuf,
- .vidioc_qbuf = pwc_qbuf,
- .vidioc_dqbuf = pwc_dqbuf,
- .vidioc_streamon = pwc_streamon,
- .vidioc_streamoff = pwc_streamoff,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
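
pwc_querycap() now reports the per-node capabilities in device_caps and sets V4L2_CAP_DEVICE_CAPS in capabilities so user space knows the field is filled in. A sketch of that shape, with placeholder driver/card/bus strings and the ioctl plumbing omitted:

#include <linux/string.h>
#include <linux/videodev2.h>

static int demo_querycap(struct v4l2_capability *cap)
{
        strlcpy(cap->driver, "demo", sizeof(cap->driver));
        strlcpy(cap->card, "Demo capture device", sizeof(cap->card));
        strlcpy(cap->bus_info, "platform:demo", sizeof(cap->bus_info));
        /* What this particular device node supports... */
        cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
                           V4L2_CAP_READWRITE;
        /* ...plus the union over all nodes of the device and the flag
         * telling user space that device_caps is valid. */
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
}
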
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index d6b5b216b9d6..7a6a0d39c2c6 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -239,7 +239,6 @@ struct pwc_device
int features; /* feature bits */
/*** Video data ***/
- struct file *capt_file; /* file doing video capture */
int vendpoint; /* video isoc endpoint */
int vcinterface; /* video control interface */
int valternate; /* alternate interface needed */
@@ -355,8 +354,6 @@ struct pwc_device
extern int pwc_trace;
#endif
-int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file);
-
/** Functions in pwc-misc.c */
/* sizes in pixels */
extern const int pwc_image_sizes[PSZ_MAX][2];
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 01c2179f0520..95007dda0c93 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -2686,3 +2686,4 @@ MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
MODULE_LICENSE("GPL");
MODULE_VERSION(S2255_VERSION);
+MODULE_FIRMWARE(FIRMWARE_FILE_NAME);
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 725812aa0c30..8e413dd3c0b0 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -480,48 +480,59 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- int ret;
+ int ret = -EBUSY;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
if (fimc_m2m_active(fimc))
- return -EBUSY;
+ goto unlock;
set_bit(ST_CAPT_BUSY, &fimc->state);
ret = pm_runtime_get_sync(&fimc->pdev->dev);
if (ret < 0)
- return ret;
+ goto unlock;
ret = v4l2_fh_open(file);
- if (ret)
- return ret;
-
- if (++fimc->vid_cap.refcnt != 1)
- return 0;
+ if (ret) {
+ pm_runtime_put(&fimc->pdev->dev);
+ goto unlock;
+ }
- ret = fimc_pipeline_initialize(&fimc->pipeline,
+ if (++fimc->vid_cap.refcnt == 1) {
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
&fimc->vid_cap.vfd->entity, true);
- if (ret < 0) {
- clear_bit(ST_CAPT_BUSY, &fimc->state);
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->vid_cap.refcnt--;
- v4l2_fh_release(file);
- return ret;
- }
- ret = fimc_capture_ctrls_create(fimc);
- if (!ret && !fimc->vid_cap.user_subdev_api)
- ret = fimc_capture_set_default_format(fimc);
+ if (!ret && !fimc->vid_cap.user_subdev_api)
+ ret = fimc_capture_set_default_format(fimc);
+ if (!ret)
+ ret = fimc_capture_ctrls_create(fimc);
+
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->vid_cap.refcnt--;
+ v4l2_fh_release(file);
+ }
+ }
+unlock:
+ mutex_unlock(&fimc->lock);
return ret;
}
static int fimc_capture_close(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
if (--fimc->vid_cap.refcnt == 0) {
clear_bit(ST_CAPT_BUSY, &fimc->state);
fimc_stop_capture(fimc, false);
@@ -535,22 +546,40 @@ static int fimc_capture_close(struct file *file)
vb2_queue_release(&fimc->vid_cap.vbq);
fimc_ctrls_delete(fimc->vid_cap.ctx);
}
- return v4l2_fh_release(file);
+
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static unsigned int fimc_capture_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
- return vb2_poll(&fimc->vid_cap.vbq, file, wait);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return POLL_ERR;
+
+ ret = vb2_poll(&fimc->vid_cap.vbq, file, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_dev *fimc = video_drvdata(file);
+ int ret;
- return vb2_mmap(&fimc->vid_cap.vbq, vma);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = vb2_mmap(&fimc->vid_cap.vbq, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static const struct v4l2_file_operations fimc_capture_fops = {
@@ -658,7 +687,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
r->left = r->top = 0;
return;
}
- if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
+ if (target == V4L2_SEL_TGT_COMPOSE) {
if (ctx->rotation != 90 && ctx->rotation != 270)
align_h = 1;
max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3));
@@ -685,7 +714,7 @@ static void fimc_capture_try_selection(struct fimc_ctx *ctx,
rotate ? sink->f_height : sink->f_width);
max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height);
- if (target == V4L2_SEL_TGT_COMPOSE_ACTIVE) {
+ if (target == V4L2_SEL_TGT_COMPOSE) {
min_w = min_t(u32, max_w, sink->f_width / max_sc_h);
min_h = min_t(u32, max_h, sink->f_height / max_sc_v);
if (rotate) {
@@ -1146,9 +1175,9 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
s->r.height = f->o_height;
return 0;
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE:
f = &ctx->d_frame;
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ case V4L2_SEL_TGT_CROP:
s->r.left = f->offs_h;
s->r.top = f->offs_v;
s->r.width = f->width;
@@ -1160,7 +1189,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
}
/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
-int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
{
if (a->left < b->left || a->top < b->top)
return 0;
@@ -1184,9 +1213,9 @@ static int fimc_cap_s_selection(struct file *file, void *fh,
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
return -EINVAL;
- if (s->target == V4L2_SEL_TGT_COMPOSE_ACTIVE)
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
f = &ctx->d_frame;
- else if (s->target == V4L2_SEL_TGT_CROP_ACTIVE)
+ else if (s->target == V4L2_SEL_TGT_CROP)
f = &ctx->s_frame;
else
return -EINVAL;
@@ -1428,9 +1457,9 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
mutex_lock(&fimc->lock);
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
r->width = f->o_width;
r->height = f->o_height;
r->left = 0;
@@ -1438,10 +1467,10 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
mutex_unlock(&fimc->lock);
return 0;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
break;
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SEL_TGT_COMPOSE:
try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
f = &ctx->d_frame;
break;
@@ -1482,12 +1511,12 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
return -EINVAL;
mutex_lock(&fimc->lock);
- fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP_ACTIVE);
+ fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP);
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
r->width = f->o_width;
r->height = f->o_height;
r->left = 0;
@@ -1495,10 +1524,10 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
mutex_unlock(&fimc->lock);
return 0;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(fh, sel->pad);
break;
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SEL_TGT_COMPOSE:
try_sel = v4l2_subdev_get_try_compose(fh, sel->pad);
f = &ctx->d_frame;
break;
@@ -1514,7 +1543,7 @@ static int fimc_subdev_set_selection(struct v4l2_subdev *sd,
set_frame_crop(f, r->left, r->top, r->width, r->height);
set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
spin_unlock_irqrestore(&fimc->slock, flags);
- if (sel->target == V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL)
+ if (sel->target == V4L2_SEL_TGT_COMPOSE)
ctx->state |= FIMC_COMPOSE;
}
@@ -1589,10 +1618,7 @@ static int fimc_register_capture_device(struct fimc_dev *fimc,
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
- /* Locking in file operations other than ioctl should be done
- by the driver, not the V4L2 core.
- This driver needs auditing so that this flag can be removed. */
- set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
+
video_set_drvdata(vfd, fimc);
vid_cap = &fimc->vid_cap;
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index a4646ca1d56f..1a445404e73d 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -463,7 +463,7 @@ void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f)
f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v);
}
-int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
+static int fimc_set_color_effect(struct fimc_ctx *ctx, enum v4l2_colorfx colorfx)
{
struct fimc_effect *effect = &ctx->effect;
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 95b27ae5cf27..808ccc621846 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -27,9 +27,6 @@
#include <media/v4l2-mediabus.h>
#include <media/s5p_fimc.h>
-#define err(fmt, args...) \
- printk(KERN_ERR "%s:%d: " fmt "\n", __func__, __LINE__, ##args)
-
#define dbg(fmt, args...) \
pr_debug("%s:%d: " fmt "\n", __func__, __LINE__, ##args)
diff --git a/drivers/media/video/s5p-fimc/fimc-lite-reg.c b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
index 419adfb7cdf9..f996e94873f6 100644
--- a/drivers/media/video/s5p-fimc/fimc-lite-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-lite-reg.c
@@ -215,7 +215,7 @@ void flite_hw_set_camera_bus(struct fimc_lite *dev,
flite_hw_set_camera_port(dev, s_info->mux_id);
}
-void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
+static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
{
static const u32 pixcode[4][2] = {
{ V4L2_MBUS_FMT_YUYV8_2X8, FLITE_REG_CIODMAFMT_YCBYCR },
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.c b/drivers/media/video/s5p-fimc/fimc-lite.c
index 74ff310db30c..c5b57e805b68 100644
--- a/drivers/media/video/s5p-fimc/fimc-lite.c
+++ b/drivers/media/video/s5p-fimc/fimc-lite.c
@@ -902,7 +902,7 @@ static int fimc_lite_g_selection(struct file *file, void *fh,
sel->r.height = f->f_height;
return 0;
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE:
sel->r = f->rect;
return 0;
}
@@ -919,7 +919,7 @@ static int fimc_lite_s_selection(struct file *file, void *fh,
unsigned long flags;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
- sel->target != V4L2_SEL_TGT_COMPOSE_ACTIVE)
+ sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
fimc_lite_try_compose(fimc, &rect);
@@ -1117,9 +1117,9 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
struct fimc_lite *fimc = v4l2_get_subdevdata(sd);
struct flite_frame *f = &fimc->inp_frame;
- if ((sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL &&
- sel->target != V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS) ||
- sel->pad != FLITE_SD_PAD_SINK)
+ if ((sel->target != V4L2_SEL_TGT_CROP &&
+ sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
+ sel->pad != FLITE_SD_PAD_SINK)
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -1128,7 +1128,7 @@ static int fimc_lite_subdev_get_selection(struct v4l2_subdev *sd,
}
mutex_lock(&fimc->lock);
- if (sel->target == V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL) {
+ if (sel->target == V4L2_SEL_TGT_CROP) {
sel->r = f->rect;
} else {
sel->r.left = 0;
@@ -1153,8 +1153,7 @@ static int fimc_lite_subdev_set_selection(struct v4l2_subdev *sd,
struct flite_frame *f = &fimc->inp_frame;
int ret = 0;
- if (sel->target != V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL ||
- sel->pad != FLITE_SD_PAD_SINK)
+ if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != FLITE_SD_PAD_SINK)
return -EINVAL;
mutex_lock(&fimc->lock);
diff --git a/drivers/media/video/s5p-fimc/fimc-m2m.c b/drivers/media/video/s5p-fimc/fimc-m2m.c
index 4c58e0570962..c587011d80ef 100644
--- a/drivers/media/video/s5p-fimc/fimc-m2m.c
+++ b/drivers/media/video/s5p-fimc/fimc-m2m.c
@@ -259,7 +259,12 @@ static int fimc_m2m_querycap(struct file *file, void *fh,
strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1);
strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_STREAMING |
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
@@ -642,21 +647,25 @@ static int fimc_m2m_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx;
- int ret;
+ int ret = -EBUSY;
dbg("pid: %d, state: 0x%lx, refcnt: %d",
task_pid_nr(current), fimc->state, fimc->vid_cap.refcnt);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
/*
* Return if the corresponding video capture node
* is already opened.
*/
if (fimc->vid_cap.refcnt > 0)
- return -EBUSY;
+ goto unlock;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
v4l2_fh_init(&ctx->fh, fimc->m2m.vfd);
ctx->fimc_dev = fimc;
@@ -687,6 +696,8 @@ static int fimc_m2m_open(struct file *file)
if (fimc->m2m.refcnt++ == 0)
set_bit(ST_M2M_RUN, &fimc->state);
+
+ mutex_unlock(&fimc->lock);
return 0;
error_c:
@@ -695,6 +706,8 @@ error_fh:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
+unlock:
+ mutex_unlock(&fimc->lock);
return ret;
}
@@ -706,6 +719,9 @@ static int fimc_m2m_release(struct file *file)
dbg("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), fimc->state, fimc->m2m.refcnt);
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
v4l2_m2m_ctx_release(ctx->m2m_ctx);
fimc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
@@ -714,6 +730,8 @@ static int fimc_m2m_release(struct file *file)
if (--fimc->m2m.refcnt <= 0)
clear_bit(ST_M2M_RUN, &fimc->state);
kfree(ctx);
+
+ mutex_unlock(&fimc->lock);
return 0;
}
@@ -721,16 +739,32 @@ static unsigned int fimc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&fimc->lock);
- return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ return ret;
}
static int fimc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_ctx *ctx = fh_to_ctx(file->private_data);
+ struct fimc_dev *fimc = ctx->fimc_dev;
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
- return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static const struct v4l2_file_operations fimc_m2m_fops = {
@@ -772,10 +806,6 @@ int fimc_register_m2m_device(struct fimc_dev *fimc,
vfd->minor = -1;
vfd->release = video_device_release;
vfd->lock = &fimc->lock;
- /* Locking in file operations other than ioctl should be done
- by the driver, not the V4L2 core.
- This driver needs auditing so that this flag can be removed. */
- set_bit(V4L2_FL_LOCK_ALL_FOPS, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
video_set_drvdata(vfd, fimc);
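
With the V4L2_FL_LOCK_ALL_FOPS shortcut removed above, the fimc capture and m2m file operations take the driver mutex themselves; the recurring shape is an interruptible lock wrapped around the vb2 or m2m call, as in this mmap sketch (demo_locked_mmap is a placeholder):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <media/videobuf2-core.h>

/* Serialize mmap() against the other file operations with the driver
 * mutex, returning -ERESTARTSYS if interrupted by a signal. */
static int demo_locked_mmap(struct mutex *lock, struct vb2_queue *q,
                            struct vm_area_struct *vma)
{
        int ret;

        if (mutex_lock_interruptible(lock))
                return -ERESTARTSYS;

        ret = vb2_mmap(q, vma);
        mutex_unlock(lock);
        return ret;
}
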
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 52cef4865423..e65bb283fd8a 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(fimc_pipeline_initialize);
* sensor clock.
* Called with the graph mutex held.
*/
-int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
+static int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
int ret = 0;
@@ -1010,7 +1010,7 @@ static struct platform_driver fimc_md_driver = {
}
};
-int __init fimc_md_init(void)
+static int __init fimc_md_init(void)
{
int ret;
@@ -1021,7 +1021,8 @@ int __init fimc_md_init(void)
return platform_driver_register(&fimc_md_driver);
}
-void __exit fimc_md_exit(void)
+
+static void __exit fimc_md_exit(void)
{
platform_driver_unregister(&fimc_md_driver);
fimc_unregister_driver();
diff --git a/drivers/media/video/s5p-fimc/fimc-reg.c b/drivers/media/video/s5p-fimc/fimc-reg.c
index 1fc4ce8446f5..0e3eb9ce4f98 100644
--- a/drivers/media/video/s5p-fimc/fimc-reg.c
+++ b/drivers/media/video/s5p-fimc/fimc-reg.c
@@ -667,7 +667,8 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
FIMC_REG_CIGCTRL_SELCAM_MIPI | FIMC_REG_CIGCTRL_CAMIF_SELWB |
FIMC_REG_CIGCTRL_SELCAM_MIPI_A | FIMC_REG_CIGCTRL_CAM_JPEG);
- if (cam->bus_type == FIMC_MIPI_CSI2) {
+ switch (cam->bus_type) {
+ case FIMC_MIPI_CSI2:
cfg |= FIMC_REG_CIGCTRL_SELCAM_MIPI;
if (cam->mux_id == 0)
@@ -683,23 +684,24 @@ int fimc_hw_set_camera_type(struct fimc_dev *fimc,
cfg |= FIMC_REG_CIGCTRL_CAM_JPEG;
break;
default:
- v4l2_err(fimc->vid_cap.vfd,
- "Not supported camera pixel format: %d",
+ v4l2_err(vid_cap->vfd,
+ "Not supported camera pixel format: %#x\n",
vid_cap->mf.code);
return -EINVAL;
}
tmp |= (csis_data_alignment == 32) << 8;
writel(tmp, fimc->regs + FIMC_REG_CSIIMGFMT);
-
- } else if (cam->bus_type == FIMC_ITU_601 ||
- cam->bus_type == FIMC_ITU_656) {
+ break;
+ case FIMC_ITU_601...FIMC_ITU_656:
if (cam->mux_id == 0) /* ITU-A, ITU-B: 0, 1 */
cfg |= FIMC_REG_CIGCTRL_SELCAM_ITU_A;
- } else if (cam->bus_type == FIMC_LCD_WB) {
+ break;
+ case FIMC_LCD_WB:
cfg |= FIMC_REG_CIGCTRL_CAMIF_SELWB;
- } else {
- err("invalid camera bus type selected\n");
+ break;
+ default:
+ v4l2_err(vid_cap->vfd, "Invalid camera bus type selected\n");
return -EINVAL;
}
writel(cfg, fimc->regs + FIMC_REG_CIGCTRL);
diff --git a/drivers/media/video/s5p-g2d/g2d.c b/drivers/media/video/s5p-g2d/g2d.c
index 7c98ee7377ee..7c2200435206 100644
--- a/drivers/media/video/s5p-g2d/g2d.c
+++ b/drivers/media/video/s5p-g2d/g2d.c
@@ -290,8 +290,13 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, G2D_NAME, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
- | V4L2_CAP_STREAMING;
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+ V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
return 0;
}
diff --git a/drivers/media/video/s5p-jpeg/jpeg-core.c b/drivers/media/video/s5p-jpeg/jpeg-core.c
index 28b5225d94f5..813b801238d1 100644
--- a/drivers/media/video/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/video/s5p-jpeg/jpeg-core.c
@@ -489,9 +489,13 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
sizeof(cap->card));
}
cap->bus_info[0] = 0;
- cap->capabilities = V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_VIDEO_OUTPUT;
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
+ V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;
return 0;
}
@@ -824,10 +828,10 @@ static int s5p_jpeg_g_selection(struct file *file, void *priv,
/* For JPEG blob active == default == bounds */
switch (s->target) {
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
s->r.width = ctx->out_q.w;
s->r.height = ctx->out_q.h;
@@ -1503,29 +1507,7 @@ static struct platform_driver s5p_jpeg_driver = {
},
};
-static int __init
-s5p_jpeg_register(void)
-{
- int ret;
-
- pr_info("S5P JPEG V4L2 Driver, (c) 2011 Samsung Electronics\n");
-
- ret = platform_driver_register(&s5p_jpeg_driver);
-
- if (ret)
- pr_err("%s: failed to register jpeg driver\n", __func__);
-
- return ret;
-}
-
-static void __exit
-s5p_jpeg_unregister(void)
-{
- platform_driver_unregister(&s5p_jpeg_driver);
-}
-
-module_init(s5p_jpeg_register);
-module_exit(s5p_jpeg_unregister);
+module_platform_driver(s5p_jpeg_driver);
MODULE_AUTHOR("Andrzej Pietrasiewicz <andrzej.p@samsung.com>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
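
The open-coded init/exit pair is folded into module_platform_driver(), which generates equivalent module_init()/module_exit() stubs around platform_driver_register()/unregister(); the sii9234 change further down does the same with module_i2c_driver(). A minimal sketch with a placeholder demo_driver:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        return 0;       /* claim resources, register devices, ... */
}

static int demo_remove(struct platform_device *pdev)
{
        return 0;       /* release whatever probe set up */
}

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .remove = demo_remove,
        .driver = {
                .name = "demo",
        },
};

/* Expands to the module init/exit functions that register and
 * unregister demo_driver. */
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");
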
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index feea867f318c..c5d567f87d77 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -220,8 +220,14 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
- V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING;
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 158b78989b89..aa1c244cf66e 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -779,9 +779,14 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->version = KERNEL_VERSION(1, 0, 0);
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE
- | V4L2_CAP_VIDEO_OUTPUT_MPLANE
- | V4L2_CAP_STREAMING;
+ /*
+ * This is only a mem-to-mem video device. The capture and output
+ * device capability flags are left only for backward compatibility
+ * and are scheduled for removal.
+ */
+ cap->capabilities = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_VIDEO_OUTPUT_MPLANE;
return 0;
}
diff --git a/drivers/media/video/s5p-tv/mixer_video.c b/drivers/media/video/s5p-tv/mixer_video.c
index 33fde2a763ec..6c74b05d1f95 100644
--- a/drivers/media/video/s5p-tv/mixer_video.c
+++ b/drivers/media/video/s5p-tv/mixer_video.c
@@ -367,7 +367,7 @@ static int mxr_g_selection(struct file *file, void *fh,
return -EINVAL;
switch (s->target) {
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ case V4L2_SEL_TGT_CROP:
s->r.left = geo->src.x_offset;
s->r.top = geo->src.y_offset;
s->r.width = geo->src.width;
@@ -380,7 +380,7 @@ static int mxr_g_selection(struct file *file, void *fh,
s->r.width = geo->src.full_width;
s->r.height = geo->src.full_height;
break;
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_PADDED:
s->r.left = geo->dst.x_offset;
s->r.top = geo->dst.y_offset;
@@ -449,11 +449,11 @@ static int mxr_s_selection(struct file *file, void *fh,
res.height = geo->dst.full_height;
break;
- case V4L2_SEL_TGT_CROP_ACTIVE:
+ case V4L2_SEL_TGT_CROP:
target = &geo->src;
stage = MXR_GEOMETRY_CROP;
break;
- case V4L2_SEL_TGT_COMPOSE_ACTIVE:
+ case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_PADDED:
target = &geo->dst;
stage = MXR_GEOMETRY_COMPOSE;
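
The target renames above (V4L2_SEL_TGT_CROP_ACTIVE becoming V4L2_SEL_TGT_CROP, and likewise for compose) change spelling only, not semantics. An illustrative user-space query using the new names could look like the sketch below; the device path and buffer type are placeholders, not taken from this driver.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.target = V4L2_SEL_TGT_CROP,	/* formerly ..._CROP_ACTIVE */
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0 || ioctl(fd, VIDIOC_G_SELECTION, &sel) < 0)
		return 1;
	printf("crop: %ux%u@%d,%d\n", sel.r.width, sel.r.height,
	       sel.r.left, sel.r.top);
	return 0;
}
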
diff --git a/drivers/media/video/s5p-tv/sii9234_drv.c b/drivers/media/video/s5p-tv/sii9234_drv.c
index 0f31eccd7b80..6d348f90237a 100644
--- a/drivers/media/video/s5p-tv/sii9234_drv.c
+++ b/drivers/media/video/s5p-tv/sii9234_drv.c
@@ -419,14 +419,4 @@ static struct i2c_driver sii9234_driver = {
.id_table = sii9234_id,
};
-static int __init sii9234_init(void)
-{
- return i2c_add_driver(&sii9234_driver);
-}
-module_init(sii9234_init);
-
-static void __exit sii9234_exit(void)
-{
- i2c_del_driver(&sii9234_driver);
-}
-module_exit(sii9234_exit);
+module_i2c_driver(sii9234_driver);
diff --git a/drivers/media/video/saa7121.h b/drivers/media/video/saa7121.h
deleted file mode 100644
index 66967ae37494..000000000000
--- a/drivers/media/video/saa7121.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* saa7121.h - saa7121 initializations
- Copyright (C) 1999 Nathan Laredo (laredo@gnu.org)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
- */
-#ifndef __SAA7121_H__
-#define __SAA7121_H__
-
-#define NTSC_BURST_START 0x19 /* 28 */
-#define NTSC_BURST_END 0x1d /* 29 */
-#define NTSC_CHROMA_PHASE 0x67 /* 5a */
-#define NTSC_GAINU 0x76 /* 5b */
-#define NTSC_GAINV 0xa5 /* 5c */
-#define NTSC_BLACK_LEVEL 0x2a /* 5d */
-#define NTSC_BLANKING_LEVEL 0x2e /* 5e */
-#define NTSC_VBI_BLANKING 0x2e /* 5f */
-#define NTSC_DAC_CONTROL 0x11 /* 61 */
-#define NTSC_BURST_AMP 0x3f /* 62 */
-#define NTSC_SUBC3 0x1f /* 63 */
-#define NTSC_SUBC2 0x7c /* 64 */
-#define NTSC_SUBC1 0xf0 /* 65 */
-#define NTSC_SUBC0 0x21 /* 66 */
-#define NTSC_HTRIG 0x72 /* 6c */
-#define NTSC_VTRIG 0x00 /* 6c */
-#define NTSC_MULTI 0x30 /* 6e */
-#define NTSC_CCTTX 0x11 /* 6f */
-#define NTSC_FIRST_ACTIVE 0x12 /* 7a */
-#define NTSC_LAST_ACTIVE 0x02 /* 7b */
-#define NTSC_MSB_VERTICAL 0x40 /* 7c */
-
-#define PAL_BURST_START 0x21 /* 28 */
-#define PAL_BURST_END 0x1d /* 29 */
-#define PAL_CHROMA_PHASE 0x3f /* 5a */
-#define PAL_GAINU 0x7d /* 5b */
-#define PAL_GAINV 0xaf /* 5c */
-#define PAL_BLACK_LEVEL 0x23 /* 5d */
-#define PAL_BLANKING_LEVEL 0x35 /* 5e */
-#define PAL_VBI_BLANKING 0x35 /* 5f */
-#define PAL_DAC_CONTROL 0x02 /* 61 */
-#define PAL_BURST_AMP 0x2f /* 62 */
-#define PAL_SUBC3 0xcb /* 63 */
-#define PAL_SUBC2 0x8a /* 64 */
-#define PAL_SUBC1 0x09 /* 65 */
-#define PAL_SUBC0 0x2a /* 66 */
-#define PAL_HTRIG 0x86 /* 6c */
-#define PAL_VTRIG 0x04 /* 6d */
-#define PAL_MULTI 0x20 /* 6e */
-#define PAL_CCTTX 0x15 /* 6f */
-#define PAL_FIRST_ACTIVE 0x16 /* 7a */
-#define PAL_LAST_ACTIVE 0x36 /* 7b */
-#define PAL_MSB_VERTICAL 0x40 /* 7c */
-
-/* Initialization Sequence */
-
-static __u8 init7121ntsc[] = {
- 0x26, 0x0, 0x27, 0x0,
- 0x28, NTSC_BURST_START, 0x29, NTSC_BURST_END,
- 0x2a, 0x0, 0x2b, 0x0, 0x2c, 0x0, 0x2d, 0x0,
- 0x2e, 0x0, 0x2f, 0x0, 0x30, 0x0, 0x31, 0x0,
- 0x32, 0x0, 0x33, 0x0, 0x34, 0x0, 0x35, 0x0,
- 0x36, 0x0, 0x37, 0x0, 0x38, 0x0, 0x39, 0x0,
- 0x3a, 0x03, 0x3b, 0x0, 0x3c, 0x0, 0x3d, 0x0,
- 0x3e, 0x0, 0x3f, 0x0, 0x40, 0x0, 0x41, 0x0,
- 0x42, 0x0, 0x43, 0x0, 0x44, 0x0, 0x45, 0x0,
- 0x46, 0x0, 0x47, 0x0, 0x48, 0x0, 0x49, 0x0,
- 0x4a, 0x0, 0x4b, 0x0, 0x4c, 0x0, 0x4d, 0x0,
- 0x4e, 0x0, 0x4f, 0x0, 0x50, 0x0, 0x51, 0x0,
- 0x52, 0x0, 0x53, 0x0, 0x54, 0x0, 0x55, 0x0,
- 0x56, 0x0, 0x57, 0x0, 0x58, 0x0, 0x59, 0x0,
- 0x5a, NTSC_CHROMA_PHASE, 0x5b, NTSC_GAINU,
- 0x5c, NTSC_GAINV, 0x5d, NTSC_BLACK_LEVEL,
- 0x5e, NTSC_BLANKING_LEVEL, 0x5f, NTSC_VBI_BLANKING,
- 0x60, 0x0, 0x61, NTSC_DAC_CONTROL,
- 0x62, NTSC_BURST_AMP, 0x63, NTSC_SUBC3,
- 0x64, NTSC_SUBC2, 0x65, NTSC_SUBC1,
- 0x66, NTSC_SUBC0, 0x67, 0x80, 0x68, 0x80,
- 0x69, 0x80, 0x6a, 0x80, 0x6b, 0x29,
- 0x6c, NTSC_HTRIG, 0x6d, NTSC_VTRIG,
- 0x6e, NTSC_MULTI, 0x6f, NTSC_CCTTX,
- 0x70, 0xc9, 0x71, 0x68, 0x72, 0x60, 0x73, 0x0,
- 0x74, 0x0, 0x75, 0x0, 0x76, 0x0, 0x77, 0x0,
- 0x78, 0x0, 0x79, 0x0, 0x7a, NTSC_FIRST_ACTIVE,
- 0x7b, NTSC_LAST_ACTIVE, 0x7c, NTSC_MSB_VERTICAL,
- 0x7d, 0x0, 0x7e, 0x0, 0x7f, 0x0
-};
-#define INIT7121LEN (sizeof(init7121ntsc)/2)
-
-static __u8 init7121pal[] = {
- 0x26, 0x0, 0x27, 0x0,
- 0x28, PAL_BURST_START, 0x29, PAL_BURST_END,
- 0x2a, 0x0, 0x2b, 0x0, 0x2c, 0x0, 0x2d, 0x0,
- 0x2e, 0x0, 0x2f, 0x0, 0x30, 0x0, 0x31, 0x0,
- 0x32, 0x0, 0x33, 0x0, 0x34, 0x0, 0x35, 0x0,
- 0x36, 0x0, 0x37, 0x0, 0x38, 0x0, 0x39, 0x0,
- 0x3a, 0x03, 0x3b, 0x0, 0x3c, 0x0, 0x3d, 0x0,
- 0x3e, 0x0, 0x3f, 0x0, 0x40, 0x0, 0x41, 0x0,
- 0x42, 0x0, 0x43, 0x0, 0x44, 0x0, 0x45, 0x0,
- 0x46, 0x0, 0x47, 0x0, 0x48, 0x0, 0x49, 0x0,
- 0x4a, 0x0, 0x4b, 0x0, 0x4c, 0x0, 0x4d, 0x0,
- 0x4e, 0x0, 0x4f, 0x0, 0x50, 0x0, 0x51, 0x0,
- 0x52, 0x0, 0x53, 0x0, 0x54, 0x0, 0x55, 0x0,
- 0x56, 0x0, 0x57, 0x0, 0x58, 0x0, 0x59, 0x0,
- 0x5a, PAL_CHROMA_PHASE, 0x5b, PAL_GAINU,
- 0x5c, PAL_GAINV, 0x5d, PAL_BLACK_LEVEL,
- 0x5e, PAL_BLANKING_LEVEL, 0x5f, PAL_VBI_BLANKING,
- 0x60, 0x0, 0x61, PAL_DAC_CONTROL,
- 0x62, PAL_BURST_AMP, 0x63, PAL_SUBC3,
- 0x64, PAL_SUBC2, 0x65, PAL_SUBC1,
- 0x66, PAL_SUBC0, 0x67, 0x80, 0x68, 0x80,
- 0x69, 0x80, 0x6a, 0x80, 0x6b, 0x29,
- 0x6c, PAL_HTRIG, 0x6d, PAL_VTRIG,
- 0x6e, PAL_MULTI, 0x6f, PAL_CCTTX,
- 0x70, 0xc9, 0x71, 0x68, 0x72, 0x60, 0x73, 0x0,
- 0x74, 0x0, 0x75, 0x0, 0x76, 0x0, 0x77, 0x0,
- 0x78, 0x0, 0x79, 0x0, 0x7a, PAL_FIRST_ACTIVE,
- 0x7b, PAL_LAST_ACTIVE, 0x7c, PAL_MSB_VERTICAL,
- 0x7d, 0x0, 0x7e, 0x0, 0x7f, 0x0
-};
-#endif
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 5dfd826d734e..cc7f3d6ee966 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -1282,7 +1282,7 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_FLYDVBT_DUO_CARDBUS:
if (configure_tda827x_fe(dev, &tda827x_lifeview_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_PHILIPS_EUROPA:
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
@@ -1322,7 +1322,7 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_KWORLD_DVBT_210:
if (configure_tda827x_fe(dev, &kworld_dvb_t_210_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1120:
fe0->dvb.frontend = dvb_attach(tda10048_attach,
@@ -1340,17 +1340,17 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_PHILIPS_TIGER:
if (configure_tda827x_fe(dev, &philips_tiger_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_PINNACLE_PCTV_310i:
if (configure_tda827x_fe(dev, &pinnacle_pctv_310i_config,
&tda827x_cfg_1) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
if (configure_tda827x_fe(dev, &hauppauge_hvr_1110_config,
&tda827x_cfg_1) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1150:
fe0->dvb.frontend = dvb_attach(lgdt3305_attach,
@@ -1368,30 +1368,30 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_ASUSTeK_P7131_DUAL:
if (configure_tda827x_fe(dev, &asus_p7131_dual_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_FLYDVBT_LR301:
if (configure_tda827x_fe(dev, &tda827x_lifeview_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_FLYDVB_TRIO:
if (!use_frontend) { /* terrestrial */
if (configure_tda827x_fe(dev, &lifeview_trio_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
} else { /* satellite */
fe0->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs, &dev->i2c_adap);
if (fe0->dvb.frontend) {
if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x63,
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: Lifeview Trio, No tda826x found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dvb_attach(isl6421_attach, fe0->dvb.frontend, &dev->i2c_adap,
0x08, 0, 0) == NULL) {
wprintk("%s: Lifeview Trio, No ISL6421 found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
}
@@ -1407,7 +1407,7 @@ static int dvb_init(struct saa7134_dev *dev)
&ads_duo_cfg) == NULL) {
wprintk("no tda827x tuner found at addr: %02x\n",
ads_tech_duo_config.tuner_address);
- goto dettach_frontend;
+ goto detach_frontend;
}
} else
wprintk("failed to attach tda10046\n");
@@ -1415,13 +1415,13 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_TEVION_DVBT_220RF:
if (configure_tda827x_fe(dev, &tevion_dvbt220rf_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_MEDION_MD8800_QUADRO:
if (!use_frontend) { /* terrestrial */
if (configure_tda827x_fe(dev, &md8800_dvbt_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
} else { /* satellite */
fe0->dvb.frontend = dvb_attach(tda10086_attach,
&flydvbs, &dev->i2c_adap);
@@ -1435,7 +1435,7 @@ static int dvb_init(struct saa7134_dev *dev)
0x60, &dev->i2c_adap, 0) == NULL) {
wprintk("%s: Medion Quadro, no tda826x "
"found !\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dev_id != 0x08) {
/* we need to open the i2c gate (we know it exists) */
@@ -1444,7 +1444,7 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: Medion Quadro, no ISL6405 "
"found !\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dev_id == 0x07) {
/* fire up the 2nd section of the LNB supply since
@@ -1503,12 +1503,12 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: No tda826x found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dvb_attach(isl6421_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: No ISL6421 found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
break;
@@ -1537,37 +1537,37 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_CINERGY_HT_PCMCIA:
if (configure_tda827x_fe(dev, &cinergy_ht_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_CINERGY_HT_PCI:
if (configure_tda827x_fe(dev, &cinergy_ht_pci_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_PHILIPS_TIGER_S:
if (configure_tda827x_fe(dev, &philips_tiger_s_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_ASUS_P7131_4871:
if (configure_tda827x_fe(dev, &asus_p7131_4871_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_ASUSTeK_P7131_HYBRID_LNA:
if (configure_tda827x_fe(dev, &asus_p7131_hybrid_lna_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_AVERMEDIA_SUPER_007:
if (configure_tda827x_fe(dev, &avermedia_super_007_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
if (configure_tda827x_fe(dev, &twinhan_dtv_dvb_3056_config,
&tda827x_cfg_2_sw42) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_PHILIPS_SNAKE:
fe0->dvb.frontend = dvb_attach(tda10086_attach, &flydvbs,
@@ -1576,24 +1576,24 @@ static int dvb_init(struct saa7134_dev *dev)
if (dvb_attach(tda826x_attach, fe0->dvb.frontend, 0x60,
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: No tda826x found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: No lnbp21 found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
break;
case SAA7134_BOARD_CREATIX_CTX953:
if (configure_tda827x_fe(dev, &md8800_dvbt_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_MSI_TVANYWHERE_AD11:
if (configure_tda827x_fe(dev, &philips_tiger_s_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_AVERMEDIA_CARDBUS_506:
dprintk("AverMedia E506R dvb setup\n");
@@ -1614,7 +1614,7 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261) == NULL) {
wprintk("%s: MD7134 DVB-S, no SD1878 "
"found !\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
/* we need to open the i2c gate (we know it exists) */
fe = fe0->dvb.frontend;
@@ -1623,7 +1623,7 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0x08, 0, 0) == NULL) {
wprintk("%s: MD7134 DVB-S, no ISL6405 "
"found !\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
fe->ops.i2c_gate_ctrl(fe, 0);
dev->original_set_voltage = fe->ops.set_voltage;
@@ -1645,7 +1645,7 @@ static int dvb_init(struct saa7134_dev *dev)
if (!use_frontend) { /* terrestrial */
if (configure_tda827x_fe(dev, &asus_tiger_3in1_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
} else { /* satellite */
fe0->dvb.frontend = dvb_attach(tda10086_attach,
&flydvbs, &dev->i2c_adap);
@@ -1655,13 +1655,13 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: Asus Tiger 3in1, no "
"tda826x found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: Asus Tiger 3in1, no lnbp21"
" found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
}
@@ -1670,7 +1670,7 @@ static int dvb_init(struct saa7134_dev *dev)
if (!use_frontend) { /* terrestrial */
if (configure_tda827x_fe(dev, &asus_ps3_100_config,
&tda827x_cfg_2) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
} else { /* satellite */
fe0->dvb.frontend = dvb_attach(tda10086_attach,
&flydvbs, &dev->i2c_adap);
@@ -1680,13 +1680,13 @@ static int dvb_init(struct saa7134_dev *dev)
&dev->i2c_adap, 0) == NULL) {
wprintk("%s: Asus My Cinema PS3-100, no "
"tda826x found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
if (dvb_attach(lnbp21_attach, fe0->dvb.frontend,
&dev->i2c_adap, 0, 0) == NULL) {
wprintk("%s: Asus My Cinema PS3-100, no lnbp21"
" found!\n", __func__);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
}
@@ -1694,7 +1694,7 @@ static int dvb_init(struct saa7134_dev *dev)
case SAA7134_BOARD_ASUSTeK_TIGER:
if (configure_tda827x_fe(dev, &philips_tiger_config,
&tda827x_cfg_0) < 0)
- goto dettach_frontend;
+ goto detach_frontend;
break;
case SAA7134_BOARD_BEHOLD_H6:
fe0->dvb.frontend = dvb_attach(zl10353_attach,
@@ -1830,19 +1830,19 @@ static int dvb_init(struct saa7134_dev *dev)
};
if (!fe0->dvb.frontend)
- goto dettach_frontend;
+ goto detach_frontend;
fe = dvb_attach(xc2028_attach, fe0->dvb.frontend, &cfg);
if (!fe) {
printk(KERN_ERR "%s/2: xc3028 attach failed\n",
dev->name);
- goto dettach_frontend;
+ goto detach_frontend;
}
}
if (NULL == fe0->dvb.frontend) {
printk(KERN_ERR "%s/dvb: frontend initialization failed\n", dev->name);
- goto dettach_frontend;
+ goto detach_frontend;
}
/* define general-purpose callback pointer */
fe0->dvb.frontend->callback = saa7134_tuner_callback;
@@ -1864,7 +1864,7 @@ static int dvb_init(struct saa7134_dev *dev)
}
return ret;
-dettach_frontend:
+detach_frontend:
videobuf_dvb_dealloc_frontends(&dev->frontends);
return -EINVAL;
}
diff --git a/drivers/media/video/saa7146.h b/drivers/media/video/saa7146.h
deleted file mode 100644
index 9fadb331a40b..000000000000
--- a/drivers/media/video/saa7146.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- saa7146.h - definitions philips saa7146 based cards
- Copyright (C) 1999 Nathan Laredo (laredo@gnu.org)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef __SAA7146__
-#define __SAA7146__
-
-#define SAA7146_VERSION_CODE 0x000101
-
-#include <linux/types.h>
-#include <linux/wait.h>
-
-#ifndef O_NONCAP
-#define O_NONCAP O_TRUNC
-#endif
-
-#define MAX_GBUFFERS 2
-#define FBUF_SIZE 0x190000
-
-#ifdef __KERNEL__
-
-struct saa7146_window
-{
- int x, y;
- ushort width, height;
- ushort bpp, bpl;
- ushort swidth, sheight;
- short cropx, cropy;
- ushort cropwidth, cropheight;
- unsigned long vidadr;
- int color_fmt;
- ushort depth;
-};
-
-/* Per-open data for handling multiple opens on one device */
-struct device_open
-{
- int isopen;
- int noncapturing;
- struct saa7146 *dev;
-};
-#define MAX_OPENS 3
-
-struct saa7146
-{
- struct video_device video_dev;
- struct video_picture picture;
- struct video_audio audio_dev;
- struct video_info vidinfo;
- int user;
- int cap;
- int capuser;
- int irqstate; /* irq routine is state driven */
- int writemode;
- int playmode;
- unsigned int nr;
- unsigned long irq; /* IRQ used by SAA7146 card */
- unsigned short id;
- unsigned char revision;
- unsigned char boardcfg[64]; /* 64 bytes of config from eeprom */
- unsigned long saa7146_adr; /* bus address of IO mem from PCI BIOS */
- struct saa7146_window win;
- unsigned char __iomem *saa7146_mem; /* pointer to mapped IO memory */
- struct device_open open_data[MAX_OPENS];
-#define MAX_MARKS 16
- /* for a/v sync */
- int endmark[MAX_MARKS], endmarkhead, endmarktail;
- u32 *dmaRPS1, *pageRPS1, *dmaRPS2, *pageRPS2, *dmavid1, *dmavid2,
- *dmavid3, *dmaa1in, *dmaa1out, *dmaa2in, *dmaa2out,
- *pagedebi, *pagevid1, *pagevid2, *pagevid3, *pagea1in,
- *pagea1out, *pagea2in, *pagea2out;
- wait_queue_head_t i2cq, debiq, audq, vidq;
- u8 *vidbuf, *audbuf, *osdbuf, *dmadebi;
- int audhead, vidhead, osdhead, audtail, vidtail, osdtail;
- spinlock_t lock; /* the device lock */
-};
-#endif
-
-#ifdef _ALPHA_SAA7146
-#define saawrite(dat,adr) writel((dat), saa->saa7146_adr+(adr))
-#define saaread(adr) readl(saa->saa7146_adr+(adr))
-#else
-#define saawrite(dat,adr) writel((dat), saa->saa7146_mem+(adr))
-#define saaread(adr) readl(saa->saa7146_mem+(adr))
-#endif
-
-#define saaand(dat,adr) saawrite((dat) & saaread(adr), adr)
-#define saaor(dat,adr) saawrite((dat) | saaread(adr), adr)
-#define saaaor(dat,mask,adr) saawrite((dat) | ((mask) & saaread(adr)), adr)
-
-/* bitmask of attached hardware found */
-#define SAA7146_UNKNOWN 0x00000000
-#define SAA7146_SAA7111 0x00000001
-#define SAA7146_SAA7121 0x00000002
-#define SAA7146_IBMMPEG 0x00000004
-
-#endif
diff --git a/drivers/media/video/saa7146reg.h b/drivers/media/video/saa7146reg.h
deleted file mode 100644
index 80ec2c146b4c..000000000000
--- a/drivers/media/video/saa7146reg.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/*
- saa7146.h - definitions philips saa7146 based cards
- Copyright (C) 1999 Nathan Laredo (laredo@gnu.org)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef __SAA7146_REG__
-#define __SAA7146_REG__
-#define SAA7146_BASE_ODD1 0x00
-#define SAA7146_BASE_EVEN1 0x04
-#define SAA7146_PROT_ADDR1 0x08
-#define SAA7146_PITCH1 0x0c
-#define SAA7146_PAGE1 0x10
-#define SAA7146_NUM_LINE_BYTE1 0x14
-#define SAA7146_BASE_ODD2 0x18
-#define SAA7146_BASE_EVEN2 0x1c
-#define SAA7146_PROT_ADDR2 0x20
-#define SAA7146_PITCH2 0x24
-#define SAA7146_PAGE2 0x28
-#define SAA7146_NUM_LINE_BYTE2 0x2c
-#define SAA7146_BASE_ODD3 0x30
-#define SAA7146_BASE_EVEN3 0x34
-#define SAA7146_PROT_ADDR3 0x38
-#define SAA7146_PITCH3 0x3c
-#define SAA7146_PAGE3 0x40
-#define SAA7146_NUM_LINE_BYTE3 0x44
-#define SAA7146_PCI_BT_V1 0x48
-#define SAA7146_PCI_BT_V2 0x49
-#define SAA7146_PCI_BT_V3 0x4a
-#define SAA7146_PCI_BT_DEBI 0x4b
-#define SAA7146_PCI_BT_A 0x4c
-#define SAA7146_DD1_INIT 0x50
-#define SAA7146_DD1_STREAM_B 0x54
-#define SAA7146_DD1_STREAM_A 0x56
-#define SAA7146_BRS_CTRL 0x58
-#define SAA7146_HPS_CTRL 0x5c
-#define SAA7146_HPS_V_SCALE 0x60
-#define SAA7146_HPS_V_GAIN 0x64
-#define SAA7146_HPS_H_PRESCALE 0x68
-#define SAA7146_HPS_H_SCALE 0x6c
-#define SAA7146_BCS_CTRL 0x70
-#define SAA7146_CHROMA_KEY_RANGE 0x74
-#define SAA7146_CLIP_FORMAT_CTRL 0x78
-#define SAA7146_DEBI_CONFIG 0x7c
-#define SAA7146_DEBI_COMMAND 0x80
-#define SAA7146_DEBI_PAGE 0x84
-#define SAA7146_DEBI_AD 0x88
-#define SAA7146_I2C_TRANSFER 0x8c
-#define SAA7146_I2C_STATUS 0x90
-#define SAA7146_BASE_A1_IN 0x94
-#define SAA7146_PROT_A1_IN 0x98
-#define SAA7146_PAGE_A1_IN 0x9C
-#define SAA7146_BASE_A1_OUT 0xa0
-#define SAA7146_PROT_A1_OUT 0xa4
-#define SAA7146_PAGE_A1_OUT 0xa8
-#define SAA7146_BASE_A2_IN 0xac
-#define SAA7146_PROT_A2_IN 0xb0
-#define SAA7146_PAGE_A2_IN 0xb4
-#define SAA7146_BASE_A2_OUT 0xb8
-#define SAA7146_PROT_A2_OUT 0xbc
-#define SAA7146_PAGE_A2_OUT 0xc0
-#define SAA7146_RPS_PAGE0 0xc4
-#define SAA7146_RPS_PAGE1 0xc8
-#define SAA7146_RPS_THRESH0 0xcc
-#define SAA7146_RPS_THRESH1 0xd0
-#define SAA7146_RPS_TOV0 0xd4
-#define SAA7146_RPS_TOV1 0xd8
-#define SAA7146_IER 0xdc
-#define SAA7146_GPIO_CTRL 0xe0
-#define SAA7146_EC1SSR 0xe4
-#define SAA7146_EC2SSR 0xe8
-#define SAA7146_ECT1R 0xec
-#define SAA7146_ECT2R 0xf0
-#define SAA7146_ACON1 0xf4
-#define SAA7146_ACON2 0xf8
-#define SAA7146_MC1 0xfc
-#define SAA7146_MC2 0x100
-#define SAA7146_RPS_ADDR0 0x104
-#define SAA7146_RPS_ADDR1 0x108
-#define SAA7146_ISR 0x10c
-#define SAA7146_PSR 0x110
-#define SAA7146_SSR 0x114
-#define SAA7146_EC1R 0x118
-#define SAA7146_EC2R 0x11c
-#define SAA7146_VDP1 0x120
-#define SAA7146_VDP2 0x124
-#define SAA7146_VDP3 0x128
-#define SAA7146_ADP1 0x12c
-#define SAA7146_ADP2 0x130
-#define SAA7146_ADP3 0x134
-#define SAA7146_ADP4 0x138
-#define SAA7146_DDP 0x13c
-#define SAA7146_LEVEL_REP 0x140
-#define SAA7146_FB_BUFFER1 0x144
-#define SAA7146_FB_BUFFER2 0x148
-#define SAA7146_A_TIME_SLOT1 0x180
-#define SAA7146_A_TIME_SLOT2 0x1C0
-
-/* bitfield defines */
-#define MASK_31 0x80000000
-#define MASK_30 0x40000000
-#define MASK_29 0x20000000
-#define MASK_28 0x10000000
-#define MASK_27 0x08000000
-#define MASK_26 0x04000000
-#define MASK_25 0x02000000
-#define MASK_24 0x01000000
-#define MASK_23 0x00800000
-#define MASK_22 0x00400000
-#define MASK_21 0x00200000
-#define MASK_20 0x00100000
-#define MASK_19 0x00080000
-#define MASK_18 0x00040000
-#define MASK_17 0x00020000
-#define MASK_16 0x00010000
-#define MASK_15 0x00008000
-#define MASK_14 0x00004000
-#define MASK_13 0x00002000
-#define MASK_12 0x00001000
-#define MASK_11 0x00000800
-#define MASK_10 0x00000400
-#define MASK_09 0x00000200
-#define MASK_08 0x00000100
-#define MASK_07 0x00000080
-#define MASK_06 0x00000040
-#define MASK_05 0x00000020
-#define MASK_04 0x00000010
-#define MASK_03 0x00000008
-#define MASK_02 0x00000004
-#define MASK_01 0x00000002
-#define MASK_00 0x00000001
-#define MASK_B0 0x000000ff
-#define MASK_B1 0x0000ff00
-#define MASK_B2 0x00ff0000
-#define MASK_B3 0xff000000
-#define MASK_W0 0x0000ffff
-#define MASK_W1 0xffff0000
-#define MASK_PA 0xfffffffc
-#define MASK_PR 0xfffffffe
-#define MASK_ER 0xffffffff
-#define MASK_NONE 0x00000000
-
-#define SAA7146_PAGE_MAP_EN MASK_11
-/* main control register 1 */
-#define SAA7146_MC1_MRST_N MASK_15
-#define SAA7146_MC1_ERPS1 MASK_13
-#define SAA7146_MC1_ERPS0 MASK_12
-#define SAA7146_MC1_EDP MASK_11
-#define SAA7146_MC1_EVP MASK_10
-#define SAA7146_MC1_EAP MASK_09
-#define SAA7146_MC1_EI2C MASK_08
-#define SAA7146_MC1_TR_E_DEBI MASK_07
-#define SAA7146_MC1_TR_E_1 MASK_06
-#define SAA7146_MC1_TR_E_2 MASK_05
-#define SAA7146_MC1_TR_E_3 MASK_04
-#define SAA7146_MC1_TR_E_A2_OUT MASK_03
-#define SAA7146_MC1_TR_E_A2_IN MASK_02
-#define SAA7146_MC1_TR_E_A1_OUT MASK_01
-#define SAA7146_MC1_TR_E_A1_IN MASK_00
-/* main control register 2 */
-#define SAA7146_MC2_RPS_SIG4 MASK_15
-#define SAA7146_MC2_RPS_SIG3 MASK_14
-#define SAA7146_MC2_RPS_SIG2 MASK_13
-#define SAA7146_MC2_RPS_SIG1 MASK_12
-#define SAA7146_MC2_RPS_SIG0 MASK_11
-#define SAA7146_MC2_UPLD_D1_B MASK_10
-#define SAA7146_MC2_UPLD_D1_A MASK_09
-#define SAA7146_MC2_UPLD_BRS MASK_08
-#define SAA7146_MC2_UPLD_HPS_H MASK_06
-#define SAA7146_MC2_UPLD_HPS_V MASK_05
-#define SAA7146_MC2_UPLD_DMA3 MASK_04
-#define SAA7146_MC2_UPLD_DMA2 MASK_03
-#define SAA7146_MC2_UPLD_DMA1 MASK_02
-#define SAA7146_MC2_UPLD_DEBI MASK_01
-#define SAA7146_MC2_UPLD_I2C MASK_00
-/* Primary Status Register and Interrupt Enable/Status Registers */
-#define SAA7146_PSR_PPEF MASK_31
-#define SAA7146_PSR_PABO MASK_30
-#define SAA7146_PSR_PPED MASK_29
-#define SAA7146_PSR_RPS_I1 MASK_28
-#define SAA7146_PSR_RPS_I0 MASK_27
-#define SAA7146_PSR_RPS_LATE1 MASK_26
-#define SAA7146_PSR_RPS_LATE0 MASK_25
-#define SAA7146_PSR_RPS_E1 MASK_24
-#define SAA7146_PSR_RPS_E0 MASK_23
-#define SAA7146_PSR_RPS_TO1 MASK_22
-#define SAA7146_PSR_RPS_TO0 MASK_21
-#define SAA7146_PSR_UPLD MASK_20
-#define SAA7146_PSR_DEBI_S MASK_19
-#define SAA7146_PSR_DEBI_E MASK_18
-#define SAA7146_PSR_I2C_S MASK_17
-#define SAA7146_PSR_I2C_E MASK_16
-#define SAA7146_PSR_A2_IN MASK_15
-#define SAA7146_PSR_A2_OUT MASK_14
-#define SAA7146_PSR_A1_IN MASK_13
-#define SAA7146_PSR_A1_OUT MASK_12
-#define SAA7146_PSR_AFOU MASK_11
-#define SAA7146_PSR_V_PE MASK_10
-#define SAA7146_PSR_VFOU MASK_09
-#define SAA7146_PSR_FIDA MASK_08
-#define SAA7146_PSR_FIDB MASK_07
-#define SAA7146_PSR_PIN3 MASK_06
-#define SAA7146_PSR_PIN2 MASK_05
-#define SAA7146_PSR_PIN1 MASK_04
-#define SAA7146_PSR_PIN0 MASK_03
-#define SAA7146_PSR_ECS MASK_02
-#define SAA7146_PSR_EC3S MASK_01
-#define SAA7146_PSR_EC0S MASK_00
-/* Secondary Status Register */
-#define SAA7146_SSR_PRQ MASK_31
-#define SAA7146_SSR_PMA MASK_30
-#define SAA7146_SSR_RPS_RE1 MASK_29
-#define SAA7146_SSR_RPS_PE1 MASK_28
-#define SAA7146_SSR_RPS_A1 MASK_27
-#define SAA7146_SSR_RPS_RE0 MASK_26
-#define SAA7146_SSR_RPS_PE0 MASK_25
-#define SAA7146_SSR_RPS_A0 MASK_24
-#define SAA7146_SSR_DEBI_TO MASK_23
-#define SAA7146_SSR_DEBI_EF MASK_22
-#define SAA7146_SSR_I2C_EA MASK_21
-#define SAA7146_SSR_I2C_EW MASK_20
-#define SAA7146_SSR_I2C_ER MASK_19
-#define SAA7146_SSR_I2C_EL MASK_18
-#define SAA7146_SSR_I2C_EF MASK_17
-#define SAA7146_SSR_V3P MASK_16
-#define SAA7146_SSR_V2P MASK_15
-#define SAA7146_SSR_V1P MASK_14
-#define SAA7146_SSR_VF3 MASK_13
-#define SAA7146_SSR_VF2 MASK_12
-#define SAA7146_SSR_VF1 MASK_11
-#define SAA7146_SSR_AF2_IN MASK_10
-#define SAA7146_SSR_AF2_OUT MASK_09
-#define SAA7146_SSR_AF1_IN MASK_08
-#define SAA7146_SSR_AF1_OUT MASK_07
-#define SAA7146_SSR_VGT MASK_05
-#define SAA7146_SSR_LNQG MASK_04
-#define SAA7146_SSR_EC5S MASK_03
-#define SAA7146_SSR_EC4S MASK_02
-#define SAA7146_SSR_EC2S MASK_01
-#define SAA7146_SSR_EC1S MASK_00
-/* I2C status register */
-#define SAA7146_I2C_ABORT MASK_07
-#define SAA7146_I2C_SPERR MASK_06
-#define SAA7146_I2C_APERR MASK_05
-#define SAA7146_I2C_DTERR MASK_04
-#define SAA7146_I2C_DRERR MASK_03
-#define SAA7146_I2C_AL MASK_02
-#define SAA7146_I2C_ERR MASK_01
-#define SAA7146_I2C_BUSY MASK_00
-/* output formats */
-#define SAA7146_YUV422 0
-#define SAA7146_RGB16 0
-#define SAA7146_YUV444 1
-#define SAA7146_RGB24 1
-#define SAA7146_ARGB32 2
-#define SAA7146_YUV411 3
-#define SAA7146_ARGB15 3
-#define SAA7146_YUV2 4
-#define SAA7146_RGAB15 4
-#define SAA7146_Y8 6
-#define SAA7146_YUV8 7
-#define SAA7146_RGB8 7
-#define SAA7146_YUV444p 8
-#define SAA7146_YUV422p 9
-#define SAA7146_YUV420p 10
-#define SAA7146_YUV1620 11
-#define SAA7146_Y1 13
-#define SAA7146_Y2 14
-#define SAA7146_YUV1 15
-#endif
diff --git a/drivers/media/video/saa7164/saa7164-api.c b/drivers/media/video/saa7164/saa7164-api.c
index 8a98ab68239e..c8799fdaae67 100644
--- a/drivers/media/video/saa7164/saa7164-api.c
+++ b/drivers/media/video/saa7164/saa7164-api.c
@@ -1367,7 +1367,6 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
struct saa7164_dev *dev = bus->dev;
u16 len = 0;
int unitid;
- u32 regval;
u8 buf[256];
int ret;
@@ -1376,19 +1375,6 @@ int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
if (reglen > 4)
return -EIO;
- if (reglen == 1)
- regval = *(reg);
- else
- if (reglen == 2)
- regval = ((*(reg) << 8) || *(reg+1));
- else
- if (reglen == 3)
- regval = ((*(reg) << 16) | (*(reg+1) << 8) | *(reg+2));
- else
- if (reglen == 4)
- regval = ((*(reg) << 24) | (*(reg+1) << 16) |
- (*(reg+2) << 8) | *(reg+3));
-
/* Prepare the send buffer */
/* Bytes 00-03 source register length
* 04-07 source bytes to read
diff --git a/drivers/media/video/saa7164/saa7164-i2c.c b/drivers/media/video/saa7164/saa7164-i2c.c
index 26148f76cba2..4f7e3b42263f 100644
--- a/drivers/media/video/saa7164/saa7164-i2c.c
+++ b/drivers/media/video/saa7164/saa7164-i2c.c
@@ -69,15 +69,6 @@ err:
return retval;
}
-void saa7164_call_i2c_clients(struct saa7164_i2c *bus, unsigned int cmd,
- void *arg)
-{
- if (bus->i2c_rc != 0)
- return;
-
- i2c_clients_command(&bus->i2c_adap, cmd, arg);
-}
-
static u32 saa7164_functionality(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C;
@@ -106,21 +97,14 @@ int saa7164_i2c_register(struct saa7164_i2c *bus)
dprintk(DBGLVL_I2C, "%s(bus = %d)\n", __func__, bus->nr);
- memcpy(&bus->i2c_adap, &saa7164_i2c_adap_template,
- sizeof(bus->i2c_adap));
-
- memcpy(&bus->i2c_algo, &saa7164_i2c_algo_template,
- sizeof(bus->i2c_algo));
-
- memcpy(&bus->i2c_client, &saa7164_i2c_client_template,
- sizeof(bus->i2c_client));
+ bus->i2c_adap = saa7164_i2c_adap_template;
+ bus->i2c_client = saa7164_i2c_client_template;
bus->i2c_adap.dev.parent = &dev->pci->dev;
strlcpy(bus->i2c_adap.name, bus->dev->name,
sizeof(bus->i2c_adap.name));
- bus->i2c_algo.data = bus;
bus->i2c_adap.algo_data = bus;
i2c_set_adapdata(&bus->i2c_adap, bus);
i2c_add_adapter(&bus->i2c_adap);
diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h
index 8d120e3baf70..35219b9b0fbc 100644
--- a/drivers/media/video/saa7164/saa7164.h
+++ b/drivers/media/video/saa7164/saa7164.h
@@ -46,7 +46,6 @@
#include <linux/pci.h>
#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
@@ -251,7 +250,6 @@ struct saa7164_i2c {
/* I2C I/O */
struct i2c_adapter i2c_adap;
- struct i2c_algo_bit_data i2c_algo;
struct i2c_client i2c_client;
u32 i2c_rc;
};
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
index fb99ff18be07..3149cda1d0db 100644
--- a/drivers/media/video/smiapp/Kconfig
+++ b/drivers/media/video/smiapp/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_SMIAPP
tristate "SMIA++/SMIA sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
+ depends on MEDIA_CAMERA_SUPPORT
select VIDEO_SMIAPP_PLL
---help---
This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
index 9cf5bda35fbe..bfd47c106134 100644
--- a/drivers/media/video/smiapp/smiapp-core.c
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -33,15 +33,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-device.h>
#include "smiapp.h"
-#define SMIAPP_ALIGN_DIM(dim, flags) \
- ((flags) & V4L2_SUBDEV_SEL_FLAG_SIZE_GE \
- ? ALIGN((dim), 2) \
+#define SMIAPP_ALIGN_DIM(dim, flags) \
+ ((flags) & V4L2_SEL_FLAG_GE \
+ ? ALIGN((dim), 2) \
: (dim) & ~1)
/*
@@ -1631,7 +1630,7 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
smiapp_get_crop_compose(subdev, fh, crops, &comp, which);
switch (target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
comp->width = crops[SMIAPP_PAD_SINK]->width;
comp->height = crops[SMIAPP_PAD_SINK]->height;
if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
@@ -1647,7 +1646,7 @@ static void smiapp_propagate(struct v4l2_subdev *subdev,
}
}
/* Fall through */
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SEL_TGT_COMPOSE:
*crops[SMIAPP_PAD_SRC] = *comp;
break;
default:
@@ -1723,7 +1722,7 @@ static int smiapp_set_format(struct v4l2_subdev *subdev,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
ssd->sink_fmt = *crops[ssd->sink_pad];
smiapp_propagate(subdev, fh, fmt->which,
- V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+ V4L2_SEL_TGT_CROP);
mutex_unlock(&sensor->mutex);
@@ -1748,14 +1747,14 @@ static int scaling_goodness(struct v4l2_subdev *subdev, int w, int ask_w,
h &= ~1;
ask_h &= ~1;
- if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_GE) {
+ if (flags & V4L2_SEL_FLAG_GE) {
if (w < ask_w)
val -= SCALING_GOODNESS;
if (h < ask_h)
val -= SCALING_GOODNESS;
}
- if (flags & V4L2_SUBDEV_SEL_FLAG_SIZE_LE) {
+ if (flags & V4L2_SEL_FLAG_LE) {
if (w > ask_w)
val -= SCALING_GOODNESS;
if (h > ask_h)
@@ -1958,7 +1957,7 @@ static int smiapp_set_compose(struct v4l2_subdev *subdev,
*comp = sel->r;
smiapp_propagate(subdev, fh, sel->which,
- V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL);
+ V4L2_SEL_TGT_COMPOSE);
if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
return smiapp_update_mode(sensor);
@@ -1974,8 +1973,8 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
/* We only implement crop in three places. */
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
if (ssd == sensor->pixel_array
&& sel->pad == SMIAPP_PA_PAD_SRC)
return 0;
@@ -1988,8 +1987,8 @@ static int __smiapp_sel_supported(struct v4l2_subdev *subdev,
== SMIAPP_DIGITAL_CROP_CAPABILITY_INPUT_CROP)
return 0;
return -EINVAL;
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
if (sel->pad == ssd->source_pad)
return -EINVAL;
if (ssd == sensor->binner)
@@ -2051,7 +2050,7 @@ static int smiapp_set_crop(struct v4l2_subdev *subdev,
if (ssd != sensor->pixel_array && sel->pad == SMIAPP_PAD_SINK)
smiapp_propagate(subdev, fh, sel->which,
- V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL);
+ V4L2_SEL_TGT_CROP);
return 0;
}
@@ -2085,7 +2084,7 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
}
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
if (ssd == sensor->pixel_array) {
sel->r.width =
sensor->limits[SMIAPP_LIMIT_X_ADDR_MAX] + 1;
@@ -2097,11 +2096,11 @@ static int __smiapp_get_selection(struct v4l2_subdev *subdev,
sel->r = *comp;
}
break;
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
sel->r = *crops[sel->pad];
break;
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SEL_TGT_COMPOSE:
sel->r = *comp;
break;
}
@@ -2148,10 +2147,10 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
sel->r.height);
switch (sel->target) {
- case V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL:
+ case V4L2_SEL_TGT_CROP:
ret = smiapp_set_crop(subdev, fh, sel);
break;
- case V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL:
+ case V4L2_SEL_TGT_COMPOSE:
ret = smiapp_set_compose(subdev, fh, sel);
break;
default:
diff --git a/drivers/media/video/sn9c102/sn9c102.h b/drivers/media/video/sn9c102/sn9c102.h
index 22ea211ab54f..2bc153e869be 100644
--- a/drivers/media/video/sn9c102/sn9c102.h
+++ b/drivers/media/video/sn9c102/sn9c102.h
@@ -182,7 +182,7 @@ do { \
# define V4LDBG(level, name, cmd) \
do { \
if (debug >= (level)) \
- v4l_print_ioctl(name, cmd); \
+ v4l_printk_ioctl(name, cmd); \
} while (0)
# define KDBG(level, fmt, args...) \
do { \
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 0421bf9453b4..1bde255e45df 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -62,7 +62,7 @@ static int soc_camera_power_on(struct soc_camera_device *icd,
}
if (icl->power) {
- ret = icl->power(icd->pdev, 1);
+ ret = icl->power(icd->control, 1);
if (ret < 0) {
dev_err(icd->pdev,
"Platform failed to power-on the camera.\n");
@@ -78,7 +78,7 @@ static int soc_camera_power_on(struct soc_camera_device *icd,
esdpwr:
if (icl->power)
- icl->power(icd->pdev, 0);
+ icl->power(icd->control, 0);
elinkpwr:
regulator_bulk_disable(icl->num_regulators,
icl->regulators);
@@ -95,7 +95,7 @@ static int soc_camera_power_off(struct soc_camera_device *icd,
return ret;
if (icl->power) {
- ret = icl->power(icd->pdev, 0);
+ ret = icl->power(icd->control, 0);
if (ret < 0) {
dev_err(icd->pdev,
"Platform failed to power-off the camera.\n");
@@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd,
dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);
- if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
+ if (pix->pixelformat != V4L2_PIX_FMT_JPEG &&
+ !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) {
pix->bytesperline = 0;
pix->sizeimage = 0;
}
@@ -1518,6 +1519,7 @@ static int __devexit soc_camera_pdrv_remove(struct platform_device *pdev)
}
static struct platform_driver __refdata soc_camera_pdrv = {
+ .probe = soc_camera_pdrv_probe,
.remove = __devexit_p(soc_camera_pdrv_remove),
.driver = {
.name = "soc-camera-pdrv",
@@ -1527,7 +1529,7 @@ static struct platform_driver __refdata soc_camera_pdrv = {
static int __init soc_camera_init(void)
{
- return platform_driver_probe(&soc_camera_pdrv, soc_camera_pdrv_probe);
+ return platform_driver_register(&soc_camera_pdrv);
}
static void __exit soc_camera_exit(void)
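
The soc-camera hunks above swap platform_driver_probe() for a plain platform_driver_register() with a .probe member. The practical difference, sketched below with hypothetical names, is that the one-shot form only binds devices already present at registration time, whereas the registered form keeps the probe callback around so devices added later (or probes deferred with -EPROBE_DEFER) can still bind.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical skeleton contrasting the two registration styles. */
static int example_probe(struct platform_device *pdev)
{
	return 0;	/* claim the device */
}

static struct platform_driver example_pdrv = {
	.probe	= example_probe,	/* retained: late binding possible */
	.driver	= { .name = "example-pdrv" },
};

static int __init example_init(void)
{
	/* One-shot style being moved away from:
	 *	return platform_driver_probe(&example_pdrv, example_probe);
	 * Style being adopted: */
	return platform_driver_register(&example_pdrv);
}
module_init(example_init);
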
diff --git a/drivers/media/video/soc_mediabus.c b/drivers/media/video/soc_mediabus.c
index 89dce097a827..a397812635d6 100644
--- a/drivers/media/video/soc_mediabus.c
+++ b/drivers/media/video/soc_mediabus.c
@@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel);
s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf)
{
+ if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+ return 0;
+
if (mf->layout != SOC_MBUS_LAYOUT_PACKED)
return width * mf->bits_per_sample / 8;
@@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line);
s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height)
{
+ if (mf->fourcc == V4L2_PIX_FMT_JPEG)
+ return 0;
+
if (mf->layout == SOC_MBUS_LAYOUT_PACKED)
return bytes_per_line * height;
diff --git a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c
index c096b3f74200..7b1f6ebd0e2c 100644
--- a/drivers/media/video/tlg2300/pd-main.c
+++ b/drivers/media/video/tlg2300/pd-main.c
@@ -53,7 +53,8 @@ int debug_mode;
module_param(debug_mode, int, 0644);
MODULE_PARM_DESC(debug_mode, "0 = disable, 1 = enable, 2 = verbose");
-static const char *firmware_name = "tlg2300_firmware.bin";
+#define TLG2300_FIRMWARE "tlg2300_firmware.bin"
+static const char *firmware_name = TLG2300_FIRMWARE;
static struct usb_driver poseidon_driver;
static LIST_HEAD(pd_device_list);
@@ -532,3 +533,4 @@ MODULE_AUTHOR("Telegent Systems");
MODULE_DESCRIPTION("For tlg2300-based USB device ");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.2");
+MODULE_FIRMWARE(TLG2300_FIRMWARE);
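
Defining the firmware name as a macro, as done above, lets the same string feed both the runtime loader and the module metadata that tools such as modinfo read. A short illustrative version of the pattern follows; it is not the tlg2300 driver's actual load path, and the blob name and function are made up.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

#define EXAMPLE_FW "example_firmware.bin"	/* hypothetical blob name */

/* Fetch the blob by the shared name and hand it to the hardware. */
static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, EXAMPLE_FW, dev);
	if (ret)
		return ret;
	/* ... push fw->data, fw->size to the device here ... */
	release_firmware(fw);
	return 0;
}

MODULE_FIRMWARE(EXAMPLE_FW);	/* recorded in the module's metadata */
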
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 1ad5ab6ce5cf..b5a819af2b8c 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -228,6 +228,16 @@ static int fe_has_signal(struct dvb_frontend *fe)
return strength;
}
+static int fe_get_afc(struct dvb_frontend *fe)
+{
+ s32 afc = 0;
+
+ if (fe->ops.tuner_ops.get_afc)
+ fe->ops.tuner_ops.get_afc(fe, &afc);
+
+ return 0;
+}
+
static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg)
{
struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
@@ -247,6 +257,7 @@ static struct analog_demod_ops tuner_analog_ops = {
.set_params = fe_set_params,
.standby = fe_standby,
.has_signal = fe_has_signal,
+ .get_afc = fe_get_afc,
.set_config = fe_set_config,
.tuner_status = tuner_status
};
@@ -1178,6 +1189,8 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
+ if (analog_ops->has_signal)
+ vt->signal = analog_ops->has_signal(&t->fe);
if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
@@ -1197,8 +1210,6 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
V4L2_TUNER_SUB_STEREO :
V4L2_TUNER_SUB_MONO;
}
- if (analog_ops->has_signal)
- vt->signal = analog_ops->has_signal(&t->fe);
vt->audmode = t->audmode;
}
vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c5b1a7365e4f..321b3153df87 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -59,8 +59,8 @@ struct CHIPSTATE;
typedef int (*getvalue)(int);
typedef int (*checkit)(struct CHIPSTATE*);
typedef int (*initialize)(struct CHIPSTATE*);
-typedef int (*getmode)(struct CHIPSTATE*);
-typedef void (*setmode)(struct CHIPSTATE*, int mode);
+typedef int (*getrxsubchans)(struct CHIPSTATE *);
+typedef void (*setaudmode)(struct CHIPSTATE*, int mode);
/* i2c command */
typedef struct AUDIOCMD {
@@ -96,8 +96,8 @@ struct CHIPDESC {
getvalue volfunc,treblefunc,bassfunc;
/* get/set mode */
- getmode getmode;
- setmode setmode;
+ getrxsubchans getrxsubchans;
+ setaudmode setaudmode;
/* input switch register + values for v4l inputs */
int inputreg;
@@ -118,7 +118,7 @@ struct CHIPSTATE {
audiocmd shadow;
/* current settings */
- __u16 left,right,treble,bass,muted,mode;
+ __u16 left, right, treble, bass, muted;
int prevmode;
int radio;
int input;
@@ -126,7 +126,6 @@ struct CHIPSTATE {
/* thread */
struct task_struct *thread;
struct timer_list wt;
- int watch_stereo;
int audmode;
};
@@ -288,7 +287,7 @@ static int chip_thread(void *data)
struct CHIPSTATE *chip = data;
struct CHIPDESC *desc = chip->desc;
struct v4l2_subdev *sd = &chip->sd;
- int mode;
+ int mode, selected;
v4l2_dbg(1, debug, sd, "thread started\n");
set_freezable();
@@ -302,12 +301,12 @@ static int chip_thread(void *data)
break;
v4l2_dbg(1, debug, sd, "thread wakeup\n");
- /* don't do anything for radio or if mode != auto */
- if (chip->radio || chip->mode != 0)
+ /* don't do anything for radio */
+ if (chip->radio)
continue;
/* have a look what's going on */
- mode = desc->getmode(chip);
+ mode = desc->getrxsubchans(chip);
if (mode == chip->prevmode)
continue;
@@ -316,16 +315,32 @@ static int chip_thread(void *data)
chip->prevmode = mode;
- if (mode & V4L2_TUNER_MODE_STEREO)
- desc->setmode(chip, V4L2_TUNER_MODE_STEREO);
- if (mode & V4L2_TUNER_MODE_LANG1_LANG2)
- desc->setmode(chip, V4L2_TUNER_MODE_STEREO);
- else if (mode & V4L2_TUNER_MODE_LANG1)
- desc->setmode(chip, V4L2_TUNER_MODE_LANG1);
- else if (mode & V4L2_TUNER_MODE_LANG2)
- desc->setmode(chip, V4L2_TUNER_MODE_LANG2);
- else
- desc->setmode(chip, V4L2_TUNER_MODE_MONO);
+ selected = V4L2_TUNER_MODE_MONO;
+ switch (chip->audmode) {
+ case V4L2_TUNER_MODE_MONO:
+ if (mode & V4L2_TUNER_SUB_LANG1)
+ selected = V4L2_TUNER_MODE_LANG1;
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ case V4L2_TUNER_MODE_LANG1:
+ if (mode & V4L2_TUNER_SUB_LANG1)
+ selected = V4L2_TUNER_MODE_LANG1;
+ else if (mode & V4L2_TUNER_SUB_STEREO)
+ selected = V4L2_TUNER_MODE_STEREO;
+ break;
+ case V4L2_TUNER_MODE_LANG2:
+ if (mode & V4L2_TUNER_SUB_LANG2)
+ selected = V4L2_TUNER_MODE_LANG2;
+ else if (mode & V4L2_TUNER_SUB_STEREO)
+ selected = V4L2_TUNER_MODE_STEREO;
+ break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ if (mode & V4L2_TUNER_SUB_LANG2)
+ selected = V4L2_TUNER_MODE_LANG1_LANG2;
+ else if (mode & V4L2_TUNER_SUB_STEREO)
+ selected = V4L2_TUNER_MODE_STEREO;
+ }
+ desc->setaudmode(chip, selected);
/* schedule next check */
mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
@@ -358,24 +373,25 @@ static int chip_thread(void *data)
#define TDA9840_TEST_INT1SN 0x1 /* Integration time 0.5s when set */
#define TDA9840_TEST_INTFU 0x02 /* Disables integrator function */
-static int tda9840_getmode(struct CHIPSTATE *chip)
+static int tda9840_getrxsubchans(struct CHIPSTATE *chip)
{
struct v4l2_subdev *sd = &chip->sd;
int val, mode;
val = chip_read(chip);
- mode = V4L2_TUNER_MODE_MONO;
+ mode = V4L2_TUNER_SUB_MONO;
if (val & TDA9840_DS_DUAL)
- mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
+ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
if (val & TDA9840_ST_STEREO)
- mode |= V4L2_TUNER_MODE_STEREO;
+ mode = V4L2_TUNER_SUB_STEREO;
- v4l2_dbg(1, debug, sd, "tda9840_getmode(): raw chip read: %d, return: %d\n",
+ v4l2_dbg(1, debug, sd,
+ "tda9840_getrxsubchans(): raw chip read: %d, return: %d\n",
val, mode);
return mode;
}
-static void tda9840_setmode(struct CHIPSTATE *chip, int mode)
+static void tda9840_setaudmode(struct CHIPSTATE *chip, int mode)
{
int update = 1;
int t = chip->shadow.bytes[TDA9840_SW + 1] & ~0x7e;
@@ -393,6 +409,9 @@ static void tda9840_setmode(struct CHIPSTATE *chip, int mode)
case V4L2_TUNER_MODE_LANG2:
t |= TDA9840_DUALB;
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ t |= TDA9840_DUALAB;
+ break;
default:
update = 0;
}
@@ -477,6 +496,7 @@ static int tda9840_checkit(struct CHIPSTATE *chip)
/* 0x06 - C6 - Control 2 in TDA9855, Control 3 in TDA9850 */
/* Common to TDA9855 and TDA9850: */
#define TDA985x_SAP 3<<6 /* Selects SAP output, mute if not received */
+#define TDA985x_MONOSAP 2<<6 /* Selects Mono on left, SAP on right */
#define TDA985x_STEREO 1<<6 /* Selects Stereo ouput, mono if not received */
#define TDA985x_MONO 0 /* Forces Mono output */
#define TDA985x_LMU 1<<3 /* Mute (LOR/LOL for 9855, OUTL/OUTR for 9850) */
@@ -513,18 +533,22 @@ static int tda9855_volume(int val) { return val/0x2e8+0x27; }
static int tda9855_bass(int val) { return val/0xccc+0x06; }
static int tda9855_treble(int val) { return (val/0x1c71+0x3)<<1; }
-static int tda985x_getmode(struct CHIPSTATE *chip)
+static int tda985x_getrxsubchans(struct CHIPSTATE *chip)
{
- int mode;
+ int mode, val;
- mode = ((TDA985x_STP | TDA985x_SAPP) &
- chip_read(chip)) >> 4;
/* Add mono mode regardless of SAP and stereo */
/* Allows forced mono */
- return mode | V4L2_TUNER_MODE_MONO;
+ mode = V4L2_TUNER_SUB_MONO;
+ val = chip_read(chip);
+ if (val & TDA985x_STP)
+ mode = V4L2_TUNER_SUB_STEREO;
+ if (val & TDA985x_SAPP)
+ mode |= V4L2_TUNER_SUB_SAP;
+ return mode;
}
-static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
+static void tda985x_setaudmode(struct CHIPSTATE *chip, int mode)
{
int update = 1;
int c6 = chip->shadow.bytes[TDA985x_C6+1] & 0x3f;
@@ -534,11 +558,15 @@ static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
c6 |= TDA985x_MONO;
break;
case V4L2_TUNER_MODE_STEREO:
+ case V4L2_TUNER_MODE_LANG1:
c6 |= TDA985x_STEREO;
break;
- case V4L2_TUNER_MODE_LANG1:
+ case V4L2_TUNER_MODE_SAP:
c6 |= TDA985x_SAP;
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ c6 |= TDA985x_MONOSAP;
+ break;
default:
update = 0;
}
@@ -583,9 +611,10 @@ static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
#define TDA9873_TR_MASK (7 << 2)
#define TDA9873_TR_MONO 4
#define TDA9873_TR_STEREO 1 << 4
-#define TDA9873_TR_REVERSE (1 << 3) & (1 << 2)
+#define TDA9873_TR_REVERSE ((1 << 3) | (1 << 2))
#define TDA9873_TR_DUALA 1 << 2
#define TDA9873_TR_DUALB 1 << 3
+#define TDA9873_TR_DUALAB 0
/* output level controls
* B5: output level switch (0 = reduced gain, 1 = normal gain)
@@ -653,46 +682,51 @@ static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
#define TDA9873_MOUT_DUALA 0
#define TDA9873_MOUT_DUALB 1 << 3
#define TDA9873_MOUT_ST 1 << 4
-#define TDA9873_MOUT_EXTM (1 << 4 ) & (1 << 3)
+#define TDA9873_MOUT_EXTM ((1 << 4) | (1 << 3))
#define TDA9873_MOUT_EXTL 1 << 5
-#define TDA9873_MOUT_EXTR (1 << 5 ) & (1 << 3)
-#define TDA9873_MOUT_EXTLR (1 << 5 ) & (1 << 4)
-#define TDA9873_MOUT_MUTE (1 << 5 ) & (1 << 4) & (1 << 3)
+#define TDA9873_MOUT_EXTR ((1 << 5) | (1 << 3))
+#define TDA9873_MOUT_EXTLR ((1 << 5) | (1 << 4))
+#define TDA9873_MOUT_MUTE ((1 << 5) | (1 << 4) | (1 << 3))
/* Status bits: (chip read) */
#define TDA9873_PONR 0 /* Power-on reset detected if = 1 */
#define TDA9873_STEREO 2 /* Stereo sound is identified */
#define TDA9873_DUAL 4 /* Dual sound is identified */
-static int tda9873_getmode(struct CHIPSTATE *chip)
+static int tda9873_getrxsubchans(struct CHIPSTATE *chip)
{
struct v4l2_subdev *sd = &chip->sd;
int val,mode;
val = chip_read(chip);
- mode = V4L2_TUNER_MODE_MONO;
+ mode = V4L2_TUNER_SUB_MONO;
if (val & TDA9873_STEREO)
- mode |= V4L2_TUNER_MODE_STEREO;
+ mode = V4L2_TUNER_SUB_STEREO;
if (val & TDA9873_DUAL)
- mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
- v4l2_dbg(1, debug, sd, "tda9873_getmode(): raw chip read: %d, return: %d\n",
+ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+ v4l2_dbg(1, debug, sd,
+ "tda9873_getrxsubchans(): raw chip read: %d, return: %d\n",
val, mode);
return mode;
}
-static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
+static void tda9873_setaudmode(struct CHIPSTATE *chip, int mode)
{
struct v4l2_subdev *sd = &chip->sd;
int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK;
/* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */
if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) {
- v4l2_dbg(1, debug, sd, "tda9873_setmode(): external input\n");
+ v4l2_dbg(1, debug, sd,
+ "tda9873_setaudmode(): external input\n");
return;
}
- v4l2_dbg(1, debug, sd, "tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
- v4l2_dbg(1, debug, sd, "tda9873_setmode(): sw_data = %d\n", sw_data);
+ v4l2_dbg(1, debug, sd,
+ "tda9873_setaudmode(): chip->shadow.bytes[%d] = %d\n",
+ TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]);
+ v4l2_dbg(1, debug, sd, "tda9873_setaudmode(): sw_data = %d\n",
+ sw_data);
switch (mode) {
case V4L2_TUNER_MODE_MONO:
@@ -707,13 +741,16 @@ static void tda9873_setmode(struct CHIPSTATE *chip, int mode)
case V4L2_TUNER_MODE_LANG2:
sw_data |= TDA9873_TR_DUALB;
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ sw_data |= TDA9873_TR_DUALAB;
+ break;
default:
- chip->mode = 0;
return;
}
chip_write(chip, TDA9873_SW, sw_data);
- v4l2_dbg(1, debug, sd, "tda9873_setmode(): req. mode %d; chip_write: %d\n",
+ v4l2_dbg(1, debug, sd,
+ "tda9873_setaudmode(): req. mode %d; chip_write: %d\n",
mode, sw_data);
}
@@ -859,13 +896,13 @@ static int tda9874a_setup(struct CHIPSTATE *chip)
return 1;
}
-static int tda9874a_getmode(struct CHIPSTATE *chip)
+static int tda9874a_getrxsubchans(struct CHIPSTATE *chip)
{
struct v4l2_subdev *sd = &chip->sd;
int dsr,nsr,mode;
int necr; /* just for debugging */
- mode = V4L2_TUNER_MODE_MONO;
+ mode = V4L2_TUNER_SUB_MONO;
if(-1 == (dsr = chip_read2(chip,TDA9874A_DSR)))
return mode;
@@ -888,22 +925,23 @@ static int tda9874a_getmode(struct CHIPSTATE *chip)
* external 4052 multiplexer in audio_hook().
*/
if(nsr & 0x02) /* NSR.S/MB=1 */
- mode |= V4L2_TUNER_MODE_STEREO;
+ mode = V4L2_TUNER_SUB_STEREO;
if(nsr & 0x01) /* NSR.D/SB=1 */
- mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
+ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
} else {
if(dsr & 0x02) /* DSR.IDSTE=1 */
- mode |= V4L2_TUNER_MODE_STEREO;
+ mode = V4L2_TUNER_SUB_STEREO;
if(dsr & 0x04) /* DSR.IDDUA=1 */
- mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
+ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
}
- v4l2_dbg(1, debug, sd, "tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n",
+ v4l2_dbg(1, debug, sd,
+ "tda9874a_getrxsubchans(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n",
dsr, nsr, necr, mode);
return mode;
}
-static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
+static void tda9874a_setaudmode(struct CHIPSTATE *chip, int mode)
{
struct v4l2_subdev *sd = &chip->sd;
@@ -939,14 +977,18 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
aosr = 0xa0; /* auto-select, dual B/B */
mdacosr = (tda9874a_mode) ? 0x83:0x81;
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ aosr = 0x00; /* always route L to L and R to R */
+ mdacosr = (tda9874a_mode) ? 0x82:0x80;
+ break;
default:
- chip->mode = 0;
return;
}
chip_write(chip, TDA9874A_AOSR, aosr);
chip_write(chip, TDA9874A_MDACOSR, mdacosr);
- v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n",
+ v4l2_dbg(1, debug, sd,
+ "tda9874a_setaudmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n",
mode, aosr, mdacosr);
} else { /* dic == 0x07 */
@@ -974,14 +1016,18 @@ static void tda9874a_setmode(struct CHIPSTATE *chip, int mode)
fmmr = 0x02; /* dual */
aosr = 0x20; /* dual B/B */
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ fmmr = 0x02; /* dual */
+ aosr = 0x00; /* dual A/B */
+ break;
default:
- chip->mode = 0;
return;
}
chip_write(chip, TDA9874A_FMMR, fmmr);
chip_write(chip, TDA9874A_AOSR, aosr);
- v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n",
+ v4l2_dbg(1, debug, sd,
+ "tda9874a_setaudmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n",
mode, fmmr, aosr);
}
}
@@ -1226,25 +1272,33 @@ static int tea6320_initialize(struct CHIPSTATE * chip)
static int tda8425_shift10(int val) { return (val >> 10) | 0xc0; }
static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; }
-static void tda8425_setmode(struct CHIPSTATE *chip, int mode)
+static void tda8425_setaudmode(struct CHIPSTATE *chip, int mode)
{
int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1;
- if (mode & V4L2_TUNER_MODE_LANG1) {
+ switch (mode) {
+ case V4L2_TUNER_MODE_LANG1:
s1 |= TDA8425_S1_ML_SOUND_A;
s1 |= TDA8425_S1_STEREO_PSEUDO;
-
- } else if (mode & V4L2_TUNER_MODE_LANG2) {
+ break;
+ case V4L2_TUNER_MODE_LANG2:
s1 |= TDA8425_S1_ML_SOUND_B;
s1 |= TDA8425_S1_STEREO_PSEUDO;
-
- } else {
+ break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
s1 |= TDA8425_S1_ML_STEREO;
-
- if (mode & V4L2_TUNER_MODE_MONO)
- s1 |= TDA8425_S1_STEREO_MONO;
- if (mode & V4L2_TUNER_MODE_STEREO)
- s1 |= TDA8425_S1_STEREO_SPATIAL;
+ s1 |= TDA8425_S1_STEREO_LINEAR;
+ break;
+ case V4L2_TUNER_MODE_MONO:
+ s1 |= TDA8425_S1_ML_STEREO;
+ s1 |= TDA8425_S1_STEREO_MONO;
+ break;
+ case V4L2_TUNER_MODE_STEREO:
+ s1 |= TDA8425_S1_ML_STEREO;
+ s1 |= TDA8425_S1_STEREO_SPATIAL;
+ break;
+ default:
+ return;
}
chip_write(chip,TDA8425_S1,s1);
}
@@ -1297,18 +1351,20 @@ static void tda8425_setmode(struct CHIPSTATE *chip, int mode)
* stereo L L
* BIL H L
*/
-static int ta8874z_getmode(struct CHIPSTATE *chip)
+static int ta8874z_getrxsubchans(struct CHIPSTATE *chip)
{
int val, mode;
val = chip_read(chip);
- mode = V4L2_TUNER_MODE_MONO;
+ mode = V4L2_TUNER_SUB_MONO;
if (val & TA8874Z_B1){
- mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2;
+ mode |= V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
}else if (!(val & TA8874Z_B0)){
- mode |= V4L2_TUNER_MODE_STEREO;
+ mode = V4L2_TUNER_SUB_STEREO;
}
- /* v4l_dbg(1, debug, chip->c, "ta8874z_getmode(): raw chip read: 0x%02x, return: 0x%02x\n", val, mode); */
+ /* v4l2_dbg(1, debug, &chip->sd,
+ "ta8874z_getrxsubchans(): raw chip read: 0x%02x, return: 0x%02x\n",
+ val, mode); */
return mode;
}
@@ -1316,14 +1372,15 @@ static audiocmd ta8874z_stereo = { 2, {0, TA8874Z_SEPARATION_DEFAULT}};
static audiocmd ta8874z_mono = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}};
static audiocmd ta8874z_main = {2, { 0, TA8874Z_SEPARATION_DEFAULT}};
static audiocmd ta8874z_sub = {2, { TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT}};
+static audiocmd ta8874z_both = {2, { TA8874Z_MODE_MAIN | TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT}};
-static void ta8874z_setmode(struct CHIPSTATE *chip, int mode)
+static void ta8874z_setaudmode(struct CHIPSTATE *chip, int mode)
{
struct v4l2_subdev *sd = &chip->sd;
int update = 1;
audiocmd *t = NULL;
- v4l2_dbg(1, debug, sd, "ta8874z_setmode(): mode: 0x%02x\n", mode);
+ v4l2_dbg(1, debug, sd, "ta8874z_setaudmode(): mode: 0x%02x\n", mode);
switch(mode){
case V4L2_TUNER_MODE_MONO:
@@ -1338,6 +1395,9 @@ static void ta8874z_setmode(struct CHIPSTATE *chip, int mode)
case V4L2_TUNER_MODE_LANG2:
t = &ta8874z_sub;
break;
+ case V4L2_TUNER_MODE_LANG1_LANG2:
+ t = &ta8874z_both;
+ break;
default:
update = 0;
}
@@ -1394,8 +1454,8 @@ static struct CHIPDESC chiplist[] = {
/* callbacks */
.checkit = tda9840_checkit,
- .getmode = tda9840_getmode,
- .setmode = tda9840_setmode,
+ .getrxsubchans = tda9840_getrxsubchans,
+ .setaudmode = tda9840_setaudmode,
.init = { 2, { TDA9840_TEST, TDA9840_TEST_INT1SN
/* ,TDA9840_SW, TDA9840_MONO */} }
@@ -1410,8 +1470,8 @@ static struct CHIPDESC chiplist[] = {
/* callbacks */
.checkit = tda9873_checkit,
- .getmode = tda9873_getmode,
- .setmode = tda9873_setmode,
+ .getrxsubchans = tda9873_getrxsubchans,
+ .setaudmode = tda9873_setaudmode,
.init = { 4, { TDA9873_SW, 0xa4, 0x06, 0x03 } },
.inputreg = TDA9873_SW,
@@ -1430,8 +1490,8 @@ static struct CHIPDESC chiplist[] = {
/* callbacks */
.initialize = tda9874a_initialize,
.checkit = tda9874a_checkit,
- .getmode = tda9874a_getmode,
- .setmode = tda9874a_setmode,
+ .getrxsubchans = tda9874a_getrxsubchans,
+ .setaudmode = tda9874a_setaudmode,
},
{
.name = "tda9875",
@@ -1460,8 +1520,8 @@ static struct CHIPDESC chiplist[] = {
.addr_hi = I2C_ADDR_TDA985x_H >> 1,
.registers = 11,
- .getmode = tda985x_getmode,
- .setmode = tda985x_setmode,
+ .getrxsubchans = tda985x_getrxsubchans,
+ .setaudmode = tda985x_setaudmode,
.init = { 8, { TDA9850_C4, 0x08, 0x08, TDA985x_STEREO, 0x07, 0x10, 0x10, 0x03 } }
},
@@ -1482,8 +1542,8 @@ static struct CHIPDESC chiplist[] = {
.volfunc = tda9855_volume,
.bassfunc = tda9855_bass,
.treblefunc = tda9855_treble,
- .getmode = tda985x_getmode,
- .setmode = tda985x_setmode,
+ .getrxsubchans = tda985x_getrxsubchans,
+ .setaudmode = tda985x_setaudmode,
.init = { 12, { 0, 0x6f, 0x6f, 0x0e, 0x07<<1, 0x8<<2,
TDA9855_MUTE | TDA9855_AVL | TDA9855_LOUD | TDA9855_INT,
@@ -1564,7 +1624,7 @@ static struct CHIPDESC chiplist[] = {
.volfunc = tda8425_shift10,
.bassfunc = tda8425_shift12,
.treblefunc = tda8425_shift12,
- .setmode = tda8425_setmode,
+ .setaudmode = tda8425_setaudmode,
.inputreg = TDA8425_S1,
.inputmap = { TDA8425_S1_CH1, TDA8425_S1_CH1, TDA8425_S1_CH1 },
@@ -1593,11 +1653,10 @@ static struct CHIPDESC chiplist[] = {
.addr_lo = I2C_ADDR_TDA9840 >> 1,
.addr_hi = I2C_ADDR_TDA9840 >> 1,
.registers = 2,
- .flags = CHIP_NEED_CHECKMODE,
/* callbacks */
- .getmode = ta8874z_getmode,
- .setmode = ta8874z_setmode,
+ .getrxsubchans = ta8874z_getrxsubchans,
+ .setaudmode = ta8874z_setaudmode,
.init = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}},
},
@@ -1736,7 +1795,6 @@ static int tvaudio_s_radio(struct v4l2_subdev *sd)
struct CHIPSTATE *chip = to_state(sd);
chip->radio = 1;
- chip->watch_stereo = 0;
/* del_timer(&chip->wt); */
return 0;
}
@@ -1793,9 +1851,8 @@ static int tvaudio_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct CHIPSTATE *chip = to_state(sd);
struct CHIPDESC *desc = chip->desc;
- int mode = 0;
- if (!desc->setmode)
+ if (!desc->setaudmode)
return 0;
if (chip->radio)
return 0;
@@ -1805,22 +1862,18 @@ static int tvaudio_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
case V4L2_TUNER_MODE_STEREO:
case V4L2_TUNER_MODE_LANG1:
case V4L2_TUNER_MODE_LANG2:
- mode = vt->audmode;
- break;
case V4L2_TUNER_MODE_LANG1_LANG2:
- mode = V4L2_TUNER_MODE_STEREO;
break;
default:
return -EINVAL;
}
chip->audmode = vt->audmode;
- if (mode) {
- chip->watch_stereo = 0;
- /* del_timer(&chip->wt); */
- chip->mode = mode;
- desc->setmode(chip, mode);
- }
+ if (chip->thread)
+ wake_up_process(chip->thread);
+ else
+ desc->setaudmode(chip, vt->audmode);
+
return 0;
}
@@ -1828,30 +1881,17 @@ static int tvaudio_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct CHIPSTATE *chip = to_state(sd);
struct CHIPDESC *desc = chip->desc;
- int mode = V4L2_TUNER_MODE_MONO;
- if (!desc->getmode)
+ if (!desc->getrxsubchans)
return 0;
if (chip->radio)
return 0;
vt->audmode = chip->audmode;
- vt->rxsubchans = 0;
+ vt->rxsubchans = desc->getrxsubchans(chip);
vt->capability = V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
- mode = desc->getmode(chip);
-
- if (mode & V4L2_TUNER_MODE_MONO)
- vt->rxsubchans |= V4L2_TUNER_SUB_MONO;
- if (mode & V4L2_TUNER_MODE_STEREO)
- vt->rxsubchans |= V4L2_TUNER_SUB_STEREO;
- /* Note: for SAP it should be mono/lang2 or stereo/lang2.
- When this module is converted fully to v4l2, then this
- should change for those chips that can detect SAP. */
- if (mode & V4L2_TUNER_MODE_LANG1)
- vt->rxsubchans = V4L2_TUNER_SUB_LANG1 |
- V4L2_TUNER_SUB_LANG2;
return 0;
}
@@ -1868,9 +1908,7 @@ static int tvaudio_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *fr
struct CHIPSTATE *chip = to_state(sd);
struct CHIPDESC *desc = chip->desc;
- chip->mode = 0; /* automatic */
-
- /* For chips that provide getmode and setmode, and doesn't
+ /* For chips that provide getrxsubchans and setaudmode, and don't
automatically follow the stereo carrier, a kthread is
created to set the audio standard. In this case, when
the video channel is changed, tvaudio starts on MONO mode.
@@ -1879,9 +1917,8 @@ static int tvaudio_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *fr
audio carrier.
*/
if (chip->thread) {
- desc->setmode(chip, V4L2_TUNER_MODE_MONO);
- if (chip->prevmode != V4L2_TUNER_MODE_MONO)
- chip->prevmode = -1; /* reset previous mode */
+ desc->setaudmode(chip, V4L2_TUNER_MODE_MONO);
+ chip->prevmode = -1; /* reset previous mode */
mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000));
}
return 0;
@@ -2023,7 +2060,7 @@ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *
chip->thread = NULL;
init_timer(&chip->wt);
if (desc->flags & CHIP_NEED_CHECKMODE) {
- if (!desc->getmode || !desc->setmode) {
+ if (!desc->getrxsubchans || !desc->setaudmode) {
/* This shouldn't happen. Warn user, but keep working
without kthread
*/
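For reference, a minimal sketch (not part of the patch above) of the split the tvaudio conversion introduces: getrxsubchans() reports what is being received as a V4L2_TUNER_SUB_* bitmask, while setaudmode() programs the single V4L2_TUNER_MODE_* value the application requested. The helper name and the bilingual flag below are hypothetical.

#include <linux/videodev2.h>

static void example_fill_tuner(struct v4l2_tuner *vt, bool bilingual_detected)
{
	/* reception state, as desc->getrxsubchans() would report it */
	vt->rxsubchans = bilingual_detected
		? (V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2)
		: V4L2_TUNER_SUB_MONO;
	/* the mode last requested via VIDIOC_S_TUNER; desc->setaudmode()
	 * is what programs the chip to honour it */
	vt->audmode = V4L2_TUNER_MODE_LANG1;
}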
diff --git a/drivers/media/video/tvp5150.c b/drivers/media/video/tvp5150.c
index b7867427e5c4..a751b6c146fd 100644
--- a/drivers/media/video/tvp5150.c
+++ b/drivers/media/video/tvp5150.c
@@ -61,13 +61,20 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
int rc;
buffer[0] = addr;
- if (1 != (rc = i2c_master_send(c, buffer, 1)))
- v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
+
+ rc = i2c_master_send(c, buffer, 1);
+ if (rc < 0) {
+ v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
+ return rc;
+ }
msleep(10);
- if (1 != (rc = i2c_master_recv(c, buffer, 1)))
- v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
+ rc = i2c_master_recv(c, buffer, 1);
+ if (rc < 0) {
+ v4l2_err(sd, "i2c i/o error: rc == %d (should be 1)\n", rc);
+ return rc;
+ }
v4l2_dbg(2, debug, sd, "tvp5150: read 0x%02x = 0x%02x\n", addr, buffer[0]);
@@ -250,7 +257,7 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
int opmode = 0;
struct tvp5150 *decoder = to_tvp5150(sd);
int input = 0;
- unsigned char val;
+ int val;
if ((decoder->output & TVP5150_BLACK_SCREEN) || !decoder->enable)
input = 8;
@@ -279,6 +286,11 @@ static inline void tvp5150_selmux(struct v4l2_subdev *sd)
* For Composite and TV, it should be the reverse
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
+ if (val < 0) {
+ v4l2_err(sd, "%s: failed with error = %d\n", __func__, val);
+ return;
+ }
+
if (decoder->input == TVP5150_SVIDEO)
val = (val & ~0x40) | 0x10;
else
@@ -676,6 +688,7 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
v4l2_std_id std = decoder->norm;
u8 reg;
int pos, type = 0;
+ int i, ret = 0;
if (std == V4L2_STD_ALL) {
v4l2_err(sd, "VBI can't be configured without knowing number of lines\n");
@@ -690,13 +703,17 @@ static int tvp5150_get_vbi(struct v4l2_subdev *sd,
reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
- pos = tvp5150_read(sd, reg) & 0x0f;
- if (pos < 0x0f)
- type = regs[pos].type.vbi_type;
-
- pos = tvp5150_read(sd, reg + 1) & 0x0f;
- if (pos < 0x0f)
- type |= regs[pos].type.vbi_type;
+ for (i = 0; i <= 1; i++) {
+ ret = tvp5150_read(sd, reg + i);
+ if (ret < 0) {
+ v4l2_err(sd, "%s: failed with error = %d\n",
+ __func__, ret);
+ return 0;
+ }
+ pos = ret & 0x0f;
+ if (pos < 0x0f)
+ type |= regs[pos].type.vbi_type;
+ }
return type;
}
@@ -1031,13 +1048,21 @@ static int tvp5150_g_chip_ident(struct v4l2_subdev *sd,
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
+ int res;
+
struct i2c_client *client = v4l2_get_subdevdata(sd);
if (!v4l2_chip_match_i2c_client(client, &reg->match))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- reg->val = tvp5150_read(sd, reg->reg & 0xff);
+ res = tvp5150_read(sd, reg->reg & 0xff);
+ if (res < 0) {
+ v4l2_err(sd, "%s: failed with error = %d\n", __func__, res);
+ return res;
+ }
+
+ reg->val = res;
reg->size = 1;
return 0;
}
@@ -1126,7 +1151,8 @@ static int tvp5150_probe(struct i2c_client *c,
{
struct tvp5150 *core;
struct v4l2_subdev *sd;
- u8 msb_id, lsb_id, msb_rom, lsb_rom;
+ int tvp5150_id[4];
+ int i, res;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
@@ -1139,26 +1165,37 @@ static int tvp5150_probe(struct i2c_client *c,
}
sd = &core->sd;
v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+
+ /*
+ * Read consecutive registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
+ * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER
+ */
+ for (i = 0; i < 4; i++) {
+ res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
+ if (res < 0)
+ goto free_core;
+ tvp5150_id[i] = res;
+ }
+
v4l_info(c, "chip found @ 0x%02x (%s)\n",
c->addr << 1, c->adapter->name);
- msb_id = tvp5150_read(sd, TVP5150_MSB_DEV_ID);
- lsb_id = tvp5150_read(sd, TVP5150_LSB_DEV_ID);
- msb_rom = tvp5150_read(sd, TVP5150_ROM_MAJOR_VER);
- lsb_rom = tvp5150_read(sd, TVP5150_ROM_MINOR_VER);
-
- if (msb_rom == 4 && lsb_rom == 0) { /* Is TVP5150AM1 */
- v4l2_info(sd, "tvp%02x%02xam1 detected.\n", msb_id, lsb_id);
+ if (tvp5150_id[2] == 4 && tvp5150_id[3] == 0) { /* Is TVP5150AM1 */
+ v4l2_info(sd, "tvp%02x%02xam1 detected.\n",
+ tvp5150_id[0], tvp5150_id[1]);
/* ITU-T BT.656.4 timing */
tvp5150_write(sd, TVP5150_REV_SELECT, 0);
} else {
- if (msb_rom == 3 || lsb_rom == 0x21) { /* Is TVP5150A */
- v4l2_info(sd, "tvp%02x%02xa detected.\n", msb_id, lsb_id);
+ /* Is TVP5150A */
+ if (tvp5150_id[2] == 3 || tvp5150_id[3] == 0x21) {
+ v4l2_info(sd, "tvp%02x%02xa detected.\n",
+ tvp5150_id[0], tvp5150_id[1]);
} else {
v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n",
- msb_id, lsb_id);
- v4l2_info(sd, "*** Rom ver is %d.%d\n", msb_rom, lsb_rom);
+ tvp5150_id[0], tvp5150_id[1]);
+ v4l2_info(sd, "*** Rom ver is %d.%d\n",
+ tvp5150_id[2], tvp5150_id[3]);
}
}
@@ -1177,11 +1214,9 @@ static int tvp5150_probe(struct i2c_client *c,
V4L2_CID_HUE, -128, 127, 1, 0);
sd->ctrl_handler = &core->hdl;
if (core->hdl.error) {
- int err = core->hdl.error;
-
+ res = core->hdl.error;
v4l2_ctrl_handler_free(&core->hdl);
- kfree(core);
- return err;
+ goto free_core;
}
v4l2_ctrl_handler_setup(&core->hdl);
@@ -1197,6 +1232,10 @@ static int tvp5150_probe(struct i2c_client *c,
if (debug > 1)
tvp5150_log_status(sd);
return 0;
+
+free_core:
+ kfree(core);
+ return res;
}
static int tvp5150_remove(struct i2c_client *c)
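The tvp5150 hunks above all follow one pattern now that tvp5150_read() can return a negative errno: check before using the value as register data. A minimal sketch of the pattern, assuming it lives in tvp5150.c next to the existing static helpers; the bit being set is purely illustrative.

static int example_set_misc_bit(struct v4l2_subdev *sd)
{
	int val = tvp5150_read(sd, TVP5150_MISC_CTL);

	if (val < 0) {
		v4l2_err(sd, "%s: failed with error = %d\n", __func__, val);
		return val;		/* propagate the errno */
	}
	/* only now is 'val' known to be a valid 8-bit register value */
	tvp5150_write(sd, TVP5150_MISC_CTL, val | 0x01);
	return 0;
}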
diff --git a/drivers/media/video/tw9910.c b/drivers/media/video/tw9910.c
index 8768efb8508a..9f53eacb66e3 100644
--- a/drivers/media/video/tw9910.c
+++ b/drivers/media/video/tw9910.c
@@ -699,11 +699,9 @@ static int tw9910_g_fmt(struct v4l2_subdev *sd,
struct tw9910_priv *priv = to_tw9910(client);
if (!priv->scale) {
- int ret;
- u32 width = 640, height = 480;
- ret = tw9910_set_frame(sd, &width, &height);
- if (ret < 0)
- return ret;
+ priv->scale = tw9910_select_norm(priv->norm, 640, 480);
+ if (!priv->scale)
+ return -EINVAL;
}
mf->width = priv->scale->width;
diff --git a/drivers/media/video/uvc/Kconfig b/drivers/media/video/uvc/Kconfig
index 6c197da531b2..541c9f1e4c6a 100644
--- a/drivers/media/video/uvc/Kconfig
+++ b/drivers/media/video/uvc/Kconfig
@@ -10,6 +10,7 @@ config USB_VIDEO_CLASS
config USB_VIDEO_CLASS_INPUT_EVDEV
bool "UVC input events device support"
default y
+ depends on USB_VIDEO_CLASS
depends on USB_VIDEO_CLASS=INPUT || INPUT=y
---help---
This option makes USB Video Class devices register an input device
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index af26bbe6f76e..f7061a5ef1d2 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -2083,7 +2083,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
/* Walk the entities list and instantiate controls */
list_for_each_entry(entity, &dev->entities, list) {
struct uvc_control *ctrl;
- unsigned int bControlSize = 0, ncontrols = 0;
+ unsigned int bControlSize = 0, ncontrols;
__u8 *bmControls = NULL;
if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) {
@@ -2101,8 +2101,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
uvc_ctrl_prune_entity(dev, entity);
/* Count supported controls and allocate the controls array */
- for (i = 0; i < bControlSize; ++i)
- ncontrols += hweight8(bmControls[i]);
+ ncontrols = memweight(bmControls, bControlSize);
if (ncontrols == 0)
continue;
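For reference (not part of the patch): memweight(), from lib/memweight.c and declared in <linux/string.h>, counts the set bits in an arbitrary-length buffer, which is exactly what the removed hweight8() loop did. A self-contained sketch of the equivalence; the function name is hypothetical.

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/string.h>
#include <linux/types.h>

static size_t example_count_bits(const u8 *bmControls, size_t bControlSize)
{
	size_t bits = 0;
	size_t i;

	for (i = 0; i < bControlSize; ++i)	/* the old open-coded way */
		bits += hweight8(bmControls[i]);

	/* the new way; both counts should agree */
	WARN_ON(bits != memweight(bmControls, bControlSize));
	return bits;
}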
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 9288fbd5001b..5577381b5bf0 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
buf->error = 0;
buf->state = UVC_BUF_STATE_QUEUED;
+ buf->bytesused = 0;
vb2_set_plane_payload(&buf->buf, 0, 0);
return buf;
}
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 759bef8897e9..f00db3060e0e 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1051,7 +1051,7 @@ static long uvc_v4l2_ioctl(struct file *file,
{
if (uvc_trace_param & UVC_TRACE_IOCTL) {
uvc_printk(KERN_DEBUG, "uvc_v4l2_ioctl(");
- v4l_printk_ioctl(cmd);
+ v4l_printk_ioctl(NULL, cmd);
printk(")\n");
}
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index b76b0ac0958f..7ac4347ca09e 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1188,7 +1188,11 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
u8 *mem;
int len, ret;
- if (urb->actual_length == 0)
+ /*
+ * Ignore ZLPs if they're not part of a frame, otherwise process them
+ * to trigger the end of payload detection.
+ */
+ if (urb->actual_length == 0 && stream->bulk.header_size == 0)
return;
mem = urb->transfer_buffer;
@@ -1594,7 +1598,7 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
psize = le16_to_cpu(ep->desc.wMaxPacketSize);
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
if (psize >= bandwidth && psize <= best_psize) {
- altsetting = i;
+ altsetting = alts->desc.bAlternateSetting;
best_psize = psize;
best_ep = ep;
}
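The last uvc_video.c hunk records the alternate setting number the descriptor advertises instead of the loop index, since alternate settings are not guaranteed to be numbered by their position in altsetting[]. A minimal sketch of the distinction, not part of the patch; the helper is hypothetical.

#include <linux/usb.h>

static int example_altsetting_number(struct usb_interface *intf, unsigned int i)
{
	struct usb_host_interface *alts = &intf->altsetting[i];

	/* not 'i': return the number the device actually advertises */
	return alts->desc.bAlternateSetting;
}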
diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c
index 5327ad3a6390..9ebd5c540d10 100644
--- a/drivers/media/video/v4l2-compat-ioctl32.c
+++ b/drivers/media/video/v4l2-compat-ioctl32.c
@@ -327,7 +327,7 @@ struct v4l2_buffer32 {
compat_caddr_t planes;
} m;
__u32 length;
- __u32 input;
+ __u32 reserved2;
__u32 reserved;
};
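The rename above keeps struct v4l2_buffer32 in sync with the native UAPI struct, whose tail looks as follows once the old 'input' field is gone (an excerpt for illustration only, not a complete definition):

#include <linux/types.h>

struct v4l2_buffer_tail_excerpt {
	/* ... preceding v4l2_buffer members elided ... */
	__u32			length;
	__u32			reserved2;	/* was '__u32 input' */
	__u32			reserved;
};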
@@ -387,8 +387,7 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
get_user(kp->index, &up->index) ||
get_user(kp->type, &up->type) ||
get_user(kp->flags, &up->flags) ||
- get_user(kp->memory, &up->memory) ||
- get_user(kp->input, &up->input))
+ get_user(kp->memory, &up->memory))
return -EFAULT;
if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -472,8 +471,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
put_user(kp->index, &up->index) ||
put_user(kp->type, &up->type) ||
put_user(kp->flags, &up->flags) ||
- put_user(kp->memory, &up->memory) ||
- put_user(kp->input, &up->input))
+ put_user(kp->memory, &up->memory))
return -EFAULT;
if (put_user(kp->bytesused, &up->bytesused) ||
@@ -482,6 +480,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
put_user(kp->sequence, &up->sequence) ||
+ put_user(kp->reserved2, &up->reserved2) ||
put_user(kp->reserved, &up->reserved))
return -EFAULT;
@@ -1026,6 +1025,7 @@ long v4l2_compat_ioctl32(struct file *file, unsigned int cmd, unsigned long arg)
case VIDIOC_ENUM_DV_TIMINGS:
case VIDIOC_QUERY_DV_TIMINGS:
case VIDIOC_DV_TIMINGS_CAP:
+ case VIDIOC_ENUM_FREQ_BANDS:
ret = do_video_ioctl(file, cmd, arg);
break;
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 9abd9abd4502..b6a2ee71e5c3 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -755,6 +755,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_HUE_AUTO:
case V4L2_CID_CHROMA_AGC:
case V4L2_CID_COLOR_KILLER:
+ case V4L2_CID_AUTOBRIGHTNESS:
case V4L2_CID_MPEG_AUDIO_MUTE:
case V4L2_CID_MPEG_VIDEO_MUTE:
case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
@@ -2120,7 +2121,7 @@ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl,
/* First zero the helper field in the master control references */
for (i = 0; i < cs->count; i++)
- helpers[i].mref->helper = 0;
+ helpers[i].mref->helper = NULL;
for (i = 0, h = helpers; i < cs->count; i++, h++) {
struct v4l2_ctrl_ref *mref = h->mref;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 0cbada18f6f5..07aeafca9eaa 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -46,6 +46,29 @@ static ssize_t show_index(struct device *cd,
return sprintf(buf, "%i\n", vdev->index);
}
+static ssize_t show_debug(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct video_device *vdev = to_video_device(cd);
+
+ return sprintf(buf, "%i\n", vdev->debug);
+}
+
+static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct video_device *vdev = to_video_device(cd);
+ int res = 0;
+ u16 value;
+
+ res = kstrtou16(buf, 0, &value);
+ if (res)
+ return res;
+
+ vdev->debug = value;
+ return len;
+}
+
static ssize_t show_name(struct device *cd,
struct device_attribute *attr, char *buf)
{
@@ -56,6 +79,7 @@ static ssize_t show_name(struct device *cd,
static struct device_attribute video_device_attrs[] = {
__ATTR(name, S_IRUGO, show_name, NULL),
+ __ATTR(debug, 0644, show_debug, set_debug),
__ATTR(index, S_IRUGO, show_index, NULL),
__ATTR_NULL
};
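A hypothetical userspace example for the new 'debug' attribute (the sysfs path is an assumption derived from the video4linux class and the attribute name; any non-zero value enables the printk()s added in the hunks below, 0 turns them off again):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/video4linux/video0/debug", "w");

	if (!f)
		return 1;
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}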
@@ -281,6 +305,9 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf,
ret = vdev->fops->read(filp, buf, sz, off);
if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: read: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
return ret;
}
@@ -299,6 +326,9 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf,
ret = vdev->fops->write(filp, buf, sz, off);
if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: write: %zd (%d)\n",
+ video_device_node_name(vdev), sz, ret);
return ret;
}
@@ -315,6 +345,9 @@ static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll)
ret = vdev->fops->poll(filp, poll);
if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: poll: %08x\n",
+ video_device_node_name(vdev), ret);
return ret;
}
@@ -324,20 +357,14 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret = -ENODEV;
if (vdev->fops->unlocked_ioctl) {
- bool locked = false;
+ struct mutex *lock = v4l2_ioctl_get_lock(vdev, cmd);
- if (vdev->lock) {
- /* always lock unless the cmd is marked as "don't use lock" */
- locked = !v4l2_is_known_ioctl(cmd) ||
- !test_bit(_IOC_NR(cmd), vdev->disable_locking);
-
- if (locked && mutex_lock_interruptible(vdev->lock))
- return -ERESTARTSYS;
- }
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
if (video_is_registered(vdev))
ret = vdev->fops->unlocked_ioctl(filp, cmd, arg);
- if (locked)
- mutex_unlock(vdev->lock);
+ if (lock)
+ mutex_unlock(lock);
} else if (vdev->fops->ioctl) {
/* This code path is a replacement for the BKL. It is a major
* hack but it will have to do for those drivers that are not
@@ -385,12 +412,17 @@ static unsigned long v4l2_get_unmapped_area(struct file *filp,
unsigned long flags)
{
struct video_device *vdev = video_devdata(filp);
+ int ret;
if (!vdev->fops->get_unmapped_area)
return -ENOSYS;
if (!video_is_registered(vdev))
return -ENODEV;
- return vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
+ ret = vdev->fops->get_unmapped_area(filp, addr, len, pgoff, flags);
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: get_unmapped_area (%d)\n",
+ video_device_node_name(vdev), ret);
+ return ret;
}
#endif
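In the v4l2_ioctl() hunk above, the open-coded locking decision moves into v4l2_ioctl_get_lock(), which is introduced elsewhere in this series. A hypothetical sketch consistent with the logic that was removed (return the mutex to take, or NULL when the driver marked the command lock-free); the real helper may differ.

#include <linux/bitops.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static inline struct mutex *example_ioctl_get_lock(struct video_device *vdev,
						   unsigned int cmd)
{
	if (!vdev->lock)
		return NULL;
	if (v4l2_is_known_ioctl(cmd) &&
	    test_bit(_IOC_NR(cmd), vdev->disable_locking))
		return NULL;	/* driver opted this command out of locking */
	return vdev->lock;
}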
@@ -408,6 +440,9 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
ret = vdev->fops->mmap(filp, vm);
if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: mmap (%d)\n",
+ video_device_node_name(vdev), ret);
return ret;
}
@@ -443,6 +478,9 @@ static int v4l2_open(struct inode *inode, struct file *filp)
}
err:
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: open (%d)\n",
+ video_device_node_name(vdev), ret);
/* decrease the refcount in case of an error */
if (ret)
video_put(vdev);
@@ -462,6 +500,9 @@ static int v4l2_release(struct inode *inode, struct file *filp)
if (test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags))
mutex_unlock(vdev->lock);
}
+ if (vdev->debug)
+ printk(KERN_DEBUG "%s: release\n",
+ video_device_node_name(vdev));
/* decrease the refcount unconditionally since the release()
return value is ignored. */
video_put(vdev);
@@ -656,7 +697,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
- if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
+ if (ops->vidioc_g_parm || (vdev->vfl_type == VFL_TYPE_GRABBER &&
+ (ops->vidioc_g_std || vdev->tvnorms)))
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -688,6 +730,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_UNSUBSCRIBE_EVENT, vidioc_unsubscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_CREATE_BUFS, vidioc_create_bufs);
SET_VALID_IOCTL(ops, VIDIOC_PREPARE_BUF, vidioc_prepare_buf);
+ if (ops->vidioc_enum_freq_bands || ops->vidioc_g_tuner || ops->vidioc_g_modulator)
+ set_bit(_IOC_NR(VIDIOC_ENUM_FREQ_BANDS), valid_ioctls);
bitmap_andnot(vdev->valid_ioctls, valid_ioctls, vdev->valid_ioctls,
BASE_VIDIOC_PRIVATE);
}
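determine_valid_ioctls() only fills a bitmap; the ioctl path consults it roughly as in the sketch below (simplified, and not part of the patch: as the removed __video_do_ioctl() code further down shows, the real check also admits control ioctls when the filehandle carries its own control handler). VIDIOC_ENUM_FREQ_BANDS is marked valid whenever g_tuner or g_modulator exists because the core can fall back to building a single band from their rangelow/rangehigh values.

#include <linux/bitops.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static bool example_ioctl_is_valid(struct video_device *vdev, unsigned int cmd)
{
	/* unknown (e.g. driver-private) ioctls pass through and are
	 * rejected or handled later */
	return !v4l2_is_known_ioctl(cmd) ||
	       test_bit(_IOC_NR(cmd), vdev->valid_ioctls);
}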
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index d7fa8962d8b3..6bc47fc82fe2 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -27,27 +27,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
-
-#define dbgarg(cmd, fmt, arg...) \
- do { \
- if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) { \
- printk(KERN_DEBUG "%s: ", vfd->name); \
- v4l_printk_ioctl(cmd); \
- printk(" " fmt, ## arg); \
- } \
- } while (0)
-
-#define dbgarg2(fmt, arg...) \
- do { \
- if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) \
- printk(KERN_DEBUG "%s: " fmt, vfd->name, ## arg);\
- } while (0)
-
-#define dbgarg3(fmt, arg...) \
- do { \
- if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) \
- printk(KERN_CONT "%s: " fmt, vfd->name, ## arg);\
- } while (0)
+#include <media/videobuf2-core.h>
/* Zero out the end of the struct pointed to by p. Everything after, but
* not including, the specified field is cleared. */
@@ -183,207 +163,509 @@ static const char *v4l2_memory_names[] = {
/* ------------------------------------------------------------------ */
/* debug help functions */
-struct v4l2_ioctl_info {
- unsigned int ioctl;
- u16 flags;
- const char * const name;
-};
+static void v4l_print_querycap(const void *arg, bool write_only)
+{
+ const struct v4l2_capability *p = arg;
-/* This control needs a priority check */
-#define INFO_FL_PRIO (1 << 0)
-/* This control can be valid if the filehandle passes a control handler. */
-#define INFO_FL_CTRL (1 << 1)
+ pr_cont("driver=%s, card=%s, bus=%s, version=0x%08x, "
+ "capabilities=0x%08x, device_caps=0x%08x\n",
+ p->driver, p->card, p->bus_info,
+ p->version, p->capabilities, p->device_caps);
+}
-#define IOCTL_INFO(_ioctl, _flags) [_IOC_NR(_ioctl)] = { \
- .ioctl = _ioctl, \
- .flags = _flags, \
- .name = #_ioctl, \
+static void v4l_print_enuminput(const void *arg, bool write_only)
+{
+ const struct v4l2_input *p = arg;
+
+ pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, tuner=%u, "
+ "std=0x%08Lx, status=0x%x, capabilities=0x%x\n",
+ p->index, p->name, p->type, p->audioset, p->tuner,
+ (unsigned long long)p->std, p->status, p->capabilities);
}
-static struct v4l2_ioctl_info v4l2_ioctls[] = {
- IOCTL_INFO(VIDIOC_QUERYCAP, 0),
- IOCTL_INFO(VIDIOC_ENUM_FMT, 0),
- IOCTL_INFO(VIDIOC_G_FMT, 0),
- IOCTL_INFO(VIDIOC_S_FMT, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_REQBUFS, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_QUERYBUF, 0),
- IOCTL_INFO(VIDIOC_G_FBUF, 0),
- IOCTL_INFO(VIDIOC_S_FBUF, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_OVERLAY, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_QBUF, 0),
- IOCTL_INFO(VIDIOC_DQBUF, 0),
- IOCTL_INFO(VIDIOC_STREAMON, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_STREAMOFF, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_PARM, 0),
- IOCTL_INFO(VIDIOC_S_PARM, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_STD, 0),
- IOCTL_INFO(VIDIOC_S_STD, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_ENUMSTD, 0),
- IOCTL_INFO(VIDIOC_ENUMINPUT, 0),
- IOCTL_INFO(VIDIOC_G_CTRL, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_CTRL, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_G_TUNER, 0),
- IOCTL_INFO(VIDIOC_S_TUNER, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_AUDIO, 0),
- IOCTL_INFO(VIDIOC_S_AUDIO, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_QUERYCTRL, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_QUERYMENU, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_G_INPUT, 0),
- IOCTL_INFO(VIDIOC_S_INPUT, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_OUTPUT, 0),
- IOCTL_INFO(VIDIOC_S_OUTPUT, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_ENUMOUTPUT, 0),
- IOCTL_INFO(VIDIOC_G_AUDOUT, 0),
- IOCTL_INFO(VIDIOC_S_AUDOUT, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_MODULATOR, 0),
- IOCTL_INFO(VIDIOC_S_MODULATOR, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_FREQUENCY, 0),
- IOCTL_INFO(VIDIOC_S_FREQUENCY, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_CROPCAP, 0),
- IOCTL_INFO(VIDIOC_G_CROP, 0),
- IOCTL_INFO(VIDIOC_S_CROP, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_SELECTION, 0),
- IOCTL_INFO(VIDIOC_S_SELECTION, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_JPEGCOMP, 0),
- IOCTL_INFO(VIDIOC_S_JPEGCOMP, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_QUERYSTD, 0),
- IOCTL_INFO(VIDIOC_TRY_FMT, 0),
- IOCTL_INFO(VIDIOC_ENUMAUDIO, 0),
- IOCTL_INFO(VIDIOC_ENUMAUDOUT, 0),
- IOCTL_INFO(VIDIOC_G_PRIORITY, 0),
- IOCTL_INFO(VIDIOC_S_PRIORITY, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_SLICED_VBI_CAP, 0),
- IOCTL_INFO(VIDIOC_LOG_STATUS, 0),
- IOCTL_INFO(VIDIOC_G_EXT_CTRLS, INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_S_EXT_CTRLS, INFO_FL_PRIO | INFO_FL_CTRL),
- IOCTL_INFO(VIDIOC_TRY_EXT_CTRLS, 0),
- IOCTL_INFO(VIDIOC_ENUM_FRAMESIZES, 0),
- IOCTL_INFO(VIDIOC_ENUM_FRAMEINTERVALS, 0),
- IOCTL_INFO(VIDIOC_G_ENC_INDEX, 0),
- IOCTL_INFO(VIDIOC_ENCODER_CMD, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_TRY_ENCODER_CMD, 0),
- IOCTL_INFO(VIDIOC_DECODER_CMD, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_TRY_DECODER_CMD, 0),
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- IOCTL_INFO(VIDIOC_DBG_S_REGISTER, 0),
- IOCTL_INFO(VIDIOC_DBG_G_REGISTER, 0),
-#endif
- IOCTL_INFO(VIDIOC_DBG_G_CHIP_IDENT, 0),
- IOCTL_INFO(VIDIOC_S_HW_FREQ_SEEK, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_ENUM_DV_PRESETS, 0),
- IOCTL_INFO(VIDIOC_S_DV_PRESET, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_DV_PRESET, 0),
- IOCTL_INFO(VIDIOC_QUERY_DV_PRESET, 0),
- IOCTL_INFO(VIDIOC_S_DV_TIMINGS, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_G_DV_TIMINGS, 0),
- IOCTL_INFO(VIDIOC_DQEVENT, 0),
- IOCTL_INFO(VIDIOC_SUBSCRIBE_EVENT, 0),
- IOCTL_INFO(VIDIOC_UNSUBSCRIBE_EVENT, 0),
- IOCTL_INFO(VIDIOC_CREATE_BUFS, INFO_FL_PRIO),
- IOCTL_INFO(VIDIOC_PREPARE_BUF, 0),
- IOCTL_INFO(VIDIOC_ENUM_DV_TIMINGS, 0),
- IOCTL_INFO(VIDIOC_QUERY_DV_TIMINGS, 0),
- IOCTL_INFO(VIDIOC_DV_TIMINGS_CAP, 0),
-};
-#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
+static void v4l_print_enumoutput(const void *arg, bool write_only)
+{
+ const struct v4l2_output *p = arg;
-bool v4l2_is_known_ioctl(unsigned int cmd)
+ pr_cont("index=%u, name=%s, type=%u, audioset=0x%x, "
+ "modulator=%u, std=0x%08Lx, capabilities=0x%x\n",
+ p->index, p->name, p->type, p->audioset, p->modulator,
+ (unsigned long long)p->std, p->capabilities);
+}
+
+static void v4l_print_audio(const void *arg, bool write_only)
{
- if (_IOC_NR(cmd) >= V4L2_IOCTLS)
- return false;
- return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+ const struct v4l2_audio *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, mode=0x%x\n", p->index, p->mode);
+ else
+ pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
+ p->index, p->name, p->capability, p->mode);
}
-/* Common ioctl debug function. This function can be used by
- external ioctl messages as well as internal V4L ioctl */
-void v4l_printk_ioctl(unsigned int cmd)
+static void v4l_print_audioout(const void *arg, bool write_only)
{
- char *dir, *type;
+ const struct v4l2_audioout *p = arg;
- switch (_IOC_TYPE(cmd)) {
- case 'd':
- type = "v4l2_int";
+ if (write_only)
+ pr_cont("index=%u\n", p->index);
+ else
+ pr_cont("index=%u, name=%s, capability=0x%x, mode=0x%x\n",
+ p->index, p->name, p->capability, p->mode);
+}
+
+static void v4l_print_fmtdesc(const void *arg, bool write_only)
+{
+ const struct v4l2_fmtdesc *p = arg;
+
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, description='%s'\n",
+ p->index, prt_names(p->type, v4l2_type_names),
+ p->flags, (p->pixelformat & 0xff),
+ (p->pixelformat >> 8) & 0xff,
+ (p->pixelformat >> 16) & 0xff,
+ (p->pixelformat >> 24) & 0xff,
+ p->description);
+}
+
+static void v4l_print_format(const void *arg, bool write_only)
+{
+ const struct v4l2_format *p = arg;
+ const struct v4l2_pix_format *pix;
+ const struct v4l2_pix_format_mplane *mp;
+ const struct v4l2_vbi_format *vbi;
+ const struct v4l2_sliced_vbi_format *sliced;
+ const struct v4l2_window *win;
+ const struct v4l2_clip *clip;
+ unsigned i;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ pix = &p->fmt.pix;
+ pr_cont(", width=%u, height=%u, "
+ "pixelformat=%c%c%c%c, field=%s, "
+ "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+ pix->width, pix->height,
+ (pix->pixelformat & 0xff),
+ (pix->pixelformat >> 8) & 0xff,
+ (pix->pixelformat >> 16) & 0xff,
+ (pix->pixelformat >> 24) & 0xff,
+ prt_names(pix->field, v4l2_field_names),
+ pix->bytesperline, pix->sizeimage,
+ pix->colorspace);
break;
- case 'V':
- if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
- type = "v4l2";
- break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ mp = &p->fmt.pix_mp;
+ pr_cont(", width=%u, height=%u, "
+ "format=%c%c%c%c, field=%s, "
+ "colorspace=%d, num_planes=%u\n",
+ mp->width, mp->height,
+ (mp->pixelformat & 0xff),
+ (mp->pixelformat >> 8) & 0xff,
+ (mp->pixelformat >> 16) & 0xff,
+ (mp->pixelformat >> 24) & 0xff,
+ prt_names(mp->field, v4l2_field_names),
+ mp->colorspace, mp->num_planes);
+ for (i = 0; i < mp->num_planes; i++)
+ printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
+ mp->plane_fmt[i].bytesperline,
+ mp->plane_fmt[i].sizeimage);
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ win = &p->fmt.win;
+ pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
+ "chromakey=0x%08x, bitmap=%p, "
+ "global_alpha=0x%02x\n",
+ win->w.width, win->w.height,
+ win->w.left, win->w.top,
+ prt_names(win->field, v4l2_field_names),
+ win->chromakey, win->bitmap, win->global_alpha);
+ clip = win->clips;
+ for (i = 0; i < win->clipcount; i++) {
+ printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
+ i, clip->c.width, clip->c.height,
+ clip->c.left, clip->c.top);
+ clip = clip->next;
}
- printk("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
- return;
- default:
- type = "unknown";
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ vbi = &p->fmt.vbi;
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, "
+ "sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ vbi->sampling_rate, vbi->offset,
+ vbi->samples_per_line,
+ (vbi->sample_format & 0xff),
+ (vbi->sample_format >> 8) & 0xff,
+ (vbi->sample_format >> 16) & 0xff,
+ (vbi->sample_format >> 24) & 0xff,
+ vbi->start[0], vbi->start[1],
+ vbi->count[0], vbi->count[1]);
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ sliced = &p->fmt.sliced;
+ pr_cont(", service_set=0x%08x, io_size=%d\n",
+ sliced->service_set, sliced->io_size);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ sliced->service_lines[0][i],
+ sliced->service_lines[1][i]);
+ break;
+ case V4L2_BUF_TYPE_PRIVATE:
+ pr_cont("\n");
+ break;
}
+}
- switch (_IOC_DIR(cmd)) {
- case _IOC_NONE: dir = "--"; break;
- case _IOC_READ: dir = "r-"; break;
- case _IOC_WRITE: dir = "-w"; break;
- case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
- default: dir = "*ERR*"; break;
- }
- printk("%s ioctl '%c', dir=%s, #%d (0x%08x)",
- type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
+static void v4l_print_framebuffer(const void *arg, bool write_only)
+{
+ const struct v4l2_framebuffer *p = arg;
+
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
+ "height=%u, pixelformat=%c%c%c%c, "
+ "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+ p->capability, p->flags, p->base,
+ p->fmt.width, p->fmt.height,
+ (p->fmt.pixelformat & 0xff),
+ (p->fmt.pixelformat >> 8) & 0xff,
+ (p->fmt.pixelformat >> 16) & 0xff,
+ (p->fmt.pixelformat >> 24) & 0xff,
+ p->fmt.bytesperline, p->fmt.sizeimage,
+ p->fmt.colorspace);
+}
+
+static void v4l_print_buftype(const void *arg, bool write_only)
+{
+ pr_cont("type=%s\n", prt_names(*(u32 *)arg, v4l2_type_names));
+}
+
+static void v4l_print_modulator(const void *arg, bool write_only)
+{
+ const struct v4l2_modulator *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
+ else
+ pr_cont("index=%u, name=%s, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
+ p->index, p->name, p->capability,
+ p->rangelow, p->rangehigh, p->txsubchans);
+}
+
+static void v4l_print_tuner(const void *arg, bool write_only)
+{
+ const struct v4l2_tuner *p = arg;
+
+ if (write_only)
+ pr_cont("index=%u, audmode=%u\n", p->index, p->audmode);
+ else
+ pr_cont("index=%u, name=%s, type=%u, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, signal=%u, afc=%d, "
+ "rxsubchans=0x%x, audmode=%u\n",
+ p->index, p->name, p->type,
+ p->capability, p->rangelow,
+ p->rangehigh, p->signal, p->afc,
+ p->rxsubchans, p->audmode);
}
-EXPORT_SYMBOL(v4l_printk_ioctl);
-static void dbgbuf(unsigned int cmd, struct video_device *vfd,
- struct v4l2_buffer *p)
+static void v4l_print_frequency(const void *arg, bool write_only)
{
- struct v4l2_timecode *tc = &p->timecode;
- struct v4l2_plane *plane;
+ const struct v4l2_frequency *p = arg;
+
+ pr_cont("tuner=%u, type=%u, frequency=%u\n",
+ p->tuner, p->type, p->frequency);
+}
+
+static void v4l_print_standard(const void *arg, bool write_only)
+{
+ const struct v4l2_standard *p = arg;
+
+ pr_cont("index=%u, id=0x%Lx, name=%s, fps=%u/%u, "
+ "framelines=%u\n", p->index,
+ (unsigned long long)p->id, p->name,
+ p->frameperiod.numerator,
+ p->frameperiod.denominator,
+ p->framelines);
+}
+
+static void v4l_print_std(const void *arg, bool write_only)
+{
+ pr_cont("std=0x%08Lx\n", *(const long long unsigned *)arg);
+}
+
+static void v4l_print_hw_freq_seek(const void *arg, bool write_only)
+{
+ const struct v4l2_hw_freq_seek *p = arg;
+
+ pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, "
+ "rangelow=%u, rangehigh=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing,
+ p->rangelow, p->rangehigh);
+}
+
+static void v4l_print_requestbuffers(const void *arg, bool write_only)
+{
+ const struct v4l2_requestbuffers *p = arg;
+
+ pr_cont("count=%d, type=%s, memory=%s\n",
+ p->count,
+ prt_names(p->type, v4l2_type_names),
+ prt_names(p->memory, v4l2_memory_names));
+}
+
+static void v4l_print_buffer(const void *arg, bool write_only)
+{
+ const struct v4l2_buffer *p = arg;
+ const struct v4l2_timecode *tc = &p->timecode;
+ const struct v4l2_plane *plane;
int i;
- dbgarg(cmd, "%02ld:%02d:%02d.%08ld index=%d, type=%s, "
- "flags=0x%08d, field=%0d, sequence=%d, memory=%s\n",
+ pr_cont("%02ld:%02d:%02d.%08ld index=%d, type=%s, "
+ "flags=0x%08x, field=%s, sequence=%d, memory=%s",
p->timestamp.tv_sec / 3600,
(int)(p->timestamp.tv_sec / 60) % 60,
(int)(p->timestamp.tv_sec % 60),
(long)p->timestamp.tv_usec,
p->index,
prt_names(p->type, v4l2_type_names),
- p->flags, p->field, p->sequence,
- prt_names(p->memory, v4l2_memory_names));
+ p->flags, prt_names(p->field, v4l2_field_names),
+ p->sequence, prt_names(p->memory, v4l2_memory_names));
if (V4L2_TYPE_IS_MULTIPLANAR(p->type) && p->m.planes) {
+ pr_cont("\n");
for (i = 0; i < p->length; ++i) {
plane = &p->m.planes[i];
- dbgarg2("plane %d: bytesused=%d, data_offset=0x%08x "
- "offset/userptr=0x%08lx, length=%d\n",
+ printk(KERN_DEBUG
+ "plane %d: bytesused=%d, data_offset=0x%08x "
+ "offset/userptr=0x%lx, length=%d\n",
i, plane->bytesused, plane->data_offset,
plane->m.userptr, plane->length);
}
} else {
- dbgarg2("bytesused=%d, offset/userptr=0x%08lx, length=%d\n",
+ pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
p->bytesused, p->m.userptr, p->length);
}
- dbgarg2("timecode=%02d:%02d:%02d type=%d, "
- "flags=0x%08d, frames=%d, userbits=0x%08x\n",
+ printk(KERN_DEBUG "timecode=%02d:%02d:%02d type=%d, "
+ "flags=0x%08x, frames=%d, userbits=0x%08x\n",
tc->hours, tc->minutes, tc->seconds,
tc->type, tc->flags, tc->frames, *(__u32 *)tc->userbits);
}
-static inline void dbgrect(struct video_device *vfd, char *s,
- struct v4l2_rect *r)
+static void v4l_print_create_buffers(const void *arg, bool write_only)
{
- dbgarg2("%sRect start at %dx%d, size=%dx%d\n", s, r->left, r->top,
- r->width, r->height);
-};
+ const struct v4l2_create_buffers *p = arg;
+
+ pr_cont("index=%d, count=%d, memory=%s, ",
+ p->index, p->count,
+ prt_names(p->memory, v4l2_memory_names));
+ v4l_print_format(&p->format, write_only);
+}
-static void dbgtimings(struct video_device *vfd,
- const struct v4l2_dv_timings *p)
+static void v4l_print_streamparm(const void *arg, bool write_only)
{
+ const struct v4l2_streamparm *p = arg;
+
+ pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ const struct v4l2_captureparm *c = &p->parm.capture;
+
+ pr_cont(", capability=0x%x, capturemode=0x%x, timeperframe=%d/%d, "
+ "extendedmode=%d, readbuffers=%d\n",
+ c->capability, c->capturemode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->readbuffers);
+ } else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ const struct v4l2_outputparm *c = &p->parm.output;
+
+ pr_cont(", capability=0x%x, outputmode=0x%x, timeperframe=%d/%d, "
+ "extendedmode=%d, writebuffers=%d\n",
+ c->capability, c->outputmode,
+ c->timeperframe.numerator, c->timeperframe.denominator,
+ c->extendedmode, c->writebuffers);
+ }
+}
+
+static void v4l_print_queryctrl(const void *arg, bool write_only)
+{
+ const struct v4l2_queryctrl *p = arg;
+
+ pr_cont("id=0x%x, type=%d, name=%s, min/max=%d/%d, "
+ "step=%d, default=%d, flags=0x%08x\n",
+ p->id, p->type, p->name,
+ p->minimum, p->maximum,
+ p->step, p->default_value, p->flags);
+}
+
+static void v4l_print_querymenu(const void *arg, bool write_only)
+{
+ const struct v4l2_querymenu *p = arg;
+
+ pr_cont("id=0x%x, index=%d\n", p->id, p->index);
+}
+
+static void v4l_print_control(const void *arg, bool write_only)
+{
+ const struct v4l2_control *p = arg;
+
+ pr_cont("id=0x%x, value=%d\n", p->id, p->value);
+}
+
+static void v4l_print_ext_controls(const void *arg, bool write_only)
+{
+ const struct v4l2_ext_controls *p = arg;
+ int i;
+
+ pr_cont("class=0x%x, count=%d, error_idx=%d",
+ p->ctrl_class, p->count, p->error_idx);
+ for (i = 0; i < p->count; i++) {
+ if (!p->controls[i].size)
+ pr_cont(", id/val=0x%x/0x%x",
+ p->controls[i].id, p->controls[i].value);
+ else
+ pr_cont(", id/size=0x%x/%u",
+ p->controls[i].id, p->controls[i].size);
+ }
+ pr_cont("\n");
+}
+
+static void v4l_print_cropcap(const void *arg, bool write_only)
+{
+ const struct v4l2_cropcap *p = arg;
+
+ pr_cont("type=%s, bounds wxh=%dx%d, x,y=%d,%d, "
+ "defrect wxh=%dx%d, x,y=%d,%d\n, "
+ "pixelaspect %d/%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->bounds.width, p->bounds.height,
+ p->bounds.left, p->bounds.top,
+ p->defrect.width, p->defrect.height,
+ p->defrect.left, p->defrect.top,
+ p->pixelaspect.numerator, p->pixelaspect.denominator);
+}
+
+static void v4l_print_crop(const void *arg, bool write_only)
+{
+ const struct v4l2_crop *p = arg;
+
+ pr_cont("type=%s, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->c.width, p->c.height,
+ p->c.left, p->c.top);
+}
+
+static void v4l_print_selection(const void *arg, bool write_only)
+{
+ const struct v4l2_selection *p = arg;
+
+ pr_cont("type=%s, target=%d, flags=0x%x, wxh=%dx%d, x,y=%d,%d\n",
+ prt_names(p->type, v4l2_type_names),
+ p->target, p->flags,
+ p->r.width, p->r.height, p->r.left, p->r.top);
+}
+
+static void v4l_print_jpegcompression(const void *arg, bool write_only)
+{
+ const struct v4l2_jpegcompression *p = arg;
+
+ pr_cont("quality=%d, APPn=%d, APP_len=%d, "
+ "COM_len=%d, jpeg_markers=0x%x\n",
+ p->quality, p->APPn, p->APP_len,
+ p->COM_len, p->jpeg_markers);
+}
+
+static void v4l_print_enc_idx(const void *arg, bool write_only)
+{
+ const struct v4l2_enc_idx *p = arg;
+
+ pr_cont("entries=%d, entries_cap=%d\n",
+ p->entries, p->entries_cap);
+}
+
+static void v4l_print_encoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_encoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n",
+ p->cmd, p->flags);
+}
+
+static void v4l_print_decoder_cmd(const void *arg, bool write_only)
+{
+ const struct v4l2_decoder_cmd *p = arg;
+
+ pr_cont("cmd=%d, flags=0x%x\n", p->cmd, p->flags);
+
+ if (p->cmd == V4L2_DEC_CMD_START)
+ pr_info("speed=%d, format=%u\n",
+ p->start.speed, p->start.format);
+ else if (p->cmd == V4L2_DEC_CMD_STOP)
+ pr_info("pts=%llu\n", p->stop.pts);
+}
+
+static void v4l_print_dbg_chip_ident(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_chip_ident *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%s, ", p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("chip_ident=%u, revision=0x%x\n",
+ p->ident, p->revision);
+}
+
+static void v4l_print_dbg_register(const void *arg, bool write_only)
+{
+ const struct v4l2_dbg_register *p = arg;
+
+ pr_cont("type=%u, ", p->match.type);
+ if (p->match.type == V4L2_CHIP_MATCH_I2C_DRIVER)
+ pr_cont("name=%s, ", p->match.name);
+ else
+ pr_cont("addr=%u, ", p->match.addr);
+ pr_cont("reg=0x%llx, val=0x%llx\n",
+ p->reg, p->val);
+}
+
+static void v4l_print_dv_enum_presets(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_enum_preset *p = arg;
+
+ pr_cont("index=%u, preset=%u, name=%s, width=%u, height=%u\n",
+ p->index, p->preset, p->name, p->width, p->height);
+}
+
+static void v4l_print_dv_preset(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_preset *p = arg;
+
+ pr_cont("preset=%u\n", p->preset);
+}
+
+static void v4l_print_dv_timings(const void *arg, bool write_only)
+{
+ const struct v4l2_dv_timings *p = arg;
+
switch (p->type) {
case V4L2_DV_BT_656_1120:
- dbgarg2("bt-656/1120:interlaced=%d,"
- " pixelclock=%lld,"
- " width=%d, height=%d, polarities=%x,"
- " hfrontporch=%d, hsync=%d,"
- " hbackporch=%d, vfrontporch=%d,"
- " vsync=%d, vbackporch=%d,"
- " il_vfrontporch=%d, il_vsync=%d,"
- " il_vbackporch=%d, standards=%x, flags=%x\n",
+ pr_cont("type=bt-656/1120, interlaced=%u, "
+ "pixelclock=%llu, "
+ "width=%u, height=%u, polarities=0x%x, "
+ "hfrontporch=%u, hsync=%u, "
+ "hbackporch=%u, vfrontporch=%u, "
+ "vsync=%u, vbackporch=%u, "
+ "il_vfrontporch=%u, il_vsync=%u, "
+ "il_vbackporch=%u, standards=0x%x, flags=0x%x\n",
p->bt.interlaced, p->bt.pixelclock,
p->bt.width, p->bt.height,
p->bt.polarities, p->bt.hfrontporch,
@@ -394,67 +676,184 @@ static void dbgtimings(struct video_device *vfd,
p->bt.standards, p->bt.flags);
break;
default:
- dbgarg2("Unknown type %d!\n", p->type);
+ pr_cont("type=%d\n", p->type);
break;
}
}
-static inline void v4l_print_pix_fmt(struct video_device *vfd,
- struct v4l2_pix_format *fmt)
+static void v4l_print_enum_dv_timings(const void *arg, bool write_only)
{
- dbgarg2("width=%d, height=%d, format=%c%c%c%c, field=%s, "
- "bytesperline=%d sizeimage=%d, colorspace=%d\n",
- fmt->width, fmt->height,
- (fmt->pixelformat & 0xff),
- (fmt->pixelformat >> 8) & 0xff,
- (fmt->pixelformat >> 16) & 0xff,
- (fmt->pixelformat >> 24) & 0xff,
- prt_names(fmt->field, v4l2_field_names),
- fmt->bytesperline, fmt->sizeimage, fmt->colorspace);
-};
+ const struct v4l2_enum_dv_timings *p = arg;
+
+ pr_cont("index=%u, ", p->index);
+ v4l_print_dv_timings(&p->timings, write_only);
+}
-static inline void v4l_print_pix_fmt_mplane(struct video_device *vfd,
- struct v4l2_pix_format_mplane *fmt)
+static void v4l_print_dv_timings_cap(const void *arg, bool write_only)
{
- int i;
+ const struct v4l2_dv_timings_cap *p = arg;
- dbgarg2("width=%d, height=%d, format=%c%c%c%c, field=%s, "
- "colorspace=%d, num_planes=%d\n",
- fmt->width, fmt->height,
- (fmt->pixelformat & 0xff),
- (fmt->pixelformat >> 8) & 0xff,
- (fmt->pixelformat >> 16) & 0xff,
- (fmt->pixelformat >> 24) & 0xff,
- prt_names(fmt->field, v4l2_field_names),
- fmt->colorspace, fmt->num_planes);
+ switch (p->type) {
+ case V4L2_DV_BT_656_1120:
+ pr_cont("type=bt-656/1120, width=%u-%u, height=%u-%u, "
+ "pixelclock=%llu-%llu, standards=0x%x, capabilities=0x%x\n",
+ p->bt.min_width, p->bt.max_width,
+ p->bt.min_height, p->bt.max_height,
+ p->bt.min_pixelclock, p->bt.max_pixelclock,
+ p->bt.standards, p->bt.capabilities);
+ break;
+ default:
+ pr_cont("type=%u\n", p->type);
+ break;
+ }
+}
- for (i = 0; i < fmt->num_planes; ++i)
- dbgarg2("plane %d: bytesperline=%d sizeimage=%d\n", i,
- fmt->plane_fmt[i].bytesperline,
- fmt->plane_fmt[i].sizeimage);
+static void v4l_print_frmsizeenum(const void *arg, bool write_only)
+{
+ const struct v4l2_frmsizeenum *p = arg;
+
+ pr_cont("index=%u, pixelformat=%c%c%c%c, type=%u",
+ p->index,
+ (p->pixel_format & 0xff),
+ (p->pixel_format >> 8) & 0xff,
+ (p->pixel_format >> 16) & 0xff,
+ (p->pixel_format >> 24) & 0xff,
+ p->type);
+ switch (p->type) {
+ case V4L2_FRMSIZE_TYPE_DISCRETE:
+ pr_cont(" wxh=%ux%u\n",
+ p->discrete.width, p->discrete.height);
+ break;
+ case V4L2_FRMSIZE_TYPE_STEPWISE:
+ pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
+ p->stepwise.min_width, p->stepwise.min_height,
+ p->stepwise.max_width, p->stepwise.max_height,
+ p->stepwise.step_width, p->stepwise.step_height);
+ break;
+ case V4L2_FRMSIZE_TYPE_CONTINUOUS:
+ /* fall through */
+ default:
+ pr_cont("\n");
+ break;
+ }
}
-static inline void v4l_print_ext_ctrls(unsigned int cmd,
- struct video_device *vfd, struct v4l2_ext_controls *c, int show_vals)
+static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
- __u32 i;
+ const struct v4l2_frmivalenum *p = arg;
- if (!(vfd->debug & V4L2_DEBUG_IOCTL_ARG))
- return;
- dbgarg(cmd, "");
- printk(KERN_CONT "class=0x%x", c->ctrl_class);
- for (i = 0; i < c->count; i++) {
- if (show_vals && !c->controls[i].size)
- printk(KERN_CONT " id/val=0x%x/0x%x",
- c->controls[i].id, c->controls[i].value);
+ pr_cont("index=%u, pixelformat=%c%c%c%c, wxh=%ux%u, type=%u",
+ p->index,
+ (p->pixel_format & 0xff),
+ (p->pixel_format >> 8) & 0xff,
+ (p->pixel_format >> 16) & 0xff,
+ (p->pixel_format >> 24) & 0xff,
+ p->width, p->height, p->type);
+ switch (p->type) {
+ case V4L2_FRMIVAL_TYPE_DISCRETE:
+ pr_cont(" fps=%d/%d\n",
+ p->discrete.numerator,
+ p->discrete.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_STEPWISE:
+ pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
+ p->stepwise.min.numerator,
+ p->stepwise.min.denominator,
+ p->stepwise.max.numerator,
+ p->stepwise.max.denominator,
+ p->stepwise.step.numerator,
+ p->stepwise.step.denominator);
+ break;
+ case V4L2_FRMIVAL_TYPE_CONTINUOUS:
+ /* fall through */
+ default:
+ pr_cont("\n");
+ break;
+ }
+}
+
+static void v4l_print_event(const void *arg, bool write_only)
+{
+ const struct v4l2_event *p = arg;
+ const struct v4l2_event_ctrl *c;
+
+ pr_cont("type=0x%x, pending=%u, sequence=%u, id=%u, "
+ "timestamp=%lu.%9.9lu\n",
+ p->type, p->pending, p->sequence, p->id,
+ p->timestamp.tv_sec, p->timestamp.tv_nsec);
+ switch (p->type) {
+ case V4L2_EVENT_VSYNC:
+ printk(KERN_DEBUG "field=%s\n",
+ prt_names(p->u.vsync.field, v4l2_field_names));
+ break;
+ case V4L2_EVENT_CTRL:
+ c = &p->u.ctrl;
+ printk(KERN_DEBUG "changes=0x%x, type=%u, ",
+ c->changes, c->type);
+ if (c->type == V4L2_CTRL_TYPE_INTEGER64)
+ pr_cont("value64=%lld, ", c->value64);
else
- printk(KERN_CONT " id=0x%x,size=%u",
- c->controls[i].id, c->controls[i].size);
+ pr_cont("value=%d, ", c->value);
+ pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
+ " default_value=%d\n",
+ c->flags, c->minimum, c->maximum,
+ c->step, c->default_value);
+ break;
+ case V4L2_EVENT_FRAME_SYNC:
+ pr_cont("frame_sequence=%u\n",
+ p->u.frame_sync.frame_sequence);
+ break;
}
- printk(KERN_CONT "\n");
-};
+}
+
+static void v4l_print_event_subscription(const void *arg, bool write_only)
+{
+ const struct v4l2_event_subscription *p = arg;
-static inline int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+ pr_cont("type=0x%x, id=0x%x, flags=0x%x\n",
+ p->type, p->id, p->flags);
+}
+
+static void v4l_print_sliced_vbi_cap(const void *arg, bool write_only)
+{
+ const struct v4l2_sliced_vbi_cap *p = arg;
+ int i;
+
+ pr_cont("type=%s, service_set=0x%08x\n",
+ prt_names(p->type, v4l2_type_names), p->service_set);
+ for (i = 0; i < 24; i++)
+ printk(KERN_DEBUG "line[%02u]=0x%04x, 0x%04x\n", i,
+ p->service_lines[0][i],
+ p->service_lines[1][i]);
+}
+
+static void v4l_print_freq_band(const void *arg, bool write_only)
+{
+ const struct v4l2_frequency_band *p = arg;
+
+ pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
+ "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+ p->tuner, p->type, p->index,
+ p->capability, p->rangelow,
+ p->rangehigh, p->modulation);
+}
+
+static void v4l_print_u32(const void *arg, bool write_only)
+{
+ pr_cont("value=%u\n", *(const u32 *)arg);
+}
+
+static void v4l_print_newline(const void *arg, bool write_only)
+{
+ pr_cont("\n");
+}
+
+static void v4l_print_default(const void *arg, bool write_only)
+{
+ pr_cont("driver-specific ioctl\n");
+}
+
+static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
{
__u32 i;
@@ -536,1615 +935,1238 @@ static int check_fmt(const struct v4l2_ioctl_ops *ops, enum v4l2_buf_type type)
return -EINVAL;
}
-static long __video_do_ioctl(struct file *file,
- unsigned int cmd, void *arg)
+static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
{
- struct video_device *vfd = video_devdata(file);
- const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
- void *fh = file->private_data;
- struct v4l2_fh *vfh = NULL;
- int use_fh_prio = 0;
- long ret = -ENOTTY;
-
- if (ops == NULL) {
- printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n",
- vfd->name);
- return ret;
- }
+ struct v4l2_capability *cap = (struct v4l2_capability *)arg;
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- vfh = file->private_data;
- use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
- }
+ cap->version = LINUX_VERSION_CODE;
+ return ops->vidioc_querycap(file, fh, cap);
+}
- if (v4l2_is_known_ioctl(cmd)) {
- struct v4l2_ioctl_info *info = &v4l2_ioctls[_IOC_NR(cmd)];
+static int v4l_s_input(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_s_input(file, fh, *(unsigned int *)arg);
+}
- if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
- !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
- return -ENOTTY;
+static int v4l_s_output(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_s_output(file, fh, *(unsigned int *)arg);
+}
- if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
- ret = v4l2_prio_check(vfd->prio, vfh->prio);
- if (ret)
- return ret;
- }
- }
+static int v4l_g_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ u32 *p = arg;
- if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
- !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) {
- v4l_print_ioctl(vfd->name, cmd);
- printk(KERN_CONT "\n");
- }
+ if (ops->vidioc_g_priority)
+ return ops->vidioc_g_priority(file, fh, arg);
+ vfd = video_devdata(file);
+ *p = v4l2_prio_max(&vfd->v4l2_dev->prio);
+ return 0;
+}
- switch (cmd) {
+static int v4l_s_priority(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd;
+ struct v4l2_fh *vfh;
+ u32 *p = arg;
+
+ if (ops->vidioc_s_priority)
+ return ops->vidioc_s_priority(file, fh, *p);
+ vfd = video_devdata(file);
+ vfh = file->private_data;
+ return v4l2_prio_change(&vfd->v4l2_dev->prio, &vfh->prio, *p);
+}
- /* --- capabilities ------------------------------------------ */
- case VIDIOC_QUERYCAP:
- {
- struct v4l2_capability *cap = (struct v4l2_capability *)arg;
-
- cap->version = LINUX_VERSION_CODE;
- ret = ops->vidioc_querycap(file, fh, cap);
- if (!ret)
- dbgarg(cmd, "driver=%s, card=%s, bus=%s, "
- "version=0x%08x, "
- "capabilities=0x%08x, "
- "device_caps=0x%08x\n",
- cap->driver, cap->card, cap->bus_info,
- cap->version,
- cap->capabilities,
- cap->device_caps);
- break;
- }
+static int v4l_enuminput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_input *p = arg;
- /* --- priority ------------------------------------------ */
- case VIDIOC_G_PRIORITY:
- {
- enum v4l2_priority *p = arg;
+ /*
+ * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
+ * CAP_STD here based on ioctl handler provided by the
+ * driver. If the driver doesn't support these
+ * for a specific input, it must override these flags.
+ */
+ if (ops->vidioc_s_std)
+ p->capabilities |= V4L2_IN_CAP_STD;
+ if (ops->vidioc_s_dv_preset)
+ p->capabilities |= V4L2_IN_CAP_PRESETS;
+ if (ops->vidioc_s_dv_timings)
+ p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
+
+ return ops->vidioc_enum_input(file, fh, p);
+}
- if (ops->vidioc_g_priority) {
- ret = ops->vidioc_g_priority(file, fh, p);
- } else if (use_fh_prio) {
- *p = v4l2_prio_max(&vfd->v4l2_dev->prio);
- ret = 0;
- }
- if (!ret)
- dbgarg(cmd, "priority is %d\n", *p);
- break;
- }
- case VIDIOC_S_PRIORITY:
- {
- enum v4l2_priority *p = arg;
+static int v4l_enumoutput(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_output *p = arg;
- dbgarg(cmd, "setting priority to %d\n", *p);
- if (ops->vidioc_s_priority)
- ret = ops->vidioc_s_priority(file, fh, *p);
- else
- ret = v4l2_prio_change(&vfd->v4l2_dev->prio,
- &vfh->prio, *p);
- break;
- }
+ /*
+ * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
+ * CAP_STD here based on ioctl handler provided by the
+ * driver. If the driver doesn't support these
+ * for a specific output, it must override these flags.
+ */
+ if (ops->vidioc_s_std)
+ p->capabilities |= V4L2_OUT_CAP_STD;
+ if (ops->vidioc_s_dv_preset)
+ p->capabilities |= V4L2_OUT_CAP_PRESETS;
+ if (ops->vidioc_s_dv_timings)
+ p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS;
+
+ return ops->vidioc_enum_output(file, fh, p);
+}
- /* --- capture ioctls ---------------------------------------- */
- case VIDIOC_ENUM_FMT:
- {
- struct v4l2_fmtdesc *f = arg;
+static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_fmtdesc *p = arg;
- ret = -EINVAL;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (likely(ops->vidioc_enum_fmt_vid_cap))
- ret = ops->vidioc_enum_fmt_vid_cap(file, fh, f);
- break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (likely(ops->vidioc_enum_fmt_vid_cap_mplane))
- ret = ops->vidioc_enum_fmt_vid_cap_mplane(file,
- fh, f);
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_cap))
break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (likely(ops->vidioc_enum_fmt_vid_overlay))
- ret = ops->vidioc_enum_fmt_vid_overlay(file,
- fh, f);
+ return ops->vidioc_enum_fmt_vid_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_cap_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (likely(ops->vidioc_enum_fmt_vid_out))
- ret = ops->vidioc_enum_fmt_vid_out(file, fh, f);
+ return ops->vidioc_enum_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_overlay))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (likely(ops->vidioc_enum_fmt_vid_out_mplane))
- ret = ops->vidioc_enum_fmt_vid_out_mplane(file,
- fh, f);
+ return ops->vidioc_enum_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_out))
break;
- case V4L2_BUF_TYPE_PRIVATE:
- if (likely(ops->vidioc_enum_fmt_type_private))
- ret = ops->vidioc_enum_fmt_type_private(file,
- fh, f);
+ return ops->vidioc_enum_fmt_vid_out(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_enum_fmt_vid_out_mplane))
break;
- default:
+ return ops->vidioc_enum_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_PRIVATE:
+ if (unlikely(!ops->vidioc_enum_fmt_type_private))
break;
- }
- if (likely(!ret))
- dbgarg(cmd, "index=%d, type=%d, flags=%d, "
- "pixelformat=%c%c%c%c, description='%s'\n",
- f->index, f->type, f->flags,
- (f->pixelformat & 0xff),
- (f->pixelformat >> 8) & 0xff,
- (f->pixelformat >> 16) & 0xff,
- (f->pixelformat >> 24) & 0xff,
- f->description);
- break;
+ return ops->vidioc_enum_fmt_type_private(file, fh, arg);
}
- case VIDIOC_G_FMT:
- {
- struct v4l2_format *f = (struct v4l2_format *)arg;
-
- /* FIXME: Should be one dump per type */
- dbgarg(cmd, "type=%s\n", prt_names(f->type, v4l2_type_names));
-
- ret = -EINVAL;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (ops->vidioc_g_fmt_vid_cap)
- ret = ops->vidioc_g_fmt_vid_cap(file, fh, f);
- if (!ret)
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
+ return -EINVAL;
+}
+
+static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_g_fmt_vid_cap))
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (ops->vidioc_g_fmt_vid_cap_mplane)
- ret = ops->vidioc_g_fmt_vid_cap_mplane(file,
- fh, f);
- if (!ret)
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ return ops->vidioc_g_fmt_vid_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_g_fmt_vid_cap_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- if (likely(ops->vidioc_g_fmt_vid_overlay))
- ret = ops->vidioc_g_fmt_vid_overlay(file,
- fh, f);
+ return ops->vidioc_g_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_g_fmt_vid_overlay))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- if (ops->vidioc_g_fmt_vid_out)
- ret = ops->vidioc_g_fmt_vid_out(file, fh, f);
- if (!ret)
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
+ return ops->vidioc_g_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_g_fmt_vid_out))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- if (ops->vidioc_g_fmt_vid_out_mplane)
- ret = ops->vidioc_g_fmt_vid_out_mplane(file,
- fh, f);
- if (!ret)
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ return ops->vidioc_g_fmt_vid_out(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_g_fmt_vid_out_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- if (likely(ops->vidioc_g_fmt_vid_out_overlay))
- ret = ops->vidioc_g_fmt_vid_out_overlay(file,
- fh, f);
+ return ops->vidioc_g_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!ops->vidioc_g_fmt_vid_out_overlay))
break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (likely(ops->vidioc_g_fmt_vbi_cap))
- ret = ops->vidioc_g_fmt_vbi_cap(file, fh, f);
+ return ops->vidioc_g_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_g_fmt_vbi_cap))
break;
- case V4L2_BUF_TYPE_VBI_OUTPUT:
- if (likely(ops->vidioc_g_fmt_vbi_out))
- ret = ops->vidioc_g_fmt_vbi_out(file, fh, f);
+ return ops->vidioc_g_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_g_fmt_vbi_out))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- if (likely(ops->vidioc_g_fmt_sliced_vbi_cap))
- ret = ops->vidioc_g_fmt_sliced_vbi_cap(file,
- fh, f);
+ return ops->vidioc_g_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_g_fmt_sliced_vbi_cap))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- if (likely(ops->vidioc_g_fmt_sliced_vbi_out))
- ret = ops->vidioc_g_fmt_sliced_vbi_out(file,
- fh, f);
+ return ops->vidioc_g_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_g_fmt_sliced_vbi_out))
break;
- case V4L2_BUF_TYPE_PRIVATE:
- if (likely(ops->vidioc_g_fmt_type_private))
- ret = ops->vidioc_g_fmt_type_private(file,
- fh, f);
+ return ops->vidioc_g_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_PRIVATE:
+ if (unlikely(!ops->vidioc_g_fmt_type_private))
break;
- }
- break;
+ return ops->vidioc_g_fmt_type_private(file, fh, arg);
}
- case VIDIOC_S_FMT:
- {
- struct v4l2_format *f = (struct v4l2_format *)arg;
-
- ret = -EINVAL;
+ return -EINVAL;
+}
- /* FIXME: Should be one dump per type */
- dbgarg(cmd, "type=%s\n", prt_names(f->type, v4l2_type_names));
+static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.pix);
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_cap)
- ret = ops->vidioc_s_fmt_vid_cap(file, fh, f);
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap))
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
- if (ops->vidioc_s_fmt_vid_cap_mplane)
- ret = ops->vidioc_s_fmt_vid_cap_mplane(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ return ops->vidioc_s_fmt_vid_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- CLEAR_AFTER_FIELD(f, fmt.win);
- if (ops->vidioc_s_fmt_vid_overlay)
- ret = ops->vidioc_s_fmt_vid_overlay(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.pix);
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
- if (ops->vidioc_s_fmt_vid_out)
- ret = ops->vidioc_s_fmt_vid_out(file, fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_s_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
- if (ops->vidioc_s_fmt_vid_out_mplane)
- ret = ops->vidioc_s_fmt_vid_out_mplane(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ return ops->vidioc_s_fmt_vid_out(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- CLEAR_AFTER_FIELD(f, fmt.win);
- if (ops->vidioc_s_fmt_vid_out_overlay)
- ret = ops->vidioc_s_fmt_vid_out_overlay(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (likely(ops->vidioc_s_fmt_vbi_cap))
- ret = ops->vidioc_s_fmt_vbi_cap(file, fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_s_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_cap))
break;
- case V4L2_BUF_TYPE_VBI_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (likely(ops->vidioc_s_fmt_vbi_out))
- ret = ops->vidioc_s_fmt_vbi_out(file, fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_s_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_vbi_out))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (likely(ops->vidioc_s_fmt_sliced_vbi_cap))
- ret = ops->vidioc_s_fmt_sliced_vbi_cap(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_s_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_cap))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (likely(ops->vidioc_s_fmt_sliced_vbi_out))
- ret = ops->vidioc_s_fmt_sliced_vbi_out(file,
- fh, f);
-
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_s_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_sliced_vbi_out))
break;
- case V4L2_BUF_TYPE_PRIVATE:
- /* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */
- if (likely(ops->vidioc_s_fmt_type_private))
- ret = ops->vidioc_s_fmt_type_private(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_s_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_PRIVATE:
+ if (unlikely(!ops->vidioc_s_fmt_type_private))
break;
- }
- break;
+ return ops->vidioc_s_fmt_type_private(file, fh, arg);
}
- case VIDIOC_TRY_FMT:
- {
- struct v4l2_format *f = (struct v4l2_format *)arg;
-
- /* FIXME: Should be one dump per type */
- dbgarg(cmd, "type=%s\n", prt_names(f->type,
- v4l2_type_names));
- ret = -EINVAL;
- switch (f->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_cap)
- ret = ops->vidioc_try_fmt_vid_cap(file, fh, f);
- if (!ret)
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
+ return -EINVAL;
+}
+
+static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_format *p = arg;
+
+ switch (p->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap))
break;
- case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- if (ops->vidioc_try_fmt_vid_cap_mplane)
- ret = ops->vidioc_try_fmt_vid_cap_mplane(file,
- fh, f);
- if (!ret)
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ return ops->vidioc_try_fmt_vid_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OVERLAY:
- CLEAR_AFTER_FIELD(f, fmt.win);
- if (likely(ops->vidioc_try_fmt_vid_overlay))
- ret = ops->vidioc_try_fmt_vid_overlay(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.pix);
- if (ops->vidioc_try_fmt_vid_out)
- ret = ops->vidioc_try_fmt_vid_out(file, fh, f);
- if (!ret)
- v4l_print_pix_fmt(vfd, &f->fmt.pix);
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_try_fmt_vid_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- CLEAR_AFTER_FIELD(f, fmt.pix_mp);
- if (ops->vidioc_try_fmt_vid_out_mplane)
- ret = ops->vidioc_try_fmt_vid_out_mplane(file,
- fh, f);
- if (!ret)
- v4l_print_pix_fmt_mplane(vfd, &f->fmt.pix_mp);
+ CLEAR_AFTER_FIELD(p, fmt.pix);
+ return ops->vidioc_try_fmt_vid_out(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
- case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
- CLEAR_AFTER_FIELD(f, fmt.win);
- if (likely(ops->vidioc_try_fmt_vid_out_overlay))
- ret = ops->vidioc_try_fmt_vid_out_overlay(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp);
+ return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (likely(ops->vidioc_try_fmt_vbi_cap))
- ret = ops->vidioc_try_fmt_vbi_cap(file, fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.win);
+ return ops->vidioc_try_fmt_vid_out_overlay(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_cap))
break;
- case V4L2_BUF_TYPE_VBI_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.vbi);
- if (likely(ops->vidioc_try_fmt_vbi_out))
- ret = ops->vidioc_try_fmt_vbi_out(file, fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_try_fmt_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_vbi_out))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
- CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (likely(ops->vidioc_try_fmt_sliced_vbi_cap))
- ret = ops->vidioc_try_fmt_sliced_vbi_cap(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.vbi);
+ return ops->vidioc_try_fmt_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_cap))
break;
- case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
- CLEAR_AFTER_FIELD(f, fmt.sliced);
- if (likely(ops->vidioc_try_fmt_sliced_vbi_out))
- ret = ops->vidioc_try_fmt_sliced_vbi_out(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_try_fmt_sliced_vbi_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_sliced_vbi_out))
break;
- case V4L2_BUF_TYPE_PRIVATE:
- /* CLEAR_AFTER_FIELD(f, fmt.raw_data); <- does nothing */
- if (likely(ops->vidioc_try_fmt_type_private))
- ret = ops->vidioc_try_fmt_type_private(file,
- fh, f);
+ CLEAR_AFTER_FIELD(p, fmt.sliced);
+ return ops->vidioc_try_fmt_sliced_vbi_out(file, fh, arg);
+ case V4L2_BUF_TYPE_PRIVATE:
+ if (unlikely(!ops->vidioc_try_fmt_type_private))
break;
- }
- break;
+ return ops->vidioc_try_fmt_type_private(file, fh, arg);
}
- /* FIXME: Those buf reqs could be handled here,
- with some changes on videobuf to allow its header to be included at
- videodev2.h or being merged at videodev2.
- */
- case VIDIOC_REQBUFS:
- {
- struct v4l2_requestbuffers *p = arg;
+ return -EINVAL;
+}
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
+static int v4l_streamon(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamon(file, fh, *(unsigned int *)arg);
+}
- if (p->type < V4L2_BUF_TYPE_PRIVATE)
- CLEAR_AFTER_FIELD(p, memory);
+static int v4l_streamoff(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_streamoff(file, fh, *(unsigned int *)arg);
+}
- ret = ops->vidioc_reqbufs(file, fh, p);
- dbgarg(cmd, "count=%d, type=%s, memory=%s\n",
- p->count,
- prt_names(p->type, v4l2_type_names),
- prt_names(p->memory, v4l2_memory_names));
- break;
- }
- case VIDIOC_QUERYBUF:
- {
- struct v4l2_buffer *p = arg;
+static int v4l_g_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
+ int err;
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ err = ops->vidioc_g_tuner(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
- ret = ops->vidioc_querybuf(file, fh, p);
- if (!ret)
- dbgbuf(cmd, vfd, p);
- break;
- }
- case VIDIOC_QBUF:
- {
- struct v4l2_buffer *p = arg;
+static int v4l_s_tuner(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_tuner *p = arg;
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_s_tuner(file, fh, p);
+}
- ret = ops->vidioc_qbuf(file, fh, p);
- if (!ret)
- dbgbuf(cmd, vfd, p);
- break;
- }
- case VIDIOC_DQBUF:
- {
- struct v4l2_buffer *p = arg;
+static int v4l_g_modulator(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_modulator *p = arg;
+ int err;
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
+ err = ops->vidioc_g_modulator(file, fh, p);
+ if (!err)
+ p->capability |= V4L2_TUNER_CAP_FREQ_BANDS;
+ return err;
+}
- ret = ops->vidioc_dqbuf(file, fh, p);
- if (!ret)
- dbgbuf(cmd, vfd, p);
- break;
- }
- case VIDIOC_OVERLAY:
- {
- int *i = arg;
+static int v4l_g_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency *p = arg;
- dbgarg(cmd, "value=%d\n", *i);
- ret = ops->vidioc_overlay(file, fh, *i);
- break;
- }
- case VIDIOC_G_FBUF:
- {
- struct v4l2_framebuffer *p = arg;
-
- ret = ops->vidioc_g_fbuf(file, fh, arg);
- if (!ret) {
- dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
- p->capability, p->flags,
- (unsigned long)p->base);
- v4l_print_pix_fmt(vfd, &p->fmt);
- }
- break;
- }
- case VIDIOC_S_FBUF:
- {
- struct v4l2_framebuffer *p = arg;
-
- dbgarg(cmd, "capability=0x%x, flags=%d, base=0x%08lx\n",
- p->capability, p->flags, (unsigned long)p->base);
- v4l_print_pix_fmt(vfd, &p->fmt);
- ret = ops->vidioc_s_fbuf(file, fh, arg);
- break;
- }
- case VIDIOC_STREAMON:
- {
- enum v4l2_buf_type i = *(int *)arg;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ return ops->vidioc_g_frequency(file, fh, p);
+}
- dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
- ret = ops->vidioc_streamon(file, fh, i);
- break;
- }
- case VIDIOC_STREAMOFF:
- {
- enum v4l2_buf_type i = *(int *)arg;
+static int v4l_s_frequency(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency *p = arg;
+ enum v4l2_tuner_type type;
- dbgarg(cmd, "type=%s\n", prt_names(i, v4l2_type_names));
- ret = ops->vidioc_streamoff(file, fh, i);
- break;
- }
- /* ---------- tv norms ---------- */
- case VIDIOC_ENUMSTD:
- {
- struct v4l2_standard *p = arg;
- v4l2_std_id id = vfd->tvnorms, curr_id = 0;
- unsigned int index = p->index, i, j = 0;
- const char *descr = "";
-
- if (id == 0)
- break;
- ret = -EINVAL;
-
- /* Return norm array in a canonical way */
- for (i = 0; i <= index && id; i++) {
- /* last std value in the standards array is 0, so this
- while always ends there since (id & 0) == 0. */
- while ((id & standards[j].std) != standards[j].std)
- j++;
- curr_id = standards[j].std;
- descr = standards[j].descr;
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (p->type != type)
+ return -EINVAL;
+ return ops->vidioc_s_frequency(file, fh, p);
+}
+
+static int v4l_enumstd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_standard *p = arg;
+ v4l2_std_id id = vfd->tvnorms, curr_id = 0;
+ unsigned int index = p->index, i, j = 0;
+ const char *descr = "";
+
+ /* Return norm array in a canonical way */
+ for (i = 0; i <= index && id; i++) {
+ /* last std value in the standards array is 0, so this
+ while always ends there since (id & 0) == 0. */
+ while ((id & standards[j].std) != standards[j].std)
j++;
- if (curr_id == 0)
- break;
- if (curr_id != V4L2_STD_PAL &&
- curr_id != V4L2_STD_SECAM &&
- curr_id != V4L2_STD_NTSC)
- id &= ~curr_id;
- }
- if (i <= index)
+ curr_id = standards[j].std;
+ descr = standards[j].descr;
+ j++;
+ if (curr_id == 0)
break;
+ if (curr_id != V4L2_STD_PAL &&
+ curr_id != V4L2_STD_SECAM &&
+ curr_id != V4L2_STD_NTSC)
+ id &= ~curr_id;
+ }
+ if (i <= index)
+ return -EINVAL;
- v4l2_video_std_construct(p, curr_id, descr);
+ v4l2_video_std_construct(p, curr_id, descr);
+ return 0;
+}
- dbgarg(cmd, "index=%d, id=0x%Lx, name=%s, fps=%d/%d, "
- "framelines=%d\n", p->index,
- (unsigned long long)p->id, p->name,
- p->frameperiod.numerator,
- p->frameperiod.denominator,
- p->framelines);
+static int v4l_g_std(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id *id = arg;
- ret = 0;
- break;
+ /* Calls the specific handler */
+ if (ops->vidioc_g_std)
+ return ops->vidioc_g_std(file, fh, arg);
+ if (vfd->current_norm) {
+ *id = vfd->current_norm;
+ return 0;
}
- case VIDIOC_G_STD:
- {
- v4l2_std_id *id = arg;
-
- /* Calls the specific handler */
- if (ops->vidioc_g_std)
- ret = ops->vidioc_g_std(file, fh, id);
- else if (vfd->current_norm) {
- ret = 0;
- *id = vfd->current_norm;
- }
+ return -ENOTTY;
+}
- if (likely(!ret))
- dbgarg(cmd, "std=0x%08Lx\n", (long long unsigned)*id);
- break;
- }
- case VIDIOC_S_STD:
- {
- v4l2_std_id *id = arg, norm;
+static int v4l_s_std(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id *id = arg, norm;
+ int ret;
- dbgarg(cmd, "std=%08Lx\n", (long long unsigned)*id);
+ norm = (*id) & vfd->tvnorms;
+ if (vfd->tvnorms && !norm) /* Check if std is supported */
+ return -EINVAL;
- ret = -EINVAL;
- norm = (*id) & vfd->tvnorms;
- if (vfd->tvnorms && !norm) /* Check if std is supported */
- break;
+ /* Calls the specific handler */
+ ret = ops->vidioc_s_std(file, fh, &norm);
- /* Calls the specific handler */
- ret = ops->vidioc_s_std(file, fh, &norm);
+ /* Updates standard information */
+ if (ret >= 0)
+ vfd->current_norm = norm;
+ return ret;
+}
- /* Updates standard information */
- if (ret >= 0)
- vfd->current_norm = norm;
- break;
- }
- case VIDIOC_QUERYSTD:
- {
- v4l2_std_id *p = arg;
+static int v4l_querystd(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ v4l2_std_id *p = arg;
+
+ /*
+ * If nothing is detected, it should return all supported
+ * standards.
+ * Drivers just need to mask the std argument in order
+ * to remove the standards that don't apply from the mask.
+ * This means that tuners, audio and video decoders can join
+ * their efforts to improve the standards detection.
+ */
+ *p = vfd->tvnorms;
+ return ops->vidioc_querystd(file, fh, arg);
+}
- /*
- * If nothing detected, it should return all supported
- * Drivers just need to mask the std argument, in order
- * to remove the standards that don't apply from the mask.
- * This means that tuners, audio and video decoders can join
- * their efforts to improve the standards detection
- */
- *p = vfd->tvnorms;
- ret = ops->vidioc_querystd(file, fh, arg);
- if (!ret)
- dbgarg(cmd, "detected std=%08Lx\n",
- (unsigned long long)*p);
- break;
- }
- /* ------ input switching ---------- */
- /* FIXME: Inputs can be handled inside videodev2 */
- case VIDIOC_ENUMINPUT:
- {
- struct v4l2_input *p = arg;
+static int v4l_s_hw_freq_seek(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
- /*
- * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
- * CAP_STD here based on ioctl handler provided by the
- * driver. If the driver doesn't support these
- * for a specific input, it must override these flags.
- */
- if (ops->vidioc_s_std)
- p->capabilities |= V4L2_IN_CAP_STD;
- if (ops->vidioc_s_dv_preset)
- p->capabilities |= V4L2_IN_CAP_PRESETS;
- if (ops->vidioc_s_dv_timings)
- p->capabilities |= V4L2_IN_CAP_CUSTOM_TIMINGS;
-
- ret = ops->vidioc_enum_input(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, type=%d, "
- "audioset=%d, "
- "tuner=%d, std=%08Lx, status=%d\n",
- p->index, p->name, p->type, p->audioset,
- p->tuner,
- (unsigned long long)p->std,
- p->status);
- break;
- }
- case VIDIOC_G_INPUT:
- {
- unsigned int *i = arg;
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
+ if (p->type != type)
+ return -EINVAL;
+ return ops->vidioc_s_hw_freq_seek(file, fh, p);
+}
- ret = ops->vidioc_g_input(file, fh, i);
- if (!ret)
- dbgarg(cmd, "value=%d\n", *i);
- break;
- }
- case VIDIOC_S_INPUT:
- {
- unsigned int *i = arg;
+static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_requestbuffers *p = arg;
+ int ret = check_fmt(ops, p->type);
- dbgarg(cmd, "value=%d\n", *i);
- ret = ops->vidioc_s_input(file, fh, *i);
- break;
- }
+ if (ret)
+ return ret;
- /* ------ output switching ---------- */
- case VIDIOC_ENUMOUTPUT:
- {
- struct v4l2_output *p = arg;
+ if (p->type < V4L2_BUF_TYPE_PRIVATE)
+ CLEAR_AFTER_FIELD(p, memory);
- /*
- * We set the flags for CAP_PRESETS, CAP_CUSTOM_TIMINGS &
- * CAP_STD here based on ioctl handler provided by the
- * driver. If the driver doesn't support these
- * for a specific output, it must override these flags.
- */
- if (ops->vidioc_s_std)
- p->capabilities |= V4L2_OUT_CAP_STD;
- if (ops->vidioc_s_dv_preset)
- p->capabilities |= V4L2_OUT_CAP_PRESETS;
- if (ops->vidioc_s_dv_timings)
- p->capabilities |= V4L2_OUT_CAP_CUSTOM_TIMINGS;
-
- ret = ops->vidioc_enum_output(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, type=%d, "
- "audioset=0x%x, "
- "modulator=%d, std=0x%08Lx\n",
- p->index, p->name, p->type, p->audioset,
- p->modulator, (unsigned long long)p->std);
- break;
- }
- case VIDIOC_G_OUTPUT:
- {
- unsigned int *i = arg;
+ return ops->vidioc_reqbufs(file, fh, p);
+}
- ret = ops->vidioc_g_output(file, fh, i);
- if (!ret)
- dbgarg(cmd, "value=%d\n", *i);
- break;
- }
- case VIDIOC_S_OUTPUT:
- {
- unsigned int *i = arg;
+static int v4l_querybuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(ops, p->type);
- dbgarg(cmd, "value=%d\n", *i);
- ret = ops->vidioc_s_output(file, fh, *i);
- break;
- }
+ return ret ? ret : ops->vidioc_querybuf(file, fh, p);
+}
- /* --- controls ---------------------------------------------- */
- case VIDIOC_QUERYCTRL:
- {
- struct v4l2_queryctrl *p = arg;
-
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_queryctrl(vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_queryctrl(vfd->ctrl_handler, p);
- else if (ops->vidioc_queryctrl)
- ret = ops->vidioc_queryctrl(file, fh, p);
- else
- break;
- if (!ret)
- dbgarg(cmd, "id=0x%x, type=%d, name=%s, min/max=%d/%d, "
- "step=%d, default=%d, flags=0x%08x\n",
- p->id, p->type, p->name,
- p->minimum, p->maximum,
- p->step, p->default_value, p->flags);
- else
- dbgarg(cmd, "id=0x%x\n", p->id);
- break;
- }
- case VIDIOC_G_CTRL:
- {
- struct v4l2_control *p = arg;
-
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_g_ctrl(vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_g_ctrl(vfd->ctrl_handler, p);
- else if (ops->vidioc_g_ctrl)
- ret = ops->vidioc_g_ctrl(file, fh, p);
- else if (ops->vidioc_g_ext_ctrls) {
- struct v4l2_ext_controls ctrls;
- struct v4l2_ext_control ctrl;
-
- ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
- ctrls.count = 1;
- ctrls.controls = &ctrl;
- ctrl.id = p->id;
- ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1)) {
- ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
- if (ret == 0)
- p->value = ctrl.value;
- }
- } else
- break;
- if (!ret)
- dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
- else
- dbgarg(cmd, "id=0x%x\n", p->id);
- break;
- }
- case VIDIOC_S_CTRL:
- {
- struct v4l2_control *p = arg;
- struct v4l2_ext_controls ctrls;
- struct v4l2_ext_control ctrl;
-
- if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
- !ops->vidioc_s_ctrl && !ops->vidioc_s_ext_ctrls)
- break;
+static int v4l_qbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(ops, p->type);
- dbgarg(cmd, "id=0x%x, value=%d\n", p->id, p->value);
+ return ret ? ret : ops->vidioc_qbuf(file, fh, p);
+}
- if (vfh && vfh->ctrl_handler) {
- ret = v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
- break;
- }
- if (vfd->ctrl_handler) {
- ret = v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
- break;
- }
- if (ops->vidioc_s_ctrl) {
- ret = ops->vidioc_s_ctrl(file, fh, p);
- break;
- }
- if (!ops->vidioc_s_ext_ctrls)
- break;
+static int v4l_dqbuf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *p = arg;
+ int ret = check_fmt(ops, p->type);
- ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
- ctrls.count = 1;
- ctrls.controls = &ctrl;
- ctrl.id = p->id;
- ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1))
- ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
- else
- ret = -EINVAL;
- break;
- }
- case VIDIOC_G_EXT_CTRLS:
- {
- struct v4l2_ext_controls *p = arg;
-
- p->error_idx = p->count;
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
- else if (ops->vidioc_g_ext_ctrls)
- ret = check_ext_ctrls(p, 0) ?
- ops->vidioc_g_ext_ctrls(file, fh, p) :
- -EINVAL;
- else
- break;
- v4l_print_ext_ctrls(cmd, vfd, p, !ret);
- break;
- }
- case VIDIOC_S_EXT_CTRLS:
- {
- struct v4l2_ext_controls *p = arg;
+ return ret ? ret : ops->vidioc_dqbuf(file, fh, p);
+}
- p->error_idx = p->count;
- if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
- !ops->vidioc_s_ext_ctrls)
- break;
- v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
- else if (check_ext_ctrls(p, 0))
- ret = ops->vidioc_s_ext_ctrls(file, fh, p);
- else
- ret = -EINVAL;
- break;
- }
- case VIDIOC_TRY_EXT_CTRLS:
- {
- struct v4l2_ext_controls *p = arg;
+static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_create_buffers *create = arg;
+ int ret = check_fmt(ops, create->format.type);
- p->error_idx = p->count;
- if (!(vfh && vfh->ctrl_handler) && !vfd->ctrl_handler &&
- !ops->vidioc_try_ext_ctrls)
- break;
- v4l_print_ext_ctrls(cmd, vfd, p, 1);
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
- else if (check_ext_ctrls(p, 0))
- ret = ops->vidioc_try_ext_ctrls(file, fh, p);
- else
- ret = -EINVAL;
- break;
- }
- case VIDIOC_QUERYMENU:
- {
- struct v4l2_querymenu *p = arg;
-
- if (vfh && vfh->ctrl_handler)
- ret = v4l2_querymenu(vfh->ctrl_handler, p);
- else if (vfd->ctrl_handler)
- ret = v4l2_querymenu(vfd->ctrl_handler, p);
- else if (ops->vidioc_querymenu)
- ret = ops->vidioc_querymenu(file, fh, p);
- else
- break;
- if (!ret)
- dbgarg(cmd, "id=0x%x, index=%d, name=%s\n",
- p->id, p->index, p->name);
- else
- dbgarg(cmd, "id=0x%x, index=%d\n",
- p->id, p->index);
- break;
- }
- /* --- audio ---------------------------------------------- */
- case VIDIOC_ENUMAUDIO:
- {
- struct v4l2_audio *p = arg;
-
- ret = ops->vidioc_enumaudio(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
- "mode=0x%x\n", p->index, p->name,
- p->capability, p->mode);
- else
- dbgarg(cmd, "index=%d\n", p->index);
- break;
- }
- case VIDIOC_G_AUDIO:
- {
- struct v4l2_audio *p = arg;
-
- ret = ops->vidioc_g_audio(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
- "mode=0x%x\n", p->index,
- p->name, p->capability, p->mode);
- else
- dbgarg(cmd, "index=%d\n", p->index);
- break;
- }
- case VIDIOC_S_AUDIO:
- {
- struct v4l2_audio *p = arg;
-
- dbgarg(cmd, "index=%d, name=%s, capability=0x%x, "
- "mode=0x%x\n", p->index, p->name,
- p->capability, p->mode);
- ret = ops->vidioc_s_audio(file, fh, p);
- break;
- }
- case VIDIOC_ENUMAUDOUT:
- {
- struct v4l2_audioout *p = arg;
-
- dbgarg(cmd, "Enum for index=%d\n", p->index);
- ret = ops->vidioc_enumaudout(file, fh, p);
- if (!ret)
- dbgarg2("index=%d, name=%s, capability=%d, "
- "mode=%d\n", p->index, p->name,
- p->capability, p->mode);
- break;
- }
- case VIDIOC_G_AUDOUT:
- {
- struct v4l2_audioout *p = arg;
-
- ret = ops->vidioc_g_audout(file, fh, p);
- if (!ret)
- dbgarg2("index=%d, name=%s, capability=%d, "
- "mode=%d\n", p->index, p->name,
- p->capability, p->mode);
- break;
- }
- case VIDIOC_S_AUDOUT:
- {
- struct v4l2_audioout *p = arg;
+ return ret ? ret : ops->vidioc_create_bufs(file, fh, create);
+}
- dbgarg(cmd, "index=%d, name=%s, capability=%d, "
- "mode=%d\n", p->index, p->name,
- p->capability, p->mode);
+static int v4l_prepare_buf(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_buffer *b = arg;
+ int ret = check_fmt(ops, b->type);
- ret = ops->vidioc_s_audout(file, fh, p);
- break;
- }
- case VIDIOC_G_MODULATOR:
- {
- struct v4l2_modulator *p = arg;
-
- ret = ops->vidioc_g_modulator(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, "
- "capability=%d, rangelow=%d,"
- " rangehigh=%d, txsubchans=%d\n",
- p->index, p->name, p->capability,
- p->rangelow, p->rangehigh,
- p->txsubchans);
- break;
- }
- case VIDIOC_S_MODULATOR:
- {
- struct v4l2_modulator *p = arg;
-
- dbgarg(cmd, "index=%d, name=%s, capability=%d, "
- "rangelow=%d, rangehigh=%d, txsubchans=%d\n",
- p->index, p->name, p->capability, p->rangelow,
- p->rangehigh, p->txsubchans);
- ret = ops->vidioc_s_modulator(file, fh, p);
- break;
- }
- case VIDIOC_G_CROP:
- {
- struct v4l2_crop *p = arg;
+ return ret ? ret : ops->vidioc_prepare_buf(file, fh, b);
+}
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
+static int v4l_g_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_streamparm *p = arg;
+ v4l2_std_id std;
+ int ret = check_fmt(ops, p->type);
- if (ops->vidioc_g_crop) {
- ret = ops->vidioc_g_crop(file, fh, p);
- } else {
- /* simulate capture crop using selection api */
- struct v4l2_selection s = {
- .type = p->type,
- };
-
- /* crop means compose for output devices */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
- else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
-
- ret = ops->vidioc_g_selection(file, fh, &s);
-
- /* copying results to old structure on success */
- if (!ret)
- p->c = s.r;
- }
+ if (ret)
+ return ret;
+ if (ops->vidioc_g_parm)
+ return ops->vidioc_g_parm(file, fh, p);
+ std = vfd->current_norm;
+ if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+ p->parm.capture.readbuffers = 2;
+ if (ops->vidioc_g_std)
+ ret = ops->vidioc_g_std(file, fh, &std);
+ if (ret == 0)
+ v4l2_video_std_frame_period(std,
+ &p->parm.capture.timeperframe);
+ return ret;
+}
- if (!ret)
- dbgrect(vfd, "", &p->c);
- break;
- }
- case VIDIOC_S_CROP:
- {
- struct v4l2_crop *p = arg;
+static int v4l_s_parm(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_streamparm *p = arg;
+ int ret = check_fmt(ops, p->type);
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- dbgrect(vfd, "", &p->c);
+ return ret ? ret : ops->vidioc_s_parm(file, fh, p);
+}
- if (ops->vidioc_s_crop) {
- ret = ops->vidioc_s_crop(file, fh, p);
- } else {
- /* simulate capture crop using selection api */
- struct v4l2_selection s = {
- .type = p->type,
- .r = p->c,
- };
-
- /* crop means compose for output devices */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
- else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
-
- ret = ops->vidioc_s_selection(file, fh, &s);
- }
- break;
- }
- case VIDIOC_G_SELECTION:
- {
- struct v4l2_selection *p = arg;
+static int v4l_queryctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_queryctrl *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_queryctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_queryctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_queryctrl)
+ return ops->vidioc_queryctrl(file, fh, p);
+ return -ENOTTY;
+}
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
+static int v4l_querymenu(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_querymenu *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_querymenu(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_querymenu(vfd->ctrl_handler, p);
+ if (ops->vidioc_querymenu)
+ return ops->vidioc_querymenu(file, fh, p);
+ return -ENOTTY;
+}
- ret = ops->vidioc_g_selection(file, fh, p);
- if (!ret)
- dbgrect(vfd, "", &p->r);
- break;
+static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ctrl(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ctrl(vfd->ctrl_handler, p);
+ if (ops->vidioc_g_ctrl)
+ return ops->vidioc_g_ctrl(file, fh, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+
+ ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (check_ext_ctrls(&ctrls, 1)) {
+ int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
+
+ if (ret == 0)
+ p->value = ctrl.value;
+ return ret;
}
- case VIDIOC_S_SELECTION:
- {
- struct v4l2_selection *p = arg;
+ return -EINVAL;
+}
+static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_control *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+ struct v4l2_ext_controls ctrls;
+ struct v4l2_ext_control ctrl;
+
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ctrl(NULL, vfd->ctrl_handler, p);
+ if (ops->vidioc_s_ctrl)
+ return ops->vidioc_s_ctrl(file, fh, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+
+ ctrls.ctrl_class = V4L2_CTRL_ID2CLASS(p->id);
+ ctrls.count = 1;
+ ctrls.controls = &ctrl;
+ ctrl.id = p->id;
+ ctrl.value = p->value;
+ if (check_ext_ctrls(&ctrls, 1))
+ return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ return -EINVAL;
+}
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- dbgrect(vfd, "", &p->r);
+static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_g_ext_ctrls(vfd->ctrl_handler, p);
+ if (ops->vidioc_g_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
- ret = ops->vidioc_s_selection(file, fh, p);
- break;
- }
- case VIDIOC_CROPCAP:
- {
- struct v4l2_cropcap *p = arg;
-
- /*FIXME: Should also show v4l2_fract pixelaspect */
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- if (ops->vidioc_cropcap) {
- ret = ops->vidioc_cropcap(file, fh, p);
- } else {
- struct v4l2_selection s = { .type = p->type };
+static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_s_ext_ctrls(NULL, vfd->ctrl_handler, p);
+ if (ops->vidioc_s_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
- /* obtaining bounds */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
- else
- s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_ext_controls *p = arg;
+ struct v4l2_fh *vfh =
+ test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
+
+ p->error_idx = p->count;
+ if (vfh && vfh->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfh->ctrl_handler, p);
+ if (vfd->ctrl_handler)
+ return v4l2_try_ext_ctrls(vfd->ctrl_handler, p);
+ if (ops->vidioc_try_ext_ctrls == NULL)
+ return -ENOTTY;
+ return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
+ -EINVAL;
+}
- ret = ops->vidioc_g_selection(file, fh, &s);
- if (ret)
- break;
- p->bounds = s.r;
+static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ };
+ int ret;
+
+ if (ops->vidioc_g_crop)
+ return ops->vidioc_g_crop(file, fh, p);
+ /* simulate capture crop using selection api */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+
+ /* copying results to old structure on success */
+ if (!ret)
+ p->c = s.r;
+ return ret;
+}
- /* obtaining defrect */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
- else
- s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_crop *p = arg;
+ struct v4l2_selection s = {
+ .type = p->type,
+ .r = p->c,
+ };
+
+ if (ops->vidioc_s_crop)
+ return ops->vidioc_s_crop(file, fh, p);
+ /* simulate capture crop using selection api */
+
+ /* crop means compose for output devices */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ else
+ s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+
+ return ops->vidioc_s_selection(file, fh, &s);
+}
- ret = ops->vidioc_g_selection(file, fh, &s);
- if (ret)
- break;
- p->defrect = s.r;
+static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_cropcap *p = arg;
+ struct v4l2_selection s = { .type = p->type };
+ int ret;
- /* setting trivial pixelaspect */
- p->pixelaspect.numerator = 1;
- p->pixelaspect.denominator = 1;
- }
+ if (ops->vidioc_cropcap)
+ return ops->vidioc_cropcap(file, fh, p);
- if (!ret) {
- dbgrect(vfd, "bounds ", &p->bounds);
- dbgrect(vfd, "defrect ", &p->defrect);
- }
- break;
- }
- case VIDIOC_G_JPEGCOMP:
- {
- struct v4l2_jpegcompression *p = arg;
-
- ret = ops->vidioc_g_jpegcomp(file, fh, p);
- if (!ret)
- dbgarg(cmd, "quality=%d, APPn=%d, "
- "APP_len=%d, COM_len=%d, "
- "jpeg_markers=%d\n",
- p->quality, p->APPn, p->APP_len,
- p->COM_len, p->jpeg_markers);
- break;
- }
- case VIDIOC_S_JPEGCOMP:
- {
- struct v4l2_jpegcompression *p = arg;
-
- dbgarg(cmd, "quality=%d, APPn=%d, APP_len=%d, "
- "COM_len=%d, jpeg_markers=%d\n",
- p->quality, p->APPn, p->APP_len,
- p->COM_len, p->jpeg_markers);
- ret = ops->vidioc_s_jpegcomp(file, fh, p);
- break;
- }
- case VIDIOC_G_ENC_INDEX:
- {
- struct v4l2_enc_idx *p = arg;
-
- ret = ops->vidioc_g_enc_index(file, fh, p);
- if (!ret)
- dbgarg(cmd, "entries=%d, entries_cap=%d\n",
- p->entries, p->entries_cap);
- break;
- }
- case VIDIOC_ENCODER_CMD:
- {
- struct v4l2_encoder_cmd *p = arg;
+ /* obtaining bounds */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+ else
+ s.target = V4L2_SEL_TGT_CROP_BOUNDS;
- ret = ops->vidioc_encoder_cmd(file, fh, p);
- if (!ret)
- dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
- break;
- }
- case VIDIOC_TRY_ENCODER_CMD:
- {
- struct v4l2_encoder_cmd *p = arg;
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->bounds = s.r;
- ret = ops->vidioc_try_encoder_cmd(file, fh, p);
- if (!ret)
- dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
- break;
- }
- case VIDIOC_DECODER_CMD:
- {
- struct v4l2_decoder_cmd *p = arg;
+ /* obtaining defrect */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+ else
+ s.target = V4L2_SEL_TGT_CROP_DEFAULT;
- ret = ops->vidioc_decoder_cmd(file, fh, p);
- if (!ret)
- dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
- break;
- }
- case VIDIOC_TRY_DECODER_CMD:
- {
- struct v4l2_decoder_cmd *p = arg;
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->defrect = s.r;
- ret = ops->vidioc_try_decoder_cmd(file, fh, p);
- if (!ret)
- dbgarg(cmd, "cmd=%d, flags=%x\n", p->cmd, p->flags);
- break;
- }
- case VIDIOC_G_PARM:
- {
- struct v4l2_streamparm *p = arg;
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
+ return 0;
+}
- if (ops->vidioc_g_parm) {
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
- ret = ops->vidioc_g_parm(file, fh, p);
- } else {
- v4l2_std_id std = vfd->current_norm;
+static int v4l_log_status(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ int ret;
+
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================= START STATUS =================\n",
+ vfd->v4l2_dev->name);
+ ret = ops->vidioc_log_status(file, fh);
+ if (vfd->v4l2_dev)
+ pr_info("%s: ================== END STATUS ==================\n",
+ vfd->v4l2_dev->name);
+ return ret;
+}
- ret = -EINVAL;
- if (p->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- break;
+static int v4l_dbg_g_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct v4l2_dbg_register *p = arg;
- ret = 0;
- p->parm.capture.readbuffers = 2;
- if (ops->vidioc_g_std)
- ret = ops->vidioc_g_std(file, fh, &std);
- if (ret == 0)
- v4l2_video_std_frame_period(std,
- &p->parm.capture.timeperframe);
- }
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return ops->vidioc_g_register(file, fh, p);
+#else
+ return -ENOTTY;
+#endif
+}
- dbgarg(cmd, "type=%d\n", p->type);
- break;
- }
- case VIDIOC_S_PARM:
- {
- struct v4l2_streamparm *p = arg;
+static int v4l_dbg_s_register(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ struct v4l2_dbg_register *p = arg;
- ret = check_fmt(ops, p->type);
- if (ret)
- break;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return ops->vidioc_s_register(file, fh, p);
+#else
+ return -ENOTTY;
+#endif
+}
- dbgarg(cmd, "type=%d\n", p->type);
- ret = ops->vidioc_s_parm(file, fh, p);
- break;
- }
- case VIDIOC_G_TUNER:
- {
- struct v4l2_tuner *p = arg;
+static int v4l_dbg_g_chip_ident(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_dbg_chip_ident *p = arg;
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- ret = ops->vidioc_g_tuner(file, fh, p);
- if (!ret)
- dbgarg(cmd, "index=%d, name=%s, type=%d, "
- "capability=0x%x, rangelow=%d, "
- "rangehigh=%d, signal=%d, afc=%d, "
- "rxsubchans=0x%x, audmode=%d\n",
- p->index, p->name, p->type,
- p->capability, p->rangelow,
- p->rangehigh, p->signal, p->afc,
- p->rxsubchans, p->audmode);
- break;
- }
- case VIDIOC_S_TUNER:
- {
- struct v4l2_tuner *p = arg;
+ p->ident = V4L2_IDENT_NONE;
+ p->revision = 0;
+ return ops->vidioc_g_chip_ident(file, fh, p);
+}
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- dbgarg(cmd, "index=%d, name=%s, type=%d, "
- "capability=0x%x, rangelow=%d, "
- "rangehigh=%d, signal=%d, afc=%d, "
- "rxsubchans=0x%x, audmode=%d\n",
- p->index, p->name, p->type,
- p->capability, p->rangelow,
- p->rangehigh, p->signal, p->afc,
- p->rxsubchans, p->audmode);
- ret = ops->vidioc_s_tuner(file, fh, p);
- break;
- }
- case VIDIOC_G_FREQUENCY:
- {
- struct v4l2_frequency *p = arg;
+static int v4l_dqevent(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return v4l2_event_dequeue(fh, arg, file->f_flags & O_NONBLOCK);
+}
- p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- ret = ops->vidioc_g_frequency(file, fh, p);
- if (!ret)
- dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
- p->tuner, p->type, p->frequency);
- break;
- }
- case VIDIOC_S_FREQUENCY:
- {
- struct v4l2_frequency *p = arg;
- enum v4l2_tuner_type type;
+static int v4l_subscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_subscribe_event(fh, arg);
+}
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
- V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
- p->tuner, p->type, p->frequency);
- if (p->type != type)
- ret = -EINVAL;
- else
- ret = ops->vidioc_s_frequency(file, fh, p);
- break;
- }
- case VIDIOC_G_SLICED_VBI_CAP:
- {
- struct v4l2_sliced_vbi_cap *p = arg;
+static int v4l_unsubscribe_event(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ return ops->vidioc_unsubscribe_event(fh, arg);
+}
- /* Clear up to type, everything after type is zerod already */
- memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
+static int v4l_g_sliced_vbi_cap(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_sliced_vbi_cap *p = arg;
- dbgarg(cmd, "type=%s\n", prt_names(p->type, v4l2_type_names));
- ret = ops->vidioc_g_sliced_vbi_cap(file, fh, p);
- if (!ret)
- dbgarg2("service_set=%d\n", p->service_set);
- break;
- }
- case VIDIOC_LOG_STATUS:
- {
- if (vfd->v4l2_dev)
- pr_info("%s: ================= START STATUS =================\n",
- vfd->v4l2_dev->name);
- ret = ops->vidioc_log_status(file, fh);
- if (vfd->v4l2_dev)
- pr_info("%s: ================== END STATUS ==================\n",
- vfd->v4l2_dev->name);
- break;
- }
- case VIDIOC_DBG_G_REGISTER:
- {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- struct v4l2_dbg_register *p = arg;
+ /* Clear up to type, everything after type is zeroed already */
+ memset(p, 0, offsetof(struct v4l2_sliced_vbi_cap, type));
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_g_register(file, fh, p);
-#endif
- break;
- }
- case VIDIOC_DBG_S_REGISTER:
- {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- struct v4l2_dbg_register *p = arg;
+ return ops->vidioc_g_sliced_vbi_cap(file, fh, p);
+}
- if (!capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ret = ops->vidioc_s_register(file, fh, p);
-#endif
- break;
- }
- case VIDIOC_DBG_G_CHIP_IDENT:
- {
- struct v4l2_dbg_chip_ident *p = arg;
-
- p->ident = V4L2_IDENT_NONE;
- p->revision = 0;
- ret = ops->vidioc_g_chip_ident(file, fh, p);
- if (!ret)
- dbgarg(cmd, "chip_ident=%u, revision=0x%x\n", p->ident, p->revision);
- break;
- }
- case VIDIOC_S_HW_FREQ_SEEK:
- {
- struct v4l2_hw_freq_seek *p = arg;
- enum v4l2_tuner_type type;
+static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ struct v4l2_frequency_band *p = arg;
+ enum v4l2_tuner_type type;
+ int err;
- type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
- dbgarg(cmd,
- "tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
- p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
- if (p->type != type)
- ret = -EINVAL;
- else
- ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
- break;
- }
- case VIDIOC_ENUM_FRAMESIZES:
- {
- struct v4l2_frmsizeenum *p = arg;
- ret = ops->vidioc_enum_framesizes(file, fh, p);
- dbgarg(cmd,
- "index=%d, pixelformat=%c%c%c%c, type=%d ",
- p->index,
- (p->pixel_format & 0xff),
- (p->pixel_format >> 8) & 0xff,
- (p->pixel_format >> 16) & 0xff,
- (p->pixel_format >> 24) & 0xff,
- p->type);
- switch (p->type) {
- case V4L2_FRMSIZE_TYPE_DISCRETE:
- dbgarg3("width = %d, height=%d\n",
- p->discrete.width, p->discrete.height);
- break;
- case V4L2_FRMSIZE_TYPE_STEPWISE:
- dbgarg3("min %dx%d, max %dx%d, step %dx%d\n",
- p->stepwise.min_width, p->stepwise.min_height,
- p->stepwise.step_width, p->stepwise.step_height,
- p->stepwise.max_width, p->stepwise.max_height);
- break;
- case V4L2_FRMSIZE_TYPE_CONTINUOUS:
- dbgarg3("continuous\n");
- break;
- default:
- dbgarg3("- Unknown type!\n");
- }
-
- break;
- }
- case VIDIOC_ENUM_FRAMEINTERVALS:
- {
- struct v4l2_frmivalenum *p = arg;
-
- ret = ops->vidioc_enum_frameintervals(file, fh, p);
- dbgarg(cmd,
- "index=%d, pixelformat=%d, width=%d, height=%d, type=%d ",
- p->index, p->pixel_format,
- p->width, p->height, p->type);
- switch (p->type) {
- case V4L2_FRMIVAL_TYPE_DISCRETE:
- dbgarg2("fps=%d/%d\n",
- p->discrete.numerator,
- p->discrete.denominator);
- break;
- case V4L2_FRMIVAL_TYPE_STEPWISE:
- dbgarg2("min=%d/%d, max=%d/%d, step=%d/%d\n",
- p->stepwise.min.numerator,
- p->stepwise.min.denominator,
- p->stepwise.max.numerator,
- p->stepwise.max.denominator,
- p->stepwise.step.numerator,
- p->stepwise.step.denominator);
- break;
- case V4L2_FRMIVAL_TYPE_CONTINUOUS:
- dbgarg2("continuous\n");
- break;
- default:
- dbgarg2("- Unknown type!\n");
- }
- break;
+ if (type != p->type)
+ return -EINVAL;
+ if (ops->vidioc_enum_freq_bands)
+ return ops->vidioc_enum_freq_bands(file, fh, p);
+ if (ops->vidioc_g_tuner) {
+ struct v4l2_tuner t = {
+ .index = p->tuner,
+ .type = type,
+ };
+
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_tuner(file, fh, &t);
+ if (err)
+ return err;
+ p->capability = t.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = t.rangelow;
+ p->rangehigh = t.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
}
- case VIDIOC_ENUM_DV_PRESETS:
- {
- struct v4l2_dv_enum_preset *p = arg;
-
- ret = ops->vidioc_enum_dv_presets(file, fh, p);
- if (!ret)
- dbgarg(cmd,
- "index=%d, preset=%d, name=%s, width=%d,"
- " height=%d ",
- p->index, p->preset, p->name, p->width,
- p->height);
- break;
+ if (ops->vidioc_g_modulator) {
+ struct v4l2_modulator m = {
+ .index = p->tuner,
+ };
+
+ if (type != V4L2_TUNER_RADIO)
+ return -EINVAL;
+ if (p->index)
+ return -EINVAL;
+ err = ops->vidioc_g_modulator(file, fh, &m);
+ if (err)
+ return err;
+ p->capability = m.capability | V4L2_TUNER_CAP_FREQ_BANDS;
+ p->rangelow = m.rangelow;
+ p->rangehigh = m.rangehigh;
+ p->modulation = (type == V4L2_TUNER_RADIO) ?
+ V4L2_BAND_MODULATION_FM : V4L2_BAND_MODULATION_VSB;
+ return 0;
}
- case VIDIOC_S_DV_PRESET:
- {
- struct v4l2_dv_preset *p = arg;
+ return -ENOTTY;
+}
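[Editor's note, not part of the patch: with the fallback above, VIDIOC_ENUM_FREQ_BANDS works even for drivers that only implement vidioc_g_tuner or vidioc_g_modulator; the core synthesizes a single band from the tuner's rangelow/rangehigh. A minimal user-space sketch of enumerating bands; the device node path is an assumption.]

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	/* hypothetical radio device node */
	int fd = open("/dev/radio0", O_RDWR);
	struct v4l2_frequency_band band;

	if (fd < 0)
		return 1;
	for (unsigned int i = 0; ; i++) {
		memset(&band, 0, sizeof(band));
		band.tuner = 0;
		band.type = V4L2_TUNER_RADIO;
		band.index = i;
		/* Succeeds at least once even without vidioc_enum_freq_bands,
		   thanks to the g_tuner/g_modulator fallback shown above. */
		if (ioctl(fd, VIDIOC_ENUM_FREQ_BANDS, &band) < 0)
			break;
		printf("band %u: %u..%u\n", i, band.rangelow, band.rangehigh);
	}
	return 0;
}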
- dbgarg(cmd, "preset=%d\n", p->preset);
- ret = ops->vidioc_s_dv_preset(file, fh, p);
- break;
- }
- case VIDIOC_G_DV_PRESET:
- {
- struct v4l2_dv_preset *p = arg;
+struct v4l2_ioctl_info {
+ unsigned int ioctl;
+ u32 flags;
+ const char * const name;
+ union {
+ u32 offset;
+ int (*func)(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *p);
+ } u;
+ void (*debug)(const void *arg, bool write_only);
+};
- ret = ops->vidioc_g_dv_preset(file, fh, p);
- if (!ret)
- dbgarg(cmd, "preset=%d\n", p->preset);
- break;
+/* This ioctl needs a priority check */
+#define INFO_FL_PRIO (1 << 0)
+/* This ioctl can be valid if the filehandle has a control handler. */
+#define INFO_FL_CTRL (1 << 1)
+/* This is a standard ioctl, no need for special code */
+#define INFO_FL_STD (1 << 2)
+/* This ioctl has its own function */
+#define INFO_FL_FUNC (1 << 3)
+/* Queuing ioctl */
+#define INFO_FL_QUEUE (1 << 4)
+/* Zero the struct from after this field to the end */
+#define INFO_FL_CLEAR(v4l2_struct, field) \
+ ((offsetof(struct v4l2_struct, field) + \
+ sizeof(((struct v4l2_struct *)0)->field)) << 16)
+#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
+
+#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags | INFO_FL_STD, \
+ .name = #_ioctl, \
+ .u.offset = offsetof(struct v4l2_ioctl_ops, _vidioc), \
+ .debug = _debug, \
+ }
+
+#define IOCTL_INFO_FNC(_ioctl, _func, _debug, _flags) \
+ [_IOC_NR(_ioctl)] = { \
+ .ioctl = _ioctl, \
+ .flags = _flags | INFO_FL_FUNC, \
+ .name = #_ioctl, \
+ .u.func = _func, \
+ .debug = _debug, \
}
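
/*
 * A minimal sketch (clear_size_example() is hypothetical, not part of the
 * table machinery above) of how the INFO_FL_CLEAR() encoding round-trips:
 * the number of "input" bytes is stored in the upper 16 bits of the flags
 * word and recovered with INFO_FL_CLEAR_MASK.
 */
static inline unsigned int clear_size_example(void)
{
	/* encode: bytes up to and including v4l2_tuner::index (here 4) */
	u32 flags = INFO_FL_CLEAR(v4l2_tuner, index);

	/* decode: how many bytes must be copied in from userspace */
	return (flags & INFO_FL_CLEAR_MASK) >> 16;
}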
- case VIDIOC_QUERY_DV_PRESET:
- {
- struct v4l2_dv_preset *p = arg;
- ret = ops->vidioc_query_dv_preset(file, fh, p);
- if (!ret)
- dbgarg(cmd, "preset=%d\n", p->preset);
- break;
- }
- case VIDIOC_S_DV_TIMINGS:
- {
- struct v4l2_dv_timings *p = arg;
-
- dbgtimings(vfd, p);
- switch (p->type) {
- case V4L2_DV_BT_656_1120:
- ret = ops->vidioc_s_dv_timings(file, fh, p);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- break;
- }
- case VIDIOC_G_DV_TIMINGS:
- {
- struct v4l2_dv_timings *p = arg;
+static struct v4l2_ioctl_info v4l2_ioctls[] = {
+ IOCTL_INFO_FNC(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
+ IOCTL_INFO_FNC(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
+ IOCTL_INFO_FNC(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, INFO_FL_CLEAR(v4l2_format, type)),
+ IOCTL_INFO_FNC(VIDIOC_S_FMT, v4l_s_fmt, v4l_print_format, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_REQBUFS, v4l_reqbufs, v4l_print_requestbuffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_QUERYBUF, v4l_querybuf, v4l_print_buffer, INFO_FL_QUEUE | INFO_FL_CLEAR(v4l2_buffer, length)),
+ IOCTL_INFO_STD(VIDIOC_G_FBUF, vidioc_g_fbuf, v4l_print_framebuffer, 0),
+ IOCTL_INFO_STD(VIDIOC_S_FBUF, vidioc_s_fbuf, v4l_print_framebuffer, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_OVERLAY, vidioc_overlay, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QBUF, v4l_qbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_DQBUF, v4l_dqbuf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_STREAMON, v4l_streamon, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_STREAMOFF, v4l_streamoff, v4l_print_buftype, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_G_PARM, v4l_g_parm, v4l_print_streamparm, INFO_FL_CLEAR(v4l2_streamparm, type)),
+ IOCTL_INFO_FNC(VIDIOC_S_PARM, v4l_s_parm, v4l_print_streamparm, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_STD, v4l_g_std, v4l_print_std, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_STD, v4l_s_std, v4l_print_std, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_ENUMSTD, v4l_enumstd, v4l_print_standard, INFO_FL_CLEAR(v4l2_standard, index)),
+ IOCTL_INFO_FNC(VIDIOC_ENUMINPUT, v4l_enuminput, v4l_print_enuminput, INFO_FL_CLEAR(v4l2_input, index)),
+ IOCTL_INFO_FNC(VIDIOC_G_CTRL, v4l_g_ctrl, v4l_print_control, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_control, id)),
+ IOCTL_INFO_FNC(VIDIOC_S_CTRL, v4l_s_ctrl, v4l_print_control, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_G_TUNER, v4l_g_tuner, v4l_print_tuner, INFO_FL_CLEAR(v4l2_tuner, index)),
+ IOCTL_INFO_FNC(VIDIOC_S_TUNER, v4l_s_tuner, v4l_print_tuner, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_AUDIO, vidioc_g_audio, v4l_print_audio, 0),
+ IOCTL_INFO_STD(VIDIOC_S_AUDIO, vidioc_s_audio, v4l_print_audio, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QUERYCTRL, v4l_queryctrl, v4l_print_queryctrl, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_queryctrl, id)),
+ IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
+ IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
+ IOCTL_INFO_STD(VIDIOC_G_AUDOUT, vidioc_g_audout, v4l_print_audioout, 0),
+ IOCTL_INFO_STD(VIDIOC_S_AUDOUT, vidioc_s_audout, v4l_print_audioout, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_MODULATOR, v4l_g_modulator, v4l_print_modulator, INFO_FL_CLEAR(v4l2_modulator, index)),
+ IOCTL_INFO_STD(VIDIOC_S_MODULATOR, vidioc_s_modulator, v4l_print_modulator, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_FREQUENCY, v4l_g_frequency, v4l_print_frequency, INFO_FL_CLEAR(v4l2_frequency, tuner)),
+ IOCTL_INFO_FNC(VIDIOC_S_FREQUENCY, v4l_s_frequency, v4l_print_frequency, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
+ IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
+ IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, 0),
+ IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
+ IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
+ IOCTL_INFO_FNC(VIDIOC_TRY_FMT, v4l_try_fmt, v4l_print_format, 0),
+ IOCTL_INFO_STD(VIDIOC_ENUMAUDIO, vidioc_enumaudio, v4l_print_audio, INFO_FL_CLEAR(v4l2_audio, index)),
+ IOCTL_INFO_STD(VIDIOC_ENUMAUDOUT, vidioc_enumaudout, v4l_print_audioout, INFO_FL_CLEAR(v4l2_audioout, index)),
+ IOCTL_INFO_FNC(VIDIOC_G_PRIORITY, v4l_g_priority, v4l_print_u32, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_PRIORITY, v4l_s_priority, v4l_print_u32, INFO_FL_PRIO),
+ IOCTL_INFO_FNC(VIDIOC_G_SLICED_VBI_CAP, v4l_g_sliced_vbi_cap, v4l_print_sliced_vbi_cap, INFO_FL_CLEAR(v4l2_sliced_vbi_cap, type)),
+ IOCTL_INFO_FNC(VIDIOC_LOG_STATUS, v4l_log_status, v4l_print_newline, 0),
+ IOCTL_INFO_FNC(VIDIOC_G_EXT_CTRLS, v4l_g_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_S_EXT_CTRLS, v4l_s_ext_ctrls, v4l_print_ext_controls, INFO_FL_PRIO | INFO_FL_CTRL),
+ IOCTL_INFO_FNC(VIDIOC_TRY_EXT_CTRLS, v4l_try_ext_ctrls, v4l_print_ext_controls, INFO_FL_CTRL),
+ IOCTL_INFO_STD(VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes, v4l_print_frmsizeenum, INFO_FL_CLEAR(v4l2_frmsizeenum, pixel_format)),
+ IOCTL_INFO_STD(VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals, v4l_print_frmivalenum, INFO_FL_CLEAR(v4l2_frmivalenum, height)),
+ IOCTL_INFO_STD(VIDIOC_G_ENC_INDEX, vidioc_g_enc_index, v4l_print_enc_idx, 0),
+ IOCTL_INFO_STD(VIDIOC_ENCODER_CMD, vidioc_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO_STD(VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd, v4l_print_encoder_cmd, INFO_FL_CLEAR(v4l2_encoder_cmd, flags)),
+ IOCTL_INFO_STD(VIDIOC_DECODER_CMD, vidioc_decoder_cmd, v4l_print_decoder_cmd, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd, v4l_print_decoder_cmd, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_S_REGISTER, v4l_dbg_s_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_G_REGISTER, v4l_dbg_g_register, v4l_print_dbg_register, 0),
+ IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_IDENT, v4l_dbg_g_chip_ident, v4l_print_dbg_chip_ident, 0),
+ IOCTL_INFO_FNC(VIDIOC_S_HW_FREQ_SEEK, v4l_s_hw_freq_seek, v4l_print_hw_freq_seek, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_ENUM_DV_PRESETS, vidioc_enum_dv_presets, v4l_print_dv_enum_presets, 0),
+ IOCTL_INFO_STD(VIDIOC_S_DV_PRESET, vidioc_s_dv_preset, v4l_print_dv_preset, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_DV_PRESET, vidioc_g_dv_preset, v4l_print_dv_preset, 0),
+ IOCTL_INFO_STD(VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset, v4l_print_dv_preset, 0),
+ IOCTL_INFO_STD(VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings, v4l_print_dv_timings, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO_FNC(VIDIOC_DQEVENT, v4l_dqevent, v4l_print_event, 0),
+ IOCTL_INFO_FNC(VIDIOC_SUBSCRIBE_EVENT, v4l_subscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO_FNC(VIDIOC_UNSUBSCRIBE_EVENT, v4l_unsubscribe_event, v4l_print_event_subscription, 0),
+ IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
+ IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
+ IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
+ IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
+};
+#define V4L2_IOCTLS ARRAY_SIZE(v4l2_ioctls)
- ret = ops->vidioc_g_dv_timings(file, fh, p);
- if (!ret)
- dbgtimings(vfd, p);
- break;
- }
- case VIDIOC_ENUM_DV_TIMINGS:
- {
- struct v4l2_enum_dv_timings *p = arg;
+bool v4l2_is_known_ioctl(unsigned int cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return false;
+ return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
+}
- if (!ops->vidioc_enum_dv_timings)
- break;
+struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd)
+{
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS)
+ return vdev->lock;
+ if (test_bit(_IOC_NR(cmd), vdev->disable_locking))
+ return NULL;
+ if (vdev->queue && vdev->queue->lock &&
+ (v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE))
+ return vdev->queue->lock;
+ return vdev->lock;
+}
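
/*
 * A minimal sketch (example_disable_dqbuf_locking() is illustrative) of how
 * a driver that serializes one ioctl itself can opt out of the core lock:
 * setting the bit in disable_locking makes v4l2_ioctl_get_lock() return
 * NULL for that command, so neither vdev->lock nor the queue lock is taken
 * around it.
 */
static void example_disable_dqbuf_locking(struct video_device *vdev)
{
	set_bit(_IOC_NR(VIDIOC_DQBUF), vdev->disable_locking);
}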
- ret = ops->vidioc_enum_dv_timings(file, fh, p);
- if (!ret) {
- dbgarg(cmd, "index=%d: ", p->index);
- dbgtimings(vfd, &p->timings);
- }
- break;
- }
- case VIDIOC_QUERY_DV_TIMINGS:
- {
- struct v4l2_dv_timings *p = arg;
+/* Common ioctl debug function. This function can be used to print
+   external (non-V4L2) ioctls as well as the internal V4L2 ioctls. */
+void v4l_printk_ioctl(const char *prefix, unsigned int cmd)
+{
+ const char *dir, *type;
- if (!ops->vidioc_query_dv_timings)
- break;
+ if (prefix)
+ printk(KERN_DEBUG "%s: ", prefix);
- ret = ops->vidioc_query_dv_timings(file, fh, p);
- if (!ret)
- dbgtimings(vfd, p);
+ switch (_IOC_TYPE(cmd)) {
+ case 'd':
+ type = "v4l2_int";
break;
- }
- case VIDIOC_DV_TIMINGS_CAP:
- {
- struct v4l2_dv_timings_cap *p = arg;
-
- if (!ops->vidioc_dv_timings_cap)
- break;
-
- ret = ops->vidioc_dv_timings_cap(file, fh, p);
- if (ret)
- break;
- switch (p->type) {
- case V4L2_DV_BT_656_1120:
- dbgarg(cmd,
- "type=%d, width=%u-%u, height=%u-%u, "
- "pixelclock=%llu-%llu, standards=%x, capabilities=%x ",
- p->type,
- p->bt.min_width, p->bt.max_width,
- p->bt.min_height, p->bt.max_height,
- p->bt.min_pixelclock, p->bt.max_pixelclock,
- p->bt.standards, p->bt.capabilities);
- break;
- default:
- dbgarg(cmd, "unknown type ");
+ case 'V':
+ if (_IOC_NR(cmd) >= V4L2_IOCTLS) {
+ type = "v4l2";
break;
}
+ pr_cont("%s", v4l2_ioctls[_IOC_NR(cmd)].name);
+ return;
+ default:
+ type = "unknown";
break;
}
- case VIDIOC_DQEVENT:
- {
- struct v4l2_event *ev = arg;
- ret = v4l2_event_dequeue(fh, ev, file->f_flags & O_NONBLOCK);
- if (ret < 0) {
- dbgarg(cmd, "no pending events?");
- break;
- }
- dbgarg(cmd,
- "pending=%d, type=0x%8.8x, sequence=%d, "
- "timestamp=%lu.%9.9lu ",
- ev->pending, ev->type, ev->sequence,
- ev->timestamp.tv_sec, ev->timestamp.tv_nsec);
- break;
+ switch (_IOC_DIR(cmd)) {
+ case _IOC_NONE: dir = "--"; break;
+ case _IOC_READ: dir = "r-"; break;
+ case _IOC_WRITE: dir = "-w"; break;
+ case _IOC_READ | _IOC_WRITE: dir = "rw"; break;
+ default: dir = "*ERR*"; break;
}
- case VIDIOC_SUBSCRIBE_EVENT:
- {
- struct v4l2_event_subscription *sub = arg;
+ pr_cont("%s ioctl '%c', dir=%s, #%d (0x%08x)",
+ type, _IOC_TYPE(cmd), dir, _IOC_NR(cmd), cmd);
+}
+EXPORT_SYMBOL(v4l_printk_ioctl);
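
/*
 * A minimal usage sketch (the "mydrv" prefix is illustrative): the helper
 * prints the prefix and the decoded ioctl name or type without a trailing
 * newline, so the caller finishes the line.
 */
static void example_trace_ioctl(unsigned int cmd)
{
	v4l_printk_ioctl("mydrv", cmd);
	pr_cont("\n");
}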
- ret = ops->vidioc_subscribe_event(fh, sub);
- if (ret < 0) {
- dbgarg(cmd, "failed, ret=%ld", ret);
- break;
- }
- dbgarg(cmd, "type=0x%8.8x", sub->type);
- break;
- }
- case VIDIOC_UNSUBSCRIBE_EVENT:
- {
- struct v4l2_event_subscription *sub = arg;
+static long __video_do_ioctl(struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct video_device *vfd = video_devdata(file);
+ const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
+ bool write_only = false;
+ struct v4l2_ioctl_info default_info;
+ const struct v4l2_ioctl_info *info;
+ void *fh = file->private_data;
+ struct v4l2_fh *vfh = NULL;
+ int use_fh_prio = 0;
+ int debug = vfd->debug;
+ long ret = -ENOTTY;
- ret = ops->vidioc_unsubscribe_event(fh, sub);
- if (ret < 0) {
- dbgarg(cmd, "failed, ret=%ld", ret);
- break;
- }
- dbgarg(cmd, "type=0x%8.8x", sub->type);
- break;
+ if (ops == NULL) {
+ pr_warn("%s: has no ioctl_ops.\n",
+ video_device_node_name(vfd));
+ return ret;
}
- case VIDIOC_CREATE_BUFS:
- {
- struct v4l2_create_buffers *create = arg;
- ret = check_fmt(ops, create->format.type);
- if (ret)
- break;
-
- ret = ops->vidioc_create_bufs(file, fh, create);
-
- dbgarg(cmd, "count=%d @ %d\n", create->count, create->index);
- break;
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ vfh = file->private_data;
+ use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
}
- case VIDIOC_PREPARE_BUF:
- {
- struct v4l2_buffer *b = arg;
-
- ret = check_fmt(ops, b->type);
- if (ret)
- break;
- ret = ops->vidioc_prepare_buf(file, fh, b);
+ if (v4l2_is_known_ioctl(cmd)) {
+ info = &v4l2_ioctls[_IOC_NR(cmd)];
- dbgarg(cmd, "index=%d", b->index);
- break;
- }
- default:
- if (!ops->vidioc_default)
- break;
- ret = ops->vidioc_default(file, fh, use_fh_prio ?
- v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
- cmd, arg);
- break;
- } /* switch */
+ if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
+ !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler))
+ goto done;
- if (vfd->debug & V4L2_DEBUG_IOCTL_ARG) {
- if (ret < 0) {
- v4l_print_ioctl(vfd->name, cmd);
- printk(KERN_CONT " error %ld\n", ret);
+ if (use_fh_prio && (info->flags & INFO_FL_PRIO)) {
+ ret = v4l2_prio_check(vfd->prio, vfh->prio);
+ if (ret)
+ goto done;
+ }
+ } else {
+ default_info.ioctl = cmd;
+ default_info.flags = 0;
+ default_info.debug = v4l_print_default;
+ info = &default_info;
+ }
+
+ write_only = _IOC_DIR(cmd) == _IOC_WRITE;
+ if (write_only && debug > V4L2_DEBUG_IOCTL) {
+ v4l_printk_ioctl(video_device_node_name(vfd), cmd);
+ pr_cont(": ");
+ info->debug(arg, write_only);
+ }
+ if (info->flags & INFO_FL_STD) {
+ typedef int (*vidioc_op)(struct file *file, void *fh, void *p);
+ const void *p = vfd->ioctl_ops;
+ const vidioc_op *vidioc = p + info->u.offset;
+
+ ret = (*vidioc)(file, fh, arg);
+ } else if (info->flags & INFO_FL_FUNC) {
+ ret = info->u.func(ops, file, fh, arg);
+ } else if (!ops->vidioc_default) {
+ ret = -ENOTTY;
+ } else {
+ ret = ops->vidioc_default(file, fh,
+ use_fh_prio ? v4l2_prio_check(vfd->prio, vfh->prio) >= 0 : 0,
+ cmd, arg);
+ }
+
+done:
+ if (debug) {
+ if (write_only && debug > V4L2_DEBUG_IOCTL) {
+ if (ret < 0)
+ printk(KERN_DEBUG "%s: error %ld\n",
+ video_device_node_name(vfd), ret);
+ return ret;
+ }
+ v4l_printk_ioctl(video_device_node_name(vfd), cmd);
+ if (ret < 0)
+ pr_cont(": error %ld\n", ret);
+ else if (debug == V4L2_DEBUG_IOCTL)
+ pr_cont("\n");
+ else if (_IOC_DIR(cmd) == _IOC_NONE)
+ info->debug(arg, write_only);
+ else {
+ pr_cont(": ");
+ info->debug(arg, write_only);
}
}
return ret;
}
-/* In some cases, only a few fields are used as input, i.e. when the app sets
- * "index" and then the driver fills in the rest of the structure for the thing
- * with that index. We only need to copy up the first non-input field. */
-static unsigned long cmd_input_size(unsigned int cmd)
-{
- /* Size of structure up to and including 'field' */
-#define CMDINSIZE(cmd, type, field) \
- case VIDIOC_##cmd: \
- return offsetof(struct v4l2_##type, field) + \
- sizeof(((struct v4l2_##type *)0)->field);
-
- switch (cmd) {
- CMDINSIZE(ENUM_FMT, fmtdesc, type);
- CMDINSIZE(G_FMT, format, type);
- CMDINSIZE(QUERYBUF, buffer, length);
- CMDINSIZE(G_PARM, streamparm, type);
- CMDINSIZE(ENUMSTD, standard, index);
- CMDINSIZE(ENUMINPUT, input, index);
- CMDINSIZE(G_CTRL, control, id);
- CMDINSIZE(G_TUNER, tuner, index);
- CMDINSIZE(QUERYCTRL, queryctrl, id);
- CMDINSIZE(QUERYMENU, querymenu, index);
- CMDINSIZE(ENUMOUTPUT, output, index);
- CMDINSIZE(G_MODULATOR, modulator, index);
- CMDINSIZE(G_FREQUENCY, frequency, tuner);
- CMDINSIZE(CROPCAP, cropcap, type);
- CMDINSIZE(G_CROP, crop, type);
- CMDINSIZE(ENUMAUDIO, audio, index);
- CMDINSIZE(ENUMAUDOUT, audioout, index);
- CMDINSIZE(ENCODER_CMD, encoder_cmd, flags);
- CMDINSIZE(TRY_ENCODER_CMD, encoder_cmd, flags);
- CMDINSIZE(G_SLICED_VBI_CAP, sliced_vbi_cap, type);
- CMDINSIZE(ENUM_FRAMESIZES, frmsizeenum, pixel_format);
- CMDINSIZE(ENUM_FRAMEINTERVALS, frmivalenum, height);
- default:
- return _IOC_SIZE(cmd);
- }
-}
-
static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
void * __user *user_ptr, void ***kernel_ptr)
{
@@ -2219,7 +2241,20 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
- unsigned long n = cmd_input_size(cmd);
+ unsigned int n = _IOC_SIZE(cmd);
+
+ /*
+ * In some cases, only a few fields are used as input,
+ * i.e. when the app sets "index" and then the driver
+ * fills in the rest of the structure for the thing
+ * with that index. We only need to copy up the first
+ * non-input field.
+ */
+ if (v4l2_is_known_ioctl(cmd)) {
+ u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
+ if (flags & INFO_FL_CLEAR_MASK)
+ n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ }
if (copy_from_user(parg, (void __user *)arg, n))
goto out;
diff --git a/drivers/media/video/v4l2-mem2mem.c b/drivers/media/video/v4l2-mem2mem.c
index 975d0fa938c6..97b48318aee1 100644
--- a/drivers/media/video/v4l2-mem2mem.c
+++ b/drivers/media/video/v4l2-mem2mem.c
@@ -19,6 +19,9 @@
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
@@ -407,11 +410,24 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
struct poll_table_struct *wait)
{
+ struct video_device *vfd = video_devdata(file);
+ unsigned long req_events = poll_requested_events(wait);
struct vb2_queue *src_q, *dst_q;
struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
unsigned int rc = 0;
unsigned long flags;
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ if (v4l2_event_pending(fh))
+ rc = POLLPRI;
+ else if (req_events & POLLPRI)
+ poll_wait(file, &fh->wait, wait);
+ if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
+ return rc;
+ }
+
src_q = v4l2_m2m_get_src_vq(m2m_ctx);
dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
@@ -422,7 +438,7 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
*/
if ((!src_q->streaming || list_empty(&src_q->queued_list))
&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
- rc = POLLERR;
+ rc |= POLLERR;
goto end;
}
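
/*
 * A minimal userspace sketch of what the poll() change above enables on a
 * mem2mem node: POLLPRI now signals a pending V4L2 event independently of
 * buffer I/O readiness ('fd' is assumed to be an open m2m video node that
 * has subscribed to events).
 */
#include <poll.h>

static int wait_for_event_or_buffer(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLPRI };

	return poll(&pfd, 1, -1);
}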
diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c
index db6e859b93d4..9182f81deb5b 100644
--- a/drivers/media/video/v4l2-subdev.c
+++ b/drivers/media/video/v4l2-subdev.c
@@ -245,7 +245,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
- sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+ sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
sd, pad, get_selection, subdev_fh, &sel);
@@ -274,7 +274,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
- sel.target = V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL;
+ sel.target = V4L2_SEL_TGT_CROP;
sel.r = crop->rect;
rval = v4l2_subdev_call(
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 308e150a39bc..eb404c2ce270 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -963,7 +963,7 @@ static int viacam_do_try_fmt(struct via_camera *cam,
upix->pixelformat = f->pixelformat;
viacam_fmt_pre(upix, spix);
- v4l2_fill_mbus_format(&mbus_fmt, upix, f->mbus_code);
+ v4l2_fill_mbus_format(&mbus_fmt, spix, f->mbus_code);
ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
v4l2_fill_pix_format(spix, &mbus_fmt);
viacam_fmt_post(upix, spix);
diff --git a/drivers/media/video/videobuf-core.c b/drivers/media/video/videobuf-core.c
index ffdf59cfe405..bf7a326b1cdc 100644
--- a/drivers/media/video/videobuf-core.c
+++ b/drivers/media/video/videobuf-core.c
@@ -359,11 +359,6 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
break;
}
- if (vb->input != UNSET) {
- b->flags |= V4L2_BUF_FLAG_INPUT;
- b->input = vb->input;
- }
-
b->field = vb->field;
b->timestamp = vb->ts;
b->bytesused = vb->size;
@@ -402,7 +397,6 @@ int __videobuf_mmap_setup(struct videobuf_queue *q,
break;
q->bufs[i]->i = i;
- q->bufs[i]->input = UNSET;
q->bufs[i]->memory = memory;
q->bufs[i]->bsize = bsize;
switch (memory) {
@@ -566,16 +560,6 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
goto done;
}
- if (b->flags & V4L2_BUF_FLAG_INPUT) {
- if (b->input >= q->inputs) {
- dprintk(1, "qbuf: wrong input.\n");
- goto done;
- }
- buf->input = b->input;
- } else {
- buf->input = UNSET;
- }
-
switch (b->memory) {
case V4L2_MEMORY_MMAP:
if (0 == buf->baddr) {
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index b6b5cc1a43cb..3a43ba0959bf 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -40,7 +40,7 @@ struct videobuf_dma_contig_memory {
static int __videobuf_dc_alloc(struct device *dev,
struct videobuf_dma_contig_memory *mem,
- unsigned long size, unsigned long flags)
+ unsigned long size, gfp_t flags)
{
mem->size = size;
if (mem->cached) {
@@ -56,7 +56,7 @@ static int __videobuf_dc_alloc(struct device *dev,
dev_err(dev, "dma_map_single failed\n");
free_pages_exact(mem->vaddr, mem->size);
- mem->vaddr = 0;
+ mem->vaddr = NULL;
return err;
}
}
@@ -359,32 +359,43 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
size = vma->vm_end - vma->vm_start;
size = (size < mem->size) ? size : mem->size;
- if (!mem->cached)
+ if (!mem->cached) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- pos = (unsigned long)mem->vaddr;
-
- while (size > 0) {
- page = virt_to_page((void *)pos);
- if (NULL == page) {
- dev_err(q->dev, "mmap: virt_to_page failed\n");
- __videobuf_dc_free(q->dev, mem);
- goto error;
- }
- retval = vm_insert_page(vma, start, page);
+ retval = remap_pfn_range(vma, vma->vm_start,
+ mem->dma_handle >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
if (retval) {
- dev_err(q->dev, "mmap: insert failed with error %d\n",
- retval);
- __videobuf_dc_free(q->dev, mem);
+ dev_err(q->dev, "mmap: remap failed with error %d. ",
+ retval);
+ dma_free_coherent(q->dev, mem->size,
+ mem->vaddr, mem->dma_handle);
goto error;
}
- start += PAGE_SIZE;
- pos += PAGE_SIZE;
+ } else {
+ pos = (unsigned long)mem->vaddr;
- if (size > PAGE_SIZE)
- size -= PAGE_SIZE;
- else
- size = 0;
+ while (size > 0) {
+ page = virt_to_page((void *)pos);
+ if (NULL == page) {
+ dev_err(q->dev, "mmap: virt_to_page failed\n");
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ retval = vm_insert_page(vma, start, page);
+ if (retval) {
+ dev_err(q->dev, "mmap: insert failed with error %d\n",
+ retval);
+ __videobuf_dc_free(q->dev, mem);
+ goto error;
+ }
+ start += PAGE_SIZE;
+ pos += PAGE_SIZE;
+
+ if (size > PAGE_SIZE)
+ size -= PAGE_SIZE;
+ else
+ size = 0;
+ }
}
vma->vm_ops = &videobuf_vm_ops;
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 9d4e9edbd2e7..268c7dd4f823 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -336,9 +336,9 @@ static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
struct vb2_queue *q = vb->vb2_queue;
int ret;
- /* Copy back data such as timestamp, flags, input, etc. */
+ /* Copy back data such as timestamp, flags, etc. */
memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
- b->input = vb->v4l2_buf.input;
+ b->reserved2 = vb->v4l2_buf.reserved2;
b->reserved = vb->v4l2_buf.reserved;
if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
@@ -454,7 +454,50 @@ static int __verify_mmap_ops(struct vb2_queue *q)
}
/**
- * vb2_reqbufs() - Initiate streaming
+ * __verify_memory_type() - Check whether the memory type and buffer type
+ * passed to a buffer operation are compatible with the queue.
+ */
+static int __verify_memory_type(struct vb2_queue *q,
+ enum v4l2_memory memory, enum v4l2_buf_type type)
+{
+ if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR) {
+ dprintk(1, "reqbufs: unsupported memory type\n");
+ return -EINVAL;
+ }
+
+ if (type != q->type) {
+ dprintk(1, "reqbufs: requested type is incorrect\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Place the busy tests at the end: -EBUSY can be ignored when
+ * create_bufs is called with count == 0, but count == 0 should still
+ * do the memory and type validation.
+ */
+ if (q->fileio) {
+ dprintk(1, "reqbufs: file io in progress\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * __reqbufs() - Initiate streaming
* @q: videobuf2 queue
* @req: struct passed from userspace to vidioc_reqbufs handler in driver
*
@@ -476,46 +519,16 @@ static int __verify_mmap_ops(struct vb2_queue *q)
* The return values from this function are intended to be directly returned
* from vidioc_reqbufs handler in driver.
*/
-int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
- int ret = 0;
-
- if (q->fileio) {
- dprintk(1, "reqbufs: file io in progress\n");
- return -EBUSY;
- }
-
- if (req->memory != V4L2_MEMORY_MMAP
- && req->memory != V4L2_MEMORY_USERPTR) {
- dprintk(1, "reqbufs: unsupported memory type\n");
- return -EINVAL;
- }
-
- if (req->type != q->type) {
- dprintk(1, "reqbufs: requested type is incorrect\n");
- return -EINVAL;
- }
+ int ret;
if (q->streaming) {
dprintk(1, "reqbufs: streaming active\n");
return -EBUSY;
}
- /*
- * Make sure all the required memory ops for given memory type
- * are available.
- */
- if (req->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
- dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
- return -EINVAL;
- }
-
- if (req->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
- dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
- return -EINVAL;
- }
-
if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
/*
* We already have buffers allocated, so first check if they
@@ -595,10 +608,23 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return 0;
}
+
+/**
+ * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
+ * type values.
+ * @q: videobuf2 queue
+ * @req: struct passed from userspace to vidioc_reqbufs handler in driver
+ */
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ int ret = __verify_memory_type(q, req->memory, req->type);
+
+ return ret ? ret : __reqbufs(q, req);
+}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
/**
- * vb2_create_bufs() - Allocate buffers and any required auxiliary structs
+ * __create_bufs() - Allocate buffers and any required auxiliary structs
* @q: videobuf2 queue
* @create: creation parameters, passed from userspace to vidioc_create_bufs
* handler in driver
@@ -612,40 +638,10 @@ EXPORT_SYMBOL_GPL(vb2_reqbufs);
* The return values from this function are intended to be directly returned
* from vidioc_create_bufs handler in driver.
*/
-int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
- int ret = 0;
-
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", __func__);
- return -EBUSY;
- }
-
- if (create->memory != V4L2_MEMORY_MMAP
- && create->memory != V4L2_MEMORY_USERPTR) {
- dprintk(1, "%s(): unsupported memory type\n", __func__);
- return -EINVAL;
- }
-
- if (create->format.type != q->type) {
- dprintk(1, "%s(): requested type is incorrect\n", __func__);
- return -EINVAL;
- }
-
- /*
- * Make sure all the required memory ops for given memory type
- * are available.
- */
- if (create->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
- dprintk(1, "%s(): MMAP for current setup unsupported\n", __func__);
- return -EINVAL;
- }
-
- if (create->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
- dprintk(1, "%s(): USERPTR for current setup unsupported\n", __func__);
- return -EINVAL;
- }
+ int ret;
if (q->num_buffers == VIDEO_MAX_FRAME) {
dprintk(1, "%s(): maximum number of buffers already allocated\n",
@@ -653,8 +649,6 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
return -ENOBUFS;
}
- create->index = q->num_buffers;
-
if (!q->num_buffers) {
memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
@@ -675,9 +669,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
/* Finally, allocate buffers and video memory */
ret = __vb2_queue_alloc(q, create->memory, num_buffers,
num_planes);
- if (ret < 0) {
- dprintk(1, "Memory allocation failed with error: %d\n", ret);
- return ret;
+ if (ret == 0) {
+ dprintk(1, "Memory allocation failed\n");
+ return -ENOMEM;
}
allocated_buffers = ret;
@@ -708,7 +702,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
if (ret < 0) {
__vb2_queue_free(q, allocated_buffers);
- return ret;
+ return -ENOMEM;
}
/*
@@ -719,6 +713,23 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
return 0;
}
+
+/**
+ * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
+ * memory and type values.
+ * @q: videobuf2 queue
+ * @create: creation parameters, passed from userspace to vidioc_create_bufs
+ * handler in driver
+ */
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ int ret = __verify_memory_type(q, create->memory, create->format.type);
+
+ create->index = q->num_buffers;
+ if (create->count == 0)
+ return ret != -EBUSY ? ret : 0;
+ return ret ? ret : __create_bufs(q, create);
+}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
/**
@@ -860,7 +871,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
vb->v4l2_buf.field = b->field;
vb->v4l2_buf.timestamp = b->timestamp;
- vb->v4l2_buf.input = b->input;
vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;
return 0;
@@ -2115,6 +2125,263 @@ size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
}
EXPORT_SYMBOL_GPL(vb2_write);
+
+/*
+ * The following functions are not part of the vb2 core API, but are helper
+ * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
+ * and struct vb2_ops.
+ * They contain boilerplate code that most if not all drivers would otherwise
+ * have to duplicate, and so they simplify the driver code.
+ */
+
+/* The queue is busy if there is an owner and you are not that owner. */
+static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
+{
+ return vdev->queue->owner && vdev->queue->owner != file->private_data;
+}
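
/*
 * A minimal sketch of the wiring these helpers rely on (function and lock
 * names are illustrative): the driver must point vdev->queue at its
 * vb2_queue and provide a lock, because every helper below looks the queue
 * up through video_devdata(file).
 */
static int example_register_node(struct video_device *vdev,
				 struct vb2_queue *q, struct mutex *lock)
{
	vdev->queue = q;	/* consulted by all vb2_ioctl_* and vb2_fop_* helpers */
	vdev->lock = lock;	/* ioctl serialization; q->lock may override it */
	return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}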
+
+/* vb2 ioctl helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = __verify_memory_type(vdev->queue, p->memory, p->type);
+
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = __reqbufs(vdev->queue, p);
+ /* If count == 0, then the owner has released all buffers and is no
+ longer the owner of the queue. Otherwise we have a new owner. */
+ if (res == 0)
+ vdev->queue->owner = p->count ? file->private_data : NULL;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
+
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
+
+ p->index = vdev->queue->num_buffers;
+ /* If count == 0, then just check if memory and type are valid.
+ Any -EBUSY result from __verify_memory_type can be mapped to 0. */
+ if (p->count == 0)
+ return res != -EBUSY ? res : 0;
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ res = __create_bufs(vdev->queue, p);
+ if (res == 0)
+ vdev->queue->owner = file->private_data;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
+
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_prepare_buf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
+
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
+ return vb2_querybuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
+
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_qbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
+
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
+
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamon(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
+
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev, file))
+ return -EBUSY;
+ return vb2_streamoff(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+
+/* v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_mmap(vdev->queue, vma);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_mmap);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (file->private_data == vdev->queue->owner) {
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ }
+ return v4l2_fh_release(file);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_release);
+
+ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ bool must_lock = !test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) && lock;
+ int err = -EBUSY;
+
+ if (must_lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_write(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (err >= 0)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (must_lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_write);
+
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ bool must_lock = !test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags) && lock;
+ int err = -EBUSY;
+
+ if (must_lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev, file))
+ goto exit;
+ err = vb2_read(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (err >= 0)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (must_lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_read);
+
+unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *q = vdev->queue;
+ struct mutex *lock = q->lock ? q->lock : vdev->lock;
+ unsigned long req_events = poll_requested_events(wait);
+ unsigned res;
+ void *fileio;
+ /* Yuck. We really need to get rid of this flag asap. If it is
+ set, then the core took the serialization lock before calling
+ poll(). This is being phased out, but for now we have to handle
+ this case. */
+ bool locked = test_bit(V4L2_FL_LOCK_ALL_FOPS, &vdev->flags);
+ bool must_lock = false;
+
+ /* Try to be smart: only lock if polling might start fileio,
+ otherwise locking will only introduce unwanted delays. */
+ if (q->num_buffers == 0 && q->fileio == NULL) {
+ if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
+ (req_events & (POLLIN | POLLRDNORM)))
+ must_lock = true;
+ else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
+ (req_events & (POLLOUT | POLLWRNORM)))
+ must_lock = true;
+ }
+
+ /* If locking is needed, but this helper doesn't know how, then you
+ shouldn't be using this helper but you should write your own. */
+ WARN_ON(must_lock && !locked && !lock);
+
+ if (must_lock && !locked && lock && mutex_lock_interruptible(lock))
+ return POLLERR;
+
+ fileio = q->fileio;
+
+ res = vb2_poll(vdev->queue, file, wait);
+
+ /* If fileio was started, then we have a new queue owner. */
+ if (must_lock && !fileio && q->fileio)
+ q->owner = file->private_data;
+ if (must_lock && !locked && lock)
+ mutex_unlock(lock);
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_poll);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
+#endif
+
+/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq)
+{
+ mutex_unlock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
+
+void vb2_ops_wait_finish(struct vb2_queue *vq)
+{
+ mutex_lock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
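
/*
 * A minimal sketch of a driver's vb2_ops using the two wait helpers above;
 * valid only when the driver sets vq->lock. queue_setup and buffer_queue
 * are assumed to be the driver's own callbacks.
 */
static const struct vb2_ops example_qops = {
	.queue_setup	= queue_setup,
	.buf_queue	= buffer_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};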
+
MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 08c10240e70f..a05494b71b20 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -188,6 +188,7 @@ struct vivi_dev {
struct list_head vivi_devlist;
struct v4l2_device v4l2_dev;
struct v4l2_ctrl_handler ctrl_handler;
+ struct video_device vdev;
/* controls */
struct v4l2_ctrl *brightness;
@@ -213,9 +214,6 @@ struct vivi_dev {
spinlock_t slock;
struct mutex mutex;
- /* various device info */
- struct video_device *vfd;
-
struct vivi_dmaqueue vidq;
/* Several counters */
@@ -232,7 +230,6 @@ struct vivi_dev {
struct vivi_fmt *fmt;
unsigned int width, height;
struct vb2_queue vb_vidq;
- enum v4l2_field field;
unsigned int field_count;
u8 bars[9][3];
@@ -625,7 +622,7 @@ static void vivi_fillbuff(struct vivi_dev *dev, struct vivi_buffer *buf)
dev->mv_count += 2;
- buf->vb.v4l2_buf.field = dev->field;
+ buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
dev->field_count++;
buf->vb.v4l2_buf.sequence = dev->field_count >> 1;
do_gettimeofday(&ts);
@@ -769,7 +766,13 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
struct vivi_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
- size = dev->width * dev->height * dev->pixelsize;
+ if (fmt)
+ size = fmt->fmt.pix.sizeimage;
+ else
+ size = dev->width * dev->height * dev->pixelsize;
+
+ if (size == 0)
+ return -EINVAL;
if (0 == *nbuffers)
*nbuffers = 32;
@@ -792,27 +795,6 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
return 0;
}
-static int buffer_init(struct vb2_buffer *vb)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
-
- BUG_ON(NULL == dev->fmt);
-
- /*
- * This callback is called once per buffer, after its allocation.
- *
- * Vivi does not allow changing format during streaming, but it is
- * possible to do so when streaming is paused (i.e. in streamoff state).
- * Buffers however are not freed when going into streamoff and so
- * buffer size verification has to be done in buffer_prepare, on each
- * qbuf.
- * It would be best to move verification code here to buf_init and
- * s_fmt though.
- */
-
- return 0;
-}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
@@ -850,20 +832,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
return 0;
}
-static int buffer_finish(struct vb2_buffer *vb)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- dprintk(dev, 1, "%s\n", __func__);
- return 0;
-}
-
-static void buffer_cleanup(struct vb2_buffer *vb)
-{
- struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
- dprintk(dev, 1, "%s\n", __func__);
-
-}
-
static void buffer_queue(struct vb2_buffer *vb)
{
struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
@@ -909,10 +877,7 @@ static void vivi_unlock(struct vb2_queue *vq)
static struct vb2_ops vivi_video_qops = {
.queue_setup = queue_setup,
- .buf_init = buffer_init,
.buf_prepare = buffer_prepare,
- .buf_finish = buffer_finish,
- .buf_cleanup = buffer_cleanup,
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
@@ -959,7 +924,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
- f->fmt.pix.field = dev->field;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
f->fmt.pix.pixelformat = dev->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * dev->fmt->depth) >> 3;
@@ -978,25 +943,16 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
{
struct vivi_dev *dev = video_drvdata(file);
struct vivi_fmt *fmt;
- enum v4l2_field field;
fmt = get_format(f);
if (!fmt) {
- dprintk(dev, 1, "Fourcc format (0x%08x) invalid.\n",
+ dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
f->fmt.pix.pixelformat);
- return -EINVAL;
- }
-
- field = f->fmt.pix.field;
-
- if (field == V4L2_FIELD_ANY) {
- field = V4L2_FIELD_INTERLACED;
- } else if (V4L2_FIELD_INTERLACED != field) {
- dprintk(dev, 1, "Field type invalid.\n");
- return -EINVAL;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+ fmt = get_format(f);
}
- f->fmt.pix.field = field;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
&f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
f->fmt.pix.bytesperline =
@@ -1021,7 +977,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
if (ret < 0)
return ret;
- if (vb2_is_streaming(q)) {
+ if (vb2_is_busy(q)) {
dprintk(dev, 1, "%s device busy\n", __func__);
return -EBUSY;
}
@@ -1030,53 +986,10 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
dev->pixelsize = dev->fmt->depth / 8;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
- dev->field = f->fmt.pix.field;
return 0;
}
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_reqbufs(&dev->vb_vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_querybuf(&dev->vb_vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_qbuf(&dev->vb_vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_dqbuf(&dev->vb_vidq, p, file->f_flags & O_NONBLOCK);
-}
-
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_streamon(&dev->vb_vidq, i);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct vivi_dev *dev = video_drvdata(file);
- return vb2_streamoff(&dev->vb_vidq, i);
-}
-
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *i)
-{
- return 0;
-}
-
/* only one input in this sample driver */
static int vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
@@ -1085,7 +998,6 @@ static int vidioc_enum_input(struct file *file, void *priv,
return -EINVAL;
inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = V4L2_STD_525_60;
sprintf(inp->name, "Camera %u", inp->index);
return 0;
}
@@ -1145,58 +1057,6 @@ static int vivi_s_ctrl(struct v4l2_ctrl *ctrl)
File operations for the device
------------------------------------------------------------------*/
-static ssize_t
-vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
-{
- struct vivi_dev *dev = video_drvdata(file);
- int err;
-
- dprintk(dev, 1, "read called\n");
- mutex_lock(&dev->mutex);
- err = vb2_read(&dev->vb_vidq, data, count, ppos,
- file->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->mutex);
- return err;
-}
-
-static unsigned int
-vivi_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct vivi_dev *dev = video_drvdata(file);
- struct vb2_queue *q = &dev->vb_vidq;
-
- dprintk(dev, 1, "%s\n", __func__);
- return vb2_poll(q, file, wait);
-}
-
-static int vivi_close(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct vivi_dev *dev = video_drvdata(file);
-
- dprintk(dev, 1, "close called (dev=%s), file %p\n",
- video_device_node_name(vdev), file);
-
- if (v4l2_fh_is_singular_file(file))
- vb2_queue_release(&dev->vb_vidq);
- return v4l2_fh_release(file);
-}
-
-static int vivi_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vivi_dev *dev = video_drvdata(file);
- int ret;
-
- dprintk(dev, 1, "mmap called, vma=0x%08lx\n", (unsigned long)vma);
-
- ret = vb2_mmap(&dev->vb_vidq, vma);
- dprintk(dev, 1, "vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end - (unsigned long)vma->vm_start,
- ret);
- return ret;
-}
-
static const struct v4l2_ctrl_ops vivi_ctrl_ops = {
.g_volatile_ctrl = vivi_g_volatile_ctrl,
.s_ctrl = vivi_s_ctrl,
@@ -1301,11 +1161,11 @@ static const struct v4l2_ctrl_config vivi_ctrl_int_menu = {
static const struct v4l2_file_operations vivi_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
- .release = vivi_close,
- .read = vivi_read,
- .poll = vivi_poll,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = vivi_mmap,
+ .mmap = vb2_fop_mmap,
};
static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
@@ -1314,16 +1174,17 @@ static const struct v4l2_ioctl_ops vivi_ioctl_ops = {
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_s_std = vidioc_s_std,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_enum_input = vidioc_enum_input,
.vidioc_g_input = vidioc_g_input,
.vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_log_status = v4l2_ctrl_log_status,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
@@ -1333,10 +1194,7 @@ static struct video_device vivi_template = {
.name = "vivi",
.fops = &vivi_fops,
.ioctl_ops = &vivi_ioctl_ops,
- .release = video_device_release,
-
- .tvnorms = V4L2_STD_525_60,
- .current_norm = V4L2_STD_NTSC_M,
+ .release = video_device_release_empty,
};
/* -----------------------------------------------------------------
@@ -1354,8 +1212,8 @@ static int vivi_release(void)
dev = list_entry(list, struct vivi_dev, vivi_devlist);
v4l2_info(&dev->v4l2_dev, "unregistering %s\n",
- video_device_node_name(dev->vfd));
- video_unregister_device(dev->vfd);
+ video_device_node_name(&dev->vdev));
+ video_unregister_device(&dev->vdev);
v4l2_device_unregister(&dev->v4l2_dev);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
kfree(dev);
@@ -1440,14 +1298,11 @@ static int __init vivi_create_instance(int inst)
INIT_LIST_HEAD(&dev->vidq.active);
init_waitqueue_head(&dev->vidq.wq);
- ret = -ENOMEM;
- vfd = video_device_alloc();
- if (!vfd)
- goto unreg_dev;
-
+ vfd = &dev->vdev;
*vfd = vivi_template;
vfd->debug = debug;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->queue = q;
set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags);
/*
@@ -1455,26 +1310,19 @@ static int __init vivi_create_instance(int inst)
* all fops and v4l2 ioctls.
*/
vfd->lock = &dev->mutex;
+ video_set_drvdata(vfd, dev);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
if (ret < 0)
- goto rel_vdev;
-
- video_set_drvdata(vfd, dev);
+ goto unreg_dev;
/* Now that everything is fine, let's add it to device list */
list_add_tail(&dev->vivi_devlist, &vivi_devlist);
- if (video_nr != -1)
- video_nr++;
-
- dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
video_device_node_name(vfd));
return 0;
-rel_vdev:
- video_device_release(vfd);
unreg_dev:
v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index e44cb330bbc8..9afab35878b4 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -37,6 +37,10 @@
#include <linux/highmem.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
#include <media/videobuf-vmalloc.h>
@@ -120,11 +124,6 @@ static struct usb_device_id device_table[] = {
MODULE_DEVICE_TABLE(usb, device_table);
-struct zr364xx_mode {
- u32 color; /* output video color format */
- u32 brightness; /* brightness */
-};
-
/* frame structure */
struct zr364xx_framei {
unsigned long ulState; /* ulState:ZR364XX_READ_IDLE,
@@ -173,7 +172,10 @@ static const struct zr364xx_fmt formats[] = {
struct zr364xx_camera {
struct usb_device *udev; /* save off the usb device pointer */
struct usb_interface *interface;/* the interface for this device */
- struct video_device *vdev; /* v4l video device */
+ struct v4l2_device v4l2_dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct video_device vdev; /* v4l video device */
+ struct v4l2_fh *owner; /* owns the streaming */
int nb;
struct zr364xx_bufferi buffer;
int skip;
@@ -181,12 +183,9 @@ struct zr364xx_camera {
int height;
int method;
struct mutex lock;
- struct mutex open_lock;
- int users;
spinlock_t slock;
struct zr364xx_dmaqueue vidq;
- int resources;
int last_frame;
int cur_frame;
unsigned long frame_count;
@@ -197,8 +196,7 @@ struct zr364xx_camera {
const struct zr364xx_fmt *fmt;
struct videobuf_queue vb_vidq;
- enum v4l2_buf_type type;
- struct zr364xx_mode mode;
+ bool was_streaming;
};
/* buffer for one video frame */
@@ -230,11 +228,6 @@ static int send_control_msg(struct usb_device *udev, u8 request, u16 value,
transfer_buffer, size, CTRL_TIMEOUT);
kfree(transfer_buffer);
-
- if (status < 0)
- dev_err(&udev->dev,
- "Failed sending control message, error %d.\n", status);
-
return status;
}
@@ -468,6 +461,7 @@ static ssize_t zr364xx_read(struct file *file, char __user *buf, size_t count,
loff_t * ppos)
{
struct zr364xx_camera *cam = video_drvdata(file);
+ int err = 0;
_DBG("%s\n", __func__);
@@ -477,17 +471,21 @@ static ssize_t zr364xx_read(struct file *file, char __user *buf, size_t count,
if (!count)
return -EINVAL;
- if (cam->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- zr364xx_vidioc_streamon(file, cam, cam->type) == 0) {
- DBG("%s: reading %d bytes at pos %d.\n", __func__, (int) count,
- (int) *ppos);
+ if (mutex_lock_interruptible(&cam->lock))
+ return -ERESTARTSYS;
+
+ err = zr364xx_vidioc_streamon(file, file->private_data,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ if (err == 0) {
+ DBG("%s: reading %d bytes at pos %d.\n", __func__,
+ (int) count, (int) *ppos);
/* NoMan Sux ! */
- return videobuf_read_one(&cam->vb_vidq, buf, count, ppos,
+ err = videobuf_read_one(&cam->vb_vidq, buf, count, ppos,
file->f_flags & O_NONBLOCK);
}
-
- return 0;
+ mutex_unlock(&cam->lock);
+ return err;
}
/* video buffer vmalloc implementation based partly on VIVI driver which is
@@ -702,35 +700,6 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
return 0;
}
-static int res_get(struct zr364xx_camera *cam)
-{
- /* is it free? */
- mutex_lock(&cam->lock);
- if (cam->resources) {
- /* no, someone else uses it */
- mutex_unlock(&cam->lock);
- return 0;
- }
- /* it's free, grab it */
- cam->resources = 1;
- _DBG("res: get\n");
- mutex_unlock(&cam->lock);
- return 1;
-}
-
-static inline int res_check(struct zr364xx_camera *cam)
-{
- return cam->resources;
-}
-
-static void res_free(struct zr364xx_camera *cam)
-{
- mutex_lock(&cam->lock);
- cam->resources = 0;
- mutex_unlock(&cam->lock);
- _DBG("res: put\n");
-}
-
static int zr364xx_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
@@ -740,9 +709,10 @@ static int zr364xx_vidioc_querycap(struct file *file, void *priv,
strlcpy(cap->card, cam->udev->product, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cam->udev->dev),
sizeof(cap->bus_info));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -772,50 +742,18 @@ static int zr364xx_vidioc_s_input(struct file *file, void *priv,
return 0;
}
-static int zr364xx_vidioc_queryctrl(struct file *file, void *priv,
- struct v4l2_queryctrl *c)
+static int zr364xx_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct zr364xx_camera *cam;
-
- if (file == NULL)
- return -ENODEV;
- cam = video_drvdata(file);
-
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->type = V4L2_CTRL_TYPE_INTEGER;
- strcpy(c->name, "Brightness");
- c->minimum = 0;
- c->maximum = 127;
- c->step = 1;
- c->default_value = cam->mode.brightness;
- c->flags = 0;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int zr364xx_vidioc_s_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct zr364xx_camera *cam;
+ struct zr364xx_camera *cam =
+ container_of(ctrl->handler, struct zr364xx_camera, ctrl_handler);
int temp;
- if (file == NULL)
- return -ENODEV;
- cam = video_drvdata(file);
-
- switch (c->id) {
+ switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
- cam->mode.brightness = c->value;
/* hardware brightness */
- mutex_lock(&cam->lock);
send_control_msg(cam->udev, 1, 0x2001, 0, NULL, 0);
- temp = (0x60 << 8) + 127 - cam->mode.brightness;
+ temp = (0x60 << 8) + 127 - ctrl->val;
send_control_msg(cam->udev, 1, temp, 0, NULL, 0);
- mutex_unlock(&cam->lock);
break;
default:
return -EINVAL;
@@ -824,25 +762,6 @@ static int zr364xx_vidioc_s_ctrl(struct file *file, void *priv,
return 0;
}
-static int zr364xx_vidioc_g_ctrl(struct file *file, void *priv,
- struct v4l2_control *c)
-{
- struct zr364xx_camera *cam;
-
- if (file == NULL)
- return -ENODEV;
- cam = video_drvdata(file);
-
- switch (c->id) {
- case V4L2_CID_BRIGHTNESS:
- c->value = cam->mode.brightness;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static int zr364xx_vidioc_enum_fmt_vid_cap(struct file *file,
void *priv, struct v4l2_fmtdesc *f)
{
@@ -888,7 +807,7 @@ static int zr364xx_vidioc_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
f->fmt.pix.priv = 0;
DBG("%s: V4L2_PIX_FMT_%s (%d) ok!\n", __func__,
decode_fourcc(f->fmt.pix.pixelformat, pixelformat_name),
@@ -911,7 +830,7 @@ static int zr364xx_vidioc_g_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.height = cam->height;
f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
f->fmt.pix.priv = 0;
return 0;
}
@@ -936,7 +855,7 @@ static int zr364xx_vidioc_s_fmt_vid_cap(struct file *file, void *priv,
goto out;
}
- if (res_check(cam)) {
+ if (cam->owner) {
DBG("%s can't change format after started\n", __func__);
ret = -EBUSY;
goto out;
@@ -944,14 +863,13 @@ static int zr364xx_vidioc_s_fmt_vid_cap(struct file *file, void *priv,
cam->width = f->fmt.pix.width;
cam->height = f->fmt.pix.height;
- dev_info(&cam->udev->dev, "%s: %dx%d mode selected\n", __func__,
+ DBG("%s: %dx%d mode selected\n", __func__,
cam->width, cam->height);
f->fmt.pix.bytesperline = f->fmt.pix.width * 2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- f->fmt.pix.colorspace = 0;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
f->fmt.pix.priv = 0;
cam->vb_vidq.field = f->fmt.pix.field;
- cam->mode.color = V4L2_PIX_FMT_JPEG;
if (f->fmt.pix.width == 160 && f->fmt.pix.height == 120)
mode = 1;
@@ -1015,10 +933,11 @@ out:
static int zr364xx_vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *p)
{
- int rc;
struct zr364xx_camera *cam = video_drvdata(file);
- rc = videobuf_reqbufs(&cam->vb_vidq, p);
- return rc;
+
+ if (cam->owner && cam->owner != priv)
+ return -EBUSY;
+ return videobuf_reqbufs(&cam->vb_vidq, p);
}
static int zr364xx_vidioc_querybuf(struct file *file,
@@ -1038,6 +957,8 @@ static int zr364xx_vidioc_qbuf(struct file *file,
int rc;
struct zr364xx_camera *cam = video_drvdata(file);
_DBG("%s\n", __func__);
+ if (cam->owner && cam->owner != priv)
+ return -EBUSY;
rc = videobuf_qbuf(&cam->vb_vidq, p);
return rc;
}
@@ -1049,6 +970,8 @@ static int zr364xx_vidioc_dqbuf(struct file *file,
int rc;
struct zr364xx_camera *cam = video_drvdata(file);
_DBG("%s\n", __func__);
+ if (cam->owner && cam->owner != priv)
+ return -EBUSY;
rc = videobuf_dqbuf(&cam->vb_vidq, p, file->f_flags & O_NONBLOCK);
return rc;
}
@@ -1197,29 +1120,23 @@ static inline int zr364xx_stop_acquire(struct zr364xx_camera *cam)
return 0;
}
-static int zr364xx_vidioc_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
+static int zr364xx_prepare(struct zr364xx_camera *cam)
{
- struct zr364xx_camera *cam = video_drvdata(file);
- int j;
int res;
+ int i, j;
- DBG("%s\n", __func__);
-
- if (cam->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_err(&cam->udev->dev, "invalid fh type0\n");
- return -EINVAL;
- }
- if (cam->type != type) {
- dev_err(&cam->udev->dev, "invalid fh type1\n");
- return -EINVAL;
- }
-
- if (!res_get(cam)) {
- dev_err(&cam->udev->dev, "stream busy\n");
- return -EBUSY;
+ for (i = 0; init[cam->method][i].size != -1; i++) {
+ res = send_control_msg(cam->udev, 1, init[cam->method][i].value,
+ 0, init[cam->method][i].bytes,
+ init[cam->method][i].size);
+ if (res < 0) {
+ dev_err(&cam->udev->dev,
+ "error during open sequence: %d\n", i);
+ return res;
+ }
}
+ cam->skip = 2;
cam->last_frame = -1;
cam->cur_frame = 0;
cam->frame_count = 0;
@@ -1227,11 +1144,31 @@ static int zr364xx_vidioc_streamon(struct file *file, void *priv,
cam->buffer.frame[j].ulState = ZR364XX_READ_IDLE;
cam->buffer.frame[j].cur_size = 0;
}
+ v4l2_ctrl_handler_setup(&cam->ctrl_handler);
+ return 0;
+}
+
+static int zr364xx_vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct zr364xx_camera *cam = video_drvdata(file);
+ int res;
+
+ DBG("%s\n", __func__);
+
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (cam->owner && cam->owner != priv)
+ return -EBUSY;
+
+ res = zr364xx_prepare(cam);
+ if (res)
+ return res;
res = videobuf_streamon(&cam->vb_vidq);
if (res == 0) {
zr364xx_start_acquire(cam);
- } else {
- res_free(cam);
+ cam->owner = file->private_data;
}
return res;
}
@@ -1239,67 +1176,32 @@ static int zr364xx_vidioc_streamon(struct file *file, void *priv,
static int zr364xx_vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- int res;
struct zr364xx_camera *cam = video_drvdata(file);
DBG("%s\n", __func__);
- if (cam->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- dev_err(&cam->udev->dev, "invalid fh type0\n");
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- }
- if (cam->type != type) {
- dev_err(&cam->udev->dev, "invalid fh type1\n");
- return -EINVAL;
- }
+ if (cam->owner && cam->owner != priv)
+ return -EBUSY;
zr364xx_stop_acquire(cam);
- res = videobuf_streamoff(&cam->vb_vidq);
- if (res < 0)
- return res;
- res_free(cam);
- return 0;
+ return videobuf_streamoff(&cam->vb_vidq);
}
/* open the camera */
static int zr364xx_open(struct file *file)
{
- struct video_device *vdev = video_devdata(file);
struct zr364xx_camera *cam = video_drvdata(file);
- struct usb_device *udev = cam->udev;
- int i, err;
+ int err;
DBG("%s\n", __func__);
- mutex_lock(&cam->open_lock);
+ if (mutex_lock_interruptible(&cam->lock))
+ return -ERESTARTSYS;
- if (cam->users) {
- err = -EBUSY;
+ err = v4l2_fh_open(file);
+ if (err)
goto out;
- }
-
- for (i = 0; init[cam->method][i].size != -1; i++) {
- err =
- send_control_msg(udev, 1, init[cam->method][i].value,
- 0, init[cam->method][i].bytes,
- init[cam->method][i].size);
- if (err < 0) {
- dev_err(&cam->udev->dev,
- "error during open sequence: %d\n", i);
- goto out;
- }
- }
-
- cam->skip = 2;
- cam->users++;
- file->private_data = vdev;
- cam->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- cam->fmt = formats;
-
- videobuf_queue_vmalloc_init(&cam->vb_vidq, &zr364xx_video_qops,
- NULL, &cam->slock,
- cam->type,
- V4L2_FIELD_NONE,
- sizeof(struct zr364xx_buffer), cam, NULL);
/* Added some delay here, since opening/closing the camera quickly,
* like Ekiga does during its startup, can crash the webcam
@@ -1308,29 +1210,20 @@ static int zr364xx_open(struct file *file)
err = 0;
out:
- mutex_unlock(&cam->open_lock);
+ mutex_unlock(&cam->lock);
DBG("%s: %d\n", __func__, err);
return err;
}
-static void zr364xx_destroy(struct zr364xx_camera *cam)
+static void zr364xx_release(struct v4l2_device *v4l2_dev)
{
+ struct zr364xx_camera *cam =
+ container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
unsigned long i;
- if (!cam) {
- printk(KERN_ERR KBUILD_MODNAME ", %s: no device\n", __func__);
- return;
- }
- mutex_lock(&cam->open_lock);
- if (cam->vdev)
- video_unregister_device(cam->vdev);
- cam->vdev = NULL;
-
- /* stops the read pipe if it is running */
- if (cam->b_acquire)
- zr364xx_stop_acquire(cam);
+ v4l2_device_unregister(&cam->v4l2_dev);
- zr364xx_stop_readpipe(cam);
+ videobuf_mmap_free(&cam->vb_vidq);
/* release sys buffers */
for (i = 0; i < FRAMES; i++) {
@@ -1341,62 +1234,45 @@ static void zr364xx_destroy(struct zr364xx_camera *cam)
cam->buffer.frame[i].lpvbits = NULL;
}
+ v4l2_ctrl_handler_free(&cam->ctrl_handler);
/* release transfer buffer */
kfree(cam->pipe->transfer_buffer);
- cam->pipe->transfer_buffer = NULL;
- mutex_unlock(&cam->open_lock);
kfree(cam);
- cam = NULL;
}
/* release the camera */
-static int zr364xx_release(struct file *file)
+static int zr364xx_close(struct file *file)
{
struct zr364xx_camera *cam;
struct usb_device *udev;
- int i, err;
+ int i;
DBG("%s\n", __func__);
cam = video_drvdata(file);
- if (!cam)
- return -ENODEV;
-
- mutex_lock(&cam->open_lock);
+ mutex_lock(&cam->lock);
udev = cam->udev;
- /* turn off stream */
- if (res_check(cam)) {
+ if (file->private_data == cam->owner) {
+ /* turn off stream */
if (cam->b_acquire)
zr364xx_stop_acquire(cam);
videobuf_streamoff(&cam->vb_vidq);
- res_free(cam);
- }
-
- cam->users--;
- file->private_data = NULL;
- for (i = 0; i < 2; i++) {
- err =
- send_control_msg(udev, 1, init[cam->method][i].value,
- 0, init[cam->method][i].bytes,
- init[cam->method][i].size);
- if (err < 0) {
- dev_err(&udev->dev, "error during release sequence\n");
- goto out;
+ for (i = 0; i < 2; i++) {
+ send_control_msg(udev, 1, init[cam->method][i].value,
+ 0, init[cam->method][i].bytes,
+ init[cam->method][i].size);
}
+ cam->owner = NULL;
}
/* Added some delay here, since opening/closing the camera quickly,
* like Ekiga does during its startup, can crash the webcam
*/
mdelay(100);
- err = 0;
-
-out:
- mutex_unlock(&cam->open_lock);
-
- return err;
+ mutex_unlock(&cam->lock);
+ return v4l2_fh_release(file);
}
@@ -1424,21 +1300,24 @@ static unsigned int zr364xx_poll(struct file *file,
{
struct zr364xx_camera *cam = video_drvdata(file);
struct videobuf_queue *q = &cam->vb_vidq;
- _DBG("%s\n", __func__);
+ unsigned res = v4l2_ctrl_poll(file, wait);
- if (cam->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return POLLERR;
+ _DBG("%s\n", __func__);
- return videobuf_poll_stream(file, q, wait);
+ return res | videobuf_poll_stream(file, q, wait);
}
+static const struct v4l2_ctrl_ops zr364xx_ctrl_ops = {
+ .s_ctrl = zr364xx_s_ctrl,
+};
+
static const struct v4l2_file_operations zr364xx_fops = {
.owner = THIS_MODULE,
.open = zr364xx_open,
- .release = zr364xx_release,
+ .release = zr364xx_close,
.read = zr364xx_read,
.mmap = zr364xx_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
.poll = zr364xx_poll,
};
@@ -1453,20 +1332,20 @@ static const struct v4l2_ioctl_ops zr364xx_ioctl_ops = {
.vidioc_s_input = zr364xx_vidioc_s_input,
.vidioc_streamon = zr364xx_vidioc_streamon,
.vidioc_streamoff = zr364xx_vidioc_streamoff,
- .vidioc_queryctrl = zr364xx_vidioc_queryctrl,
- .vidioc_g_ctrl = zr364xx_vidioc_g_ctrl,
- .vidioc_s_ctrl = zr364xx_vidioc_s_ctrl,
.vidioc_reqbufs = zr364xx_vidioc_reqbufs,
.vidioc_querybuf = zr364xx_vidioc_querybuf,
.vidioc_qbuf = zr364xx_vidioc_qbuf,
.vidioc_dqbuf = zr364xx_vidioc_dqbuf,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct video_device zr364xx_template = {
.name = DRIVER_DESC,
.fops = &zr364xx_fops,
.ioctl_ops = &zr364xx_ioctl_ops,
- .release = video_device_release,
+ .release = video_device_release_empty,
};
@@ -1540,6 +1419,7 @@ static int zr364xx_probe(struct usb_interface *intf,
struct zr364xx_camera *cam = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
+ struct v4l2_ctrl_handler *hdl;
int err;
int i;
@@ -1555,21 +1435,34 @@ static int zr364xx_probe(struct usb_interface *intf,
dev_err(&udev->dev, "cam: out of memory !\n");
return -ENOMEM;
}
- /* save the init method used by this camera */
- cam->method = id->driver_info;
- cam->vdev = video_device_alloc();
- if (cam->vdev == NULL) {
- dev_err(&udev->dev, "cam->vdev: out of memory !\n");
+ cam->v4l2_dev.release = zr364xx_release;
+ err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
+ if (err < 0) {
+ dev_err(&udev->dev, "couldn't register v4l2_device\n");
kfree(cam);
- cam = NULL;
- return -ENOMEM;
+ return err;
}
- memcpy(cam->vdev, &zr364xx_template, sizeof(zr364xx_template));
- cam->vdev->parent = &intf->dev;
- video_set_drvdata(cam->vdev, cam);
+ hdl = &cam->ctrl_handler;
+ v4l2_ctrl_handler_init(hdl, 1);
+ v4l2_ctrl_new_std(hdl, &zr364xx_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, 0, 127, 1, 64);
+ if (hdl->error) {
+ err = hdl->error;
+ dev_err(&udev->dev, "couldn't register control\n");
+ goto fail;
+ }
+ /* save the init method used by this camera */
+ cam->method = id->driver_info;
+ mutex_init(&cam->lock);
+ cam->vdev = zr364xx_template;
+ cam->vdev.lock = &cam->lock;
+ cam->vdev.v4l2_dev = &cam->v4l2_dev;
+ cam->vdev.ctrl_handler = &cam->ctrl_handler;
+ set_bit(V4L2_FL_USE_FH_PRIO, &cam->vdev.flags);
+ video_set_drvdata(&cam->vdev, cam);
if (debug)
- cam->vdev->debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
+ cam->vdev.debug = V4L2_DEBUG_IOCTL | V4L2_DEBUG_IOCTL_ARG;
cam->udev = udev;
@@ -1615,11 +1508,7 @@ static int zr364xx_probe(struct usb_interface *intf,
header2[439] = cam->width / 256;
header2[440] = cam->width % 256;
- cam->users = 0;
cam->nb = 0;
- cam->mode.brightness = 64;
- mutex_init(&cam->lock);
- mutex_init(&cam->open_lock);
DBG("dev: %p, udev %p interface %p\n", cam, cam->udev, intf);
@@ -1635,52 +1524,100 @@ static int zr364xx_probe(struct usb_interface *intf,
}
if (!cam->read_endpoint) {
+ err = -ENOMEM;
dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
- video_device_release(cam->vdev);
- kfree(cam);
- cam = NULL;
- return -ENOMEM;
+ goto fail;
}
/* v4l */
INIT_LIST_HEAD(&cam->vidq.active);
cam->vidq.cam = cam;
- err = video_register_device(cam->vdev, VFL_TYPE_GRABBER, -1);
- if (err) {
- dev_err(&udev->dev, "video_register_device failed\n");
- video_device_release(cam->vdev);
- kfree(cam);
- cam = NULL;
- return err;
- }
usb_set_intfdata(intf, cam);
/* load zr364xx board specific */
err = zr364xx_board_init(cam);
- if (err) {
- spin_lock_init(&cam->slock);
- return err;
- }
+ if (!err)
+ err = v4l2_ctrl_handler_setup(hdl);
+ if (err)
+ goto fail;
spin_lock_init(&cam->slock);
+ cam->fmt = formats;
+
+ videobuf_queue_vmalloc_init(&cam->vb_vidq, &zr364xx_video_qops,
+ NULL, &cam->slock,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ V4L2_FIELD_NONE,
+ sizeof(struct zr364xx_buffer), cam, &cam->lock);
+
+ err = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ dev_err(&udev->dev, "video_register_device failed\n");
+ goto fail;
+ }
+
dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
- video_device_node_name(cam->vdev));
+ video_device_node_name(&cam->vdev));
return 0;
+
+fail:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_device_unregister(&cam->v4l2_dev);
+ kfree(cam);
+ return err;
}
static void zr364xx_disconnect(struct usb_interface *intf)
{
struct zr364xx_camera *cam = usb_get_intfdata(intf);
- videobuf_mmap_free(&cam->vb_vidq);
+
+ mutex_lock(&cam->lock);
usb_set_intfdata(intf, NULL);
dev_info(&intf->dev, DRIVER_DESC " webcam unplugged\n");
- zr364xx_destroy(cam);
+ video_unregister_device(&cam->vdev);
+ v4l2_device_disconnect(&cam->v4l2_dev);
+
+ /* stops the read pipe if it is running */
+ if (cam->b_acquire)
+ zr364xx_stop_acquire(cam);
+
+ zr364xx_stop_readpipe(cam);
+ mutex_unlock(&cam->lock);
+ v4l2_device_put(&cam->v4l2_dev);
}
+#ifdef CONFIG_PM
+static int zr364xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct zr364xx_camera *cam = usb_get_intfdata(intf);
+
+ cam->was_streaming = cam->b_acquire;
+ if (!cam->was_streaming)
+ return 0;
+ zr364xx_stop_acquire(cam);
+ zr364xx_stop_readpipe(cam);
+ return 0;
+}
+
+static int zr364xx_resume(struct usb_interface *intf)
+{
+ struct zr364xx_camera *cam = usb_get_intfdata(intf);
+ int res;
+
+ if (!cam->was_streaming)
+ return 0;
+
+ zr364xx_start_readpipe(cam);
+ res = zr364xx_prepare(cam);
+ if (!res)
+ zr364xx_start_acquire(cam);
+ return res;
+}
+#endif
/**********************/
/* Module integration */
@@ -1690,6 +1627,11 @@ static struct usb_driver zr364xx_driver = {
.name = "zr364xx",
.probe = zr364xx_probe,
.disconnect = zr364xx_disconnect,
+#ifdef CONFIG_PM
+ .suspend = zr364xx_suspend,
+ .resume = zr364xx_resume,
+ .reset_resume = zr364xx_resume,
+#endif
.id_table = device_table
};
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 098de2b35784..9a49c243a6ac 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -188,6 +188,13 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
if (!dev)
return -ENXIO;
+ /*
+	 * Stop users from trying to allocate arbitrary amounts
+ * of DMA space. 64K is way more than sufficient for this.
+ */
+ if (kcmd.oplen > 65536)
+ return -EMSGSIZE;
+
ops = memdup_user(kcmd.opbuf, kcmd.oplen);
if (IS_ERR(ops))
return PTR_ERR(ops);
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 506c36f6e1db..8001aa6bfb48 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -255,9 +255,8 @@ static char *scsi_devices[] = {
"Array Controller Device"
};
-static char *chtostr(u8 * chars, int n)
+static char *chtostr(char *tmp, u8 *chars, int n)
{
- char tmp[256];
tmp[0] = 0;
return strncat(tmp, (char *)chars, n);
}
@@ -791,6 +790,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
} *result;
i2o_exec_execute_ddm_table ddm_table;
+ char tmp[28 + 1];
result = kmalloc(sizeof(*result), GFP_KERNEL);
if (!result)
@@ -826,7 +826,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
seq_printf(seq, "%-#8x", ddm_table.module_id);
seq_printf(seq, "%-29s",
- chtostr(ddm_table.module_name_version, 28));
+ chtostr(tmp, ddm_table.module_name_version, 28));
seq_printf(seq, "%9d ", ddm_table.data_size);
seq_printf(seq, "%8d", ddm_table.code_size);
@@ -893,6 +893,7 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
i2o_driver_result_table *result;
i2o_driver_store_table *dst;
+ char tmp[28 + 1];
result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
if (result == NULL)
@@ -927,8 +928,9 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
seq_printf(seq, "%-#8x", dst->module_id);
- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
+ seq_printf(seq, "%-29s",
+ chtostr(tmp, dst->module_name_version, 28));
+ seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
seq_printf(seq, "%8d ", dst->module_size);
seq_printf(seq, "%8d ", dst->mpb_size);
seq_printf(seq, "0x%04x", dst->module_flags);
@@ -1248,6 +1250,7 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
// == (allow) 512d bytes (max)
static u16 *work16 = (u16 *) work32;
int token;
+ char tmp[16 + 1];
token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
@@ -1260,13 +1263,13 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
seq_printf(seq, "Vendor info : %s\n",
- chtostr((u8 *) (work32 + 2), 16));
+ chtostr(tmp, (u8 *) (work32 + 2), 16));
seq_printf(seq, "Product info : %s\n",
- chtostr((u8 *) (work32 + 6), 16));
+ chtostr(tmp, (u8 *) (work32 + 6), 16));
seq_printf(seq, "Description : %s\n",
- chtostr((u8 *) (work32 + 10), 16));
+ chtostr(tmp, (u8 *) (work32 + 10), 16));
seq_printf(seq, "Product rev. : %s\n",
- chtostr((u8 *) (work32 + 14), 8));
+ chtostr(tmp, (u8 *) (work32 + 14), 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, (u8 *) (work32 + 16),
@@ -1303,6 +1306,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
u8 pad[256]; // allow up to 256 byte (max) serial number
} result;
+ char tmp[24 + 1];
+
token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
if (token < 0) {
@@ -1312,9 +1317,9 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
seq_printf(seq, "Module name : %s\n",
- chtostr(result.module_name, 24));
+ chtostr(tmp, result.module_name, 24));
seq_printf(seq, "Module revision : %s\n",
- chtostr(result.module_rev, 8));
+ chtostr(tmp, result.module_rev, 8));
seq_printf(seq, "Serial number : ");
print_serial_number(seq, result.serial_number, sizeof(result) - 36);
@@ -1338,6 +1343,8 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
u8 instance_number[4];
} result;
+ char tmp[64 + 1];
+
token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
if (token < 0) {
@@ -1346,13 +1353,13 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
}
seq_printf(seq, "Device name : %s\n",
- chtostr(result.device_name, 64));
+ chtostr(tmp, result.device_name, 64));
seq_printf(seq, "Service name : %s\n",
- chtostr(result.service_name, 64));
+ chtostr(tmp, result.service_name, 64));
seq_printf(seq, "Physical name : %s\n",
- chtostr(result.physical_location, 64));
+ chtostr(tmp, result.physical_location, 64));
seq_printf(seq, "Instance number : %s\n",
- chtostr(result.instance_number, 4));
+ chtostr(tmp, result.instance_number, 4));
return 0;
}
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
new file mode 100644
index 000000000000..b67a3018b136
--- /dev/null
+++ b/drivers/mfd/88pm800.c
@@ -0,0 +1,596 @@
+/*
+ * Base driver for Marvell 88PM800
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Joseph(Yossi) Hanin <yhanin@marvell.com>
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/slab.h>
+
+#define PM800_CHIP_ID (0x00)
+
+/* Interrupt Registers */
+#define PM800_INT_STATUS1 (0x05)
+#define PM800_ONKEY_INT_STS1 (1 << 0)
+#define PM800_EXTON_INT_STS1 (1 << 1)
+#define PM800_CHG_INT_STS1 (1 << 2)
+#define PM800_BAT_INT_STS1 (1 << 3)
+#define PM800_RTC_INT_STS1 (1 << 4)
+#define PM800_CLASSD_OC_INT_STS1 (1 << 5)
+
+#define PM800_INT_STATUS2 (0x06)
+#define PM800_VBAT_INT_STS2 (1 << 0)
+#define PM800_VSYS_INT_STS2 (1 << 1)
+#define PM800_VCHG_INT_STS2 (1 << 2)
+#define PM800_TINT_INT_STS2 (1 << 3)
+#define PM800_GPADC0_INT_STS2 (1 << 4)
+#define PM800_TBAT_INT_STS2 (1 << 5)
+#define PM800_GPADC2_INT_STS2 (1 << 6)
+#define PM800_GPADC3_INT_STS2 (1 << 7)
+
+#define PM800_INT_STATUS3 (0x07)
+
+#define PM800_INT_STATUS4 (0x08)
+#define PM800_GPIO0_INT_STS4 (1 << 0)
+#define PM800_GPIO1_INT_STS4 (1 << 1)
+#define PM800_GPIO2_INT_STS4 (1 << 2)
+#define PM800_GPIO3_INT_STS4 (1 << 3)
+#define PM800_GPIO4_INT_STS4 (1 << 4)
+
+#define PM800_INT_ENA_1 (0x09)
+#define PM800_ONKEY_INT_ENA1 (1 << 0)
+#define PM800_EXTON_INT_ENA1 (1 << 1)
+#define PM800_CHG_INT_ENA1 (1 << 2)
+#define PM800_BAT_INT_ENA1 (1 << 3)
+#define PM800_RTC_INT_ENA1 (1 << 4)
+#define PM800_CLASSD_OC_INT_ENA1 (1 << 5)
+
+#define PM800_INT_ENA_2 (0x0A)
+#define PM800_VBAT_INT_ENA2 (1 << 0)
+#define PM800_VSYS_INT_ENA2 (1 << 1)
+#define PM800_VCHG_INT_ENA2 (1 << 2)
+#define PM800_TINT_INT_ENA2 (1 << 3)
+
+#define PM800_INT_ENA_3 (0x0B)
+#define PM800_GPADC0_INT_ENA3 (1 << 0)
+#define PM800_GPADC1_INT_ENA3 (1 << 1)
+#define PM800_GPADC2_INT_ENA3 (1 << 2)
+#define PM800_GPADC3_INT_ENA3 (1 << 3)
+#define PM800_GPADC4_INT_ENA3 (1 << 4)
+
+#define PM800_INT_ENA_4 (0x0C)
+#define PM800_GPIO0_INT_ENA4 (1 << 0)
+#define PM800_GPIO1_INT_ENA4 (1 << 1)
+#define PM800_GPIO2_INT_ENA4 (1 << 2)
+#define PM800_GPIO3_INT_ENA4 (1 << 3)
+#define PM800_GPIO4_INT_ENA4 (1 << 4)
+
+/* number of INT_ENA & INT_STATUS regs */
+#define PM800_INT_REG_NUM (4)
+
+/* Interrupt Number in 88PM800 */
+enum {
+ PM800_IRQ_ONKEY, /*EN1b0 *//*0 */
+ PM800_IRQ_EXTON, /*EN1b1 */
+ PM800_IRQ_CHG, /*EN1b2 */
+ PM800_IRQ_BAT, /*EN1b3 */
+ PM800_IRQ_RTC, /*EN1b4 */
+ PM800_IRQ_CLASSD, /*EN1b5 *//*5 */
+ PM800_IRQ_VBAT, /*EN2b0 */
+ PM800_IRQ_VSYS, /*EN2b1 */
+ PM800_IRQ_VCHG, /*EN2b2 */
+ PM800_IRQ_TINT, /*EN2b3 */
+ PM800_IRQ_GPADC0, /*EN3b0 *//*10 */
+ PM800_IRQ_GPADC1, /*EN3b1 */
+ PM800_IRQ_GPADC2, /*EN3b2 */
+ PM800_IRQ_GPADC3, /*EN3b3 */
+ PM800_IRQ_GPADC4, /*EN3b4 */
+ PM800_IRQ_GPIO0, /*EN4b0 *//*15 */
+ PM800_IRQ_GPIO1, /*EN4b1 */
+ PM800_IRQ_GPIO2, /*EN4b2 */
+ PM800_IRQ_GPIO3, /*EN4b3 */
+ PM800_IRQ_GPIO4, /*EN4b4 *//*19 */
+ PM800_MAX_IRQ,
+};
+
+enum {
+ /* Procida */
+ PM800_CHIP_A0 = 0x60,
+ PM800_CHIP_A1 = 0x61,
+ PM800_CHIP_B0 = 0x62,
+ PM800_CHIP_C0 = 0x63,
+ PM800_CHIP_END = PM800_CHIP_C0,
+
+ /* Make sure to update this to the last stepping */
+ PM8XXX_CHIP_END = PM800_CHIP_END
+};
+
+static const struct i2c_device_id pm80x_id_table[] = {
+ {"88PM800", CHIP_PM800},
+ {} /* NULL terminated */
+};
+MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
+
+static struct resource rtc_resources[] = {
+ {
+ .name = "88pm80x-rtc",
+ .start = PM800_IRQ_RTC,
+ .end = PM800_IRQ_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell rtc_devs[] = {
+ {
+ .name = "88pm80x-rtc",
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ .resources = &rtc_resources[0],
+ .id = -1,
+ },
+};
+
+static struct resource onkey_resources[] = {
+ {
+ .name = "88pm80x-onkey",
+ .start = PM800_IRQ_ONKEY,
+ .end = PM800_IRQ_ONKEY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell onkey_devs[] = {
+ {
+ .name = "88pm80x-onkey",
+ .num_resources = 1,
+ .resources = &onkey_resources[0],
+ .id = -1,
+ },
+};
+
+static const struct regmap_irq pm800_irqs[] = {
+ /* INT0 */
+ [PM800_IRQ_ONKEY] = {
+ .mask = PM800_ONKEY_INT_ENA1,
+ },
+ [PM800_IRQ_EXTON] = {
+ .mask = PM800_EXTON_INT_ENA1,
+ },
+ [PM800_IRQ_CHG] = {
+ .mask = PM800_CHG_INT_ENA1,
+ },
+ [PM800_IRQ_BAT] = {
+ .mask = PM800_BAT_INT_ENA1,
+ },
+ [PM800_IRQ_RTC] = {
+ .mask = PM800_RTC_INT_ENA1,
+ },
+ [PM800_IRQ_CLASSD] = {
+ .mask = PM800_CLASSD_OC_INT_ENA1,
+ },
+ /* INT1 */
+ [PM800_IRQ_VBAT] = {
+ .reg_offset = 1,
+ .mask = PM800_VBAT_INT_ENA2,
+ },
+ [PM800_IRQ_VSYS] = {
+ .reg_offset = 1,
+ .mask = PM800_VSYS_INT_ENA2,
+ },
+ [PM800_IRQ_VCHG] = {
+ .reg_offset = 1,
+ .mask = PM800_VCHG_INT_ENA2,
+ },
+ [PM800_IRQ_TINT] = {
+ .reg_offset = 1,
+ .mask = PM800_TINT_INT_ENA2,
+ },
+ /* INT2 */
+ [PM800_IRQ_GPADC0] = {
+ .reg_offset = 2,
+ .mask = PM800_GPADC0_INT_ENA3,
+ },
+ [PM800_IRQ_GPADC1] = {
+ .reg_offset = 2,
+ .mask = PM800_GPADC1_INT_ENA3,
+ },
+ [PM800_IRQ_GPADC2] = {
+ .reg_offset = 2,
+ .mask = PM800_GPADC2_INT_ENA3,
+ },
+ [PM800_IRQ_GPADC3] = {
+ .reg_offset = 2,
+ .mask = PM800_GPADC3_INT_ENA3,
+ },
+ [PM800_IRQ_GPADC4] = {
+ .reg_offset = 2,
+ .mask = PM800_GPADC4_INT_ENA3,
+ },
+ /* INT3 */
+ [PM800_IRQ_GPIO0] = {
+ .reg_offset = 3,
+ .mask = PM800_GPIO0_INT_ENA4,
+ },
+ [PM800_IRQ_GPIO1] = {
+ .reg_offset = 3,
+ .mask = PM800_GPIO1_INT_ENA4,
+ },
+ [PM800_IRQ_GPIO2] = {
+ .reg_offset = 3,
+ .mask = PM800_GPIO2_INT_ENA4,
+ },
+ [PM800_IRQ_GPIO3] = {
+ .reg_offset = 3,
+ .mask = PM800_GPIO3_INT_ENA4,
+ },
+ [PM800_IRQ_GPIO4] = {
+ .reg_offset = 3,
+ .mask = PM800_GPIO4_INT_ENA4,
+ },
+};
+
+static int __devinit device_gpadc_init(struct pm80x_chip *chip,
+ struct pm80x_platform_data *pdata)
+{
+ struct pm80x_subchip *subchip = chip->subchip;
+ struct regmap *map = subchip->regmap_gpadc;
+ int data = 0, mask = 0, ret = 0;
+
+ if (!map) {
+ dev_warn(chip->dev,
+ "Warning: gpadc regmap is not available!\n");
+ return -EINVAL;
+ }
+ /*
+	 * initialize GPADC without activating it; turn on GPADC
+	 * measurements
+ */
+ ret = regmap_update_bits(map,
+ PM800_GPADC_MISC_CONFIG2,
+ PM800_GPADC_MISC_GPFSM_EN,
+ PM800_GPADC_MISC_GPFSM_EN);
+ if (ret < 0)
+ goto out;
+ /*
+	 * This function configures the ADC as required for the
+	 * CP implementation. The CP does not "own" the ADC configuration
+	 * registers and relies on the AP.
+	 * Reason: enable the automatic ADC measurements needed
+	 * for the CP to get VBAT and RF temperature readings.
+ */
+ ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN1,
+ PM800_MEAS_EN1_VBAT, PM800_MEAS_EN1_VBAT);
+ if (ret < 0)
+ goto out;
+ ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN2,
+ (PM800_MEAS_EN2_RFTMP | PM800_MEAS_GP0_EN),
+ (PM800_MEAS_EN2_RFTMP | PM800_MEAS_GP0_EN));
+ if (ret < 0)
+ goto out;
+
+ /*
+	 * By default the PM800 GPADC operates at a 100 kS/s rate, and the
+	 * number of GPADC slots with active current bias prior to GPADC
+	 * sampling is 1 slot for all GPADCs set for temperature
+	 * measurements
+ */
+ mask = (PM800_GPADC_GP_BIAS_EN0 | PM800_GPADC_GP_BIAS_EN1 |
+ PM800_GPADC_GP_BIAS_EN2 | PM800_GPADC_GP_BIAS_EN3);
+
+ if (pdata && (pdata->batt_det == 0))
+ data = (PM800_GPADC_GP_BIAS_EN0 | PM800_GPADC_GP_BIAS_EN1 |
+ PM800_GPADC_GP_BIAS_EN2 | PM800_GPADC_GP_BIAS_EN3);
+ else
+ data = (PM800_GPADC_GP_BIAS_EN0 | PM800_GPADC_GP_BIAS_EN2 |
+ PM800_GPADC_GP_BIAS_EN3);
+
+ ret = regmap_update_bits(map, PM800_GP_BIAS_ENA1, mask, data);
+ if (ret < 0)
+ goto out;
+
+ dev_info(chip->dev, "pm800 device_gpadc_init: Done\n");
+ return 0;
+
+out:
+ dev_info(chip->dev, "pm800 device_gpadc_init: Failed!\n");
+ return ret;
+}
+
+static int __devinit device_irq_init_800(struct pm80x_chip *chip)
+{
+ struct regmap *map = chip->regmap;
+ unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ int data, mask, ret = -EINVAL;
+
+ if (!map || !chip->irq) {
+ dev_err(chip->dev, "incorrect parameters\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * irq_mode defines how the interrupt is cleared; it is
+	 * read-clear by default.
+ */
+ mask =
+ PM800_WAKEUP2_INV_INT | PM800_WAKEUP2_INT_CLEAR |
+ PM800_WAKEUP2_INT_MASK;
+
+ data = PM800_WAKEUP2_INT_CLEAR;
+ ret = regmap_update_bits(map, PM800_WAKEUP2, mask, data);
+
+ if (ret < 0)
+ goto out;
+
+ ret =
+ regmap_add_irq_chip(chip->regmap, chip->irq, flags, -1,
+ chip->regmap_irq_chip, &chip->irq_data);
+
+out:
+ return ret;
+}
+
+static void device_irq_exit_800(struct pm80x_chip *chip)
+{
+ regmap_del_irq_chip(chip->irq, chip->irq_data);
+}
+
+static struct regmap_irq_chip pm800_irq_chip = {
+ .name = "88pm800",
+ .irqs = pm800_irqs,
+ .num_irqs = ARRAY_SIZE(pm800_irqs),
+
+ .num_regs = 4,
+ .status_base = PM800_INT_STATUS1,
+ .mask_base = PM800_INT_ENA_1,
+ .ack_base = PM800_INT_STATUS1,
+};
+
+static int pm800_pages_init(struct pm80x_chip *chip)
+{
+ struct pm80x_subchip *subchip;
+ struct i2c_client *client = chip->client;
+
+ subchip = chip->subchip;
+ /* PM800 block power: i2c addr 0x31 */
+ if (subchip->power_page_addr) {
+ subchip->power_page =
+ i2c_new_dummy(client->adapter, subchip->power_page_addr);
+ subchip->regmap_power =
+ devm_regmap_init_i2c(subchip->power_page,
+ &pm80x_regmap_config);
+ i2c_set_clientdata(subchip->power_page, chip);
+ } else
+ dev_info(chip->dev,
+ "PM800 block power 0x31: No power_page_addr\n");
+
+ /* PM800 block GPADC: i2c addr 0x32 */
+ if (subchip->gpadc_page_addr) {
+ subchip->gpadc_page = i2c_new_dummy(client->adapter,
+ subchip->gpadc_page_addr);
+ subchip->regmap_gpadc =
+ devm_regmap_init_i2c(subchip->gpadc_page,
+ &pm80x_regmap_config);
+ i2c_set_clientdata(subchip->gpadc_page, chip);
+ } else
+ dev_info(chip->dev,
+ "PM800 block GPADC 0x32: No gpadc_page_addr\n");
+
+ return 0;
+}
+
+static void pm800_pages_exit(struct pm80x_chip *chip)
+{
+ struct pm80x_subchip *subchip;
+
+ regmap_exit(chip->regmap);
+ i2c_unregister_device(chip->client);
+
+ subchip = chip->subchip;
+ if (subchip->power_page) {
+ regmap_exit(subchip->regmap_power);
+ i2c_unregister_device(subchip->power_page);
+ }
+ if (subchip->gpadc_page) {
+ regmap_exit(subchip->regmap_gpadc);
+ i2c_unregister_device(subchip->gpadc_page);
+ }
+}
+
+static int __devinit device_800_init(struct pm80x_chip *chip,
+ struct pm80x_platform_data *pdata)
+{
+ int ret, pmic_id;
+ unsigned int val;
+
+ ret = regmap_read(chip->regmap, PM800_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
+ goto out;
+ }
+
+ pmic_id = val & PM80X_VERSION_MASK;
+
+ if ((pmic_id >= PM800_CHIP_A0) && (pmic_id <= PM800_CHIP_END)) {
+ chip->version = val;
+ dev_info(chip->dev,
+ "88PM80x:Marvell 88PM800 (ID:0x%x) detected\n", val);
+ } else {
+ dev_err(chip->dev,
+ "Failed to detect Marvell 88PM800:ChipID[0x%x]\n", val);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * alarm wake up bit will be clear in device_irq_init(),
+ * read before that
+ */
+ ret = regmap_read(chip->regmap, PM800_RTC_CONTROL, &val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read RTC register: %d\n", ret);
+ goto out;
+ }
+ if (val & PM800_ALARM_WAKEUP) {
+ if (pdata && pdata->rtc)
+ pdata->rtc->rtc_wakeup = 1;
+ }
+
+ ret = device_gpadc_init(chip, pdata);
+ if (ret < 0) {
+ dev_err(chip->dev, "[%s]Failed to init gpadc\n", __func__);
+ goto out;
+ }
+
+ chip->regmap_irq_chip = &pm800_irq_chip;
+
+ ret = device_irq_init_800(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "[%s]Failed to init pm800 irq\n", __func__);
+ goto out;
+ }
+
+ ret =
+ mfd_add_devices(chip->dev, 0, &onkey_devs[0],
+ ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add onkey subdev\n");
+ goto out_dev;
+ } else
+ dev_info(chip->dev, "[%s]:Added mfd onkey_devs\n", __func__);
+
+ if (pdata && pdata->rtc) {
+ rtc_devs[0].platform_data = pdata->rtc;
+ rtc_devs[0].pdata_size = sizeof(struct pm80x_rtc_pdata);
+ ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
+ ARRAY_SIZE(rtc_devs), NULL, 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add rtc subdev\n");
+ goto out_dev;
+ } else
+ dev_info(chip->dev,
+ "[%s]:Added mfd rtc_devs\n", __func__);
+ }
+
+ return 0;
+out_dev:
+ mfd_remove_devices(chip->dev);
+ device_irq_exit_800(chip);
+out:
+ return ret;
+}
+
+static int __devinit pm800_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct pm80x_chip *chip;
+ struct pm80x_platform_data *pdata = client->dev.platform_data;
+ struct pm80x_subchip *subchip;
+
+ ret = pm80x_init(client, id);
+ if (ret) {
+ dev_err(&client->dev, "pm800_init fail\n");
+ goto out_init;
+ }
+
+ chip = i2c_get_clientdata(client);
+
+ /* init subchip for PM800 */
+ subchip =
+ devm_kzalloc(&client->dev, sizeof(struct pm80x_subchip),
+ GFP_KERNEL);
+ if (!subchip) {
+ ret = -ENOMEM;
+ goto err_subchip_alloc;
+ }
+
+ subchip->power_page_addr = pdata->power_page_addr;
+ subchip->gpadc_page_addr = pdata->gpadc_page_addr;
+ chip->subchip = subchip;
+
+ ret = device_800_init(chip, pdata);
+ if (ret) {
+ dev_err(chip->dev, "%s id 0x%x failed!\n", __func__, chip->id);
+ goto err_800_init;
+ }
+
+ ret = pm800_pages_init(chip);
+ if (ret) {
+ dev_err(&client->dev, "pm800_pages_init failed!\n");
+ goto err_page_init;
+ }
+
+ if (pdata->plat_config)
+ pdata->plat_config(chip, pdata);
+
+err_page_init:
+ mfd_remove_devices(chip->dev);
+ device_irq_exit_800(chip);
+err_800_init:
+ devm_kfree(&client->dev, subchip);
+err_subchip_alloc:
+ pm80x_deinit(client);
+out_init:
+ return ret;
+}
+
+static int __devexit pm800_remove(struct i2c_client *client)
+{
+ struct pm80x_chip *chip = i2c_get_clientdata(client);
+
+ mfd_remove_devices(chip->dev);
+ device_irq_exit_800(chip);
+
+ pm800_pages_exit(chip);
+ devm_kfree(&client->dev, chip->subchip);
+
+ pm80x_deinit(client);
+
+ return 0;
+}
+
+static struct i2c_driver pm800_driver = {
+ .driver = {
+ .name = "88PM80X",
+ .owner = THIS_MODULE,
+ .pm = &pm80x_pm_ops,
+ },
+ .probe = pm800_probe,
+ .remove = __devexit_p(pm800_remove),
+ .id_table = pm80x_id_table,
+};
+
+static int __init pm800_i2c_init(void)
+{
+ return i2c_add_driver(&pm800_driver);
+}
+subsys_initcall(pm800_i2c_init);
+
+static void __exit pm800_i2c_exit(void)
+{
+ i2c_del_driver(&pm800_driver);
+}
+module_exit(pm800_i2c_exit);
+
+MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM800");
+MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
new file mode 100644
index 000000000000..6146583589f6
--- /dev/null
+++ b/drivers/mfd/88pm805.c
@@ -0,0 +1,301 @@
+/*
+ * Base driver for Marvell 88PM805
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Joseph(Yossi) Hanin <yhanin@marvell.com>
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#define PM805_CHIP_ID (0x00)
+
+static const struct i2c_device_id pm80x_id_table[] = {
+ {"88PM805", CHIP_PM805},
+ {} /* NULL terminated */
+};
+MODULE_DEVICE_TABLE(i2c, pm80x_id_table);
+
+/* Interrupt Number in 88PM805 */
+enum {
+ PM805_IRQ_LDO_OFF, /*0 */
+ PM805_IRQ_SRC_DPLL_LOCK, /*1 */
+ PM805_IRQ_CLIP_FAULT,
+ PM805_IRQ_MIC_CONFLICT,
+ PM805_IRQ_HP2_SHRT,
+ PM805_IRQ_HP1_SHRT, /*5 */
+ PM805_IRQ_FINE_PLL_FAULT,
+ PM805_IRQ_RAW_PLL_FAULT,
+ PM805_IRQ_VOLP_BTN_DET,
+ PM805_IRQ_VOLM_BTN_DET,
+ PM805_IRQ_SHRT_BTN_DET, /*10 */
+ PM805_IRQ_MIC_DET, /*11 */
+
+ PM805_MAX_IRQ,
+};
+
+static struct resource codec_resources[] = {
+ {
+ /* Headset microphone insertion or removal */
+ .name = "micin",
+ .start = PM805_IRQ_MIC_DET,
+ .end = PM805_IRQ_MIC_DET,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* Audio short HP1 */
+ .name = "audio-short1",
+ .start = PM805_IRQ_HP1_SHRT,
+ .end = PM805_IRQ_HP1_SHRT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* Audio short HP2 */
+ .name = "audio-short2",
+ .start = PM805_IRQ_HP2_SHRT,
+ .end = PM805_IRQ_HP2_SHRT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell codec_devs[] = {
+ {
+ .name = "88pm80x-codec",
+ .num_resources = ARRAY_SIZE(codec_resources),
+ .resources = &codec_resources[0],
+ .id = -1,
+ },
+};
+
+static struct regmap_irq pm805_irqs[] = {
+ /* INT0 */
+ [PM805_IRQ_LDO_OFF] = {
+ .mask = PM805_INT1_HP1_SHRT,
+ },
+ [PM805_IRQ_SRC_DPLL_LOCK] = {
+ .mask = PM805_INT1_HP2_SHRT,
+ },
+ [PM805_IRQ_CLIP_FAULT] = {
+ .mask = PM805_INT1_MIC_CONFLICT,
+ },
+ [PM805_IRQ_MIC_CONFLICT] = {
+ .mask = PM805_INT1_CLIP_FAULT,
+ },
+ [PM805_IRQ_HP2_SHRT] = {
+ .mask = PM805_INT1_LDO_OFF,
+ },
+ [PM805_IRQ_HP1_SHRT] = {
+ .mask = PM805_INT1_SRC_DPLL_LOCK,
+ },
+ /* INT1 */
+ [PM805_IRQ_FINE_PLL_FAULT] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_MIC_DET,
+ },
+ [PM805_IRQ_RAW_PLL_FAULT] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_SHRT_BTN_DET,
+ },
+ [PM805_IRQ_VOLP_BTN_DET] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_VOLM_BTN_DET,
+ },
+ [PM805_IRQ_VOLM_BTN_DET] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_VOLP_BTN_DET,
+ },
+ [PM805_IRQ_SHRT_BTN_DET] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_RAW_PLL_FAULT,
+ },
+ [PM805_IRQ_MIC_DET] = {
+ .reg_offset = 1,
+ .mask = PM805_INT2_FINE_PLL_FAULT,
+ },
+};
+
+static int __devinit device_irq_init_805(struct pm80x_chip *chip)
+{
+ struct regmap *map = chip->regmap;
+ unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
+ int data, mask, ret = -EINVAL;
+
+ if (!map || !chip->irq) {
+ dev_err(chip->dev, "incorrect parameters\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * irq_mode defines how the interrupt is cleared; it is
+	 * read-clear by default.
+ */
+ mask =
+ PM805_STATUS0_INT_CLEAR | PM805_STATUS0_INV_INT |
+ PM800_STATUS0_INT_MASK;
+
+ data = PM805_STATUS0_INT_CLEAR;
+ ret = regmap_update_bits(map, PM805_INT_STATUS0, mask, data);
+ /*
+	 * PM805_INT_STATUS is in the 32 kHz clock domain, so a proper
+	 * delay is needed before the next I2C register access.
+ */
+ msleep(1);
+
+ if (ret < 0)
+ goto out;
+
+ ret =
+ regmap_add_irq_chip(chip->regmap, chip->irq, flags, -1,
+ chip->regmap_irq_chip, &chip->irq_data);
+
+out:
+ return ret;
+}
+
+static void device_irq_exit_805(struct pm80x_chip *chip)
+{
+ regmap_del_irq_chip(chip->irq, chip->irq_data);
+}
+
+static struct regmap_irq_chip pm805_irq_chip = {
+ .name = "88pm805",
+ .irqs = pm805_irqs,
+ .num_irqs = ARRAY_SIZE(pm805_irqs),
+
+ .num_regs = 2,
+ .status_base = PM805_INT_STATUS1,
+ .mask_base = PM805_INT_MASK1,
+ .ack_base = PM805_INT_STATUS1,
+};
+
+static int __devinit device_805_init(struct pm80x_chip *chip)
+{
+ int ret = 0;
+ unsigned int val;
+ struct regmap *map = chip->regmap;
+
+ if (!map) {
+ dev_err(chip->dev, "regmap is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_read(map, PM805_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
+ goto out_irq_init;
+ }
+ chip->version = val;
+
+ chip->regmap_irq_chip = &pm805_irq_chip;
+
+ ret = device_irq_init_805(chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to init pm805 irq!\n");
+ goto out_irq_init;
+ }
+
+ ret = mfd_add_devices(chip->dev, 0, &codec_devs[0],
+ ARRAY_SIZE(codec_devs), &codec_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add codec subdev\n");
+ goto out_codec;
+ } else
+ dev_info(chip->dev, "[%s]:Added mfd codec_devs\n", __func__);
+
+ return 0;
+
+out_codec:
+ device_irq_exit_805(chip);
+out_irq_init:
+ return ret;
+}
+
+static int __devinit pm805_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ struct pm80x_chip *chip;
+ struct pm80x_platform_data *pdata = client->dev.platform_data;
+
+ ret = pm80x_init(client, id);
+ if (ret) {
+ dev_err(&client->dev, "pm805_init fail!\n");
+ goto out_init;
+ }
+
+ chip = i2c_get_clientdata(client);
+
+ ret = device_805_init(chip);
+ if (ret) {
+ dev_err(chip->dev, "%s id 0x%x failed!\n", __func__, chip->id);
+ goto err_805_init;
+ }
+
+ if (pdata->plat_config)
+ pdata->plat_config(chip, pdata);
+
+err_805_init:
+ pm80x_deinit(client);
+out_init:
+ return ret;
+}
+
+static int __devexit pm805_remove(struct i2c_client *client)
+{
+ struct pm80x_chip *chip = i2c_get_clientdata(client);
+
+ mfd_remove_devices(chip->dev);
+ device_irq_exit_805(chip);
+
+ pm80x_deinit(client);
+
+ return 0;
+}
+
+static struct i2c_driver pm805_driver = {
+ .driver = {
+ .name = "88PM80X",
+ .owner = THIS_MODULE,
+ .pm = &pm80x_pm_ops,
+ },
+ .probe = pm805_probe,
+ .remove = __devexit_p(pm805_remove),
+ .id_table = pm80x_id_table,
+};
+
+static int __init pm805_i2c_init(void)
+{
+ return i2c_add_driver(&pm805_driver);
+}
+subsys_initcall(pm805_i2c_init);
+
+static void __exit pm805_i2c_exit(void)
+{
+ i2c_del_driver(&pm805_driver);
+}
+module_exit(pm805_i2c_exit);
+
+MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM805");
+MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/88pm80x.c b/drivers/mfd/88pm80x.c
new file mode 100644
index 000000000000..cd0bf527d764
--- /dev/null
+++ b/drivers/mfd/88pm80x.c
@@ -0,0 +1,145 @@
+/*
+ * I2C driver for Marvell 88PM80x
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ * Joseph(Yossi) Hanin <yhanin@marvell.com>
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/err.h>
+
+/*
+ * workaround: some registers needed by the pm805 are defined in the
+ * pm800, so this global variable is needed to maintain the relation
+ * between the pm800 and pm805. It will be removed once the HW chip
+ * fixes the issue.
+ */
+static struct pm80x_chip *g_pm80x_chip;
+
+const struct regmap_config pm80x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+EXPORT_SYMBOL_GPL(pm80x_regmap_config);
+
+int __devinit pm80x_init(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pm80x_chip *chip;
+ struct regmap *map;
+ int ret = 0;
+
+ chip =
+ devm_kzalloc(&client->dev, sizeof(struct pm80x_chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ map = devm_regmap_init_i2c(client, &pm80x_regmap_config);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err_regmap_init;
+ }
+
+ chip->id = id->driver_data;
+ if (chip->id < CHIP_PM800 || chip->id > CHIP_PM805) {
+ ret = -EINVAL;
+ goto err_chip_id;
+ }
+
+ chip->client = client;
+ chip->regmap = map;
+
+ chip->irq = client->irq;
+
+ chip->dev = &client->dev;
+ dev_set_drvdata(chip->dev, chip);
+ i2c_set_clientdata(chip->client, chip);
+
+ device_init_wakeup(&client->dev, 1);
+
+ /*
+	 * workaround: set g_pm80x_chip to the first probed chip. If the
+	 * second chip is probed, point the two chips at each other as
+	 * companions so that the pm805 can access those specific registers.
+	 * This will be removed once the HW chip fixes the issue.
+ */
+ if (!g_pm80x_chip)
+ g_pm80x_chip = chip;
+ else {
+ chip->companion = g_pm80x_chip->client;
+ g_pm80x_chip->companion = chip->client;
+ }
+
+ return 0;
+
+err_chip_id:
+ regmap_exit(map);
+err_regmap_init:
+ devm_kfree(&client->dev, chip);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm80x_init);
+
+int pm80x_deinit(struct i2c_client *client)
+{
+ struct pm80x_chip *chip = i2c_get_clientdata(client);
+
+ /*
+	 * workaround: clear the dependency between the pm800 and pm805.
+	 * This will be removed once the HW chip fixes the issue.
+ */
+ if (g_pm80x_chip->companion)
+ g_pm80x_chip->companion = NULL;
+ else
+ g_pm80x_chip = NULL;
+
+ regmap_exit(chip->regmap);
+ devm_kfree(&client->dev, chip);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm80x_deinit);
+
+#ifdef CONFIG_PM_SLEEP
+static int pm80x_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct pm80x_chip *chip = i2c_get_clientdata(client);
+
+ if (chip && chip->wu_flag)
+ if (device_may_wakeup(chip->dev))
+ enable_irq_wake(chip->irq);
+
+ return 0;
+}
+
+static int pm80x_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct pm80x_chip *chip = i2c_get_clientdata(client);
+
+ if (chip && chip->wu_flag)
+ if (device_may_wakeup(chip->dev))
+ disable_irq_wake(chip->irq);
+
+ return 0;
+}
+#endif
+
+SIMPLE_DEV_PM_OPS(pm80x_pm_ops, pm80x_suspend, pm80x_resume);
+EXPORT_SYMBOL_GPL(pm80x_pm_ops);
+
+MODULE_DESCRIPTION("I2C Driver for Marvell 88PM80x");
+MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index 87bd5ba38d5b..d09918cf1b15 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -90,6 +90,10 @@ static struct resource charger_resources[] __devinitdata = {
{PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
};
+static struct resource preg_resources[] __devinitdata = {
+ {PM8606_ID_PREG, PM8606_ID_PREG, "preg", IORESOURCE_IO,},
+};
+
static struct resource rtc_resources[] __devinitdata = {
{PM8607_IRQ_RTC, PM8607_IRQ_RTC, "rtc", IORESOURCE_IRQ,},
};
@@ -142,9 +146,19 @@ static struct mfd_cell codec_devs[] = {
{"88pm860x-codec", -1,},
};
+static struct regulator_consumer_supply preg_supply[] = {
+ REGULATOR_SUPPLY("preg", "charger-manager"),
+};
+
+static struct regulator_init_data preg_init_data = {
+ .num_consumer_supplies = ARRAY_SIZE(preg_supply),
+ .consumer_supplies = &preg_supply[0],
+};
+
static struct mfd_cell power_devs[] = {
{"88pm860x-battery", -1,},
{"88pm860x-charger", -1,},
+ {"88pm860x-preg", -1,},
};
static struct mfd_cell rtc_devs[] = {
@@ -768,6 +782,15 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
&charger_resources[0], chip->irq_base);
if (ret < 0)
dev_err(chip->dev, "Failed to add charger subdev\n");
+
+ power_devs[2].platform_data = &preg_init_data;
+ power_devs[2].pdata_size = sizeof(struct regulator_init_data);
+ power_devs[2].num_resources = ARRAY_SIZE(preg_resources);
+	power_devs[2].resources = &preg_resources[0];
+ ret = mfd_add_devices(chip->dev, 0, &power_devs[2], 1,
+ &preg_resources[0], chip->irq_base);
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to add preg subdev\n");
}
static void __devinit device_onkey_init(struct pm860x_chip *chip,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 92144ed1ad46..b1a146205c08 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -7,6 +7,7 @@ menu "Multifunction device drivers"
config MFD_CORE
tristate
+ select IRQ_DOMAIN
default n
config MFD_88PM860X
@@ -20,6 +21,30 @@ config MFD_88PM860X
select individual components like voltage regulators, RTC and
battery-charger under the corresponding menus.
+config MFD_88PM800
+ tristate "Support Marvell 88PM800"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+	  This supports the Marvell 88PM800 Power Management IC.
+	  This includes the I2C driver and the core APIs _only_; you have to
+ select individual components like voltage regulators, RTC and
+ battery-charger under the corresponding menus.
+
+config MFD_88PM805
+ tristate "Support Marvell 88PM805"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select MFD_CORE
+ help
+	  This supports the Marvell 88PM805 Power Management IC. This includes
+	  the I2C driver and the core APIs _only_; you have to select individual
+ components like codec device, headset/Mic device under the
+ corresponding menus.
+
config MFD_SM501
tristate "Support for Silicon Motion SM501"
---help---
@@ -173,8 +198,9 @@ config MFD_TPS65217
config MFD_TPS6586X
bool "TPS6586x Power Management chips"
- depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
+ select REGMAP_I2C
depends on REGULATOR
help
If you say yes here you get support for the TPS6586X series of
@@ -276,6 +302,7 @@ config TWL6030_PWM
tristate "TWL6030 PWM (Pulse Width Modulator) Support"
depends on TWL4030_CORE
select HAVE_PWM
+ depends on !PWM
default n
help
Say yes here if you want support for TWL6030 PWM.
@@ -368,7 +395,8 @@ config MFD_TC6387XB
config MFD_TC6393XB
bool "Support Toshiba TC6393XB"
- depends on GPIOLIB && ARM && HAVE_CLK
+ depends on ARM && HAVE_CLK
+ select GPIOLIB
select MFD_CORE
select MFD_TMIO
help
@@ -423,6 +451,19 @@ config PMIC_ADP5520
individual components like LCD backlight, LEDs, GPIOs and Kepad
under the corresponding menus.
+config MFD_MAX77686
+ bool "Maxim Semiconductor MAX77686 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ select IRQ_DOMAIN
+ help
+	  Say yes here to add support for the Maxim Semiconductor MAX77686.
+ This is a Power Management IC with RTC on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX77693
bool "Maxim Semiconductor MAX77693 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
@@ -450,6 +491,7 @@ config MFD_MAX8997
bool "Maxim Semiconductor MAX8997/8966 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
+ select IRQ_DOMAIN
help
Say yes here to support for Maxim Semiconductor MAX8997/8966.
This is a Power Management IC with RTC, Flash, Fuel Gauge, Haptic,
@@ -469,17 +511,56 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
-config MFD_S5M_CORE
- bool "SAMSUNG S5M Series Support"
+config MFD_SEC_CORE
+ bool "SAMSUNG Electronics PMIC Series Support"
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
- Support for the Samsung Electronics S5M MFD series.
+ Support for the Samsung Electronics MFD series.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
of the device
+config MFD_ARIZONA
+ select REGMAP
+ select REGMAP_IRQ
+ select MFD_CORE
+ bool
+
+config MFD_ARIZONA_I2C
+ tristate "Support Wolfson Microelectronics Arizona platform with I2C"
+ select MFD_ARIZONA
+ select MFD_CORE
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Support for the Wolfson Microelectronics Arizona platform audio SoC
+ core functionality controlled via I2C.
+
+config MFD_ARIZONA_SPI
+ tristate "Support Wolfson Microelectronics Arizona platform with SPI"
+ select MFD_ARIZONA
+ select MFD_CORE
+ select REGMAP_SPI
+ depends on SPI_MASTER
+ help
+ Support for the Wolfson Microelectronics Arizona platform audio SoC
+	  core functionality controlled via SPI.
+
+config MFD_WM5102
+ bool "Support Wolfson Microelectronics WM5102"
+ depends on MFD_ARIZONA
+ help
+ Support for Wolfson Microelectronics WM5102 low power audio SoC
+
+config MFD_WM5110
+ bool "Support Wolfson Microelectronics WM5110"
+ depends on MFD_ARIZONA
+ help
+ Support for Wolfson Microelectronics WM5110 low power audio SoC
+
config MFD_WM8400
bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -697,6 +778,7 @@ config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
select MFD_CORE
+ select IRQ_DOMAIN
help
Select this option to enable access to AB8500 power management
chip. This connects to U8500 either on the SSP/SPI bus (deprecated
@@ -704,16 +786,6 @@ config AB8500_CORE
the irq_chip parts for handling the Mixed Signal chip events.
This chip embeds various other multimedia funtionalities as well.
-config AB8500_I2C_CORE
- bool "AB8500 register access via PRCMU I2C"
- depends on AB8500_CORE && MFD_DB8500_PRCMU
- default y
- help
- This enables register access to the AB8500 chip via PRCMU I2C.
- The AB8500 chip can be accessed via SPI or I2C. On DB8500 hardware
- the I2C bus is connected to the Power Reset
- and Mangagement Unit, PRCMU.
-
config AB8500_DEBUG
bool "Enable debug info via debugfs"
depends on AB8500_CORE && DEBUG_FS
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 75f6ed68a4b9..79dd22d1dc3d 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -4,6 +4,8 @@
88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
+obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
+obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
@@ -24,6 +26,16 @@ obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
+obj-$(CONFIG_MFD_ARIZONA) += arizona-core.o
+obj-$(CONFIG_MFD_ARIZONA) += arizona-irq.o
+obj-$(CONFIG_MFD_ARIZONA_I2C) += arizona-i2c.o
+obj-$(CONFIG_MFD_ARIZONA_SPI) += arizona-spi.o
+ifneq ($(CONFIG_MFD_WM5102),n)
+obj-$(CONFIG_MFD_ARIZONA) += wm5102-tables.o
+endif
+ifneq ($(CONFIG_MFD_WM5110),n)
+obj-$(CONFIG_MFD_ARIZONA) += wm5110-tables.o
+endif
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
wm831x-objs += wm831x-auxadc.o
@@ -78,6 +90,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
@@ -116,6 +129,6 @@ obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o
-obj-$(CONFIG_MFD_S5M_CORE) += s5m-core.o s5m-irq.o
+obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o
obj-$(CONFIG_MFD_ANATOP) += anatop-mfd.o
obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 1efad20fb175..78fca2902c8d 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
u32 fatevent;
int err;
- add_interrupt_randomness(irq);
-
err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
event_regs, 3);
if (err)
@@ -867,7 +865,7 @@ static int __devinit ab3100_probe(struct i2c_client *client,
int err;
int i;
- ab3100 = kzalloc(sizeof(struct ab3100), GFP_KERNEL);
+ ab3100 = devm_kzalloc(&client->dev, sizeof(struct ab3100), GFP_KERNEL);
if (!ab3100) {
dev_err(&client->dev, "could not allocate AB3100 device\n");
return -ENOMEM;
@@ -921,7 +919,7 @@ static int __devinit ab3100_probe(struct i2c_client *client,
/* Attach a second dummy i2c_client to the test register address */
ab3100->testreg_client = i2c_new_dummy(client->adapter,
- client->addr + 1);
+ client->addr + 1);
if (!ab3100->testreg_client) {
err = -ENOMEM;
goto exit_no_testreg_client;
@@ -931,11 +929,9 @@ static int __devinit ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_setup;
- err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
- IRQF_ONESHOT, "ab3100-core", ab3100);
- /* This real unpredictable IRQ is of course sampled for entropy */
- rand_initialize_irq(client->irq);
-
+ err = devm_request_threaded_irq(&client->dev,
+ client->irq, NULL, ab3100_irq_handler,
+ IRQF_ONESHOT, "ab3100-core", ab3100);
if (err)
goto exit_no_irq;
@@ -962,7 +958,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
i2c_unregister_device(ab3100->testreg_client);
exit_no_testreg_client:
exit_no_detect:
- kfree(ab3100);
return err;
}
@@ -972,16 +967,8 @@ static int __devexit ab3100_remove(struct i2c_client *client)
/* Unregister subdevices */
mfd_remove_devices(&client->dev);
-
ab3100_remove_debugfs();
i2c_unregister_device(ab3100->testreg_client);
-
- /*
- * At this point, all subscribers should have unregistered
- * their notifiers so deactivate IRQ
- */
- free_irq(client->irq, ab3100);
- kfree(ab3100);
return 0;
}
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index dac0e2998603..626b4ecaf647 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -140,7 +141,7 @@ static const char ab8500_version_str[][7] = {
[AB8500_VERSION_AB8540] = "AB8540",
};
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+static int ab8500_prcmu_write(struct ab8500 *ab8500, u16 addr, u8 data)
{
int ret;
@@ -150,7 +151,7 @@ static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
return ret;
}
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+static int ab8500_prcmu_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
u8 data)
{
int ret;
@@ -162,7 +163,7 @@ static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
return ret;
}
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+static int ab8500_prcmu_read(struct ab8500 *ab8500, u16 addr)
{
int ret;
u8 data;
@@ -361,7 +362,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
static void ab8500_irq_mask(struct irq_data *data)
{
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
- int offset = data->irq - ab8500->irq_base;
+ int offset = data->hwirq;
int index = offset / 8;
int mask = 1 << (offset % 8);
@@ -371,7 +372,7 @@ static void ab8500_irq_mask(struct irq_data *data)
static void ab8500_irq_unmask(struct irq_data *data)
{
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
- int offset = data->irq - ab8500->irq_base;
+ int offset = data->hwirq;
int index = offset / 8;
int mask = 1 << (offset % 8);
@@ -510,38 +511,51 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int ab8500_irq_init(struct ab8500 *ab8500)
+/**
+ * ab8500_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
+ *
+ * @ab8500: ab8500_irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int ab8500_irq_get_virq(struct ab8500 *ab8500, int irq)
{
- int base = ab8500->irq_base;
- int irq;
- int num_irqs;
+ if (!ab8500)
+ return -EINVAL;
- if (is_ab9540(ab8500))
- num_irqs = AB9540_NR_IRQS;
- else if (is_ab8505(ab8500))
- num_irqs = AB8505_NR_IRQS;
- else
- num_irqs = AB8500_NR_IRQS;
+ return irq_create_mapping(ab8500->domain, irq);
+}
+EXPORT_SYMBOL_GPL(ab8500_irq_get_virq);
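A rough usage sketch (hypothetical caller, not part of this patch): an AB8500 subdevice driver can translate a chip interrupt index into a Linux virq with ab8500_irq_get_virq() and then request it as a threaded IRQ. The interrupt index name below is illustrative only.

	/*
	 * Hypothetical AB8500 child driver: map a chip IRQ index to a
	 * virq before requesting it.  The index name is illustrative.
	 */
	static irqreturn_t example_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_child_probe(struct platform_device *pdev)
	{
		struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
		int virq;

		virq = ab8500_irq_get_virq(ab8500, AB8500_INT_PON_KEY1DB_R);
		if (virq < 0)
			return virq;

		return request_threaded_irq(virq, NULL, example_handler,
					    IRQF_ONESHOT, "example-child", pdev);
	}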
+
+static int ab8500_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct ab8500 *ab8500 = d->host_data;
- for (irq = base; irq < base + num_irqs; irq++) {
- irq_set_chip_data(irq, ab8500);
- irq_set_chip_and_handler(irq, &ab8500_irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(irq, 1);
+ if (!ab8500)
+ return -EINVAL;
+
+ irq_set_chip_data(virq, ab8500);
+ irq_set_chip_and_handler(virq, &ab8500_irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(virq, 1);
#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
+ set_irq_flags(virq, IRQF_VALID);
#else
- irq_set_noprobe(irq);
+ irq_set_noprobe(virq);
#endif
- }
return 0;
}
-static void ab8500_irq_remove(struct ab8500 *ab8500)
+static struct irq_domain_ops ab8500_irq_ops = {
+ .map = ab8500_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
{
- int base = ab8500->irq_base;
- int irq;
int num_irqs;
if (is_ab9540(ab8500))
@@ -551,13 +565,22 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
else
num_irqs = AB8500_NR_IRQS;
- for (irq = base; irq < base + num_irqs; irq++) {
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
+ if (ab8500->irq_base) {
+ ab8500->domain = irq_domain_add_legacy(
+ NULL, num_irqs, ab8500->irq_base,
+ 0, &ab8500_irq_ops, ab8500);
+ }
+ else {
+ ab8500->domain = irq_domain_add_linear(
+ np, num_irqs, &ab8500_irq_ops, ab8500);
+ }
+
+ if (!ab8500->domain) {
+ dev_err(ab8500->dev, "Failed to create irqdomain\n");
+ return -ENOSYS;
}
+
+ return 0;
}
int ab8500_suspend(struct ab8500 *ab8500)
@@ -947,54 +970,69 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
#ifdef CONFIG_DEBUG_FS
{
.name = "ab8500-debug",
+ .of_compatible = "stericsson,ab8500-debug",
.num_resources = ARRAY_SIZE(ab8500_debug_resources),
.resources = ab8500_debug_resources,
},
#endif
{
.name = "ab8500-sysctrl",
+ .of_compatible = "stericsson,ab8500-sysctrl",
},
{
.name = "ab8500-regulator",
+ .of_compatible = "stericsson,ab8500-regulator",
},
{
.name = "ab8500-gpadc",
+ .of_compatible = "stericsson,ab8500-gpadc",
.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
.resources = ab8500_gpadc_resources,
},
{
.name = "ab8500-rtc",
+ .of_compatible = "stericsson,ab8500-rtc",
.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
.resources = ab8500_rtc_resources,
},
{
.name = "ab8500-acc-det",
+ .of_compatible = "stericsson,ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
},
{
.name = "ab8500-poweron-key",
+ .of_compatible = "stericsson,ab8500-poweron-key",
.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
.resources = ab8500_poweronkey_db_resources,
},
{
.name = "ab8500-pwm",
+ .of_compatible = "stericsson,ab8500-pwm",
.id = 1,
},
{
.name = "ab8500-pwm",
+ .of_compatible = "stericsson,ab8500-pwm",
.id = 2,
},
{
.name = "ab8500-pwm",
+ .of_compatible = "stericsson,ab8500-pwm",
.id = 3,
},
- { .name = "ab8500-leds", },
+ {
+ .name = "ab8500-leds",
+ .of_compatible = "stericsson,ab8500-leds",
+ },
{
.name = "ab8500-denc",
+ .of_compatible = "stericsson,ab8500-denc",
},
{
.name = "ab8500-temp",
+ .of_compatible = "stericsson,ab8500-temp",
.num_resources = ARRAY_SIZE(ab8500_temp_resources),
.resources = ab8500_temp_resources,
},
@@ -1026,11 +1064,13 @@ static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
static struct mfd_cell __devinitdata ab8500_devs[] = {
{
.name = "ab8500-gpio",
+ .of_compatible = "stericsson,ab8500-gpio",
.num_resources = ARRAY_SIZE(ab8500_gpio_resources),
.resources = ab8500_gpio_resources,
},
{
.name = "ab8500-usb",
+ .of_compatible = "stericsson,ab8500-usb",
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
@@ -1207,16 +1247,17 @@ static struct attribute_group ab9540_attr_group = {
.attrs = ab9540_sysfs_entries,
};
-static const struct of_device_id ab8500_match[] = {
- {
- .compatible = "stericsson,ab8500",
- .data = (void *)AB8500_VERSION_AB8500,
- },
- {},
-};
-
static int __devinit ab8500_probe(struct platform_device *pdev)
{
+ static char *switch_off_status[] = {
+ "Swoff bit programming",
+ "Thermal protection activation",
+ "Vbat lower then BattOk falling threshold",
+ "Watchdog expired",
+ "Non presence of 32kHz clock",
+ "Battery level lower than power on reset threshold",
+ "Power on key 1 pressed longer than 10 seconds",
+ "DB8500 thermal shutdown"};
struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
const struct platform_device_id *platid = platform_get_device_id(pdev);
enum ab8500_version version = AB8500_VERSION_UNDEFINED;
@@ -1233,14 +1274,6 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
if (plat)
ab8500->irq_base = plat->irq_base;
- else if (np)
- ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
-
- if (!ab8500->irq_base) {
- dev_info(&pdev->dev, "couldn't find irq-base\n");
- ret = -EINVAL;
- goto out_free_ab8500;
- }
ab8500->dev = &pdev->dev;
@@ -1252,9 +1285,9 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
ab8500->irq = resource->start;
- ab8500->read = ab8500_i2c_read;
- ab8500->write = ab8500_i2c_write;
- ab8500->write_masked = ab8500_i2c_write_masked;
+ ab8500->read = ab8500_prcmu_read;
+ ab8500->write = ab8500_prcmu_write;
+ ab8500->write_masked = ab8500_prcmu_write_masked;
mutex_init(&ab8500->lock);
mutex_init(&ab8500->irq_lock);
@@ -1264,9 +1297,6 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
if (platid)
version = platid->driver_data;
- else if (np)
- version = (unsigned int)
- of_match_device(ab8500_match, &pdev->dev)->data;
if (version != AB8500_VERSION_UNDEFINED)
ab8500->version = version;
@@ -1323,7 +1353,20 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
AB8500_SWITCH_OFF_STATUS, &value);
if (ret < 0)
return ret;
- dev_info(ab8500->dev, "switch off status: %#x", value);
+ dev_info(ab8500->dev, "switch off cause(s) (%#x): ", value);
+
+ if (value) {
+ for (i = 0; i < ARRAY_SIZE(switch_off_status); i++) {
+ if (value & 1)
+ printk(KERN_CONT " \"%s\"",
+ switch_off_status[i]);
+ value = value >> 1;
+
+ }
+ printk(KERN_CONT "\n");
+ } else {
+ printk(KERN_CONT " None\n");
+ }
if (plat && plat->init)
plat->init(ab8500);
@@ -1352,53 +1395,50 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
for (i = 0; i < ab8500->mask_size; i++)
ab8500->mask[i] = ab8500->oldmask[i] = 0xff;
- if (ab8500->irq_base) {
- ret = ab8500_irq_init(ab8500);
- if (ret)
- goto out_freeoldmask;
+ ret = ab8500_irq_init(ab8500, np);
+ if (ret)
+ goto out_freeoldmask;
- /* Activate this feature only in ab9540 */
- /* till tests are done on ab8500 1p2 or later*/
- if (is_ab9540(ab8500))
- ret = request_threaded_irq(ab8500->irq, NULL,
+ /* Activate this feature only in ab9540 */
+ /* till tests are done on ab8500 1p2 or later*/
+ if (is_ab9540(ab8500)) {
+ ret = request_threaded_irq(ab8500->irq, NULL,
ab8500_hierarchical_irq,
IRQF_ONESHOT | IRQF_NO_SUSPEND,
"ab8500", ab8500);
- else
- ret = request_threaded_irq(ab8500->irq, NULL,
+ }
+ else {
+ ret = request_threaded_irq(ab8500->irq, NULL,
ab8500_irq,
IRQF_ONESHOT | IRQF_NO_SUSPEND,
"ab8500", ab8500);
if (ret)
- goto out_removeirq;
+ goto out_freeoldmask;
}
- if (!np) {
- ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
- ARRAY_SIZE(abx500_common_devs), NULL,
- ab8500->irq_base);
+ ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+ ARRAY_SIZE(abx500_common_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
- if (ret)
- goto out_freeirq;
-
- if (is_ab9540(ab8500))
- ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- else
- ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
- ARRAY_SIZE(ab8500_devs), NULL,
- ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+ ab8500->irq_base);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
- if (is_ab9540(ab8500) || is_ab8505(ab8500))
- ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
- ARRAY_SIZE(ab9540_ab8505_devs), NULL,
- ab8500->irq_base);
- if (ret)
- goto out_freeirq;
- }
+ if (is_ab9540(ab8500) || is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+ ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
if (!no_bm) {
/* Add battery management devices */
@@ -1417,15 +1457,11 @@ static int __devinit ab8500_probe(struct platform_device *pdev)
&ab8500_attr_group);
if (ret)
dev_err(ab8500->dev, "error creating sysfs entries\n");
- else
- return ret;
+
+ return ret;
out_freeirq:
- if (ab8500->irq_base)
- free_irq(ab8500->irq, ab8500);
-out_removeirq:
- if (ab8500->irq_base)
- ab8500_irq_remove(ab8500);
+ free_irq(ab8500->irq, ab8500);
out_freeoldmask:
kfree(ab8500->oldmask);
out_freemask:
@@ -1444,11 +1480,10 @@ static int __devexit ab8500_remove(struct platform_device *pdev)
sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
else
sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
+
mfd_remove_devices(ab8500->dev);
- if (ab8500->irq_base) {
- free_irq(ab8500->irq, ab8500);
- ab8500_irq_remove(ab8500);
- }
+ free_irq(ab8500->irq, ab8500);
+
kfree(ab8500->oldmask);
kfree(ab8500->mask);
kfree(ab8500);
@@ -1468,7 +1503,6 @@ static struct platform_driver ab8500_core_driver = {
.driver = {
.name = "ab8500-core",
.owner = THIS_MODULE,
- .of_match_table = ab8500_match,
},
.probe = ab8500_probe,
.remove = __devexit_p(ab8500_remove),
@@ -1484,7 +1518,7 @@ static void __exit ab8500_core_exit(void)
{
platform_driver_unregister(&ab8500_core_driver);
}
-arch_initcall(ab8500_core_init);
+core_initcall(ab8500_core_init);
module_exit(ab8500_core_exit);
MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 50c4c89ab220..c4cb806978ac 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -31,12 +31,12 @@ struct ab8500_reg_range {
};
/**
- * struct ab8500_i2c_ranges
+ * struct ab8500_prcmu_ranges
* @num_ranges: the number of ranges in the list
* @bankid: bank identifier
* @range: the list of register ranges
*/
-struct ab8500_i2c_ranges {
+struct ab8500_prcmu_ranges {
u8 num_ranges;
u8 bankid;
const struct ab8500_reg_range *range;
@@ -47,7 +47,7 @@ struct ab8500_i2c_ranges {
#define AB8500_REV_REG 0x80
-static struct ab8500_i2c_ranges debug_ranges[AB8500_NUM_BANKS] = {
+static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
[0x0] = {
.num_ranges = 0,
.range = 0,
@@ -608,16 +608,10 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
return 0;
}
-static const struct of_device_id ab8500_debug_match[] = {
- { .compatible = "stericsson,ab8500-debug", },
- {}
-};
-
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
.owner = THIS_MODULE,
- .of_match_table = ab8500_debug_match,
},
.probe = ab8500_debug_probe,
.remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index b86fd8e1ec3f..866f95960b4b 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -599,7 +599,8 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
/* Register interrupt - SwAdcComplete */
ret = request_threaded_irq(gpadc->irq, NULL,
ab8500_bm_gpswadcconvend_handler,
- IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc", gpadc);
+ IRQF_ONESHOT | IRQF_NO_SUSPEND | IRQF_SHARED,
+ "ab8500-gpadc", gpadc);
if (ret < 0) {
dev_err(gpadc->dev, "Failed to register interrupt, irq: %d\n",
gpadc->irq);
@@ -648,18 +649,12 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ab8500_gpadc_match[] = {
- { .compatible = "stericsson,ab8500-gpadc", },
- {}
-};
-
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = __devexit_p(ab8500_gpadc_remove),
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
- .of_match_table = ab8500_gpadc_match,
},
};
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 5a3e51ccf258..c28d4eb1eff0 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,16 +61,10 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ab8500_sysctrl_match[] = {
- { .compatible = "stericsson,ab8500-sysctrl", },
- {}
-};
-
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
.owner = THIS_MODULE,
- .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index 8d816cce8322..ea8b9475731d 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -320,7 +320,7 @@ static int __devexit adp5520_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int adp5520_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 6da06341f6c9..5576e07576de 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -83,7 +83,7 @@ static int __devinit of_anatop_probe(struct platform_device *pdev)
drvdata->ioreg = ioreg;
spin_lock_init(&drvdata->reglock);
platform_set_drvdata(pdev, drvdata);
- of_platform_populate(np, of_anatop_match, NULL, dev);
+ of_platform_populate(np, NULL, NULL, dev);
return 0;
}
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
new file mode 100644
index 000000000000..c7983e862549
--- /dev/null
+++ b/drivers/mfd/arizona-core.c
@@ -0,0 +1,566 @@
+/*
+ * Arizona core driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+
+static const char *wm5102_core_supplies[] = {
+ "AVDD",
+ "DBVDD1",
+};
+
+int arizona_clk32k_enable(struct arizona *arizona)
+{
+ int ret = 0;
+
+ mutex_lock(&arizona->clk_lock);
+
+ arizona->clk32k_ref++;
+
+ if (arizona->clk32k_ref == 1)
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
+ ARIZONA_CLK_32K_ENA,
+ ARIZONA_CLK_32K_ENA);
+
+ if (ret != 0)
+ arizona->clk32k_ref--;
+
+ mutex_unlock(&arizona->clk_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_clk32k_enable);
+
+int arizona_clk32k_disable(struct arizona *arizona)
+{
+ int ret = 0;
+
+ mutex_lock(&arizona->clk_lock);
+
+ BUG_ON(arizona->clk32k_ref <= 0);
+
+ arizona->clk32k_ref--;
+
+ if (arizona->clk32k_ref == 0)
+ regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
+ ARIZONA_CLK_32K_ENA, 0);
+
+ mutex_unlock(&arizona->clk_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_clk32k_disable);
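A minimal usage sketch (hypothetical consumer, not part of this patch): enable and disable calls must stay balanced, since the 32kHz clock is reference counted by the core driver.

	/* Hypothetical consumer of the reference-counted 32kHz clock. */
	static int example_use_clk32k(struct arizona *arizona)
	{
		int ret;

		ret = arizona_clk32k_enable(arizona);
		if (ret != 0)
			return ret;

		/* ... blocks that depend on the 32kHz clock run here ... */

		return arizona_clk32k_disable(arizona);
	}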
+
+static irqreturn_t arizona_clkgen_err(int irq, void *data)
+{
+ struct arizona *arizona = data;
+
+ dev_err(arizona->dev, "CLKGEN error\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arizona_underclocked(int irq, void *data)
+{
+ struct arizona *arizona = data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_8,
+ &val);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read underclock status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (val & ARIZONA_AIF3_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF3 underclocked\n");
+ if (val & ARIZONA_AIF3_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF3 underclocked\n");
+ if (val & ARIZONA_AIF2_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF1 underclocked\n");
+ if (val & ARIZONA_ISRC2_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC2 underclocked\n");
+ if (val & ARIZONA_ISRC1_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC1 underclocked\n");
+ if (val & ARIZONA_FX_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "FX underclocked\n");
+ if (val & ARIZONA_ASRC_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "ASRC underclocked\n");
+ if (val & ARIZONA_DAC_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "DAC underclocked\n");
+ if (val & ARIZONA_ADC_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "ADC underclocked\n");
+ if (val & ARIZONA_MIXER_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "Mixer underclocked\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arizona_overclocked(int irq, void *data)
+{
+ struct arizona *arizona = data;
+ unsigned int val[2];
+ int ret;
+
+ ret = regmap_bulk_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_6,
+ &val[0], 2);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read overclock status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (val[0] & ARIZONA_PWM_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "PWM overclocked\n");
+ if (val[0] & ARIZONA_FX_CORE_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "FX core overclocked\n");
+ if (val[0] & ARIZONA_DAC_SYS_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "DAC SYS overclocked\n");
+ if (val[0] & ARIZONA_DAC_WARP_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "DAC WARP overclocked\n");
+ if (val[0] & ARIZONA_ADC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ADC overclocked\n");
+ if (val[0] & ARIZONA_MIXER_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "Mixer overclocked\n");
+ if (val[0] & ARIZONA_AIF3_SYNC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF3 overclocked\n");
+ if (val[0] & ARIZONA_AIF2_SYNC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF2 overclocked\n");
+ if (val[0] & ARIZONA_AIF1_SYNC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "AIF1 overclocked\n");
+ if (val[0] & ARIZONA_PAD_CTRL_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "Pad control overclocked\n");
+
+ if (val[1] & ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "Slimbus subsystem overclocked\n");
+ if (val[1] & ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "Slimbus async overclocked\n");
+ if (val[1] & ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "Slimbus sync overclocked\n");
+ if (val[1] & ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ASRC async system overclocked\n");
+ if (val[1] & ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ASRC async WARP overclocked\n");
+ if (val[1] & ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ASRC sync system overclocked\n");
+ if (val[1] & ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ASRC sync WARP overclocked\n");
+ if (val[1] & ARIZONA_ADSP2_1_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "DSP1 overclocked\n");
+ if (val[1] & ARIZONA_ISRC2_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC2 overclocked\n");
+ if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC1 overclocked\n");
+
+ return IRQ_HANDLED;
+}
+
+static int arizona_wait_for_boot(struct arizona *arizona)
+{
+ unsigned int reg;
+ int ret, i;
+
+ /*
+ * We can't use an interrupt here since we would need to runtime
+ * resume to do so; we won't race with the interrupt handler since
+ * it will be blocked on runtime resume.
+ */
+ for (i = 0; i < 5; i++) {
+ msleep(1);
+
+ ret = regmap_read(arizona->regmap,
+ ARIZONA_INTERRUPT_RAW_STATUS_5, &reg);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to read boot state: %d\n",
+ ret);
+ continue;
+ }
+
+ if (reg & ARIZONA_BOOT_DONE_STS)
+ break;
+ }
+
+ if (reg & ARIZONA_BOOT_DONE_STS) {
+ regmap_write(arizona->regmap, ARIZONA_INTERRUPT_STATUS_5,
+ ARIZONA_BOOT_DONE_STS);
+ } else {
+ dev_err(arizona->dev, "Device boot timed out: %x\n", reg);
+ return -ETIMEDOUT;
+ }
+
+ pm_runtime_mark_last_busy(arizona->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int arizona_runtime_resume(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+ int ret;
+
+ dev_dbg(arizona->dev, "Leaving AoD mode\n");
+
+ ret = regulator_enable(arizona->dcvdd);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to enable DCVDD: %d\n", ret);
+ return ret;
+ }
+
+ regcache_cache_only(arizona->regmap, false);
+
+ ret = arizona_wait_for_boot(arizona);
+ if (ret != 0) {
+ regulator_disable(arizona->dcvdd);
+ return ret;
+ }
+
+ regcache_sync(arizona->regmap);
+
+ return 0;
+}
+
+static int arizona_runtime_suspend(struct device *dev)
+{
+ struct arizona *arizona = dev_get_drvdata(dev);
+
+ dev_dbg(arizona->dev, "Entering AoD mode\n");
+
+ regulator_disable(arizona->dcvdd);
+ regcache_cache_only(arizona->regmap, true);
+ regcache_mark_dirty(arizona->regmap);
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops arizona_pm_ops = {
+ SET_RUNTIME_PM_OPS(arizona_runtime_suspend,
+ arizona_runtime_resume,
+ NULL)
+};
+EXPORT_SYMBOL_GPL(arizona_pm_ops);
+
+static struct mfd_cell early_devs[] = {
+ { .name = "arizona-ldo1" },
+};
+
+static struct mfd_cell wm5102_devs[] = {
+ { .name = "arizona-extcon" },
+ { .name = "arizona-gpio" },
+ { .name = "arizona-micsupp" },
+ { .name = "arizona-pwm" },
+ { .name = "wm5102-codec" },
+};
+
+static struct mfd_cell wm5110_devs[] = {
+ { .name = "arizona-extcon" },
+ { .name = "arizona-gpio" },
+ { .name = "arizona-micsupp" },
+ { .name = "arizona-pwm" },
+ { .name = "wm5110-codec" },
+};
+
+int __devinit arizona_dev_init(struct arizona *arizona)
+{
+ struct device *dev = arizona->dev;
+ const char *type_name;
+ unsigned int reg, val;
+ int ret, i;
+
+ dev_set_drvdata(arizona->dev, arizona);
+ mutex_init(&arizona->clk_lock);
+
+ if (dev_get_platdata(arizona->dev))
+ memcpy(&arizona->pdata, dev_get_platdata(arizona->dev),
+ sizeof(arizona->pdata));
+
+ regcache_cache_only(arizona->regmap, true);
+
+ switch (arizona->type) {
+ case WM5102:
+ case WM5110:
+ for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
+ arizona->core_supplies[i].supply
+ = wm5102_core_supplies[i];
+ arizona->num_core_supplies = ARRAY_SIZE(wm5102_core_supplies);
+ break;
+ default:
+ dev_err(arizona->dev, "Unknown device type %d\n",
+ arizona->type);
+ return -EINVAL;
+ }
+
+ ret = mfd_add_devices(arizona->dev, -1, early_devs,
+ ARRAY_SIZE(early_devs), NULL, 0);
+ if (ret != 0) {
+ dev_err(dev, "Failed to add early children: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_regulator_bulk_get(dev, arizona->num_core_supplies,
+ arizona->core_supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to request core supplies: %d\n",
+ ret);
+ goto err_early;
+ }
+
+ arizona->dcvdd = devm_regulator_get(arizona->dev, "DCVDD");
+ if (IS_ERR(arizona->dcvdd)) {
+ ret = PTR_ERR(arizona->dcvdd);
+ dev_err(dev, "Failed to request DCVDD: %d\n", ret);
+ goto err_early;
+ }
+
+ ret = regulator_bulk_enable(arizona->num_core_supplies,
+ arizona->core_supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable core supplies: %d\n",
+ ret);
+ goto err_early;
+ }
+
+ ret = regulator_enable(arizona->dcvdd);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable DCVDD: %d\n", ret);
+ goto err_enable;
+ }
+
+ if (arizona->pdata.reset) {
+ /* Start out with /RESET low to put the chip into reset */
+ ret = gpio_request_one(arizona->pdata.reset,
+ GPIOF_DIR_OUT | GPIOF_INIT_LOW,
+ "arizona /RESET");
+ if (ret != 0) {
+ dev_err(dev, "Failed to request /RESET: %d\n", ret);
+ goto err_dcvdd;
+ }
+
+ gpio_set_value_cansleep(arizona->pdata.reset, 1);
+ }
+
+ regcache_cache_only(arizona->regmap, false);
+
+ ret = regmap_read(arizona->regmap, ARIZONA_SOFTWARE_RESET, &reg);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read ID register: %d\n", ret);
+ goto err_reset;
+ }
+
+ ret = regmap_read(arizona->regmap, ARIZONA_DEVICE_REVISION,
+ &arizona->rev);
+ if (ret != 0) {
+ dev_err(dev, "Failed to read revision register: %d\n", ret);
+ goto err_reset;
+ }
+ arizona->rev &= ARIZONA_DEVICE_REVISION_MASK;
+
+ switch (reg) {
+#ifdef CONFIG_MFD_WM5102
+ case 0x5102:
+ type_name = "WM5102";
+ if (arizona->type != WM5102) {
+ dev_err(arizona->dev, "WM5102 registered as %d\n",
+ arizona->type);
+ arizona->type = WM5102;
+ }
+ ret = wm5102_patch(arizona);
+ break;
+#endif
+#ifdef CONFIG_MFD_WM5110
+ case 0x5110:
+ type_name = "WM5110";
+ if (arizona->type != WM5110) {
+ dev_err(arizona->dev, "WM5110 registered as %d\n",
+ arizona->type);
+ arizona->type = WM5110;
+ }
+ ret = wm5110_patch(arizona);
+ break;
+#endif
+ default:
+ dev_err(arizona->dev, "Unknown device ID %x\n", reg);
+ goto err_reset;
+ }
+
+ dev_info(dev, "%s revision %c\n", type_name, arizona->rev + 'A');
+
+ if (ret != 0)
+ dev_err(arizona->dev, "Failed to apply patch: %d\n", ret);
+
+ /* If we have a /RESET GPIO we'll already be reset */
+ if (!arizona->pdata.reset) {
+ ret = regmap_write(arizona->regmap, ARIZONA_SOFTWARE_RESET, 0);
+ if (ret != 0) {
+ dev_err(dev, "Failed to reset device: %d\n", ret);
+ goto err_reset;
+ }
+ }
+
+ ret = arizona_wait_for_boot(arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Device failed initial boot: %d\n", ret);
+ goto err_reset;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
+ if (!arizona->pdata.gpio_defaults[i])
+ continue;
+
+ regmap_write(arizona->regmap, ARIZONA_GPIO1_CTRL + i,
+ arizona->pdata.gpio_defaults[i]);
+ }
+
+ pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+ pm_runtime_use_autosuspend(arizona->dev);
+ pm_runtime_enable(arizona->dev);
+
+ /* Chip default */
+ if (!arizona->pdata.clk32k_src)
+ arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
+
+ switch (arizona->pdata.clk32k_src) {
+ case ARIZONA_32KZ_MCLK1:
+ case ARIZONA_32KZ_MCLK2:
+ regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
+ ARIZONA_CLK_32K_SRC_MASK,
+ arizona->pdata.clk32k_src - 1);
+ break;
+ case ARIZONA_32KZ_NONE:
+ regmap_update_bits(arizona->regmap, ARIZONA_CLOCK_32K_1,
+ ARIZONA_CLK_32K_SRC_MASK, 2);
+ break;
+ default:
+ dev_err(arizona->dev, "Invalid 32kHz clock source: %d\n",
+ arizona->pdata.clk32k_src);
+ ret = -EINVAL;
+ goto err_reset;
+ }
+
+ for (i = 0; i < ARIZONA_MAX_INPUT; i++) {
+ /* Default for both is 0 so noop with defaults */
+ val = arizona->pdata.dmic_ref[i]
+ << ARIZONA_IN1_DMIC_SUP_SHIFT;
+ val |= arizona->pdata.inmode[i] << ARIZONA_IN1_MODE_SHIFT;
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_IN1L_CONTROL + (i * 8),
+ ARIZONA_IN1_DMIC_SUP_MASK |
+ ARIZONA_IN1_MODE_MASK, val);
+ }
+
+ for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
+ /* Default is 0 so noop with defaults */
+ if (arizona->pdata.out_mono[i])
+ val = ARIZONA_OUT1_MONO;
+ else
+ val = 0;
+
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_OUTPUT_PATH_CONFIG_1L + (i * 8),
+ ARIZONA_OUT1_MONO, val);
+ }
+
+ for (i = 0; i < ARIZONA_MAX_PDM_SPK; i++) {
+ if (arizona->pdata.spk_mute[i])
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_PDM_SPK1_CTRL_1 + (i * 2),
+ ARIZONA_SPK1_MUTE_ENDIAN_MASK |
+ ARIZONA_SPK1_MUTE_SEQ1_MASK,
+ arizona->pdata.spk_mute[i]);
+
+ if (arizona->pdata.spk_fmt[i])
+ regmap_update_bits(arizona->regmap,
+ ARIZONA_PDM_SPK1_CTRL_2 + (i * 2),
+ ARIZONA_SPK1_FMT_MASK,
+ arizona->pdata.spk_fmt[i]);
+ }
+
+ /* Set up for interrupts */
+ ret = arizona_irq_init(arizona);
+ if (ret != 0)
+ goto err_reset;
+
+ arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
+ arizona_clkgen_err, arizona);
+ arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
+ arizona_overclocked, arizona);
+ arizona_request_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, "Underclocked",
+ arizona_underclocked, arizona);
+
+ switch (arizona->type) {
+ case WM5102:
+ ret = mfd_add_devices(arizona->dev, -1, wm5102_devs,
+ ARRAY_SIZE(wm5102_devs), NULL, 0);
+ break;
+ case WM5110:
+ ret = mfd_add_devices(arizona->dev, -1, wm5110_devs,
+ ARRAY_SIZE(wm5110_devs), NULL, 0);
+ break;
+ }
+
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to add subdevices: %d\n", ret);
+ goto err_irq;
+ }
+
+#ifdef CONFIG_PM_RUNTIME
+ regulator_disable(arizona->dcvdd);
+#endif
+
+ return 0;
+
+err_irq:
+ arizona_irq_exit(arizona);
+err_reset:
+ if (arizona->pdata.reset) {
+ gpio_set_value_cansleep(arizona->pdata.reset, 1);
+ gpio_free(arizona->pdata.reset);
+ }
+err_dcvdd:
+ regulator_disable(arizona->dcvdd);
+err_enable:
+ regulator_bulk_disable(arizona->num_core_supplies,
+ arizona->core_supplies);
+err_early:
+ mfd_remove_devices(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_dev_init);
+
+int __devexit arizona_dev_exit(struct arizona *arizona)
+{
+ mfd_remove_devices(arizona->dev);
+ arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, arizona);
+ pm_runtime_disable(arizona->dev);
+ arizona_irq_exit(arizona);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_dev_exit);
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
new file mode 100644
index 000000000000..570c4b438086
--- /dev/null
+++ b/drivers/mfd/arizona-i2c.c
@@ -0,0 +1,97 @@
+/*
+ * arizona-i2c.c -- Arizona I2C bus interface
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+
+#include "arizona.h"
+
+static __devinit int arizona_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct arizona *arizona;
+ const struct regmap_config *regmap_config;
+ int ret;
+
+ switch (id->driver_data) {
+#ifdef CONFIG_MFD_WM5102
+ case WM5102:
+ regmap_config = &wm5102_i2c_regmap;
+ break;
+#endif
+#ifdef CONFIG_MFD_WM5110
+ case WM5110:
+ regmap_config = &wm5110_i2c_regmap;
+ break;
+#endif
+ default:
+ dev_err(&i2c->dev, "Unknown device type %ld\n",
+ id->driver_data);
+ return -EINVAL;
+ }
+
+ arizona = devm_kzalloc(&i2c->dev, sizeof(*arizona), GFP_KERNEL);
+ if (arizona == NULL)
+ return -ENOMEM;
+
+ arizona->regmap = devm_regmap_init_i2c(i2c, regmap_config);
+ if (IS_ERR(arizona->regmap)) {
+ ret = PTR_ERR(arizona->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ arizona->type = id->driver_data;
+ arizona->dev = &i2c->dev;
+ arizona->irq = i2c->irq;
+
+ return arizona_dev_init(arizona);
+}
+
+static int __devexit arizona_i2c_remove(struct i2c_client *i2c)
+{
+ struct arizona *arizona = dev_get_drvdata(&i2c->dev);
+ arizona_dev_exit(arizona);
+ return 0;
+}
+
+static const struct i2c_device_id arizona_i2c_id[] = {
+ { "wm5102", WM5102 },
+ { "wm5110", WM5110 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
+
+static struct i2c_driver arizona_i2c_driver = {
+ .driver = {
+ .name = "arizona",
+ .owner = THIS_MODULE,
+ .pm = &arizona_pm_ops,
+ },
+ .probe = arizona_i2c_probe,
+ .remove = __devexit_p(arizona_i2c_remove),
+ .id_table = arizona_i2c_id,
+};
+
+module_i2c_driver(arizona_i2c_driver);
+
+MODULE_DESCRIPTION("Arizona I2C bus interface");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
new file mode 100644
index 000000000000..98ac345f468e
--- /dev/null
+++ b/drivers/mfd/arizona-irq.c
@@ -0,0 +1,275 @@
+/*
+ * Arizona interrupt support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+
+static int arizona_map_irq(struct arizona *arizona, int irq)
+{
+ int ret;
+
+ ret = regmap_irq_get_virq(arizona->aod_irq_chip, irq);
+ if (ret < 0)
+ ret = regmap_irq_get_virq(arizona->irq_chip, irq);
+
+ return ret;
+}
+
+int arizona_request_irq(struct arizona *arizona, int irq, char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = arizona_map_irq(arizona, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT,
+ name, data);
+}
+EXPORT_SYMBOL_GPL(arizona_request_irq);
+
+void arizona_free_irq(struct arizona *arizona, int irq, void *data)
+{
+ irq = arizona_map_irq(arizona, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+EXPORT_SYMBOL_GPL(arizona_free_irq);
+
+int arizona_set_irq_wake(struct arizona *arizona, int irq, int on)
+{
+ irq = arizona_map_irq(arizona, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+EXPORT_SYMBOL_GPL(arizona_set_irq_wake);
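A hypothetical child-driver sketch (not part of this patch): request a chip interrupt by its index and let arizona_request_irq() pick the correct regmap IRQ chip (AOD or main) internally. The IRQ name used here is illustrative only.

	/* Hypothetical child driver requesting an Arizona chip IRQ. */
	static irqreturn_t example_micdet(int irq, void *data)
	{
		struct arizona *arizona = data;

		dev_dbg(arizona->dev, "Microphone detect\n");
		return IRQ_HANDLED;
	}

	static int example_enable_micdet(struct arizona *arizona)
	{
		return arizona_request_irq(arizona, ARIZONA_IRQ_MICDET,
					   "Mic detect", example_micdet,
					   arizona);
	}

The matching teardown would call arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, arizona).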
+
+static irqreturn_t arizona_boot_done(int irq, void *data)
+{
+ struct arizona *arizona = data;
+
+ dev_dbg(arizona->dev, "Boot done\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arizona_ctrlif_err(int irq, void *data)
+{
+ struct arizona *arizona = data;
+
+ /*
+ * For pretty much all potential sources a register cache sync
+ * won't help; we've just got a software bug somewhere.
+ */
+ dev_err(arizona->dev, "Control interface error\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arizona_irq_thread(int irq, void *data)
+{
+ struct arizona *arizona = data;
+ int i, ret;
+
+ ret = pm_runtime_get_sync(arizona->dev);
+ if (ret < 0) {
+ dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Check both domains */
+ for (i = 0; i < 2; i++)
+ handle_nested_irq(irq_find_mapping(arizona->virq, i));
+
+ pm_runtime_mark_last_busy(arizona->dev);
+ pm_runtime_put_autosuspend(arizona->dev);
+
+ return IRQ_HANDLED;
+}
+
+static void arizona_irq_enable(struct irq_data *data)
+{
+}
+
+static void arizona_irq_disable(struct irq_data *data)
+{
+}
+
+static struct irq_chip arizona_irq_chip = {
+ .name = "arizona",
+ .irq_disable = arizona_irq_disable,
+ .irq_enable = arizona_irq_enable,
+};
+
+static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct regmap_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops arizona_domain_ops = {
+ .map = arizona_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+int arizona_irq_init(struct arizona *arizona)
+{
+ int flags = IRQF_ONESHOT;
+ int ret, i;
+ const struct regmap_irq_chip *aod, *irq;
+
+ switch (arizona->type) {
+#ifdef CONFIG_MFD_WM5102
+ case WM5102:
+ aod = &wm5102_aod;
+ irq = &wm5102_irq;
+ break;
+#endif
+#ifdef CONFIG_MFD_WM5110
+ case WM5110:
+ aod = &wm5110_aod;
+ irq = &wm5110_irq;
+ break;
+#endif
+ default:
+ BUG_ON("Unknown Arizona class device" == NULL);
+ return -EINVAL;
+ }
+
+ if (arizona->pdata.irq_active_high) {
+ ret = regmap_update_bits(arizona->regmap, ARIZONA_IRQ_CTRL_1,
+ ARIZONA_IRQ_POL, 0);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Couldn't set IRQ polarity: %d\n",
+ ret);
+ goto err;
+ }
+
+ flags |= IRQF_TRIGGER_HIGH;
+ } else {
+ flags |= IRQF_TRIGGER_LOW;
+ }
+
+ /* Allocate a virtual IRQ domain to distribute to the regmap domains */
+ arizona->virq = irq_domain_add_linear(NULL, 2, &arizona_domain_ops,
+ arizona);
+ if (!arizona->virq) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = regmap_add_irq_chip(arizona->regmap,
+ irq_create_mapping(arizona->virq, 0),
+ IRQF_ONESHOT, -1, aod,
+ &arizona->aod_irq_chip);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to add AOD IRQs: %d\n", ret);
+ goto err_domain;
+ }
+
+ ret = regmap_add_irq_chip(arizona->regmap,
+ irq_create_mapping(arizona->virq, 1),
+ IRQF_ONESHOT, -1, irq,
+ &arizona->irq_chip);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to add AOD IRQs: %d\n", ret);
+ goto err_aod;
+ }
+
+ /* Make sure the boot done IRQ is unmasked for resumes */
+ i = arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE);
+ ret = request_threaded_irq(i, NULL, arizona_boot_done, IRQF_ONESHOT,
+ "Boot done", arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request boot done %d: %d\n",
+ arizona->irq, ret);
+ goto err_boot_done;
+ }
+
+ /* Handle control interface errors in the core */
+ i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR);
+ ret = request_threaded_irq(i, NULL, arizona_ctrlif_err, IRQF_ONESHOT,
+ "Control interface error", arizona);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request boot done %d: %d\n",
+ arizona->irq, ret);
+ goto err_ctrlif;
+ }
+
+ ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread,
+ flags, "arizona", arizona);
+
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to request IRQ %d: %d\n",
+ arizona->irq, ret);
+ goto err_main_irq;
+ }
+
+ return 0;
+
+err_main_irq:
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
+err_ctrlif:
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
+err_boot_done:
+ regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ arizona->irq_chip);
+err_aod:
+ regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ arizona->aod_irq_chip);
+err_domain:
+err:
+ return ret;
+}
+
+int arizona_irq_exit(struct arizona *arizona)
+{
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
+ regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
+ arizona->irq_chip);
+ regmap_del_irq_chip(irq_create_mapping(arizona->virq, 0),
+ arizona->aod_irq_chip);
+ free_irq(arizona->irq, arizona);
+
+ return 0;
+}
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
new file mode 100644
index 000000000000..df2e5a8bee28
--- /dev/null
+++ b/drivers/mfd/arizona-spi.c
@@ -0,0 +1,97 @@
+/*
+ * arizona-spi.c -- Arizona SPI bus interface
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mfd/arizona/core.h>
+
+#include "arizona.h"
+
+static int __devinit arizona_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct arizona *arizona;
+ const struct regmap_config *regmap_config;
+ int ret;
+
+ switch (id->driver_data) {
+#ifdef CONFIG_MFD_WM5102
+ case WM5102:
+ regmap_config = &wm5102_spi_regmap;
+ break;
+#endif
+#ifdef CONFIG_MFD_WM5110
+ case WM5110:
+ regmap_config = &wm5110_spi_regmap;
+ break;
+#endif
+ default:
+ dev_err(&spi->dev, "Unknown device type %ld\n",
+ id->driver_data);
+ return -EINVAL;
+ }
+
+ arizona = devm_kzalloc(&spi->dev, sizeof(*arizona), GFP_KERNEL);
+ if (arizona == NULL)
+ return -ENOMEM;
+
+ arizona->regmap = devm_regmap_init_spi(spi, regmap_config);
+ if (IS_ERR(arizona->regmap)) {
+ ret = PTR_ERR(arizona->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ arizona->type = id->driver_data;
+ arizona->dev = &spi->dev;
+ arizona->irq = spi->irq;
+
+ return arizona_dev_init(arizona);
+}
+
+static int __devexit arizona_spi_remove(struct spi_device *spi)
+{
+ struct arizona *arizona = dev_get_drvdata(&spi->dev);
+ arizona_dev_exit(arizona);
+ return 0;
+}
+
+static const struct spi_device_id arizona_spi_ids[] = {
+ { "wm5102", WM5102 },
+ { "wm5110", WM5110 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, arizona_spi_ids);
+
+static struct spi_driver arizona_spi_driver = {
+ .driver = {
+ .name = "arizona",
+ .owner = THIS_MODULE,
+ .pm = &arizona_pm_ops,
+ },
+ .probe = arizona_spi_probe,
+ .remove = __devexit_p(arizona_spi_remove),
+ .id_table = arizona_spi_ids,
+};
+
+module_spi_driver(arizona_spi_driver);
+
+MODULE_DESCRIPTION("Arizona SPI bus interface");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
new file mode 100644
index 000000000000..9798ae5da67b
--- /dev/null
+++ b/drivers/mfd/arizona.h
@@ -0,0 +1,40 @@
+/*
+ * arizona.h -- Arizona MFD internals
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM5102_H
+#define _WM5102_H
+
+#include <linux/regmap.h>
+#include <linux/pm.h>
+
+struct wm_arizona;
+
+extern const struct regmap_config wm5102_i2c_regmap;
+extern const struct regmap_config wm5102_spi_regmap;
+
+extern const struct regmap_config wm5110_i2c_regmap;
+extern const struct regmap_config wm5110_spi_regmap;
+
+extern const struct dev_pm_ops arizona_pm_ops;
+
+extern const struct regmap_irq_chip wm5102_aod;
+extern const struct regmap_irq_chip wm5102_irq;
+
+extern const struct regmap_irq_chip wm5110_aod;
+extern const struct regmap_irq_chip wm5110_irq;
+
+int arizona_dev_init(struct arizona *arizona);
+int arizona_dev_exit(struct arizona *arizona);
+int arizona_irq_init(struct arizona *arizona);
+int arizona_irq_exit(struct arizona *arizona);
+
+#endif
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 383421bf5760..683e18a23329 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -925,6 +925,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
goto out;
}
+ ret = 0;
if (pdata->leds) {
int i;
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 1f1313c90573..2544910e1fd6 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -772,7 +772,6 @@ EXPORT_SYMBOL_GPL(da9052_regmap_config);
int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
{
struct da9052_pdata *pdata = da9052->dev->platform_data;
- struct irq_desc *desc;
int ret;
mutex_init(&da9052->auxadc_lock);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 50e83dc5dc49..7040a0081130 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/mfd/core.h>
#include <linux/mfd/dbx500-prcmu.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/db8500-prcmu.h>
#include <linux/regulator/machine.h>
#include <asm/hardware/gic.h>
@@ -2269,10 +2270,10 @@ int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
/**
* prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem
*/
-void prcmu_ac_wake_req(void)
+int prcmu_ac_wake_req(void)
{
u32 val;
- u32 status;
+ int ret = 0;
mutex_lock(&mb0_transfer.ac_wake_lock);
@@ -2282,39 +2283,32 @@ void prcmu_ac_wake_req(void)
atomic_set(&ac_wake_req_state, 1);
-retry:
- writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), PRCM_HOSTACCESS_REQ);
+ /*
+ * Force Modem Wake-up before hostaccess_req ping-pong.
+ * It prevents the modem from entering sleep while acking the hostaccess
+ * request. The 31us delay has been calculated by HWI.
+ */
+ val |= PRCM_HOSTACCESS_REQ_WAKE_REQ;
+ writel(val, PRCM_HOSTACCESS_REQ);
+
+ udelay(31);
+
+ val |= PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ;
+ writel(val, PRCM_HOSTACCESS_REQ);
if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
msecs_to_jiffies(5000))) {
+#if defined(CONFIG_DBX500_PRCMU_DEBUG)
+ db8500_prcmu_debug_dump(__func__, true, true);
+#endif
pr_crit("prcmu: %s timed out (5 s) waiting for a reply.\n",
__func__);
- goto unlock_and_return;
- }
-
- /*
- * The modem can generate an AC_WAKE_ACK, and then still go to sleep.
- * As a workaround, we wait, and then check that the modem is indeed
- * awake (in terms of the value of the PRCM_MOD_AWAKE_STATUS
- * register, which may not be the whole truth).
- */
- udelay(400);
- status = (readl(PRCM_MOD_AWAKE_STATUS) & BITS(0, 2));
- if (status != (PRCM_MOD_AWAKE_STATUS_PRCM_MOD_AAPD_AWAKE |
- PRCM_MOD_AWAKE_STATUS_PRCM_MOD_COREPD_AWAKE)) {
- pr_err("prcmu: %s received ack, but modem not awake (0x%X).\n",
- __func__, status);
- udelay(1200);
- writel(val, PRCM_HOSTACCESS_REQ);
- if (wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
- msecs_to_jiffies(5000)))
- goto retry;
- pr_crit("prcmu: %s timed out (5 s) waiting for AC_SLEEP_ACK.\n",
- __func__);
+ ret = -EFAULT;
}
unlock_and_return:
mutex_unlock(&mb0_transfer.ac_wake_lock);
+ return ret;
}
/**
@@ -2945,14 +2939,31 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
},
};
+static struct resource ab8500_resources[] = {
+ [0] = {
+ .start = IRQ_DB8500_AB8500,
+ .end = IRQ_DB8500_AB8500,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
static struct mfd_cell db8500_prcmu_devs[] = {
{
.name = "db8500-prcmu-regulators",
+ .of_compatible = "stericsson,db8500-prcmu-regulator",
.platform_data = &db8500_regulators,
.pdata_size = sizeof(db8500_regulators),
},
{
.name = "cpufreq-u8500",
+ .of_compatible = "stericsson,cpufreq-u8500",
+ },
+ {
+ .name = "ab8500-core",
+ .of_compatible = "stericsson,ab8500",
+ .num_resources = ARRAY_SIZE(ab8500_resources),
+ .resources = ab8500_resources,
+ .id = AB8500_VERSION_AB8500,
},
};
@@ -2962,8 +2973,9 @@ static struct mfd_cell db8500_prcmu_devs[] = {
*/
static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
{
+ struct ab8500_platform_data *ab8500_platdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
- int irq = 0, err = 0;
+ int irq = 0, err = 0, i;
if (ux500_is_svp())
return -ENODEV;
@@ -2987,16 +2999,21 @@ static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
goto no_irq_return;
}
+ for (i = 0; i < ARRAY_SIZE(db8500_prcmu_devs); i++) {
+ if (!strcmp(db8500_prcmu_devs[i].name, "ab8500-core")) {
+ db8500_prcmu_devs[i].platform_data = ab8500_platdata;
+ db8500_prcmu_devs[i].pdata_size = sizeof(struct ab8500_platform_data);
+ }
+ }
+
if (cpu_is_u8500v20_or_later())
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
- if (!np) {
- err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
- ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
- if (err) {
- pr_err("prcmu: Failed to add subdevices\n");
- return err;
- }
+ err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+ ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+ if (err) {
+ pr_err("prcmu: Failed to add subdevices\n");
+ return err;
}
pr_info("DB8500 PRCMU initialized\n");
@@ -3004,11 +3021,16 @@ static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
no_irq_return:
return err;
}
+static const struct of_device_id db8500_prcmu_match[] = {
+ { .compatible = "stericsson,db8500-prcmu"},
+ { },
+};
static struct platform_driver db8500_prcmu_driver = {
.driver = {
.name = "db8500-prcmu",
.owner = THIS_MODULE,
+ .of_match_table = db8500_prcmu_match,
},
.probe = db8500_prcmu_probe,
};
@@ -3018,7 +3040,7 @@ static int __init db8500_prcmu_init(void)
return platform_driver_register(&db8500_prcmu_driver);
}
-arch_initcall(db8500_prcmu_init);
+core_initcall(db8500_prcmu_init);
MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
diff --git a/drivers/mfd/dbx500-prcmu-regs.h b/drivers/mfd/dbx500-prcmu-regs.h
index 3a0bf91d7780..23108a6e3167 100644
--- a/drivers/mfd/dbx500-prcmu-regs.h
+++ b/drivers/mfd/dbx500-prcmu-regs.h
@@ -106,6 +106,7 @@
#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334)
#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ 0x1
+#define PRCM_HOSTACCESS_REQ_WAKE_REQ BIT(16)
#define ARM_WAKEUP_MODEM 0x1
#define PRCM_ARM_IT1_CLR (_PRCMU_BASE + 0x48C)
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 43a76c41cfcc..db662e2dcfa5 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
}
local_irq_enable();
ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
- } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
+ } while (gpio_get_value(pdata->gpio));
}
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/max77686-irq.c b/drivers/mfd/max77686-irq.c
new file mode 100644
index 000000000000..cdc3280e2ec7
--- /dev/null
+++ b/drivers/mfd/max77686-irq.c
@@ -0,0 +1,319 @@
+/*
+ * max77686-irq.c - Interrupt controller support for MAX77686
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Chiwoong Byun <woong.byun@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/mfd/max77686.h>
+#include <linux/mfd/max77686-private.h>
+#include <linux/irqdomain.h>
+#include <linux/regmap.h>
+
+enum {
+ MAX77686_DEBUG_IRQ_INFO = 1 << 0,
+ MAX77686_DEBUG_IRQ_MASK = 1 << 1,
+ MAX77686_DEBUG_IRQ_INT = 1 << 2,
+};
+
+static int debug_mask = 0;
+module_param(debug_mask, int, 0);
+MODULE_PARM_DESC(debug_mask, "Set debug_mask : 0x0=off 0x1=IRQ_INFO 0x2=IRQ_MASK 0x4=IRQ_INT)");
+
+static const u8 max77686_mask_reg[] = {
+ [PMIC_INT1] = MAX77686_REG_INT1MSK,
+ [PMIC_INT2] = MAX77686_REG_INT2MSK,
+ [RTC_INT] = MAX77686_RTC_INTM,
+};
+
+static struct regmap *max77686_get_regmap(struct max77686_dev *max77686,
+ enum max77686_irq_source src)
+{
+ switch (src) {
+ case PMIC_INT1 ... PMIC_INT2:
+ return max77686->regmap;
+ case RTC_INT:
+ return max77686->rtc_regmap;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+struct max77686_irq_data {
+ int mask;
+ enum max77686_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask) \
+ [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77686_irq_data max77686_irqs[] = {
+ DECLARE_IRQ(MAX77686_PMICIRQ_PWRONF, PMIC_INT1, 1 << 0),
+ DECLARE_IRQ(MAX77686_PMICIRQ_PWRONR, PMIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBF, PMIC_INT1, 1 << 2),
+ DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBR, PMIC_INT1, 1 << 3),
+ DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBF, PMIC_INT1, 1 << 4),
+ DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBR, PMIC_INT1, 1 << 5),
+ DECLARE_IRQ(MAX77686_PMICIRQ_ONKEY1S, PMIC_INT1, 1 << 6),
+ DECLARE_IRQ(MAX77686_PMICIRQ_MRSTB, PMIC_INT1, 1 << 7),
+ DECLARE_IRQ(MAX77686_PMICIRQ_140C, PMIC_INT2, 1 << 0),
+ DECLARE_IRQ(MAX77686_PMICIRQ_120C, PMIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX77686_RTCIRQ_RTC60S, RTC_INT, 1 << 0),
+ DECLARE_IRQ(MAX77686_RTCIRQ_RTCA1, RTC_INT, 1 << 1),
+ DECLARE_IRQ(MAX77686_RTCIRQ_RTCA2, RTC_INT, 1 << 2),
+ DECLARE_IRQ(MAX77686_RTCIRQ_SMPL, RTC_INT, 1 << 3),
+ DECLARE_IRQ(MAX77686_RTCIRQ_RTC1S, RTC_INT, 1 << 4),
+ DECLARE_IRQ(MAX77686_RTCIRQ_WTSR, RTC_INT, 1 << 5),
+};
+
+static void max77686_irq_lock(struct irq_data *data)
+{
+ struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
+ pr_info("%s\n", __func__);
+
+ mutex_lock(&max77686->irqlock);
+}
+
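+/*
+ * Mask/unmask only update the cached mask bits; write them back to the
+ * hardware once the bus lock is released.
+ */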
+static void max77686_irq_sync_unlock(struct irq_data *data)
+{
+ struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
+ int i;
+
+ for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
+ u8 mask_reg = max77686_mask_reg[i];
+ struct regmap *map = max77686_get_regmap(max77686, i);
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
+ pr_debug("%s: mask_reg[%d]=0x%x, cur=0x%x\n",
+ __func__, i, mask_reg, max77686->irq_masks_cur[i]);
+
+ if (mask_reg == MAX77686_REG_INVALID ||
+ IS_ERR_OR_NULL(map))
+ continue;
+
+ max77686->irq_masks_cache[i] = max77686->irq_masks_cur[i];
+
+ regmap_write(map, max77686_mask_reg[i],
+ max77686->irq_masks_cur[i]);
+ }
+
+ mutex_unlock(&max77686->irqlock);
+}
+
+static const inline struct max77686_irq_data *to_max77686_irq(int irq)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ return &max77686_irqs[data->hwirq];
+}
+
+static void max77686_irq_mask(struct irq_data *data)
+{
+ struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
+ const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq);
+
+ max77686->irq_masks_cur[irq_data->group] |= irq_data->mask;
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
+ pr_info("%s: group=%d, cur=0x%x\n",
+ __func__, irq_data->group,
+ max77686->irq_masks_cur[irq_data->group]);
+}
+
+static void max77686_irq_unmask(struct irq_data *data)
+{
+ struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
+ const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq);
+
+ max77686->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
+ pr_info("%s: group=%d, cur=0x%x\n",
+ __func__, irq_data->group,
+ max77686->irq_masks_cur[irq_data->group]);
+}
+
+static struct irq_chip max77686_irq_chip = {
+ .name = "max77686",
+ .irq_bus_lock = max77686_irq_lock,
+ .irq_bus_sync_unlock = max77686_irq_sync_unlock,
+ .irq_mask = max77686_irq_mask,
+ .irq_unmask = max77686_irq_unmask,
+};
+
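+/*
+ * Threaded handler: read INTSRC to see whether the PMIC or RTC block is
+ * signalling, then dispatch each unmasked source as a nested IRQ through
+ * the irq domain mapping.
+ */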
+static irqreturn_t max77686_irq_thread(int irq, void *data)
+{
+ struct max77686_dev *max77686 = data;
+ unsigned int irq_reg[MAX77686_IRQ_GROUP_NR] = {};
+ unsigned int irq_src;
+ int ret;
+ int i, cur_irq;
+
+ ret = regmap_read(max77686->regmap, MAX77686_REG_INTSRC, &irq_src);
+ if (ret < 0) {
+ dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_INT)
+ pr_info("%s: irq_src=0x%x\n", __func__, irq_src);
+
+ if (irq_src == MAX77686_IRQSRC_PMIC) {
+ ret = regmap_bulk_read(max77686->regmap,
+ MAX77686_REG_INT1, irq_reg, 2);
+ if (ret < 0) {
+ dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_INT)
+ pr_info("%s: int1=0x%x, int2=0x%x\n", __func__,
+ irq_reg[PMIC_INT1], irq_reg[PMIC_INT2]);
+ }
+
+ if (irq_src & MAX77686_IRQSRC_RTC) {
+ ret = regmap_read(max77686->rtc_regmap,
+ MAX77686_RTC_INT, &irq_reg[RTC_INT]);
+ if (ret < 0) {
+ dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_INT)
+ pr_info("%s: rtc int=0x%x\n", __func__,
+ irq_reg[RTC_INT]);
+
+ }
+
+ for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++)
+ irq_reg[i] &= ~max77686->irq_masks_cur[i];
+
+ for (i = 0; i < MAX77686_IRQ_NR; i++) {
+ if (irq_reg[max77686_irqs[i].group] & max77686_irqs[i].mask) {
+ cur_irq = irq_find_mapping(max77686->irq_domain, i);
+ if (cur_irq)
+ handle_nested_irq(cur_irq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
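+/* Configure each freshly mapped virtual IRQ as a nested threaded interrupt. */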
+static int max77686_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct max77686_dev *max77686 = d->host_data;
+
+ irq_set_chip_data(irq, max77686);
+ irq_set_chip_and_handler(irq, &max77686_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max77686_irq_domain_ops = {
+ .map = max77686_irq_domain_map,
+};
+
+int max77686_irq_init(struct max77686_dev *max77686)
+{
+ struct irq_domain *domain;
+ int i;
+ int ret;
+ int val;
+ struct regmap *map;
+
+ mutex_init(&max77686->irqlock);
+
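+ /* Fall back to the IRQ GPIO when no interrupt line was supplied. */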
+ if (max77686->irq_gpio && !max77686->irq) {
+ max77686->irq = gpio_to_irq(max77686->irq_gpio);
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_INT) {
+ ret = gpio_request(max77686->irq_gpio, "pmic_irq");
+ if (ret < 0) {
+ dev_err(max77686->dev,
+ "Failed to request gpio %d with ret:"
+ "%d\n", max77686->irq_gpio, ret);
+ return IRQ_NONE;
+ }
+
+ gpio_direction_input(max77686->irq_gpio);
+ val = gpio_get_value(max77686->irq_gpio);
+ gpio_free(max77686->irq_gpio);
+ pr_info("%s: gpio_irq=%x\n", __func__, val);
+ }
+ }
+
+ if (!max77686->irq) {
+ dev_err(max77686->dev, "irq is not specified\n");
+ return -ENODEV;
+ }
+
+ /* Mask individual interrupt sources */
+ for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
+ max77686->irq_masks_cur[i] = 0xff;
+ max77686->irq_masks_cache[i] = 0xff;
+ map = max77686_get_regmap(max77686, i);
+
+ if (IS_ERR_OR_NULL(map))
+ continue;
+ if (max77686_mask_reg[i] == MAX77686_REG_INVALID)
+ continue;
+
+ regmap_write(map, max77686_mask_reg[i], 0xff);
+ }
+ domain = irq_domain_add_linear(NULL, MAX77686_IRQ_NR,
+ &max77686_irq_domain_ops, max77686);
+ if (!domain) {
+ dev_err(max77686->dev, "could not create irq domain\n");
+ return -ENODEV;
+ }
+ max77686->irq_domain = domain;
+
+ ret = request_threaded_irq(max77686->irq, NULL, max77686_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max77686-irq", max77686);
+
+ if (ret)
+ dev_err(max77686->dev, "Failed to request IRQ %d: %d\n",
+ max77686->irq, ret);
+
+
+ if (debug_mask & MAX77686_DEBUG_IRQ_INFO)
+ pr_info("%s-\n", __func__);
+
+ return 0;
+}
+
+void max77686_irq_exit(struct max77686_dev *max77686)
+{
+ if (max77686->irq)
+ free_irq(max77686->irq, max77686);
+}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
new file mode 100644
index 000000000000..c03e12b51924
--- /dev/null
+++ b/drivers/mfd/max77686.c
@@ -0,0 +1,187 @@
+/*
+ * max77686.c - mfd core driver for the Maxim 77686
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77686.h>
+#include <linux/mfd/max77686-private.h>
+#include <linux/err.h>
+
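+/* The PMIC's RTC block responds on its own fixed I2C slave address. */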
+#define I2C_ADDR_RTC (0x0C >> 1)
+
+static struct mfd_cell max77686_devs[] = {
+ { .name = "max77686-pmic", },
+ { .name = "max77686-rtc", },
+};
+
+static struct regmap_config max77686_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+#ifdef CONFIG_OF
+static struct of_device_id __devinitdata max77686_pmic_dt_match[] = {
+ {.compatible = "maxim,max77686", .data = 0},
+ {},
+};
+
+static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
+ *dev)
+{
+ struct max77686_platform_data *pd;
+
+ pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
+ dev_err(dev, "could not allocate memory for pdata\n");
+ return NULL;
+ }
+
+ dev->platform_data = pd;
+ return pd;
+}
+#else
+static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
+ *dev)
+{
+ return 0;
+}
+#endif
+
+static int max77686_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max77686_dev *max77686 = NULL;
+ struct max77686_platform_data *pdata = i2c->dev.platform_data;
+ unsigned int data;
+ int ret = 0;
+
+ if (i2c->dev.of_node)
+ pdata = max77686_i2c_parse_dt_pdata(&i2c->dev);
+
+ if (!pdata) {
+ ret = -EIO;
+ dev_err(&i2c->dev, "No platform data found.\n");
+ goto err;
+ }
+
+ max77686 = kzalloc(sizeof(struct max77686_dev), GFP_KERNEL);
+ if (max77686 == NULL)
+ return -ENOMEM;
+
+ max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (IS_ERR(max77686->regmap)) {
+ ret = PTR_ERR(max77686->regmap);
+ dev_err(max77686->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(max77686);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, max77686);
+ max77686->dev = &i2c->dev;
+ max77686->i2c = i2c;
+ max77686->type = id->driver_data;
+
+ max77686->wakeup = pdata->wakeup;
+ max77686->irq_gpio = pdata->irq_gpio;
+ max77686->irq = i2c->irq;
+
+ if (regmap_read(max77686->regmap,
+ MAX77686_REG_DEVICE_ID, &data) < 0) {
+ dev_err(max77686->dev,
+ "device not found on this channel (this is not an error)\n");
+ ret = -ENODEV;
+ goto err;
+ } else
+ dev_info(max77686->dev, "device found\n");
+
+ max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ i2c_set_clientdata(max77686->rtc, max77686);
+
+ max77686_irq_init(max77686);
+
+ ret = mfd_add_devices(max77686->dev, -1, max77686_devs,
+ ARRAY_SIZE(max77686_devs), NULL, 0);
+
+ if (ret < 0)
+ goto err_mfd;
+
+ return ret;
+
+err_mfd:
+ mfd_remove_devices(max77686->dev);
+ i2c_unregister_device(max77686->rtc);
+err:
+ kfree(max77686);
+ return ret;
+}
+
+static int max77686_i2c_remove(struct i2c_client *i2c)
+{
+ struct max77686_dev *max77686 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max77686->dev);
+ i2c_unregister_device(max77686->rtc);
+ kfree(max77686);
+
+ return 0;
+}
+
+static const struct i2c_device_id max77686_i2c_id[] = {
+ { "max77686", TYPE_MAX77686 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77686_i2c_id);
+
+static struct i2c_driver max77686_i2c_driver = {
+ .driver = {
+ .name = "max77686",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(max77686_pmic_dt_match),
+ },
+ .probe = max77686_i2c_probe,
+ .remove = max77686_i2c_remove,
+ .id_table = max77686_i2c_id,
+};
+
+static int __init max77686_i2c_init(void)
+{
+ return i2c_add_driver(&max77686_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77686_i2c_init);
+
+static void __exit max77686_i2c_exit(void)
+{
+ i2c_del_driver(&max77686_i2c_driver);
+}
+module_exit(max77686_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77686 multi-function core driver");
+MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index e9e4278722f3..a1811cb50ec7 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -138,8 +138,6 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
max77693->wakeup = pdata->wakeup;
- mutex_init(&max77693->iolock);
-
if (max77693_read_reg(max77693->regmap,
MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
dev_err(max77693->dev, "device not found on this channel\n");
@@ -156,7 +154,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
ret = max77693_irq_init(max77693);
if (ret < 0)
- goto err_mfd;
+ goto err_irq;
pm_runtime_set_active(max77693->dev);
@@ -170,11 +168,11 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
return ret;
err_mfd:
+ max77693_irq_exit(max77693);
+err_irq:
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
err_regmap:
- kfree(max77693);
-
return ret;
}
@@ -183,6 +181,7 @@ static int max77693_i2c_remove(struct i2c_client *i2c)
struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
mfd_remove_devices(max77693->dev);
+ max77693_irq_exit(max77693);
i2c_unregister_device(max77693->muic);
i2c_unregister_device(max77693->haptic);
@@ -215,7 +214,7 @@ static int max77693_resume(struct device *dev)
return max77693_irq_resume(max77693);
}
-const struct dev_pm_ops max77693_pm = {
+static const struct dev_pm_ops max77693_pm = {
.suspend = max77693_suspend,
.resume = max77693_resume,
};
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index ca881efedf75..825a7f06d9ba 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -75,9 +75,9 @@ static struct mfd_cell power_devs[] = {
static struct resource rtc_resources[] = {
{
.name = "max8925-rtc",
- .start = MAX8925_RTC_IRQ,
- .end = MAX8925_RTC_IRQ_MASK,
- .flags = IORESOURCE_IO,
+ .start = MAX8925_IRQ_RTC_ALARM0,
+ .end = MAX8925_IRQ_RTC_ALARM0,
+ .flags = IORESOURCE_IRQ,
},
};
@@ -598,7 +598,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
ARRAY_SIZE(rtc_devs),
- &rtc_resources[0], 0);
+ &rtc_resources[0], chip->irq_base);
if (ret < 0) {
dev_err(chip->dev, "Failed to add rtc subdev\n");
goto out;
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 09274cf7c33b..43fa61413e93 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -142,7 +142,8 @@ static void max8997_irq_sync_unlock(struct irq_data *data)
static const inline struct max8997_irq_data *
irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
{
- return &max8997_irqs[irq - max8997->irq_base];
+ struct irq_data *data = irq_get_irq_data(irq);
+ return &max8997_irqs[data->hwirq];
}
static void max8997_irq_mask(struct irq_data *data)
@@ -182,7 +183,7 @@ static irqreturn_t max8997_irq_thread(int irq, void *data)
u8 irq_reg[MAX8997_IRQ_GROUP_NR] = {};
u8 irq_src;
int ret;
- int i;
+ int i, cur_irq;
ret = max8997_read_reg(max8997->i2c, MAX8997_REG_INTSRC, &irq_src);
if (ret < 0) {
@@ -269,8 +270,11 @@ static irqreturn_t max8997_irq_thread(int irq, void *data)
/* Report */
for (i = 0; i < MAX8997_IRQ_NR; i++) {
- if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask)
- handle_nested_irq(max8997->irq_base + i);
+ if (irq_reg[max8997_irqs[i].group] & max8997_irqs[i].mask) {
+ cur_irq = irq_find_mapping(max8997->irq_domain, i);
+ if (cur_irq)
+ handle_nested_irq(cur_irq);
+ }
}
return IRQ_HANDLED;
@@ -278,26 +282,40 @@ static irqreturn_t max8997_irq_thread(int irq, void *data)
int max8997_irq_resume(struct max8997_dev *max8997)
{
- if (max8997->irq && max8997->irq_base)
- max8997_irq_thread(max8997->irq_base, max8997);
+ if (max8997->irq && max8997->irq_domain)
+ max8997_irq_thread(0, max8997);
+ return 0;
+}
+
+static int max8997_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct max8997_dev *max8997 = d->host_data;
+
+ irq_set_chip_data(irq, max8997);
+ irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
return 0;
}
+static struct irq_domain_ops max8997_irq_domain_ops = {
+ .map = max8997_irq_domain_map,
+};
+
int max8997_irq_init(struct max8997_dev *max8997)
{
+ struct irq_domain *domain;
int i;
- int cur_irq;
int ret;
u8 val;
if (!max8997->irq) {
dev_warn(max8997->dev, "No interrupt specified.\n");
- max8997->irq_base = 0;
- return 0;
- }
-
- if (!max8997->irq_base) {
- dev_err(max8997->dev, "No interrupt base specified.\n");
return 0;
}
@@ -327,19 +345,13 @@ int max8997_irq_init(struct max8997_dev *max8997)
true : false;
}
- /* Register with genirq */
- for (i = 0; i < MAX8997_IRQ_NR; i++) {
- cur_irq = i + max8997->irq_base;
- irq_set_chip_data(cur_irq, max8997);
- irq_set_chip_and_handler(cur_irq, &max8997_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ domain = irq_domain_add_linear(NULL, MAX8997_IRQ_NR,
+ &max8997_irq_domain_ops, max8997);
+ if (!domain) {
+ dev_err(max8997->dev, "could not create irq domain\n");
+ return -ENODEV;
}
+ max8997->irq_domain = domain;
ret = request_threaded_irq(max8997->irq, NULL, max8997_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index cb83a7ab53e7..10b629c245b6 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -143,7 +143,6 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
if (!pdata)
goto err;
- max8997->irq_base = pdata->irq_base;
max8997->ono = pdata->ono;
mutex_init(&max8997->iolock);
@@ -206,7 +205,7 @@ static const struct i2c_device_id max8997_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max8998_i2c_id);
-u8 max8997_dumpaddr_pmic[] = {
+static u8 max8997_dumpaddr_pmic[] = {
MAX8997_REG_INT1MSK,
MAX8997_REG_INT2MSK,
MAX8997_REG_INT3MSK,
@@ -331,7 +330,7 @@ u8 max8997_dumpaddr_pmic[] = {
MAX8997_REG_DVSOKTIMER5,
};
-u8 max8997_dumpaddr_muic[] = {
+static u8 max8997_dumpaddr_muic[] = {
MAX8997_MUIC_REG_INTMASK1,
MAX8997_MUIC_REG_INTMASK2,
MAX8997_MUIC_REG_INTMASK3,
@@ -341,7 +340,7 @@ u8 max8997_dumpaddr_muic[] = {
MAX8997_MUIC_REG_CONTROL3,
};
-u8 max8997_dumpaddr_haptic[] = {
+static u8 max8997_dumpaddr_haptic[] = {
MAX8997_HAPTIC_REG_CONF1,
MAX8997_HAPTIC_REG_CONF2,
MAX8997_HAPTIC_REG_DRVCONF,
@@ -423,7 +422,7 @@ static int max8997_resume(struct device *dev)
return max8997_irq_resume(max8997);
}
-const struct dev_pm_ops max8997_pm = {
+static const struct dev_pm_ops max8997_pm = {
.suspend = max8997_suspend,
.resume = max8997_resume,
.freeze = max8997_freeze,
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index f0ea3b8b3e4a..b801dc72f041 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -723,10 +723,6 @@ void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
free_irq(mc13xxx->irq, mc13xxx);
mfd_remove_devices(mc13xxx->dev);
-
- regmap_exit(mc13xxx->regmap);
-
- kfree(mc13xxx);
}
EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index d22501dad6a6..9d18dde3cd2a 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -53,17 +53,11 @@ static struct regmap_config mc13xxx_regmap_i2c_config = {
static int mc13xxx_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- const struct of_device_id *of_id;
- struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
struct mc13xxx *mc13xxx;
struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
int ret;
- of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
- if (of_id)
- idrv->id_table = (const struct i2c_device_id*) of_id->data;
-
- mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ mc13xxx = devm_kzalloc(&client->dev, sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
return -ENOMEM;
@@ -72,13 +66,13 @@ static int mc13xxx_i2c_probe(struct i2c_client *client,
mc13xxx->dev = &client->dev;
mutex_init(&mc13xxx->lock);
- mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+ mc13xxx->regmap = devm_regmap_init_i2c(client,
+ &mc13xxx_regmap_i2c_config);
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
ret);
dev_set_drvdata(&client->dev, NULL);
- kfree(mc13xxx);
return ret;
}
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 03df422feb76..0bdb43a0aff0 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -119,17 +119,11 @@ static struct regmap_bus regmap_mc13xxx_bus = {
static int mc13xxx_spi_probe(struct spi_device *spi)
{
- const struct of_device_id *of_id;
- struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
struct mc13xxx *mc13xxx;
struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
int ret;
- of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (of_id)
- sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
- mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ mc13xxx = devm_kzalloc(&spi->dev, sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
return -ENOMEM;
@@ -139,15 +133,14 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
mc13xxx->dev = &spi->dev;
mutex_init(&mc13xxx->lock);
- mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
- &mc13xxx_regmap_spi_config);
-
+ mc13xxx->regmap = devm_regmap_init(&spi->dev, &regmap_mc13xxx_bus,
+ &spi->dev,
+ &mc13xxx_regmap_spi_config);
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
ret);
dev_set_drvdata(&spi->dev, NULL);
- kfree(mc13xxx);
return ret;
}
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index ffc3d48676ae..0c3a01cde2f7 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -18,6 +18,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
int mfd_cell_enable(struct platform_device *pdev)
{
@@ -76,6 +78,8 @@ static int mfd_add_device(struct device *parent, int id,
{
struct resource *res;
struct platform_device *pdev;
+ struct device_node *np = NULL;
+ struct irq_domain *domain = NULL;
int ret = -ENOMEM;
int r;
@@ -89,6 +93,16 @@ static int mfd_add_device(struct device *parent, int id,
pdev->dev.parent = parent;
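+ /*
+ * Bind a matching OF child node to the cell and look up the parent's
+ * IRQ domain, used to map the cell's IRQ resources below.
+ */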
+ if (parent->of_node && cell->of_compatible) {
+ for_each_child_of_node(parent->of_node, np) {
+ if (of_device_is_compatible(np, cell->of_compatible)) {
+ pdev->dev.of_node = np;
+ domain = irq_find_host(parent->of_node);
+ break;
+ }
+ }
+ }
+
if (cell->pdata_size) {
ret = platform_device_add_data(pdev,
cell->platform_data, cell->pdata_size);
@@ -112,10 +126,18 @@ static int mfd_add_device(struct device *parent, int id,
res[r].end = mem_base->start +
cell->resources[r].end;
} else if (cell->resources[r].flags & IORESOURCE_IRQ) {
- res[r].start = irq_base +
- cell->resources[r].start;
- res[r].end = irq_base +
- cell->resources[r].end;
+ if (domain) {
+ /* Unable to create mappings for IRQ ranges. */
+ WARN_ON(cell->resources[r].start !=
+ cell->resources[r].end);
+ res[r].start = res[r].end = irq_create_mapping(
+ domain, cell->resources[r].start);
+ } else {
+ res[r].start = irq_base +
+ cell->resources[r].start;
+ res[r].end = irq_base +
+ cell->resources[r].end;
+ }
} else {
res[r].parent = cell->resources[r].parent;
res[r].start = cell->resources[r].start;
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 29c122bf28ea..45ce1fb5a549 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -253,8 +253,13 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
}
pdev->dev.parent = pcf->dev;
- platform_device_add_data(pdev, &pdata->reg_init_data[i],
- sizeof(pdata->reg_init_data[i]));
+ if (platform_device_add_data(pdev, &pdata->reg_init_data[i],
+ sizeof(pdata->reg_init_data[i])) < 0) {
+ platform_device_put(pdev);
+ dev_err(pcf->dev, "Out of memory for regulator parameters %d\n",
+ i);
+ continue;
+ }
pcf->regulator_pdev[i] = pdev;
platform_device_add(pdev);
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
deleted file mode 100644
index dd170307e60e..000000000000
--- a/drivers/mfd/s5m-core.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * s5m87xx.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/pm_runtime.h>
-#include <linux/mutex.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/s5m87xx/s5m-core.h>
-#include <linux/mfd/s5m87xx/s5m-pmic.h>
-#include <linux/mfd/s5m87xx/s5m-rtc.h>
-#include <linux/regmap.h>
-
-static struct mfd_cell s5m8751_devs[] = {
- {
- .name = "s5m8751-pmic",
- }, {
- .name = "s5m-charger",
- }, {
- .name = "s5m8751-codec",
- },
-};
-
-static struct mfd_cell s5m8763_devs[] = {
- {
- .name = "s5m8763-pmic",
- }, {
- .name = "s5m-rtc",
- }, {
- .name = "s5m-charger",
- },
-};
-
-static struct mfd_cell s5m8767_devs[] = {
- {
- .name = "s5m8767-pmic",
- }, {
- .name = "s5m-rtc",
- },
-};
-
-int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest)
-{
- return regmap_read(s5m87xx->regmap, reg, dest);
-}
-EXPORT_SYMBOL_GPL(s5m_reg_read);
-
-int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
-{
- return regmap_bulk_read(s5m87xx->regmap, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(s5m_bulk_read);
-
-int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value)
-{
- return regmap_write(s5m87xx->regmap, reg, value);
-}
-EXPORT_SYMBOL_GPL(s5m_reg_write);
-
-int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
-{
- return regmap_raw_write(s5m87xx->regmap, reg, buf, count);
-}
-EXPORT_SYMBOL_GPL(s5m_bulk_write);
-
-int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask)
-{
- return regmap_update_bits(s5m87xx->regmap, reg, mask, val);
-}
-EXPORT_SYMBOL_GPL(s5m_reg_update);
-
-static struct regmap_config s5m_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static int s5m87xx_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
-{
- struct s5m_platform_data *pdata = i2c->dev.platform_data;
- struct s5m87xx_dev *s5m87xx;
- int ret;
-
- s5m87xx = devm_kzalloc(&i2c->dev, sizeof(struct s5m87xx_dev),
- GFP_KERNEL);
- if (s5m87xx == NULL)
- return -ENOMEM;
-
- i2c_set_clientdata(i2c, s5m87xx);
- s5m87xx->dev = &i2c->dev;
- s5m87xx->i2c = i2c;
- s5m87xx->irq = i2c->irq;
- s5m87xx->type = id->driver_data;
-
- if (pdata) {
- s5m87xx->device_type = pdata->device_type;
- s5m87xx->ono = pdata->ono;
- s5m87xx->irq_base = pdata->irq_base;
- s5m87xx->wakeup = pdata->wakeup;
- }
-
- s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
- if (IS_ERR(s5m87xx->regmap)) {
- ret = PTR_ERR(s5m87xx->regmap);
- dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
-
- s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
- i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
-
- if (pdata && pdata->cfg_pmic_irq)
- pdata->cfg_pmic_irq();
-
- s5m_irq_init(s5m87xx);
-
- pm_runtime_set_active(s5m87xx->dev);
-
- switch (s5m87xx->device_type) {
- case S5M8751X:
- ret = mfd_add_devices(s5m87xx->dev, -1, s5m8751_devs,
- ARRAY_SIZE(s5m8751_devs), NULL, 0);
- break;
- case S5M8763X:
- ret = mfd_add_devices(s5m87xx->dev, -1, s5m8763_devs,
- ARRAY_SIZE(s5m8763_devs), NULL, 0);
- break;
- case S5M8767X:
- ret = mfd_add_devices(s5m87xx->dev, -1, s5m8767_devs,
- ARRAY_SIZE(s5m8767_devs), NULL, 0);
- break;
- default:
- /* If this happens the probe function is problem */
- BUG();
- }
-
- if (ret < 0)
- goto err;
-
- return ret;
-
-err:
- mfd_remove_devices(s5m87xx->dev);
- s5m_irq_exit(s5m87xx);
- i2c_unregister_device(s5m87xx->rtc);
- return ret;
-}
-
-static int s5m87xx_i2c_remove(struct i2c_client *i2c)
-{
- struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(s5m87xx->dev);
- s5m_irq_exit(s5m87xx);
- i2c_unregister_device(s5m87xx->rtc);
- return 0;
-}
-
-static const struct i2c_device_id s5m87xx_i2c_id[] = {
- { "s5m87xx", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, s5m87xx_i2c_id);
-
-static struct i2c_driver s5m87xx_i2c_driver = {
- .driver = {
- .name = "s5m87xx",
- .owner = THIS_MODULE,
- },
- .probe = s5m87xx_i2c_probe,
- .remove = s5m87xx_i2c_remove,
- .id_table = s5m87xx_i2c_id,
-};
-
-static int __init s5m87xx_i2c_init(void)
-{
- return i2c_add_driver(&s5m87xx_i2c_driver);
-}
-
-subsys_initcall(s5m87xx_i2c_init);
-
-static void __exit s5m87xx_i2c_exit(void)
-{
- i2c_del_driver(&s5m87xx_i2c_driver);
-}
-module_exit(s5m87xx_i2c_exit);
-
-MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("Core support for the S5M MFD");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-irq.c b/drivers/mfd/s5m-irq.c
deleted file mode 100644
index 0236676085cf..000000000000
--- a/drivers/mfd/s5m-irq.c
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * s5m-irq.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/mfd/s5m87xx/s5m-core.h>
-
-struct s5m_irq_data {
- int reg;
- int mask;
-};
-
-static struct s5m_irq_data s5m8767_irqs[] = {
- [S5M8767_IRQ_PWRR] = {
- .reg = 1,
- .mask = S5M8767_IRQ_PWRR_MASK,
- },
- [S5M8767_IRQ_PWRF] = {
- .reg = 1,
- .mask = S5M8767_IRQ_PWRF_MASK,
- },
- [S5M8767_IRQ_PWR1S] = {
- .reg = 1,
- .mask = S5M8767_IRQ_PWR1S_MASK,
- },
- [S5M8767_IRQ_JIGR] = {
- .reg = 1,
- .mask = S5M8767_IRQ_JIGR_MASK,
- },
- [S5M8767_IRQ_JIGF] = {
- .reg = 1,
- .mask = S5M8767_IRQ_JIGF_MASK,
- },
- [S5M8767_IRQ_LOWBAT2] = {
- .reg = 1,
- .mask = S5M8767_IRQ_LOWBAT2_MASK,
- },
- [S5M8767_IRQ_LOWBAT1] = {
- .reg = 1,
- .mask = S5M8767_IRQ_LOWBAT1_MASK,
- },
- [S5M8767_IRQ_MRB] = {
- .reg = 2,
- .mask = S5M8767_IRQ_MRB_MASK,
- },
- [S5M8767_IRQ_DVSOK2] = {
- .reg = 2,
- .mask = S5M8767_IRQ_DVSOK2_MASK,
- },
- [S5M8767_IRQ_DVSOK3] = {
- .reg = 2,
- .mask = S5M8767_IRQ_DVSOK3_MASK,
- },
- [S5M8767_IRQ_DVSOK4] = {
- .reg = 2,
- .mask = S5M8767_IRQ_DVSOK4_MASK,
- },
- [S5M8767_IRQ_RTC60S] = {
- .reg = 3,
- .mask = S5M8767_IRQ_RTC60S_MASK,
- },
- [S5M8767_IRQ_RTCA1] = {
- .reg = 3,
- .mask = S5M8767_IRQ_RTCA1_MASK,
- },
- [S5M8767_IRQ_RTCA2] = {
- .reg = 3,
- .mask = S5M8767_IRQ_RTCA2_MASK,
- },
- [S5M8767_IRQ_SMPL] = {
- .reg = 3,
- .mask = S5M8767_IRQ_SMPL_MASK,
- },
- [S5M8767_IRQ_RTC1S] = {
- .reg = 3,
- .mask = S5M8767_IRQ_RTC1S_MASK,
- },
- [S5M8767_IRQ_WTSR] = {
- .reg = 3,
- .mask = S5M8767_IRQ_WTSR_MASK,
- },
-};
-
-static struct s5m_irq_data s5m8763_irqs[] = {
- [S5M8763_IRQ_DCINF] = {
- .reg = 1,
- .mask = S5M8763_IRQ_DCINF_MASK,
- },
- [S5M8763_IRQ_DCINR] = {
- .reg = 1,
- .mask = S5M8763_IRQ_DCINR_MASK,
- },
- [S5M8763_IRQ_JIGF] = {
- .reg = 1,
- .mask = S5M8763_IRQ_JIGF_MASK,
- },
- [S5M8763_IRQ_JIGR] = {
- .reg = 1,
- .mask = S5M8763_IRQ_JIGR_MASK,
- },
- [S5M8763_IRQ_PWRONF] = {
- .reg = 1,
- .mask = S5M8763_IRQ_PWRONF_MASK,
- },
- [S5M8763_IRQ_PWRONR] = {
- .reg = 1,
- .mask = S5M8763_IRQ_PWRONR_MASK,
- },
- [S5M8763_IRQ_WTSREVNT] = {
- .reg = 2,
- .mask = S5M8763_IRQ_WTSREVNT_MASK,
- },
- [S5M8763_IRQ_SMPLEVNT] = {
- .reg = 2,
- .mask = S5M8763_IRQ_SMPLEVNT_MASK,
- },
- [S5M8763_IRQ_ALARM1] = {
- .reg = 2,
- .mask = S5M8763_IRQ_ALARM1_MASK,
- },
- [S5M8763_IRQ_ALARM0] = {
- .reg = 2,
- .mask = S5M8763_IRQ_ALARM0_MASK,
- },
- [S5M8763_IRQ_ONKEY1S] = {
- .reg = 3,
- .mask = S5M8763_IRQ_ONKEY1S_MASK,
- },
- [S5M8763_IRQ_TOPOFFR] = {
- .reg = 3,
- .mask = S5M8763_IRQ_TOPOFFR_MASK,
- },
- [S5M8763_IRQ_DCINOVPR] = {
- .reg = 3,
- .mask = S5M8763_IRQ_DCINOVPR_MASK,
- },
- [S5M8763_IRQ_CHGRSTF] = {
- .reg = 3,
- .mask = S5M8763_IRQ_CHGRSTF_MASK,
- },
- [S5M8763_IRQ_DONER] = {
- .reg = 3,
- .mask = S5M8763_IRQ_DONER_MASK,
- },
- [S5M8763_IRQ_CHGFAULT] = {
- .reg = 3,
- .mask = S5M8763_IRQ_CHGFAULT_MASK,
- },
- [S5M8763_IRQ_LOBAT1] = {
- .reg = 4,
- .mask = S5M8763_IRQ_LOBAT1_MASK,
- },
- [S5M8763_IRQ_LOBAT2] = {
- .reg = 4,
- .mask = S5M8763_IRQ_LOBAT2_MASK,
- },
-};
-
-static inline struct s5m_irq_data *
-irq_to_s5m8767_irq(struct s5m87xx_dev *s5m87xx, int irq)
-{
- return &s5m8767_irqs[irq - s5m87xx->irq_base];
-}
-
-static void s5m8767_irq_lock(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&s5m87xx->irqlock);
-}
-
-static void s5m8767_irq_sync_unlock(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
- if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
- s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
- s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
- s5m87xx->irq_masks_cur[i]);
- }
- }
-
- mutex_unlock(&s5m87xx->irqlock);
-}
-
-static void s5m8767_irq_unmask(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
- data->irq);
-
- s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
-}
-
-static void s5m8767_irq_mask(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
- data->irq);
-
- s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
-}
-
-static struct irq_chip s5m8767_irq_chip = {
- .name = "s5m8767",
- .irq_bus_lock = s5m8767_irq_lock,
- .irq_bus_sync_unlock = s5m8767_irq_sync_unlock,
- .irq_mask = s5m8767_irq_mask,
- .irq_unmask = s5m8767_irq_unmask,
-};
-
-static inline struct s5m_irq_data *
-irq_to_s5m8763_irq(struct s5m87xx_dev *s5m87xx, int irq)
-{
- return &s5m8763_irqs[irq - s5m87xx->irq_base];
-}
-
-static void s5m8763_irq_lock(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&s5m87xx->irqlock);
-}
-
-static void s5m8763_irq_sync_unlock(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
- if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
- s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
- s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
- s5m87xx->irq_masks_cur[i]);
- }
- }
-
- mutex_unlock(&s5m87xx->irqlock);
-}
-
-static void s5m8763_irq_unmask(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
- data->irq);
-
- s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
-}
-
-static void s5m8763_irq_mask(struct irq_data *data)
-{
- struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
- struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
- data->irq);
-
- s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
-}
-
-static struct irq_chip s5m8763_irq_chip = {
- .name = "s5m8763",
- .irq_bus_lock = s5m8763_irq_lock,
- .irq_bus_sync_unlock = s5m8763_irq_sync_unlock,
- .irq_mask = s5m8763_irq_mask,
- .irq_unmask = s5m8763_irq_unmask,
-};
-
-
-static irqreturn_t s5m8767_irq_thread(int irq, void *data)
-{
- struct s5m87xx_dev *s5m87xx = data;
- u8 irq_reg[NUM_IRQ_REGS-1];
- int ret;
- int i;
-
-
- ret = s5m_bulk_read(s5m87xx, S5M8767_REG_INT1,
- NUM_IRQ_REGS - 1, irq_reg);
- if (ret < 0) {
- dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- for (i = 0; i < NUM_IRQ_REGS - 1; i++)
- irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
-
- for (i = 0; i < S5M8767_IRQ_NR; i++) {
- if (irq_reg[s5m8767_irqs[i].reg - 1] & s5m8767_irqs[i].mask)
- handle_nested_irq(s5m87xx->irq_base + i);
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t s5m8763_irq_thread(int irq, void *data)
-{
- struct s5m87xx_dev *s5m87xx = data;
- u8 irq_reg[NUM_IRQ_REGS];
- int ret;
- int i;
-
- ret = s5m_bulk_read(s5m87xx, S5M8763_REG_IRQ1,
- NUM_IRQ_REGS, irq_reg);
- if (ret < 0) {
- dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- for (i = 0; i < NUM_IRQ_REGS; i++)
- irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
-
- for (i = 0; i < S5M8763_IRQ_NR; i++) {
- if (irq_reg[s5m8763_irqs[i].reg - 1] & s5m8763_irqs[i].mask)
- handle_nested_irq(s5m87xx->irq_base + i);
- }
-
- return IRQ_HANDLED;
-}
-
-int s5m_irq_resume(struct s5m87xx_dev *s5m87xx)
-{
- if (s5m87xx->irq && s5m87xx->irq_base){
- switch (s5m87xx->device_type) {
- case S5M8763X:
- s5m8763_irq_thread(s5m87xx->irq_base, s5m87xx);
- break;
- case S5M8767X:
- s5m8767_irq_thread(s5m87xx->irq_base, s5m87xx);
- break;
- default:
- dev_err(s5m87xx->dev,
- "Unknown device type %d\n",
- s5m87xx->device_type);
- return -EINVAL;
-
- }
- }
- return 0;
-}
-
-int s5m_irq_init(struct s5m87xx_dev *s5m87xx)
-{
- int i;
- int cur_irq;
- int ret = 0;
- int type = s5m87xx->device_type;
-
- if (!s5m87xx->irq) {
- dev_warn(s5m87xx->dev,
- "No interrupt specified, no interrupts\n");
- s5m87xx->irq_base = 0;
- return 0;
- }
-
- if (!s5m87xx->irq_base) {
- dev_err(s5m87xx->dev,
- "No interrupt base specified, no interrupts\n");
- return 0;
- }
-
- mutex_init(&s5m87xx->irqlock);
-
- switch (type) {
- case S5M8763X:
- for (i = 0; i < NUM_IRQ_REGS; i++) {
- s5m87xx->irq_masks_cur[i] = 0xff;
- s5m87xx->irq_masks_cache[i] = 0xff;
- s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
- 0xff);
- }
-
- s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM1, 0xff);
- s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM2, 0xff);
-
- for (i = 0; i < S5M8763_IRQ_NR; i++) {
- cur_irq = i + s5m87xx->irq_base;
- irq_set_chip_data(cur_irq, s5m87xx);
- irq_set_chip_and_handler(cur_irq, &s5m8763_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
-
- ret = request_threaded_irq(s5m87xx->irq, NULL,
- s5m8763_irq_thread,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "s5m87xx-irq", s5m87xx);
- if (ret) {
- dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
- s5m87xx->irq, ret);
- return ret;
- }
- break;
- case S5M8767X:
- for (i = 0; i < NUM_IRQ_REGS - 1; i++) {
- s5m87xx->irq_masks_cur[i] = 0xff;
- s5m87xx->irq_masks_cache[i] = 0xff;
- s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
- 0xff);
- }
- for (i = 0; i < S5M8767_IRQ_NR; i++) {
- cur_irq = i + s5m87xx->irq_base;
- irq_set_chip_data(cur_irq, s5m87xx);
- if (ret) {
- dev_err(s5m87xx->dev,
- "Failed to irq_set_chip_data %d: %d\n",
- s5m87xx->irq, ret);
- return ret;
- }
-
- irq_set_chip_and_handler(cur_irq, &s5m8767_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
-
- ret = request_threaded_irq(s5m87xx->irq, NULL,
- s5m8767_irq_thread,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "s5m87xx-irq", s5m87xx);
- if (ret) {
- dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
- s5m87xx->irq, ret);
- return ret;
- }
- break;
- default:
- dev_err(s5m87xx->dev,
- "Unknown device type %d\n", s5m87xx->device_type);
- return -EINVAL;
- }
-
- if (!s5m87xx->ono)
- return 0;
-
- switch (type) {
- case S5M8763X:
- ret = request_threaded_irq(s5m87xx->ono, NULL,
- s5m8763_irq_thread,
- IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING |
- IRQF_ONESHOT, "s5m87xx-ono",
- s5m87xx);
- break;
- case S5M8767X:
- ret = request_threaded_irq(s5m87xx->ono, NULL,
- s5m8767_irq_thread,
- IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING |
- IRQF_ONESHOT, "s5m87xx-ono", s5m87xx);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- if (ret) {
- dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
- s5m87xx->ono, ret);
- return ret;
- }
-
- return 0;
-}
-
-void s5m_irq_exit(struct s5m87xx_dev *s5m87xx)
-{
- if (s5m87xx->ono)
- free_irq(s5m87xx->ono, s5m87xx);
-
- if (s5m87xx->irq)
- free_irq(s5m87xx->irq, s5m87xx);
-}
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
new file mode 100644
index 000000000000..2988efde11eb
--- /dev/null
+++ b/drivers/mfd/sec-core.c
@@ -0,0 +1,216 @@
+/*
+ * sec-core.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/rtc.h>
+#include <linux/regmap.h>
+
+static struct mfd_cell s5m8751_devs[] = {
+ {
+ .name = "s5m8751-pmic",
+ }, {
+ .name = "s5m-charger",
+ }, {
+ .name = "s5m8751-codec",
+ },
+};
+
+static struct mfd_cell s5m8763_devs[] = {
+ {
+ .name = "s5m8763-pmic",
+ }, {
+ .name = "s5m-rtc",
+ }, {
+ .name = "s5m-charger",
+ },
+};
+
+static struct mfd_cell s5m8767_devs[] = {
+ {
+ .name = "s5m8767-pmic",
+ }, {
+ .name = "s5m-rtc",
+ },
+};
+
+static struct mfd_cell s2mps11_devs[] = {
+ {
+ .name = "s2mps11-pmic",
+ },
+};
+
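+/* Thin regmap wrappers exported for use by the PMIC and RTC sub-drivers. */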
+int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
+{
+ return regmap_read(sec_pmic->regmap, reg, dest);
+}
+EXPORT_SYMBOL_GPL(sec_reg_read);
+
+int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
+{
+ return regmap_bulk_read(sec_pmic->regmap, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(sec_bulk_read);
+
+int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
+{
+ return regmap_write(sec_pmic->regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(sec_reg_write);
+
+int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
+{
+ return regmap_raw_write(sec_pmic->regmap, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(sec_bulk_write);
+
+int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
+{
+ return regmap_update_bits(sec_pmic->regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(sec_reg_update);
+
+static struct regmap_config sec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int sec_pmic_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct sec_platform_data *pdata = i2c->dev.platform_data;
+ struct sec_pmic_dev *sec_pmic;
+ int ret;
+
+ sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
+ GFP_KERNEL);
+ if (sec_pmic == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, sec_pmic);
+ sec_pmic->dev = &i2c->dev;
+ sec_pmic->i2c = i2c;
+ sec_pmic->irq = i2c->irq;
+ sec_pmic->type = id->driver_data;
+
+ if (pdata) {
+ sec_pmic->device_type = pdata->device_type;
+ sec_pmic->ono = pdata->ono;
+ sec_pmic->irq_base = pdata->irq_base;
+ sec_pmic->wakeup = pdata->wakeup;
+ }
+
+ sec_pmic->regmap = devm_regmap_init_i2c(i2c, &sec_regmap_config);
+ if (IS_ERR(sec_pmic->regmap)) {
+ ret = PTR_ERR(sec_pmic->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
+
+ if (pdata && pdata->cfg_pmic_irq)
+ pdata->cfg_pmic_irq();
+
+ sec_irq_init(sec_pmic);
+
+ pm_runtime_set_active(sec_pmic->dev);
+
+ switch (sec_pmic->device_type) {
+ case S5M8751X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s5m8751_devs,
+ ARRAY_SIZE(s5m8751_devs), NULL, 0);
+ break;
+ case S5M8763X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s5m8763_devs,
+ ARRAY_SIZE(s5m8763_devs), NULL, 0);
+ break;
+ case S5M8767X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
+ ARRAY_SIZE(s5m8767_devs), NULL, 0);
+ break;
+ case S2MPS11X:
+ ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
+ ARRAY_SIZE(s2mps11_devs), NULL, 0);
+ break;
+ default:
+ /* If this happens, the probe function has a bug */
+ BUG();
+ }
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(sec_pmic->dev);
+ sec_irq_exit(sec_pmic);
+ i2c_unregister_device(sec_pmic->rtc);
+ return ret;
+}
+
+static int sec_pmic_remove(struct i2c_client *i2c)
+{
+ struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(sec_pmic->dev);
+ sec_irq_exit(sec_pmic);
+ i2c_unregister_device(sec_pmic->rtc);
+ return 0;
+}
+
+static const struct i2c_device_id sec_pmic_id[] = {
+ { "sec_pmic", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sec_pmic_id);
+
+static struct i2c_driver sec_pmic_driver = {
+ .driver = {
+ .name = "sec_pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = sec_pmic_probe,
+ .remove = sec_pmic_remove,
+ .id_table = sec_pmic_id,
+};
+
+static int __init sec_pmic_init(void)
+{
+ return i2c_add_driver(&sec_pmic_driver);
+}
+
+subsys_initcall(sec_pmic_init);
+
+static void __exit sec_pmic_exit(void)
+{
+ i2c_del_driver(&sec_pmic_driver);
+}
+module_exit(sec_pmic_exit);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("Core support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
new file mode 100644
index 000000000000..c901fa50fea1
--- /dev/null
+++ b/drivers/mfd/sec-irq.c
@@ -0,0 +1,317 @@
+/*
+ * sec-irq.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/irq.h>
+#include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s5m8763.h>
+#include <linux/mfd/samsung/s5m8767.h>
+
+static struct regmap_irq s2mps11_irqs[] = {
+ [S2MPS11_IRQ_PWRONF] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_PWRONF_MASK,
+ },
+ [S2MPS11_IRQ_PWRONR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_PWRONR_MASK,
+ },
+ [S2MPS11_IRQ_JIGONBF] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_JIGONBF_MASK,
+ },
+ [S2MPS11_IRQ_JIGONBR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_JIGONBR_MASK,
+ },
+ [S2MPS11_IRQ_ACOKBF] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_ACOKBF_MASK,
+ },
+ [S2MPS11_IRQ_ACOKBR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_ACOKBR_MASK,
+ },
+ [S2MPS11_IRQ_PWRON1S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_PWRON1S_MASK,
+ },
+ [S2MPS11_IRQ_MRB] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_MRB_MASK,
+ },
+ [S2MPS11_IRQ_RTC60S] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_RTC60S_MASK,
+ },
+ [S2MPS11_IRQ_RTCA1] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
+ },
+ [S2MPS11_IRQ_RTCA2] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_RTCA2_MASK,
+ },
+ [S2MPS11_IRQ_SMPL] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_SMPL_MASK,
+ },
+ [S2MPS11_IRQ_RTC1S] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_RTC1S_MASK,
+ },
+ [S2MPS11_IRQ_WTSR] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_WTSR_MASK,
+ },
+ [S2MPS11_IRQ_INT120C] = {
+ .reg_offset = 3,
+ .mask = S2MPS11_IRQ_INT120C_MASK,
+ },
+ [S2MPS11_IRQ_INT140C] = {
+ .reg_offset = 3,
+ .mask = S2MPS11_IRQ_INT140C_MASK,
+ },
+};
+
+
+static struct regmap_irq s5m8767_irqs[] = {
+ [S5M8767_IRQ_PWRR] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_PWRR_MASK,
+ },
+ [S5M8767_IRQ_PWRF] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_PWRF_MASK,
+ },
+ [S5M8767_IRQ_PWR1S] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_PWR1S_MASK,
+ },
+ [S5M8767_IRQ_JIGR] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_JIGR_MASK,
+ },
+ [S5M8767_IRQ_JIGF] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_JIGF_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT2] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_LOWBAT2_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT1] = {
+ .reg_offset = 1,
+ .mask = S5M8767_IRQ_LOWBAT1_MASK,
+ },
+ [S5M8767_IRQ_MRB] = {
+ .reg_offset = 2,
+ .mask = S5M8767_IRQ_MRB_MASK,
+ },
+ [S5M8767_IRQ_DVSOK2] = {
+ .reg_offset = 2,
+ .mask = S5M8767_IRQ_DVSOK2_MASK,
+ },
+ [S5M8767_IRQ_DVSOK3] = {
+ .reg_offset = 2,
+ .mask = S5M8767_IRQ_DVSOK3_MASK,
+ },
+ [S5M8767_IRQ_DVSOK4] = {
+ .reg_offset = 2,
+ .mask = S5M8767_IRQ_DVSOK4_MASK,
+ },
+ [S5M8767_IRQ_RTC60S] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_RTC60S_MASK,
+ },
+ [S5M8767_IRQ_RTCA1] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_RTCA1_MASK,
+ },
+ [S5M8767_IRQ_RTCA2] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_RTCA2_MASK,
+ },
+ [S5M8767_IRQ_SMPL] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_SMPL_MASK,
+ },
+ [S5M8767_IRQ_RTC1S] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_RTC1S_MASK,
+ },
+ [S5M8767_IRQ_WTSR] = {
+ .reg_offset = 3,
+ .mask = S5M8767_IRQ_WTSR_MASK,
+ },
+};
+
+static struct regmap_irq s5m8763_irqs[] = {
+ [S5M8763_IRQ_DCINF] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_DCINF_MASK,
+ },
+ [S5M8763_IRQ_DCINR] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_DCINR_MASK,
+ },
+ [S5M8763_IRQ_JIGF] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_JIGF_MASK,
+ },
+ [S5M8763_IRQ_JIGR] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_JIGR_MASK,
+ },
+ [S5M8763_IRQ_PWRONF] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_PWRONF_MASK,
+ },
+ [S5M8763_IRQ_PWRONR] = {
+ .reg_offset = 1,
+ .mask = S5M8763_IRQ_PWRONR_MASK,
+ },
+ [S5M8763_IRQ_WTSREVNT] = {
+ .reg_offset = 2,
+ .mask = S5M8763_IRQ_WTSREVNT_MASK,
+ },
+ [S5M8763_IRQ_SMPLEVNT] = {
+ .reg_offset = 2,
+ .mask = S5M8763_IRQ_SMPLEVNT_MASK,
+ },
+ [S5M8763_IRQ_ALARM1] = {
+ .reg_offset = 2,
+ .mask = S5M8763_IRQ_ALARM1_MASK,
+ },
+ [S5M8763_IRQ_ALARM0] = {
+ .reg_offset = 2,
+ .mask = S5M8763_IRQ_ALARM0_MASK,
+ },
+ [S5M8763_IRQ_ONKEY1S] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_ONKEY1S_MASK,
+ },
+ [S5M8763_IRQ_TOPOFFR] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_TOPOFFR_MASK,
+ },
+ [S5M8763_IRQ_DCINOVPR] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_DCINOVPR_MASK,
+ },
+ [S5M8763_IRQ_CHGRSTF] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_CHGRSTF_MASK,
+ },
+ [S5M8763_IRQ_DONER] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_DONER_MASK,
+ },
+ [S5M8763_IRQ_CHGFAULT] = {
+ .reg_offset = 3,
+ .mask = S5M8763_IRQ_CHGFAULT_MASK,
+ },
+ [S5M8763_IRQ_LOBAT1] = {
+ .reg_offset = 4,
+ .mask = S5M8763_IRQ_LOBAT1_MASK,
+ },
+ [S5M8763_IRQ_LOBAT2] = {
+ .reg_offset = 4,
+ .mask = S5M8763_IRQ_LOBAT2_MASK,
+ },
+};
+
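+/*
+ * Per-variant regmap_irq_chip descriptions; masking and acking are now
+ * handled by the generic regmap-irq core rather than open-coded handlers.
+ */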
+static struct regmap_irq_chip s2mps11_irq_chip = {
+ .name = "s2mps11",
+ .irqs = s2mps11_irqs,
+ .num_irqs = ARRAY_SIZE(s2mps11_irqs),
+ .num_regs = 3,
+ .status_base = S2MPS11_REG_INT1,
+ .mask_base = S2MPS11_REG_INT1M,
+ .ack_base = S2MPS11_REG_INT1,
+};
+
+static struct regmap_irq_chip s5m8767_irq_chip = {
+ .name = "s5m8767",
+ .irqs = s5m8767_irqs,
+ .num_irqs = ARRAY_SIZE(s5m8767_irqs),
+ .num_regs = 3,
+ .status_base = S5M8767_REG_INT1,
+ .mask_base = S5M8767_REG_INT1M,
+ .ack_base = S5M8767_REG_INT1,
+};
+
+static struct regmap_irq_chip s5m8763_irq_chip = {
+ .name = "s5m8763",
+ .irqs = s5m8763_irqs,
+ .num_irqs = ARRAY_SIZE(s5m8763_irqs),
+ .num_regs = 4,
+ .status_base = S5M8763_REG_IRQ1,
+ .mask_base = S5M8763_REG_IRQM1,
+ .ack_base = S5M8763_REG_IRQ1,
+};
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic)
+{
+ int ret = 0;
+ int type = sec_pmic->device_type;
+
+ if (!sec_pmic->irq) {
+ dev_warn(sec_pmic->dev,
+ "No interrupt specified, no interrupts\n");
+ sec_pmic->irq_base = 0;
+ return 0;
+ }
+
+ switch (type) {
+ case S5M8763X:
+ ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, &s5m8763_irq_chip,
+ &sec_pmic->irq_data);
+ break;
+ case S5M8767X:
+ ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, &s5m8767_irq_chip,
+ &sec_pmic->irq_data);
+ break;
+ case S2MPS11X:
+ ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, &s2mps11_irq_chip,
+ &sec_pmic->irq_data);
+ break;
+ default:
+ dev_err(sec_pmic->dev, "Unknown device type %d\n",
+ sec_pmic->device_type);
+ return -EINVAL;
+ }
+
+ if (ret != 0) {
+ dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void sec_irq_exit(struct sec_pmic_dev *sec_pmic)
+{
+ regmap_del_irq_chip(sec_pmic->irq, sec_pmic->irq_data);
+}
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index de979742c6fc..048bf0532a09 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -357,7 +357,7 @@ static int __devexit tc3589x_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int tc3589x_suspend(struct device *dev)
{
struct tc3589x *tc3589x = dev_get_drvdata(dev);
@@ -385,11 +385,10 @@ static int tc3589x_resume(struct device *dev)
return ret;
}
-
-static const SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend,
- tc3589x_resume);
#endif
+static SIMPLE_DEV_PM_OPS(tc3589x_dev_pm_ops, tc3589x_suspend, tc3589x_resume);
+
static const struct i2c_device_id tc3589x_id[] = {
{ "tc3589x", 24 },
{ }
@@ -399,9 +398,7 @@ MODULE_DEVICE_TABLE(i2c, tc3589x_id);
static struct i2c_driver tc3589x_driver = {
.driver.name = "tc3589x",
.driver.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.driver.pm = &tc3589x_dev_pm_ops,
-#endif
.probe = tc3589x_probe,
.remove = __devexit_p(tc3589x_remove),
.id_table = tc3589x_id,
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 0ba26fb12cf5..a447f4ec11fb 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -83,7 +83,7 @@ timberdale_xiic_platform_data = {
static __devinitdata struct ocores_i2c_platform_data
timberdale_ocores_platform_data = {
- .regstep = 4,
+ .reg_shift = 2,
.clock_khz = 62500,
.devices = timberdale_i2c_board_info,
.num_devices = ARRAY_SIZE(timberdale_i2c_board_info)
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 93d5fdf020c7..da2691f22e11 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -563,8 +563,7 @@ static int tps65010_probe(struct i2c_client *client,
*/
if (client->irq > 0) {
status = request_irq(client->irq, tps65010_irq,
- IRQF_SAMPLE_RANDOM | IRQF_TRIGGER_FALLING,
- DRIVER_NAME, tps);
+ IRQF_TRIGGER_FALLING, DRIVER_NAME, tps);
if (status < 0) {
dev_dbg(&client->dev, "can't get IRQ %d, err %d\n",
client->irq, status);
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 396b9d1b6bd6..80e24f4b47bf 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -71,10 +71,10 @@ static const struct tps65090_irq_data tps65090_irqs[] = {
static struct mfd_cell tps65090s[] = {
{
- .name = "tps65910-pmic",
+ .name = "tps65090-pmic",
},
{
- .name = "tps65910-regulator",
+ .name = "tps65090-regulator",
},
};
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index c84b5506d5fb..353c34812120 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -21,17 +21,14 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6586x.h>
-/* GPIO control registers */
-#define TPS6586X_GPIOSET1 0x5d
-#define TPS6586X_GPIOSET2 0x5e
-
/* interrupt control registers */
#define TPS6586X_INT_ACK1 0xb5
#define TPS6586X_INT_ACK2 0xb6
@@ -48,6 +45,9 @@
/* device id */
#define TPS6586X_VERSIONCRC 0xcd
+/* Maximum register */
+#define TPS6586X_MAX_REGISTER (TPS6586X_VERSIONCRC + 1)
+
struct tps6586x_irq_data {
u8 mask_reg;
u8 mask_mask;
@@ -89,226 +89,96 @@ static const struct tps6586x_irq_data tps6586x_irqs[] = {
[TPS6586X_INT_RTC_ALM2] = TPS6586X_IRQ(TPS6586X_INT_MASK4, 1 << 1),
};
+static struct mfd_cell tps6586x_cell[] = {
+ {
+ .name = "tps6586x-gpio",
+ },
+ {
+ .name = "tps6586x-rtc",
+ },
+ {
+ .name = "tps6586x-onkey",
+ },
+};
+
struct tps6586x {
- struct mutex lock;
struct device *dev;
struct i2c_client *client;
+ struct regmap *regmap;
- struct gpio_chip gpio;
struct irq_chip irq_chip;
struct mutex irq_lock;
int irq_base;
u32 irq_en;
- u8 mask_cache[5];
u8 mask_reg[5];
};
-static inline int __tps6586x_read(struct i2c_client *client,
- int reg, uint8_t *val)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(client, reg);
- if (ret < 0) {
- dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
- return ret;
- }
-
- *val = (uint8_t)ret;
-
- return 0;
-}
-
-static inline int __tps6586x_reads(struct i2c_client *client, int reg,
- int len, uint8_t *val)
-{
- int ret;
-
- ret = i2c_smbus_read_i2c_block_data(client, reg, len, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed reading from 0x%02x\n", reg);
- return ret;
- }
-
- return 0;
-}
-
-static inline int __tps6586x_write(struct i2c_client *client,
- int reg, uint8_t val)
+static inline struct tps6586x *dev_to_tps6586x(struct device *dev)
{
- int ret;
-
- ret = i2c_smbus_write_byte_data(client, reg, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed writing 0x%02x to 0x%02x\n",
- val, reg);
- return ret;
- }
-
- return 0;
-}
-
-static inline int __tps6586x_writes(struct i2c_client *client, int reg,
- int len, uint8_t *val)
-{
- int ret, i;
-
- for (i = 0; i < len; i++) {
- ret = __tps6586x_write(client, reg + i, *(val + i));
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return i2c_get_clientdata(to_i2c_client(dev));
}
int tps6586x_write(struct device *dev, int reg, uint8_t val)
{
- return __tps6586x_write(to_i2c_client(dev), reg, val);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return regmap_write(tps6586x->regmap, reg, val);
}
EXPORT_SYMBOL_GPL(tps6586x_write);
int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val)
{
- return __tps6586x_writes(to_i2c_client(dev), reg, len, val);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return regmap_bulk_write(tps6586x->regmap, reg, val, len);
}
EXPORT_SYMBOL_GPL(tps6586x_writes);
int tps6586x_read(struct device *dev, int reg, uint8_t *val)
{
- return __tps6586x_read(to_i2c_client(dev), reg, val);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+ unsigned int rval;
+ int ret;
+
+ ret = regmap_read(tps6586x->regmap, reg, &rval);
+ if (!ret)
+ *val = rval;
+ return ret;
}
EXPORT_SYMBOL_GPL(tps6586x_read);
int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val)
{
- return __tps6586x_reads(to_i2c_client(dev), reg, len, val);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
+
+ return regmap_bulk_read(tps6586x->regmap, reg, val, len);
}
EXPORT_SYMBOL_GPL(tps6586x_reads);
int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
{
- struct tps6586x *tps6586x = dev_get_drvdata(dev);
- uint8_t reg_val;
- int ret = 0;
-
- mutex_lock(&tps6586x->lock);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
- ret = __tps6586x_read(to_i2c_client(dev), reg, &reg_val);
- if (ret)
- goto out;
-
- if ((reg_val & bit_mask) != bit_mask) {
- reg_val |= bit_mask;
- ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
- }
-out:
- mutex_unlock(&tps6586x->lock);
- return ret;
+ return regmap_update_bits(tps6586x->regmap, reg, bit_mask, bit_mask);
}
EXPORT_SYMBOL_GPL(tps6586x_set_bits);
int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask)
{
- struct tps6586x *tps6586x = dev_get_drvdata(dev);
- uint8_t reg_val;
- int ret = 0;
-
- mutex_lock(&tps6586x->lock);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
- ret = __tps6586x_read(to_i2c_client(dev), reg, &reg_val);
- if (ret)
- goto out;
-
- if (reg_val & bit_mask) {
- reg_val &= ~bit_mask;
- ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
- }
-out:
- mutex_unlock(&tps6586x->lock);
- return ret;
+ return regmap_update_bits(tps6586x->regmap, reg, bit_mask, 0);
}
EXPORT_SYMBOL_GPL(tps6586x_clr_bits);
int tps6586x_update(struct device *dev, int reg, uint8_t val, uint8_t mask)
{
- struct tps6586x *tps6586x = dev_get_drvdata(dev);
- uint8_t reg_val;
- int ret = 0;
-
- mutex_lock(&tps6586x->lock);
+ struct tps6586x *tps6586x = dev_to_tps6586x(dev);
- ret = __tps6586x_read(tps6586x->client, reg, &reg_val);
- if (ret)
- goto out;
-
- if ((reg_val & mask) != val) {
- reg_val = (reg_val & ~mask) | val;
- ret = __tps6586x_write(tps6586x->client, reg, reg_val);
- }
-out:
- mutex_unlock(&tps6586x->lock);
- return ret;
+ return regmap_update_bits(tps6586x->regmap, reg, mask, val);
}
EXPORT_SYMBOL_GPL(tps6586x_update);
-static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
-{
- struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
- uint8_t val;
- int ret;
-
- ret = __tps6586x_read(tps6586x->client, TPS6586X_GPIOSET2, &val);
- if (ret)
- return ret;
-
- return !!(val & (1 << offset));
-}
-
-
-static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio);
-
- tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET2,
- value << offset, 1 << offset);
-}
-
-static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- struct tps6586x *tps6586x = container_of(gc, struct tps6586x, gpio);
- uint8_t val, mask;
-
- tps6586x_gpio_set(gc, offset, value);
-
- val = 0x1 << (offset * 2);
- mask = 0x3 << (offset * 2);
-
- return tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET1, val, mask);
-}
-
-static int tps6586x_gpio_init(struct tps6586x *tps6586x, int gpio_base)
-{
- if (!gpio_base)
- return 0;
-
- tps6586x->gpio.owner = THIS_MODULE;
- tps6586x->gpio.label = tps6586x->client->name;
- tps6586x->gpio.dev = tps6586x->dev;
- tps6586x->gpio.base = gpio_base;
- tps6586x->gpio.ngpio = 4;
- tps6586x->gpio.can_sleep = 1;
-
- /* FIXME: add handling of GPIOs as dedicated inputs */
- tps6586x->gpio.direction_output = tps6586x_gpio_output;
- tps6586x->gpio.set = tps6586x_gpio_set;
- tps6586x->gpio.get = tps6586x_gpio_get;
-
- return gpiochip_add(&tps6586x->gpio);
-}
-
static int __remove_subdev(struct device *dev, void *unused)
{
platform_device_unregister(to_platform_device(dev));
@@ -354,12 +224,11 @@ static void tps6586x_irq_sync_unlock(struct irq_data *data)
int i;
for (i = 0; i < ARRAY_SIZE(tps6586x->mask_reg); i++) {
- if (tps6586x->mask_reg[i] != tps6586x->mask_cache[i]) {
- if (!WARN_ON(tps6586x_write(tps6586x->dev,
- TPS6586X_INT_MASK1 + i,
- tps6586x->mask_reg[i])))
- tps6586x->mask_cache[i] = tps6586x->mask_reg[i];
- }
+ int ret;
+ ret = tps6586x_write(tps6586x->dev,
+ TPS6586X_INT_MASK1 + i,
+ tps6586x->mask_reg[i]);
+ WARN_ON(ret);
}
mutex_unlock(&tps6586x->irq_lock);
@@ -406,7 +275,6 @@ static int __devinit tps6586x_irq_init(struct tps6586x *tps6586x, int irq,
mutex_init(&tps6586x->irq_lock);
for (i = 0; i < 5; i++) {
- tps6586x->mask_cache[i] = 0xff;
tps6586x->mask_reg[i] = 0xff;
tps6586x_write(tps6586x->dev, TPS6586X_INT_MASK1 + i, 0xff);
}
@@ -556,6 +424,23 @@ static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *clien
}
#endif
+static bool is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ /* Cache all interrupt mask register */
+ if ((reg >= TPS6586X_INT_MASK1) && (reg <= TPS6586X_INT_MASK5))
+ return false;
+
+ return true;
+}
+
+static const struct regmap_config tps6586x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TPS6586X_MAX_REGISTER - 1,
+ .volatile_reg = is_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
+
static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -579,29 +464,39 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
dev_info(&client->dev, "VERSIONCRC is %02x\n", ret);
- tps6586x = kzalloc(sizeof(struct tps6586x), GFP_KERNEL);
- if (tps6586x == NULL)
+ tps6586x = devm_kzalloc(&client->dev, sizeof(*tps6586x), GFP_KERNEL);
+ if (tps6586x == NULL) {
+ dev_err(&client->dev, "memory for tps6586x alloc failed\n");
return -ENOMEM;
+ }
tps6586x->client = client;
tps6586x->dev = &client->dev;
i2c_set_clientdata(client, tps6586x);
- mutex_init(&tps6586x->lock);
+ tps6586x->regmap = devm_regmap_init_i2c(client,
+ &tps6586x_regmap_config);
+ if (IS_ERR(tps6586x->regmap)) {
+ ret = PTR_ERR(tps6586x->regmap);
+ dev_err(&client->dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
if (client->irq) {
ret = tps6586x_irq_init(tps6586x, client->irq,
pdata->irq_base);
if (ret) {
dev_err(&client->dev, "IRQ init failed: %d\n", ret);
- goto err_irq_init;
+ return ret;
}
}
- ret = tps6586x_gpio_init(tps6586x, pdata->gpio_base);
- if (ret) {
- dev_err(&client->dev, "GPIO registration failed: %d\n", ret);
- goto err_gpio_init;
+ ret = mfd_add_devices(tps6586x->dev, -1,
+ tps6586x_cell, ARRAY_SIZE(tps6586x_cell), NULL, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret);
+ goto err_mfd_add;
}
ret = tps6586x_add_subdevs(tps6586x, pdata);
@@ -613,38 +508,21 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client,
return 0;
err_add_devs:
- if (pdata->gpio_base) {
- ret = gpiochip_remove(&tps6586x->gpio);
- if (ret)
- dev_err(&client->dev, "Can't remove gpio chip: %d\n",
- ret);
- }
-err_gpio_init:
+ mfd_remove_devices(tps6586x->dev);
+err_mfd_add:
if (client->irq)
free_irq(client->irq, tps6586x);
-err_irq_init:
- kfree(tps6586x);
return ret;
}
static int __devexit tps6586x_i2c_remove(struct i2c_client *client)
{
struct tps6586x *tps6586x = i2c_get_clientdata(client);
- struct tps6586x_platform_data *pdata = client->dev.platform_data;
- int ret;
+ tps6586x_remove_subdevs(tps6586x);
+ mfd_remove_devices(tps6586x->dev);
if (client->irq)
free_irq(client->irq, tps6586x);
-
- if (pdata->gpio_base) {
- ret = gpiochip_remove(&tps6586x->gpio);
- if (ret)
- dev_err(&client->dev, "Can't remove gpio chip: %d\n",
- ret);
- }
-
- tps6586x_remove_subdevs(tps6586x);
- kfree(tps6586x);
return 0;
}
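
Note on the tps6586x hunk above: the open-coded i2c_smbus helpers and the driver-private mutex are replaced by a regmap created with devm_regmap_init_i2c(); set/clear/update all collapse onto regmap_update_bits(), which performs the read-modify-write under regmap's own locking, and only the interrupt mask registers (the range for which is_volatile_reg() returns false) stay in the rbtree cache. A minimal sketch of the resulting pattern, with example_* as placeholder names rather than anything taken from the driver:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.cache_type	= REGCACHE_RBTREE,
};

static int example_set_bits(struct regmap *map, unsigned int reg, u8 mask)
{
	/* replaces mutex_lock() + read + conditional write + mutex_unlock() */
	return regmap_update_bits(map, reg, mask, mask);
}

static int example_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return example_set_bits(map, 0x10, 0x01);	/* arbitrary register/bit */
}
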
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index be9e07b77325..1c563792c777 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -68,6 +68,24 @@ static const struct regmap_config tps65910_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static int __devinit tps65910_ck32k_init(struct tps65910 *tps65910,
+ struct tps65910_board *pmic_pdata)
+{
+ int ret;
+
+ if (!pmic_pdata->en_ck32k_xtal)
+ return 0;
+
+ ret = tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_CK32K_CTRL_MASK);
+ if (ret < 0) {
+ dev_err(tps65910->dev, "clear ck32k_ctrl failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
struct tps65910_board *pmic_pdata)
{
@@ -175,6 +193,9 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
else if (*chip_id == TPS65911)
dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+ prop = of_property_read_bool(np, "ti,en-ck32k-xtal");
+ board_info->en_ck32k_xtal = prop;
+
board_info->irq = client->irq;
board_info->irq_base = -1;
@@ -243,7 +264,7 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
init_data->irq_base = pmic_plat_data->irq_base;
tps65910_irq_init(tps65910, init_data->irq, init_data);
-
+ tps65910_ck32k_init(tps65910, pmic_plat_data);
tps65910_sleepinit(tps65910, pmic_plat_data);
return ret;
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 6fc90befa79e..1c32afed28aa 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -568,7 +568,6 @@ add_numbered_child(unsigned chip, const char *name, int num,
goto err;
}
- device_init_wakeup(&pdev->dev, can_wakeup);
pdev->dev.parent = &twl->client->dev;
if (pdata) {
@@ -593,6 +592,8 @@ add_numbered_child(unsigned chip, const char *name, int num,
}
status = platform_device_add(pdev);
+ if (status == 0)
+ device_init_wakeup(&pdev->dev, can_wakeup);
err:
if (status < 0) {
@@ -716,8 +717,9 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
- static struct regulator_consumer_supply usb3v1 = {
- .supply = "usb3v1",
+ static struct regulator_consumer_supply usb3v1[] = {
+ { .supply = "usb3v1" },
+ { .supply = "bci3v1" },
};
/* First add the regulators so that they can be used by transceiver */
@@ -745,7 +747,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB3V1,
- &usb_fixed, &usb3v1, 1,
+ &usb_fixed, usb3v1, 2,
features);
if (IS_ERR(child))
return PTR_ERR(child);
@@ -766,7 +768,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
if (twl_has_regulator() && child) {
usb1v5.dev_name = dev_name(child);
usb1v8.dev_name = dev_name(child);
- usb3v1.dev_name = dev_name(child);
+ usb3v1[0].dev_name = dev_name(child);
}
}
if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
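
Note on the twl-core hunk above: the single usb3v1 consumer supply becomes a two-entry array so the same VUSB3V1 regulator also feeds a "bci3v1" consumer (the TWL4030 battery-charger interface); add_regulator_linked() is now handed the array with a count of 2, and only entry 0 has its dev_name patched later. A minimal sketch of the consumer-array pattern as the regulator core sees it (example_* names are placeholders; add_regulator_linked() presumably wires the array into regulator_init_data along these lines):

#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_usb3v1[] = {
	{ .supply = "usb3v1" },		/* USB transceiver rail */
	{ .supply = "bci3v1" },		/* battery-charger interface rail */
};

static struct regulator_init_data example_init_data = {
	.consumer_supplies	= example_usb3v1,
	.num_consumer_supplies	= ARRAY_SIZE(example_usb3v1),
	.constraints = {
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
};
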
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 4ded9e7aa246..b0fad0ffca56 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -64,19 +64,15 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
int ret;
unsigned int val;
- mutex_lock(&twl6040->io_mutex);
/* Vibra control registers from cache */
if (unlikely(reg == TWL6040_REG_VIBCTLL ||
reg == TWL6040_REG_VIBCTLR)) {
val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
} else {
ret = regmap_read(twl6040->regmap, reg, &val);
- if (ret < 0) {
- mutex_unlock(&twl6040->io_mutex);
+ if (ret < 0)
return ret;
- }
}
- mutex_unlock(&twl6040->io_mutex);
return val;
}
@@ -86,12 +82,10 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
{
int ret;
- mutex_lock(&twl6040->io_mutex);
ret = regmap_write(twl6040->regmap, reg, val);
/* Cache the vibra control registers */
if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
- mutex_unlock(&twl6040->io_mutex);
return ret;
}
@@ -99,23 +93,13 @@ EXPORT_SYMBOL(twl6040_reg_write);
int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
{
- int ret;
-
- mutex_lock(&twl6040->io_mutex);
- ret = regmap_update_bits(twl6040->regmap, reg, mask, mask);
- mutex_unlock(&twl6040->io_mutex);
- return ret;
+ return regmap_update_bits(twl6040->regmap, reg, mask, mask);
}
EXPORT_SYMBOL(twl6040_set_bits);
int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
{
- int ret;
-
- mutex_lock(&twl6040->io_mutex);
- ret = regmap_update_bits(twl6040->regmap, reg, mask, 0);
- mutex_unlock(&twl6040->io_mutex);
- return ret;
+ return regmap_update_bits(twl6040->regmap, reg, mask, 0);
}
EXPORT_SYMBOL(twl6040_clear_bits);
@@ -573,7 +557,6 @@ static int __devinit twl6040_probe(struct i2c_client *client,
twl6040->irq = client->irq;
mutex_init(&twl6040->mutex);
- mutex_init(&twl6040->io_mutex);
init_completion(&twl6040->ready);
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
@@ -696,6 +679,7 @@ static int __devexit twl6040_remove(struct i2c_client *client)
static const struct i2c_device_id twl6040_i2c_id[] = {
{ "twl6040", 0, },
+ { "twl6041", 0, },
{ },
};
MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
new file mode 100644
index 000000000000..01b9255ed631
--- /dev/null
+++ b/drivers/mfd/wm5102-tables.c
@@ -0,0 +1,2399 @@
+/*
+ * wm5102-tables.c -- WM5102 data tables
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+
+#define WM5102_NUM_AOD_ISR 2
+#define WM5102_NUM_ISR 5
+
+static const struct reg_default wm5102_reva_patch[] = {
+ { 0x80, 0x0003 },
+ { 0x221, 0x0090 },
+ { 0x211, 0x0014 },
+ { 0x212, 0x0000 },
+ { 0x214, 0x000C },
+ { 0x171, 0x0002 },
+ { 0x171, 0x0000 },
+ { 0x461, 0x8000 },
+ { 0x463, 0x50F0 },
+ { 0x465, 0x4820 },
+ { 0x467, 0x4040 },
+ { 0x469, 0x3940 },
+ { 0x46B, 0x3310 },
+ { 0x46D, 0x2D80 },
+ { 0x46F, 0x2890 },
+ { 0x471, 0x1990 },
+ { 0x473, 0x1450 },
+ { 0x475, 0x1020 },
+ { 0x477, 0x0CD0 },
+ { 0x479, 0x0A30 },
+ { 0x47B, 0x0810 },
+ { 0x47D, 0x0510 },
+ { 0x500, 0x000D },
+ { 0x507, 0x1820 },
+ { 0x508, 0x1820 },
+ { 0x540, 0x000D },
+ { 0x547, 0x1820 },
+ { 0x548, 0x1820 },
+ { 0x580, 0x000D },
+ { 0x587, 0x1820 },
+ { 0x588, 0x1820 },
+ { 0x101, 0x8140 },
+ { 0x3000, 0x2225 },
+ { 0x3001, 0x3a03 },
+ { 0x3002, 0x0225 },
+ { 0x3003, 0x0801 },
+ { 0x3004, 0x6249 },
+ { 0x3005, 0x0c04 },
+ { 0x3006, 0x0225 },
+ { 0x3007, 0x5901 },
+ { 0x3008, 0xe249 },
+ { 0x3009, 0x030d },
+ { 0x300a, 0x0249 },
+ { 0x300b, 0x2c01 },
+ { 0x300c, 0xe249 },
+ { 0x300d, 0x4342 },
+ { 0x300e, 0xe249 },
+ { 0x300f, 0x73c0 },
+ { 0x3010, 0x4249 },
+ { 0x3011, 0x0c00 },
+ { 0x3012, 0x0225 },
+ { 0x3013, 0x1f01 },
+ { 0x3014, 0x0225 },
+ { 0x3015, 0x1e01 },
+ { 0x3016, 0x0225 },
+ { 0x3017, 0xfa00 },
+ { 0x3018, 0x0000 },
+ { 0x3019, 0xf000 },
+ { 0x301a, 0x0000 },
+ { 0x301b, 0xf000 },
+ { 0x301c, 0x0000 },
+ { 0x301d, 0xf000 },
+ { 0x301e, 0x0000 },
+ { 0x301f, 0xf000 },
+ { 0x3020, 0x0000 },
+ { 0x3021, 0xf000 },
+ { 0x3022, 0x0000 },
+ { 0x3023, 0xf000 },
+ { 0x3024, 0x0000 },
+ { 0x3025, 0xf000 },
+ { 0x3026, 0x0000 },
+ { 0x3027, 0xf000 },
+ { 0x3028, 0x0000 },
+ { 0x3029, 0xf000 },
+ { 0x302a, 0x0000 },
+ { 0x302b, 0xf000 },
+ { 0x302c, 0x0000 },
+ { 0x302d, 0xf000 },
+ { 0x302e, 0x0000 },
+ { 0x302f, 0xf000 },
+ { 0x3030, 0x0225 },
+ { 0x3031, 0x1a01 },
+ { 0x3032, 0x0225 },
+ { 0x3033, 0x1e00 },
+ { 0x3034, 0x0225 },
+ { 0x3035, 0x1f00 },
+ { 0x3036, 0x6225 },
+ { 0x3037, 0xf800 },
+ { 0x3038, 0x0000 },
+ { 0x3039, 0xf000 },
+ { 0x303a, 0x0000 },
+ { 0x303b, 0xf000 },
+ { 0x303c, 0x0000 },
+ { 0x303d, 0xf000 },
+ { 0x303e, 0x0000 },
+ { 0x303f, 0xf000 },
+ { 0x3040, 0x2226 },
+ { 0x3041, 0x3a03 },
+ { 0x3042, 0x0226 },
+ { 0x3043, 0x0801 },
+ { 0x3044, 0x6249 },
+ { 0x3045, 0x0c06 },
+ { 0x3046, 0x0226 },
+ { 0x3047, 0x5901 },
+ { 0x3048, 0xe249 },
+ { 0x3049, 0x030d },
+ { 0x304a, 0x0249 },
+ { 0x304b, 0x2c01 },
+ { 0x304c, 0xe249 },
+ { 0x304d, 0x4342 },
+ { 0x304e, 0xe249 },
+ { 0x304f, 0x73c0 },
+ { 0x3050, 0x4249 },
+ { 0x3051, 0x0c00 },
+ { 0x3052, 0x0226 },
+ { 0x3053, 0x1f01 },
+ { 0x3054, 0x0226 },
+ { 0x3055, 0x1e01 },
+ { 0x3056, 0x0226 },
+ { 0x3057, 0xfa00 },
+ { 0x3058, 0x0000 },
+ { 0x3059, 0xf000 },
+ { 0x305a, 0x0000 },
+ { 0x305b, 0xf000 },
+ { 0x305c, 0x0000 },
+ { 0x305d, 0xf000 },
+ { 0x305e, 0x0000 },
+ { 0x305f, 0xf000 },
+ { 0x3060, 0x0000 },
+ { 0x3061, 0xf000 },
+ { 0x3062, 0x0000 },
+ { 0x3063, 0xf000 },
+ { 0x3064, 0x0000 },
+ { 0x3065, 0xf000 },
+ { 0x3066, 0x0000 },
+ { 0x3067, 0xf000 },
+ { 0x3068, 0x0000 },
+ { 0x3069, 0xf000 },
+ { 0x306a, 0x0000 },
+ { 0x306b, 0xf000 },
+ { 0x306c, 0x0000 },
+ { 0x306d, 0xf000 },
+ { 0x306e, 0x0000 },
+ { 0x306f, 0xf000 },
+ { 0x3070, 0x0226 },
+ { 0x3071, 0x1a01 },
+ { 0x3072, 0x0226 },
+ { 0x3073, 0x1e00 },
+ { 0x3074, 0x0226 },
+ { 0x3075, 0x1f00 },
+ { 0x3076, 0x6226 },
+ { 0x3077, 0xf800 },
+ { 0x3078, 0x0000 },
+ { 0x3079, 0xf000 },
+ { 0x307a, 0x0000 },
+ { 0x307b, 0xf000 },
+ { 0x307c, 0x0000 },
+ { 0x307d, 0xf000 },
+ { 0x307e, 0x0000 },
+ { 0x307f, 0xf000 },
+ { 0x3080, 0x2227 },
+ { 0x3081, 0x3a03 },
+ { 0x3082, 0x0227 },
+ { 0x3083, 0x0801 },
+ { 0x3084, 0x6255 },
+ { 0x3085, 0x0c04 },
+ { 0x3086, 0x0227 },
+ { 0x3087, 0x5901 },
+ { 0x3088, 0xe255 },
+ { 0x3089, 0x030d },
+ { 0x308a, 0x0255 },
+ { 0x308b, 0x2c01 },
+ { 0x308c, 0xe255 },
+ { 0x308d, 0x4342 },
+ { 0x308e, 0xe255 },
+ { 0x308f, 0x73c0 },
+ { 0x3090, 0x4255 },
+ { 0x3091, 0x0c00 },
+ { 0x3092, 0x0227 },
+ { 0x3093, 0x1f01 },
+ { 0x3094, 0x0227 },
+ { 0x3095, 0x1e01 },
+ { 0x3096, 0x0227 },
+ { 0x3097, 0xfa00 },
+ { 0x3098, 0x0000 },
+ { 0x3099, 0xf000 },
+ { 0x309a, 0x0000 },
+ { 0x309b, 0xf000 },
+ { 0x309c, 0x0000 },
+ { 0x309d, 0xf000 },
+ { 0x309e, 0x0000 },
+ { 0x309f, 0xf000 },
+ { 0x30a0, 0x0000 },
+ { 0x30a1, 0xf000 },
+ { 0x30a2, 0x0000 },
+ { 0x30a3, 0xf000 },
+ { 0x30a4, 0x0000 },
+ { 0x30a5, 0xf000 },
+ { 0x30a6, 0x0000 },
+ { 0x30a7, 0xf000 },
+ { 0x30a8, 0x0000 },
+ { 0x30a9, 0xf000 },
+ { 0x30aa, 0x0000 },
+ { 0x30ab, 0xf000 },
+ { 0x30ac, 0x0000 },
+ { 0x30ad, 0xf000 },
+ { 0x30ae, 0x0000 },
+ { 0x30af, 0xf000 },
+ { 0x30b0, 0x0227 },
+ { 0x30b1, 0x1a01 },
+ { 0x30b2, 0x0227 },
+ { 0x30b3, 0x1e00 },
+ { 0x30b4, 0x0227 },
+ { 0x30b5, 0x1f00 },
+ { 0x30b6, 0x6227 },
+ { 0x30b7, 0xf800 },
+ { 0x30b8, 0x0000 },
+ { 0x30b9, 0xf000 },
+ { 0x30ba, 0x0000 },
+ { 0x30bb, 0xf000 },
+ { 0x30bc, 0x0000 },
+ { 0x30bd, 0xf000 },
+ { 0x30be, 0x0000 },
+ { 0x30bf, 0xf000 },
+ { 0x30c0, 0x2228 },
+ { 0x30c1, 0x3a03 },
+ { 0x30c2, 0x0228 },
+ { 0x30c3, 0x0801 },
+ { 0x30c4, 0x6255 },
+ { 0x30c5, 0x0c06 },
+ { 0x30c6, 0x0228 },
+ { 0x30c7, 0x5901 },
+ { 0x30c8, 0xe255 },
+ { 0x30c9, 0x030d },
+ { 0x30ca, 0x0255 },
+ { 0x30cb, 0x2c01 },
+ { 0x30cc, 0xe255 },
+ { 0x30cd, 0x4342 },
+ { 0x30ce, 0xe255 },
+ { 0x30cf, 0x73c0 },
+ { 0x30d0, 0x4255 },
+ { 0x30d1, 0x0c00 },
+ { 0x30d2, 0x0228 },
+ { 0x30d3, 0x1f01 },
+ { 0x30d4, 0x0228 },
+ { 0x30d5, 0x1e01 },
+ { 0x30d6, 0x0228 },
+ { 0x30d7, 0xfa00 },
+ { 0x30d8, 0x0000 },
+ { 0x30d9, 0xf000 },
+ { 0x30da, 0x0000 },
+ { 0x30db, 0xf000 },
+ { 0x30dc, 0x0000 },
+ { 0x30dd, 0xf000 },
+ { 0x30de, 0x0000 },
+ { 0x30df, 0xf000 },
+ { 0x30e0, 0x0000 },
+ { 0x30e1, 0xf000 },
+ { 0x30e2, 0x0000 },
+ { 0x30e3, 0xf000 },
+ { 0x30e4, 0x0000 },
+ { 0x30e5, 0xf000 },
+ { 0x30e6, 0x0000 },
+ { 0x30e7, 0xf000 },
+ { 0x30e8, 0x0000 },
+ { 0x30e9, 0xf000 },
+ { 0x30ea, 0x0000 },
+ { 0x30eb, 0xf000 },
+ { 0x30ec, 0x0000 },
+ { 0x30ed, 0xf000 },
+ { 0x30ee, 0x0000 },
+ { 0x30ef, 0xf000 },
+ { 0x30f0, 0x0228 },
+ { 0x30f1, 0x1a01 },
+ { 0x30f2, 0x0228 },
+ { 0x30f3, 0x1e00 },
+ { 0x30f4, 0x0228 },
+ { 0x30f5, 0x1f00 },
+ { 0x30f6, 0x6228 },
+ { 0x30f7, 0xf800 },
+ { 0x30f8, 0x0000 },
+ { 0x30f9, 0xf000 },
+ { 0x30fa, 0x0000 },
+ { 0x30fb, 0xf000 },
+ { 0x30fc, 0x0000 },
+ { 0x30fd, 0xf000 },
+ { 0x30fe, 0x0000 },
+ { 0x30ff, 0xf000 },
+ { 0x3100, 0x222b },
+ { 0x3101, 0x3a03 },
+ { 0x3102, 0x222b },
+ { 0x3103, 0x5803 },
+ { 0x3104, 0xe26f },
+ { 0x3105, 0x030d },
+ { 0x3106, 0x626f },
+ { 0x3107, 0x2c01 },
+ { 0x3108, 0xe26f },
+ { 0x3109, 0x4342 },
+ { 0x310a, 0xe26f },
+ { 0x310b, 0x73c0 },
+ { 0x310c, 0x026f },
+ { 0x310d, 0x0c00 },
+ { 0x310e, 0x022b },
+ { 0x310f, 0x1f01 },
+ { 0x3110, 0x022b },
+ { 0x3111, 0x1e01 },
+ { 0x3112, 0x022b },
+ { 0x3113, 0xfa00 },
+ { 0x3114, 0x0000 },
+ { 0x3115, 0xf000 },
+ { 0x3116, 0x0000 },
+ { 0x3117, 0xf000 },
+ { 0x3118, 0x0000 },
+ { 0x3119, 0xf000 },
+ { 0x311a, 0x0000 },
+ { 0x311b, 0xf000 },
+ { 0x311c, 0x0000 },
+ { 0x311d, 0xf000 },
+ { 0x311e, 0x0000 },
+ { 0x311f, 0xf000 },
+ { 0x3120, 0x022b },
+ { 0x3121, 0x0a01 },
+ { 0x3122, 0x022b },
+ { 0x3123, 0x1e00 },
+ { 0x3124, 0x022b },
+ { 0x3125, 0x1f00 },
+ { 0x3126, 0x622b },
+ { 0x3127, 0xf800 },
+ { 0x3128, 0x0000 },
+ { 0x3129, 0xf000 },
+ { 0x312a, 0x0000 },
+ { 0x312b, 0xf000 },
+ { 0x312c, 0x0000 },
+ { 0x312d, 0xf000 },
+ { 0x312e, 0x0000 },
+ { 0x312f, 0xf000 },
+ { 0x3130, 0x0000 },
+ { 0x3131, 0xf000 },
+ { 0x3132, 0x0000 },
+ { 0x3133, 0xf000 },
+ { 0x3134, 0x0000 },
+ { 0x3135, 0xf000 },
+ { 0x3136, 0x0000 },
+ { 0x3137, 0xf000 },
+ { 0x3138, 0x0000 },
+ { 0x3139, 0xf000 },
+ { 0x313a, 0x0000 },
+ { 0x313b, 0xf000 },
+ { 0x313c, 0x0000 },
+ { 0x313d, 0xf000 },
+ { 0x313e, 0x0000 },
+ { 0x313f, 0xf000 },
+ { 0x3140, 0x0000 },
+ { 0x3141, 0xf000 },
+ { 0x3142, 0x0000 },
+ { 0x3143, 0xf000 },
+ { 0x3144, 0x0000 },
+ { 0x3145, 0xf000 },
+ { 0x3146, 0x0000 },
+ { 0x3147, 0xf000 },
+ { 0x3148, 0x0000 },
+ { 0x3149, 0xf000 },
+ { 0x314a, 0x0000 },
+ { 0x314b, 0xf000 },
+ { 0x314c, 0x0000 },
+ { 0x314d, 0xf000 },
+ { 0x314e, 0x0000 },
+ { 0x314f, 0xf000 },
+ { 0x3150, 0x0000 },
+ { 0x3151, 0xf000 },
+ { 0x3152, 0x0000 },
+ { 0x3153, 0xf000 },
+ { 0x3154, 0x0000 },
+ { 0x3155, 0xf000 },
+ { 0x3156, 0x0000 },
+ { 0x3157, 0xf000 },
+ { 0x3158, 0x0000 },
+ { 0x3159, 0xf000 },
+ { 0x315a, 0x0000 },
+ { 0x315b, 0xf000 },
+ { 0x315c, 0x0000 },
+ { 0x315d, 0xf000 },
+ { 0x315e, 0x0000 },
+ { 0x315f, 0xf000 },
+ { 0x3160, 0x0000 },
+ { 0x3161, 0xf000 },
+ { 0x3162, 0x0000 },
+ { 0x3163, 0xf000 },
+ { 0x3164, 0x0000 },
+ { 0x3165, 0xf000 },
+ { 0x3166, 0x0000 },
+ { 0x3167, 0xf000 },
+ { 0x3168, 0x0000 },
+ { 0x3169, 0xf000 },
+ { 0x316a, 0x0000 },
+ { 0x316b, 0xf000 },
+ { 0x316c, 0x0000 },
+ { 0x316d, 0xf000 },
+ { 0x316e, 0x0000 },
+ { 0x316f, 0xf000 },
+ { 0x3170, 0x0000 },
+ { 0x3171, 0xf000 },
+ { 0x3172, 0x0000 },
+ { 0x3173, 0xf000 },
+ { 0x3174, 0x0000 },
+ { 0x3175, 0xf000 },
+ { 0x3176, 0x0000 },
+ { 0x3177, 0xf000 },
+ { 0x3178, 0x0000 },
+ { 0x3179, 0xf000 },
+ { 0x317a, 0x0000 },
+ { 0x317b, 0xf000 },
+ { 0x317c, 0x0000 },
+ { 0x317d, 0xf000 },
+ { 0x317e, 0x0000 },
+ { 0x317f, 0xf000 },
+ { 0x3180, 0x2001 },
+ { 0x3181, 0xf101 },
+ { 0x3182, 0x0000 },
+ { 0x3183, 0xf000 },
+ { 0x3184, 0x0000 },
+ { 0x3185, 0xf000 },
+ { 0x3186, 0x0000 },
+ { 0x3187, 0xf000 },
+ { 0x3188, 0x0000 },
+ { 0x3189, 0xf000 },
+ { 0x318a, 0x0000 },
+ { 0x318b, 0xf000 },
+ { 0x318c, 0x0000 },
+ { 0x318d, 0xf000 },
+ { 0x318e, 0x0000 },
+ { 0x318f, 0xf000 },
+ { 0x3190, 0x0000 },
+ { 0x3191, 0xf000 },
+ { 0x3192, 0x0000 },
+ { 0x3193, 0xf000 },
+ { 0x3194, 0x0000 },
+ { 0x3195, 0xf000 },
+ { 0x3196, 0x0000 },
+ { 0x3197, 0xf000 },
+ { 0x3198, 0x0000 },
+ { 0x3199, 0xf000 },
+ { 0x319a, 0x0000 },
+ { 0x319b, 0xf000 },
+ { 0x319c, 0x0000 },
+ { 0x319d, 0xf000 },
+ { 0x319e, 0x0000 },
+ { 0x319f, 0xf000 },
+ { 0x31a0, 0x0000 },
+ { 0x31a1, 0xf000 },
+ { 0x31a2, 0x0000 },
+ { 0x31a3, 0xf000 },
+ { 0x31a4, 0x0000 },
+ { 0x31a5, 0xf000 },
+ { 0x31a6, 0x0000 },
+ { 0x31a7, 0xf000 },
+ { 0x31a8, 0x0000 },
+ { 0x31a9, 0xf000 },
+ { 0x31aa, 0x0000 },
+ { 0x31ab, 0xf000 },
+ { 0x31ac, 0x0000 },
+ { 0x31ad, 0xf000 },
+ { 0x31ae, 0x0000 },
+ { 0x31af, 0xf000 },
+ { 0x31b0, 0x0000 },
+ { 0x31b1, 0xf000 },
+ { 0x31b2, 0x0000 },
+ { 0x31b3, 0xf000 },
+ { 0x31b4, 0x0000 },
+ { 0x31b5, 0xf000 },
+ { 0x31b6, 0x0000 },
+ { 0x31b7, 0xf000 },
+ { 0x31b8, 0x0000 },
+ { 0x31b9, 0xf000 },
+ { 0x31ba, 0x0000 },
+ { 0x31bb, 0xf000 },
+ { 0x31bc, 0x0000 },
+ { 0x31bd, 0xf000 },
+ { 0x31be, 0x0000 },
+ { 0x31bf, 0xf000 },
+ { 0x31c0, 0x0000 },
+ { 0x31c1, 0xf000 },
+ { 0x31c2, 0x0000 },
+ { 0x31c3, 0xf000 },
+ { 0x31c4, 0x0000 },
+ { 0x31c5, 0xf000 },
+ { 0x31c6, 0x0000 },
+ { 0x31c7, 0xf000 },
+ { 0x31c8, 0x0000 },
+ { 0x31c9, 0xf000 },
+ { 0x31ca, 0x0000 },
+ { 0x31cb, 0xf000 },
+ { 0x31cc, 0x0000 },
+ { 0x31cd, 0xf000 },
+ { 0x31ce, 0x0000 },
+ { 0x31cf, 0xf000 },
+ { 0x31d0, 0x0000 },
+ { 0x31d1, 0xf000 },
+ { 0x31d2, 0x0000 },
+ { 0x31d3, 0xf000 },
+ { 0x31d4, 0x0000 },
+ { 0x31d5, 0xf000 },
+ { 0x31d6, 0x0000 },
+ { 0x31d7, 0xf000 },
+ { 0x31d8, 0x0000 },
+ { 0x31d9, 0xf000 },
+ { 0x31da, 0x0000 },
+ { 0x31db, 0xf000 },
+ { 0x31dc, 0x0000 },
+ { 0x31dd, 0xf000 },
+ { 0x31de, 0x0000 },
+ { 0x31df, 0xf000 },
+ { 0x31e0, 0x0000 },
+ { 0x31e1, 0xf000 },
+ { 0x31e2, 0x0000 },
+ { 0x31e3, 0xf000 },
+ { 0x31e4, 0x0000 },
+ { 0x31e5, 0xf000 },
+ { 0x31e6, 0x0000 },
+ { 0x31e7, 0xf000 },
+ { 0x31e8, 0x0000 },
+ { 0x31e9, 0xf000 },
+ { 0x31ea, 0x0000 },
+ { 0x31eb, 0xf000 },
+ { 0x31ec, 0x0000 },
+ { 0x31ed, 0xf000 },
+ { 0x31ee, 0x0000 },
+ { 0x31ef, 0xf000 },
+ { 0x31f0, 0x0000 },
+ { 0x31f1, 0xf000 },
+ { 0x31f2, 0x0000 },
+ { 0x31f3, 0xf000 },
+ { 0x31f4, 0x0000 },
+ { 0x31f5, 0xf000 },
+ { 0x31f6, 0x0000 },
+ { 0x31f7, 0xf000 },
+ { 0x31f8, 0x0000 },
+ { 0x31f9, 0xf000 },
+ { 0x31fa, 0x0000 },
+ { 0x31fb, 0xf000 },
+ { 0x31fc, 0x0000 },
+ { 0x31fd, 0xf000 },
+ { 0x31fe, 0x0000 },
+ { 0x31ff, 0xf000 },
+ { 0x024d, 0xff50 },
+ { 0x0252, 0xff50 },
+ { 0x0259, 0x0112 },
+ { 0x025e, 0x0112 },
+ { 0x101, 0x0304 },
+ { 0x80, 0x0000 },
+};
+
+/* We use a function so we can use ARRAY_SIZE() */
+int wm5102_patch(struct arizona *arizona)
+{
+ switch (arizona->rev) {
+ case 0:
+ return regmap_register_patch(arizona->regmap,
+ wm5102_reva_patch,
+ ARRAY_SIZE(wm5102_reva_patch));
+ default:
+ return 0;
+ }
+}
+
+static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+ [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+ [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+ [ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+const struct regmap_irq_chip wm5102_aod = {
+ .name = "wm5102 AOD",
+ .status_base = ARIZONA_AOD_IRQ1,
+ .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+ .ack_base = ARIZONA_AOD_IRQ1,
+ .wake_base = ARIZONA_WAKE_CONTROL,
+ .num_regs = 1,
+ .irqs = wm5102_aod_irqs,
+ .num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
+};
+
+static const struct regmap_irq wm5102_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+ [ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+ [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+ [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+ [ARIZONA_IRQ_DSP1_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ2] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ1] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
+ },
+
+ [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
+ },
+ [ARIZONA_IRQ_SPK_SHUTDOWN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ },
+ [ARIZONA_IRQ_HPDET] = {
+ .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+ },
+ [ARIZONA_IRQ_MICDET] = {
+ .reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+ },
+ [ARIZONA_IRQ_WSEQ_DONE] = {
+ .reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DRC2_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_DRC1_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_ASRC2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_ASRC1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_UNDERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_OVERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+ },
+
+ [ARIZONA_IRQ_ASRC_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF3_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF2_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF1_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CTRLIF_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+ },
+ [ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+ .reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+ },
+ [ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+ },
+
+ [ARIZONA_IRQ_BOOT_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DCS_DAC_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DCS_HP_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+ },
+};
+
+const struct regmap_irq_chip wm5102_irq = {
+ .name = "wm5102 IRQ",
+ .status_base = ARIZONA_INTERRUPT_STATUS_1,
+ .mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+ .ack_base = ARIZONA_INTERRUPT_STATUS_1,
+ .num_regs = 5,
+ .irqs = wm5102_irqs,
+ .num_irqs = ARRAY_SIZE(wm5102_irqs),
+};
+
+static const struct reg_default wm5102_reg_default[] = {
+ { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
+ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
+ { 0x0000000D, 0x0000 }, /* R13 - Ctrl IF Status 1 */
+ { 0x00000016, 0x0000 }, /* R22 - Write Sequencer Ctrl 0 */
+ { 0x00000017, 0x0000 }, /* R23 - Write Sequencer Ctrl 1 */
+ { 0x00000018, 0x0000 }, /* R24 - Write Sequencer Ctrl 2 */
+ { 0x0000001A, 0x0000 }, /* R26 - Write Sequencer PROM */
+ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
+ { 0x00000040, 0x0000 }, /* R64 - Wake control */
+ { 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
+ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */
+ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */
+ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */
+ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */
+ { 0x0000006C, 0x01FF }, /* R108 - Always On Triggers Sequence Select 5 */
+ { 0x0000006D, 0x01FF }, /* R109 - Always On Triggers Sequence Select 6 */
+ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
+ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
+ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
+ { 0x00000100, 0x0001 }, /* R256 - Clock 32k 1 */
+ { 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000149, 0x0000 }, /* R329 - Output system clock */
+ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
+ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
+ { 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
+ { 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
+ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
+ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
+ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
+ { 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
+ { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
+ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
+ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
+ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
+ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
+ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
+ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
+ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
+ { 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
+ { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
+ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
+ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
+ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
+ { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
+ { 0x000002CB, 0x0000 }, /* R715 - Isolation control */
+ { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
+ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
+ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
+ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
+ { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
+ { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
+ { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
+ { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
+ { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
+ { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
+ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
+ { 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
+ { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
+ { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
+ { 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
+ { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
+ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
+ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
+ { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
+ { 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
+ { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
+ { 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
+ { 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
+ { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
+ { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
+ { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
+ { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
+ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
+ { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
+ { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
+ { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
+ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
+ { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
+ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
+ { 0x000004DC, 0x0000 }, /* R1244 - DAC comp 1 */
+ { 0x000004DD, 0x0000 }, /* R1245 - DAC comp 2 */
+ { 0x000004DE, 0x0000 }, /* R1246 - DAC comp 3 */
+ { 0x000004DF, 0x0000 }, /* R1247 - DAC comp 4 */
+ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
+ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
+ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
+ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
+ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
+ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
+ { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
+ { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
+ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
+ { 0x0000051B, 0x0000 }, /* R1307 - AIF1 Force Write */
+ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
+ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
+ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
+ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
+ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
+ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
+ { 0x0000055B, 0x0000 }, /* R1371 - AIF2 Force Write */
+ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
+ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
+ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
+ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
+ { 0x0000059B, 0x0000 }, /* R1435 - AIF3 Force Write */
+ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
+ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
+ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
+ { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
+ { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
+ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
+ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
+ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
+ { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
+ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
+ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
+ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
+ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
+ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
+ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
+ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
+ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
+ { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
+ { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
+ { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
+ { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
+ { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
+ { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
+ { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
+ { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
+ { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
+ { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
+ { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
+ { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
+ { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
+ { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
+ { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
+ { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
+ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
+ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
+ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
+ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
+ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
+ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
+ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
+ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
+ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
+ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
+ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
+ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
+ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
+ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
+ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
+ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
+ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
+ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
+ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
+ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
+ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
+ { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
+ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
+ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
+ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
+ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
+ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
+ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
+ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
+ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
+ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
+ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
+ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
+ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
+ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
+ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
+ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
+ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
+ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
+ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
+ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
+ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
+ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
+ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
+ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
+ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
+ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
+ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
+ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
+ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
+ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
+ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
+ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
+ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
+ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
+ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
+ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
+ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
+ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
+ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
+ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
+ { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
+ { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
+ { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
+ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
+ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
+ { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
+ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
+ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
+ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
+ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
+ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
+ { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
+ { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
+ { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
+ { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
+ { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
+ { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
+ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
+ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
+ { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
+ { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
+ { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
+ { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
+ { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
+ { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
+ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
+ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
+ { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
+ { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
+ { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
+ { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
+ { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
+ { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
+ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
+ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
+ { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
+ { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
+ { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
+ { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
+ { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
+ { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
+ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
+ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
+ { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
+ { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
+ { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
+ { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
+ { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
+ { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
+ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
+ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
+ { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
+ { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
+ { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
+ { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
+ { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
+ { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
+ { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
+ { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
+ { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
+ { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
+ { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
+ { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
+ { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
+ { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
+ { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
+ { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
+ { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
+ { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
+ { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
+ { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
+ { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
+ { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
+ { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
+ { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
+ { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
+ { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
+ { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
+ { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
+ { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
+ { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
+ { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
+ { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
+ { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
+ { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
+ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
+ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
+ { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
+ { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
+ { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
+ { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
+ { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
+ { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
+ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
+ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
+ { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
+ { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
+ { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
+ { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
+ { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
+ { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
+ { 0x000008D0, 0x0000 }, /* R2256 - DRC2LMIX Input 1 Source */
+ { 0x000008D1, 0x0080 }, /* R2257 - DRC2LMIX Input 1 Volume */
+ { 0x000008D2, 0x0000 }, /* R2258 - DRC2LMIX Input 2 Source */
+ { 0x000008D3, 0x0080 }, /* R2259 - DRC2LMIX Input 2 Volume */
+ { 0x000008D4, 0x0000 }, /* R2260 - DRC2LMIX Input 3 Source */
+ { 0x000008D5, 0x0080 }, /* R2261 - DRC2LMIX Input 3 Volume */
+ { 0x000008D6, 0x0000 }, /* R2262 - DRC2LMIX Input 4 Source */
+ { 0x000008D7, 0x0080 }, /* R2263 - DRC2LMIX Input 4 Volume */
+ { 0x000008D8, 0x0000 }, /* R2264 - DRC2RMIX Input 1 Source */
+ { 0x000008D9, 0x0080 }, /* R2265 - DRC2RMIX Input 1 Volume */
+ { 0x000008DA, 0x0000 }, /* R2266 - DRC2RMIX Input 2 Source */
+ { 0x000008DB, 0x0080 }, /* R2267 - DRC2RMIX Input 2 Volume */
+ { 0x000008DC, 0x0000 }, /* R2268 - DRC2RMIX Input 3 Source */
+ { 0x000008DD, 0x0080 }, /* R2269 - DRC2RMIX Input 3 Volume */
+ { 0x000008DE, 0x0000 }, /* R2270 - DRC2RMIX Input 4 Source */
+ { 0x000008DF, 0x0080 }, /* R2271 - DRC2RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
+ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
+ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
+ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
+ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
+ { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
+ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
+ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
+ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
+ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
+ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
+ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
+ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
+ { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
+ { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
+ { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
+ { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
+ { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
+ { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
+ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
+ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
+ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
+ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
+ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
+ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
+ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
+ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
+ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
+ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
+ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
+ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
+ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
+ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
+ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
+ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
+ { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
+ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
+ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
+ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
+ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
+ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
+ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
+ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
+ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
+ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
+ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
+ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
+ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
+ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000D41, 0x0000 }, /* R3393 - ADSP2 IRQ0 */
+ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
+ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
+ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
+ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
+ { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
+ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
+ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
+ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
+ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
+ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
+ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
+ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
+ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
+ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
+ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
+ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
+ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
+ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
+ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
+ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
+ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
+ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
+ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
+ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
+ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
+ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
+ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
+ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
+ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
+ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
+ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
+ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
+ { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
+ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
+ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
+ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
+ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
+ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
+ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
+ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
+ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
+ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
+ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
+ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
+ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
+ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
+ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
+ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
+ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
+ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
+ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
+ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
+ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
+ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
+ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
+ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
+ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
+ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
+ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
+ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
+ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
+ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
+ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
+ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
+ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
+ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
+ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
+ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
+ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
+ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
+ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
+ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
+ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
+ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
+ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
+ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
+ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
+ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
+ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
+ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
+ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
+ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
+ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
+ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
+ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
+ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
+ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
+ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
+ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
+ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
+ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
+ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
+ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
+ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
+ { 0x00000E89, 0x0018 }, /* R3721 - DRC2 ctrl1 */
+ { 0x00000E8A, 0x0933 }, /* R3722 - DRC2 ctrl2 */
+ { 0x00000E8B, 0x0018 }, /* R3723 - DRC2 ctrl3 */
+ { 0x00000E8C, 0x0000 }, /* R3724 - DRC2 ctrl4 */
+ { 0x00000E8D, 0x0000 }, /* R3725 - DRC2 ctrl5 */
+ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
+ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
+ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
+ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
+ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
+ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
+ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
+ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
+ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
+ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
+ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
+ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
+ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
+ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
+ { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
+ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
+ { 0x00000EF6, 0x0000 }, /* R3830 - ISRC 3 CTRL 1 */
+ { 0x00000EF7, 0x0000 }, /* R3831 - ISRC 3 CTRL 2 */
+ { 0x00000EF8, 0x0000 }, /* R3832 - ISRC 3 CTRL 3 */
+ { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
+ { 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
+};
+
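+/*
+ * regmap .readable_reg callback: enumerates every register present on
+ * the WM5102 so the core knows which addresses may be read back.
+ */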
+static bool wm5102_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_CTRL_IF_SPI_CFG_1:
+ case ARIZONA_CTRL_IF_I2C1_CFG_1:
+ case ARIZONA_CTRL_IF_STATUS_1:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+ case ARIZONA_WRITE_SEQUENCER_PROM:
+ case ARIZONA_TONE_GENERATOR_1:
+ case ARIZONA_TONE_GENERATOR_2:
+ case ARIZONA_TONE_GENERATOR_3:
+ case ARIZONA_TONE_GENERATOR_4:
+ case ARIZONA_TONE_GENERATOR_5:
+ case ARIZONA_PWM_DRIVE_1:
+ case ARIZONA_PWM_DRIVE_2:
+ case ARIZONA_PWM_DRIVE_3:
+ case ARIZONA_WAKE_CONTROL:
+ case ARIZONA_SEQUENCE_CONTROL:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
+ case ARIZONA_COMFORT_NOISE_GENERATOR:
+ case ARIZONA_HAPTICS_CONTROL_1:
+ case ARIZONA_HAPTICS_CONTROL_2:
+ case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_1_DURATION:
+ case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_2_DURATION:
+ case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_3_DURATION:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_CLOCK_32K_1:
+ case ARIZONA_SYSTEM_CLOCK_1:
+ case ARIZONA_SAMPLE_RATE_1:
+ case ARIZONA_SAMPLE_RATE_2:
+ case ARIZONA_SAMPLE_RATE_3:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_ASYNC_CLOCK_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+ case ARIZONA_OUTPUT_ASYNC_CLOCK:
+ case ARIZONA_RATE_ESTIMATOR_1:
+ case ARIZONA_RATE_ESTIMATOR_2:
+ case ARIZONA_RATE_ESTIMATOR_3:
+ case ARIZONA_RATE_ESTIMATOR_4:
+ case ARIZONA_RATE_ESTIMATOR_5:
+ case ARIZONA_FLL1_CONTROL_1:
+ case ARIZONA_FLL1_CONTROL_2:
+ case ARIZONA_FLL1_CONTROL_3:
+ case ARIZONA_FLL1_CONTROL_4:
+ case ARIZONA_FLL1_CONTROL_5:
+ case ARIZONA_FLL1_CONTROL_6:
+ case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_SYNCHRONISER_1:
+ case ARIZONA_FLL1_SYNCHRONISER_2:
+ case ARIZONA_FLL1_SYNCHRONISER_3:
+ case ARIZONA_FLL1_SYNCHRONISER_4:
+ case ARIZONA_FLL1_SYNCHRONISER_5:
+ case ARIZONA_FLL1_SYNCHRONISER_6:
+ case ARIZONA_FLL1_SPREAD_SPECTRUM:
+ case ARIZONA_FLL1_GPIO_CLOCK:
+ case ARIZONA_FLL2_CONTROL_1:
+ case ARIZONA_FLL2_CONTROL_2:
+ case ARIZONA_FLL2_CONTROL_3:
+ case ARIZONA_FLL2_CONTROL_4:
+ case ARIZONA_FLL2_CONTROL_5:
+ case ARIZONA_FLL2_CONTROL_6:
+ case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_SYNCHRONISER_1:
+ case ARIZONA_FLL2_SYNCHRONISER_2:
+ case ARIZONA_FLL2_SYNCHRONISER_3:
+ case ARIZONA_FLL2_SYNCHRONISER_4:
+ case ARIZONA_FLL2_SYNCHRONISER_5:
+ case ARIZONA_FLL2_SYNCHRONISER_6:
+ case ARIZONA_FLL2_SPREAD_SPECTRUM:
+ case ARIZONA_FLL2_GPIO_CLOCK:
+ case ARIZONA_MIC_CHARGE_PUMP_1:
+ case ARIZONA_LDO1_CONTROL_1:
+ case ARIZONA_LDO2_CONTROL_1:
+ case ARIZONA_MIC_BIAS_CTRL_1:
+ case ARIZONA_MIC_BIAS_CTRL_2:
+ case ARIZONA_MIC_BIAS_CTRL_3:
+ case ARIZONA_ACCESSORY_DETECT_MODE_1:
+ case ARIZONA_HEADPHONE_DETECT_1:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_MIC_DETECT_1:
+ case ARIZONA_MIC_DETECT_2:
+ case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
+ case ARIZONA_ISOLATION_CONTROL:
+ case ARIZONA_JACK_DETECT_ANALOGUE:
+ case ARIZONA_INPUT_ENABLES:
+ case ARIZONA_INPUT_RATE:
+ case ARIZONA_INPUT_VOLUME_RAMP:
+ case ARIZONA_IN1L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+ case ARIZONA_DMIC1L_CONTROL:
+ case ARIZONA_IN1R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+ case ARIZONA_DMIC1R_CONTROL:
+ case ARIZONA_IN2L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+ case ARIZONA_DMIC2L_CONTROL:
+ case ARIZONA_IN2R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_2R:
+ case ARIZONA_DMIC2R_CONTROL:
+ case ARIZONA_IN3L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_3L:
+ case ARIZONA_DMIC3L_CONTROL:
+ case ARIZONA_IN3R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_3R:
+ case ARIZONA_DMIC3R_CONTROL:
+ case ARIZONA_OUTPUT_ENABLES_1:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_OUTPUT_RATE_1:
+ case ARIZONA_OUTPUT_VOLUME_RAMP:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+ case ARIZONA_DAC_VOLUME_LIMIT_1L:
+ case ARIZONA_NOISE_GATE_SELECT_1L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+ case ARIZONA_DAC_VOLUME_LIMIT_1R:
+ case ARIZONA_NOISE_GATE_SELECT_1R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+ case ARIZONA_DAC_VOLUME_LIMIT_2L:
+ case ARIZONA_NOISE_GATE_SELECT_2L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+ case ARIZONA_DAC_VOLUME_LIMIT_2R:
+ case ARIZONA_NOISE_GATE_SELECT_2R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+ case ARIZONA_DAC_VOLUME_LIMIT_3L:
+ case ARIZONA_NOISE_GATE_SELECT_3L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_3R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_3R:
+ case ARIZONA_DAC_VOLUME_LIMIT_3R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+ case ARIZONA_OUT_VOLUME_4L:
+ case ARIZONA_NOISE_GATE_SELECT_4L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+ case ARIZONA_OUT_VOLUME_4R:
+ case ARIZONA_NOISE_GATE_SELECT_4R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+ case ARIZONA_DAC_VOLUME_LIMIT_5L:
+ case ARIZONA_NOISE_GATE_SELECT_5L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+ case ARIZONA_DAC_VOLUME_LIMIT_5R:
+ case ARIZONA_NOISE_GATE_SELECT_5R:
+ case ARIZONA_DAC_AEC_CONTROL_1:
+ case ARIZONA_NOISE_GATE_CONTROL:
+ case ARIZONA_PDM_SPK1_CTRL_1:
+ case ARIZONA_PDM_SPK1_CTRL_2:
+ case ARIZONA_DAC_COMP_1:
+ case ARIZONA_DAC_COMP_2:
+ case ARIZONA_DAC_COMP_3:
+ case ARIZONA_DAC_COMP_4:
+ case ARIZONA_AIF1_BCLK_CTRL:
+ case ARIZONA_AIF1_TX_PIN_CTRL:
+ case ARIZONA_AIF1_RX_PIN_CTRL:
+ case ARIZONA_AIF1_RATE_CTRL:
+ case ARIZONA_AIF1_FORMAT:
+ case ARIZONA_AIF1_TX_BCLK_RATE:
+ case ARIZONA_AIF1_RX_BCLK_RATE:
+ case ARIZONA_AIF1_FRAME_CTRL_1:
+ case ARIZONA_AIF1_FRAME_CTRL_2:
+ case ARIZONA_AIF1_FRAME_CTRL_3:
+ case ARIZONA_AIF1_FRAME_CTRL_4:
+ case ARIZONA_AIF1_FRAME_CTRL_5:
+ case ARIZONA_AIF1_FRAME_CTRL_6:
+ case ARIZONA_AIF1_FRAME_CTRL_7:
+ case ARIZONA_AIF1_FRAME_CTRL_8:
+ case ARIZONA_AIF1_FRAME_CTRL_9:
+ case ARIZONA_AIF1_FRAME_CTRL_10:
+ case ARIZONA_AIF1_FRAME_CTRL_11:
+ case ARIZONA_AIF1_FRAME_CTRL_12:
+ case ARIZONA_AIF1_FRAME_CTRL_13:
+ case ARIZONA_AIF1_FRAME_CTRL_14:
+ case ARIZONA_AIF1_FRAME_CTRL_15:
+ case ARIZONA_AIF1_FRAME_CTRL_16:
+ case ARIZONA_AIF1_FRAME_CTRL_17:
+ case ARIZONA_AIF1_FRAME_CTRL_18:
+ case ARIZONA_AIF1_TX_ENABLES:
+ case ARIZONA_AIF1_RX_ENABLES:
+ case ARIZONA_AIF1_FORCE_WRITE:
+ case ARIZONA_AIF2_BCLK_CTRL:
+ case ARIZONA_AIF2_TX_PIN_CTRL:
+ case ARIZONA_AIF2_RX_PIN_CTRL:
+ case ARIZONA_AIF2_RATE_CTRL:
+ case ARIZONA_AIF2_FORMAT:
+ case ARIZONA_AIF2_TX_BCLK_RATE:
+ case ARIZONA_AIF2_RX_BCLK_RATE:
+ case ARIZONA_AIF2_FRAME_CTRL_1:
+ case ARIZONA_AIF2_FRAME_CTRL_2:
+ case ARIZONA_AIF2_FRAME_CTRL_3:
+ case ARIZONA_AIF2_FRAME_CTRL_4:
+ case ARIZONA_AIF2_FRAME_CTRL_11:
+ case ARIZONA_AIF2_FRAME_CTRL_12:
+ case ARIZONA_AIF2_TX_ENABLES:
+ case ARIZONA_AIF2_RX_ENABLES:
+ case ARIZONA_AIF2_FORCE_WRITE:
+ case ARIZONA_AIF3_BCLK_CTRL:
+ case ARIZONA_AIF3_TX_PIN_CTRL:
+ case ARIZONA_AIF3_RX_PIN_CTRL:
+ case ARIZONA_AIF3_RATE_CTRL:
+ case ARIZONA_AIF3_FORMAT:
+ case ARIZONA_AIF3_TX_BCLK_RATE:
+ case ARIZONA_AIF3_RX_BCLK_RATE:
+ case ARIZONA_AIF3_FRAME_CTRL_1:
+ case ARIZONA_AIF3_FRAME_CTRL_2:
+ case ARIZONA_AIF3_FRAME_CTRL_3:
+ case ARIZONA_AIF3_FRAME_CTRL_4:
+ case ARIZONA_AIF3_FRAME_CTRL_11:
+ case ARIZONA_AIF3_FRAME_CTRL_12:
+ case ARIZONA_AIF3_TX_ENABLES:
+ case ARIZONA_AIF3_RX_ENABLES:
+ case ARIZONA_AIF3_FORCE_WRITE:
+ case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+ case ARIZONA_SLIMBUS_RATES_1:
+ case ARIZONA_SLIMBUS_RATES_2:
+ case ARIZONA_SLIMBUS_RATES_3:
+ case ARIZONA_SLIMBUS_RATES_4:
+ case ARIZONA_SLIMBUS_RATES_5:
+ case ARIZONA_SLIMBUS_RATES_6:
+ case ARIZONA_SLIMBUS_RATES_7:
+ case ARIZONA_SLIMBUS_RATES_8:
+ case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+ case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+ case ARIZONA_MICMIX_INPUT_1_SOURCE:
+ case ARIZONA_MICMIX_INPUT_1_VOLUME:
+ case ARIZONA_MICMIX_INPUT_2_SOURCE:
+ case ARIZONA_MICMIX_INPUT_2_VOLUME:
+ case ARIZONA_MICMIX_INPUT_3_SOURCE:
+ case ARIZONA_MICMIX_INPUT_3_VOLUME:
+ case ARIZONA_MICMIX_INPUT_4_SOURCE:
+ case ARIZONA_MICMIX_INPUT_4_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_1_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_1_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_2_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_2_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_3_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_3_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_4_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_GPIO1_CTRL:
+ case ARIZONA_GPIO2_CTRL:
+ case ARIZONA_GPIO3_CTRL:
+ case ARIZONA_GPIO4_CTRL:
+ case ARIZONA_GPIO5_CTRL:
+ case ARIZONA_IRQ_CTRL_1:
+ case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+ case ARIZONA_MISC_PAD_CTRL_1:
+ case ARIZONA_MISC_PAD_CTRL_2:
+ case ARIZONA_MISC_PAD_CTRL_3:
+ case ARIZONA_MISC_PAD_CTRL_4:
+ case ARIZONA_MISC_PAD_CTRL_5:
+ case ARIZONA_MISC_PAD_CTRL_6:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_INTERRUPT_STATUS_1_MASK:
+ case ARIZONA_INTERRUPT_STATUS_2_MASK:
+ case ARIZONA_INTERRUPT_STATUS_3_MASK:
+ case ARIZONA_INTERRUPT_STATUS_4_MASK:
+ case ARIZONA_INTERRUPT_STATUS_5_MASK:
+ case ARIZONA_INTERRUPT_CONTROL:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1_MASK:
+ case ARIZONA_IRQ2_STATUS_2_MASK:
+ case ARIZONA_IRQ2_STATUS_3_MASK:
+ case ARIZONA_IRQ2_STATUS_4_MASK:
+ case ARIZONA_IRQ2_STATUS_5_MASK:
+ case ARIZONA_IRQ2_CONTROL:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_ADSP2_IRQ0:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_MASK_IRQ1:
+ case ARIZONA_AOD_IRQ_MASK_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_JACK_DETECT_DEBOUNCE:
+ case ARIZONA_FX_CTRL1:
+ case ARIZONA_FX_CTRL2:
+ case ARIZONA_EQ1_1:
+ case ARIZONA_EQ1_2:
+ case ARIZONA_EQ1_3:
+ case ARIZONA_EQ1_4:
+ case ARIZONA_EQ1_5:
+ case ARIZONA_EQ1_6:
+ case ARIZONA_EQ1_7:
+ case ARIZONA_EQ1_8:
+ case ARIZONA_EQ1_9:
+ case ARIZONA_EQ1_10:
+ case ARIZONA_EQ1_11:
+ case ARIZONA_EQ1_12:
+ case ARIZONA_EQ1_13:
+ case ARIZONA_EQ1_14:
+ case ARIZONA_EQ1_15:
+ case ARIZONA_EQ1_16:
+ case ARIZONA_EQ1_17:
+ case ARIZONA_EQ1_18:
+ case ARIZONA_EQ1_19:
+ case ARIZONA_EQ1_20:
+ case ARIZONA_EQ1_21:
+ case ARIZONA_EQ2_1:
+ case ARIZONA_EQ2_2:
+ case ARIZONA_EQ2_3:
+ case ARIZONA_EQ2_4:
+ case ARIZONA_EQ2_5:
+ case ARIZONA_EQ2_6:
+ case ARIZONA_EQ2_7:
+ case ARIZONA_EQ2_8:
+ case ARIZONA_EQ2_9:
+ case ARIZONA_EQ2_10:
+ case ARIZONA_EQ2_11:
+ case ARIZONA_EQ2_12:
+ case ARIZONA_EQ2_13:
+ case ARIZONA_EQ2_14:
+ case ARIZONA_EQ2_15:
+ case ARIZONA_EQ2_16:
+ case ARIZONA_EQ2_17:
+ case ARIZONA_EQ2_18:
+ case ARIZONA_EQ2_19:
+ case ARIZONA_EQ2_20:
+ case ARIZONA_EQ2_21:
+ case ARIZONA_EQ3_1:
+ case ARIZONA_EQ3_2:
+ case ARIZONA_EQ3_3:
+ case ARIZONA_EQ3_4:
+ case ARIZONA_EQ3_5:
+ case ARIZONA_EQ3_6:
+ case ARIZONA_EQ3_7:
+ case ARIZONA_EQ3_8:
+ case ARIZONA_EQ3_9:
+ case ARIZONA_EQ3_10:
+ case ARIZONA_EQ3_11:
+ case ARIZONA_EQ3_12:
+ case ARIZONA_EQ3_13:
+ case ARIZONA_EQ3_14:
+ case ARIZONA_EQ3_15:
+ case ARIZONA_EQ3_16:
+ case ARIZONA_EQ3_17:
+ case ARIZONA_EQ3_18:
+ case ARIZONA_EQ3_19:
+ case ARIZONA_EQ3_20:
+ case ARIZONA_EQ3_21:
+ case ARIZONA_EQ4_1:
+ case ARIZONA_EQ4_2:
+ case ARIZONA_EQ4_3:
+ case ARIZONA_EQ4_4:
+ case ARIZONA_EQ4_5:
+ case ARIZONA_EQ4_6:
+ case ARIZONA_EQ4_7:
+ case ARIZONA_EQ4_8:
+ case ARIZONA_EQ4_9:
+ case ARIZONA_EQ4_10:
+ case ARIZONA_EQ4_11:
+ case ARIZONA_EQ4_12:
+ case ARIZONA_EQ4_13:
+ case ARIZONA_EQ4_14:
+ case ARIZONA_EQ4_15:
+ case ARIZONA_EQ4_16:
+ case ARIZONA_EQ4_17:
+ case ARIZONA_EQ4_18:
+ case ARIZONA_EQ4_19:
+ case ARIZONA_EQ4_20:
+ case ARIZONA_EQ4_21:
+ case ARIZONA_DRC1_CTRL1:
+ case ARIZONA_DRC1_CTRL2:
+ case ARIZONA_DRC1_CTRL3:
+ case ARIZONA_DRC1_CTRL4:
+ case ARIZONA_DRC1_CTRL5:
+ case ARIZONA_DRC2_CTRL1:
+ case ARIZONA_DRC2_CTRL2:
+ case ARIZONA_DRC2_CTRL3:
+ case ARIZONA_DRC2_CTRL4:
+ case ARIZONA_DRC2_CTRL5:
+ case ARIZONA_HPLPF1_1:
+ case ARIZONA_HPLPF1_2:
+ case ARIZONA_HPLPF2_1:
+ case ARIZONA_HPLPF2_2:
+ case ARIZONA_HPLPF3_1:
+ case ARIZONA_HPLPF3_2:
+ case ARIZONA_HPLPF4_1:
+ case ARIZONA_HPLPF4_2:
+ case ARIZONA_ASRC_ENABLE:
+ case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
+ case ARIZONA_ISRC_1_CTRL_1:
+ case ARIZONA_ISRC_1_CTRL_2:
+ case ARIZONA_ISRC_1_CTRL_3:
+ case ARIZONA_ISRC_2_CTRL_1:
+ case ARIZONA_ISRC_2_CTRL_2:
+ case ARIZONA_ISRC_2_CTRL_3:
+ case ARIZONA_ISRC_3_CTRL_1:
+ case ARIZONA_ISRC_3_CTRL_2:
+ case ARIZONA_ISRC_3_CTRL_3:
+ case ARIZONA_DSP1_CONTROL_1:
+ case ARIZONA_DSP1_CLOCKING_1:
+ case ARIZONA_DSP1_STATUS_1:
+ case ARIZONA_DSP1_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
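+/*
+ * regmap .volatile_reg callback: status, interrupt and detection
+ * registers whose contents change under hardware control and must
+ * therefore always be read from the device rather than the cache.
+ */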
+static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_FX_CTRL2:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_DSP1_STATUS_1:
+ case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_MIC_DETECT_3:
+ return true;
+ default:
+ return false;
+ }
+}
+
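+/*
+ * SPI control interface: 32-bit register address, 16 dummy (pad) bits,
+ * then a 16-bit value.  The defaults above seed an rbtree register
+ * cache.
+ */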
+const struct regmap_config wm5102_spi_regmap = {
+ .reg_bits = 32,
+ .pad_bits = 16,
+ .val_bits = 16,
+
+ .max_register = ARIZONA_DSP1_STATUS_2,
+ .readable_reg = wm5102_readable_register,
+ .volatile_reg = wm5102_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wm5102_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm5102_spi_regmap);
+
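+/*
+ * I2C control interface: identical to the SPI map except that no pad
+ * bits are inserted between the register address and the data.
+ */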
+const struct regmap_config wm5102_i2c_regmap = {
+ .reg_bits = 32,
+ .val_bits = 16,
+
+ .max_register = ARIZONA_DSP1_STATUS_2,
+ .readable_reg = wm5102_readable_register,
+ .volatile_reg = wm5102_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wm5102_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm5102_i2c_regmap);
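+/*
+ * Illustrative only: a bus driver would typically hand one of these
+ * configs straight to the regmap core, e.g.
+ *
+ *      regmap = devm_regmap_init_spi(spi, &wm5102_spi_regmap);
+ *      if (IS_ERR(regmap))
+ *              return PTR_ERR(regmap);
+ *
+ * (or devm_regmap_init_i2c() for the I2C map); the actual call sites
+ * live in the Arizona bus glue rather than in this table file.
+ */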
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
new file mode 100644
index 000000000000..bd8782c8896b
--- /dev/null
+++ b/drivers/mfd/wm5110-tables.c
@@ -0,0 +1,2281 @@
+/*
+ * wm5110-tables.c -- WM5110 data tables
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+
+#define WM5110_NUM_AOD_ISR 2
+#define WM5110_NUM_ISR 5
+
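+/*
+ * One-off register writes for rev A silicon.  Several addresses appear
+ * more than once in succession; the repeated writes are part of the
+ * patch sequence as supplied.
+ */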
+static const struct reg_default wm5110_reva_patch[] = {
+ { 0x80, 0x3 },
+ { 0x44, 0x20 },
+ { 0x45, 0x40 },
+ { 0x46, 0x60 },
+ { 0x47, 0x80 },
+ { 0x48, 0xa0 },
+ { 0x51, 0x13 },
+ { 0x52, 0x33 },
+ { 0x53, 0x53 },
+ { 0x54, 0x73 },
+ { 0x55, 0x75 },
+ { 0x56, 0xb3 },
+ { 0x2ef, 0x124 },
+ { 0x2ef, 0x124 },
+ { 0x2f0, 0x124 },
+ { 0x2f0, 0x124 },
+ { 0x2f1, 0x124 },
+ { 0x2f1, 0x124 },
+ { 0x2f2, 0x124 },
+ { 0x2f2, 0x124 },
+ { 0x2f3, 0x124 },
+ { 0x2f3, 0x124 },
+ { 0x2f4, 0x124 },
+ { 0x2f4, 0x124 },
+ { 0x2eb, 0x60 },
+ { 0x2ec, 0x60 },
+ { 0x2ed, 0x60 },
+ { 0xc30, 0x3e3e },
+ { 0xc30, 0x3e3e },
+ { 0xc31, 0x3e },
+ { 0xc32, 0x3e3e },
+ { 0xc32, 0x3e3e },
+ { 0xc33, 0x3e3e },
+ { 0xc33, 0x3e3e },
+ { 0xc34, 0x3e3e },
+ { 0xc34, 0x3e3e },
+ { 0xc35, 0x3e3e },
+ { 0xc35, 0x3e3e },
+ { 0xc36, 0x3e3e },
+ { 0xc36, 0x3e3e },
+ { 0xc37, 0x3e3e },
+ { 0xc37, 0x3e3e },
+ { 0xc38, 0x3e3e },
+ { 0xc38, 0x3e3e },
+ { 0xc30, 0x3e3e },
+ { 0xc30, 0x3e3e },
+ { 0xc39, 0x3e3e },
+ { 0xc39, 0x3e3e },
+ { 0xc3a, 0x3e3e },
+ { 0xc3a, 0x3e3e },
+ { 0xc3b, 0x3e3e },
+ { 0xc3b, 0x3e3e },
+ { 0xc3c, 0x3e },
+ { 0x201, 0x18a5 },
+ { 0x201, 0x18a5 },
+ { 0x201, 0x18a5 },
+ { 0x202, 0x4100 },
+ { 0x460, 0xc00 },
+ { 0x461, 0x8000 },
+ { 0x462, 0xc01 },
+ { 0x463, 0x50f0 },
+ { 0x464, 0xc01 },
+ { 0x465, 0x4820 },
+ { 0x466, 0xc01 },
+ { 0x466, 0xc01 },
+ { 0x467, 0x4040 },
+ { 0x468, 0xc01 },
+ { 0x468, 0xc01 },
+ { 0x469, 0x3940 },
+ { 0x46a, 0xc01 },
+ { 0x46a, 0xc01 },
+ { 0x46a, 0xc01 },
+ { 0x46b, 0x3310 },
+ { 0x46c, 0x801 },
+ { 0x46c, 0x801 },
+ { 0x46d, 0x2d80 },
+ { 0x46e, 0x801 },
+ { 0x46e, 0x801 },
+ { 0x46f, 0x2890 },
+ { 0x470, 0x801 },
+ { 0x470, 0x801 },
+ { 0x471, 0x1990 },
+ { 0x472, 0x801 },
+ { 0x472, 0x801 },
+ { 0x473, 0x1450 },
+ { 0x474, 0x801 },
+ { 0x474, 0x801 },
+ { 0x474, 0x801 },
+ { 0x475, 0x1020 },
+ { 0x476, 0x801 },
+ { 0x476, 0x801 },
+ { 0x476, 0x801 },
+ { 0x477, 0xcd0 },
+ { 0x478, 0x806 },
+ { 0x478, 0x806 },
+ { 0x479, 0xa30 },
+ { 0x47a, 0x806 },
+ { 0x47a, 0x806 },
+ { 0x47b, 0x810 },
+ { 0x47c, 0x80e },
+ { 0x47c, 0x80e },
+ { 0x47d, 0x510 },
+ { 0x47e, 0x81f },
+ { 0x47e, 0x81f },
+ { 0x2db, 0x0a00 },
+ { 0x2dd, 0x0023 },
+ { 0x2df, 0x0102 },
+ { 0x80, 0x0 },
+ { 0xc20, 0x0002 },
+ { 0x209, 0x002a },
+};
+
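+/*
+ * The rev A patch above is only needed on revisions 0 and 1;
+ * regmap_register_patch() writes it out immediately and re-applies it
+ * whenever the register cache is synchronised.
+ */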
+/* We use a function so we can use ARRAY_SIZE() */
+int wm5110_patch(struct arizona *arizona)
+{
+ switch (arizona->rev) {
+ case 0:
+ case 1:
+ return regmap_register_patch(arizona->regmap,
+ wm5110_reva_patch,
+ ARRAY_SIZE(wm5110_reva_patch));
+ default:
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(wm5110_patch);
+
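+/*
+ * Interrupts handled by the AOD (always-on) block: GP5 and jack-detect
+ * (JD1) rise/fall events, reported through the dedicated AOD status
+ * and mask registers.
+ */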
+static const struct regmap_irq wm5110_aod_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+ [ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+ [ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+ [ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+const struct regmap_irq_chip wm5110_aod = {
+ .name = "wm5110 AOD",
+ .status_base = ARIZONA_AOD_IRQ1,
+ .mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+ .ack_base = ARIZONA_AOD_IRQ1,
+ .wake_base = ARIZONA_WAKE_CONTROL,
+ .num_regs = 1,
+ .irqs = wm5110_aod_irqs,
+ .num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
+};
+EXPORT_SYMBOL_GPL(wm5110_aod);
+
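+/*
+ * Main interrupt set: .reg_offset selects which of the five
+ * status/mask registers (WM5110_NUM_ISR) each interrupt is reported
+ * in.
+ */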
+static const struct regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+ [ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+ [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+ [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+ [ARIZONA_IRQ_DSP4_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP4_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP3_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP3_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP2_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP2_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP1_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ8] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ8_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ7] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ7_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ6] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ6_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ5] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ5_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ4] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ4_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ3] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ3_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ2] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ1] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
+ },
+
+ [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
+ },
+ [ARIZONA_IRQ_SPK_SHUTDOWN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ },
+ [ARIZONA_IRQ_HPDET] = {
+ .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+ },
+ [ARIZONA_IRQ_MICDET] = {
+ .reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+ },
+ [ARIZONA_IRQ_WSEQ_DONE] = {
+ .reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DRC2_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_DRC1_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_ASRC2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_ASRC1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_UNDERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_OVERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+ },
+
+ [ARIZONA_IRQ_ASRC_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF3_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF2_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+ },
+ [ARIZONA_IRQ_AIF1_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CTRLIF_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+ },
+ [ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+ .reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+ },
+ [ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+ },
+
+ [ARIZONA_IRQ_BOOT_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DCS_DAC_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DCS_HP_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+ },
+};
+
+const struct regmap_irq_chip wm5110_irq = {
+ .name = "wm5110 IRQ",
+ .status_base = ARIZONA_INTERRUPT_STATUS_1,
+ .mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+ .ack_base = ARIZONA_INTERRUPT_STATUS_1,
+ .num_regs = 5,
+ .irqs = wm5110_irqs,
+ .num_irqs = ARRAY_SIZE(wm5110_irqs),
+};
+EXPORT_SYMBOL_GPL(wm5110_irq);
+
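+/*
+ * Illustrative sketch (not part of this patch): the exported chip
+ * descriptions are consumed by the regmap-irq core.  The wrapper name and
+ * IRQ flags below are assumptions for the example, so the block is
+ * compiled out.
+ */
+#if 0
+static int wm5110_irq_example(struct arizona *arizona, int irq)
+{
+ struct regmap_irq_chip_data *irq_data;
+ int ret, virq;
+
+ /* Hook the main IRQ block up to the chip's interrupt line */
+ ret = regmap_add_irq_chip(arizona->regmap, irq, IRQF_ONESHOT, 0,
+ &wm5110_irq, &irq_data);
+ if (ret != 0)
+ return ret;
+
+ /* Translate a chip interrupt into a Linux virq for request_irq() */
+ virq = regmap_irq_get_virq(irq_data, ARIZONA_IRQ_BOOT_DONE);
+ if (virq < 0)
+ return virq;
+
+ return 0;
+}
+#endif
+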
+static const struct reg_default wm5110_reg_default[] = {
+ { 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
+ { 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
+ { 0x0000000A, 0x0001 }, /* R10 - Ctrl IF I2C2 CFG 1 */
+ { 0x0000000B, 0x0036 }, /* R11 - Ctrl IF I2C1 CFG 2 */
+ { 0x0000000C, 0x0036 }, /* R12 - Ctrl IF I2C2 CFG 2 */
+ { 0x00000016, 0x0000 }, /* R22 - Write Sequencer Ctrl 0 */
+ { 0x00000017, 0x0000 }, /* R23 - Write Sequencer Ctrl 1 */
+ { 0x00000018, 0x0000 }, /* R24 - Write Sequencer Ctrl 2 */
+ { 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
+ { 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
+ { 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
+ { 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
+ { 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
+ { 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
+ { 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
+ { 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
+ { 0x00000040, 0x0000 }, /* R64 - Wake control */
+ { 0x00000041, 0x0000 }, /* R65 - Sequence control */
+ { 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
+ { 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
+ { 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
+ { 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
+ { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 1 */
+ { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 2 */
+ { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 3 */
+ { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 4 */
+ { 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
+ { 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
+ { 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
+ { 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
+ { 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
+ { 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
+ { 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
+ { 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
+ { 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
+ { 0x00000100, 0x0001 }, /* R256 - Clock 32k 1 */
+ { 0x00000101, 0x0504 }, /* R257 - System Clock 1 */
+ { 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
+ { 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
+ { 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
+ { 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
+ { 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
+ { 0x00000149, 0x0000 }, /* R329 - Output system clock */
+ { 0x0000014A, 0x0000 }, /* R330 - Output async clock */
+ { 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
+ { 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
+ { 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
+ { 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
+ { 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
+ { 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
+ { 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
+ { 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
+ { 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
+ { 0x00000175, 0x0006 }, /* R373 - FLL1 Control 5 */
+ { 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
+ { 0x00000177, 0x0281 }, /* R375 - FLL1 Loop Filter Test 1 */
+ { 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
+ { 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
+ { 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
+ { 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
+ { 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
+ { 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
+ { 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
+ { 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
+ { 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
+ { 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
+ { 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
+ { 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
+ { 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
+ { 0x00000195, 0x000C }, /* R405 - FLL2 Control 5 */
+ { 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
+ { 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
+ { 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
+ { 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
+ { 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
+ { 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
+ { 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
+ { 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
+ { 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
+ { 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
+ { 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
+ { 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
+ { 0x00000210, 0x0184 }, /* R528 - LDO1 Control 1 */
+ { 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
+ { 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
+ { 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
+ { 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
+ { 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
+ { 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
+ { 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
+ { 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
+ { 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
+ { 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
+ { 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
+ { 0x00000300, 0x0000 }, /* R768 - Input Enables */
+ { 0x00000308, 0x0000 }, /* R776 - Input Rate */
+ { 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
+ { 0x00000310, 0x2080 }, /* R784 - IN1L Control */
+ { 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
+ { 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
+ { 0x00000314, 0x0080 }, /* R788 - IN1R Control */
+ { 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
+ { 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
+ { 0x00000318, 0x2080 }, /* R792 - IN2L Control */
+ { 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
+ { 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
+ { 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
+ { 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
+ { 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
+ { 0x00000320, 0x2080 }, /* R800 - IN3L Control */
+ { 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
+ { 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
+ { 0x00000324, 0x0080 }, /* R804 - IN3R Control */
+ { 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
+ { 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
+ { 0x00000328, 0x2000 }, /* R808 - IN4L Control */
+ { 0x00000329, 0x0180 }, /* R809 - ADC Digital Volume 4L */
+ { 0x0000032A, 0x0000 }, /* R810 - DMIC4L Control */
+ { 0x0000032D, 0x0180 }, /* R813 - ADC Digital Volume 4R */
+ { 0x0000032E, 0x0000 }, /* R814 - DMIC4R Control */
+ { 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
+ { 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
+ { 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
+ { 0x00000410, 0x0080 }, /* R1040 - Output Path Config 1L */
+ { 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
+ { 0x00000412, 0x0080 }, /* R1042 - DAC Volume Limit 1L */
+ { 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
+ { 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
+ { 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
+ { 0x00000416, 0x0080 }, /* R1046 - DAC Volume Limit 1R */
+ { 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
+ { 0x00000418, 0x0080 }, /* R1048 - Output Path Config 2L */
+ { 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
+ { 0x0000041A, 0x0080 }, /* R1050 - DAC Volume Limit 2L */
+ { 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
+ { 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
+ { 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
+ { 0x0000041E, 0x0080 }, /* R1054 - DAC Volume Limit 2R */
+ { 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
+ { 0x00000420, 0x0080 }, /* R1056 - Output Path Config 3L */
+ { 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
+ { 0x00000422, 0x0080 }, /* R1058 - DAC Volume Limit 3L */
+ { 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
+ { 0x00000424, 0x0080 }, /* R1060 - Output Path Config 3R */
+ { 0x00000425, 0x0180 }, /* R1061 - DAC Digital Volume 3R */
+ { 0x00000426, 0x0080 }, /* R1062 - DAC Volume Limit 3R */
+ { 0x00000427, 0x0020 }, /* R1063 - Noise Gate Select 3R */
+ { 0x00000428, 0x0000 }, /* R1064 - Output Path Config 4L */
+ { 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
+ { 0x0000042A, 0x0080 }, /* R1066 - Out Volume 4L */
+ { 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
+ { 0x0000042C, 0x0000 }, /* R1068 - Output Path Config 4R */
+ { 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
+ { 0x0000042E, 0x0080 }, /* R1070 - Out Volume 4R */
+ { 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
+ { 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
+ { 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
+ { 0x00000432, 0x0080 }, /* R1074 - DAC Volume Limit 5L */
+ { 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
+ { 0x00000434, 0x0000 }, /* R1076 - Output Path Config 5R */
+ { 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
+ { 0x00000436, 0x0080 }, /* R1078 - DAC Volume Limit 5R */
+ { 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
+ { 0x00000438, 0x0000 }, /* R1080 - Output Path Config 6L */
+ { 0x00000439, 0x0180 }, /* R1081 - DAC Digital Volume 6L */
+ { 0x0000043A, 0x0080 }, /* R1082 - DAC Volume Limit 6L */
+ { 0x0000043B, 0x0400 }, /* R1083 - Noise Gate Select 6L */
+ { 0x0000043C, 0x0000 }, /* R1084 - Output Path Config 6R */
+ { 0x0000043D, 0x0180 }, /* R1085 - DAC Digital Volume 6R */
+ { 0x0000043E, 0x0080 }, /* R1086 - DAC Volume Limit 6R */
+ { 0x0000043F, 0x0800 }, /* R1087 - Noise Gate Select 6R */
+ { 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
+ { 0x00000458, 0x0001 }, /* R1112 - Noise Gate Control */
+ { 0x00000480, 0x0040 }, /* R1152 - Class W ANC Threshold 1 */
+ { 0x00000481, 0x0040 }, /* R1153 - Class W ANC Threshold 2 */
+ { 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
+ { 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
+ { 0x00000492, 0x0069 }, /* R1170 - PDM SPK2 CTRL 1 */
+ { 0x00000493, 0x0000 }, /* R1171 - PDM SPK2 CTRL 2 */
+ { 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
+ { 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
+ { 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
+ { 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
+ { 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
+ { 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
+ { 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
+ { 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
+ { 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
+ { 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
+ { 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
+ { 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
+ { 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
+ { 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
+ { 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
+ { 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
+ { 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
+ { 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
+ { 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
+ { 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
+ { 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
+ { 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
+ { 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
+ { 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
+ { 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
+ { 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
+ { 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
+ { 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
+ { 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
+ { 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
+ { 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
+ { 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
+ { 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
+ { 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
+ { 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
+ { 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
+ { 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
+ { 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
+ { 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
+ { 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
+ { 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
+ { 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
+ { 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
+ { 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
+ { 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
+ { 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
+ { 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
+ { 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
+ { 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
+ { 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
+ { 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
+ { 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
+ { 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
+ { 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
+ { 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
+ { 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
+ { 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
+ { 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
+ { 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
+ { 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
+ { 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
+ { 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
+ { 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
+ { 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
+ { 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
+ { 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
+ { 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
+ { 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
+ { 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
+ { 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
+ { 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
+ { 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
+ { 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
+ { 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
+ { 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
+ { 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
+ { 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
+ { 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
+ { 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
+ { 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
+ { 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
+ { 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
+ { 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
+ { 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
+ { 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
+ { 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
+ { 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
+ { 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
+ { 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
+ { 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
+ { 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
+ { 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
+ { 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
+ { 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
+ { 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
+ { 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
+ { 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
+ { 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
+ { 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
+ { 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
+ { 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
+ { 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
+ { 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
+ { 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
+ { 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
+ { 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
+ { 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
+ { 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
+ { 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
+ { 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
+ { 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
+ { 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
+ { 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
+ { 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
+ { 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
+ { 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
+ { 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
+ { 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
+ { 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
+ { 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
+ { 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
+ { 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
+ { 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
+ { 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
+ { 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
+ { 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
+ { 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
+ { 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
+ { 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
+ { 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
+ { 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
+ { 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
+ { 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
+ { 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
+ { 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
+ { 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
+ { 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
+ { 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
+ { 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
+ { 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
+ { 0x000006A8, 0x0000 }, /* R1704 - OUT3RMIX Input 1 Source */
+ { 0x000006A9, 0x0080 }, /* R1705 - OUT3RMIX Input 1 Volume */
+ { 0x000006AA, 0x0000 }, /* R1706 - OUT3RMIX Input 2 Source */
+ { 0x000006AB, 0x0080 }, /* R1707 - OUT3RMIX Input 2 Volume */
+ { 0x000006AC, 0x0000 }, /* R1708 - OUT3RMIX Input 3 Source */
+ { 0x000006AD, 0x0080 }, /* R1709 - OUT3RMIX Input 3 Volume */
+ { 0x000006AE, 0x0000 }, /* R1710 - OUT3RMIX Input 4 Source */
+ { 0x000006AF, 0x0080 }, /* R1711 - OUT3RMIX Input 4 Volume */
+ { 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
+ { 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
+ { 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
+ { 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
+ { 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
+ { 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
+ { 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
+ { 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
+ { 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
+ { 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
+ { 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
+ { 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
+ { 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
+ { 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
+ { 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
+ { 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
+ { 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
+ { 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
+ { 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
+ { 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
+ { 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
+ { 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
+ { 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
+ { 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
+ { 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
+ { 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
+ { 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
+ { 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
+ { 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
+ { 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
+ { 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
+ { 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
+ { 0x000006D0, 0x0000 }, /* R1744 - OUT6LMIX Input 1 Source */
+ { 0x000006D1, 0x0080 }, /* R1745 - OUT6LMIX Input 1 Volume */
+ { 0x000006D2, 0x0000 }, /* R1746 - OUT6LMIX Input 2 Source */
+ { 0x000006D3, 0x0080 }, /* R1747 - OUT6LMIX Input 2 Volume */
+ { 0x000006D4, 0x0000 }, /* R1748 - OUT6LMIX Input 3 Source */
+ { 0x000006D5, 0x0080 }, /* R1749 - OUT6LMIX Input 3 Volume */
+ { 0x000006D6, 0x0000 }, /* R1750 - OUT6LMIX Input 4 Source */
+ { 0x000006D7, 0x0080 }, /* R1751 - OUT6LMIX Input 4 Volume */
+ { 0x000006D8, 0x0000 }, /* R1752 - OUT6RMIX Input 1 Source */
+ { 0x000006D9, 0x0080 }, /* R1753 - OUT6RMIX Input 1 Volume */
+ { 0x000006DA, 0x0000 }, /* R1754 - OUT6RMIX Input 2 Source */
+ { 0x000006DB, 0x0080 }, /* R1755 - OUT6RMIX Input 2 Volume */
+ { 0x000006DC, 0x0000 }, /* R1756 - OUT6RMIX Input 3 Source */
+ { 0x000006DD, 0x0080 }, /* R1757 - OUT6RMIX Input 3 Volume */
+ { 0x000006DE, 0x0000 }, /* R1758 - OUT6RMIX Input 4 Source */
+ { 0x000006DF, 0x0080 }, /* R1759 - OUT6RMIX Input 4 Volume */
+ { 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
+ { 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
+ { 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
+ { 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
+ { 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
+ { 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
+ { 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
+ { 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
+ { 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
+ { 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
+ { 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
+ { 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
+ { 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
+ { 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
+ { 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
+ { 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
+ { 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
+ { 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
+ { 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
+ { 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
+ { 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
+ { 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
+ { 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
+ { 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
+ { 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
+ { 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
+ { 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
+ { 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
+ { 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
+ { 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
+ { 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
+ { 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
+ { 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
+ { 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
+ { 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
+ { 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
+ { 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
+ { 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
+ { 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
+ { 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
+ { 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
+ { 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
+ { 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
+ { 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
+ { 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
+ { 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
+ { 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
+ { 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
+ { 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
+ { 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
+ { 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
+ { 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
+ { 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
+ { 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
+ { 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
+ { 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
+ { 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
+ { 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
+ { 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
+ { 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
+ { 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
+ { 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
+ { 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
+ { 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
+ { 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
+ { 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
+ { 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
+ { 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
+ { 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
+ { 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
+ { 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
+ { 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
+ { 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
+ { 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
+ { 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
+ { 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
+ { 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
+ { 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
+ { 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
+ { 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
+ { 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
+ { 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
+ { 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
+ { 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
+ { 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
+ { 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
+ { 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
+ { 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
+ { 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
+ { 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
+ { 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
+ { 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
+ { 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
+ { 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
+ { 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
+ { 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
+ { 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
+ { 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
+ { 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
+ { 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
+ { 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
+ { 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
+ { 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
+ { 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
+ { 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
+ { 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
+ { 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
+ { 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
+ { 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
+ { 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
+ { 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
+ { 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
+ { 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
+ { 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
+ { 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
+ { 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
+ { 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
+ { 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
+ { 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
+ { 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
+ { 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
+ { 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
+ { 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
+ { 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
+ { 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
+ { 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
+ { 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
+ { 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
+ { 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
+ { 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
+ { 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
+ { 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
+ { 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
+ { 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
+ { 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
+ { 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
+ { 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
+ { 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
+ { 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
+ { 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
+ { 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
+ { 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
+ { 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
+ { 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
+ { 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
+ { 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
+ { 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
+ { 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
+ { 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
+ { 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
+ { 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
+ { 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
+ { 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
+ { 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
+ { 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
+ { 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
+ { 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
+ { 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
+ { 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
+ { 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
+ { 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
+ { 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
+ { 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
+ { 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
+ { 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
+ { 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
+ { 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
+ { 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
+ { 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
+ { 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
+ { 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
+ { 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
+ { 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
+ { 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
+ { 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
+ { 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
+ { 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
+ { 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
+ { 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
+ { 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
+ { 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
+ { 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
+ { 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
+ { 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
+ { 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
+ { 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
+ { 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
+ { 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
+ { 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
+ { 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
+ { 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
+ { 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
+ { 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
+ { 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
+ { 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
+ { 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
+ { 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
+ { 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
+ { 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
+ { 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
+ { 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
+ { 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
+ { 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
+ { 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
+ { 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
+ { 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
+ { 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
+ { 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
+ { 0x000008D0, 0x0000 }, /* R2256 - DRC2LMIX Input 1 Source */
+ { 0x000008D1, 0x0080 }, /* R2257 - DRC2LMIX Input 1 Volume */
+ { 0x000008D2, 0x0000 }, /* R2258 - DRC2LMIX Input 2 Source */
+ { 0x000008D3, 0x0080 }, /* R2259 - DRC2LMIX Input 2 Volume */
+ { 0x000008D4, 0x0000 }, /* R2260 - DRC2LMIX Input 3 Source */
+ { 0x000008D5, 0x0080 }, /* R2261 - DRC2LMIX Input 3 Volume */
+ { 0x000008D6, 0x0000 }, /* R2262 - DRC2LMIX Input 4 Source */
+ { 0x000008D7, 0x0080 }, /* R2263 - DRC2LMIX Input 4 Volume */
+ { 0x000008D8, 0x0000 }, /* R2264 - DRC2RMIX Input 1 Source */
+ { 0x000008D9, 0x0080 }, /* R2265 - DRC2RMIX Input 1 Volume */
+ { 0x000008DA, 0x0000 }, /* R2266 - DRC2RMIX Input 2 Source */
+ { 0x000008DB, 0x0080 }, /* R2267 - DRC2RMIX Input 2 Volume */
+ { 0x000008DC, 0x0000 }, /* R2268 - DRC2RMIX Input 3 Source */
+ { 0x000008DD, 0x0080 }, /* R2269 - DRC2RMIX Input 3 Volume */
+ { 0x000008DE, 0x0000 }, /* R2270 - DRC2RMIX Input 4 Source */
+ { 0x000008DF, 0x0080 }, /* R2271 - DRC2RMIX Input 4 Volume */
+ { 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
+ { 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
+ { 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
+ { 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
+ { 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
+ { 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
+ { 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
+ { 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
+ { 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
+ { 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
+ { 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
+ { 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
+ { 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
+ { 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
+ { 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
+ { 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
+ { 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
+ { 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
+ { 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
+ { 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
+ { 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
+ { 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
+ { 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
+ { 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
+ { 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
+ { 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
+ { 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
+ { 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
+ { 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
+ { 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
+ { 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
+ { 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
+ { 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
+ { 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
+ { 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
+ { 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
+ { 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
+ { 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
+ { 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
+ { 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
+ { 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
+ { 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
+ { 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
+ { 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
+ { 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
+ { 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
+ { 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
+ { 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
+ { 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
+ { 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
+ { 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
+ { 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
+ { 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
+ { 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
+ { 0x00000980, 0x0000 }, /* R2432 - DSP2LMIX Input 1 Source */
+ { 0x00000981, 0x0080 }, /* R2433 - DSP2LMIX Input 1 Volume */
+ { 0x00000982, 0x0000 }, /* R2434 - DSP2LMIX Input 2 Source */
+ { 0x00000983, 0x0080 }, /* R2435 - DSP2LMIX Input 2 Volume */
+ { 0x00000984, 0x0000 }, /* R2436 - DSP2LMIX Input 3 Source */
+ { 0x00000985, 0x0080 }, /* R2437 - DSP2LMIX Input 3 Volume */
+ { 0x00000986, 0x0000 }, /* R2438 - DSP2LMIX Input 4 Source */
+ { 0x00000987, 0x0080 }, /* R2439 - DSP2LMIX Input 4 Volume */
+ { 0x00000988, 0x0000 }, /* R2440 - DSP2RMIX Input 1 Source */
+ { 0x00000989, 0x0080 }, /* R2441 - DSP2RMIX Input 1 Volume */
+ { 0x0000098A, 0x0000 }, /* R2442 - DSP2RMIX Input 2 Source */
+ { 0x0000098B, 0x0080 }, /* R2443 - DSP2RMIX Input 2 Volume */
+ { 0x0000098C, 0x0000 }, /* R2444 - DSP2RMIX Input 3 Source */
+ { 0x0000098D, 0x0080 }, /* R2445 - DSP2RMIX Input 3 Volume */
+ { 0x0000098E, 0x0000 }, /* R2446 - DSP2RMIX Input 4 Source */
+ { 0x0000098F, 0x0080 }, /* R2447 - DSP2RMIX Input 4 Volume */
+ { 0x00000990, 0x0000 }, /* R2448 - DSP2AUX1MIX Input 1 Source */
+ { 0x00000998, 0x0000 }, /* R2456 - DSP2AUX2MIX Input 1 Source */
+ { 0x000009A0, 0x0000 }, /* R2464 - DSP2AUX3MIX Input 1 Source */
+ { 0x000009A8, 0x0000 }, /* R2472 - DSP2AUX4MIX Input 1 Source */
+ { 0x000009B0, 0x0000 }, /* R2480 - DSP2AUX5MIX Input 1 Source */
+ { 0x000009B8, 0x0000 }, /* R2488 - DSP2AUX6MIX Input 1 Source */
+ { 0x000009C0, 0x0000 }, /* R2496 - DSP3LMIX Input 1 Source */
+ { 0x000009C1, 0x0080 }, /* R2497 - DSP3LMIX Input 1 Volume */
+ { 0x000009C2, 0x0000 }, /* R2498 - DSP3LMIX Input 2 Source */
+ { 0x000009C3, 0x0080 }, /* R2499 - DSP3LMIX Input 2 Volume */
+ { 0x000009C4, 0x0000 }, /* R2500 - DSP3LMIX Input 3 Source */
+ { 0x000009C5, 0x0080 }, /* R2501 - DSP3LMIX Input 3 Volume */
+ { 0x000009C6, 0x0000 }, /* R2502 - DSP3LMIX Input 4 Source */
+ { 0x000009C7, 0x0080 }, /* R2503 - DSP3LMIX Input 4 Volume */
+ { 0x000009C8, 0x0000 }, /* R2504 - DSP3RMIX Input 1 Source */
+ { 0x000009C9, 0x0080 }, /* R2505 - DSP3RMIX Input 1 Volume */
+ { 0x000009CA, 0x0000 }, /* R2506 - DSP3RMIX Input 2 Source */
+ { 0x000009CB, 0x0080 }, /* R2507 - DSP3RMIX Input 2 Volume */
+ { 0x000009CC, 0x0000 }, /* R2508 - DSP3RMIX Input 3 Source */
+ { 0x000009CD, 0x0080 }, /* R2509 - DSP3RMIX Input 3 Volume */
+ { 0x000009CE, 0x0000 }, /* R2510 - DSP3RMIX Input 4 Source */
+ { 0x000009CF, 0x0080 }, /* R2511 - DSP3RMIX Input 4 Volume */
+ { 0x000009D0, 0x0000 }, /* R2512 - DSP3AUX1MIX Input 1 Source */
+ { 0x000009D8, 0x0000 }, /* R2520 - DSP3AUX2MIX Input 1 Source */
+ { 0x000009E0, 0x0000 }, /* R2528 - DSP3AUX3MIX Input 1 Source */
+ { 0x000009E8, 0x0000 }, /* R2536 - DSP3AUX4MIX Input 1 Source */
+ { 0x000009F0, 0x0000 }, /* R2544 - DSP3AUX5MIX Input 1 Source */
+ { 0x000009F8, 0x0000 }, /* R2552 - DSP3AUX6MIX Input 1 Source */
+ { 0x00000A00, 0x0000 }, /* R2560 - DSP4LMIX Input 1 Source */
+ { 0x00000A01, 0x0080 }, /* R2561 - DSP4LMIX Input 1 Volume */
+ { 0x00000A02, 0x0000 }, /* R2562 - DSP4LMIX Input 2 Source */
+ { 0x00000A03, 0x0080 }, /* R2563 - DSP4LMIX Input 2 Volume */
+ { 0x00000A04, 0x0000 }, /* R2564 - DSP4LMIX Input 3 Source */
+ { 0x00000A05, 0x0080 }, /* R2565 - DSP4LMIX Input 3 Volume */
+ { 0x00000A06, 0x0000 }, /* R2566 - DSP4LMIX Input 4 Source */
+ { 0x00000A07, 0x0080 }, /* R2567 - DSP4LMIX Input 4 Volume */
+ { 0x00000A08, 0x0000 }, /* R2568 - DSP4RMIX Input 1 Source */
+ { 0x00000A09, 0x0080 }, /* R2569 - DSP4RMIX Input 1 Volume */
+ { 0x00000A0A, 0x0000 }, /* R2570 - DSP4RMIX Input 2 Source */
+ { 0x00000A0B, 0x0080 }, /* R2571 - DSP4RMIX Input 2 Volume */
+ { 0x00000A0C, 0x0000 }, /* R2572 - DSP4RMIX Input 3 Source */
+ { 0x00000A0D, 0x0080 }, /* R2573 - DSP4RMIX Input 3 Volume */
+ { 0x00000A0E, 0x0000 }, /* R2574 - DSP4RMIX Input 4 Source */
+ { 0x00000A0F, 0x0080 }, /* R2575 - DSP4RMIX Input 4 Volume */
+ { 0x00000A10, 0x0000 }, /* R2576 - DSP4AUX1MIX Input 1 Source */
+ { 0x00000A18, 0x0000 }, /* R2584 - DSP4AUX2MIX Input 1 Source */
+ { 0x00000A20, 0x0000 }, /* R2592 - DSP4AUX3MIX Input 1 Source */
+ { 0x00000A28, 0x0000 }, /* R2600 - DSP4AUX4MIX Input 1 Source */
+ { 0x00000A30, 0x0000 }, /* R2608 - DSP4AUX5MIX Input 1 Source */
+ { 0x00000A38, 0x0000 }, /* R2616 - DSP4AUX6MIX Input 1 Source */
+ { 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
+ { 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
+ { 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
+ { 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
+ { 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
+ { 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
+ { 0x00000B10, 0x0000 }, /* R2832 - ISRC1DEC3MIX Input 1 Source */
+ { 0x00000B18, 0x0000 }, /* R2840 - ISRC1DEC4MIX Input 1 Source */
+ { 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
+ { 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
+ { 0x00000B30, 0x0000 }, /* R2864 - ISRC1INT3MIX Input 1 Source */
+ { 0x00000B38, 0x0000 }, /* R2872 - ISRC1INT4MIX Input 1 Source */
+ { 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
+ { 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
+ { 0x00000B50, 0x0000 }, /* R2896 - ISRC2DEC3MIX Input 1 Source */
+ { 0x00000B58, 0x0000 }, /* R2904 - ISRC2DEC4MIX Input 1 Source */
+ { 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
+ { 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
+ { 0x00000B70, 0x0000 }, /* R2928 - ISRC2INT3MIX Input 1 Source */
+ { 0x00000B78, 0x0000 }, /* R2936 - ISRC2INT4MIX Input 1 Source */
+ { 0x00000B80, 0x0000 }, /* R2944 - ISRC3DEC1MIX Input 1 Source */
+ { 0x00000B88, 0x0000 }, /* R2952 - ISRC3DEC2MIX Input 1 Source */
+ { 0x00000B90, 0x0000 }, /* R2960 - ISRC3DEC3MIX Input 1 Source */
+ { 0x00000B98, 0x0000 }, /* R2968 - ISRC3DEC4MIX Input 1 Source */
+ { 0x00000BA0, 0x0000 }, /* R2976 - ISRC3INT1MIX Input 1 Source */
+ { 0x00000BA8, 0x0000 }, /* R2984 - ISRC3INT2MIX Input 1 Source */
+ { 0x00000BB0, 0x0000 }, /* R2992 - ISRC3INT3MIX Input 1 Source */
+ { 0x00000BB8, 0x0000 }, /* R3000 - ISRC3INT4MIX Input 1 Source */
+ { 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
+ { 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
+ { 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
+ { 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
+ { 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
+ { 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
+ { 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
+ { 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
+ { 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
+ { 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
+ { 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
+ { 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
+ { 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
+ { 0x00000C30, 0x8282 }, /* R3120 - Misc Pad Ctrl 7 */
+ { 0x00000C31, 0x0082 }, /* R3121 - Misc Pad Ctrl 8 */
+ { 0x00000C32, 0x8282 }, /* R3122 - Misc Pad Ctrl 9 */
+ { 0x00000C33, 0x8282 }, /* R3123 - Misc Pad Ctrl 10 */
+ { 0x00000C34, 0x8282 }, /* R3124 - Misc Pad Ctrl 11 */
+ { 0x00000C35, 0x8282 }, /* R3125 - Misc Pad Ctrl 12 */
+ { 0x00000C36, 0x8282 }, /* R3126 - Misc Pad Ctrl 13 */
+ { 0x00000C37, 0x8282 }, /* R3127 - Misc Pad Ctrl 14 */
+ { 0x00000C38, 0x8282 }, /* R3128 - Misc Pad Ctrl 15 */
+ { 0x00000C39, 0x8282 }, /* R3129 - Misc Pad Ctrl 16 */
+ { 0x00000C3A, 0x8282 }, /* R3130 - Misc Pad Ctrl 17 */
+ { 0x00000C3B, 0x8282 }, /* R3131 - Misc Pad Ctrl 18 */
+ { 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
+ { 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
+ { 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
+ { 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
+ { 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
+ { 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
+ { 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
+ { 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
+ { 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
+ { 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
+ { 0x00000D50, 0x0000 }, /* R3408 - AOD wkup and trig */
+ { 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
+ { 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
+ { 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
+ { 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
+ { 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
+ { 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
+ { 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
+ { 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
+ { 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
+ { 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
+ { 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
+ { 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
+ { 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
+ { 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
+ { 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
+ { 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
+ { 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
+ { 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
+ { 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
+ { 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
+ { 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
+ { 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
+ { 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
+ { 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
+ { 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
+ { 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
+ { 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
+ { 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
+ { 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
+ { 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
+ { 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
+ { 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
+ { 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
+ { 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
+ { 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
+ { 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
+ { 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
+ { 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
+ { 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
+ { 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
+ { 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
+ { 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
+ { 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
+ { 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
+ { 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
+ { 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
+ { 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
+ { 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
+ { 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
+ { 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
+ { 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
+ { 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
+ { 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
+ { 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
+ { 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
+ { 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
+ { 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
+ { 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
+ { 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
+ { 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
+ { 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
+ { 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
+ { 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
+ { 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
+ { 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
+ { 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
+ { 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
+ { 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
+ { 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
+ { 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
+ { 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
+ { 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
+ { 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
+ { 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
+ { 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
+ { 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
+ { 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
+ { 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
+ { 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
+ { 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
+ { 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
+ { 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
+ { 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
+ { 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
+ { 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
+ { 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
+ { 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
+ { 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
+ { 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
+ { 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
+ { 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
+ { 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
+ { 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
+ { 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
+ { 0x00000E89, 0x0018 }, /* R3721 - DRC2 ctrl1 */
+ { 0x00000E8A, 0x0933 }, /* R3722 - DRC2 ctrl2 */
+ { 0x00000E8B, 0x0018 }, /* R3723 - DRC2 ctrl3 */
+ { 0x00000E8C, 0x0000 }, /* R3724 - DRC2 ctrl4 */
+ { 0x00000E8D, 0x0000 }, /* R3725 - DRC2 ctrl5 */
+ { 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
+ { 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
+ { 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
+ { 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
+ { 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
+ { 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
+ { 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
+ { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
+ { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
+ { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
+ { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
+ { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
+ { 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
+ { 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
+ { 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
+ { 0x00000EF6, 0x0000 }, /* R3830 - ISRC 3 CTRL 1 */
+ { 0x00000EF7, 0x0000 }, /* R3831 - ISRC 3 CTRL 2 */
+ { 0x00000EF8, 0x0000 }, /* R3832 - ISRC 3 CTRL 3 */
+ { 0x00000F00, 0x0000 }, /* R3840 - Clock Control */
+ { 0x00000F01, 0x0000 }, /* R3841 - ANC_SRC */
+ { 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
+ { 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
+ { 0x00001200, 0x0010 }, /* R4608 - DSP2 Control 1 */
+ { 0x00001201, 0x0000 }, /* R4609 - DSP2 Clocking 1 */
+ { 0x00001300, 0x0010 }, /* R4864 - DSP3 Control 1 */
+ { 0x00001301, 0x0000 }, /* R4865 - DSP3 Clocking 1 */
+ { 0x00001400, 0x0010 }, /* R5120 - DSP4 Control 1 */
+ { 0x00001401, 0x0000 }, /* R5121 - DSP4 Clocking 1 */
+ { 0x00001404, 0x0000 }, /* R5124 - DSP4 Status 1 */
+};
+
+static bool wm5110_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_CTRL_IF_SPI_CFG_1:
+ case ARIZONA_CTRL_IF_I2C1_CFG_1:
+ case ARIZONA_CTRL_IF_I2C2_CFG_1:
+ case ARIZONA_CTRL_IF_I2C1_CFG_2:
+ case ARIZONA_CTRL_IF_I2C2_CFG_2:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+ case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+ case ARIZONA_TONE_GENERATOR_1:
+ case ARIZONA_TONE_GENERATOR_2:
+ case ARIZONA_TONE_GENERATOR_3:
+ case ARIZONA_TONE_GENERATOR_4:
+ case ARIZONA_TONE_GENERATOR_5:
+ case ARIZONA_PWM_DRIVE_1:
+ case ARIZONA_PWM_DRIVE_2:
+ case ARIZONA_PWM_DRIVE_3:
+ case ARIZONA_WAKE_CONTROL:
+ case ARIZONA_SEQUENCE_CONTROL:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+ case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+ case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+ case ARIZONA_COMFORT_NOISE_GENERATOR:
+ case ARIZONA_HAPTICS_CONTROL_1:
+ case ARIZONA_HAPTICS_CONTROL_2:
+ case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_1_DURATION:
+ case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_2_DURATION:
+ case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+ case ARIZONA_HAPTICS_PHASE_3_DURATION:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_CLOCK_32K_1:
+ case ARIZONA_SYSTEM_CLOCK_1:
+ case ARIZONA_SAMPLE_RATE_1:
+ case ARIZONA_SAMPLE_RATE_2:
+ case ARIZONA_SAMPLE_RATE_3:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_ASYNC_CLOCK_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+ case ARIZONA_OUTPUT_ASYNC_CLOCK:
+ case ARIZONA_RATE_ESTIMATOR_1:
+ case ARIZONA_RATE_ESTIMATOR_2:
+ case ARIZONA_RATE_ESTIMATOR_3:
+ case ARIZONA_RATE_ESTIMATOR_4:
+ case ARIZONA_RATE_ESTIMATOR_5:
+ case ARIZONA_FLL1_CONTROL_1:
+ case ARIZONA_FLL1_CONTROL_2:
+ case ARIZONA_FLL1_CONTROL_3:
+ case ARIZONA_FLL1_CONTROL_4:
+ case ARIZONA_FLL1_CONTROL_5:
+ case ARIZONA_FLL1_CONTROL_6:
+ case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL1_NCO_TEST_0:
+ case ARIZONA_FLL1_SYNCHRONISER_1:
+ case ARIZONA_FLL1_SYNCHRONISER_2:
+ case ARIZONA_FLL1_SYNCHRONISER_3:
+ case ARIZONA_FLL1_SYNCHRONISER_4:
+ case ARIZONA_FLL1_SYNCHRONISER_5:
+ case ARIZONA_FLL1_SYNCHRONISER_6:
+ case ARIZONA_FLL1_SPREAD_SPECTRUM:
+ case ARIZONA_FLL1_GPIO_CLOCK:
+ case ARIZONA_FLL2_CONTROL_1:
+ case ARIZONA_FLL2_CONTROL_2:
+ case ARIZONA_FLL2_CONTROL_3:
+ case ARIZONA_FLL2_CONTROL_4:
+ case ARIZONA_FLL2_CONTROL_5:
+ case ARIZONA_FLL2_CONTROL_6:
+ case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+ case ARIZONA_FLL2_NCO_TEST_0:
+ case ARIZONA_FLL2_SYNCHRONISER_1:
+ case ARIZONA_FLL2_SYNCHRONISER_2:
+ case ARIZONA_FLL2_SYNCHRONISER_3:
+ case ARIZONA_FLL2_SYNCHRONISER_4:
+ case ARIZONA_FLL2_SYNCHRONISER_5:
+ case ARIZONA_FLL2_SYNCHRONISER_6:
+ case ARIZONA_FLL2_SPREAD_SPECTRUM:
+ case ARIZONA_FLL2_GPIO_CLOCK:
+ case ARIZONA_MIC_CHARGE_PUMP_1:
+ case ARIZONA_LDO1_CONTROL_1:
+ case ARIZONA_LDO2_CONTROL_1:
+ case ARIZONA_MIC_BIAS_CTRL_1:
+ case ARIZONA_MIC_BIAS_CTRL_2:
+ case ARIZONA_MIC_BIAS_CTRL_3:
+ case ARIZONA_ACCESSORY_DETECT_MODE_1:
+ case ARIZONA_HEADPHONE_DETECT_1:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_MIC_DETECT_1:
+ case ARIZONA_MIC_DETECT_2:
+ case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
+ case ARIZONA_JACK_DETECT_ANALOGUE:
+ case ARIZONA_INPUT_ENABLES:
+ case ARIZONA_INPUT_ENABLES_STATUS:
+ case ARIZONA_INPUT_RATE:
+ case ARIZONA_INPUT_VOLUME_RAMP:
+ case ARIZONA_IN1L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+ case ARIZONA_DMIC1L_CONTROL:
+ case ARIZONA_IN1R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+ case ARIZONA_DMIC1R_CONTROL:
+ case ARIZONA_IN2L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+ case ARIZONA_DMIC2L_CONTROL:
+ case ARIZONA_IN2R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_2R:
+ case ARIZONA_DMIC2R_CONTROL:
+ case ARIZONA_IN3L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_3L:
+ case ARIZONA_DMIC3L_CONTROL:
+ case ARIZONA_IN3R_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_3R:
+ case ARIZONA_DMIC3R_CONTROL:
+ case ARIZONA_IN4L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_4L:
+ case ARIZONA_DMIC4L_CONTROL:
+ case ARIZONA_ADC_DIGITAL_VOLUME_4R:
+ case ARIZONA_DMIC4R_CONTROL:
+ case ARIZONA_OUTPUT_ENABLES_1:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_OUTPUT_RATE_1:
+ case ARIZONA_OUTPUT_VOLUME_RAMP:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+ case ARIZONA_DAC_VOLUME_LIMIT_1L:
+ case ARIZONA_NOISE_GATE_SELECT_1L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+ case ARIZONA_DAC_VOLUME_LIMIT_1R:
+ case ARIZONA_NOISE_GATE_SELECT_1R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+ case ARIZONA_DAC_VOLUME_LIMIT_2L:
+ case ARIZONA_NOISE_GATE_SELECT_2L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+ case ARIZONA_DAC_VOLUME_LIMIT_2R:
+ case ARIZONA_NOISE_GATE_SELECT_2R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+ case ARIZONA_DAC_VOLUME_LIMIT_3L:
+ case ARIZONA_NOISE_GATE_SELECT_3L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_3R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_3R:
+ case ARIZONA_DAC_VOLUME_LIMIT_3R:
+ case ARIZONA_NOISE_GATE_SELECT_3R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+ case ARIZONA_OUT_VOLUME_4L:
+ case ARIZONA_NOISE_GATE_SELECT_4L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+ case ARIZONA_OUT_VOLUME_4R:
+ case ARIZONA_NOISE_GATE_SELECT_4R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+ case ARIZONA_DAC_VOLUME_LIMIT_5L:
+ case ARIZONA_NOISE_GATE_SELECT_5L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+ case ARIZONA_DAC_VOLUME_LIMIT_5R:
+ case ARIZONA_NOISE_GATE_SELECT_5R:
+ case ARIZONA_OUTPUT_PATH_CONFIG_6L:
+ case ARIZONA_DAC_DIGITAL_VOLUME_6L:
+ case ARIZONA_DAC_VOLUME_LIMIT_6L:
+ case ARIZONA_NOISE_GATE_SELECT_6L:
+ case ARIZONA_OUTPUT_PATH_CONFIG_6R:
+ case ARIZONA_DAC_DIGITAL_VOLUME_6R:
+ case ARIZONA_DAC_VOLUME_LIMIT_6R:
+ case ARIZONA_NOISE_GATE_SELECT_6R:
+ case ARIZONA_DAC_AEC_CONTROL_1:
+ case ARIZONA_NOISE_GATE_CONTROL:
+ case ARIZONA_PDM_SPK1_CTRL_1:
+ case ARIZONA_PDM_SPK1_CTRL_2:
+ case ARIZONA_PDM_SPK2_CTRL_1:
+ case ARIZONA_PDM_SPK2_CTRL_2:
+ case ARIZONA_AIF1_BCLK_CTRL:
+ case ARIZONA_AIF1_TX_PIN_CTRL:
+ case ARIZONA_AIF1_RX_PIN_CTRL:
+ case ARIZONA_AIF1_RATE_CTRL:
+ case ARIZONA_AIF1_FORMAT:
+ case ARIZONA_AIF1_TX_BCLK_RATE:
+ case ARIZONA_AIF1_RX_BCLK_RATE:
+ case ARIZONA_AIF1_FRAME_CTRL_1:
+ case ARIZONA_AIF1_FRAME_CTRL_2:
+ case ARIZONA_AIF1_FRAME_CTRL_3:
+ case ARIZONA_AIF1_FRAME_CTRL_4:
+ case ARIZONA_AIF1_FRAME_CTRL_5:
+ case ARIZONA_AIF1_FRAME_CTRL_6:
+ case ARIZONA_AIF1_FRAME_CTRL_7:
+ case ARIZONA_AIF1_FRAME_CTRL_8:
+ case ARIZONA_AIF1_FRAME_CTRL_9:
+ case ARIZONA_AIF1_FRAME_CTRL_10:
+ case ARIZONA_AIF1_FRAME_CTRL_11:
+ case ARIZONA_AIF1_FRAME_CTRL_12:
+ case ARIZONA_AIF1_FRAME_CTRL_13:
+ case ARIZONA_AIF1_FRAME_CTRL_14:
+ case ARIZONA_AIF1_FRAME_CTRL_15:
+ case ARIZONA_AIF1_FRAME_CTRL_16:
+ case ARIZONA_AIF1_FRAME_CTRL_17:
+ case ARIZONA_AIF1_FRAME_CTRL_18:
+ case ARIZONA_AIF1_TX_ENABLES:
+ case ARIZONA_AIF1_RX_ENABLES:
+ case ARIZONA_AIF2_BCLK_CTRL:
+ case ARIZONA_AIF2_TX_PIN_CTRL:
+ case ARIZONA_AIF2_RX_PIN_CTRL:
+ case ARIZONA_AIF2_RATE_CTRL:
+ case ARIZONA_AIF2_FORMAT:
+ case ARIZONA_AIF2_TX_BCLK_RATE:
+ case ARIZONA_AIF2_RX_BCLK_RATE:
+ case ARIZONA_AIF2_FRAME_CTRL_1:
+ case ARIZONA_AIF2_FRAME_CTRL_2:
+ case ARIZONA_AIF2_FRAME_CTRL_3:
+ case ARIZONA_AIF2_FRAME_CTRL_4:
+ case ARIZONA_AIF2_FRAME_CTRL_11:
+ case ARIZONA_AIF2_FRAME_CTRL_12:
+ case ARIZONA_AIF2_TX_ENABLES:
+ case ARIZONA_AIF2_RX_ENABLES:
+ case ARIZONA_AIF3_BCLK_CTRL:
+ case ARIZONA_AIF3_TX_PIN_CTRL:
+ case ARIZONA_AIF3_RX_PIN_CTRL:
+ case ARIZONA_AIF3_RATE_CTRL:
+ case ARIZONA_AIF3_FORMAT:
+ case ARIZONA_AIF3_TX_BCLK_RATE:
+ case ARIZONA_AIF3_RX_BCLK_RATE:
+ case ARIZONA_AIF3_FRAME_CTRL_1:
+ case ARIZONA_AIF3_FRAME_CTRL_2:
+ case ARIZONA_AIF3_FRAME_CTRL_3:
+ case ARIZONA_AIF3_FRAME_CTRL_4:
+ case ARIZONA_AIF3_FRAME_CTRL_11:
+ case ARIZONA_AIF3_FRAME_CTRL_12:
+ case ARIZONA_AIF3_TX_ENABLES:
+ case ARIZONA_AIF3_RX_ENABLES:
+ case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+ case ARIZONA_SLIMBUS_RATES_1:
+ case ARIZONA_SLIMBUS_RATES_2:
+ case ARIZONA_SLIMBUS_RATES_3:
+ case ARIZONA_SLIMBUS_RATES_4:
+ case ARIZONA_SLIMBUS_RATES_5:
+ case ARIZONA_SLIMBUS_RATES_6:
+ case ARIZONA_SLIMBUS_RATES_7:
+ case ARIZONA_SLIMBUS_RATES_8:
+ case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+ case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+ case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+ case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+ case ARIZONA_MICMIX_INPUT_1_SOURCE:
+ case ARIZONA_MICMIX_INPUT_1_VOLUME:
+ case ARIZONA_MICMIX_INPUT_2_SOURCE:
+ case ARIZONA_MICMIX_INPUT_2_VOLUME:
+ case ARIZONA_MICMIX_INPUT_3_SOURCE:
+ case ARIZONA_MICMIX_INPUT_3_VOLUME:
+ case ARIZONA_MICMIX_INPUT_4_SOURCE:
+ case ARIZONA_MICMIX_INPUT_4_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_1_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_1_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_2_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_2_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_3_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_3_VOLUME:
+ case ARIZONA_NOISEMIX_INPUT_4_SOURCE:
+ case ARIZONA_NOISEMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT3RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT3RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT3RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT3RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT3RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT3RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT3RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT3RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT6LMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT6LMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT6LMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT6LMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT6LMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT6LMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT6LMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT6LMIX_INPUT_4_VOLUME:
+ case ARIZONA_OUT6RMIX_INPUT_1_SOURCE:
+ case ARIZONA_OUT6RMIX_INPUT_1_VOLUME:
+ case ARIZONA_OUT6RMIX_INPUT_2_SOURCE:
+ case ARIZONA_OUT6RMIX_INPUT_2_VOLUME:
+ case ARIZONA_OUT6RMIX_INPUT_3_SOURCE:
+ case ARIZONA_OUT6RMIX_INPUT_3_VOLUME:
+ case ARIZONA_OUT6RMIX_INPUT_4_SOURCE:
+ case ARIZONA_OUT6RMIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME:
+ case ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE:
+ case ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ1MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ1MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ2MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ2MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ3MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ3MIX_INPUT_4_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_2_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_2_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_3_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_3_VOLUME:
+ case ARIZONA_EQ4MIX_INPUT_4_SOURCE:
+ case ARIZONA_EQ4MIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+ case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+ case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP1LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP1LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP1RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP1RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP2LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP2LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP2LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP2LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP2LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP2LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP2RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP2RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP2RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP2RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP2RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP2RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP3LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP3LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP3LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP3LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP3LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP3LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP3RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP3RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP3RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP3RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP3RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP3RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP3RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4LMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4LMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP4LMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP4LMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP4LMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP4LMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP4LMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP4LMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP4RMIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4RMIX_INPUT_1_VOLUME:
+ case ARIZONA_DSP4RMIX_INPUT_2_SOURCE:
+ case ARIZONA_DSP4RMIX_INPUT_2_VOLUME:
+ case ARIZONA_DSP4RMIX_INPUT_3_SOURCE:
+ case ARIZONA_DSP4RMIX_INPUT_3_VOLUME:
+ case ARIZONA_DSP4RMIX_INPUT_4_SOURCE:
+ case ARIZONA_DSP4RMIX_INPUT_4_VOLUME:
+ case ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE:
+ case ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+ case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE:
+ case ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE:
+ case ARIZONA_GPIO1_CTRL:
+ case ARIZONA_GPIO2_CTRL:
+ case ARIZONA_GPIO3_CTRL:
+ case ARIZONA_GPIO4_CTRL:
+ case ARIZONA_GPIO5_CTRL:
+ case ARIZONA_IRQ_CTRL_1:
+ case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+ case ARIZONA_MISC_PAD_CTRL_1:
+ case ARIZONA_MISC_PAD_CTRL_2:
+ case ARIZONA_MISC_PAD_CTRL_3:
+ case ARIZONA_MISC_PAD_CTRL_4:
+ case ARIZONA_MISC_PAD_CTRL_5:
+ case ARIZONA_MISC_PAD_CTRL_6:
+ case ARIZONA_MISC_PAD_CTRL_7:
+ case ARIZONA_MISC_PAD_CTRL_8:
+ case ARIZONA_MISC_PAD_CTRL_9:
+ case ARIZONA_MISC_PAD_CTRL_10:
+ case ARIZONA_MISC_PAD_CTRL_11:
+ case ARIZONA_MISC_PAD_CTRL_12:
+ case ARIZONA_MISC_PAD_CTRL_13:
+ case ARIZONA_MISC_PAD_CTRL_14:
+ case ARIZONA_MISC_PAD_CTRL_15:
+ case ARIZONA_MISC_PAD_CTRL_16:
+ case ARIZONA_MISC_PAD_CTRL_17:
+ case ARIZONA_MISC_PAD_CTRL_18:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_INTERRUPT_STATUS_1_MASK:
+ case ARIZONA_INTERRUPT_STATUS_2_MASK:
+ case ARIZONA_INTERRUPT_STATUS_3_MASK:
+ case ARIZONA_INTERRUPT_STATUS_4_MASK:
+ case ARIZONA_INTERRUPT_STATUS_5_MASK:
+ case ARIZONA_INTERRUPT_CONTROL:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1_MASK:
+ case ARIZONA_IRQ2_STATUS_2_MASK:
+ case ARIZONA_IRQ2_STATUS_3_MASK:
+ case ARIZONA_IRQ2_STATUS_4_MASK:
+ case ARIZONA_IRQ2_STATUS_5_MASK:
+ case ARIZONA_IRQ2_CONTROL:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_WKUP_AND_TRIG:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_AOD_IRQ_MASK_IRQ1:
+ case ARIZONA_AOD_IRQ_MASK_IRQ2:
+ case ARIZONA_AOD_IRQ_RAW_STATUS:
+ case ARIZONA_JACK_DETECT_DEBOUNCE:
+ case ARIZONA_FX_CTRL1:
+ case ARIZONA_FX_CTRL2:
+ case ARIZONA_EQ1_1:
+ case ARIZONA_EQ1_2:
+ case ARIZONA_EQ1_3:
+ case ARIZONA_EQ1_4:
+ case ARIZONA_EQ1_5:
+ case ARIZONA_EQ1_6:
+ case ARIZONA_EQ1_7:
+ case ARIZONA_EQ1_8:
+ case ARIZONA_EQ1_9:
+ case ARIZONA_EQ1_10:
+ case ARIZONA_EQ1_11:
+ case ARIZONA_EQ1_12:
+ case ARIZONA_EQ1_13:
+ case ARIZONA_EQ1_14:
+ case ARIZONA_EQ1_15:
+ case ARIZONA_EQ1_16:
+ case ARIZONA_EQ1_17:
+ case ARIZONA_EQ1_18:
+ case ARIZONA_EQ1_19:
+ case ARIZONA_EQ1_20:
+ case ARIZONA_EQ1_21:
+ case ARIZONA_EQ2_1:
+ case ARIZONA_EQ2_2:
+ case ARIZONA_EQ2_3:
+ case ARIZONA_EQ2_4:
+ case ARIZONA_EQ2_5:
+ case ARIZONA_EQ2_6:
+ case ARIZONA_EQ2_7:
+ case ARIZONA_EQ2_8:
+ case ARIZONA_EQ2_9:
+ case ARIZONA_EQ2_10:
+ case ARIZONA_EQ2_11:
+ case ARIZONA_EQ2_12:
+ case ARIZONA_EQ2_13:
+ case ARIZONA_EQ2_14:
+ case ARIZONA_EQ2_15:
+ case ARIZONA_EQ2_16:
+ case ARIZONA_EQ2_17:
+ case ARIZONA_EQ2_18:
+ case ARIZONA_EQ2_19:
+ case ARIZONA_EQ2_20:
+ case ARIZONA_EQ2_21:
+ case ARIZONA_EQ3_1:
+ case ARIZONA_EQ3_2:
+ case ARIZONA_EQ3_3:
+ case ARIZONA_EQ3_4:
+ case ARIZONA_EQ3_5:
+ case ARIZONA_EQ3_6:
+ case ARIZONA_EQ3_7:
+ case ARIZONA_EQ3_8:
+ case ARIZONA_EQ3_9:
+ case ARIZONA_EQ3_10:
+ case ARIZONA_EQ3_11:
+ case ARIZONA_EQ3_12:
+ case ARIZONA_EQ3_13:
+ case ARIZONA_EQ3_14:
+ case ARIZONA_EQ3_15:
+ case ARIZONA_EQ3_16:
+ case ARIZONA_EQ3_17:
+ case ARIZONA_EQ3_18:
+ case ARIZONA_EQ3_19:
+ case ARIZONA_EQ3_20:
+ case ARIZONA_EQ3_21:
+ case ARIZONA_EQ4_1:
+ case ARIZONA_EQ4_2:
+ case ARIZONA_EQ4_3:
+ case ARIZONA_EQ4_4:
+ case ARIZONA_EQ4_5:
+ case ARIZONA_EQ4_6:
+ case ARIZONA_EQ4_7:
+ case ARIZONA_EQ4_8:
+ case ARIZONA_EQ4_9:
+ case ARIZONA_EQ4_10:
+ case ARIZONA_EQ4_11:
+ case ARIZONA_EQ4_12:
+ case ARIZONA_EQ4_13:
+ case ARIZONA_EQ4_14:
+ case ARIZONA_EQ4_15:
+ case ARIZONA_EQ4_16:
+ case ARIZONA_EQ4_17:
+ case ARIZONA_EQ4_18:
+ case ARIZONA_EQ4_19:
+ case ARIZONA_EQ4_20:
+ case ARIZONA_EQ4_21:
+ case ARIZONA_DRC1_CTRL1:
+ case ARIZONA_DRC1_CTRL2:
+ case ARIZONA_DRC1_CTRL3:
+ case ARIZONA_DRC1_CTRL4:
+ case ARIZONA_DRC1_CTRL5:
+ case ARIZONA_DRC2_CTRL1:
+ case ARIZONA_DRC2_CTRL2:
+ case ARIZONA_DRC2_CTRL3:
+ case ARIZONA_DRC2_CTRL4:
+ case ARIZONA_DRC2_CTRL5:
+ case ARIZONA_HPLPF1_1:
+ case ARIZONA_HPLPF1_2:
+ case ARIZONA_HPLPF2_1:
+ case ARIZONA_HPLPF2_2:
+ case ARIZONA_HPLPF3_1:
+ case ARIZONA_HPLPF3_2:
+ case ARIZONA_HPLPF4_1:
+ case ARIZONA_HPLPF4_2:
+ case ARIZONA_ASRC_ENABLE:
+ case ARIZONA_ASRC_STATUS:
+ case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ISRC_1_CTRL_1:
+ case ARIZONA_ISRC_1_CTRL_2:
+ case ARIZONA_ISRC_1_CTRL_3:
+ case ARIZONA_ISRC_2_CTRL_1:
+ case ARIZONA_ISRC_2_CTRL_2:
+ case ARIZONA_ISRC_2_CTRL_3:
+ case ARIZONA_ISRC_3_CTRL_1:
+ case ARIZONA_ISRC_3_CTRL_2:
+ case ARIZONA_ISRC_3_CTRL_3:
+ case ARIZONA_CLOCK_CONTROL:
+ case ARIZONA_ANC_SRC:
+ case ARIZONA_DSP_STATUS:
+ case ARIZONA_DSP1_CONTROL_1:
+ case ARIZONA_DSP1_CLOCKING_1:
+ case ARIZONA_DSP1_STATUS_1:
+ case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP2_CONTROL_1:
+ case ARIZONA_DSP2_CLOCKING_1:
+ case ARIZONA_DSP2_STATUS_1:
+ case ARIZONA_DSP2_STATUS_2:
+ case ARIZONA_DSP3_CONTROL_1:
+ case ARIZONA_DSP3_CLOCKING_1:
+ case ARIZONA_DSP3_STATUS_1:
+ case ARIZONA_DSP3_STATUS_2:
+ case ARIZONA_DSP4_CONTROL_1:
+ case ARIZONA_DSP4_CLOCKING_1:
+ case ARIZONA_DSP4_STATUS_1:
+ case ARIZONA_DSP4_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ARIZONA_SOFTWARE_RESET:
+ case ARIZONA_DEVICE_REVISION:
+ case ARIZONA_HAPTICS_STATUS:
+ case ARIZONA_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_SAMPLE_RATE_2_STATUS:
+ case ARIZONA_SAMPLE_RATE_3_STATUS:
+ case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+ case ARIZONA_MIC_DETECT_3:
+ case ARIZONA_HEADPHONE_DETECT_2:
+ case ARIZONA_INPUT_ENABLES_STATUS:
+ case ARIZONA_OUTPUT_STATUS_1:
+ case ARIZONA_RAW_OUTPUT_STATUS_1:
+ case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+ case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+ case ARIZONA_INTERRUPT_STATUS_1:
+ case ARIZONA_INTERRUPT_STATUS_2:
+ case ARIZONA_INTERRUPT_STATUS_3:
+ case ARIZONA_INTERRUPT_STATUS_4:
+ case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_1:
+ case ARIZONA_IRQ2_STATUS_2:
+ case ARIZONA_IRQ2_STATUS_3:
+ case ARIZONA_IRQ2_STATUS_4:
+ case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_2:
+ case ARIZONA_INTERRUPT_RAW_STATUS_3:
+ case ARIZONA_INTERRUPT_RAW_STATUS_4:
+ case ARIZONA_INTERRUPT_RAW_STATUS_5:
+ case ARIZONA_INTERRUPT_RAW_STATUS_6:
+ case ARIZONA_INTERRUPT_RAW_STATUS_7:
+ case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_IRQ_PIN_STATUS:
+ case ARIZONA_AOD_IRQ1:
+ case ARIZONA_AOD_IRQ2:
+ case ARIZONA_ASRC_STATUS:
+ case ARIZONA_DSP_STATUS:
+ case ARIZONA_DSP1_CONTROL_1:
+ case ARIZONA_DSP1_CLOCKING_1:
+ case ARIZONA_DSP1_STATUS_1:
+ case ARIZONA_DSP1_STATUS_2:
+ case ARIZONA_DSP2_STATUS_1:
+ case ARIZONA_DSP2_STATUS_2:
+ case ARIZONA_DSP3_STATUS_1:
+ case ARIZONA_DSP3_STATUS_2:
+ case ARIZONA_DSP4_STATUS_1:
+ case ARIZONA_DSP4_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config wm5110_spi_regmap = {
+ .reg_bits = 32,
+ .pad_bits = 16,
+ .val_bits = 16,
+
+ .max_register = ARIZONA_DSP1_STATUS_2,
+ .readable_reg = wm5110_readable_register,
+ .volatile_reg = wm5110_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wm5110_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm5110_spi_regmap);
+
+const struct regmap_config wm5110_i2c_regmap = {
+ .reg_bits = 32,
+ .val_bits = 16,
+
+ .max_register = ARIZONA_DSP1_STATUS_2,
+ .readable_reg = wm5110_readable_register,
+ .volatile_reg = wm5110_volatile_register,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = wm5110_reg_default,
+ .num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm5110_i2c_regmap);
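
The two regmap_config structures above only describe the register map; a driver still has to bind one of them to a bus device. Below is a minimal, hypothetical sketch (not part of this patch) of how the I2C variant would typically be attached in a probe function; the function name and error handling are illustrative only.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical probe showing how wm5110_i2c_regmap would be bound to an
 * I2C client.  regmap consults readable_reg() to reject accesses to
 * undefined registers and volatile_reg() to decide which registers must
 * bypass the REGCACHE_RBTREE cache and always hit the bus. */
static int wm5110_example_i2c_probe(struct i2c_client *i2c,
				    const struct i2c_device_id *id)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(i2c, &wm5110_i2c_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Reads of non-volatile registers are now served from the cache,
	 * seeded with wm5110_reg_default[]. */
	return 0;
}
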
diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
index f742745ff354..b90f3e06b6c9 100644
--- a/drivers/mfd/wm831x-otp.c
+++ b/drivers/mfd/wm831x-otp.c
@@ -18,6 +18,7 @@
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
+#include <linux/random.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/otp.h>
@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
int wm831x_otp_init(struct wm831x *wm831x)
{
+ char uuid[WM831X_UNIQUE_ID_LEN];
int ret;
ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
ret);
+ ret = wm831x_unique_id_read(wm831x, uuid);
+ if (ret == 0)
+ add_device_randomness(uuid, sizeof(uuid));
+ else
+ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
+
return ret;
}
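
The wm831x change above reads the OTP unique ID once at init and mixes it into the kernel's random pool. As a rough sketch of the general pattern (helper name hypothetical): add_device_randomness() takes any fixed per-device identifier and perturbs the pool without crediting entropy, so otherwise identical boards start from different pool states.

#include <linux/random.h>
#include <linux/types.h>

/* Mix a fixed per-device identifier (serial number, OTP ID, MAC, ...)
 * into the random pool.  No entropy is credited; the call only ensures
 * the pool contents differ between otherwise identical devices. */
static void example_seed_pool_from_id(const u8 *id, size_t len)
{
	add_device_randomness(id, len);
}
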
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 8a9b11ca076a..7c1ae24605d9 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -32,9 +32,6 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
-#define WM8350_UNLOCK_KEY 0x0013
-#define WM8350_LOCK_KEY 0x0000
-
#define WM8350_CLOCK_CONTROL_1 0x28
#define WM8350_AIF_TEST 0x74
@@ -63,181 +60,32 @@
/*
* WM8350 Device IO
*/
-static DEFINE_MUTEX(io_mutex);
static DEFINE_MUTEX(reg_lock_mutex);
-/* Perform a physical read from the device.
- */
-static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
- u16 *dest)
-{
- int i, ret;
- int bytes = num_regs * 2;
-
- dev_dbg(wm8350->dev, "volatile read\n");
- ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
-
- for (i = reg; i < reg + num_regs; i++) {
- /* Cache is CPU endian */
- dest[i - reg] = be16_to_cpu(dest[i - reg]);
-
- /* Mask out non-readable bits */
- dest[i - reg] &= wm8350_reg_io_map[i].readable;
- }
-
- dump(num_regs, dest);
-
- return ret;
-}
-
-static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
-{
- int i;
- int end = reg + num_regs;
- int ret = 0;
- int bytes = num_regs * 2;
-
- if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
- dev_err(wm8350->dev, "invalid reg %x\n",
- reg + num_regs - 1);
- return -EINVAL;
- }
-
- dev_dbg(wm8350->dev,
- "%s R%d(0x%2.2x) %d regs\n", __func__, reg, reg, num_regs);
-
-#if WM8350_BUS_DEBUG
- /* we can _safely_ read any register, but warn if read not supported */
- for (i = reg; i < end; i++) {
- if (!wm8350_reg_io_map[i].readable)
- dev_warn(wm8350->dev,
- "reg R%d is not readable\n", i);
- }
-#endif
-
- /* if any volatile registers are required, then read back all */
- for (i = reg; i < end; i++)
- if (wm8350_reg_io_map[i].vol)
- return wm8350_phys_read(wm8350, reg, num_regs, dest);
-
- /* no volatiles, then cache is good */
- dev_dbg(wm8350->dev, "cache read\n");
- memcpy(dest, &wm8350->reg_cache[reg], bytes);
- dump(num_regs, dest);
- return ret;
-}
-
-static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg)
-{
- if (reg == WM8350_SECURITY ||
- wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
- return 0;
-
- if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
- reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
- (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
- reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
- return 1;
- return 0;
-}
-
-static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
-{
- int i;
- int end = reg + num_regs;
- int bytes = num_regs * 2;
-
- if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
- dev_err(wm8350->dev, "invalid reg %x\n",
- reg + num_regs - 1);
- return -EINVAL;
- }
-
- /* it's generally not a good idea to write to RO or locked registers */
- for (i = reg; i < end; i++) {
- if (!wm8350_reg_io_map[i].writable) {
- dev_err(wm8350->dev,
- "attempted write to read only reg R%d\n", i);
- return -EINVAL;
- }
-
- if (is_reg_locked(wm8350, i)) {
- dev_err(wm8350->dev,
- "attempted write to locked reg R%d\n", i);
- return -EINVAL;
- }
-
- src[i - reg] &= wm8350_reg_io_map[i].writable;
-
- wm8350->reg_cache[i] =
- (wm8350->reg_cache[i] & ~wm8350_reg_io_map[i].writable)
- | src[i - reg];
-
- src[i - reg] = cpu_to_be16(src[i - reg]);
- }
-
- /* Actually write it out */
- return regmap_raw_write(wm8350->regmap, reg, src, bytes);
-}
-
/*
* Safe read, modify, write methods
*/
int wm8350_clear_bits(struct wm8350 *wm8350, u16 reg, u16 mask)
{
- u16 data;
- int err;
-
- mutex_lock(&io_mutex);
- err = wm8350_read(wm8350, reg, 1, &data);
- if (err) {
- dev_err(wm8350->dev, "read from reg R%d failed\n", reg);
- goto out;
- }
-
- data &= ~mask;
- err = wm8350_write(wm8350, reg, 1, &data);
- if (err)
- dev_err(wm8350->dev, "write to reg R%d failed\n", reg);
-out:
- mutex_unlock(&io_mutex);
- return err;
+ return regmap_update_bits(wm8350->regmap, reg, mask, 0);
}
EXPORT_SYMBOL_GPL(wm8350_clear_bits);
int wm8350_set_bits(struct wm8350 *wm8350, u16 reg, u16 mask)
{
- u16 data;
- int err;
-
- mutex_lock(&io_mutex);
- err = wm8350_read(wm8350, reg, 1, &data);
- if (err) {
- dev_err(wm8350->dev, "read from reg R%d failed\n", reg);
- goto out;
- }
-
- data |= mask;
- err = wm8350_write(wm8350, reg, 1, &data);
- if (err)
- dev_err(wm8350->dev, "write to reg R%d failed\n", reg);
-out:
- mutex_unlock(&io_mutex);
- return err;
+ return regmap_update_bits(wm8350->regmap, reg, mask, mask);
}
EXPORT_SYMBOL_GPL(wm8350_set_bits);
u16 wm8350_reg_read(struct wm8350 *wm8350, int reg)
{
- u16 data;
+ unsigned int data;
int err;
- mutex_lock(&io_mutex);
- err = wm8350_read(wm8350, reg, 1, &data);
+ err = regmap_read(wm8350->regmap, reg, &data);
if (err)
dev_err(wm8350->dev, "read from reg R%d failed\n", reg);
- mutex_unlock(&io_mutex);
return data;
}
EXPORT_SYMBOL_GPL(wm8350_reg_read);
@@ -245,13 +93,11 @@ EXPORT_SYMBOL_GPL(wm8350_reg_read);
int wm8350_reg_write(struct wm8350 *wm8350, int reg, u16 val)
{
int ret;
- u16 data = val;
- mutex_lock(&io_mutex);
- ret = wm8350_write(wm8350, reg, 1, &data);
+ ret = regmap_write(wm8350->regmap, reg, val);
+
if (ret)
dev_err(wm8350->dev, "write to reg R%d failed\n", reg);
- mutex_unlock(&io_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(wm8350_reg_write);
@@ -261,12 +107,11 @@ int wm8350_block_read(struct wm8350 *wm8350, int start_reg, int regs,
{
int err = 0;
- mutex_lock(&io_mutex);
- err = wm8350_read(wm8350, start_reg, regs, dest);
+ err = regmap_bulk_read(wm8350->regmap, start_reg, dest, regs);
if (err)
dev_err(wm8350->dev, "block read starting from R%d failed\n",
start_reg);
- mutex_unlock(&io_mutex);
+
return err;
}
EXPORT_SYMBOL_GPL(wm8350_block_read);
@@ -276,12 +121,11 @@ int wm8350_block_write(struct wm8350 *wm8350, int start_reg, int regs,
{
int ret = 0;
- mutex_lock(&io_mutex);
- ret = wm8350_write(wm8350, start_reg, regs, src);
+ ret = regmap_bulk_write(wm8350->regmap, start_reg, src, regs);
if (ret)
dev_err(wm8350->dev, "block write starting at R%d failed\n",
start_reg);
- mutex_unlock(&io_mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(wm8350_block_write);
@@ -295,15 +139,20 @@ EXPORT_SYMBOL_GPL(wm8350_block_write);
*/
int wm8350_reg_lock(struct wm8350 *wm8350)
{
- u16 key = WM8350_LOCK_KEY;
int ret;
+ mutex_lock(&reg_lock_mutex);
+
ldbg(__func__);
- mutex_lock(&io_mutex);
- ret = wm8350_write(wm8350, WM8350_SECURITY, 1, &key);
+
+ ret = wm8350_reg_write(wm8350, WM8350_SECURITY, WM8350_LOCK_KEY);
if (ret)
dev_err(wm8350->dev, "lock failed\n");
- mutex_unlock(&io_mutex);
+
+ wm8350->unlocked = false;
+
+ mutex_unlock(&reg_lock_mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(wm8350_reg_lock);
@@ -319,15 +168,20 @@ EXPORT_SYMBOL_GPL(wm8350_reg_lock);
*/
int wm8350_reg_unlock(struct wm8350 *wm8350)
{
- u16 key = WM8350_UNLOCK_KEY;
int ret;
+ mutex_lock(&reg_lock_mutex);
+
ldbg(__func__);
- mutex_lock(&io_mutex);
- ret = wm8350_write(wm8350, WM8350_SECURITY, 1, &key);
+
+ ret = wm8350_reg_write(wm8350, WM8350_SECURITY, WM8350_UNLOCK_KEY);
if (ret)
dev_err(wm8350->dev, "unlock failed\n");
- mutex_unlock(&io_mutex);
+
+ wm8350->unlocked = true;
+
+ mutex_unlock(&reg_lock_mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(wm8350_reg_unlock);
@@ -395,146 +249,6 @@ static irqreturn_t wm8350_auxadc_irq(int irq, void *irq_data)
}
/*
- * Cache is always host endian.
- */
-static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
-{
- int i, ret = 0;
- u16 value;
- const u16 *reg_map;
-
- switch (type) {
- case 0:
- switch (mode) {
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_0
- case 0:
- reg_map = wm8350_mode0_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_1
- case 1:
- reg_map = wm8350_mode1_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_2
- case 2:
- reg_map = wm8350_mode2_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_3
- case 3:
- reg_map = wm8350_mode3_defaults;
- break;
-#endif
- default:
- dev_err(wm8350->dev,
- "WM8350 configuration mode %d not supported\n",
- mode);
- return -EINVAL;
- }
- break;
-
- case 1:
- switch (mode) {
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_0
- case 0:
- reg_map = wm8351_mode0_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_1
- case 1:
- reg_map = wm8351_mode1_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_2
- case 2:
- reg_map = wm8351_mode2_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_3
- case 3:
- reg_map = wm8351_mode3_defaults;
- break;
-#endif
- default:
- dev_err(wm8350->dev,
- "WM8351 configuration mode %d not supported\n",
- mode);
- return -EINVAL;
- }
- break;
-
- case 2:
- switch (mode) {
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_0
- case 0:
- reg_map = wm8352_mode0_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_1
- case 1:
- reg_map = wm8352_mode1_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_2
- case 2:
- reg_map = wm8352_mode2_defaults;
- break;
-#endif
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_3
- case 3:
- reg_map = wm8352_mode3_defaults;
- break;
-#endif
- default:
- dev_err(wm8350->dev,
- "WM8352 configuration mode %d not supported\n",
- mode);
- return -EINVAL;
- }
- break;
-
- default:
- dev_err(wm8350->dev,
- "WM835x configuration mode %d not supported\n",
- mode);
- return -EINVAL;
- }
-
- wm8350->reg_cache =
- kmalloc(sizeof(u16) * (WM8350_MAX_REGISTER + 1), GFP_KERNEL);
- if (wm8350->reg_cache == NULL)
- return -ENOMEM;
-
- /* Read the initial cache state back from the device - this is
- * a PMIC so the device many not be in a virgin state and we
- * can't rely on the silicon values.
- */
- ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
- sizeof(u16) * (WM8350_MAX_REGISTER + 1));
- if (ret < 0) {
- dev_err(wm8350->dev,
- "failed to read initial cache values\n");
- goto out;
- }
-
- /* Mask out uncacheable/unreadable bits and the audio. */
- for (i = 0; i < WM8350_MAX_REGISTER; i++) {
- if (wm8350_reg_io_map[i].readable &&
- (i < WM8350_CLOCK_CONTROL_1 || i > WM8350_AIF_TEST)) {
- value = be16_to_cpu(wm8350->reg_cache[i]);
- value &= wm8350_reg_io_map[i].readable;
- wm8350->reg_cache[i] = value;
- } else
- wm8350->reg_cache[i] = reg_map[i];
- }
-
-out:
- kfree(wm8350->reg_cache);
- return ret;
-}
-
-/*
* Register a client device. This is non-fatal since there is no need to
* fail the entire device init due to a single platform device failing.
*/
@@ -681,18 +395,12 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
goto err;
}
- ret = wm8350_create_cache(wm8350, mask_rev, mode);
- if (ret < 0) {
- dev_err(wm8350->dev, "Failed to create register cache\n");
- return ret;
- }
-
mutex_init(&wm8350->auxadc_mutex);
init_completion(&wm8350->auxadc_done);
ret = wm8350_irq_init(wm8350, irq, pdata);
if (ret < 0)
- goto err_free;
+ goto err;
if (wm8350->irq_base) {
ret = request_threaded_irq(wm8350->irq_base +
@@ -730,8 +438,6 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
err_irq:
wm8350_irq_exit(wm8350);
-err_free:
- kfree(wm8350->reg_cache);
err:
return ret;
}
@@ -758,8 +464,6 @@ void wm8350_device_exit(struct wm8350 *wm8350)
free_irq(wm8350->irq_base + WM8350_IRQ_AUXADC_DATARDY, wm8350);
wm8350_irq_exit(wm8350);
-
- kfree(wm8350->reg_cache);
}
EXPORT_SYMBOL_GPL(wm8350_device_exit);
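
The wm8350_clear_bits()/wm8350_set_bits() conversion earlier in this file leans on regmap_update_bits() to perform the locked read-modify-write that the removed wm8350_read()/wm8350_write() pair open-coded. Ignoring regmap's internal caching and locking, the call behaves roughly like the sketch below (illustrative only, not the regmap implementation; the helper name is hypothetical).

#include <linux/regmap.h>

/* Approximate behaviour of regmap_update_bits(map, reg, mask, val). */
static int update_bits_sketch(struct regmap *map, unsigned int reg,
			      unsigned int mask, unsigned int val)
{
	unsigned int tmp;
	int ret;

	ret = regmap_read(map, reg, &tmp);
	if (ret)
		return ret;

	tmp = (tmp & ~mask) | (val & mask);

	return regmap_write(map, reg, tmp);
}

/* Hence wm8350_clear_bits(wm8350, reg, mask) passes val = 0 to clear the
 * masked bits, and wm8350_set_bits() passes val = mask to set them. */
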
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index a68aceb4e48c..2e57101c8d3d 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -23,11 +23,6 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static const struct regmap_config wm8350_regmap = {
- .reg_bits = 8,
- .val_bits = 16,
-};
-
static int wm8350_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 9fd01bf63c51..624ff90501cd 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -432,11 +432,9 @@ static void wm8350_irq_sync_unlock(struct irq_data *data)
for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
/* If there's been a change in the mask write it back
* to the hardware. */
- if (wm8350->irq_masks[i] !=
- wm8350->reg_cache[WM8350_INT_STATUS_1_MASK + i])
- WARN_ON(wm8350_reg_write(wm8350,
- WM8350_INT_STATUS_1_MASK + i,
- wm8350->irq_masks[i]));
+ WARN_ON(regmap_update_bits(wm8350->regmap,
+ WM8350_INT_STATUS_1_MASK + i,
+ 0xffff, wm8350->irq_masks[i]));
}
mutex_unlock(&wm8350->irq_lock);
diff --git a/drivers/mfd/wm8350-regmap.c b/drivers/mfd/wm8350-regmap.c
index e965139e5cd5..9efc64750fb6 100644
--- a/drivers/mfd/wm8350-regmap.c
+++ b/drivers/mfd/wm8350-regmap.c
@@ -14,3170 +14,18 @@
#include <linux/mfd/wm8350/core.h>
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_0
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8350_mode0_defaults[] = {
- 0x17FF, /* R0 - Reset/ID */
- 0x1000, /* R1 - ID */
- 0x0000, /* R2 */
- 0x1002, /* R3 - System Control 1 */
- 0x0004, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 - Power Up Interrupt Status */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 - Power Up Interrupt Status Mask */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3B00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - LOUT1 Volume */
- 0x00E4, /* R105 - ROUT1 Volume */
- 0x00E4, /* R106 - LOUT2 Volume */
- 0x02E4, /* R107 - ROUT2 Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 - AIF Test */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x03FC, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0FFC, /* R134 - GPIO Configuration (i/o) */
- 0x0FFC, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0013, /* R140 - GPIO Function Select 1 */
- 0x0000, /* R141 - GPIO Function Select 2 */
- 0x0000, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x002D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0000, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0000, /* R186 - DCDC3 Control */
- 0x0000, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0000, /* R189 - DCDC4 Control */
- 0x0000, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0000, /* R195 - DCDC6 Control */
- 0x0000, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x001B, /* R203 - LDO2 Control */
- 0x0000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001B, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001B, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 */
- 0x4000, /* R220 - RAM BIST 1 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 */
- 0x0000, /* R227 */
- 0x0000, /* R228 */
- 0x0000, /* R229 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 */
- 0x0000, /* R232 */
- 0x0000, /* R233 */
- 0x0000, /* R234 */
- 0x0000, /* R235 */
- 0x0000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0000, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0000, /* R243 */
- 0x0000, /* R244 */
- 0x0000, /* R245 */
- 0x0000, /* R246 */
- 0x0000, /* R247 */
- 0x0000, /* R248 */
- 0x0000, /* R249 */
- 0x0000, /* R250 */
- 0x0000, /* R251 */
- 0x0000, /* R252 */
- 0x0000, /* R253 */
- 0x0000, /* R254 */
- 0x0000, /* R255 */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_1
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8350_mode1_defaults[] = {
- 0x17FF, /* R0 - Reset/ID */
- 0x1000, /* R1 - ID */
- 0x0000, /* R2 */
- 0x1002, /* R3 - System Control 1 */
- 0x0014, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 - Power Up Interrupt Status */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 - Power Up Interrupt Status Mask */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3B00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - LOUT1 Volume */
- 0x00E4, /* R105 - ROUT1 Volume */
- 0x00E4, /* R106 - LOUT2 Volume */
- 0x02E4, /* R107 - ROUT2 Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 - AIF Test */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x03FC, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x00FB, /* R134 - GPIO Configuration (i/o) */
- 0x04FE, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0312, /* R140 - GPIO Function Select 1 */
- 0x1003, /* R141 - GPIO Function Select 2 */
- 0x1331, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x002D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x0062, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0026, /* R186 - DCDC3 Control */
- 0x0400, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0062, /* R189 - DCDC4 Control */
- 0x0400, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0026, /* R195 - DCDC6 Control */
- 0x0800, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x0006, /* R200 - LDO1 Control */
- 0x0400, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0006, /* R203 - LDO2 Control */
- 0x0400, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001B, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001B, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 */
- 0x4000, /* R220 - RAM BIST 1 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 */
- 0x0000, /* R227 */
- 0x0000, /* R228 */
- 0x0000, /* R229 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 */
- 0x0000, /* R232 */
- 0x0000, /* R233 */
- 0x0000, /* R234 */
- 0x0000, /* R235 */
- 0x0000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0000, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0000, /* R243 */
- 0x0000, /* R244 */
- 0x0000, /* R245 */
- 0x0000, /* R246 */
- 0x0000, /* R247 */
- 0x0000, /* R248 */
- 0x0000, /* R249 */
- 0x0000, /* R250 */
- 0x0000, /* R251 */
- 0x0000, /* R252 */
- 0x0000, /* R253 */
- 0x0000, /* R254 */
- 0x0000, /* R255 */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_2
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8350_mode2_defaults[] = {
- 0x17FF, /* R0 - Reset/ID */
- 0x1000, /* R1 - ID */
- 0x0000, /* R2 */
- 0x1002, /* R3 - System Control 1 */
- 0x0014, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 - Power Up Interrupt Status */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 - Power Up Interrupt Status Mask */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3B00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - LOUT1 Volume */
- 0x00E4, /* R105 - ROUT1 Volume */
- 0x00E4, /* R106 - LOUT2 Volume */
- 0x02E4, /* R107 - ROUT2 Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 - AIF Test */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x03FC, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x08FB, /* R134 - GPIO Configuration (i/o) */
- 0x0CFE, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0312, /* R140 - GPIO Function Select 1 */
- 0x0003, /* R141 - GPIO Function Select 2 */
- 0x2331, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x002D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x002E, /* R186 - DCDC3 Control */
- 0x0800, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x000E, /* R189 - DCDC4 Control */
- 0x0800, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0026, /* R195 - DCDC6 Control */
- 0x0C00, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001A, /* R200 - LDO1 Control */
- 0x0800, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0010, /* R203 - LDO2 Control */
- 0x0800, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x000A, /* R206 - LDO3 Control */
- 0x0C00, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001A, /* R209 - LDO4 Control */
- 0x0800, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 */
- 0x4000, /* R220 - RAM BIST 1 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 */
- 0x0000, /* R227 */
- 0x0000, /* R228 */
- 0x0000, /* R229 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 */
- 0x0000, /* R232 */
- 0x0000, /* R233 */
- 0x0000, /* R234 */
- 0x0000, /* R235 */
- 0x0000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0000, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0000, /* R243 */
- 0x0000, /* R244 */
- 0x0000, /* R245 */
- 0x0000, /* R246 */
- 0x0000, /* R247 */
- 0x0000, /* R248 */
- 0x0000, /* R249 */
- 0x0000, /* R250 */
- 0x0000, /* R251 */
- 0x0000, /* R252 */
- 0x0000, /* R253 */
- 0x0000, /* R254 */
- 0x0000, /* R255 */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8350_CONFIG_MODE_3
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8350_mode3_defaults[] = {
- 0x17FF, /* R0 - Reset/ID */
- 0x1000, /* R1 - ID */
- 0x0000, /* R2 */
- 0x1000, /* R3 - System Control 1 */
- 0x0004, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 - Power Up Interrupt Status */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 - Power Up Interrupt Status Mask */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3B00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - LOUT1 Volume */
- 0x00E4, /* R105 - ROUT1 Volume */
- 0x00E4, /* R106 - LOUT2 Volume */
- 0x02E4, /* R107 - ROUT2 Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 - AIF Test */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x03FC, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0A7B, /* R134 - GPIO Configuration (i/o) */
- 0x06FE, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x1312, /* R140 - GPIO Function Select 1 */
- 0x1030, /* R141 - GPIO Function Select 2 */
- 0x2231, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x002D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x000E, /* R186 - DCDC3 Control */
- 0x0400, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0026, /* R189 - DCDC4 Control */
- 0x0400, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0026, /* R195 - DCDC6 Control */
- 0x0400, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x001C, /* R203 - LDO2 Control */
- 0x0400, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001C, /* R206 - LDO3 Control */
- 0x0400, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001F, /* R209 - LDO4 Control */
- 0x0400, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 */
- 0x4000, /* R220 - RAM BIST 1 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 */
- 0x0000, /* R227 */
- 0x0000, /* R228 */
- 0x0000, /* R229 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 */
- 0x0000, /* R232 */
- 0x0000, /* R233 */
- 0x0000, /* R234 */
- 0x0000, /* R235 */
- 0x0000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0000, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0000, /* R243 */
- 0x0000, /* R244 */
- 0x0000, /* R245 */
- 0x0000, /* R246 */
- 0x0000, /* R247 */
- 0x0000, /* R248 */
- 0x0000, /* R249 */
- 0x0000, /* R250 */
- 0x0000, /* R251 */
- 0x0000, /* R252 */
- 0x0000, /* R253 */
- 0x0000, /* R254 */
- 0x0000, /* R255 */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_0
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8351_mode0_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0001, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0004, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0FFC, /* R134 - GPIO Configuration (i/o) */
- 0x0FFC, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0013, /* R140 - GPIO Function Select 1 */
- 0x0000, /* R141 - GPIO Function Select 2 */
- 0x0000, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 */
- 0x0000, /* R175 */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0000, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0000, /* R186 - DCDC3 Control */
- 0x0000, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0000, /* R189 - DCDC4 Control */
- 0x0000, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 */
- 0x0000, /* R193 */
- 0x0000, /* R194 */
- 0x0000, /* R195 */
- 0x0000, /* R196 */
- 0x0006, /* R197 */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x001B, /* R203 - LDO2 Control */
- 0x0000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001B, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001B, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overrides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparator overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 - FLL Test 1 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x1000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_1
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8351_mode1_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0001, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0204, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0CFB, /* R134 - GPIO Configuration (i/o) */
- 0x0C1F, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0300, /* R140 - GPIO Function Select 1 */
- 0x1110, /* R141 - GPIO Function Select 2 */
- 0x0013, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 */
- 0x0000, /* R175 */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0C00, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0026, /* R186 - DCDC3 Control */
- 0x0400, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0062, /* R189 - DCDC4 Control */
- 0x0800, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 */
- 0x0000, /* R193 */
- 0x0000, /* R194 */
- 0x000A, /* R195 */
- 0x1000, /* R196 */
- 0x0006, /* R197 */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x0006, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0010, /* R203 - LDO2 Control */
- 0x0C00, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001F, /* R206 - LDO3 Control */
- 0x0800, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x000A, /* R209 - LDO4 Control */
- 0x0800, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overrides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparator overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 - FLL Test 1 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x1000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x1000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_2
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8351_mode2_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0001, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0214, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formatting */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0110, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x09FA, /* R134 - GPIO Configuration (i/o) */
- 0x0DF6, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x1310, /* R140 - GPIO Function Select 1 */
- 0x0003, /* R141 - GPIO Function Select 2 */
- 0x2000, /* R142 - GPIO Function Select 3 */
- 0x0000, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 */
- 0x0000, /* R175 */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x001A, /* R180 - DCDC1 Control */
- 0x0800, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0056, /* R186 - DCDC3 Control */
- 0x0400, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0026, /* R189 - DCDC4 Control */
- 0x0C00, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 */
- 0x0000, /* R193 */
- 0x0000, /* R194 */
- 0x0026, /* R195 */
- 0x0C00, /* R196 */
- 0x0006, /* R197 */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0400, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0010, /* R203 - LDO2 Control */
- 0x0C00, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x0015, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001A, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overrides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparator overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 - FLL Test 1 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x1000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8351_CONFIG_MODE_3
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8351_mode3_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0001, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0204, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formating */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0010, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0BFB, /* R134 - GPIO Configuration (i/o) */
- 0x0FFD, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0310, /* R140 - GPIO Function Select 1 */
- 0x0001, /* R141 - GPIO Function Select 2 */
- 0x2300, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 */
- 0x0000, /* R175 */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0026, /* R186 - DCDC3 Control */
- 0x0800, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0062, /* R189 - DCDC4 Control */
- 0x1400, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 */
- 0x0000, /* R193 */
- 0x0000, /* R194 */
- 0x0026, /* R195 */
- 0x0400, /* R196 */
- 0x0006, /* R197 */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x0006, /* R200 - LDO1 Control */
- 0x0C00, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0016, /* R203 - LDO2 Control */
- 0x0000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x0019, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001A, /* R209 - LDO4 Control */
- 0x1000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparotor overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 - FLL Test 1 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x1000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_0
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8352_mode0_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0002, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0004, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formating */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0FFC, /* R134 - GPIO Configuration (i/o) */
- 0x0FFC, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0013, /* R140 - GPIO Function Select 1 */
- 0x0000, /* R141 - GPIO Function Select 2 */
- 0x0000, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0000, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0000, /* R186 - DCDC3 Control */
- 0x0000, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0000, /* R189 - DCDC4 Control */
- 0x0000, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0000, /* R195 - DCDC6 Control */
- 0x0000, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x001B, /* R203 - LDO2 Control */
- 0x0000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001B, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001B, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparotor overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x5000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
- 0x5100, /* R252 */
- 0x1000, /* R253 - DCDC6 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_1
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8352_mode1_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0002, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0204, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formating */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0BFB, /* R134 - GPIO Configuration (i/o) */
- 0x0FFF, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0300, /* R140 - GPIO Function Select 1 */
- 0x0000, /* R141 - GPIO Function Select 2 */
- 0x2300, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x0062, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0006, /* R186 - DCDC3 Control */
- 0x0800, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x0006, /* R189 - DCDC4 Control */
- 0x0C00, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0026, /* R195 - DCDC6 Control */
- 0x1000, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x0002, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x001A, /* R203 - LDO2 Control */
- 0x0000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001F, /* R206 - LDO3 Control */
- 0x0000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001F, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparotor overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x5000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
- 0x5100, /* R252 */
- 0x1000, /* R253 - DCDC6 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_2
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8352_mode2_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0002, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0204, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formating */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0000, /* R129 - GPIO Pin pull up Control */
- 0x0110, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x09DA, /* R134 - GPIO Configuration (i/o) */
- 0x0DD6, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x1310, /* R140 - GPIO Function Select 1 */
- 0x0033, /* R141 - GPIO Function Select 2 */
- 0x2000, /* R142 - GPIO Function Select 3 */
- 0x0000, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x000E, /* R180 - DCDC1 Control */
- 0x0800, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0056, /* R186 - DCDC3 Control */
- 0x1800, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x000E, /* R189 - DCDC4 Control */
- 0x1000, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0026, /* R195 - DCDC6 Control */
- 0x0C00, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001C, /* R200 - LDO1 Control */
- 0x0000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0006, /* R203 - LDO2 Control */
- 0x0400, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x001C, /* R206 - LDO3 Control */
- 0x1400, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x001A, /* R209 - LDO4 Control */
- 0x0000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparotor overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x5000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
- 0x5100, /* R252 */
- 0x1000, /* R253 - DCDC6 Test Controls */
-};
-#endif
-
-#ifdef CONFIG_MFD_WM8352_CONFIG_MODE_3
-
-#undef WM8350_HAVE_CONFIG_MODE
-#define WM8350_HAVE_CONFIG_MODE
-
-const u16 wm8352_mode3_defaults[] = {
- 0x6143, /* R0 - Reset/ID */
- 0x0000, /* R1 - ID */
- 0x0002, /* R2 - Revision */
- 0x1C02, /* R3 - System Control 1 */
- 0x0204, /* R4 - System Control 2 */
- 0x0000, /* R5 - System Hibernate */
- 0x8A00, /* R6 - Interface Control */
- 0x0000, /* R7 */
- 0x8000, /* R8 - Power mgmt (1) */
- 0x0000, /* R9 - Power mgmt (2) */
- 0x0000, /* R10 - Power mgmt (3) */
- 0x2000, /* R11 - Power mgmt (4) */
- 0x0E00, /* R12 - Power mgmt (5) */
- 0x0000, /* R13 - Power mgmt (6) */
- 0x0000, /* R14 - Power mgmt (7) */
- 0x0000, /* R15 */
- 0x0000, /* R16 - RTC Seconds/Minutes */
- 0x0100, /* R17 - RTC Hours/Day */
- 0x0101, /* R18 - RTC Date/Month */
- 0x1400, /* R19 - RTC Year */
- 0x0000, /* R20 - Alarm Seconds/Minutes */
- 0x0000, /* R21 - Alarm Hours/Day */
- 0x0000, /* R22 - Alarm Date/Month */
- 0x0320, /* R23 - RTC Time Control */
- 0x0000, /* R24 - System Interrupts */
- 0x0000, /* R25 - Interrupt Status 1 */
- 0x0000, /* R26 - Interrupt Status 2 */
- 0x0000, /* R27 */
- 0x0000, /* R28 - Under Voltage Interrupt status */
- 0x0000, /* R29 - Over Current Interrupt status */
- 0x0000, /* R30 - GPIO Interrupt Status */
- 0x0000, /* R31 - Comparator Interrupt Status */
- 0x3FFF, /* R32 - System Interrupts Mask */
- 0x0000, /* R33 - Interrupt Status 1 Mask */
- 0x0000, /* R34 - Interrupt Status 2 Mask */
- 0x0000, /* R35 */
- 0x0000, /* R36 - Under Voltage Interrupt status Mask */
- 0x0000, /* R37 - Over Current Interrupt status Mask */
- 0x0000, /* R38 - GPIO Interrupt Status Mask */
- 0x0000, /* R39 - Comparator Interrupt Status Mask */
- 0x0040, /* R40 - Clock Control 1 */
- 0x0000, /* R41 - Clock Control 2 */
- 0x3A00, /* R42 - FLL Control 1 */
- 0x7086, /* R43 - FLL Control 2 */
- 0xC226, /* R44 - FLL Control 3 */
- 0x0000, /* R45 - FLL Control 4 */
- 0x0000, /* R46 */
- 0x0000, /* R47 */
- 0x0000, /* R48 - DAC Control */
- 0x0000, /* R49 */
- 0x00C0, /* R50 - DAC Digital Volume L */
- 0x00C0, /* R51 - DAC Digital Volume R */
- 0x0000, /* R52 */
- 0x0040, /* R53 - DAC LR Rate */
- 0x0000, /* R54 - DAC Clock Control */
- 0x0000, /* R55 */
- 0x0000, /* R56 */
- 0x0000, /* R57 */
- 0x4000, /* R58 - DAC Mute */
- 0x0000, /* R59 - DAC Mute Volume */
- 0x0000, /* R60 - DAC Side */
- 0x0000, /* R61 */
- 0x0000, /* R62 */
- 0x0000, /* R63 */
- 0x8000, /* R64 - ADC Control */
- 0x0000, /* R65 */
- 0x00C0, /* R66 - ADC Digital Volume L */
- 0x00C0, /* R67 - ADC Digital Volume R */
- 0x0000, /* R68 - ADC Divider */
- 0x0000, /* R69 */
- 0x0040, /* R70 - ADC LR Rate */
- 0x0000, /* R71 */
- 0x0303, /* R72 - Input Control */
- 0x0000, /* R73 - IN3 Input Control */
- 0x0000, /* R74 - Mic Bias Control */
- 0x0000, /* R75 */
- 0x0000, /* R76 - Output Control */
- 0x0000, /* R77 - Jack Detect */
- 0x0000, /* R78 - Anti Pop Control */
- 0x0000, /* R79 */
- 0x0040, /* R80 - Left Input Volume */
- 0x0040, /* R81 - Right Input Volume */
- 0x0000, /* R82 */
- 0x0000, /* R83 */
- 0x0000, /* R84 */
- 0x0000, /* R85 */
- 0x0000, /* R86 */
- 0x0000, /* R87 */
- 0x0800, /* R88 - Left Mixer Control */
- 0x1000, /* R89 - Right Mixer Control */
- 0x0000, /* R90 */
- 0x0000, /* R91 */
- 0x0000, /* R92 - OUT3 Mixer Control */
- 0x0000, /* R93 - OUT4 Mixer Control */
- 0x0000, /* R94 */
- 0x0000, /* R95 */
- 0x0000, /* R96 - Output Left Mixer Volume */
- 0x0000, /* R97 - Output Right Mixer Volume */
- 0x0000, /* R98 - Input Mixer Volume L */
- 0x0000, /* R99 - Input Mixer Volume R */
- 0x0000, /* R100 - Input Mixer Volume */
- 0x0000, /* R101 */
- 0x0000, /* R102 */
- 0x0000, /* R103 */
- 0x00E4, /* R104 - OUT1L Volume */
- 0x00E4, /* R105 - OUT1R Volume */
- 0x00E4, /* R106 - OUT2L Volume */
- 0x02E4, /* R107 - OUT2R Volume */
- 0x0000, /* R108 */
- 0x0000, /* R109 */
- 0x0000, /* R110 */
- 0x0000, /* R111 - BEEP Volume */
- 0x0A00, /* R112 - AI Formating */
- 0x0000, /* R113 - ADC DAC COMP */
- 0x0020, /* R114 - AI ADC Control */
- 0x0020, /* R115 - AI DAC Control */
- 0x0000, /* R116 */
- 0x0000, /* R117 */
- 0x0000, /* R118 */
- 0x0000, /* R119 */
- 0x0000, /* R120 */
- 0x0000, /* R121 */
- 0x0000, /* R122 */
- 0x0000, /* R123 */
- 0x0000, /* R124 */
- 0x0000, /* R125 */
- 0x0000, /* R126 */
- 0x0000, /* R127 */
- 0x1FFF, /* R128 - GPIO Debounce */
- 0x0010, /* R129 - GPIO Pin pull up Control */
- 0x0000, /* R130 - GPIO Pull down Control */
- 0x0000, /* R131 - GPIO Interrupt Mode */
- 0x0000, /* R132 */
- 0x0000, /* R133 - GPIO Control */
- 0x0BFB, /* R134 - GPIO Configuration (i/o) */
- 0x0FFD, /* R135 - GPIO Pin Polarity / Type */
- 0x0000, /* R136 */
- 0x0000, /* R137 */
- 0x0000, /* R138 */
- 0x0000, /* R139 */
- 0x0310, /* R140 - GPIO Function Select 1 */
- 0x0001, /* R141 - GPIO Function Select 2 */
- 0x2300, /* R142 - GPIO Function Select 3 */
- 0x0003, /* R143 - GPIO Function Select 4 */
- 0x0000, /* R144 - Digitiser Control (1) */
- 0x0002, /* R145 - Digitiser Control (2) */
- 0x0000, /* R146 */
- 0x0000, /* R147 */
- 0x0000, /* R148 */
- 0x0000, /* R149 */
- 0x0000, /* R150 */
- 0x0000, /* R151 */
- 0x7000, /* R152 - AUX1 Readback */
- 0x7000, /* R153 - AUX2 Readback */
- 0x7000, /* R154 - AUX3 Readback */
- 0x7000, /* R155 - AUX4 Readback */
- 0x0000, /* R156 - USB Voltage Readback */
- 0x0000, /* R157 - LINE Voltage Readback */
- 0x0000, /* R158 - BATT Voltage Readback */
- 0x0000, /* R159 - Chip Temp Readback */
- 0x0000, /* R160 */
- 0x0000, /* R161 */
- 0x0000, /* R162 */
- 0x0000, /* R163 - Generic Comparator Control */
- 0x0000, /* R164 - Generic comparator 1 */
- 0x0000, /* R165 - Generic comparator 2 */
- 0x0000, /* R166 - Generic comparator 3 */
- 0x0000, /* R167 - Generic comparator 4 */
- 0xA00F, /* R168 - Battery Charger Control 1 */
- 0x0B06, /* R169 - Battery Charger Control 2 */
- 0x0000, /* R170 - Battery Charger Control 3 */
- 0x0000, /* R171 */
- 0x0000, /* R172 - Current Sink Driver A */
- 0x0000, /* R173 - CSA Flash control */
- 0x0000, /* R174 - Current Sink Driver B */
- 0x0000, /* R175 - CSB Flash control */
- 0x0000, /* R176 - DCDC/LDO requested */
- 0x032D, /* R177 - DCDC Active options */
- 0x0000, /* R178 - DCDC Sleep options */
- 0x0025, /* R179 - Power-check comparator */
- 0x0006, /* R180 - DCDC1 Control */
- 0x0400, /* R181 - DCDC1 Timeouts */
- 0x1006, /* R182 - DCDC1 Low Power */
- 0x0018, /* R183 - DCDC2 Control */
- 0x0000, /* R184 - DCDC2 Timeouts */
- 0x0000, /* R185 */
- 0x0050, /* R186 - DCDC3 Control */
- 0x0C00, /* R187 - DCDC3 Timeouts */
- 0x0006, /* R188 - DCDC3 Low Power */
- 0x000E, /* R189 - DCDC4 Control */
- 0x0400, /* R190 - DCDC4 Timeouts */
- 0x0006, /* R191 - DCDC4 Low Power */
- 0x0008, /* R192 - DCDC5 Control */
- 0x0000, /* R193 - DCDC5 Timeouts */
- 0x0000, /* R194 */
- 0x0029, /* R195 - DCDC6 Control */
- 0x0800, /* R196 - DCDC6 Timeouts */
- 0x0006, /* R197 - DCDC6 Low Power */
- 0x0000, /* R198 */
- 0x0003, /* R199 - Limit Switch Control */
- 0x001D, /* R200 - LDO1 Control */
- 0x1000, /* R201 - LDO1 Timeouts */
- 0x001C, /* R202 - LDO1 Low Power */
- 0x0017, /* R203 - LDO2 Control */
- 0x1000, /* R204 - LDO2 Timeouts */
- 0x001C, /* R205 - LDO2 Low Power */
- 0x0006, /* R206 - LDO3 Control */
- 0x1000, /* R207 - LDO3 Timeouts */
- 0x001C, /* R208 - LDO3 Low Power */
- 0x0010, /* R209 - LDO4 Control */
- 0x1000, /* R210 - LDO4 Timeouts */
- 0x001C, /* R211 - LDO4 Low Power */
- 0x0000, /* R212 */
- 0x0000, /* R213 */
- 0x0000, /* R214 */
- 0x0000, /* R215 - VCC_FAULT Masks */
- 0x001F, /* R216 - Main Bandgap Control */
- 0x0000, /* R217 - OSC Control */
- 0x9000, /* R218 - RTC Tick Control */
- 0x0000, /* R219 - Security1 */
- 0x4000, /* R220 */
- 0x0000, /* R221 */
- 0x0000, /* R222 */
- 0x0000, /* R223 */
- 0x0000, /* R224 - Signal overrides */
- 0x0000, /* R225 - DCDC/LDO status */
- 0x0000, /* R226 - Charger Overides/status */
- 0x0000, /* R227 - misc overrides */
- 0x0000, /* R228 - Supply overrides/status 1 */
- 0x0000, /* R229 - Supply overrides/status 2 */
- 0xE000, /* R230 - GPIO Pin Status */
- 0x0000, /* R231 - comparotor overrides */
- 0x0000, /* R232 */
- 0x0000, /* R233 - State Machine status */
- 0x1200, /* R234 */
- 0x0000, /* R235 */
- 0x8000, /* R236 */
- 0x0000, /* R237 */
- 0x0000, /* R238 */
- 0x0000, /* R239 */
- 0x0003, /* R240 */
- 0x0000, /* R241 */
- 0x0000, /* R242 */
- 0x0004, /* R243 */
- 0x0300, /* R244 */
- 0x0000, /* R245 */
- 0x0200, /* R246 */
- 0x0000, /* R247 */
- 0x1000, /* R248 - DCDC1 Test Controls */
- 0x5000, /* R249 */
- 0x1000, /* R250 - DCDC3 Test Controls */
- 0x1000, /* R251 - DCDC4 Test Controls */
- 0x5100, /* R252 */
- 0x1000, /* R253 - DCDC6 Test Controls */
-};
-#endif
-
/*
* Access masks.
*/
-const struct wm8350_reg_access wm8350_reg_io_map[] = {
+static const struct wm8350_reg_access {
+ u16 readable; /* Mask of readable bits */
+ u16 writable; /* Mask of writable bits */
+ u16 vol; /* Mask of volatile bits */
+} wm8350_reg_io_map[] = {
/* read write volatile */
- { 0xFFFF, 0xFFFF, 0xFFFF }, /* R0 - Reset/ID */
- { 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */
+ { 0xFFFF, 0xFFFF, 0x0000 }, /* R0 - Reset/ID */
+ { 0x7CFF, 0x0C00, 0x0000 }, /* R1 - ID */
{ 0x007F, 0x0000, 0x0000 }, /* R2 - ROM Mask ID */
{ 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */
{ 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */
@@ -3433,3 +281,59 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
{ 0x0000, 0x0000, 0x0000 }, /* R254 */
{ 0x0000, 0x0000, 0x0000 }, /* R255 */
};
+
+static bool wm8350_readable(struct device *dev, unsigned int reg)
+{
+ return wm8350_reg_io_map[reg].readable;
+}
+
+static bool wm8350_writeable(struct device *dev, unsigned int reg)
+{
+ struct wm8350 *wm8350 = dev_get_drvdata(dev);
+
+ if (!wm8350->unlocked) {
+ if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
+ reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
+ (reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
+ reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
+ return false;
+ }
+
+ return wm8350_reg_io_map[reg].writable;
+}
+
+static bool wm8350_volatile(struct device *dev, unsigned int reg)
+{
+ return wm8350_reg_io_map[reg].vol;
+}
+
+static bool wm8350_precious(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8350_SYSTEM_INTERRUPTS:
+ case WM8350_INT_STATUS_1:
+ case WM8350_INT_STATUS_2:
+ case WM8350_POWER_UP_INT_STATUS:
+ case WM8350_UNDER_VOLTAGE_INT_STATUS:
+ case WM8350_OVER_CURRENT_INT_STATUS:
+ case WM8350_GPIO_INT_STATUS:
+ case WM8350_COMPARATOR_INT_STATUS:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config wm8350_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .max_register = WM8350_MAX_REGISTER,
+ .readable_reg = wm8350_readable,
+ .writeable_reg = wm8350_writeable,
+ .volatile_reg = wm8350_volatile,
+ .precious_reg = wm8350_precious,
+};
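
The regmap_config above is what the rest of the WM8350 core hands to the regmap API; the readable/writeable/volatile/precious callbacks are consulted on every register access. As a rough illustration only (the probe-style function below, its name, and how the handle is stored are assumptions and not part of this patch), wiring the exported table up from the chip's I2C control interface would look roughly like this:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* Illustrative sketch only: hand the exported wm8350_regmap table to the
 * regmap core from the bus driver.  The function name and the use of
 * i2c_set_clientdata() are assumptions made for this example. */
static int wm8350_regmap_setup_sketch(struct i2c_client *i2c)
{
        struct regmap *regmap;

        regmap = regmap_init_i2c(i2c, &wm8350_regmap);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);

        i2c_set_clientdata(i2c, regmap);
        return 0;
}
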
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 1e321d349777..eec74aa55fdf 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -283,9 +283,24 @@ static int wm8994_suspend(struct device *dev)
wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET,
wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET));
- regcache_cache_only(wm8994->regmap, true);
regcache_mark_dirty(wm8994->regmap);
+ /* Restore GPIO registers to prevent problems with mismatched
+ * pin configurations.
+ */
+ ret = regcache_sync_region(wm8994->regmap, WM8994_GPIO_1,
+ WM8994_GPIO_11);
+ if (ret != 0)
+ dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+
+ /* In case one of the GPIOs is used as a wake input. */
+ ret = regcache_sync_region(wm8994->regmap,
+ WM8994_INTERRUPT_STATUS_1_MASK,
+ WM8994_INTERRUPT_STATUS_1_MASK);
+ if (ret != 0)
+ dev_err(dev, "Failed to restore interrupt mask: %d\n", ret);
+
+ regcache_cache_only(wm8994->regmap, true);
wm8994->suspended = true;
ret = regulator_bulk_disable(wm8994->num_supplies,
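
The reordering above syncs the GPIO block and the first interrupt mask back to the hardware before the regmap is switched to cache-only mode, so pin configuration and wake sources survive the software reset. The matching resume side is not part of this hunk; the usual regmap idiom, shown here only as a hedged sketch with regulator handling omitted, is to leave cache-only mode and write back everything marked dirty:

#include <linux/device.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/regmap.h>

/* Hedged sketch of the resume-side counterpart (not from this patch):
 * drop cache-only mode, then sync every register that was marked dirty
 * while the device was powered down. */
static int wm8994_resume_sketch(struct device *dev)
{
        struct wm8994 *wm8994 = dev_get_drvdata(dev);
        int ret;

        regcache_cache_only(wm8994->regmap, false);

        ret = regcache_sync(wm8994->regmap);
        if (ret != 0)
                dev_err(dev, "Failed to restore register map: %d\n", ret);

        return ret;
}
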
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index f1837f669755..0aac4aff17a5 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -21,6 +21,7 @@
#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/registers.h>
#include <linux/delay.h>
@@ -139,6 +140,8 @@ static struct regmap_irq_chip wm8994_irq_chip = {
int wm8994_irq_init(struct wm8994 *wm8994)
{
int ret;
+ unsigned long irqflags;
+ struct wm8994_pdata *pdata = wm8994->dev->platform_data;
if (!wm8994->irq) {
dev_warn(wm8994->dev,
@@ -147,8 +150,13 @@ int wm8994_irq_init(struct wm8994 *wm8994)
return 0;
}
+ /* select user or default irq flags */
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ if (pdata->irq_flags)
+ irqflags = pdata->irq_flags;
+
ret = regmap_add_irq_chip(wm8994->regmap, wm8994->irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ irqflags,
wm8994->irq_base, &wm8994_irq_chip,
&wm8994->irq_data);
if (ret != 0) {
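
With the hunk above, a board can override the hard-coded IRQF_TRIGGER_HIGH | IRQF_ONESHOT by filling in the irq_flags field of its WM8994 platform data (the field comes from the pdata header now included at the top of this file). A minimal, purely illustrative board-file fragment, assuming the codec's interrupt line is wired active-low on that board:

#include <linux/interrupt.h>
#include <linux/mfd/wm8994/pdata.h>

/* Hypothetical board data: request a low-level trigger because the IRQ
 * line is assumed to be active-low in this example. */
static struct wm8994_pdata board_wm8994_pdata = {
        .irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT,
};
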
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 154f3ef07631..98a442da892a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -64,6 +64,7 @@ config AB8500_PWM
bool "AB8500 PWM support"
depends on AB8500_CORE && ARCH_U8500
select HAVE_PWM
+ depends on !PWM
help
This driver exports functions to enable/disable/config/free Pulse
Width Modulation in the Analog Baseband Chip AB8500.
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 042a8fe4efaa..d7a9aa14e5d5 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,16 +142,10 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ab8500_pwm_match[] = {
- { .compatible = "stericsson,ab8500-pwm", },
- {}
-};
-
static struct platform_driver ab8500_pwm_driver = {
.driver = {
.name = "ab8500-pwm",
.owner = THIS_MODULE,
- .of_match_table = ab8500_pwm_match,
},
.probe = ab8500_pwm_probe,
.remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 28adefe70f96..08aad69c8da4 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -477,6 +477,8 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
int i, n, out;
buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(cp_type); i++)
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index c6ffbbe5a6c0..d78c05e693f7 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -1253,7 +1253,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
if (dev->wd_timeout)
*slots -= mei_data2slots(MEI_START_WD_DATA_SIZE);
else
- *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE);
+ *slots -= mei_data2slots(MEI_WD_PARAMS_SIZE);
}
}
if (dev->stop)
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 092330208869..7422c7652845 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -925,6 +925,27 @@ static struct miscdevice mei_misc_device = {
};
/**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ u32 reg;
+ if (ent->device == MEI_DEV_ID_PBG_1) {
+ pci_read_config_dword(pdev, 0x48, &reg);
+ /* make sure that bit 9 is up and bit 10 is down */
+ if ((reg & 0x600) == 0x200) {
+ dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+ return false;
+ }
+ }
+ return true;
+}
+/**
* mei_probe - Device Initialization Routine
*
* @pdev: PCI device structure
@@ -939,6 +960,12 @@ static int __devinit mei_probe(struct pci_dev *pdev,
int err;
mutex_lock(&mei_mutex);
+
+ if (!mei_quirk_probe(pdev, ent)) {
+ err = -ENODEV;
+ goto end;
+ }
+
if (mei_device) {
err = -EEXIST;
goto end;
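
The quirk above reads PCI configuration dword 0x48 and refuses to bind when bit 9 is set while bit 10 is clear, which is what the (reg & 0x600) == 0x200 test encodes. The same check written with bit macros, purely for clarity (the helper name is an assumption, not part of the driver):

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative rewrite of the quirk test: the ME interface is treated
 * as invalid when bit 9 is set and bit 10 is clear. */
static bool mei_me_interface_invalid_sketch(u32 reg)
{
        return (reg & (BIT(9) | BIT(10))) == BIT(9);
}
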
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 87b251ab6ec5..b9e2000969f0 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,6 +18,8 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
@@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+static int xpc_mq_node = -1;
+
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
@@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
#if defined CONFIG_X86_64
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
UV_AFFINITY_CPU);
- if (mq->irq < 0) {
- dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
- -mq->irq);
+ if (mq->irq < 0)
return mq->irq;
- }
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
@@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
- page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
- pg_order);
+ page = alloc_pages_exact_node(nid,
+ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
@@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
+static int
+xpc_init_mq_node(int nid)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_activate_mq_uv =
+ xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
+ XPC_ACTIVATE_IRQ_NAME,
+ xpc_handle_activate_IRQ_uv);
+ if (!IS_ERR(xpc_activate_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_activate_mq_uv)) {
+ put_online_cpus();
+ return PTR_ERR(xpc_activate_mq_uv);
+ }
+
+ for_each_cpu(cpu, cpumask_of_node(nid)) {
+ xpc_notify_mq_uv =
+ xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
+ XPC_NOTIFY_IRQ_NAME,
+ xpc_handle_notify_IRQ_uv);
+ if (!IS_ERR(xpc_notify_mq_uv))
+ break;
+ }
+ if (IS_ERR(xpc_notify_mq_uv)) {
+ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ put_online_cpus();
+ return PTR_ERR(xpc_notify_mq_uv);
+ }
+
+ put_online_cpus();
+ return 0;
+}
+
int
xpc_init_uv(void)
{
+ int nid;
+ int ret = 0;
+
xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
@@ -1742,21 +1785,21 @@ xpc_init_uv(void)
return -E2BIG;
}
- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
- XPC_ACTIVATE_IRQ_NAME,
- xpc_handle_activate_IRQ_uv);
- if (IS_ERR(xpc_activate_mq_uv))
- return PTR_ERR(xpc_activate_mq_uv);
+ if (xpc_mq_node < 0)
+ for_each_online_node(nid) {
+ ret = xpc_init_mq_node(nid);
- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
- XPC_NOTIFY_IRQ_NAME,
- xpc_handle_notify_IRQ_uv);
- if (IS_ERR(xpc_notify_mq_uv)) {
- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
- return PTR_ERR(xpc_notify_mq_uv);
- }
+ if (!ret)
+ break;
+ }
+ else
+ ret = xpc_init_mq_node(xpc_mq_node);
- return 0;
+ if (ret < 0)
+ dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
+ -ret);
+
+ return ret;
}
void
@@ -1765,3 +1808,6 @@ xpc_exit_uv(void)
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
+
+module_param(xpc_mq_node, int, 0);
+MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 2b62232c2c6a..acfaeeb9e01a 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -349,6 +349,11 @@ void st_int_recv(void *disc_data,
st_gdata->rx_skb = alloc_skb(
st_gdata->list[type]->max_frame_size,
GFP_ATOMIC);
+ if (st_gdata->rx_skb == NULL) {
+ pr_err("out of memory: dropping\n");
+ goto done;
+ }
+
skb_reserve(st_gdata->rx_skb,
st_gdata->list[type]->reserve);
/* next 2 required for BT only */
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 1ff460a8e9c7..93b4d67cc4a3 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -87,7 +87,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data)
/* communicate to platform about chip wakeup */
kim_data = st_data->kim_data;
pdata = kim_data->kim_pdev->dev.platform_data;
- if (pdata->chip_asleep)
+ if (pdata->chip_awake)
pdata->chip_awake(NULL);
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f1c84decb192..172a768036d8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- if (req->cmd_flags & REQ_SECURE)
+ if (req->cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
@@ -1716,6 +1717,7 @@ force_ro_fail:
#define CID_MANFID_SANDISK 0x2
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
static const struct mmc_fixup blk_fixups[] =
{
@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] =
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
END_FIXUP
};
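
The new fixup entries only set a card quirk bit; the behavioural change is the gate added earlier in this file, where a REQ_SECURE discard falls back to a normal discard when MMC_QUIRK_SEC_ERASE_TRIM_BROKEN is set. For context, quirk tables like blk_fixups are matched against the card once when the block driver binds to it; the fragment below is an illustrative sketch only (the function name is an assumption) and mirrors how the block driver's probe runs the table:

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/printk.h>

/* Illustrative sketch: quirks are matched against the card's CID once
 * at probe time; the request path afterwards only tests card->quirks. */
static void mmc_blk_apply_quirks_sketch(struct mmc_card *card)
{
        mmc_fixup_device(card, blk_fixups);

        if (card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)
                pr_info("%s: secure erase/trim disabled by quirk\n",
                        mmc_hostname(card->host));
}
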
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 322412cec4ee..a53c7c478e05 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -81,6 +81,7 @@ struct atmel_mci_caps {
bool has_bad_data_ordering;
bool need_reset_after_xfer;
bool need_blksz_mul_4;
+ bool need_notbusy_for_read_ops;
};
struct atmel_mci_dma {
@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv)
__func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
- if (host->data->flags & MMC_DATA_WRITE) {
+ if (host->caps.need_notbusy_for_read_ops ||
+ (host->data->flags & MMC_DATA_WRITE)) {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_bad_data_ordering = 1;
host->caps.need_reset_after_xfer = 1;
host->caps.need_blksz_mul_4 = 1;
+ host->caps.need_notbusy_for_read_ops = 0;
/* keep only major version number */
switch (version & 0xf00) {
@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
+ host->caps.need_notbusy_for_read_ops = 1;
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 03666174ca48..a17dd7363ceb 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -49,13 +49,6 @@
#define bfin_write_SDH_CFG bfin_write_RSI_CFG
#endif
-struct dma_desc_array {
- unsigned long start_addr;
- unsigned short cfg;
- unsigned short x_count;
- short x_modify;
-} __packed;
-
struct sdh_host {
struct mmc_host *mmc;
spinlock_t lock;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 72dc3cde646d..af40d227bece 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
u32 div;
+ u32 clk_en_a;
if (slot->clock != host->current_speed) {
div = host->bus_hz / slot->clock;
@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
- /* enable clock */
- mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
- SDMMC_CLKEN_LOW_PWR) << slot->id));
+ /* enable clock; only low power if no SDIO */
+ clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
+ if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
+ clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
+ mci_writel(host, CLKENA, clk_en_a);
/* inform CIU */
mci_send_cmd(slot,
@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+/*
+ * Disable low power mode.
+ *
+ * Low power mode will stop the card clock when idle. According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ *
+ * This function is fast if low power mode is already disabled.
+ */
+static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ u32 clk_en_a;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+
+ clk_en_a = mci_readl(host, CLKENA);
+
+ if (clk_en_a & clken_low_pwr) {
+ mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+}
+
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
if (enb) {
+ /*
+ * Turn off low power mode if it was enabled. This is a bit of
+ * a heavy operation and we disable / enable IRQs a lot, so
+ * we'll leave low power mode disabled and it will get
+ * re-enabled in dw_mci_setup_bus().
+ */
+ dw_mci_disable_low_power(slot);
+
mci_writel(host, INTMASK,
(int_mask | SDMMC_INT_SDIO(slot->id)));
} else {
@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
nbytes += len;
remain -= len;
} while (remain);
- sg_miter->consumed = offset;
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- host->data_status = status;
- data->bytes_xfered += nbytes;
- sg_miter_stop(sg_miter);
- host->sg = NULL;
- smp_wmb();
-
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
- tasklet_schedule(&host->tasklet);
- return;
- }
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
data->bytes_xfered += nbytes;
@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
nbytes += len;
remain -= len;
} while (remain);
- sg_miter->consumed = offset;
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- host->data_status = status;
- data->bytes_xfered += nbytes;
- sg_miter_stop(sg_miter);
- host->sg = NULL;
-
- smp_wmb();
-
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
- tasklet_schedule(&host->tasklet);
- return;
- }
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
data->bytes_xfered += nbytes;
@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
- u32 status, pending;
+ u32 pending;
unsigned int pass_count = 0;
int i;
do {
- status = mci_readl(host, RINTSTS);
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
- host->cmd_status = status;
+ host->cmd_status = pending;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
}
@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
- host->data_status = status;
+ host->data_status = pending;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
- SDMMC_INT_SBE | SDMMC_INT_EBE)))
- tasklet_schedule(&host->tasklet);
+ tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
- host->data_status = status;
+ host->data_status = pending;
smp_wmb();
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CMD_DONE) {
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
- dw_mci_cmd_interrupt(host, status);
+ dw_mci_cmd_interrupt(host, pending);
}
if (pending & SDMMC_INT_CD) {
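
Read together, the dw_mmc hunks above implement one policy: SDMMC_CLKEN_LOW_PWR gates the card clock when the bus is idle, which also stops SDIO interrupt delivery, so low-power gating is only requested in dw_mci_setup_bus() when the slot's SDIO interrupt is masked, and dw_mci_disable_low_power() clears it before the interrupt is unmasked. A condensed, illustrative sketch of that per-slot CLKENA handling follows; it uses only register helpers that appear in the diff, and the wrapper name is invented for the example.

	/* Sketch only (assumes the dw_mmc.c context above); not driver code. */
	static void sketch_update_clkena(struct dw_mci *host,
					 struct dw_mci_slot *slot,
					 bool sdio_irq_enabled)
	{
		u32 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

		/* Low-power clock gating is only safe while SDIO IRQs are unused. */
		if (!sdio_irq_enabled)
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;

		mci_writel(host, CLKENA, clk_en_a);
		/* Have the CIU latch the new clock-enable bits. */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

In this shape, dw_mci_setup_bus() corresponds to calling the helper with sdio_irq_enabled derived from INTMASK, and dw_mci_enable_sdio_irq() calls it with true before setting SDMMC_INT_SDIO in INTMASK.
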
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 3b9136c1a475..a61cb5fca22d 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -839,6 +839,10 @@ out:
if (r)
release_resource(r);
if (mmc)
+ if (!IS_ERR_OR_NULL(host->clk)) {
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+ }
mmc_free_host(mmc);
return ret;
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index a51f9309ffbb..ad3fcea1269e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
writel(stat & MXS_MMC_IRQ_BITS,
host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
+ spin_unlock(&host->lock);
+
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
- spin_unlock(&host->lock);
-
if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
cmd->error = -ETIMEDOUT;
else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
-
- if (readl(host->base + HW_SSP_STATUS(host)) &
- BM_SSP_STATUS_SDIO_IRQ)
- mmc_signal_sdio_irq(host->mmc);
-
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (enable && readl(host->base + HW_SSP_STATUS(host)) &
+ BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
}
static const struct mmc_host_ops mxs_mmc_ops = {
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 3e8dcf8d2e05..a5999a74496a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -17,10 +17,12 @@
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
@@ -128,6 +130,10 @@ struct mmc_omap_host {
unsigned char id; /* 16xx chips have 2 MMC blocks */
struct clk * iclk;
struct clk * fclk;
+ struct dma_chan *dma_rx;
+ u32 dma_rx_burst;
+ struct dma_chan *dma_tx;
+ u32 dma_tx_burst;
struct resource *mem_res;
void __iomem *virt_base;
unsigned int phys_base;
@@ -153,12 +159,8 @@ struct mmc_omap_host {
unsigned use_dma:1;
unsigned brs_received:1, dma_done:1;
- unsigned dma_is_read:1;
unsigned dma_in_use:1;
- int dma_ch;
spinlock_t dma_lock;
- struct timer_list dma_timer;
- unsigned dma_len;
struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
struct mmc_omap_slot *current_slot;
@@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
int abort)
{
enum dma_data_direction dma_data_dir;
+ struct device *dev = mmc_dev(host->mmc);
+ struct dma_chan *c;
- BUG_ON(host->dma_ch < 0);
- if (data->error)
- omap_stop_dma(host->dma_ch);
- /* Release DMA channel lazily */
- mod_timer(&host->dma_timer, jiffies + HZ);
- if (data->flags & MMC_DATA_WRITE)
+ if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
- else
+ c = host->dma_tx;
+ } else {
dma_data_dir = DMA_FROM_DEVICE;
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
- dma_data_dir);
+ c = host->dma_rx;
+ }
+ if (c) {
+ if (data->error) {
+ dmaengine_terminate_all(c);
+ /* Claim nothing transferred on error... */
+ data->bytes_xfered = 0;
+ }
+ dev = c->device->dev;
+ }
+ dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
}
static void mmc_omap_send_stop_work(struct work_struct *work)
@@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
}
static void
-mmc_omap_dma_timer(unsigned long data)
-{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
-
- BUG_ON(host->dma_ch < 0);
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
-}
-
-static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
unsigned long flags;
@@ -669,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
- int n;
+ int n, nwords;
if (host->buffer_bytes_left == 0) {
host->sg_idx++;
@@ -679,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
n = 64;
if (n > host->buffer_bytes_left)
n = host->buffer_bytes_left;
+
+ nwords = n / 2;
+ nwords += n & 1; /* handle odd number of bytes to transfer */
+
host->buffer_bytes_left -= n;
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
if (write) {
- __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+ __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
} else {
- __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+ __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
}
+
+ host->buffer += nwords;
}
static inline void mmc_omap_report_irq(u16 status)
@@ -891,159 +898,15 @@ static void mmc_omap_cover_handler(unsigned long param)
jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}
-/* Prepare to transfer the next segment of a scatterlist */
-static void
-mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
-{
- int dma_ch = host->dma_ch;
- unsigned long data_addr;
- u16 buf, frame;
- u32 count;
- struct scatterlist *sg = &data->sg[host->sg_idx];
- int src_port = 0;
- int dst_port = 0;
- int sync_dev = 0;
-
- data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- frame = data->blksz;
- count = sg_dma_len(sg);
-
- if ((data->blocks == 1) && (count > data->blksz))
- count = frame;
-
- host->dma_len = count;
-
- /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
- * Use 16 or 32 word frames when the blocksize is at least that large.
- * Blocksize is usually 512 bytes; but not for some SD reads.
- */
- if (cpu_is_omap15xx() && frame > 32)
- frame = 32;
- else if (frame > 64)
- frame = 64;
- count /= frame;
- frame >>= 1;
-
- if (!(data->flags & MMC_DATA_WRITE)) {
- buf = 0x800f | ((frame - 1) << 8);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_TIPB;
- dst_port = OMAP_DMA_PORT_EMIFF;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_RX;
-
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_dest_data_pack(dma_ch, 1);
- omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- } else {
- buf = 0x0f80 | ((frame - 1) << 0);
-
- if (cpu_class_is_omap1()) {
- src_port = OMAP_DMA_PORT_EMIFF;
- dst_port = OMAP_DMA_PORT_TIPB;
- }
- if (cpu_is_omap24xx())
- sync_dev = OMAP24XX_DMA_MMC1_TX;
-
- omap_set_dma_dest_params(dma_ch, dst_port,
- OMAP_DMA_AMODE_CONSTANT,
- data_addr, 0, 0);
- omap_set_dma_src_params(dma_ch, src_port,
- OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sg), 0, 0);
- omap_set_dma_src_data_pack(dma_ch, 1);
- omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
- }
-
- /* Max limit for DMA frame count is 0xffff */
- BUG_ON(count > 0xffff);
-
- OMAP_MMC_WRITE(host, BUF, buf);
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
- frame, count, OMAP_DMA_SYNC_FRAME,
- sync_dev, 0);
-}
-
-/* A scatterlist segment completed */
-static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
+static void mmc_omap_dma_callback(void *priv)
{
- struct mmc_omap_host *host = (struct mmc_omap_host *) data;
- struct mmc_data *mmcdat = host->data;
+ struct mmc_omap_host *host = priv;
+ struct mmc_data *data = host->data;
- if (unlikely(host->dma_ch < 0)) {
- dev_err(mmc_dev(host->mmc),
- "DMA callback while DMA not enabled\n");
- return;
- }
- /* FIXME: We really should do something to _handle_ the errors */
- if (ch_status & OMAP1_DMA_TOUT_IRQ) {
- dev_err(mmc_dev(host->mmc),"DMA timeout\n");
- return;
- }
- if (ch_status & OMAP_DMA_DROP_IRQ) {
- dev_err(mmc_dev(host->mmc), "DMA sync error\n");
- return;
- }
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- return;
- }
- mmcdat->bytes_xfered += host->dma_len;
- host->sg_idx++;
- if (host->sg_idx < host->sg_len) {
- mmc_omap_prepare_dma(host, host->data);
- omap_start_dma(host->dma_ch);
- } else
- mmc_omap_dma_done(host, host->data);
-}
-
-static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
-{
- const char *dma_dev_name;
- int sync_dev, dma_ch, is_read, r;
-
- is_read = !(data->flags & MMC_DATA_WRITE);
- del_timer_sync(&host->dma_timer);
- if (host->dma_ch >= 0) {
- if (is_read == host->dma_is_read)
- return 0;
- omap_free_dma(host->dma_ch);
- host->dma_ch = -1;
- }
-
- if (is_read) {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_RX;
- dma_dev_name = "MMC1 read";
- } else {
- sync_dev = OMAP_DMA_MMC2_RX;
- dma_dev_name = "MMC2 read";
- }
- } else {
- if (host->id == 0) {
- sync_dev = OMAP_DMA_MMC_TX;
- dma_dev_name = "MMC1 write";
- } else {
- sync_dev = OMAP_DMA_MMC2_TX;
- dma_dev_name = "MMC2 write";
- }
- }
- r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
- host, &dma_ch);
- if (r != 0) {
- dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
- return r;
- }
- host->dma_ch = dma_ch;
- host->dma_is_read = is_read;
+ /* If we got to the end of DMA, assume everything went well */
+ data->bytes_xfered += data->blocks * data->blksz;
- return 0;
+ mmc_omap_dma_done(host, data);
}
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
@@ -1118,33 +981,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
host->sg_idx = 0;
if (use_dma) {
- if (mmc_omap_get_dma_channel(host, data) == 0) {
- enum dma_data_direction dma_data_dir;
-
- if (data->flags & MMC_DATA_WRITE)
- dma_data_dir = DMA_TO_DEVICE;
- else
- dma_data_dir = DMA_FROM_DEVICE;
-
- host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- sg_len, dma_data_dir);
- host->total_bytes_left = 0;
- mmc_omap_prepare_dma(host, req->data);
- host->brs_received = 0;
- host->dma_done = 0;
- host->dma_in_use = 1;
- } else
- use_dma = 0;
+ enum dma_data_direction dma_data_dir;
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *c;
+ u32 burst, *bp;
+ u16 buf;
+
+ /*
+ * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
+ * and 24xx. Use 16 or 32 word frames when the
+ * blocksize is at least that large. Blocksize is
+ * usually 512 bytes; but not for some SD reads.
+ */
+ burst = cpu_is_omap15xx() ? 32 : 64;
+ if (burst > data->blksz)
+ burst = data->blksz;
+
+ burst >>= 1;
+
+ if (data->flags & MMC_DATA_WRITE) {
+ c = host->dma_tx;
+ bp = &host->dma_tx_burst;
+ buf = 0x0f80 | (burst - 1) << 0;
+ dma_data_dir = DMA_TO_DEVICE;
+ } else {
+ c = host->dma_rx;
+ bp = &host->dma_rx_burst;
+ buf = 0x800f | (burst - 1) << 8;
+ dma_data_dir = DMA_FROM_DEVICE;
+ }
+
+ if (!c)
+ goto use_pio;
+
+ /* Only reconfigure if we have a different burst size */
+ if (*bp != burst) {
+ struct dma_slave_config cfg;
+
+ cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
+
+ if (dmaengine_slave_config(c, &cfg))
+ goto use_pio;
+
+ *bp = burst;
+ }
+
+ host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
+ dma_data_dir);
+ if (host->sg_len == 0)
+ goto use_pio;
+
+ tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto use_pio;
+
+ OMAP_MMC_WRITE(host, BUF, buf);
+
+ tx->callback = mmc_omap_dma_callback;
+ tx->callback_param = host;
+ dmaengine_submit(tx);
+ host->brs_received = 0;
+ host->dma_done = 0;
+ host->dma_in_use = 1;
+ return;
}
+ use_pio:
/* Revert to PIO? */
- if (!use_dma) {
- OMAP_MMC_WRITE(host, BUF, 0x1f1f);
- host->total_bytes_left = data->blocks * block_size;
- host->sg_len = sg_len;
- mmc_omap_sg_to_buf(host);
- host->dma_in_use = 0;
- }
+ OMAP_MMC_WRITE(host, BUF, 0x1f1f);
+ host->total_bytes_left = data->blocks * block_size;
+ host->sg_len = sg_len;
+ mmc_omap_sg_to_buf(host);
+ host->dma_in_use = 0;
}
static void mmc_omap_start_request(struct mmc_omap_host *host,
@@ -1157,8 +1072,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
/* only touch fifo AFTER the controller readies it */
mmc_omap_prepare_data(host, req);
mmc_omap_start_command(host, req->cmd);
- if (host->dma_in_use)
- omap_start_dma(host->dma_ch);
+ if (host->dma_in_use) {
+ struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
+ host->dma_tx : host->dma_rx;
+
+ dma_async_issue_pending(c);
+ }
}
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
@@ -1400,6 +1319,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
struct resource *res;
+ dma_cap_mask_t mask;
+ unsigned sig;
int i, ret = 0;
int irq;
@@ -1439,7 +1360,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
spin_lock_init(&host->dma_lock);
- setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
spin_lock_init(&host->slot_lock);
init_waitqueue_head(&host->slot_wq);
@@ -1450,11 +1370,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
host->id = pdev->id;
host->mem_res = res;
host->irq = irq;
-
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
- host->dma_ch = -1;
-
host->irq = irq;
host->phys_base = host->mem_res->start;
host->virt_base = ioremap(res->start, resource_size(res));
@@ -1474,9 +1390,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->dma_tx_burst = -1;
+ host->dma_rx_burst = -1;
+
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
+ host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_tx) {
+ dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_tx)
+ dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
+ sig);
+#endif
+ if (cpu_is_omap24xx())
+ sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
+ else
+ sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
+ host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+#if 0
+ if (!host->dma_rx) {
+ dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+ goto err_dma;
+ }
+#else
+ if (!host->dma_rx)
+ dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
+ sig);
+#endif
+
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
if (ret)
- goto err_free_fclk;
+ goto err_free_dma;
if (pdata->init != NULL) {
ret = pdata->init(&pdev->dev);
@@ -1510,7 +1465,11 @@ err_plat_cleanup:
pdata->cleanup(&pdev->dev);
err_free_irq:
free_irq(host->irq, host);
-err_free_fclk:
+err_free_dma:
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
clk_put(host->fclk);
err_free_iclk:
clk_disable(host->iclk);
@@ -1545,6 +1504,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
clk_disable(host->iclk);
clk_put(host->iclk);
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ if (host->dma_rx)
+ dma_release_channel(host->dma_rx);
+
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
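
The omap.c changes above are a wholesale conversion from the private OMAP DMA API (omap_request_dma(), omap_set_dma_*_params(), omap_start_dma()) to the generic dmaengine slave API, and the same pattern repeats in omap_hsmmc.c and omap2.c below: request a DMA_SLAVE channel through a filter function, describe the device FIFO with dmaengine_slave_config(), map the scatterlist, prepare a slave-sg descriptor, attach a completion callback, submit, and kick the engine with dma_async_issue_pending(). A minimal sketch of that flow, using only calls that appear in the diff (error handling trimmed; the function name and fixed burst size are illustrative):

	/* Sketch of the dmaengine slave flow used by the conversions above. */
	static int sketch_start_rx_dma(struct dma_chan *chan, dma_addr_t fifo_addr,
				       struct scatterlist *sg, unsigned int sg_len,
				       dma_async_tx_callback done, void *done_arg)
	{
		struct dma_slave_config cfg = {
			.src_addr	= fifo_addr,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,
		};
		struct dma_async_tx_descriptor *tx;

		if (dmaengine_slave_config(chan, &cfg))
			return -EINVAL;

		tx = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -EIO;

		tx->callback = done;		/* e.g. mmc_omap_dma_callback() */
		tx->callback_param = done_arg;
		dmaengine_submit(tx);		/* queue the descriptor... */
		dma_async_issue_pending(chan);	/* ...and start the engine */
		return 0;
	}

Channel acquisition and release bracket this flow: dma_request_channel(mask, omap_dma_filter_fn, &sig) at probe time and dma_release_channel() in the error and remove paths, exactly as the probe/remove hunks above do.
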
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index bc28627af66b..3a09f93cc3b6 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
+#include <linux/dmaengine.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -29,6 +30,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
+#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -37,7 +39,6 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
-#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
#include <plat/mmc.h>
@@ -166,7 +167,8 @@ struct omap_hsmmc_host {
int suspended;
int irq;
int use_dma, dma_ch;
- int dma_line_tx, dma_line_rx;
+ struct dma_chan *tx_chan;
+ struct dma_chan *rx_chan;
int slot_id;
int response_busy;
int context_loss;
@@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
return DMA_FROM_DEVICE;
}
+static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
+ struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
+}
+
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
@@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
- dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
- host->data->sg_len,
+ struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
+
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(chan->device->dev,
+ host->data->sg, host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
- omap_free_dma(dma_ch);
+
host->data->host_cookie = 0;
}
host->data = NULL;
@@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
- struct mmc_data *data)
-{
- int sync_dev;
-
- if (data->flags & MMC_DATA_WRITE)
- sync_dev = host->dma_line_tx;
- else
- sync_dev = host->dma_line_rx;
- return sync_dev;
-}
-
-static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
- struct mmc_data *data,
- struct scatterlist *sgl)
-{
- int blksz, nblk, dma_ch;
-
- dma_ch = host->dma_ch;
- if (data->flags & MMC_DATA_WRITE) {
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- } else {
- omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- (host->mapbase + OMAP_HSMMC_DATA), 0, 0);
- omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- sg_dma_address(sgl), 0, 0);
- }
-
- blksz = host->data->blksz;
- nblk = sg_dma_len(sgl) / blksz;
-
- omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
- blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
- omap_hsmmc_get_dma_sync_dev(host, data),
- !(data->flags & MMC_DATA_WRITE));
-
- omap_start_dma(dma_ch);
-}
-
-/*
- * DMA call back function
- */
-static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
+static void omap_hsmmc_dma_callback(void *param)
{
- struct omap_hsmmc_host *host = cb_data;
+ struct omap_hsmmc_host *host = param;
+ struct dma_chan *chan;
struct mmc_data *data;
- int dma_ch, req_in_progress;
- unsigned long flags;
-
- if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
- dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
- ch_status);
- return;
- }
+ int req_in_progress;
- spin_lock_irqsave(&host->irq_lock, flags);
+ spin_lock_irq(&host->irq_lock);
if (host->dma_ch < 0) {
- spin_unlock_irqrestore(&host->irq_lock, flags);
+ spin_unlock_irq(&host->irq_lock);
return;
}
data = host->mrq->data;
- host->dma_sg_idx++;
- if (host->dma_sg_idx < host->dma_len) {
- /* Fire up the next transfer. */
- omap_hsmmc_config_dma_params(host, data,
- data->sg + host->dma_sg_idx);
- spin_unlock_irqrestore(&host->irq_lock, flags);
- return;
- }
-
+ chan = omap_hsmmc_get_dma_chan(host, data);
if (!data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ dma_unmap_sg(chan->device->dev,
+ data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
- dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock_irqrestore(&host->irq_lock, flags);
-
- omap_free_dma(dma_ch);
+ spin_unlock_irq(&host->irq_lock);
/* If DMA has finished after TC, complete the request */
if (!req_in_progress) {
@@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_data *data,
- struct omap_hsmmc_next *next)
+ struct omap_hsmmc_next *next,
+ struct dma_chan *chan)
{
int dma_len;
@@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
/* Check if next job is already prepared */
if (next ||
(!next && data->host_cookie != host->next_data.cookie)) {
- dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
+ dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
omap_hsmmc_get_dma_dir(host, data));
} else {
@@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- int dma_ch = 0, ret = 0, i;
+ struct dma_slave_config cfg;
+ struct dma_async_tx_descriptor *tx;
+ int ret = 0, i;
struct mmc_data *data = req->data;
+ struct dma_chan *chan;
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
BUG_ON(host->dma_ch != -1);
- ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
- "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
- if (ret != 0) {
- dev_err(mmc_dev(host->mmc),
- "%s: omap_request_dma() failed with %d\n",
- mmc_hostname(host->mmc), ret);
+ chan = omap_hsmmc_get_dma_chan(host, data);
+
+ cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = data->blksz / 4;
+ cfg.dst_maxburst = data->blksz / 4;
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
return ret;
- }
- ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
if (ret)
return ret;
- host->dma_ch = dma_ch;
- host->dma_sg_idx = 0;
+ tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx) {
+ dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
+ /* FIXME: cleanup */
+ return -1;
+ }
+
+ tx->callback = omap_hsmmc_dma_callback;
+ tx->callback_param = host;
- omap_hsmmc_config_dma_params(host, data, data->sg);
+ /* Does not fail */
+ dmaengine_submit(tx);
+
+ host->dma_ch = 1;
+
+ dma_async_issue_pending(chan);
return 0;
}
@@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct omap_hsmmc_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- if (host->use_dma) {
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (host->use_dma && data->host_cookie) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
+
+ dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
@@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
return ;
}
- if (host->use_dma)
+ if (host->use_dma) {
+ struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
+
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
- &host->next_data))
+ &host->next_data, c))
mrq->data->host_cookie = 0;
+ }
}
/*
@@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
const struct of_device_id *match;
+ dma_cap_mask_t mask;
+ unsigned tx_req, rx_req;
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
if (match) {
@@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->pdata = pdata;
host->dev = &pdev->dev;
host->use_dma = 1;
- host->dev->dma_mask = &pdata->dma_mask;
host->dma_ch = -1;
host->irq = irq;
host->slot_id = 0;
@@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_irq;
}
- host->dma_line_tx = res->start;
+ tx_req = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
@@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = -ENXIO;
goto err_irq;
}
- host->dma_line_rx = res->start;
+ rx_req = res->start;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
+ if (!host->rx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
+ if (!host->tx_chan) {
+ dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+ ret = -ENXIO;
+ goto err_irq;
+ }
/* Request IRQ for MMC operations */
ret = request_irq(host->irq, omap_hsmmc_irq, 0,
@@ -2021,6 +2014,10 @@ err_reg:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
@@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
+ if (host->tx_chan)
+ dma_release_channel(host->tx_chan);
+ if (host->rx_chan)
+ dma_release_channel(host->rx_chan);
+
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index b97b2f5dafdb..d25f9ab9a54d 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
int div = 1;
u32 temp;
+ if (clock == 0)
+ goto out;
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (clock == 0)
- goto out;
-
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
pre_div *= 2;
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index cfff454f628b..c3bb304eca07 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -19,14 +19,13 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
+#include <asm/sections.h>
/****************************************************************************/
-extern char _ebss;
-
struct map_info uclinux_ram_map = {
.name = "RAM",
- .phys = (unsigned long)&_ebss,
+ .phys = (unsigned long)__bss_stop,
.size = 0,
};
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 31bb7e5b504a..8ca417614c57 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -480,7 +480,7 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
- depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
+ depends on MTD_NAND && MXS_DMA
help
Enables NAND Flash support for IMX23 or IMX28.
The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index a6fa884ae49b..100b6775e175 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -52,9 +52,10 @@
#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
-#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
struct jz_nand {
struct mtd_info mtd;
@@ -62,8 +63,11 @@ struct jz_nand {
void __iomem *base;
struct resource *mem;
- void __iomem *bank_base;
- struct resource *bank_mem;
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+ void __iomem *bank_base[JZ_NAND_NUM_BANKS];
+ struct resource *bank_mem[JZ_NAND_NUM_BANKS];
+
+ int selected_bank;
struct jz_nand_platform_data *pdata;
bool is_reading;
@@ -74,26 +78,50 @@ static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
return container_of(mtd, struct jz_nand, mtd);
}
+static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+ struct jz_nand *nand = mtd_to_jz_nand(mtd);
+ struct nand_chip *chip = mtd->priv;
+ uint32_t ctrl;
+ int banknr;
+
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
+
+ if (chipnr == -1) {
+ banknr = -1;
+ } else {
+ banknr = nand->banks[chipnr] - 1;
+ chip->IO_ADDR_R = nand->bank_base[banknr];
+ chip->IO_ADDR_W = nand->bank_base[banknr];
+ }
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ nand->selected_bank = banknr;
+}
+
static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
struct jz_nand *nand = mtd_to_jz_nand(mtd);
struct nand_chip *chip = mtd->priv;
uint32_t reg;
+ void __iomem *bank_base = nand->bank_base[nand->selected_bank];
+
+ BUG_ON(nand->selected_bank < 0);
if (ctrl & NAND_CTRL_CHANGE) {
BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
if (ctrl & NAND_ALE)
- chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET;
+ bank_base += JZ_NAND_MEM_ADDR_OFFSET;
else if (ctrl & NAND_CLE)
- chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET;
- else
- chip->IO_ADDR_W = nand->bank_base;
+ bank_base += JZ_NAND_MEM_CMD_OFFSET;
+ chip->IO_ADDR_W = bank_base;
reg = readl(nand->base + JZ_REG_NAND_CTRL);
if (ctrl & NAND_NCE)
- reg |= JZ_NAND_CTRL_ASSERT_CHIP(0);
+ reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
else
- reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0);
+ reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
writel(reg, nand->base + JZ_REG_NAND_CTRL);
}
if (dat != NAND_CMD_NONE)
@@ -252,7 +280,7 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
}
static int jz_nand_ioremap_resource(struct platform_device *pdev,
- const char *name, struct resource **res, void __iomem **base)
+ const char *name, struct resource **res, void *__iomem *base)
{
int ret;
@@ -288,6 +316,90 @@ err:
return ret;
}
+static inline void jz_nand_iounmap_resource(struct resource *res, void __iomem *base)
+{
+ iounmap(base);
+ release_mem_region(res->start, resource_size(res));
+}
+
+static int __devinit jz_nand_detect_bank(struct platform_device *pdev, struct jz_nand *nand, unsigned char bank, size_t chipnr, uint8_t *nand_maf_id, uint8_t *nand_dev_id) {
+ int ret;
+ int gpio;
+ char gpio_name[9];
+ char res_name[6];
+ uint32_t ctrl;
+ struct mtd_info *mtd = &nand->mtd;
+ struct nand_chip *chip = &nand->chip;
+
+ /* Request GPIO port. */
+ gpio = JZ_GPIO_MEM_CS0 + bank - 1;
+ sprintf(gpio_name, "NAND CS%d", bank);
+ ret = gpio_request(gpio, gpio_name);
+ if (ret) {
+ dev_warn(&pdev->dev,
+ "Failed to request %s gpio %d: %d\n",
+ gpio_name, gpio, ret);
+ goto notfound_gpio;
+ }
+
+ /* Request I/O resource. */
+ sprintf(res_name, "bank%d", bank);
+ ret = jz_nand_ioremap_resource(pdev, res_name,
+ &nand->bank_mem[bank - 1],
+ &nand->bank_base[bank - 1]);
+ if (ret)
+ goto notfound_resource;
+
+ /* Enable chip in bank. */
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
+ ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+ ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+ if (chipnr == 0) {
+ /* Detect first chip. */
+ ret = nand_scan_ident(mtd, 1, NULL);
+ if (ret)
+ goto notfound_id;
+
+ /* Retrieve the IDs from the first chip. */
+ chip->select_chip(mtd, 0);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ *nand_maf_id = chip->read_byte(mtd);
+ *nand_dev_id = chip->read_byte(mtd);
+ } else {
+ /* Detect additional chip. */
+ chip->select_chip(mtd, chipnr);
+ chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+ chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+ if (*nand_maf_id != chip->read_byte(mtd)
+ || *nand_dev_id != chip->read_byte(mtd)) {
+ ret = -ENODEV;
+ goto notfound_id;
+ }
+
+ /* Update size of the MTD. */
+ chip->numchips++;
+ mtd->size += chip->chipsize;
+ }
+
+ dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+ return 0;
+
+notfound_id:
+ dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
+ ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
+ writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+ jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+notfound_resource:
+ gpio_free(gpio);
+notfound_gpio:
+ return ret;
+}
+
static int __devinit jz_nand_probe(struct platform_device *pdev)
{
int ret;
@@ -295,6 +407,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
struct nand_chip *chip;
struct mtd_info *mtd;
struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+ size_t chipnr, bank_idx;
+ uint8_t nand_maf_id = 0, nand_dev_id = 0;
nand = kzalloc(sizeof(*nand), GFP_KERNEL);
if (!nand) {
@@ -305,10 +419,6 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
if (ret)
goto err_free;
- ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem,
- &nand->bank_base);
- if (ret)
- goto err_iounmap_mmio;
if (pdata && gpio_is_valid(pdata->busy_gpio)) {
ret = gpio_request(pdata->busy_gpio, "NAND busy pin");
@@ -316,7 +426,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Failed to request busy gpio %d: %d\n",
pdata->busy_gpio, ret);
- goto err_iounmap_mem;
+ goto err_iounmap_mmio;
}
}
@@ -339,22 +449,51 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->chip_delay = 50;
chip->cmd_ctrl = jz_nand_cmd_ctrl;
+ chip->select_chip = jz_nand_select_chip;
if (pdata && gpio_is_valid(pdata->busy_gpio))
chip->dev_ready = jz_nand_dev_ready;
- chip->IO_ADDR_R = nand->bank_base;
- chip->IO_ADDR_W = nand->bank_base;
-
nand->pdata = pdata;
platform_set_drvdata(pdev, nand);
- writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL);
-
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret) {
- dev_err(&pdev->dev, "Failed to scan nand\n");
- goto err_gpio_free;
+ /* We are going to autodetect NAND chips in the banks specified in the
+ * platform data. Although nand_scan_ident() can detect multiple chips,
+ * it requires those chips to be numbered consecutively, which is not
+ * always the case for external memory banks. And a fixed chip-to-bank
+ * mapping is not practical either, since for example Dingoo units
+ * produced at different times have NAND chips in different banks.
+ */
+ chipnr = 0;
+ for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
+ unsigned char bank;
+
+ /* If there is no platform data, look for NAND in bank 1,
+ * which is the most likely bank since it is the only one
+ * that can be booted from.
+ */
+ bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
+ if (bank == 0)
+ break;
+ if (bank > JZ_NAND_NUM_BANKS) {
+ dev_warn(&pdev->dev,
+ "Skipping non-existing bank: %d\n", bank);
+ continue;
+ }
+ /* The detection routine will directly or indirectly call
+ * jz_nand_select_chip(), so nand->banks has to contain the
+ * bank we're checking.
+ */
+ nand->banks[chipnr] = bank;
+ if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
+ &nand_maf_id, &nand_dev_id) == 0)
+ chipnr++;
+ else
+ nand->banks[chipnr] = 0;
+ }
+ if (chipnr == 0) {
+ dev_err(&pdev->dev, "No NAND chips found\n");
+ goto err_gpio_busy;
}
if (pdata && pdata->ident_callback) {
@@ -364,8 +503,8 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
ret = nand_scan_tail(mtd);
if (ret) {
- dev_err(&pdev->dev, "Failed to scan nand\n");
- goto err_gpio_free;
+ dev_err(&pdev->dev, "Failed to scan NAND\n");
+ goto err_unclaim_banks;
}
ret = mtd_device_parse_register(mtd, NULL, NULL,
@@ -382,14 +521,21 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
return 0;
err_nand_release:
- nand_release(&nand->mtd);
-err_gpio_free:
+ nand_release(mtd);
+err_unclaim_banks:
+ while (chipnr--) {
+ unsigned char bank = nand->banks[chipnr];
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ }
+ writel(0, nand->base + JZ_REG_NAND_CTRL);
+err_gpio_busy:
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ gpio_free(pdata->busy_gpio);
platform_set_drvdata(pdev, NULL);
- gpio_free(pdata->busy_gpio);
-err_iounmap_mem:
- iounmap(nand->bank_base);
err_iounmap_mmio:
- iounmap(nand->base);
+ jz_nand_iounmap_resource(nand->mem, nand->base);
err_free:
kfree(nand);
return ret;
@@ -398,16 +544,26 @@ err_free:
static int __devexit jz_nand_remove(struct platform_device *pdev)
{
struct jz_nand *nand = platform_get_drvdata(pdev);
+ struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
+ size_t i;
nand_release(&nand->mtd);
/* Deassert and disable all chips */
writel(0, nand->base + JZ_REG_NAND_CTRL);
- iounmap(nand->bank_base);
- release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem));
- iounmap(nand->base);
- release_mem_region(nand->mem->start, resource_size(nand->mem));
+ for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) {
+ unsigned char bank = nand->banks[i];
+ if (bank != 0) {
+ jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+ nand->bank_base[bank - 1]);
+ gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+ }
+ }
+ if (pdata && gpio_is_valid(pdata->busy_gpio))
+ gpio_free(pdata->busy_gpio);
+
+ jz_nand_iounmap_resource(nand->mem, nand->base);
platform_set_drvdata(pdev, NULL);
kfree(nand);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d0c9b9..ac4fd756eda3 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -18,6 +19,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -123,7 +125,7 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
struct completion comp;
- int dma_ch;
+ struct dma_chan *dma;
int gpmc_irq;
enum {
OMAP_NAND_IO_READ = 0, /* read */
@@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/*
- * omap_nand_dma_cb: callback on the completion of dma transfer
- * @lch: logical channel
- * @ch_satuts: channel status
+ * omap_nand_dma_callback: callback on the completion of dma transfer
* @data: pointer to completion data structure
*/
-static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
+static void omap_nand_dma_callback(void *data)
{
complete((struct completion *) data);
}
@@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
{
struct omap_nand_info *info = container_of(mtd,
struct omap_nand_info, mtd);
+ struct dma_async_tx_descriptor *tx;
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
DMA_FROM_DEVICE;
- dma_addr_t dma_addr;
- int ret;
+ struct scatterlist sg;
unsigned long tim, limit;
-
- /* The fifo depth is 64 bytes max.
- * But configure the FIFO-threahold to 32 to get a sync at each frame
- * and frame length is 32 bytes.
- */
- int buf_len = len >> 6;
+ unsigned n;
+ int ret;
if (addr >= high_memory) {
struct page *p1;
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
}
- dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
- if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
+ sg_init_one(&sg, addr, len);
+ n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+ if (n == 0) {
dev_err(&info->pdev->dev,
"Couldn't DMA map a %d byte buffer\n", len);
goto out_copy;
}
- if (is_write) {
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
- } else {
- omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
- info->phys_base, 0, 0);
- omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
- dma_addr, 0, 0);
- omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
- 0x10, buf_len, OMAP_DMA_SYNC_FRAME,
- OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
- }
- /* configure and start prefetch transfer */
+ tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+ is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tx)
+ goto out_copy_unmap;
+
+ tx->callback = omap_nand_dma_callback;
+ tx->callback_param = &info->comp;
+ dmaengine_submit(tx);
+
+ /* configure and start prefetch transfer */
ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
init_completion(&info->comp);
-
- omap_start_dma(info->dma_ch);
+ dma_async_issue_pending(info->dma);
/* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* disable and stop the PFPW engine */
gpmc_prefetch_reset(info->gpmc_cs);
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
out_copy_unmap:
- dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
+ dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_platform_data *pdata;
int err;
int i, offset;
+ dma_cap_mask_t mask;
+ unsigned sig;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1244,18 +1235,30 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_PREFETCH_DMA:
- err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
- omap_nand_dma_cb, &info->comp, &info->dma_ch);
- if (err < 0) {
- info->dma_ch = -1;
- dev_err(&pdev->dev, "DMA request failed!\n");
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = OMAP24XX_DMA_GPMC;
+ info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!info->dma) {
+ dev_err(&pdev->dev, "DMA engine request failed\n");
+ err = -ENXIO;
goto out_release_mem_region;
} else {
- omap_set_dma_dest_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
- omap_set_dma_src_burst_mode(info->dma_ch,
- OMAP_DMA_DATA_BURST_16);
-
+ struct dma_slave_config cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = info->phys_base;
+ cfg.dst_addr = info->phys_base;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 16;
+ cfg.dst_maxburst = 16;
+ err = dmaengine_slave_config(info->dma, &cfg);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+ err);
+ goto out_release_mem_region;
+ }
info->nand.read_buf = omap_read_buf_dma_pref;
info->nand.write_buf = omap_write_buf_dma_pref;
}
@@ -1358,6 +1361,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
return 0;
out_release_mem_region:
+ if (info->dma)
+ dma_release_channel(info->dma);
release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
kfree(info);
@@ -1373,8 +1378,8 @@ static int omap_nand_remove(struct platform_device *pdev)
omap3_free_bch(&info->mtd);
platform_set_drvdata(pdev, NULL);
- if (info->dma_ch != -1)
- omap_free_dma(info->dma_ch);
+ if (info->dma)
+ dma_release_channel(info->dma);
if (info->gpmc_irq)
free_irq(info->gpmc_irq, info);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 513dc88a05ca..fc5a868c436e 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -183,6 +183,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
return 0;
no_dev:
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
platform_set_drvdata(pdev, NULL);
iounmap(io_base);
no_res:
@@ -214,7 +218,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static struct of_device_id orion_nand_of_match_table[] = {
- { .compatible = "mrvl,orion-nand", },
+ { .compatible = "marvell,orion-nand", },
{},
};
#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 437bc193e170..568307cc7caf 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -340,7 +340,7 @@ retry:
* of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
- kfree(new_aeb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -353,7 +353,7 @@ write_error:
list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_aeb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 545c09ed9079..cff6f023c03a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,9 +996,7 @@ static int __init cops_module_init(void)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
cardname);
cops_dev = cops_probe(-1);
- if (IS_ERR(cops_dev))
- return PTR_ERR(cops_dev);
- return 0;
+ return PTR_RET(cops_dev);
}
static void __exit cops_module_exit(void)
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 0910dce3996d..b5782cdf0bca 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void)
"ltpc: Autoprobing is not recommended for modules\n");
dev_ltpc = ltpc_probe();
- if (IS_ERR(dev_ltpc))
- return PTR_ERR(dev_ltpc);
- return 0;
+ return PTR_RET(dev_ltpc);
}
module_init(ltpc_module_init);
#endif
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6fae5f3ec7f6..d688a8af432c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
- if (unlikely(netpoll_tx_running(slave_dev)))
+ if (unlikely(netpoll_tx_running(bond->dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
else
dev_queue_xmit(skb);
@@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
struct netpoll *np;
int err = 0;
- np = kzalloc(sizeof(*np), GFP_KERNEL);
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
err = -ENOMEM;
if (!np)
goto out;
- err = __netpoll_setup(np, slave->dev);
+ err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
if (err) {
kfree(np);
goto out;
@@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave)
return;
slave->np = NULL;
- synchronize_rcu_bh();
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free_rcu(np);
}
static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
{
@@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
read_unlock(&bond->lock);
}
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 8a3054b84812..5de74e762021 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
sprintf(name, "cf%s", tty->name);
dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ if (!dev)
+ return -ENOMEM;
+
ser = netdev_priv(dev);
ser->tty = tty_kref_get(tty);
ser->dev = dev;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f0921d16f0a9..6ff7ad006c30 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -74,7 +74,6 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
const struct platform_device_id *id;
struct resource *mem;
int irq;
-#ifdef CONFIG_HAVE_CLK
struct clk *clk;
/* get the appropriate clk */
@@ -84,7 +83,6 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
ret = -ENODEV;
goto exit;
}
-#endif
/* get the platform data */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -145,10 +143,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
dev->irq = irq;
priv->base = addr;
-#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
-#endif
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -172,10 +168,8 @@ exit_iounmap:
exit_release_mem:
release_mem_region(mem->start, resource_size(mem));
exit_free_clk:
-#ifdef CONFIG_HAVE_CLK
clk_put(clk);
exit:
-#endif
dev_err(&pdev->dev, "probe failed\n");
return ret;
@@ -196,9 +190,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
-#ifdef CONFIG_HAVE_CLK
clk_put(priv->priv);
-#endif
return 0;
}
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 4f50145f6483..662c5f7eb0c5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 310596175676..b595d3422b9f 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
const uint8_t *mem, *end, *dat;
uint16_t type, len;
uint32_t addr;
- uint8_t *buf = NULL;
+ uint8_t *buf = NULL, *new_buf;
int buflen = 0;
int8_t type_end = 0;
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
if (len > buflen) {
/* align buflen */
buflen = (len + (1024-1)) & ~(1024-1);
- buf = krealloc(buf, buflen, GFP_KERNEL);
- if (!buf) {
+ new_buf = krealloc(buf, buflen, GFP_KERNEL);
+ if (!new_buf) {
ret = -ENOMEM;
goto failed;
}
+ buf = new_buf;
}
/* verify record data */
memcpy_fromio(buf, &dpram[addr + offset], len);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index f0c8bd54ce29..021d69c5d9bc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1712,7 +1712,7 @@ e100_set_network_leds(int active)
static void
e100_netpoll(struct net_device* netdev)
{
- e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+ e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev);
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 77bcd4cb4ffb..6d1a24acb77e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1278,7 +1278,7 @@ struct bnx2x {
#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
#define BNX2X_FW_RX_ALIGN_END \
- max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
continue; \
else
-#define for_each_napi_rx_queue(bp, var) \
- for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e879e19eb0d6..af20c6ee2cd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
*/
bnx2x_setup_tc(bp->dev, bp->max_cos);
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfa757e74296..21b553229ea4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
bp->num_napi_queues = bp->num_queues;
/* Add NAPI objects */
- for_each_napi_rx_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_napi_rx_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc4e0e3885b0..c37a68d68090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
*/
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
- bnx2x_del_all_napi(bp);
bnx2x_disable_msi(bp);
BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
bnx2x_set_int_mode(bp);
- bnx2x_add_all_napi(bp);
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9aaf863b4237..21054987257a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
return val != 0;
}
-/*
- * Reset the load status for the current engine.
- */
-static void bnx2x_clear_load_status(struct bnx2x *bp)
-{
- u32 val;
- u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
- BNX2X_PATH0_LOAD_CNT_MASK);
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
- val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
- REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
-}
-
static void _print_next_block(int idx, const char *blk)
{
pr_cont("%s%s", idx ? ", " : "", blk);
@@ -8441,6 +8427,8 @@ unload_error:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -9360,8 +9348,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
struct bnx2x_prev_path_list *tmp_list;
int rc;
- tmp_list = (struct bnx2x_prev_path_list *)
- kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+ tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
return -ENOMEM;
@@ -9385,32 +9372,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
return rc;
}
-static bool __devinit bnx2x_can_flr(struct bnx2x *bp)
-{
- int pos;
- u32 cap;
- struct pci_dev *dev = bp->pdev;
-
- pos = pci_pcie_cap(dev);
- if (!pos)
- return false;
-
- pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return false;
-
- return true;
-}
-
static int __devinit bnx2x_do_flr(struct bnx2x *bp)
{
int i, pos;
u16 status;
struct pci_dev *dev = bp->pdev;
- /* probe the capability first */
- if (bnx2x_can_flr(bp))
- return -ENOTTY;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
+ return -EINVAL;
+ }
+
+ /* Only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards supports FLR */
+ if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
+ BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
+ bp->common.bc_ver);
+ return -EINVAL;
+ }
pos = pci_pcie_cap(dev);
if (!pos)
@@ -9430,12 +9409,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp)
"transaction is not cleared; proceeding with reset anyway\n");
clear:
- if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
- BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
- bp->common.bc_ver);
- return -EINVAL;
- }
+ BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
return 0;
@@ -9455,8 +9430,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp))
- return bnx2x_do_flr(bp);
+ rc = bnx2x_test_firmware_version(bp, false);
+
+ if (!rc) {
+ /* fw version is good */
+ BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
+ rc = bnx2x_do_flr(bp);
+ }
+
+ if (!rc) {
+ /* FLR was performed */
+ BNX2X_DEV_INFO("FLR successful\n");
+ return 0;
+ }
+
+ BNX2X_DEV_INFO("Could not FLR\n");
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
@@ -11243,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
+ int i;
- disable_irq(bp->pdev->irq);
- bnx2x_interrupt(bp->pdev->irq, dev);
- enable_irq(bp->pdev->irq);
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ }
}
#endif
@@ -11428,9 +11418,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (!chip_is_e1x)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
- /* Reset the load counter */
- bnx2x_clear_load_status(bp);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11916,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
*/
bnx2x_set_int_mode(bp);
- /* Add all NAPI objects */
- bnx2x_add_all_napi(bp);
-
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11993,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
-
/* Power on: we can't let PCI layer write to us while we are in D3 */
bnx2x_set_power_state(bp, PCI_D0);
@@ -12042,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
del_timer_sync(&bp->timer);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index ec62a5c8bd37..28a0bcfe61ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1603,6 +1603,10 @@
* of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
* register. Reset on hard reset. */
#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P1 0xa8bc
/* [RW 32] The following driver registers(1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 734fd87cd990..62f754bd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
break;
default:
+ kfree(new_cmd);
BNX2X_ERR("Unknown command: %d\n", cmd);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 667d89042d35..332db64dd5be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,9 +785,11 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
- if (CHIP_IS_E3(bp))
- estats->eee_tx_lpi += REG_RD(bp,
- MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+ if (CHIP_IS_E3(bp)) {
+ u32 lpi_reg = BP_PORT(bp) ? MISC_REG_CPMU_LP_SM_ENT_CNT_P1
+ : MISC_REG_CPMU_LP_SM_ENT_CNT_P0;
+ estats->eee_tx_lpi += REG_RD(bp, lpi_reg);
+ }
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9a009fd6ea1b..bf906c51d82a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -92,7 +92,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 123
+#define TG3_MIN_NUM 124
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE "March 21, 2012"
@@ -672,6 +672,12 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
else
bit = 1 << tp->pci_fn;
break;
+ case TG3_APE_LOCK_PHY0:
+ case TG3_APE_LOCK_PHY1:
+ case TG3_APE_LOCK_PHY2:
+ case TG3_APE_LOCK_PHY3:
+ bit = APE_LOCK_REQ_DRIVER;
+ break;
default:
return -EINVAL;
}
@@ -723,6 +729,12 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
else
bit = 1 << tp->pci_fn;
break;
+ case TG3_APE_LOCK_PHY0:
+ case TG3_APE_LOCK_PHY1:
+ case TG3_APE_LOCK_PHY2:
+ case TG3_APE_LOCK_PHY3:
+ bit = APE_LOCK_GRANT_DRIVER;
+ break;
default:
return;
}
@@ -1052,6 +1064,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
udelay(80);
}
+ tg3_ape_lock(tp, tp->phy_ape_lock);
+
*val = 0x0;
frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
@@ -1086,6 +1100,8 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
udelay(80);
}
+ tg3_ape_unlock(tp, tp->phy_ape_lock);
+
return ret;
}
@@ -1105,6 +1121,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
udelay(80);
}
+ tg3_ape_lock(tp, tp->phy_ape_lock);
+
frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
@@ -1135,6 +1153,8 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
udelay(80);
}
+ tg3_ape_unlock(tp, tp->phy_ape_lock);
+
return ret;
}
@@ -9066,8 +9086,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9257,6 +9276,19 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
+ if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
+ break;
+ }
+ if (i < TG3_NUM_RDMA_CHANNELS) {
+ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+ val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
+ tg3_flag_set(tp, 5719_RDMA_BUG);
+ }
+ }
+
tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
@@ -9616,6 +9648,16 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+ if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
+ (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
+ sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
+ u32 val;
+
+ val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+ val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
+ tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
+ tg3_flag_clear(tp, 5719_RDMA_BUG);
+ }
TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
@@ -12482,10 +12524,12 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
{
struct tg3 *tp = netdev_priv(dev);
- if (!tp->hw_stats)
+ spin_lock_bh(&tp->lock);
+ if (!tp->hw_stats) {
+ spin_unlock_bh(&tp->lock);
return &tp->net_stats_prev;
+ }
- spin_lock_bh(&tp->lock);
tg3_get_nstats(tp, stats);
spin_unlock_bh(&tp->lock);
@@ -13648,6 +13692,23 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_flag_set(tp, PAUSE_AUTONEG);
tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ if (tg3_flag(tp, ENABLE_APE)) {
+ switch (tp->pci_fn) {
+ case 0:
+ tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
+ break;
+ case 1:
+ tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
+ break;
+ case 2:
+ tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
+ break;
+ case 3:
+ tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
+ break;
+ }
+ }
+
if (tg3_flag(tp, USE_PHYLIB))
return tg3_phy_init(tp);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index a1b75cd67b9d..6d52cb286826 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1376,7 +1376,11 @@
#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K 0x00030000
#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K 0x000c0000
-/* 0x4914 --> 0x4c00 unused */
+#define TG3_LSO_RD_DMA_TX_LENGTH_WA 0x02000000
+/* 0x4914 --> 0x4be0 unused */
+
+#define TG3_NUM_RDMA_CHANNELS 4
+#define TG3_RDMA_LENGTH 0x00004be0
/* Write DMA control registers */
#define WDMAC_MODE 0x00004c00
@@ -2959,6 +2963,7 @@ enum TG3_FLAGS {
TG3_FLAG_L1PLLPD_EN,
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_4K_FIFO_LIMIT,
+ TG3_FLAG_5719_RDMA_BUG,
TG3_FLAG_RESET_TASK_PENDING,
TG3_FLAG_5705_PLUS,
TG3_FLAG_IS_5788,
@@ -3107,6 +3112,7 @@ struct tg3 {
int old_link;
u8 phy_addr;
+ u8 phy_ape_lock;
/* PHY info */
u32 phy_id;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 8596acaa402b..d49933ed551f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
#endif
while (n--) {
- pg = alloc_page(gfp);
+ pg = __skb_alloc_page(gfp, NULL);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f2d1ecdcaf98..8877fbfefb63 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,7 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
+ page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 845b2020f291..138446957786 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
+ u16 cfg;
spin_lock_irqsave(&lp->lock, flags);
if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
/* in promiscuous mode, we accept errored packets,
* so we have to enable interrupts on them also
*/
- writereg(dev, PP_RxCFG,
- (lp->curr_rx_cfg |
- (lp->rx_mode == RX_ALL_ACCEPT)
- ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
- : 0));
+ cfg = lp->curr_rx_cfg;
+ if (lp->rx_mode == RX_ALL_ACCEPT)
+ cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+ writereg(dev, PP_RxCFG, cfg);
spin_unlock_irqrestore(&lp->lock, flags);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fac97b4bb59..8c63d06ab12b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- spin_lock_bh(&adapter->mcc_cq_lock);
+ spin_lock(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
- spin_unlock_bh(&adapter->mcc_cq_lock);
+ spin_unlock(&adapter->mcc_cq_lock);
return status;
}
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_error(adapter))
return -EIO;
+ local_bh_disable();
status = be_process_mcc(adapter);
+ local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index e34be1c7ae8a..c0e700653f96 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -910,8 +910,9 @@ static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
if (!status) {
cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
sizeof(struct be_cmd_resp_hdr));
- for (i = 0; i < cfgs->num_modules; i++) {
- for (j = 0; j < cfgs->module[i].num_modes; j++) {
+ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
+ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
+ for (j = 0; j < num_modes; j++) {
if (cfgs->module[i].trace_lvl[j].mode ==
MODE_UART)
cfgs->module[i].trace_lvl[j].dbg_lvl =
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d9677174490..78b8aa8069f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1397,7 +1397,7 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
if (rxcp->vlanf) {
rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
compl);
@@ -1429,7 +1429,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
rxcp->rss_hash =
- AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
if (rxcp->vlanf) {
rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
compl);
@@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
if (adapter->num_rx_qs != MAX_RX_QS)
dev_info(&adapter->pdev->dev,
- "Created only %d receive queues", adapter->num_rx_qs);
+ "Created only %d receive queues\n", adapter->num_rx_qs);
return 0;
}
@@ -3579,7 +3579,7 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
if (!status) {
cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
sizeof(struct be_cmd_resp_hdr));
- for (j = 0; j < cfgs->module[0].num_modes; j++) {
+ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
level = cfgs->module[0].trace_lvl[j].dbg_lvl;
}
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
+ local_bh_disable();
be_process_mcc(adapter);
+ local_bh_enable();
goto reschedule;
}
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 0f2d1a710909..151453309401 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
new_bus->phy_mask = ~0;
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (!new_bus->irq)
+ if (!new_bus->irq) {
+ ret = -ENOMEM;
goto out_unmap_regs;
+ }
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 55bb867258e6..cdf702a59485 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
fec->fecp = ioremap(res.start, resource_size(&res));
- if (!fec->fecp)
+ if (!fec->fecp) {
+ ret = -ENOMEM;
goto out_fec;
+ }
if (get_bus_freq) {
clock = get_bus_freq(ofdev->dev.of_node);
@@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
new_bus->phy_mask = ~0;
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (!new_bus->irq)
+ if (!new_bus->irq) {
+ ret = -ENOMEM;
goto out_unmap_regs;
+ }
new_bus->parent = &ofdev->dev;
dev_set_drvdata(&ofdev->dev, new_bus);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4605f7246687..d3233f59a82e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_RX;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 0b3bade957fd..080c89093feb 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
- u32 ctrl, ctrl_ext, eecd;
+ u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
/*
@@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
- ew32(TCTL, E1000_TCTL_PSP);
+ tctl = er32(TCTL);
+ tctl &= ~E1000_TCTL_EN;
+ ew32(TCTL, tctl);
e1e_flush();
usleep_range(10000, 20000);
@@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
- * If the partner code word is null, stop forcing
- * and restart auto negotiation.
*/
- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+ if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
+ u32 tx_fifo_limit;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 95b245310f17..d01a099475a1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
+static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
+ struct e1000_buffer *bi)
+{
+ int i;
+ struct e1000_ps_page *ps_page;
+
+ for (i = 0; i < adapter->rx_ps_pages; i++) {
+ ps_page = &bi->ps_pages[i];
+
+ if (ps_page->page) {
+ pr_info("packet dump for ps_page %d:\n", i);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+ 16, 1, page_address(ps_page->page),
+ PAGE_SIZE, true);
+ }
+ }
+}
+
/*
* e1000e_dump - Print registers, Tx-ring and Rx-ring
*/
@@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter)
(unsigned long long)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
- buffer_info->length, true);
+ 16, 1, buffer_info->skb->data,
+ buffer_info->skb->len, true);
}
/* Print Rx Ring Summary */
@@ -381,10 +399,8 @@ rx_ring_summary:
buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(buffer_info->dma),
- adapter->rx_ps_bsize0, true);
+ e1000e_dump_ps_pages(adapter,
+ buffer_info);
}
}
break;
@@ -444,12 +460,12 @@ rx_ring_summary:
(unsigned long long)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter))
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16,
1,
- phys_to_virt
- (buffer_info->dma),
+ buffer_info->skb->data,
adapter->rx_buffer_len,
true);
}
@@ -3501,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
/*
+ * Alignment of Tx data is on an arbitrary byte boundary with the
+ * maximum size per Tx descriptor limited only to the transmit
+ * allocation of the packet buffer minus 96 bytes with an upper
+ * limit of 24KB due to receive synchronization limitations.
+ */
+ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+ 24 << 10);
+
+ /*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
*/
@@ -4769,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
unsigned int first, unsigned int max_per_txd,
- unsigned int nr_frags, unsigned int mss)
+ unsigned int nr_frags)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -5007,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
+ BUG_ON(size > tx_ring->count);
+
if (e1000_desc_unused(tx_ring) >= size)
return 0;
return __e1000_maybe_stop_tx(tx_ring, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int first;
- unsigned int max_per_txd = E1000_MAX_PER_TXD;
- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
@@ -5040,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
mss = skb_shinfo(skb)->gso_size;
- /*
- * The controller does a simple calculation to
- * make sure there is enough room in the FIFO before
- * initiating the DMA for each buffer. The calc is:
- * 4 = ceil(buffer len/mss). To make sure we don't
- * overrun the FIFO, adjust the max buffer len if mss
- * drops.
- */
if (mss) {
u8 hdr_len;
- max_per_txd = min(mss << 2, max_per_txd);
- max_txd_pwr = fls(max_per_txd) - 1;
/*
* TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5081,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count++;
count++;
- count += TXD_USE_COUNT(len, max_txd_pwr);
+ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
- max_txd_pwr);
+ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+ adapter->tx_fifo_limit);
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
@@ -5128,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
/* if count is 0 then mapping error has occurred */
- count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+ nr_frags);
if (count) {
skb_tx_timestamp(skb);
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+ e1000_maybe_stop_tx(tx_ring,
+ (MAX_SKB_FRAGS *
+ DIV_ROUND_UP(PAGE_SIZE,
+ adapter->tx_fifo_limit) + 2));
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
@@ -6311,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.autoneg_advertised = 0x2f;
/* ring size defaults */
- adapter->rx_ring->count = 256;
- adapter->tx_ring->count = 256;
+ adapter->rx_ring->count = E1000_DEFAULT_RXD;
+ adapter->tx_ring->count = E1000_DEFAULT_TXD;
/*
* Initial Wake on LAN setting - If APM wake is enabled in
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 5e84eaac48c1..ba994fb4cec6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ size = 15;
+ }
+
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
nvm->opcode_bits = 8;
@@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
} else
nvm->type = e1000_nvm_flash_hw;
- /*
- * Check for invalid size
- */
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
- size = 15;
- }
-
/* NVM Function Pointers */
switch (hw->mac.type) {
case e1000_82580:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 10efcd88dca0..28394bea5253 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -156,8 +156,12 @@
: (0x0E018 + ((_n) * 0x40)))
#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
: (0x0E028 + ((_n) * 0x40)))
-#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8))
-#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8))
+#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+ (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+ (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index a19c84cad0e9..70591117051b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
if (igb_check_reset_block(hw)) {
- dev_err(&adapter->pdev->dev, "Cannot change link "
- "characteristics when SoL/IDER is active.\n");
+ dev_err(&adapter->pdev->dev,
+ "Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
if (val != (_test[pat] & write & mask)) {
- dev_err(&adapter->pdev->dev, "pattern test reg %04X "
- "failed: got 0x%08X expected 0x%08X\n",
+ dev_err(&adapter->pdev->dev,
+ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
return 1;
@@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
- dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
- " got 0x%08X expected 0x%08X\n", reg,
+ dev_err(&adapter->pdev->dev,
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
(val & mask), (write & mask));
*data = reg;
return 1;
@@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_STATUS, toggle);
after = rd32(E1000_STATUS) & toggle;
if (value != after) {
- dev_err(&adapter->pdev->dev, "failed STATUS register test "
- "got: 0x%08X expected: 0x%08X\n", after, value);
+ dev_err(&adapter->pdev->dev,
+ "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
break;
}
+ /* add small delay to avoid loopback test failure */
+ msleep(50);
+
/* force 1000, set loopback */
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
@@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
* sessions are active */
if (igb_check_reset_block(&adapter->hw)) {
dev_err(&adapter->pdev->dev,
- "Cannot do PHY loopback test "
- "when SoL/IDER is active.\n");
+ "Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
}
if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i210)) {
+ || (adapter->hw.mac.type == e1000_i211)) {
dev_err(&adapter->pdev->dev,
- "Loopback test not supported "
- "on this part at this time.\n");
+ "Loopback test not supported on this part at this time.\n");
*data = 0;
goto out;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1050411e7ca3..48cc4fb1a307 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter)
(u64)buffer_info->time_stamp,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+ if (netif_msg_pktdata(adapter) && buffer_info->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
- 16, 1, phys_to_virt(buffer_info->dma),
+ 16, 1, buffer_info->skb->data,
buffer_info->length, true);
}
}
@@ -547,18 +547,17 @@ rx_ring_summary:
(u64)buffer_info->dma,
buffer_info->skb, next_desc);
- if (netif_msg_pktdata(adapter)) {
+ if (netif_msg_pktdata(adapter) &&
+ buffer_info->dma && buffer_info->skb) {
print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1,
- phys_to_virt(buffer_info->dma),
- IGB_RX_HDR_LEN, true);
+ DUMP_PREFIX_ADDRESS,
+ 16, 1, buffer_info->skb->data,
+ IGB_RX_HDR_LEN, true);
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
- phys_to_virt(
- buffer_info->page_dma +
- buffer_info->page_offset),
+ page_address(buffer_info->page) +
+ buffer_info->page_offset,
PAGE_SIZE/2, true);
}
}
@@ -6235,7 +6234,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 50fc137501da..18bf08c9d7a4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
/* Set KX4/KX/KR support according to speed requested */
autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
autoc |= IXGBE_AUTOC_KX4_SUPP;
if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
(hw->phy.smart_speed_active == false))
autoc |= IXGBE_AUTOC_KR_SUPP;
+ }
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3b6784cf134a..4326f74f7137 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -396,11 +396,10 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_cont("\n");
if (netif_msg_pktdata(adapter) &&
- dma_unmap_len(tx_buffer, len) != 0)
+ tx_buffer->skb)
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(dma_unmap_addr(tx_buffer,
- dma)),
+ tx_buffer->skb->data,
dma_unmap_len(tx_buffer, len),
true);
}
@@ -474,10 +473,12 @@ rx_ring_summary:
(u64)rx_buffer_info->dma,
rx_buffer_info->skb);
- if (netif_msg_pktdata(adapter)) {
+ if (netif_msg_pktdata(adapter) &&
+ rx_buffer_info->dma) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS, 16, 1,
- phys_to_virt(rx_buffer_info->dma),
+ page_address(rx_buffer_info->page) +
+ rx_buffer_info->page_offset,
ixgbe_rx_bufsz(rx_ring), true);
}
}
@@ -1140,8 +1141,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
/* alloc new page for storage */
if (likely(!page)) {
- page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
- ixgbe_rx_pg_order(rx_ring));
+ page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
+ bi->skb, ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 3f9841d619ad..60ef64587412 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -352,7 +352,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
-
bi->skb = skb;
}
if (!bi->dma) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 770ee557924c..087b9e0669f1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2983,6 +2983,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
return 0;
out:
+#if defined(CONFIG_HAVE_CLK)
+ if (!IS_ERR(mp->clk)) {
+ clk_disable_unprepare(mp->clk);
+ clk_put(mp->clk);
+ }
+#endif
free_netdev(dev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 915e947b422d..9c656fe4983d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -69,16 +69,21 @@ static void poll_catas(unsigned long dev_ptr)
struct mlx4_priv *priv = mlx4_priv(dev);
if (readl(priv->catas_err.map)) {
- dump_err_buf(dev);
-
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
+ /* If the device is off-line, we cannot try to recover it */
+ if (pci_channel_offline(dev->pdev))
+ mod_timer(&priv->catas_err.timer,
+ round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
+ else {
+ dump_err_buf(dev);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_CATASTROPHIC_ERROR, 0);
- if (internal_err_reset) {
- spin_lock(&catas_lock);
- list_add(&priv->catas_err.list, &catas_list);
- spin_unlock(&catas_lock);
+ if (internal_err_reset) {
+ spin_lock(&catas_lock);
+ list_add(&priv->catas_err.list, &catas_list);
+ spin_unlock(&catas_lock);
- queue_work(mlx4_wq, &catas_work);
+ queue_work(mlx4_wq, &catas_work);
+ }
}
} else
mod_timer(&priv->catas_err.timer,
@@ -100,6 +105,10 @@ static void catas_reset(struct work_struct *work)
list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
struct pci_dev *pdev = priv->dev.pdev;
+ /* If the device is off-line, we cannot reset it */
+ if (pci_channel_offline(pdev))
+ continue;
+
ret = mlx4_restart_one(priv->dev.pdev);
/* 'priv' now is not valid */
if (ret)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 7e94987d030c..c8fef4353021 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -296,7 +296,12 @@ int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
static int cmd_pending(struct mlx4_dev *dev)
{
- u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
+ u32 status;
+
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
+
+ status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
return (status & swab32(1 << HCR_GO_BIT)) ||
(mlx4_priv(dev)->cmd.toggle ==
@@ -314,11 +319,29 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
mutex_lock(&cmd->hcr_mutex);
+ if (pci_channel_offline(dev->pdev)) {
+ /*
+ * Device is going through error recovery
+ * and cannot accept commands.
+ */
+ ret = -EIO;
+ goto out;
+ }
+
end = jiffies;
if (event)
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
+ if (pci_channel_offline(dev->pdev)) {
+ /*
+ * Device is going through error recovery
+ * and cannot accept commands.
+ */
+ ret = -EIO;
+ goto out;
+ }
+
if (time_after_eq(jiffies, end)) {
mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
@@ -431,14 +454,33 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
down(&priv->cmd.poll_sem);
+ if (pci_channel_offline(dev->pdev)) {
+ /*
+ * Device is going through error recovery
+ * and cannot accept commands.
+ */
+ err = -EIO;
+ goto out;
+ }
+
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
goto out;
end = msecs_to_jiffies(timeout) + jiffies;
- while (cmd_pending(dev) && time_before(jiffies, end))
+ while (cmd_pending(dev) && time_before(jiffies, end)) {
+ if (pci_channel_offline(dev->pdev)) {
+ /*
+ * Device is going through error recovery
+ * and cannot accept commands.
+ */
+ err = -EIO;
+ goto out;
+ }
+
cond_resched();
+ }
if (cmd_pending(dev)) {
err = -ETIMEDOUT;
@@ -532,6 +574,9 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout, int native)
{
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
+
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
if (mlx4_priv(dev)->cmd.use_events)
return mlx4_cmd_wait(dev, in_param, out_param,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8864d8b53737..edd9cb8d3e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -201,7 +201,7 @@ mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
filter->flow_id = flow_id;
- filter->id = priv->last_filter_id++;
+ filter->id = priv->last_filter_id++ % RPS_NO_FILTER;
list_add_tail(&filter->next, &priv->filters);
hlist_add_head(&filter->filter_chain,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f32e70300770..5aba5ecdf1e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* If source MAC is equal to our own MAC and not performing
* the selftest or flb disabled - drop the packet */
if (s_mac == priv->mac &&
- (!(dev->features & NETIF_F_LOOPBACK) ||
- !priv->validate_loopback))
+ !((dev->features & NETIF_F_LOOPBACK) ||
+ priv->validate_loopback))
goto next;
/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 019d856b1334..10bba09c44ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->cons = 0xffffffff;
ring->last_nr_txbb = 1;
ring->poll_cnt = 0;
- ring->blocked = 0;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
@@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
ring->cons += txbbs_skipped;
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
- /* Wakeup Tx queue if this ring stopped it */
- if (unlikely(ring->blocked)) {
- if ((u32) (ring->prod - ring->cons) <=
- ring->size - HEADROOM - MAX_DESC_TXBBS) {
- ring->blocked = 0;
- netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
- }
+ /*
+ * Wake up the Tx queue if it was stopped and at least one
+ * packet was completed
+ */
+ if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
}
}
@@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
- ring->blocked = 1;
priv->port_stats.queue_stopped++;
return NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 88b7b3e75ab1..daf417923661 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
}
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- u64 virt, int obj_size, int nobj, int reserved,
+ u64 virt, int obj_size, u32 nobj, int reserved,
int use_lowmem, int use_coherent)
{
int obj_per_chunk;
int num_icm;
unsigned chunk_size;
int i;
+ u64 size;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
@@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
table->coherent = use_coherent;
mutex_init(&table->mutex);
+ size = (u64) nobj * obj_size;
for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
chunk_size = MLX4_TABLE_CHUNK_SIZE;
- if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
- chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+ if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
+ chunk_size = PAGE_ALIGN(size -
+ i * MLX4_TABLE_CHUNK_SIZE);
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 19e4efc0b342..a67744f53506 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- u64 virt, int obj_size, int nobj, int reserved,
+ u64 virt, int obj_size, u32 nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e8f8ebb4ae65..827b72dfce99 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
"on this HCA, aborting.\n");
return -EINVAL;
}
- if (port_type[i] == MLX4_PORT_TYPE_ETH &&
- port_type[i + 1] == MLX4_PORT_TYPE_IB)
- return -EINVAL;
}
}
@@ -1817,6 +1814,9 @@ static int mlx4_get_ownership(struct mlx4_dev *dev)
void __iomem *owner;
u32 ret;
+ if (pci_channel_offline(dev->pdev))
+ return -EIO;
+
owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
@@ -1833,6 +1833,9 @@ static void mlx4_free_ownership(struct mlx4_dev *dev)
{
void __iomem *owner;
+ if (pci_channel_offline(dev->pdev))
+ return;
+
owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
MLX4_OWNER_SIZE);
if (!owner) {
@@ -2279,11 +2282,33 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
+static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ mlx4_remove_one(pdev);
+
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
+{
+ int ret = __mlx4_init_one(pdev, NULL);
+
+ return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static struct pci_error_handlers mlx4_err_handler = {
+ .error_detected = mlx4_pci_err_detected,
+ .slot_reset = mlx4_pci_slot_reset,
+};
+
static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .remove = __devexit_p(mlx4_remove_one)
+ .remove = __devexit_p(mlx4_remove_one),
+ .err_handler = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 4ec3835e1bc2..a018ea2a43de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
/* Entry already exists, add to duplicates */
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
- if (!dqp)
+ if (!dqp) {
+ err = -ENOMEM;
goto out_mailbox;
+ }
dqp->qpn = qpn;
list_add_tail(&dqp->list, &entry->duplicates);
found = true;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 59ebc0339638..4d9df8f2a126 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -249,7 +249,7 @@ struct mlx4_bitmap {
struct mlx4_buddy {
unsigned long **bits;
unsigned int *num_free;
- int max_order;
+ u32 max_order;
spinlock_t lock;
};
@@ -258,7 +258,7 @@ struct mlx4_icm;
struct mlx4_icm_table {
u64 virt;
int num_icm;
- int num_obj;
+ u32 num_obj;
int obj_size;
int lowmem;
int coherent;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5f1ab105debc..9d27e42264e2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -248,7 +248,6 @@ struct mlx4_en_tx_ring {
u32 doorbell_qpn;
void *buf;
u16 poll_cnt;
- int blocked;
struct mlx4_en_tx_info *tx_info;
u8 *bounce_buf;
u32 last_nr_txbb;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index af55b7ce5341..c202d3ad2a0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -37,6 +37,7 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/vmalloc.h>
#include <linux/mlx4/cmd.h>
@@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
- buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
+ buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *),
GFP_KERNEL);
buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL);
@@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
for (i = 0; i <= buddy->max_order; ++i) {
s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
- if (!buddy->bits[i])
- goto err_out_free;
- bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i));
+ buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN);
+ if (!buddy->bits[i]) {
+ buddy->bits[i] = vzalloc(s * sizeof(long));
+ if (!buddy->bits[i])
+ goto err_out_free;
+ }
}
set_bit(0, buddy->bits[buddy->max_order]);
@@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i]))
+ vfree(buddy->bits[i]);
+ else
+ kfree(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ if (is_vmalloc_addr(buddy->bits[i]))
+ vfree(buddy->bits[i]);
+ else
+ kfree(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtts /
+ ilog2((u32)dev->caps.num_mtts /
(1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
@@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %d is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
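
Not part of the patch: the allocation pattern the mr.c hunks adopt, reduced to a pair of helpers -- try the slab allocator quietly, fall back to vmalloc for large bitmaps, and free through whichever allocator actually provided the memory. Later kernels wrap this up in kvmalloc()/kvfree(); here it is open-coded, as in the patch.

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>           /* is_vmalloc_addr() */

static unsigned long *example_bitmap_alloc(size_t nlongs)
{
    unsigned long *p;

    /* Quiet slab attempt first; large orders may legitimately fail. */
    p = kcalloc(nlongs, sizeof(*p), GFP_KERNEL | __GFP_NOWARN);
    if (!p)
        p = vzalloc(nlongs * sizeof(*p));
    return p;
}

static void example_bitmap_free(unsigned long *p)
{
    if (is_vmalloc_addr(p))
        vfree(p);
    else
        kfree(p);
}
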
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 9ee4725363d5..8e0c3cc2a1ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
u64 size;
u64 start;
int type;
- int num;
+ u32 num;
int log_num;
};
@@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
si_meminfo(&si);
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
- min(1UL << 31,
+ min(1UL << (31 - log_mtts_per_seg),
si.totalram >> (log_mtts_per_seg - 1))));
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 802498293528..34ee09bae36e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev,
}
/*
- * Adjust port configuration:
- * If port 1 sensed nothing and port 2 is IB, set both as IB
- * If port 2 sensed nothing and port 1 is Eth, set both as Eth
- */
- if (stype[0] == MLX4_PORT_TYPE_ETH) {
- for (i = 1; i < dev->caps.num_ports; i++)
- stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH;
- }
- if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) {
- for (i = 0; i < dev->caps.num_ports - 1; i++)
- stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB;
- }
-
- /*
* If sensed nothing, remain in current configuration.
*/
for (i = 0; i < dev->caps.num_ports; i++)
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4069edab229e..53743f7a2ca9 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev)
"phy-mode", NULL);
if (mode && !strcmp(mode, "mii"))
return PHY_INTERFACE_MODE_MII;
- return PHY_INTERFACE_MODE_RMII;
}
-
- /* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT
- return PHY_INTERFACE_MODE_MII;
-#else
return PHY_INTERFACE_MODE_RMII;
-#endif
}
static bool use_iram_for_net(struct device *dev)
{
if (dev && dev->of_node)
return of_property_read_bool(dev->of_node, "use-iram");
-
- /* non-DT */
-#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET
- return true;
-#else
return false;
-#endif
}
/* Receive Status information word */
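
Not part of the patch: the DT-only lookups that remain once the #ifdef fallbacks above are removed, as a standalone sketch. The property names match the binding used by the driver; the helper names are made up.

#include <linux/of.h>
#include <linux/phy.h>
#include <linux/string.h>

static phy_interface_t example_phy_mode(struct device_node *np)
{
    const char *mode = of_get_property(np, "phy-mode", NULL);

    if (mode && !strcmp(mode, "mii"))
        return PHY_INTERFACE_MODE_MII;
    return PHY_INTERFACE_MODE_RMII;     /* default when absent or unknown */
}

static bool example_use_iram(struct device_node *np)
{
    return np ? of_property_read_bool(np, "use-iram") : false;
}
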
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index cd827ff4a021..c42bbb16cdae 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -6,19 +6,21 @@
* Copyright (C) 2009 Cavium Networks
*/
-#include <linux/capability.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/capability.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
+#include <linux/spinlock.h>
#include <linux/if_vlan.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
-#include <linux/spinlock.h>
+#include <linux/io.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
@@ -58,8 +60,56 @@ union mgmt_port_ring_entry {
} s;
};
+#define MIX_ORING1 0x0
+#define MIX_ORING2 0x8
+#define MIX_IRING1 0x10
+#define MIX_IRING2 0x18
+#define MIX_CTL 0x20
+#define MIX_IRHWM 0x28
+#define MIX_IRCNT 0x30
+#define MIX_ORHWM 0x38
+#define MIX_ORCNT 0x40
+#define MIX_ISR 0x48
+#define MIX_INTENA 0x50
+#define MIX_REMCNT 0x58
+#define MIX_BIST 0x78
+
+#define AGL_GMX_PRT_CFG 0x10
+#define AGL_GMX_RX_FRM_CTL 0x18
+#define AGL_GMX_RX_FRM_MAX 0x30
+#define AGL_GMX_RX_JABBER 0x38
+#define AGL_GMX_RX_STATS_CTL 0x50
+
+#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
+#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
+#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
+
+#define AGL_GMX_RX_ADR_CTL 0x100
+#define AGL_GMX_RX_ADR_CAM_EN 0x108
+#define AGL_GMX_RX_ADR_CAM0 0x180
+#define AGL_GMX_RX_ADR_CAM1 0x188
+#define AGL_GMX_RX_ADR_CAM2 0x190
+#define AGL_GMX_RX_ADR_CAM3 0x198
+#define AGL_GMX_RX_ADR_CAM4 0x1a0
+#define AGL_GMX_RX_ADR_CAM5 0x1a8
+
+#define AGL_GMX_TX_STATS_CTL 0x268
+#define AGL_GMX_TX_CTL 0x270
+#define AGL_GMX_TX_STAT0 0x280
+#define AGL_GMX_TX_STAT1 0x288
+#define AGL_GMX_TX_STAT2 0x290
+#define AGL_GMX_TX_STAT3 0x298
+#define AGL_GMX_TX_STAT4 0x2a0
+#define AGL_GMX_TX_STAT5 0x2a8
+#define AGL_GMX_TX_STAT6 0x2b0
+#define AGL_GMX_TX_STAT7 0x2b8
+#define AGL_GMX_TX_STAT8 0x2c0
+#define AGL_GMX_TX_STAT9 0x2c8
+
struct octeon_mgmt {
struct net_device *netdev;
+ u64 mix;
+ u64 agl;
int port;
int irq;
u64 *tx_ring;
@@ -85,31 +135,34 @@ struct octeon_mgmt {
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
struct phy_device *phydev;
+ struct device_node *phy_np;
+ resource_size_t mix_phys;
+ resource_size_t mix_size;
+ resource_size_t agl_phys;
+ resource_size_t agl_size;
};
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
- int port = p->port;
union cvmx_mixx_intena mix_intena;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
- mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
mix_intena.s.ithena = enable ? 1 : 0;
- cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
spin_unlock_irqrestore(&p->lock, flags);
}
static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
- int port = p->port;
union cvmx_mixx_intena mix_intena;
unsigned long flags;
spin_lock_irqsave(&p->lock, flags);
- mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+ mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
mix_intena.s.othena = enable ? 1 : 0;
- cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
spin_unlock_irqrestore(&p->lock, flags);
}
@@ -146,7 +199,6 @@ static unsigned int ring_size_to_bytes(unsigned int ring_size)
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
unsigned int size;
@@ -177,24 +229,23 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
p->rx_current_fill++;
/* Ring the bell. */
- cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+ cvmx_write_csr(p->mix + MIX_IRING2, 1);
}
}
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
- int port = p->port;
union cvmx_mixx_orcnt mix_orcnt;
union mgmt_port_ring_entry re;
struct sk_buff *skb;
int cleaned = 0;
unsigned long flags;
- mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
while (mix_orcnt.s.orcnt) {
spin_lock_irqsave(&p->tx_list.lock, flags);
- mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
if (mix_orcnt.s.orcnt == 0) {
spin_unlock_irqrestore(&p->tx_list.lock, flags);
@@ -214,7 +265,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
mix_orcnt.s.orcnt = 1;
/* Acknowledge to hardware that we have the buffer. */
- cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
+ cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
p->tx_current_fill--;
spin_unlock_irqrestore(&p->tx_list.lock, flags);
@@ -224,7 +275,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
dev_kfree_skb_any(skb);
cleaned++;
- mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
}
if (cleaned && netif_queue_stopped(p->netdev))
@@ -241,13 +292,12 @@ static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
unsigned long flags;
u64 drop, bad;
/* These reads also clear the count registers. */
- drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
- bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
+ drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
+ bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
if (drop || bad) {
/* Do an atomic update. */
@@ -261,15 +311,14 @@ static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
unsigned long flags;
union cvmx_agl_gmx_txx_stat0 s0;
union cvmx_agl_gmx_txx_stat1 s1;
/* These reads also clear the count registers. */
- s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
- s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
+ s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
+ s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
/* Do an atomic update. */
@@ -308,7 +357,6 @@ static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
- int port = p->port;
struct net_device *netdev = p->netdev;
union cvmx_mixx_ircnt mix_ircnt;
union mgmt_port_ring_entry re;
@@ -381,18 +429,17 @@ done:
/* Tell the hardware we processed a packet. */
mix_ircnt.u64 = 0;
mix_ircnt.s.ircnt = 1;
- cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
+ cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
return rc;
}
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
- int port = p->port;
unsigned int work_done = 0;
union cvmx_mixx_ircnt mix_ircnt;
int rc;
- mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
while (work_done < budget && mix_ircnt.s.ircnt) {
rc = octeon_mgmt_receive_one(p);
@@ -400,7 +447,7 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
work_done++;
/* Check for more packets. */
- mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+ mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
}
octeon_mgmt_rx_fill_ring(p->netdev);
@@ -434,16 +481,16 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
union cvmx_agl_gmx_bist agl_gmx_bist;
mix_ctl.u64 = 0;
- cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
do {
- mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
} while (mix_ctl.s.busy);
mix_ctl.s.reset = 1;
- cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
- cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
+ cvmx_read_csr(p->mix + MIX_CTL);
cvmx_wait(64);
- mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
+ mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
if (mix_bist.u64)
dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
(unsigned long long)mix_bist.u64);
@@ -474,7 +521,6 @@ static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
unsigned long flags;
@@ -520,29 +566,29 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
spin_lock_irqsave(&p->lock, flags);
/* Disable packet I/O. */
- agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
prev_packet_enable = agl_gmx_prtx.s.en;
agl_gmx_prtx.s.en = 0;
- cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
adr_ctl.u64 = 0;
adr_ctl.s.cam_mode = cam_mode;
adr_ctl.s.mcst = multicast_mode;
adr_ctl.s.bcst = 1; /* Allow broadcast */
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
/* Restore packet I/O. */
agl_gmx_prtx.s.en = prev_packet_enable;
- cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
spin_unlock_irqrestore(&p->lock, flags);
}
@@ -564,7 +610,6 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
/*
@@ -580,8 +625,8 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
(size_without_fcs + 7) & 0xfff8);
return 0;
@@ -591,14 +636,13 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
struct net_device *netdev = dev_id;
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union cvmx_mixx_isr mixx_isr;
- mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
+ mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
- cvmx_read_csr(CVMX_MIXX_ISR(port));
+ cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
+ cvmx_read_csr(p->mix + MIX_ISR);
if (mixx_isr.s.irthresh) {
octeon_mgmt_disable_rx_irq(p);
@@ -629,7 +673,6 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union cvmx_agl_gmx_prtx_cfg prtx_cfg;
unsigned long flags;
int link_changed = 0;
@@ -640,11 +683,9 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
link_changed = 1;
if (p->last_duplex != p->phydev->duplex) {
p->last_duplex = p->phydev->duplex;
- prtx_cfg.u64 =
- cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
prtx_cfg.s.duplex = p->phydev->duplex;
- cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
- prtx_cfg.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}
} else {
if (p->last_link)
@@ -670,18 +711,16 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- char phy_id[MII_BUS_ID_SIZE + 3];
- if (octeon_is_simulation()) {
+ if (octeon_is_simulation() || p->phy_np == NULL) {
/* No PHYs in the simulator. */
netif_carrier_on(netdev);
return 0;
}
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port);
-
- p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ p->phydev = of_phy_connect(netdev, p->phy_np,
+ octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(p->phydev)) {
p->phydev = NULL;
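
Not part of the patch: a sketch of the DT-driven PHY attach that replaces the string-built PHY id above. The adjust_link callback and the error convention are assumptions; of_phy_connect() typically returns NULL when the PHY cannot be found or attached.

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

static int example_attach_phy(struct net_device *ndev,
                              struct device_node *phy_np,
                              void (*adjust_link)(struct net_device *))
{
    struct phy_device *phydev;

    if (!phy_np)
        return -ENODEV;         /* no "phy-handle" in the DT node */

    phydev = of_phy_connect(ndev, phy_np, adjust_link, 0,
                            PHY_INTERFACE_MODE_MII);
    if (!phydev)
        return -ENODEV;

    return 0;
}
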
@@ -737,14 +776,14 @@ static int octeon_mgmt_open(struct net_device *netdev)
octeon_mgmt_reset_hw(p);
- mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
/* Bring it out of reset if needed. */
if (mix_ctl.s.reset) {
mix_ctl.s.reset = 0;
- cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
do {
- mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
} while (mix_ctl.s.reset);
}
@@ -755,17 +794,17 @@ static int octeon_mgmt_open(struct net_device *netdev)
oring1.u64 = 0;
oring1.s.obase = p->tx_ring_handle >> 3;
oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
- cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+ cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
iring1.u64 = 0;
iring1.s.ibase = p->rx_ring_handle >> 3;
iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
- cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+ cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
/* Disable packet I/O. */
- prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
prtx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
octeon_mgmt_set_mac_address(netdev, &sa);
@@ -782,7 +821,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
mix_ctl.s.nbtarb = 0; /* Arbitration mode */
/* MII CB-request FIFO programmable high watermark */
mix_ctl.s.mrq_hwm = 1;
- cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
@@ -809,16 +848,16 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Clear statistics. */
/* Clear on read. */
- cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
- cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
- cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
- cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
- cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
/* Clear any pending interrupts */
- cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
+ cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
netdev)) {
@@ -829,18 +868,18 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Interrupt every single RX packet */
mix_irhwm.u64 = 0;
mix_irhwm.s.irhwm = 0;
- cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
+ cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
/* Interrupt when we have 1 or more packets to clean. */
mix_orhwm.u64 = 0;
mix_orhwm.s.orhwm = 1;
- cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
+ cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
/* Enable receive and transmit interrupts */
mix_intena.u64 = 0;
mix_intena.s.ithena = 1;
mix_intena.s.othena = 1;
- cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
/* Enable packet I/O. */
@@ -871,7 +910,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
* frame. GMX checks that the PREAMBLE is sent correctly.
*/
rxx_frm_ctl.s.pre_chk = 1;
- cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
/* Enable the AGL block */
agl_gmx_inf_mode.u64 = 0;
@@ -879,13 +918,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
/* Configure the port duplex and enables */
- prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
prtx_cfg.s.tx_en = 1;
prtx_cfg.s.rx_en = 1;
prtx_cfg.s.en = 1;
p->last_duplex = 1;
prtx_cfg.s.duplex = p->last_duplex;
- cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
p->last_link = 0;
netif_carrier_off(netdev);
@@ -949,7 +988,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int port = p->port;
union mgmt_port_ring_entry re;
unsigned long flags;
int rv = NETDEV_TX_BUSY;
@@ -993,7 +1031,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
netdev->stats.tx_bytes += skb->len;
/* Ring the bell. */
- cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+ cvmx_write_csr(p->mix + MIX_ORING2, 1);
rv = NETDEV_TX_OK;
out:
@@ -1071,10 +1109,14 @@ static const struct net_device_ops octeon_mgmt_ops = {
static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
{
- struct resource *res_irq;
struct net_device *netdev;
struct octeon_mgmt *p;
- int i;
+ const __be32 *data;
+ const u8 *mac;
+ struct resource *res_mix;
+ struct resource *res_agl;
+ int len;
+ int result;
netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
if (netdev == NULL)
@@ -1088,14 +1130,63 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
p->netdev = netdev;
p->dev = &pdev->dev;
- p->port = pdev->id;
+ data = of_get_property(pdev->dev.of_node, "cell-index", &len);
+ if (data && len == sizeof(*data)) {
+ p->port = be32_to_cpup(data);
+ } else {
+ dev_err(&pdev->dev, "no 'cell-index' property\n");
+ result = -ENXIO;
+ goto err;
+ }
+
snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
+ result = platform_get_irq(pdev, 0);
+ if (result < 0)
+ goto err;
+
+ p->irq = result;
+
+ res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mix == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_agl == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ p->mix_phys = res_mix->start;
+ p->mix_size = resource_size(res_mix);
+ p->agl_phys = res_agl->start;
+ p->agl_size = resource_size(res_agl);
+
+
+ if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
+ res_mix->name)) {
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_mix->name);
+ result = -ENXIO;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
+ res_agl->name)) {
+ result = -ENXIO;
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_agl->name);
goto err;
+ }
+
+
+ p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
+ p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
- p->irq = res_irq->start;
spin_lock_init(&p->lock);
skb_queue_head_init(&p->tx_list);
@@ -1108,24 +1199,26 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
netdev->netdev_ops = &octeon_mgmt_ops;
netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
- /* The mgmt ports get the first N MACs. */
- for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
- netdev->dev_addr[5] += p->port;
+ mac = of_get_mac_address(pdev->dev.of_node);
+
+ if (mac)
+ memcpy(netdev->dev_addr, mac, 6);
- if (p->port >= octeon_bootinfo->mac_addr_count)
- dev_err(&pdev->dev,
- "Error %s: Using MAC outside of the assigned range: %pM\n",
- netdev->name, netdev->dev_addr);
+ p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (register_netdev(netdev))
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ result = register_netdev(netdev);
+ if (result)
goto err;
dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
return 0;
+
err:
free_netdev(netdev);
- return -ENOENT;
+ return result;
}
static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
@@ -1137,10 +1230,19 @@ static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id octeon_mgmt_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-mix",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
+
static struct platform_driver octeon_mgmt_driver = {
.driver = {
.name = "octeon_mgmt",
.owner = THIS_MODULE,
+ .of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
.remove = __devexit_p(octeon_mgmt_remove),
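
Not part of the patch: the devm-based resource handling the probe above switches to, as a self-contained sketch -- IRQ via platform_get_irq(), MMIO via platform_get_resource() plus devm_request_mem_region() and devm_ioremap(), so nothing has to be released by hand on the error path. Function and variable names are illustrative.

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

static int example_map_regs(struct platform_device *pdev,
                            void __iomem **base, int *irq)
{
    struct resource *res;

    *irq = platform_get_irq(pdev, 0);
    if (*irq < 0)
        return *irq;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -ENXIO;

    if (!devm_request_mem_region(&pdev->dev, res->start,
                                 resource_size(res), res->name))
        return -EBUSY;

    *base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
    if (!*base)
        return -ENOMEM;

    return 0;
}
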
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 9dbf38c10a68..24b787be6062 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -129,7 +129,6 @@ static int pch_gbe_set_settings(struct net_device *netdev,
hw->mac.link_duplex = ecmd->duplex;
hw->phy.autoneg_advertised = ecmd->advertising;
hw->mac.autoneg = ecmd->autoneg;
- pch_gbe_hal_phy_sw_reset(hw);
/* reset the link */
if (netif_running(adapter->netdev)) {
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b1006563f736..feb85d56c750 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -26,7 +26,7 @@
#include <linux/ptp_classify.h>
#endif
-#define DRV_VERSION "1.00"
+#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */
@@ -35,7 +35,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
#define PCH_GBE_DMA_PADDING 2
-#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
+#define PCH_GBE_WATCHDOG_PERIOD (5 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
@@ -1579,7 +1579,8 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
struct sk_buff *skb;
unsigned int i;
unsigned int cleaned_count = 0;
- bool cleaned = true;
+ bool cleaned = false;
+ int unused, thresh;
pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
@@ -1588,10 +1589,36 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
pr_debug("gbec_status:0x%04x dma_status:0x%04x\n",
tx_desc->gbec_status, tx_desc->dma_status);
+ unused = PCH_GBE_DESC_UNUSED(tx_ring);
+ thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
+ if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh))
+ { /* current marked clean, tx queue filling up, do extra clean */
+ int j, k;
+ if (unused < 8) { /* tx queue nearly full */
+ pr_debug("clean_tx: transmit queue warning (%x,%x) unused=%d\n",
+ tx_ring->next_to_clean,tx_ring->next_to_use,unused);
+ }
+
+ /* current marked clean, scan for more that need cleaning. */
+ k = i;
+ for (j = 0; j < PCH_GBE_TX_WEIGHT; j++)
+ {
+ tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
+ if (tx_desc->gbec_status != DSC_INIT16) break; /*found*/
+ if (++k >= tx_ring->count) k = 0; /*increment, wrap*/
+ }
+ if (j < PCH_GBE_TX_WEIGHT) {
+ pr_debug("clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
+ unused,j, i,k, tx_ring->next_to_use, tx_desc->gbec_status);
+ i = k; /*found one to clean, usu gbec_status==2000.*/
+ }
+ }
+
while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status);
buffer_info = &tx_ring->buffer_info[i];
skb = buffer_info->skb;
+ cleaned = true;
if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
adapter->stats.tx_aborted_errors++;
@@ -1639,18 +1666,21 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
}
pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n",
cleaned_count);
- /* Recover from running out of Tx resources in xmit_frame */
- spin_lock(&tx_ring->tx_lock);
- if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) {
- netif_wake_queue(adapter->netdev);
- adapter->stats.tx_restart_count++;
- pr_debug("Tx wake queue\n");
- }
+ if (cleaned_count > 0) { /*skip this if nothing cleaned*/
+ /* Recover from running out of Tx resources in xmit_frame */
+ spin_lock(&tx_ring->tx_lock);
+ if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
+ {
+ netif_wake_queue(adapter->netdev);
+ adapter->stats.tx_restart_count++;
+ pr_debug("Tx wake queue\n");
+ }
- tx_ring->next_to_clean = i;
+ tx_ring->next_to_clean = i;
- pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
- spin_unlock(&tx_ring->tx_lock);
+ pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean);
+ spin_unlock(&tx_ring->tx_lock);
+ }
return cleaned;
}
@@ -1988,6 +2018,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
void pch_gbe_down(struct pch_gbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
/* signal that we're down so the interrupt handler does not
@@ -2004,7 +2035,8 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
netif_carrier_off(netdev);
netif_stop_queue(netdev);
- pch_gbe_reset(adapter);
+ if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
+ pch_gbe_reset(adapter);
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
@@ -2127,13 +2159,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
- if (unlikely(skb->len > (adapter->hw.mac.max_frame_size - 4))) {
- pr_err("Transfer length Error: skb len: %d > max: %d\n",
- skb->len, adapter->hw.mac.max_frame_size);
- dev_kfree_skb_any(skb);
- adapter->stats.tx_length_errors++;
- return NETDEV_TX_OK;
- }
if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
/* Collision - tell upper layer to requeue */
return NETDEV_TX_LOCKED;
@@ -2387,7 +2412,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
- if (!cleaned)
+ if (cleaned)
work_done = budget;
/* If no Tx and not enough Rx work done,
* exit the polling mode
@@ -2793,6 +2818,7 @@ static int __init pch_gbe_init_module(void)
{
int ret;
+ pr_info("EG20T PCH Gigabit Ethernet Driver - version %s\n",DRV_VERSION);
ret = pci_register_driver(&pch_gbe_driver);
if (copybreak != PCH_GBE_COPYBREAK_DEFAULT) {
if (copybreak == 0) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 3769f5711cc3..b53a3b60b648 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4682,6 +4682,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
ndev->features = ndev->hw_features |
NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 46df3a04030c..24c2305d7948 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -8,7 +8,7 @@ config SH_ETH
(CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
- CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
+ CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779)
select CRC32
select NET_CORE
select MII
@@ -18,4 +18,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- and R8A7740.
+ R8A7740 and R8A7779.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index af0b867a6cf6..bad8f2eec9b4 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
#endif
/* There is CPU dependent code */
-#if defined(CONFIG_CPU_SUBTYPE_SH7724)
+#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
@@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev)
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
+ unsigned int bits = ECMR_RTM;
+
+#if defined(CONFIG_ARCH_R8A7779)
+ bits |= ECMR_ELB;
+#endif
switch (mdp->speed) {
case 10: /* 10BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
break;
case 100:/* 100BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
break;
default:
break;
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
index 698edbbfc149..d6e50de71186 100644
--- a/drivers/net/ethernet/seeq/seeq8005.c
+++ b/drivers/net/ethernet/seeq/seeq8005.c
@@ -736,9 +736,7 @@ MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
int __init init_module(void)
{
dev_seeq = seeq8005_probe(-1);
- if (IS_ERR(dev_seeq))
- return PTR_ERR(dev_seeq);
- return 0;
+ return PTR_RET(dev_seeq);
}
void __exit cleanup_module(void)
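
Not part of the patch: PTR_RET() (renamed PTR_ERR_OR_ZERO() in later kernels) is simply shorthand for the two-branch tail it replaces above; an equivalent open-coded form is shown below.

#include <linux/err.h>

static inline int example_ptr_ret(const void *ptr)
{
    if (IS_ERR(ptr))
        return PTR_ERR(ptr);    /* propagate the encoded errno */
    return 0;                   /* valid pointer: success */
}
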
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 70554a1b2b02..65a8d49106a4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
goto fail2;
}
+ BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+ if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+ rc = -EINVAL;
+ goto fail3;
+ }
efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->irq = efx->pci_dev->irq;
net_dev->netdev_ops = &efx_netdev_ops;
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
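
Not part of the patch: the two kinds of sanity check added in efx_probe_all() above, in isolation. BUILD_BUG_ON() fails the build for conditions built from compile-time constants, while WARN_ON() catches conditions that depend on the probed hardware and can only be evaluated at run time. The constants below are illustrative.

#include <linux/bug.h>
#include <linux/errno.h>

#define EXAMPLE_DEFAULT_ENTRIES 512     /* illustrative */
#define EXAMPLE_RXQ_MIN         128

static int example_check_ring_sizes(unsigned int txq_min_runtime)
{
    /* Both operands are constants: reject bad configs at build time. */
    BUILD_BUG_ON(EXAMPLE_DEFAULT_ENTRIES < EXAMPLE_RXQ_MIN);

    /* txq_min_runtime depends on the NIC variant: check at run time. */
    if (WARN_ON(EXAMPLE_DEFAULT_ENTRIES < txq_min_runtime))
        return -EINVAL;

    return 0;
}
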
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index be8f9158a714..70755c97251a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_EVQ_SIZE 16384UL
#define EFX_MIN_EVQ_SIZE 512UL
-/* The smallest [rt]xq_entries that the driver supports. Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb. Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports. RX minimum
+ * is a bit arbitrary. For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
extern int efx_probe_filters(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 10536f93b561..5faedd855b77 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
struct ethtool_ringparam *ring)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ u32 txq_entries;
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
ring->tx_pending > EFX_MAX_DMAQ_SIZE)
return -EINVAL;
- if (ring->rx_pending < EFX_MIN_RING_SIZE ||
- ring->tx_pending < EFX_MIN_RING_SIZE) {
+ if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
netif_err(efx, drv, efx->net_dev,
- "TX and RX queues cannot be smaller than %ld\n",
- EFX_MIN_RING_SIZE);
+ "RX queues cannot be smaller than %u\n",
+ EFX_RXQ_MIN_ENT);
return -EINVAL;
}
- return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+ txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+ if (txq_entries != ring->tx_pending)
+ netif_warn(efx, drv, efx->net_dev,
+ "increasing TX queue size to minimum of %u\n",
+ txq_entries);
+
+ return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
}
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
@@ -857,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
&ip_entry->ip4dst, &ip_entry->pdst);
if (rc != 0) {
rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
- &ip_entry->ip4dst, &ip_entry->pdst);
+ &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+ &ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = ~0;
ip_mask->psrc = ~0;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 9b225a7769f7..18713436b443 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
return len;
}
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+ /* Header and payload descriptor for each output segment, plus
+ * one for every input fragment boundary within a segment
+ */
+ unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+ /* Possibly one more per segment for the alignment workaround */
+ if (EFX_WORKAROUND_5391(efx))
+ max_descs += EFX_TSO_MAX_SEGS;
+
+ /* Possibly more for PCIe page boundaries within input fragments */
+ if (PAGE_SIZE > EFX_PAGE_SIZE)
+ max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+ DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+ return max_descs;
+}
+
/*
* Add a socket buffer to a TX queue
*
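
Not part of the patch: a worked instance of the budget efx_tx_max_skb_descs() computes above, with the two optional workaround terms dropped. On 4 KiB pages MAX_SKB_FRAGS is around 17, so the bound is roughly 100 * 2 + 17 = 217 descriptors, and EFX_TXQ_MIN_ENT() therefore demands a TX ring of about twice that.

#include <linux/skbuff.h>       /* MAX_SKB_FRAGS */

#define EXAMPLE_TSO_MAX_SEGS    100

static unsigned int example_tx_max_skb_descs(void)
{
    /* Header + payload descriptor per output segment, plus one per
     * possible input fragment boundary; workaround terms omitted. */
    return EXAMPLE_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
}
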
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e2d083228f3a..719be3912aa9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9820ec842cc0..223adf95fd03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -20,6 +20,10 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
struct dma_desc {
/* Receive descriptor */
union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
* is not calculated */
cic_full = 3, /* IP header and pseudoheader */
};
+
+#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index dd8d6e19dff6..7ee9499a6e38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -27,6 +27,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
#if defined(CONFIG_STMMAC_RING)
static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
{
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
#endif
+
+#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc7..2ec6aeae349e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
#include <linux/phy.h>
#include "common.h"
@@ -119,3 +122,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index f90fcb5f9573..0e4cacedc1f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -19,6 +19,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
#include <linux/phy.h>
#include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 34
+#define DWMAC_CORE_3_40 0x34
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e678ce39d014..e49c9a0fd6ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a38352024cb8..67995ef25251 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __MMC_H__
+#define __MMC_H__
+
/* MMC control register */
/* When set, all counter are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index c07cfe989f6e..0c74a702d461 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -33,7 +33,7 @@
#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK 0xffffffff
+#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
/* To mask all interrupts. */
void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
- writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ab4c376cb276..e872e1da3137 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
#define STMMAC_RESOURCE_NAME "stmmaceth"
#define DRV_MODULE_VERSION "March_2012"
@@ -82,9 +85,7 @@ struct stmmac_priv {
struct stmmac_counters mmc;
struct dma_features dma_cap;
int hw_cap_support;
-#ifdef CONFIG_HAVE_CLK
struct clk *stmmac_clk;
-#endif
int clk_csr;
int synopsys_id;
struct timer_list eee_ctrl_timer;
@@ -113,46 +114,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
-#ifdef CONFIG_HAVE_CLK
-static inline int stmmac_clk_enable(struct stmmac_priv *priv)
-{
- if (!IS_ERR(priv->stmmac_clk))
- return clk_prepare_enable(priv->stmmac_clk);
-
- return 0;
-}
-
-static inline void stmmac_clk_disable(struct stmmac_priv *priv)
-{
- if (IS_ERR(priv->stmmac_clk))
- return;
-
- clk_disable_unprepare(priv->stmmac_clk);
-}
-static inline int stmmac_clk_get(struct stmmac_priv *priv)
-{
- priv->stmmac_clk = clk_get(priv->device, NULL);
-
- if (IS_ERR(priv->stmmac_clk))
- return PTR_ERR(priv->stmmac_clk);
-
- return 0;
-}
-#else
-static inline int stmmac_clk_enable(struct stmmac_priv *priv)
-{
- return 0;
-}
-static inline void stmmac_clk_disable(struct stmmac_priv *priv)
-{
-}
-static inline int stmmac_clk_get(struct stmmac_priv *priv)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_CLK */
-
-
#ifdef CONFIG_STMMAC_PLATFORM
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
@@ -208,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
{
}
#endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f6b04c1a3672..c136162e6473 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,6 +28,7 @@
https://bugzilla.stlinux.com/
*******************************************************************************/
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
@@ -173,12 +174,8 @@ static void stmmac_verify_args(void)
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
-#ifdef CONFIG_HAVE_CLK
u32 clk_rate;
- if (IS_ERR(priv->stmmac_clk))
- return;
-
clk_rate = clk_get_rate(priv->stmmac_clk);
/* Platform provided default clk_csr would be assumed valid
@@ -200,7 +197,6 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
* we can not estimate the proper divider as it is not known
* the frequency of clk_csr_i. So we do not change the default
* divider. */
-#endif
}
#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -1070,7 +1066,7 @@ static int stmmac_open(struct net_device *dev)
} else
priv->tm->enable = 1;
#endif
- stmmac_clk_enable(priv);
+ clk_enable(priv->stmmac_clk);
stmmac_check_ether_addr(priv);
@@ -1192,7 +1188,7 @@ open_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
- stmmac_clk_disable(priv);
+ clk_disable(priv->stmmac_clk);
return ret;
}
@@ -1250,7 +1246,7 @@ static int stmmac_release(struct net_device *dev)
#ifdef CONFIG_STMMAC_DEBUG_FS
stmmac_exit_fs();
#endif
- stmmac_clk_disable(priv);
+ clk_disable(priv->stmmac_clk);
return 0;
}
@@ -2078,11 +2074,14 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ret = register_netdev(ndev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n", __func__, ret);
- goto error;
+ goto error_netdev_register;
}
- if (stmmac_clk_get(priv))
+ priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_clk)) {
pr_warning("%s: warning: cannot get CSR clock\n", __func__);
+ goto error_clk_get;
+ }
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
@@ -2100,15 +2099,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
if (ret < 0) {
pr_debug("%s: MDIO bus (id: %d) registration failed",
__func__, priv->plat->bus_id);
- goto error;
+ goto error_mdio_register;
}
return priv;
-error:
- netif_napi_del(&priv->napi);
-
+error_mdio_register:
+ clk_put(priv->stmmac_clk);
+error_clk_get:
unregister_netdev(ndev);
+error_netdev_register:
+ netif_napi_del(&priv->napi);
free_netdev(ndev);
return NULL;
@@ -2177,7 +2178,7 @@ int stmmac_suspend(struct net_device *ndev)
else {
stmmac_set_mac(priv->ioaddr, false);
/* Disable clock in case of PWM is off */
- stmmac_clk_disable(priv);
+ clk_disable(priv->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
@@ -2202,7 +2203,7 @@ int stmmac_resume(struct net_device *ndev)
priv->hw->mac->pmt(priv->ioaddr, 0);
else
/* enable the clk previously disabled */
- stmmac_clk_enable(priv);
+ clk_enable(priv->stmmac_clk);
netif_device_attach(ndev);
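
Not part of the patch: the shape of the reordered error path in stmmac_dvr_probe() above, reduced to three steps. The clk and netdev calls are real APIs; the function name is invented. Each failure jumps to the label that undoes only what has already succeeded, and the labels unwind in strict reverse order of acquisition.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/netdevice.h>

static int example_probe_tail(struct device *dev, struct net_device *ndev,
                              struct clk **clk)
{
    int ret;

    ret = register_netdev(ndev);
    if (ret)
        goto err_netdev_register;

    *clk = clk_get(dev, NULL);
    if (IS_ERR(*clk)) {
        ret = PTR_ERR(*clk);
        goto err_clk_get;
    }

    ret = clk_prepare_enable(*clk);
    if (ret)
        goto err_clk_enable;

    return 0;

err_clk_enable:
    clk_put(*clk);
err_clk_get:
    unregister_netdev(ndev);
err_netdev_register:
    return ret;
}
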
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cd01ee7ecef1..b93245c11995 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* the necessary resources and invokes the main to init
* the net device, register the mdio bus etc.
*/
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+static int __devinit stmmac_pltfr_probe(struct platform_device *pdev)
{
int ret = 0;
struct resource *res;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184b..aea9b14cdfbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -21,6 +21,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
struct stmmac_timer {
void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void);
#endif
+
+#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3b5c4571b55e..d15c888e9df8 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
- struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_ctlr *ctlr;
unsigned long flags;
if (!chan)
return -EINVAL;
+ ctlr = chan->ctlr;
spin_lock_irqsave(&ctlr->lock, flags);
if (chan->state != CPDMA_STATE_IDLE)
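
Not part of the patch: the defect fixed above in miniature -- the argument must be validated before anything is loaded through it. The types are placeholders.

#include <linux/errno.h>

struct example_ctlr;                    /* opaque placeholder */

struct example_chan {
    struct example_ctlr *ctlr;
};

static int example_chan_destroy(struct example_chan *chan)
{
    struct example_ctlr *ctlr;

    if (!chan)
        return -EINVAL;
    ctlr = chan->ctlr;          /* safe only after the NULL check */
    (void)ctlr;                 /* ... tear down under ctlr's lock ... */
    return 0;
}
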
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cd7ee204e94a..a9ca4a03d31b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- if (data->bus)
+ if (data->bus) {
+ mdiobus_unregister(data->bus);
mdiobus_free(data->bus);
+ }
if (data->clk)
clk_put(data->clk);
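
Not part of the patch: the ordering rule behind the remove() fix above -- a bus registered with mdiobus_register() must be unregistered before mdiobus_free(); freeing alone, as the old path did, leaks the registration.

#include <linux/phy.h>

static void example_mdio_teardown(struct mii_bus *bus)
{
    if (!bus)
        return;
    mdiobus_unregister(bus);    /* undo mdiobus_register() first */
    mdiobus_free(bus);          /* then release the structure */
}
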
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index cb18043f5830..b4d281626fb4 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -4,6 +4,7 @@
config NET_VENDOR_WIZNET
bool "WIZnet devices"
+ depends on HAS_IOMEM
default y
---help---
If you have a network (Ethernet) card belonging to this class, say Y
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 482648fcf0b6..98934bdf6acf 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev)
}
int ixp46x_phc_index = -1;
+EXPORT_SYMBOL_GPL(ixp46x_phc_index);
static int ixp4xx_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 24d8566cfd8b..441b4dc79450 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
break ;
case SMT_P_REASON :
- * (u_long *) to = 0 ;
+ *(u32 *)to = 0 ;
sp_len = 4 ;
goto sp_done ;
case SMT_P1033 : /* time stamp */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 6cee2917eb02..4a1a5f58fa73 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device)
unsigned long flags;
net_device = hv_get_drvdata(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- net_device->destroy = true;
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
-
- /* Wait for all send completions */
- wait_event(net_device->wait_drain,
- atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8e23c084c4a7..8c5a1c43c81d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -47,7 +47,7 @@ struct net_device_context {
struct work_struct work;
};
-
+#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
@@ -518,6 +518,11 @@ static void __exit netvsc_drv_exit(void)
static int __init netvsc_drv_init(void)
{
+ if (ring_size < RING_SIZE_MIN) {
+ ring_size = RING_SIZE_MIN;
+ pr_info("Increased ring_size to %d (min allowed)\n",
+ ring_size);
+ }
return vmbus_driver_register(&netvsc_drv);
}
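
Not part of the patch: the parameter-clamping idiom from netvsc_drv_init() above as a standalone module skeleton -- validate (and here silently raise) a user-supplied module parameter once at init time instead of trusting it at every use site.

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

#define EXAMPLE_RING_SIZE_MIN   64

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int __init example_init(void)
{
    if (ring_size < EXAMPLE_RING_SIZE_MIN) {
        ring_size = EXAMPLE_RING_SIZE_MIN;
        pr_info("example: ring_size raised to %d (minimum allowed)\n",
                ring_size);
    }
    return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
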
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index fbf539468205..1e88a1095934 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
+ struct netvsc_device *nvdev = dev->net_dev;
+ struct hv_device *hdev = nvdev->dev;
+ ulong flags;
/* Attempt to do a rndis device halt */
request = get_rndis_request(dev, RNDIS_MSG_HALT,
@@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
dev->state = RNDIS_DEV_UNINITIALIZED;
cleanup:
+ spin_lock_irqsave(&hdev->channel->inbound_lock, flags);
+ nvdev->destroy = true;
+ spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
+
+ /* Wait for all send completions */
+ wait_event(nvdev->wait_drain,
+ atomic_read(&nvdev->num_outstanding_sends) == 0);
+
if (request)
put_rndis_request(dev, request);
return;
@@ -804,18 +815,15 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
if (ret != 0) {
- /*
- * TODO: If rndis init failed, we will need to shut down the
- * channel
- */
+ rndis_filter_device_remove(dev);
+ return ret;
}
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
- /*
- * TODO: shutdown rndis device and the channel
- */
+ rndis_filter_device_remove(dev);
+ return ret;
}
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index a561ae44a9ac..c6a0299aa9f9 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed)
/* If 'RPOLC' is not set, we can't catch the receive interrupt.
* It's related to the HW layout and the IR transceiver.
*/
- val |= IREN | RPOLC;
+ val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
return ret;
}
@@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev
bfin_sir_stop_rx(port);
val = UART_GET_GCTL(port);
- val &= ~(UCEN | IREN | RPOLC);
+ val &= ~(UCEN | UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
#ifdef CONFIG_SIR_BFIN_DMA
@@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work)
* reset all the UART.
*/
val = UART_GET_GCTL(port);
- val &= ~(IREN | RPOLC);
+ val &= ~(UMOD_MASK | RPOLC);
UART_PUT_GCTL(port, val);
SSYNC();
- val |= IREN | RPOLC;
+ val |= UMOD_IRDA | RPOLC;
UART_PUT_GCTL(port, val);
SSYNC();
/* bfin_sir_set_speed(port, self->speed); */
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 824e2a93fe8a..5f3aeac3f86d 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev)
sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
if (!kingsun->irlap) {
+ err = -ENOMEM;
dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
goto free_mem;
}
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5a278ab83c2f..2d4b6a1ab202 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev)
sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
if (!kingsun->irlap) {
+ err = -ENOMEM;
dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
goto free_mem;
}
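
[editor's note] Both irda hunks above fix the same omission: the failure branch reached its goto with err still holding the value from an earlier, successful step, so a failed irlap_open() could report success. A minimal sketch of the convention, with example_attach(), example_open() and ctx invented as stand-ins:

#include <linux/slab.h>
#include <linux/errno.h>

/* Stand-in for irlap_open(); returns NULL on failure. */
static void *example_attach(void *ctx)
{
	return NULL;
}

static int example_open(void)
{
	int err = 0;
	void *ctx, *link;

	ctx = kzalloc(128, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	link = example_attach(ctx);
	if (!link) {
		/* Without this assignment a stale 'err' (0) would be returned. */
		err = -ENOMEM;
		goto free_mem;
	}

	return 0;

free_mem:
	kfree(ctx);
	return err;
}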
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index ff16daf33ae1..8d5476707912 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -289,7 +289,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
}
lsr = STLSR;
}
- si->last_oscr = OSCR;
+ si->last_oscr = readl_relaxed(OSCR);
break;
case 0x04: /* Received Data Available */
@@ -300,7 +300,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
dev->stats.rx_bytes++;
async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
} while (STLSR & LSR_DR);
- si->last_oscr = OSCR;
+ si->last_oscr = readl_relaxed(OSCR);
break;
case 0x02: /* Transmit FIFO Data Request */
@@ -316,7 +316,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
/* We need to ensure that the transmitter has finished. */
while ((STLSR & LSR_TEMT) == 0)
cpu_relax();
- si->last_oscr = OSCR;
+ si->last_oscr = readl_relaxed(OSCR);
/*
* Ok, we've finished transmitting. Now enable
@@ -370,7 +370,7 @@ static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
while (ICSR1 & ICSR1_TBY)
cpu_relax();
- si->last_oscr = OSCR;
+ si->last_oscr = readl_relaxed(OSCR);
/*
* HACK: It looks like the TBY bit is dropped too soon.
@@ -470,7 +470,7 @@ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
/* stop RX DMA */
DCSR(si->rxdma) &= ~DCSR_RUN;
- si->last_oscr = OSCR;
+ si->last_oscr = readl_relaxed(OSCR);
icsr0 = ICSR0;
if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
@@ -546,7 +546,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
if (mtt)
- while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
+ while ((unsigned)(readl_relaxed(OSCR) - si->last_oscr)/4 < mtt)
cpu_relax();
/* stop RX DMA, disable FICP */
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0737bd4d1669..0f0f9ce3a776 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q)
int i;
for (i = 0; i < MAX_MACVTAP_QUEUES; i++) {
- if (rcu_dereference(vlan->taps[i]) == q)
+ if (rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock)) == q)
return i;
}
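
[editor's note] The macvtap change above swaps rcu_dereference() for rcu_dereference_protected() because get_slot() runs on the update side with macvtap_lock held, not inside an RCU read-side critical section. A hedged sketch of that annotation; the lock, array and function names are illustrative only.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

#define EXAMPLE_SLOTS 4

static DEFINE_SPINLOCK(example_lock);
static void __rcu *example_slots[EXAMPLE_SLOTS];

/* Caller must hold example_lock; lockdep verifies that claim at runtime. */
static int example_find_slot(void *q)
{
	int i;

	for (i = 0; i < EXAMPLE_SLOTS; i++) {
		if (rcu_dereference_protected(example_slots[i],
					      lockdep_is_held(&example_lock)) == q)
			return i;
	}
	return -1;
}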
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index f9347ea3d381..b3321129a83c 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this,
* rtnl_lock already held
*/
if (nt->np.dev) {
- spin_unlock_irqrestore(
- &target_list_lock,
- flags);
__netpoll_cleanup(&nt->np);
- spin_lock_irqsave(&target_list_lock,
- flags);
dev_put(nt->np.dev);
nt->np.dev = NULL;
- netconsole_target_put(nt);
}
nt->enabled = 0;
stopped = true;
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e0cc4ef33dee..eefe49e8713c 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -101,7 +101,6 @@ err:
n--;
gpio_free(s->gpio[n]);
}
- devm_kfree(&pdev->dev, s);
return r;
}
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 5c120189ec86..4d4d25efc1e1 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev,
pb->mii_bus = parent_bus;
ret_val = -ENODEV;
- for_each_child_of_node(dev->of_node, child_bus_node) {
+ for_each_available_child_of_node(dev->of_node, child_bus_node) {
u32 v;
r = of_property_read_u32(child_bus_node, "reg", &v);
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 826d961f39f7..d4015aa663e6 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -3,14 +3,17 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2009 Cavium Networks
+ * Copyright (C) 2009,2011 Cavium, Inc.
*/
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
#include <linux/phy.h>
+#include <linux/io.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-smix-defs.h>
@@ -18,9 +21,17 @@
#define DRV_VERSION "1.0"
#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define SMI_CMD 0x0
+#define SMI_WR_DAT 0x8
+#define SMI_RD_DAT 0x10
+#define SMI_CLK 0x18
+#define SMI_EN 0x20
+
struct octeon_mdiobus {
struct mii_bus *mii_bus;
- int unit;
+ u64 register_base;
+ resource_size_t mdio_phys;
+ resource_size_t regsize;
int phy_irq[PHY_MAX_ADDR];
};
@@ -35,15 +46,15 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+ cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
do {
/*
* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(p->unit));
+ __delay(1000);
+ smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
} while (smi_rd.s.pending && --timeout);
if (smi_rd.s.val)
@@ -62,21 +73,21 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
smi_wr.u64 = 0;
smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(p->unit), smi_wr.u64);
+ cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
- cvmx_write_csr(CVMX_SMIX_CMD(p->unit), smi_cmd.u64);
+ cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
do {
/*
* Wait 1000 clocks so we don't saturate the RSL bus
* doing reads.
*/
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(p->unit));
+ __delay(1000);
+ smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0)
@@ -88,38 +99,44 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
{
struct octeon_mdiobus *bus;
+ struct resource *res_mem;
union cvmx_smix_en smi_en;
- int i;
int err = -ENOENT;
bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
if (!bus)
return -ENOMEM;
- /* The platform_device id is our unit number. */
- bus->unit = pdev->id;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ bus->mdio_phys = res_mem->start;
+ bus->regsize = resource_size(res_mem);
+ if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
+ res_mem->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ goto fail;
+ }
+ bus->register_base =
+ (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
bus->mii_bus = mdiobus_alloc();
if (!bus->mii_bus)
- goto err;
+ goto fail;
smi_en.u64 = 0;
smi_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
-
- /*
- * Standard Octeon evaluation boards don't support phy
- * interrupts, we need to poll.
- */
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bus->phy_irq[i] = PHY_POLL;
+ cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
bus->mii_bus->priv = bus;
bus->mii_bus->irq = bus->phy_irq;
bus->mii_bus->name = "mdio-octeon";
- snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bus->mii_bus->name, bus->unit);
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base);
bus->mii_bus->parent = &pdev->dev;
bus->mii_bus->read = octeon_mdiobus_read;
@@ -127,20 +144,18 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, bus);
- err = mdiobus_register(bus->mii_bus);
+ err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
if (err)
- goto err_register;
+ goto fail_register;
dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
return 0;
-err_register:
+fail_register:
mdiobus_free(bus->mii_bus);
-
-err:
- devm_kfree(&pdev->dev, bus);
+fail:
smi_en.u64 = 0;
- cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
+ cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
return err;
}
@@ -154,14 +169,23 @@ static int __devexit octeon_mdiobus_remove(struct platform_device *pdev)
mdiobus_unregister(bus->mii_bus);
mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
- cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
+ cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
return 0;
}
+static struct of_device_id octeon_mdiobus_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-mdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_mdiobus_match);
+
static struct platform_driver octeon_mdiobus_driver = {
.driver = {
.name = "mdio-octeon",
.owner = THIS_MODULE,
+ .of_match_table = octeon_mdiobus_match,
},
.probe = octeon_mdiobus_probe,
.remove = __devexit_p(octeon_mdiobus_remove),
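
[editor's note] The mdio-octeon rewrite above replaces the pdev->id "unit number" scheme with a memory resource taken from the platform device (typically supplied by the device tree) and mapped with devm helpers, which is what makes the of_match_table entry useful. A minimal sketch of that probe pattern under the same assumptions; example_probe() and the error codes are illustrative, not the driver's.

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "found no memory resource\n");
		return -ENXIO;
	}

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), res->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		return -EBUSY;
	}

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	/* Device registers are now reachable at fixed offsets from 'base'. */
	return 0;
}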
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1c98321b56cc..162464fe86bf 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
- rt = ip_route_output_ports(&init_net, &fl4, NULL,
+ rt = ip_route_output_ports(sock_net(sk), &fl4, NULL,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
@@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
- rt = ip_route_output_ports(&init_net, &fl4, sk,
+ rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index b104c05225f7..341b65dbbcd3 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port,
+ gfp_t gfp)
{
struct netpoll *np;
int err;
- np = kzalloc(sizeof(*np), GFP_KERNEL);
+ np = kzalloc(sizeof(*np), gfp);
if (!np)
return -ENOMEM;
- err = __netpoll_setup(np, port->dev);
+ err = __netpoll_setup(np, port->dev, gfp);
if (err) {
kfree(np);
return err;
@@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port,
+ gfp_t gfp)
{
return 0;
}
@@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
if (team_netpoll_info(team)) {
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(team, port, GFP_KERNEL);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1443,15 +1445,15 @@ static void team_netpoll_cleanup(struct net_device *dev)
}
static int team_netpoll_setup(struct net_device *dev,
- struct netpoll_info *npifo)
+ struct netpoll_info *npifo, gfp_t gfp)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
- int err;
+ int err = 0;
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(team, port, gfp);
if (err) {
__team_netpoll_cleanup(team);
break;
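
[editor's note] The team hunks above thread a gfp_t argument through team_port_enable_netpoll() so callers in atomic context can request non-sleeping allocations while process-context callers keep GFP_KERNEL. A generic sketch of that plumbing; every identifier below is invented for illustration.

#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct example_port_state {
	int dummy;
};

/* The allocation context is decided by the caller, not hard-coded here. */
static int example_port_enable(struct example_port_state **out, gfp_t gfp)
{
	struct example_port_state *st;

	st = kzalloc(sizeof(*st), gfp);
	if (!st)
		return -ENOMEM;

	*out = st;
	return 0;
}

/* Process-context caller. */
static int example_enable_sleeping(struct example_port_state **out)
{
	return example_port_enable(out, GFP_KERNEL);
}

/* Caller that may run with locks held or otherwise unable to sleep. */
static int example_enable_atomic(struct example_port_state **out)
{
	return example_port_enable(out, GFP_ATOMIC);
}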
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index c62163e272cd..3a16d4fdaa05 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun)
netif_tx_lock_bh(tun->dev);
netif_carrier_off(tun->dev);
tun->tfile = NULL;
- tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
@@ -1379,10 +1378,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int vnet_hdr_sz;
int ret;
- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
+ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
-
+ } else {
+ memset(&ifr, 0, sizeof(ifr));
+ }
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
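
[editor's note] The tun hunk above zero-initializes the on-stack ifreq for ioctls that do not copy one in from user space, so later code never acts on stale stack bytes. A hedged sketch of the pattern; example_ioctl() and its cmd_carries_ifreq flag are hypothetical simplifications of the real dispatch.

#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/if.h>
#include <linux/errno.h>

static long example_ioctl(unsigned int cmd, void __user *argp,
			  bool cmd_carries_ifreq)
{
	struct ifreq ifr;

	if (cmd_carries_ifreq) {
		if (copy_from_user(&ifr, argp, sizeof(ifr)))
			return -EFAULT;
	} else {
		/* Commands without an ifreq argument must not see stack garbage. */
		memset(&ifr, 0, sizeof(ifr));
	}

	/* ... dispatch on cmd using a fully initialized ifr ... */
	return 0;
}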
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index 187c144c5e5b..7d78669000d7 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = alloc_page(gfp_flags);
+ page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
if (!page)
return -ENOMEM;
@@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev)
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
+ usb_free_urb(req);
usbpn_close(dev);
return -ENOMEM;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4b9513fcf275..4cd582a4f625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -138,20 +138,7 @@ struct cdc_ncm_ctx {
static void cdc_ncm_txpath_bh(unsigned long param);
static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
-static const struct driver_info cdc_ncm_info;
static struct usb_driver cdc_ncm_driver;
-static const struct ethtool_ops cdc_ncm_ethtool_ops;
-
-static const struct usb_device_id cdc_devs[] = {
- { USB_INTERFACE_INFO(USB_CLASS_COMM,
- USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&cdc_ncm_info,
- },
- {
- },
-};
-
-MODULE_DEVICE_TABLE(usb, cdc_devs);
static void
cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
@@ -454,6 +441,16 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
+ .get_drvinfo = cdc_ncm_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct cdc_ncm_ctx *ctx;
@@ -1203,6 +1200,61 @@ static const struct driver_info cdc_ncm_info = {
.tx_fixup = cdc_ncm_tx_fixup,
};
+/* Same as cdc_ncm_info, but with FLAG_WWAN */
+static const struct driver_info wwan_info = {
+ .description = "Mobile Broadband Network Device",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_WWAN,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .check_connect = cdc_ncm_check_connect,
+ .manage_power = cdc_ncm_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static const struct usb_device_id cdc_devs[] = {
+ /* Ericsson MBM devices like F5521gw */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x0bdb,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long) &wwan_info,
+ },
+
+ /* Dell branded MBM devices like DW5550 */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x413c,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long) &wwan_info,
+ },
+
+ /* Toshiba branded MBM devices */
+ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_VENDOR,
+ .idVendor = 0x0930,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM,
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE,
+ .driver_info = (unsigned long) &wwan_info,
+ },
+
+ /* Generic CDC-NCM devices */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_ncm_info,
+ },
+ {
+ },
+};
+MODULE_DEVICE_TABLE(usb, cdc_devs);
+
static struct usb_driver cdc_ncm_driver = {
.name = "cdc_ncm",
.id_table = cdc_devs,
@@ -1215,16 +1267,6 @@ static struct usb_driver cdc_ncm_driver = {
.disable_hub_initiated_lpm = 1,
};
-static const struct ethtool_ops cdc_ncm_ethtool_ops = {
- .get_drvinfo = cdc_ncm_get_drvinfo,
- .get_link = usbnet_get_link,
- .get_msglevel = usbnet_get_msglevel,
- .set_msglevel = usbnet_set_msglevel,
- .get_settings = usbnet_get_settings,
- .set_settings = usbnet_set_settings,
- .nway_reset = usbnet_nway_reset,
-};
-
module_usb_driver(cdc_ncm_driver);
MODULE_AUTHOR("Hans Petter Selasky");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index d8ad55284389..c3d03490c97d 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1314,7 +1314,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
int retv;
int length = 0; /* shut up GCC */
- urb = usb_alloc_urb(0, GFP_NOIO);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 2ea126a16d79..adfab3fc5478 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -247,30 +247,12 @@ err:
*/
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
- int rv;
struct qmi_wwan_state *info = (void *)&dev->data;
- /* ZTE makes devices where the interface descriptors and endpoint
- * configurations of two or more interfaces are identical, even
- * though the functions are completely different. If set, then
- * driver_info->data is a bitmap of acceptable interface numbers
- * allowing us to bind to one such interface without binding to
- * all of them
- */
- if (dev->driver_info->data &&
- !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) {
- dev_info(&intf->dev, "not on our whitelist - ignored");
- rv = -ENODEV;
- goto err;
- }
-
/* control and data is shared */
info->control = intf;
info->data = intf;
- rv = qmi_wwan_register_subdriver(dev);
-
-err:
- return rv;
+ return qmi_wwan_register_subdriver(dev);
}
static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -356,214 +338,64 @@ static const struct driver_info qmi_wwan_shared = {
.manage_power = qmi_wwan_manage_power,
};
-static const struct driver_info qmi_wwan_force_int0 = {
- .description = "Qualcomm WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(0), /* interface whitelist bitmap */
-};
-
-static const struct driver_info qmi_wwan_force_int1 = {
- .description = "Qualcomm WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(1), /* interface whitelist bitmap */
-};
-
-static const struct driver_info qmi_wwan_force_int2 = {
- .description = "Qualcomm WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(2), /* interface whitelist bitmap */
-};
-
-static const struct driver_info qmi_wwan_force_int3 = {
- .description = "Qualcomm WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(3), /* interface whitelist bitmap */
-};
-
-static const struct driver_info qmi_wwan_force_int4 = {
- .description = "Qualcomm WWAN/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(4), /* interface whitelist bitmap */
-};
-
-/* Sierra Wireless provide equally useless interface descriptors
- * Devices in QMI mode can be switched between two different
- * configurations:
- * a) USB interface #8 is QMI/wwan
- * b) USB interfaces #8, #19 and #20 are QMI/wwan
- *
- * Both configurations provide a number of other interfaces (serial++),
- * some of which have the same endpoint configuration as we expect, so
- * a whitelist or blacklist is necessary.
- *
- * FIXME: The below whitelist should include BIT(20). It does not
- * because I cannot get it to work...
- */
-static const struct driver_info qmi_wwan_sierra = {
- .description = "Sierra Wireless wwan/QMI device",
- .flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
-};
-
#define HUAWEI_VENDOR_ID 0x12D1
+/* map QMI/wwan function by a fixed interface number */
+#define QMI_FIXED_INTF(vend, prod, num) \
+ USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
+ .driver_info = (unsigned long)&qmi_wwan_shared
+
/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
#define QMI_GOBI1K_DEVICE(vend, prod) \
- USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long)&qmi_wwan_force_int3
+ QMI_FIXED_INTF(vend, prod, 3)
-/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
+/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
- USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long)&qmi_wwan_force_int0
+ QMI_FIXED_INTF(vend, prod, 0)
static const struct usb_device_id products[] = {
+ /* 1. CDC ECM like devices match on the control interface */
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = HUAWEI_VENDOR_ID,
- .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
- .bInterfaceSubClass = 1,
- .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = HUAWEI_VENDOR_ID,
- .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
- .bInterfaceSubClass = 1,
- .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- { /* Huawei E392, E398 and possibly others in "Windows mode"
- * using a combined control and data interface without any CDC
- * functional descriptors
- */
- .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = HUAWEI_VENDOR_ID,
- .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
- .bInterfaceSubClass = 1,
- .bInterfaceProtocol = 17,
+
+ /* 2. Combined interface devices matching on class+protocol */
+ { /* Huawei E392, E398 and possibly others in "Windows mode" */
+ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17),
.driver_info = (unsigned long)&qmi_wwan_shared,
},
{ /* Pantech UML290 */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x106c,
- .idProduct = 0x3718,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xf0,
- .bInterfaceProtocol = 0xff,
+ USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff),
.driver_info = (unsigned long)&qmi_wwan_shared,
},
- { /* ZTE MF820D */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x0167,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE MF821D */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x0326,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE (Vodafone) K3520-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x0055,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int1,
- },
- { /* ZTE (Vodafone) K3565-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x0063,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE (Vodafone) K3570-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x1008,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE (Vodafone) K3571-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x1010,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE (Vodafone) K3765-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x2002,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE (Vodafone) K4505-Z */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x0104,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int4,
- },
- { /* ZTE MF60 */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x19d2,
- .idProduct = 0x1402,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_force_int2,
- },
- { /* Sierra Wireless MC77xx in QMI mode */
- .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
- .idVendor = 0x1199,
- .idProduct = 0x68a2,
- .bInterfaceClass = 0xff,
- .bInterfaceSubClass = 0xff,
- .bInterfaceProtocol = 0xff,
- .driver_info = (unsigned long)&qmi_wwan_sierra,
+ { /* Pantech UML290 - newer firmware */
+ USB_DEVICE_AND_INTERFACE_INFO(0x106c, 0x3718, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_shared,
},
- /* Gobi 1000 devices */
+ /* 3. Combined interface devices matching on interface number */
+ {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */
+ {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */
+ {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */
+ {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
+ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+
+ /* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
@@ -579,9 +411,11 @@ static const struct usb_device_id products[] = {
{QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
- /* Gobi 2000 and 3000 devices */
+ /* 5. Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
+ {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
+ {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
@@ -589,6 +423,8 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */
{QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */
{QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */
+ {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
+ {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */
{QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
@@ -600,11 +436,16 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */
{QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */
+ {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
{QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
+ {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
+ {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
+
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
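
[editor's note] The qmi_wwan rework above drops the per-interface whitelist driver_info variants in favor of matching directly on a fixed interface number with USB_DEVICE_INTERFACE_NUMBER(). A hedged sketch of such a match table; the vendor/product IDs below are placeholders, not real devices.

#include <linux/usb.h>
#include <linux/module.h>

static const struct usb_device_id example_ids[] = {
	/* Bind only to interface 3 of this (made-up) device. */
	{ USB_DEVICE_INTERFACE_NUMBER(0x1234, 0x5678, 3) },
	/* Bind only to interface 0 of another (made-up) device. */
	{ USB_DEVICE_INTERFACE_NUMBER(0x1234, 0x9abc, 0) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);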
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d75d1f56becf..7be49ea60b6d 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -68,15 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0);
*/
#define SIERRA_NET_USBCTL_BUF_LEN 1024
-/* list of interface numbers - used for constructing interface lists */
-struct sierra_net_iface_info {
- const u32 infolen; /* number of interface numbers on list */
- const u8 *ifaceinfo; /* pointer to the array holding the numbers */
-};
-
struct sierra_net_info_data {
u16 rx_urb_size;
- struct sierra_net_iface_info whitelist;
};
/* Private data structure */
@@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
return usbnet_change_mtu(net, new_mtu);
}
-static int is_whitelisted(const u8 ifnum,
- const struct sierra_net_iface_info *whitelist)
-{
- if (whitelist) {
- const u8 *list = whitelist->ifaceinfo;
- int i;
-
- for (i = 0; i < whitelist->infolen; i++) {
- if (list[i] == ifnum)
- return 1;
- }
- }
- return 0;
-}
-
static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
{
int result = 0;
@@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev_dbg(&dev->udev->dev, "%s", __func__);
ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
- /* We only accept certain interfaces */
- if (!is_whitelisted(ifacenum, &data->whitelist)) {
- dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
- return -ENODEV;
- }
numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
/* We have three endpoints, bulk in and out, and a status */
if (numendpoints != 3) {
@@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return NULL;
}
-static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
.rx_urb_size = 8 * 1024,
- .whitelist = {
- .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
- .ifaceinfo = sierra_net_ifnum_list
- }
};
static const struct driver_info sierra_net_info_direct_ip = {
@@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = {
.data = (unsigned long)&sierra_net_info_data_direct_ip,
};
+#define DIRECT_IP_DEVICE(vend, prod) \
+ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \
+ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
+ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \
+ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \
+ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \
+ .driver_info = (unsigned long)&sierra_net_info_direct_ip}
+
static const struct usb_device_id products[] = {
- {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
- .driver_info = (unsigned long) &sierra_net_info_direct_ip},
- {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
- .driver_info = (unsigned long) &sierra_net_info_direct_ip},
- {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
- .driver_info = (unsigned long) &sierra_net_info_direct_ip},
- {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
- .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
+ DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+ DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+ DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
{}, /* last item */
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8531c1caac28..fd4b26d46fd5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
- rx_alloc_submit(dev, GFP_KERNEL);
+ rx_alloc_submit(dev, GFP_NOIO);
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
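
[editor's note] The one-line usbnet change above matters because usbnet_resume() runs on the PM resume path, where a GFP_KERNEL allocation may wait on block I/O that cannot complete until resume has finished; GFP_NOIO avoids that dependency. A hedged sketch of the rule with an invented refill helper:

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Refill one RX buffer; 'flags' must reflect the caller's context. */
static struct sk_buff *example_refill_one(unsigned int len, gfp_t flags)
{
	return alloc_skb(len, flags);
}

static void example_resume_refill(void)
{
	/*
	 * Resume path: do not use GFP_KERNEL here, it may block on I/O
	 * to a device that is still waking up.
	 */
	struct sk_buff *skb = example_refill_one(2048, GFP_NOIO);

	if (skb)
		kfree_skb(skb);	/* placeholder for queueing the buffer */
}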
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 93e0cfb739b8..ce9d4f2c9776 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
netdev->watchdog_timeo = 5 * HZ;
INIT_WORK(&adapter->work, vmxnet3_reset_work);
+ set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
if (adapter->intr.type == VMXNET3_IT_MSIX) {
int i;
@@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_register;
}
- set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
vmxnet3_check_link(adapter, false);
atomic_inc(&devices_found);
return 0;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 9eb6479306d6..ef36cafd44b7 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
}
/* Global interrupt queue */
writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+
+ rc = -ENOMEM;
+
priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
if (!priv->iqcfg)
goto err_free_irq_5;
writel(priv->iqcfg_dma, ioaddr + IQCFG);
- rc = -ENOMEM;
-
/*
* SCC 0-3 private rx/tx irq structures
* IQRX/TXi needs to be set soon. Learned it the hard way...
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 283237f6f074..def12b38cbf7 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options)
unsigned barker;
options_orig = kstrdup(_options, GFP_KERNEL);
- if (options_orig == NULL)
+ if (options_orig == NULL) {
+ result = -ENOMEM;
goto error_parse;
+ }
options = options_orig;
while ((token = strsep(&options, ",")) != NULL) {
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index efc162e0b511..88b8d64c90f1 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev,
return ret;
}
-static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state)
+static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
{
int ret;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 8c4c040a47b8..2aab20ee9f38 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ah->block, flags);
+ spin_lock_bh(&ah->block);
ah->bmisscount = 0;
ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
@@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah)
ath5k_hw_set_imr(ah, ah->imask);
mmiowb();
- spin_unlock_irqrestore(&ah->block, flags);
+ spin_unlock_bh(&ah->block);
}
static void ath5k_tasklet_beacon(unsigned long data)
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 4026c906cc7b..b7e0258887e7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
case AR5K_EEPROM_MODE_11A:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_a;
- ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+ ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
break;
case AR5K_EEPROM_MODE_11B:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index dc2bcfeadeb4..94a9bbea6874 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -182,6 +182,7 @@
#define AR5K_EEPROM_EEP_DELTA 10
#define AR5K_EEPROM_N_MODES 3
#define AR5K_EEPROM_N_5GHZ_CHAN 10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
#define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 260e7dc7f751..d56453e43d7e 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -254,7 +254,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
- unsigned long flags;
mutex_lock(&ah->lock);
@@ -300,9 +299,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
if (changes & BSS_CHANGED_BEACON) {
- spin_lock_irqsave(&ah->block, flags);
+ spin_lock_bh(&ah->block);
ath5k_beacon_update(hw, vif);
- spin_unlock_irqrestore(&ah->block, flags);
+ spin_unlock_bh(&ah->block);
}
if (changes & BSS_CHANGED_BEACON_ENABLED)
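
[editor's note] Both ath5k hunks above replace spin_lock_irqsave() on ah->block with spin_lock_bh(): the lock is only contended against tasklet (bottom-half) context, so disabling hard IRQs around it buys nothing. A hedged sketch of the distinction with an illustrative lock and counter:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_block);
static int example_bmisscount;

/* Process-context path; the other user of the lock is a tasklet. */
static void example_beacon_config(void)
{
	spin_lock_bh(&example_block);	/* blocks BHs, leaves hard IRQs enabled */
	example_bmisscount = 0;
	spin_unlock_bh(&example_block);
}

/* Tasklet callback taking the same lock. */
static void example_beacon_tasklet(unsigned long data)
{
	spin_lock(&example_block);	/* already running in BH context */
	example_bmisscount++;
	spin_unlock(&example_block);
}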
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index cfa91ab7acf8..60b6a9daff7e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
+ case AR9485_DEVID_AR1111:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dd0c146d81dc..ce7332c64efb 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -49,6 +49,7 @@
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA955X 0x0038
+#define AR9485_DEVID_AR1111 0x0037
#define AR5416_AR9100_DEVID 0x000b
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 7990cd55599c..b42be910a83d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
-void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+void ath9k_hw_kill_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (!(ah->imask & ATH9K_INT_GLOBAL))
- atomic_set(&ah->intr_ref_cnt, -1);
- else
- atomic_dec(&ah->intr_ref_cnt);
-
ath_dbg(common, INTERRUPT, "disable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
@@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
}
}
+EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
+
+void ath9k_hw_disable_interrupts(struct ath_hw *ah)
+{
+ if (!(ah->imask & ATH9K_INT_GLOBAL))
+ atomic_set(&ah->intr_ref_cnt, -1);
+ else
+ atomic_dec(&ah->intr_ref_cnt);
+
+ ath9k_hw_kill_interrupts(ah);
+}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
void ath9k_hw_enable_interrupts(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 0eba36dca6f8..4a745e68dd94 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah);
void ath9k_hw_set_interrupts(struct ath_hw *ah);
void ath9k_hw_enable_interrupts(struct ath_hw *ah);
void ath9k_hw_disable_interrupts(struct ath_hw *ah);
+void ath9k_hw_kill_interrupts(struct ath_hw *ah);
void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6049d8b82855..a22df749b8db 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
- if(test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
+ ath9k_hw_kill_interrupts(ah);
return IRQ_HANDLED;
+ }
/*
* Figure out the reason(s) for the interrupt. Note
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 87b89d55e637..a978984d78a5 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
{ PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */
+ { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */
{ 0 }
};
@@ -320,6 +321,7 @@ static int ath_pci_suspend(struct device *device)
* Otherwise the chip never moved to full sleep,
* when no interface is up.
*/
+ ath9k_stop_btcoex(sc);
ath9k_hw_disable(sc->sc_ah);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 12aca02228c2..4480c0cc655f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
- bool decrypt_error = false;
struct ath_rx_status rs;
enum ath9k_rx_qtype qtype;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
@@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
tsf_lower = tsf & 0xffffffff;
do {
+ bool decrypt_error = false;
/* If handling rx interrupt and flush is in progress => exit */
if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
break;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index b80352b308d5..a140165dfee0 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (dev->dev->chip_id == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
+ } else if (dev->dev->chip_id == 0x5354) {
+ /* Don't allow overtaking buttons GPIOs */
+ set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */
}
- if (dev->dev->chip_id == 0x5354)
- set &= 0xff02;
+
if (0 /* FIXME: conditional unknown */ ) {
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0100);
- mask |= 0x0180;
- set |= 0x0180;
+ /* BT Coexistence Input */
+ mask |= 0x0080;
+ set |= 0x0080;
+ /* BT Coexistence Out */
+ mask |= 0x0100;
+ set |= 0x0100;
}
if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) {
+ /* PA is controlled by gpio 9, let ucode handle it */
b43_write16(dev, B43_MMIO_GPIO_MASK,
b43_read16(dev, B43_MMIO_GPIO_MASK)
| 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
- if (dev->dev->core_rev >= 2)
- mask |= 0x0010; /* FIXME: This is redundant. */
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL,
(bcma_cc_read32(&dev->dev->bdev->bus->drv_cc,
- BCMA_CC_GPIOCTL) & mask) | set);
+ BCMA_CC_GPIOCTL) & ~mask) | set);
break;
#endif
#ifdef CONFIG_B43_SSB
@@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev)
if (gpiodev)
ssb_write32(gpiodev, B43_GPIO_CONTROL,
(ssb_read32(gpiodev, B43_GPIO_CONTROL)
- & mask) | set);
+ & ~mask) | set);
break;
#endif
}
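
[editor's note] The b43 hunks above also fix a classic read-modify-write slip: the GPIO control register was ANDed with 'mask' instead of '~mask', so the bits meant to be updated were the only ones preserved. A small, generic sketch of the intended update (example_rmw is a hypothetical helper, not a b43 function):

#include <linux/types.h>

/* Update only the bits selected by 'mask', leaving the rest untouched. */
static u32 example_rmw(u32 old, u32 mask, u32 set)
{
	return (old & ~mask) | (set & mask);
}

With mask = 0x0200 and set = 0x0200 only bit 9 changes; the buggy form would instead have cleared every other bit of the register.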
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 57bf1d7ee80f..9ab24528f9b9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1188,7 +1188,7 @@ exit:
kfree(buf);
/* close file before return */
if (fp)
- filp_close(fp, current->files);
+ filp_close(fp, NULL);
/* restore previous address limit */
set_fs(old_fs);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 9a4c63f927cb..7ed7d7577024 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
{
struct brcms_c_info *wlc = wlc_cm->wlc;
struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
- const struct ieee80211_reg_rule *reg_rule;
struct txpwr_limits txpwr;
- int ret;
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
);
/* set or restore gmode as required by regulatory */
- ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
- if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+ if (ch->flags & IEEE80211_CHAN_NO_OFDM)
brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
else
brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 9e79d47e077f..a5edebeb0b4f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = {
IEEE80211_CHAN_NO_HT40PLUS),
CHAN2GHZ(14, 2484,
IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS |
- IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
+ IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS |
+ IEEE80211_CHAN_NO_OFDM)
};
static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = {
@@ -1232,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
/* dpc will not be rescheduled */
wl->resched = false;
+ /* inform publicly that interface is down */
+ wl->pub->up = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 95aa8e1683ec..83324b321652 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
return;
}
len = ETH_ALEN;
- ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+ ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+ &len);
if (ret) {
IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
__LINE__);
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1102ac..a47b306b522c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
const struct fw_img *img;
size_t bufsz;
+ if (!iwl_is_ready_rf(priv))
+ return -EAGAIN;
+
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 6fddd2785e6e..a82f46c10f5e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
*/
static bool rs_use_green(struct ieee80211_sta *sta)
{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
+ /*
+ * There's a bug somewhere in this code that causes the
+ * scaling to get stuck because GF+SGI can't be combined
+ * in SISO rates. Until we find that bug, disable GF, it
+ * has only limited benefit and we still interoperate with
+ * GF APs since we can always receive GF transmissions.
+ */
+ return false;
}
/**
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c58208c..4ffc18dc3a57 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
/*****************************************************
* Error handling
******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1f009c..d1a61ba6247a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
}
iwl_dump_csr(trans);
- iwl_dump_fh(trans, NULL, false);
+ iwl_dump_fh(trans, NULL);
iwl_op_mode_nic_error(trans->op_mode);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f78df58..1e86ea2266d4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD
}
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG
};
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;
+
pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return pos;
}
#endif
+
IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return 0;
}
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- char *buf;
+ char *buf = NULL;
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_dump_fh(trans, &buf, true);
+ ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index eb5de800ed90..1c10b542ab23 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv,
netif_tx_wake_all_queues(priv->dev);
}
+ kfree(cmd);
done:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 76caebaa4397..e970897f6ab5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func)
kfree(packet);
}
+ kfree(card);
lbs_deb_leave(LBS_DEB_SDIO);
}
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 55a77e41170a..27980778d992 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/olpc-ec.h>
#ifdef CONFIG_OLPC
#include <asm/olpc.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 58048189bd24..fe1ea43c5149 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -571,7 +571,10 @@ static int lbs_thread(void *data)
netdev_info(dev, "Timeout submitting command 0x%04x\n",
le16_to_cpu(cmdnode->cmdbuf->command));
lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- if (priv->reset_card)
+
+ /* Reset card, but only when it isn't in the process
+ * of being shut down anyway. */
+ if (!dev->dismantle && priv->reset_card)
priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 643f968b05ee..00838395778c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -739,11 +739,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
txi = IEEE80211_SKB_CB(skb);
- if (txi->control.vif)
- hwsim_check_magic(txi->control.vif);
- if (txi->control.sta)
- hwsim_check_sta_magic(txi->control.sta);
-
ieee80211_tx_info_clear_status(txi);
/* frame was transmitted at most favorable rate at first attempt */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 7f207b6e9552..effb044a8a9d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
* whenever you add a new device.
*/
-static struct usb_device_id p54u_table[] __devinitdata = {
+static struct usb_device_id p54u_table[] = {
/* Version 1 devices (pci chip + net2280) */
{USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
{USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 241162e8111d..7a4ae9ee1c63 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
+ struct ndis_80211_pmkid *new_pmkids;
int i, err, newlen;
unsigned int count;
@@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
/* add new pmkid */
newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
- pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
- if (!pmkids) {
+ new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
+ if (!new_pmkids) {
err = -ENOMEM;
goto error;
}
+ pmkids = new_pmkids;
pmkids->length = cpu_to_le32(newlen);
pmkids->bssid_info_count = cpu_to_le32(count + 1);
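The rndis_wlan fix above is the standard realloc-safety idiom: assign the result of krealloc() to a temporary so the original allocation is not leaked (and stays usable) when the call fails. The same idiom in plain C, using realloc() as a stand-in; append_entry and its parameters are illustrative:

#include <stdlib.h>
#include <string.h>

/* Grow *list by one string; on failure *list is untouched and still freeable. */
static int append_entry(char **list, size_t *len, const char *entry)
{
	size_t add = strlen(entry) + 1;
	char *tmp = realloc(*list, *len + add);

	if (!tmp)
		return -1;	/* original buffer survives, caller can still free it */

	memcpy(tmp + *len, entry, add);
	*list = tmp;
	*len += add;
	return 0;
}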
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 88455b1b9fe0..cb8c2aca54e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i, count;
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN))
+ return 0;
+
+ rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+ rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+ rt2x00_set_field32(&reg, WLAN_EN, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+ udelay(REGISTER_BUSY_DELAY);
+
+ count = 0;
+ do {
+ /*
+ * Check PLL_LD & XTAL_RDY.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PLL_LD) &&
+ rt2x00_get_field32(reg, XTAL_RDY))
+ break;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ if (i >= REGISTER_BUSY_COUNT) {
+
+ if (count >= 10)
+ return -EIO;
+
+ rt2800_register_write(rt2x00dev, 0x58, 0x018);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x418);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x618);
+ udelay(REGISTER_BUSY_DELAY);
+ count++;
+ } else {
+ count = 0;
+ }
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+ rt2x00_set_field32(&reg, WLAN_RESET, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2x00_set_field32(&reg, WLAN_RESET, 0);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+ } while (count != 0);
+
+ return 0;
+}
+
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1)
@@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
u32 reg;
+ int retval;
+
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+ if (retval)
+ return -EBUSY;
+ }
/*
* If driver doesn't wake up firmware here,
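rt2800_enable_wlan_rt3290(), moved into rt2800lib.c so it runs from rt2800_load_firmware() for every bus type, is a bounded poll-and-retry loop: poll PLL_LD/XTAL_RDY up to REGISTER_BUSY_COUNT times, and if that times out, kick the PLL through register 0x58 and try again, giving up after ten rounds. A generic userspace sketch of that loop shape, with poll_ready()/kick_hw() as placeholders for the register accesses (the real function additionally runs the WLAN_RESET sequence once the clock is up):

#include <stdbool.h>
#include <unistd.h>

#define POLL_TRIES	200
#define RETRY_LIMIT	10

static bool poll_ready(void) { return true; }	/* stand-in for PLL_LD && XTAL_RDY */
static void kick_hw(void)    { }		/* stand-in for the 0x58 writes */

static int wait_for_clock(void)
{
	int retries;

	for (retries = 0; retries < RETRY_LIMIT; retries++) {
		int i;

		for (i = 0; i < POLL_TRIES; i++) {
			if (poll_ready())
				return 0;
			usleep(100);
		}
		kick_hw();	/* nudge the PLL and poll again */
	}

	return -1;	/* the driver returns -EIO at this point */
}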
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 235376e9cb04..98aa426a3564 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return rt2800_validate_eeprom(rt2x00dev);
}
-static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
-{
- u32 reg;
- int i, count;
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- if (rt2x00_get_field32(reg, WLAN_EN))
- return 0;
-
- rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
- rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
- rt2x00_set_field32(&reg, WLAN_EN, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
-
- udelay(REGISTER_BUSY_DELAY);
-
- count = 0;
- do {
- /*
- * Check PLL_LD & XTAL_RDY.
- */
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
- if (rt2x00_get_field32(reg, PLL_LD) &&
- rt2x00_get_field32(reg, XTAL_RDY))
- break;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- if (i >= REGISTER_BUSY_COUNT) {
-
- if (count >= 10)
- return -EIO;
-
- rt2800_register_write(rt2x00dev, 0x58, 0x018);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x418);
- udelay(REGISTER_BUSY_DELAY);
- rt2800_register_write(rt2x00dev, 0x58, 0x618);
- udelay(REGISTER_BUSY_DELAY);
- count++;
- } else {
- count = 0;
- }
-
- rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
- rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
- rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
- rt2x00_set_field32(&reg, WLAN_RESET, 1);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2x00_set_field32(&reg, WLAN_RESET, 0);
- rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
- udelay(10);
- rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
- } while (count != 0);
-
- return 0;
-}
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
@@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
- * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
- * clk for rt3290. That avoid the MCU fail in start phase.
- */
- if (rt2x00_rt(rt2x00dev, RT3290)) {
- retval = rt2800_enable_wlan_rt3290(rt2x00dev);
-
- if (retval)
- return retval;
- }
-
- /*
* This device has multiple filters for control frames
* and has a separate filter for PS Poll frames.
*/
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index f32259686b45..3f7bc5cadf9a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
+ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 71a30b026089..533024095c43 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
MODULE_LICENSE("GPL");
-static struct usb_device_id rtl8187_table[] __devinitdata = {
+static struct usb_device_id rtl8187_table[] = {
/* Asus */
{USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
/* Belkin */
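Both the p54usb and rtl8187 hunks drop __devinitdata from their usb_device_id tables: the USB core consults these tables on every hotplug event, long after init memory has been discarded, so they must stay resident. A hedged, kernel-only sketch of the corrected pattern (the vendor/product IDs are illustrative):

#include <linux/usb.h>
#include <linux/module.h>

static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* illustrative VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_table);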
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 30899901aef5..650f79a1f2bd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,8 +57,7 @@
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
- struct page *page;
- unsigned offset;
+ int pull_to;
};
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
struct sk_buff *skb;
while ((skb = __skb_dequeue(rxq)) != NULL) {
- struct page *page = NETFRONT_SKB_CB(skb)->page;
- void *vaddr = page_address(page);
- unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
- memcpy(skb->data, vaddr + offset,
- skb_headlen(skb));
+ int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
- __free_page(page);
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq;
struct sk_buff_head tmpq;
unsigned long flags;
- unsigned int len;
int err;
spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
}
}
- NETFRONT_SKB_CB(skb)->page =
- skb_frag_page(&skb_shinfo(skb)->frags[0]);
- NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
- len = rx->status;
- if (len > RX_COPY_THRESHOLD)
- len = RX_COPY_THRESHOLD;
- skb_put(skb, len);
+ NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+ if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+ NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
- if (rx->status > len) {
- skb_shinfo(skb)->frags[0].page_offset =
- rx->offset + len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
- skb->data_len = rx->status - len;
- } else {
- __skb_fill_page_desc(skb, 0, NULL, 0, 0);
- skb_shinfo(skb)->nr_frags = 0;
- }
+ skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+ skb->data_len = rx->status;
i = xennet_fill_frags(np, skb, &tmpq);
@@ -999,7 +980,7 @@ err:
* receive throughout using the standard receive
* buffer size was cut by 25%(!!!).
*/
- skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
skb->len += skb->data_len;
if (rx->flags & XEN_NETRXF_csum_blank)
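The xen-netfront change stops copying headers at RX ring time. Instead the ring handler records, per skb, how many bytes should end up in the linear area (pull_to), and handle_incoming_queue() does a single __pskb_pull_tail() just before the packet is handed up. A small userspace analogue of that "record now, pull later" split; all names and sizes here are illustrative:

#include <string.h>
#include <stddef.h>

#define RX_COPY_THRESHOLD 256

struct pkt {
	unsigned char head[RX_COPY_THRESHOLD];	/* "linear" area */
	size_t headlen;
	const unsigned char *frag;		/* paged data */
	size_t fraglen;
	size_t pull_to;				/* cookie filled at RX time */
};

static void rx_fill(struct pkt *p, const unsigned char *data, size_t len)
{
	p->headlen = 0;
	p->frag = data;
	p->fraglen = len;
	p->pull_to = len > RX_COPY_THRESHOLD ? RX_COPY_THRESHOLD : len;
}

static void deliver(struct pkt *p)
{
	size_t pull = p->pull_to - p->headlen;	/* like __pskb_pull_tail() */

	memcpy(p->head + p->headlen, p->frag, pull);
	p->headlen += pull;
	p->frag += pull;
	p->fraglen -= pull;
}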
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c181b94abc36..d4a1c9a043e1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -364,6 +364,33 @@ struct device_node *of_get_next_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_child);
/**
+ * of_get_next_available_child - Find the next available child node
+ * @node: parent node
+ * @prev: previous child of the parent node, or NULL to get first
+ *
+ * This function is like of_get_next_child(), except that it
+ * automatically skips any disabled nodes (i.e. status = "disabled").
+ */
+struct device_node *of_get_next_available_child(const struct device_node *node,
+ struct device_node *prev)
+{
+ struct device_node *next;
+
+ read_lock(&devtree_lock);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling) {
+ if (!of_device_is_available(next))
+ continue;
+ if (of_node_get(next))
+ break;
+ }
+ of_node_put(prev);
+ read_unlock(&devtree_lock);
+ return next;
+}
+EXPORT_SYMBOL(of_get_next_available_child);
+
+/**
* of_find_node_by_path - Find a node matching a full OF path
* @path: The full path to match
*
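of_get_next_available_child() mirrors of_get_next_child() but skips nodes whose status is not "okay"; like its sibling it returns the next child with a reference held and drops the reference on prev. A hedged usage sketch (kernel-only code, count_enabled_children is illustrative):

#include <linux/of.h>

static int count_enabled_children(struct device_node *parent)
{
	struct device_node *child = NULL;
	int n = 0;

	while ((child = of_get_next_available_child(parent, child)))
		n++;	/* the helper drops the reference on the previous child */

	return n;
}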
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index fbf7b26c7c8a..c5792d622dc4 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
}
if (!error)
- dev_printk(KERN_INFO, &dev->dev,
- "power state changed by ACPI to D%d\n", state);
+ dev_info(&dev->dev, "power state changed by ACPI to %s\n",
+ pci_power_name(state));
return error;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 185be3703343..d6fd6b6d9d4b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
{
struct drv_dev_and_id *ddi = _ddi;
struct device *dev = &ddi->dev->dev;
+ struct device *parent = dev->parent;
int rc;
+ /* The parent bridge must be in active state when probing */
+ if (parent)
+ pm_runtime_get_sync(parent);
/* Unbound PCI devices are always set to disabled and suspended.
* During probe, the device is set to enabled and active and the
* usage count is incremented. If the driver supports runtime PM,
@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
pm_runtime_set_suspended(dev);
pm_runtime_put_noidle(dev);
}
+ if (parent)
+ pm_runtime_put(parent);
return rc;
}
@@ -959,6 +965,13 @@ static int pci_pm_poweroff_noirq(struct device *dev)
if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
pci_prepare_to_sleep(pci_dev);
+ /*
+ * The reason for doing this here is the same as for the analogous code
+ * in pci_pm_suspend_noirq().
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
return 0;
}
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 6869009c7393..02d107b15281 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf)
}
struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+static void
+pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ if (parent)
+ pm_runtime_get_sync(parent);
+ pm_runtime_get_noresume(dev);
+ /*
+ * pdev->current_state is set to PCI_D3cold during suspending,
+ * so wait until suspending completes
+ */
+ pm_runtime_barrier(dev);
+ /*
+ * Only need to resume devices in D3cold, because config
+ * registers are still accessible for devices suspended but
+ * not in D3cold.
+ */
+ if (pdev->current_state == PCI_D3cold)
+ pm_runtime_resume(dev);
+}
+
+static void
+pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ pm_runtime_put(dev);
+ if (parent)
+ pm_runtime_put_sync(parent);
+}
+
static ssize_t
pci_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
size = count;
}
+ pci_config_pm_runtime_get(dev);
+
if ((off & 1) && size) {
u8 val;
pci_user_read_config_byte(dev, off, &val);
@@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj,
--size;
}
+ pci_config_pm_runtime_put(dev);
+
return count;
}
@@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
count = size;
}
+ pci_config_pm_runtime_get(dev);
+
if ((off & 1) && size) {
pci_user_write_config_byte(dev, off, data[off - init_off]);
off++;
@@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj,
--size;
}
+ pci_config_pm_runtime_put(dev);
+
return count;
}
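pci_config_pm_runtime_get()/_put() bracket the sysfs config-space accessors so a device parked in D3cold (and its parent bridge) is brought back before its config registers are touched, then allowed to suspend again. Roughly how they are used inside pci-sysfs.c, as a hedged sketch; read_vendor_id is illustrative and the helpers are static to that file:

static u16 read_vendor_id(struct pci_dev *pdev)
{
	u16 vendor;

	pci_config_pm_runtime_get(pdev);	/* resume parent + device if needed */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	pci_config_pm_runtime_put(pdev);	/* drop the usage counts again */

	return vendor;
}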
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f3ea977a5b1b..ab4bf5a4c2f1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+ dev->d3cold_allowed = true;
dev->d1_support = false;
dev->d2_support = false;
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 3a7eefcb270a..e76b44777dbf 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev)
{
return 0;
}
+
+static int pcie_port_runtime_idle(struct device *dev)
+{
+ /* Delay for a short while to prevent too frequent suspend/resume */
+ pm_schedule_suspend(dev, 10);
+ return -EBUSY;
+}
#else
#define pcie_port_runtime_suspend NULL
#define pcie_port_runtime_resume NULL
+#define pcie_port_runtime_idle NULL
#endif
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
@@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.resume_noirq = pcie_port_resume_noirq,
.runtime_suspend = pcie_port_runtime_suspend,
.runtime_resume = pcie_port_runtime_resume,
+ .runtime_idle = pcie_port_runtime_idle,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
return status;
pci_save_state(dev);
+ /*
+ * D3cold may not work properly on some PCIe ports, so disable
+ * it by default.
+ */
+ dev->d3cold_allowed = false;
if (!pci_match_id(port_runtime_pm_black_list, dev))
pm_runtime_put_noidle(&dev->dev);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6c143b4497ca..9f8a6b79a8ec 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
case PCI_BASE_ADDRESS_MEM_TYPE_32:
break;
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
- dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n");
+ /* 1M mem BAR treated as 32-bit BAR */
break;
case PCI_BASE_ADDRESS_MEM_TYPE_64:
flags |= IORESOURCE_MEM_64;
break;
default:
- dev_warn(&dev->dev,
- "mem unknown type %x treated as 32-bit BAR\n",
- mem_type);
+ /* mem unknown type treated as 32-bit BAR */
break;
}
return flags;
@@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
u32 l, sz, mask;
u16 orig_cmd;
struct pci_bus_region region;
+ bool bar_too_big = false, bar_disabled = false;
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ /* No printks while decoding is disabled! */
if (!dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
pci_write_config_word(dev, PCI_COMMAND,
@@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
goto fail;
if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
- dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
- pos);
+ bar_too_big = true;
goto fail;
}
@@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
region.start = 0;
region.end = sz64;
pcibios_bus_to_resource(dev, res, &region);
+ bar_disabled = true;
} else {
region.start = l64;
region.end = l64 + sz64;
pcibios_bus_to_resource(dev, res, &region);
- dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
- pos, res);
}
} else {
sz = pci_size(l, sz, mask);
@@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
region.start = l;
region.end = l + sz;
pcibios_bus_to_resource(dev, res, &region);
-
- dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
- out:
+ goto out;
+
+
+fail:
+ res->flags = 0;
+out:
if (!dev->mmio_always_on)
pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+ if (bar_too_big)
+ dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos);
+ if (res->flags && !bar_disabled)
+ dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
+
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
- fail:
- res->flags = 0;
- goto out;
}
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c
index decb34730bcf..56ab73915602 100644
--- a/drivers/pcmcia/sa1100_shannon.c
+++ b/drivers/pcmcia/sa1100_shannon.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index fb7f3bebdc69..dc5c126e398a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev)
if (p != NULL)
return ERR_PTR(-EBUSY);
- p = create_pinctrl(dev);
- if (IS_ERR(p))
- return p;
-
- return p;
+ return create_pinctrl(dev);
}
/**
@@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p,
dev_dbg(p->dev, "using pinctrl dummy state (%s)\n",
name);
state = create_state(p, name);
- if (IS_ERR(state))
- return state;
- } else {
- return ERR_PTR(-ENODEV);
- }
+ } else
+ state = ERR_PTR(-ENODEV);
}
return state;
diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c
index 75d3eff94296..3674d877ed7c 100644
--- a/drivers/pinctrl/pinctrl-imx23.c
+++ b/drivers/pinctrl/pinctrl-imx23.c
@@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void)
{
return platform_driver_register(&imx23_pinctrl_driver);
}
-arch_initcall(imx23_pinctrl_init);
+postcore_initcall(imx23_pinctrl_init);
static void __exit imx23_pinctrl_exit(void)
{
diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c
index b973026811a2..0f5b2122b1ba 100644
--- a/drivers/pinctrl/pinctrl-imx28.c
+++ b/drivers/pinctrl/pinctrl-imx28.c
@@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void)
{
return platform_driver_register(&imx28_pinctrl_driver);
}
-arch_initcall(imx28_pinctrl_init);
+postcore_initcall(imx28_pinctrl_init);
static void __exit imx28_pinctrl_exit(void)
{
diff --git a/drivers/pinctrl/pinctrl-imx51.c b/drivers/pinctrl/pinctrl-imx51.c
index 689b3c88dd2e..9fd02162a3c2 100644
--- a/drivers/pinctrl/pinctrl-imx51.c
+++ b/drivers/pinctrl/pinctrl-imx51.c
@@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = {
IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */
IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */
IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */
- IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
+ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */
IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */
IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */
IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 6f99769c6733..a39fb7a6fc51 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,
DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1,
DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2,
DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 };
+static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1,
+ DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3};
static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,
DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6,
DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8,
@@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
+ DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
@@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1",
"lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1");
-DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1");
+DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1");
DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1");
DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1");
DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1");
@@ -766,7 +769,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
+DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2");
DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
DB8500_FUNC_GROUPS(usb, "usb_a_1");
DB8500_FUNC_GROUPS(trig, "trig_b_1");
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 53b0d49a7a1c..3dde6537adb8 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
NOMADIK_GPIO_TO_IRQ(pdata->first_gpio),
0, &nmk_gpio_irq_simple_ops, nmk_chip);
if (!nmk_chip->domain) {
- pr_err("%s: Failed to create irqdomain\n", np->full_name);
+ dev_err(&dev->dev, "failed to create irqdomain\n");
ret = -ENOSYS;
goto out;
}
@@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
if (!nmk_gpio_chips[i]) {
dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, npct);
return -EPROBE_DEFER;
}
npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip;
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index 2aae8a8978e9..7fca6ce5952b 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1217,7 +1217,6 @@ out_no_rsc_remap:
iounmap(spmx->gpio_virtbase);
out_no_gpio_remap:
platform_set_drvdata(pdev, NULL);
- devm_kfree(&pdev->dev, spmx);
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index a7ad8c112d91..309f5b9a70ec 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto out_no_resource;
- }
+ if (!res)
+ return -ENOENT;
upmx->phybase = res->start;
upmx->physize = resource_size(res);
@@ -1165,8 +1163,6 @@ out_no_remap:
platform_set_drvdata(pdev, NULL);
out_no_memregion:
release_mem_region(upmx->phybase, upmx->physize);
-out_no_resource:
- devm_kfree(&pdev->dev, upmx);
return ret;
}
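The nomadik, sirf and u300 pinctrl hunks all delete explicit devm_kfree() calls on error paths: memory obtained with devm_kzalloc() is released automatically when probe fails or the device is unbound, so error paths can simply return. A hedged sketch of the idiom (example_probe and the allocation size are illustrative):

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *priv;

	priv = devm_kzalloc(&pdev->dev, 128, GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;		/* no devm_kfree() needed here */

	platform_set_drvdata(pdev, priv);
	return 0;
}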
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 782953ae4c03..b17c16ce54ad 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_X86) += x86/
+obj-$(CONFIG_OLPC) += olpc/
diff --git a/drivers/platform/olpc/Makefile b/drivers/platform/olpc/Makefile
new file mode 100644
index 000000000000..dc8b26bc7209
--- /dev/null
+++ b/drivers/platform/olpc/Makefile
@@ -0,0 +1,4 @@
+#
+# OLPC XO platform-specific drivers
+#
+obj-$(CONFIG_OLPC) += olpc-ec.o
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
new file mode 100644
index 000000000000..0f9f8596b300
--- /dev/null
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -0,0 +1,336 @@
+/*
+ * Generic driver for the OLPC Embedded Controller.
+ *
+ * Copyright (C) 2011-2012 One Laptop per Child Foundation.
+ *
+ * Licensed under the GPL v2 or later.
+ */
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/olpc-ec.h>
+#include <asm/olpc.h>
+
+struct ec_cmd_desc {
+ u8 cmd;
+ u8 *inbuf, *outbuf;
+ size_t inlen, outlen;
+
+ int err;
+ struct completion finished;
+ struct list_head node;
+
+ void *priv;
+};
+
+struct olpc_ec_priv {
+ struct olpc_ec_driver *drv;
+ struct work_struct worker;
+ struct mutex cmd_lock;
+
+ /* Pending EC commands */
+ struct list_head cmd_q;
+ spinlock_t cmd_q_lock;
+
+ struct dentry *dbgfs_dir;
+
+ /*
+ * Running an EC command while suspending means we don't always finish
+ * the command before the machine suspends. This means that the EC
+ * is expecting the command protocol to finish, but after a period
+ * of time (while the OS is asleep) the EC times out and restarts its
+ * idle loop. Meanwhile, the OS wakes up, thinks it's still in the
+ * middle of the command protocol, starts throwing random things at
+ * the EC... and everyone's unhappy.
+ */
+ bool suspended;
+};
+
+static struct olpc_ec_driver *ec_driver;
+static struct olpc_ec_priv *ec_priv;
+static void *ec_cb_arg;
+
+void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg)
+{
+ ec_driver = drv;
+ ec_cb_arg = arg;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_driver_register);
+
+static void olpc_ec_worker(struct work_struct *w)
+{
+ struct olpc_ec_priv *ec = container_of(w, struct olpc_ec_priv, worker);
+ struct ec_cmd_desc *desc = NULL;
+ unsigned long flags;
+
+ /* Grab the first pending command from the queue */
+ spin_lock_irqsave(&ec->cmd_q_lock, flags);
+ if (!list_empty(&ec->cmd_q)) {
+ desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node);
+ list_del(&desc->node);
+ }
+ spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
+
+ /* Do we actually have anything to do? */
+ if (!desc)
+ return;
+
+ /* Protect the EC hw with a mutex; only run one cmd at a time */
+ mutex_lock(&ec->cmd_lock);
+ desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen,
+ desc->outbuf, desc->outlen, ec_cb_arg);
+ mutex_unlock(&ec->cmd_lock);
+
+ /* Finished, wake up olpc_ec_cmd() */
+ complete(&desc->finished);
+
+ /* Run the worker thread again in case there are more cmds pending */
+ schedule_work(&ec->worker);
+}
+
+/*
+ * Throw a cmd descriptor onto the list. We now have SMP OLPC machines, so
+ * locking is pretty critical.
+ */
+static void queue_ec_descriptor(struct ec_cmd_desc *desc,
+ struct olpc_ec_priv *ec)
+{
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&desc->node);
+
+ spin_lock_irqsave(&ec->cmd_q_lock, flags);
+ list_add_tail(&desc->node, &ec->cmd_q);
+ spin_unlock_irqrestore(&ec->cmd_q_lock, flags);
+
+ schedule_work(&ec->worker);
+}
+
+int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen)
+{
+ struct olpc_ec_priv *ec = ec_priv;
+ struct ec_cmd_desc desc;
+
+ /* Ensure a driver and ec hook have been registered */
+ if (WARN_ON(!ec_driver || !ec_driver->ec_cmd))
+ return -ENODEV;
+
+ if (!ec)
+ return -ENOMEM;
+
+ /* Suspending in the middle of a command hoses things really badly */
+ if (WARN_ON(ec->suspended))
+ return -EBUSY;
+
+ might_sleep();
+
+ desc.cmd = cmd;
+ desc.inbuf = inbuf;
+ desc.outbuf = outbuf;
+ desc.inlen = inlen;
+ desc.outlen = outlen;
+ desc.err = 0;
+ init_completion(&desc.finished);
+
+ queue_ec_descriptor(&desc, ec);
+
+ /* Timeouts must be handled in the platform-specific EC hook */
+ wait_for_completion(&desc.finished);
+
+ /* The worker thread dequeues the cmd; no need to do anything here */
+ return desc.err;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_cmd);
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs support for "generic commands", to allow sending
+ * arbitrary EC commands from userspace.
+ */
+
+#define EC_MAX_CMD_ARGS (5 + 1) /* cmd byte + 5 args */
+#define EC_MAX_CMD_REPLY (8)
+
+static DEFINE_MUTEX(ec_dbgfs_lock);
+static unsigned char ec_dbgfs_resp[EC_MAX_CMD_REPLY];
+static unsigned int ec_dbgfs_resp_bytes;
+
+static ssize_t ec_dbgfs_cmd_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ int i, m;
+ unsigned char ec_cmd[EC_MAX_CMD_ARGS];
+ unsigned int ec_cmd_int[EC_MAX_CMD_ARGS];
+ char cmdbuf[64];
+ int ec_cmd_bytes;
+
+ mutex_lock(&ec_dbgfs_lock);
+
+ size = simple_write_to_buffer(cmdbuf, sizeof(cmdbuf), ppos, buf, size);
+
+ m = sscanf(cmdbuf, "%x:%u %x %x %x %x %x", &ec_cmd_int[0],
+ &ec_dbgfs_resp_bytes, &ec_cmd_int[1], &ec_cmd_int[2],
+ &ec_cmd_int[3], &ec_cmd_int[4], &ec_cmd_int[5]);
+ if (m < 2 || ec_dbgfs_resp_bytes > EC_MAX_CMD_REPLY) {
+ /* reset to prevent overflow on read */
+ ec_dbgfs_resp_bytes = 0;
+
+ pr_debug("olpc-ec: bad ec cmd: cmd:response-count [arg1 [arg2 ...]]\n");
+ size = -EINVAL;
+ goto out;
+ }
+
+ /* convert scanf'd ints to char */
+ ec_cmd_bytes = m - 2;
+ for (i = 0; i <= ec_cmd_bytes; i++)
+ ec_cmd[i] = ec_cmd_int[i];
+
+ pr_debug("olpc-ec: debugfs cmd 0x%02x with %d args %02x %02x %02x %02x %02x, want %d returns\n",
+ ec_cmd[0], ec_cmd_bytes, ec_cmd[1], ec_cmd[2],
+ ec_cmd[3], ec_cmd[4], ec_cmd[5], ec_dbgfs_resp_bytes);
+
+ olpc_ec_cmd(ec_cmd[0], (ec_cmd_bytes == 0) ? NULL : &ec_cmd[1],
+ ec_cmd_bytes, ec_dbgfs_resp, ec_dbgfs_resp_bytes);
+
+ pr_debug("olpc-ec: response %02x %02x %02x %02x %02x %02x %02x %02x (%d bytes expected)\n",
+ ec_dbgfs_resp[0], ec_dbgfs_resp[1], ec_dbgfs_resp[2],
+ ec_dbgfs_resp[3], ec_dbgfs_resp[4], ec_dbgfs_resp[5],
+ ec_dbgfs_resp[6], ec_dbgfs_resp[7],
+ ec_dbgfs_resp_bytes);
+
+out:
+ mutex_unlock(&ec_dbgfs_lock);
+ return size;
+}
+
+static ssize_t ec_dbgfs_cmd_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ unsigned int i, r;
+ char *rp;
+ char respbuf[64];
+
+ mutex_lock(&ec_dbgfs_lock);
+ rp = respbuf;
+ rp += sprintf(rp, "%02x", ec_dbgfs_resp[0]);
+ for (i = 1; i < ec_dbgfs_resp_bytes; i++)
+ rp += sprintf(rp, ", %02x", ec_dbgfs_resp[i]);
+ mutex_unlock(&ec_dbgfs_lock);
+ rp += sprintf(rp, "\n");
+
+ r = rp - respbuf;
+ return simple_read_from_buffer(buf, size, ppos, respbuf, r);
+}
+
+static const struct file_operations ec_dbgfs_ops = {
+ .write = ec_dbgfs_cmd_write,
+ .read = ec_dbgfs_cmd_read,
+};
+
+static struct dentry *olpc_ec_setup_debugfs(void)
+{
+ struct dentry *dbgfs_dir;
+
+ dbgfs_dir = debugfs_create_dir("olpc-ec", NULL);
+ if (IS_ERR_OR_NULL(dbgfs_dir))
+ return NULL;
+
+ debugfs_create_file("cmd", 0600, dbgfs_dir, NULL, &ec_dbgfs_ops);
+
+ return dbgfs_dir;
+}
+
+#else
+
+static struct dentry *olpc_ec_setup_debugfs(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int olpc_ec_probe(struct platform_device *pdev)
+{
+ struct olpc_ec_priv *ec;
+ int err;
+
+ if (!ec_driver)
+ return -ENODEV;
+
+ ec = kzalloc(sizeof(*ec), GFP_KERNEL);
+ if (!ec)
+ return -ENOMEM;
+
+ ec->drv = ec_driver;
+ INIT_WORK(&ec->worker, olpc_ec_worker);
+ mutex_init(&ec->cmd_lock);
+
+ INIT_LIST_HEAD(&ec->cmd_q);
+ spin_lock_init(&ec->cmd_q_lock);
+
+ ec_priv = ec;
+ platform_set_drvdata(pdev, ec);
+
+ err = ec_driver->probe ? ec_driver->probe(pdev) : 0;
+ if (err) {
+ ec_priv = NULL;
+ kfree(ec);
+ } else {
+ ec->dbgfs_dir = olpc_ec_setup_debugfs();
+ }
+
+ return err;
+}
+
+static int olpc_ec_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
+ int err = 0;
+
+ if (ec_driver->suspend)
+ err = ec_driver->suspend(pdev);
+ if (!err)
+ ec->suspended = true;
+
+ return err;
+}
+
+static int olpc_ec_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct olpc_ec_priv *ec = platform_get_drvdata(pdev);
+
+ ec->suspended = false;
+ return ec_driver->resume ? ec_driver->resume(pdev) : 0;
+}
+
+static const struct dev_pm_ops olpc_ec_pm_ops = {
+ .suspend_late = olpc_ec_suspend,
+ .resume_early = olpc_ec_resume,
+};
+
+static struct platform_driver olpc_ec_plat_driver = {
+ .probe = olpc_ec_probe,
+ .driver = {
+ .name = "olpc-ec",
+ .pm = &olpc_ec_pm_ops,
+ },
+};
+
+static int __init olpc_ec_init_module(void)
+{
+ return platform_driver_register(&olpc_ec_plat_driver);
+}
+
+module_init(olpc_ec_init_module);
+
+MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
+MODULE_LICENSE("GPL");
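The new olpc-ec core serializes all EC traffic through one workqueue: a platform backend registers an olpc_ec_driver via olpc_ec_driver_register(), and each caller of olpc_ec_cmd() queues a descriptor and sleeps on its completion until the worker has run the command under cmd_lock. A hedged sketch of both sides, using only the hooks visible in the file above; the command number 0x10 and all my_* names are illustrative, not taken from the patch:

#include <linux/olpc-ec.h>

static int my_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen,
		     u8 *outbuf, size_t outlen, void *arg)
{
	/* platform-specific wire protocol would go here */
	return 0;
}

static struct olpc_ec_driver my_ec_driver = {
	.ec_cmd = my_ec_cmd,
};

static void my_ec_register(void)
{
	/* done early by the platform code, before olpc_ec_probe() runs */
	olpc_ec_driver_register(&my_ec_driver, NULL);
}

static int read_ec_byte(u8 *out)
{
	/* queued, serialized, and executed by the worker defined above */
	return olpc_ec_cmd(0x10 /* illustrative command */, NULL, 0, out, 1);
}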
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2a262f5c5c0c..c86bae828c28 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP
tristate "Lenovo IdeaPad Laptop Extras"
depends on ACPI
depends on RFKILL && INPUT
+ depends on SERIO_I8042
select INPUT_SPARSEKMAP
help
This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
@@ -758,8 +759,11 @@ config SAMSUNG_Q10
config APPLE_GMUX
tristate "Apple Gmux Driver"
+ depends on ACPI
depends on PNP
- select BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
+ depends on ACPI_VIDEO=n || ACPI_VIDEO
---help---
This driver provides support for the gmux device found on many
Apple laptops, which controls the display mux for the hybrid
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c8f40c9c0428..3782e1cd3697 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -95,6 +95,7 @@ MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026");
enum acer_wmi_event_ids {
WMID_HOTKEY_EVENT = 0x1,
+ WMID_ACCEL_EVENT = 0x5,
};
static const struct key_entry acer_wmi_keymap[] = {
@@ -130,6 +131,7 @@ static const struct key_entry acer_wmi_keymap[] = {
};
static struct input_dev *acer_wmi_input_dev;
+static struct input_dev *acer_wmi_accel_dev;
struct event_return_value {
u8 function;
@@ -200,6 +202,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_BLUETOOTH (1<<2)
#define ACER_CAP_BRIGHTNESS (1<<3)
#define ACER_CAP_THREEG (1<<4)
+#define ACER_CAP_ACCEL (1<<5)
#define ACER_CAP_ANY (0xFFFFFFFF)
/*
@@ -1399,6 +1402,60 @@ static void acer_backlight_exit(void)
}
/*
+ * Accelerometer device
+ */
+static acpi_handle gsensor_handle;
+
+static int acer_gsensor_init(void)
+{
+ acpi_status status;
+ struct acpi_buffer output;
+ union acpi_object out_obj;
+
+ output.length = sizeof(out_obj);
+ output.pointer = &out_obj;
+ status = acpi_evaluate_object(gsensor_handle, "_INI", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ return 0;
+}
+
+static int acer_gsensor_open(struct input_dev *input)
+{
+ return acer_gsensor_init();
+}
+
+static int acer_gsensor_event(void)
+{
+ acpi_status status;
+ struct acpi_buffer output;
+ union acpi_object out_obj[5];
+
+ if (!has_cap(ACER_CAP_ACCEL))
+ return -1;
+
+ output.length = sizeof(out_obj);
+ output.pointer = out_obj;
+
+ status = acpi_evaluate_object(gsensor_handle, "RDVL", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -1;
+
+ if (out_obj->package.count != 4)
+ return -1;
+
+ input_report_abs(acer_wmi_accel_dev, ABS_X,
+ (s16)out_obj->package.elements[0].integer.value);
+ input_report_abs(acer_wmi_accel_dev, ABS_Y,
+ (s16)out_obj->package.elements[1].integer.value);
+ input_report_abs(acer_wmi_accel_dev, ABS_Z,
+ (s16)out_obj->package.elements[2].integer.value);
+ input_sync(acer_wmi_accel_dev);
+ return 0;
+}
+
+/*
* Rfkill devices
*/
static void acer_rfkill_update(struct work_struct *ignored);
@@ -1673,6 +1730,9 @@ static void acer_wmi_notify(u32 value, void *context)
1, true);
}
break;
+ case WMID_ACCEL_EVENT:
+ acer_gsensor_event();
+ break;
default:
pr_warn("Unknown function number - %d - %d\n",
return_value.function, return_value.key_num);
@@ -1758,6 +1818,73 @@ static int acer_wmi_enable_lm(void)
return status;
}
+static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
+ void *ctx, void **retval)
+{
+ *(acpi_handle *)retval = ah;
+ return AE_OK;
+}
+
+static int __init acer_wmi_get_handle(const char *name, const char *prop,
+ acpi_handle *ah)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ BUG_ON(!name || !ah);
+
+ handle = NULL;
+ status = acpi_get_devices(prop, acer_wmi_get_handle_cb,
+ (void *)name, &handle);
+
+ if (ACPI_SUCCESS(status)) {
+ *ah = handle;
+ return 0;
+ } else {
+ return -ENODEV;
+ }
+}
+
+static int __init acer_wmi_accel_setup(void)
+{
+ int err;
+
+ err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+ if (err)
+ return err;
+
+ interface->capability |= ACER_CAP_ACCEL;
+
+ acer_wmi_accel_dev = input_allocate_device();
+ if (!acer_wmi_accel_dev)
+ return -ENOMEM;
+
+ acer_wmi_accel_dev->open = acer_gsensor_open;
+
+ acer_wmi_accel_dev->name = "Acer BMA150 accelerometer";
+ acer_wmi_accel_dev->phys = "wmi/input1";
+ acer_wmi_accel_dev->id.bustype = BUS_HOST;
+ acer_wmi_accel_dev->evbit[0] = BIT_MASK(EV_ABS);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_X, -16384, 16384, 0, 0);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_Y, -16384, 16384, 0, 0);
+ input_set_abs_params(acer_wmi_accel_dev, ABS_Z, -16384, 16384, 0, 0);
+
+ err = input_register_device(acer_wmi_accel_dev);
+ if (err)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ input_free_device(acer_wmi_accel_dev);
+ return err;
+}
+
+static void acer_wmi_accel_destroy(void)
+{
+ input_unregister_device(acer_wmi_accel_dev);
+}
+
static int __init acer_wmi_input_setup(void)
{
acpi_status status;
@@ -1912,6 +2039,9 @@ static int acer_resume(struct device *dev)
if (has_cap(ACER_CAP_BRIGHTNESS))
set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
+ if (has_cap(ACER_CAP_ACCEL))
+ acer_gsensor_init();
+
return 0;
}
@@ -2060,14 +2190,16 @@ static int __init acer_wmi_init(void)
set_quirks();
+ if (dmi_check_system(video_vendor_dmi_table))
+ acpi_video_dmi_promote_vendor();
if (acpi_video_backlight_support()) {
- if (dmi_check_system(video_vendor_dmi_table)) {
- acpi_video_unregister();
- } else {
- interface->capability &= ~ACER_CAP_BRIGHTNESS;
- pr_info("Brightness must be controlled by "
- "acpi video driver\n");
- }
+ interface->capability &= ~ACER_CAP_BRIGHTNESS;
+ pr_info("Brightness must be controlled by acpi video driver\n");
+ } else {
+#ifdef CONFIG_ACPI_VIDEO
+ pr_info("Disabling ACPI video driver\n");
+ acpi_video_unregister();
+#endif
}
if (wmi_has_guid(WMID_GUID3)) {
@@ -2090,6 +2222,8 @@ static int __init acer_wmi_init(void)
return err;
}
+ acer_wmi_accel_setup();
+
err = platform_driver_register(&acer_platform_driver);
if (err) {
pr_err("Unable to register platform driver\n");
@@ -2133,6 +2267,8 @@ error_device_alloc:
error_platform_register:
if (wmi_has_guid(ACERWMID_EVENT_GUID))
acer_wmi_input_destroy();
+ if (has_cap(ACER_CAP_ACCEL))
+ acer_wmi_accel_destroy();
return err;
}
@@ -2142,6 +2278,9 @@ static void __exit acer_wmi_exit(void)
if (wmi_has_guid(ACERWMID_EVENT_GUID))
acer_wmi_input_destroy();
+ if (has_cap(ACER_CAP_ACCEL))
+ acer_wmi_accel_destroy();
+
remove_sysfs(acer_platform_device);
remove_debugfs();
platform_device_unregister(acer_platform_device);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 2fd9d36acd15..39abb150bdd4 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -660,7 +660,7 @@ static int acerhdf_register_thermal(void)
if (IS_ERR(cl_dev))
return -EINVAL;
- thz_dev = thermal_zone_device_register("acerhdf", 1, NULL,
+ thz_dev = thermal_zone_device_register("acerhdf", 1, 0, NULL,
&acerhdf_dev_ops, 0, 0, 0,
(kernelmode) ? interval*1000 : 0);
if (IS_ERR(thz_dev))
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 694a15a56230..dfb1a92ce949 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -2,6 +2,7 @@
* Gmux driver for Apple laptops
*
* Copyright (C) Canonical Ltd. <seth.forshee@canonical.com>
+ * Copyright (C) 2010-2012 Andreas Heider <andreas@meetr.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -18,16 +19,30 @@
#include <linux/pnp.h>
#include <linux/apple_bl.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/vga_switcheroo.h>
#include <acpi/video.h>
#include <asm/io.h>
struct apple_gmux_data {
unsigned long iostart;
unsigned long iolen;
+ bool indexed;
+ struct mutex index_lock;
struct backlight_device *bdev;
+
+ /* switcheroo data */
+ acpi_handle dhandle;
+ int gpe;
+ enum vga_switcheroo_client_id resume_client_id;
+ enum vga_switcheroo_state power_state;
+ struct completion powerchange_done;
};
+static struct apple_gmux_data *apple_gmux_data;
+
/*
* gmux port offsets. Many of these are not yet used, but may be in the
* future, and it's useful to have them documented here anyhow.
@@ -45,6 +60,9 @@ struct apple_gmux_data {
#define GMUX_PORT_DISCRETE_POWER 0x50
#define GMUX_PORT_MAX_BRIGHTNESS 0x70
#define GMUX_PORT_BRIGHTNESS 0x74
+#define GMUX_PORT_VALUE 0xc2
+#define GMUX_PORT_READ 0xd0
+#define GMUX_PORT_WRITE 0xd4
#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4)
@@ -59,22 +77,172 @@ struct apple_gmux_data {
#define GMUX_BRIGHTNESS_MASK 0x00ffffff
#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
-static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
+static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
{
return inb(gmux_data->iostart + port);
}
-static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port,
+static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port,
u8 val)
{
outb(val, gmux_data->iostart + port);
}
-static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
+static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port)
{
return inl(gmux_data->iostart + port);
}
+static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ int i;
+ u8 tmpval;
+
+ for (i = 0; i < 4; i++) {
+ tmpval = (val >> (i * 8)) & 0xff;
+ outb(tmpval, port + i);
+ }
+}
+
+static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data)
+{
+ int i = 200;
+ u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+ while (i && (gwr & 0x01)) {
+ inb(gmux_data->iostart + GMUX_PORT_READ);
+ gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+ udelay(100);
+ i--;
+ }
+
+ return !!i;
+}
+
+static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data)
+{
+ int i = 200;
+ u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+
+ while (i && !(gwr & 0x01)) {
+ gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE);
+ udelay(100);
+ i--;
+ }
+
+ if (gwr & 0x01)
+ inb(gmux_data->iostart + GMUX_PORT_READ);
+
+ return !!i;
+}
+
+static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port)
+{
+ u8 val;
+
+ mutex_lock(&gmux_data->index_lock);
+ outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+ gmux_index_wait_ready(gmux_data);
+ val = inb(gmux_data->iostart + GMUX_PORT_VALUE);
+ mutex_unlock(&gmux_data->index_lock);
+
+ return val;
+}
+
+static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port,
+ u8 val)
+{
+ mutex_lock(&gmux_data->index_lock);
+ outb(val, gmux_data->iostart + GMUX_PORT_VALUE);
+ gmux_index_wait_ready(gmux_data);
+ outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+ gmux_index_wait_complete(gmux_data);
+ mutex_unlock(&gmux_data->index_lock);
+}
+
+static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port)
+{
+ u32 val;
+
+ mutex_lock(&gmux_data->index_lock);
+ outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ);
+ gmux_index_wait_ready(gmux_data);
+ val = inl(gmux_data->iostart + GMUX_PORT_VALUE);
+ mutex_unlock(&gmux_data->index_lock);
+
+ return val;
+}
+
+static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ int i;
+ u8 tmpval;
+
+ mutex_lock(&gmux_data->index_lock);
+
+ for (i = 0; i < 4; i++) {
+ tmpval = (val >> (i * 8)) & 0xff;
+ outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i);
+ }
+
+ gmux_index_wait_ready(gmux_data);
+ outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE);
+ gmux_index_wait_complete(gmux_data);
+ mutex_unlock(&gmux_data->index_lock);
+}
+
+static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port)
+{
+ if (gmux_data->indexed)
+ return gmux_index_read8(gmux_data, port);
+ else
+ return gmux_pio_read8(gmux_data, port);
+}
+
+static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val)
+{
+ if (gmux_data->indexed)
+ gmux_index_write8(gmux_data, port, val);
+ else
+ gmux_pio_write8(gmux_data, port, val);
+}
+
+static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port)
+{
+ if (gmux_data->indexed)
+ return gmux_index_read32(gmux_data, port);
+ else
+ return gmux_pio_read32(gmux_data, port);
+}
+
+static void gmux_write32(struct apple_gmux_data *gmux_data, int port,
+ u32 val)
+{
+ if (gmux_data->indexed)
+ gmux_index_write32(gmux_data, port, val);
+ else
+ gmux_pio_write32(gmux_data, port, val);
+}
+
+static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
+{
+ u16 val;
+
+ outb(0xaa, gmux_data->iostart + 0xcc);
+ outb(0x55, gmux_data->iostart + 0xcd);
+ outb(0x00, gmux_data->iostart + 0xce);
+
+ val = inb(gmux_data->iostart + 0xcc) |
+ (inb(gmux_data->iostart + 0xcd) << 8);
+
+ if (val == 0x55aa)
+ return true;
+
+ return false;
+}
+
static int gmux_get_brightness(struct backlight_device *bd)
{
struct apple_gmux_data *gmux_data = bl_get_data(bd);
@@ -90,16 +258,7 @@ static int gmux_update_status(struct backlight_device *bd)
if (bd->props.state & BL_CORE_SUSPENDED)
return 0;
- /*
- * Older gmux versions require writing out lower bytes first then
- * setting the upper byte to 0 to flush the values. Newer versions
- * accept a single u32 write, but the old method also works, so we
- * just use the old method for all gmux versions.
- */
- gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
- gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8);
- gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16);
- gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0);
+ gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness);
return 0;
}
@@ -110,6 +269,146 @@ static const struct backlight_ops gmux_bl_ops = {
.update_status = gmux_update_status,
};
+static int gmux_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD) {
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
+ } else {
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
+ gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+ }
+
+ return 0;
+}
+
+static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data,
+ enum vga_switcheroo_state state)
+{
+ INIT_COMPLETION(gmux_data->powerchange_done);
+
+ if (state == VGA_SWITCHEROO_ON) {
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3);
+ pr_debug("Discrete card powered up\n");
+ } else {
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1);
+ gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0);
+ pr_debug("Discrete card powered down\n");
+ }
+
+ gmux_data->power_state = state;
+
+ if (gmux_data->gpe >= 0 &&
+ !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done,
+ msecs_to_jiffies(200)))
+ pr_warn("Timeout waiting for gmux switch to complete\n");
+
+ return 0;
+}
+
+static int gmux_set_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ return gmux_set_discrete_state(apple_gmux_data, state);
+}
+
+static int gmux_get_client_id(struct pci_dev *pdev)
+{
+ /*
+ * Early Macbook Pros with switchable graphics use nvidia
+ * integrated graphics. Hardcode that the 9400M is integrated.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ return VGA_SWITCHEROO_IGD;
+ else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ pdev->device == 0x0863)
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static enum vga_switcheroo_client_id
+gmux_active_client(struct apple_gmux_data *gmux_data)
+{
+ if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
+ return VGA_SWITCHEROO_IGD;
+
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler gmux_handler = {
+ .switchto = gmux_switchto,
+ .power_state = gmux_set_power_state,
+ .get_client_id = gmux_get_client_id,
+};
+
+static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data)
+{
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+ GMUX_INTERRUPT_DISABLE);
+}
+
+static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data)
+{
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE,
+ GMUX_INTERRUPT_ENABLE);
+}
+
+static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data)
+{
+ return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS);
+}
+
+static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data)
+{
+ u8 status;
+
+ /* to clear interrupts write back current status */
+ status = gmux_interrupt_get_status(gmux_data);
+ gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status);
+}
+
+static void gmux_notify_handler(acpi_handle device, u32 value, void *context)
+{
+ u8 status;
+ struct pnp_dev *pnp = (struct pnp_dev *)context;
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+
+ status = gmux_interrupt_get_status(gmux_data);
+ gmux_disable_interrupts(gmux_data);
+ pr_debug("Notify handler called: status %d\n", status);
+
+ gmux_clear_interrupts(gmux_data);
+ gmux_enable_interrupts(gmux_data);
+
+ if (status & GMUX_INTERRUPT_STATUS_POWER)
+ complete(&gmux_data->powerchange_done);
+}
+
+static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state)
+{
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+ gmux_data->resume_client_id = gmux_active_client(gmux_data);
+ gmux_disable_interrupts(gmux_data);
+ return 0;
+}
+
+static int gmux_resume(struct pnp_dev *pnp)
+{
+ struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+ gmux_enable_interrupts(gmux_data);
+ gmux_switchto(gmux_data->resume_client_id);
+ if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
+ gmux_set_discrete_state(gmux_data, gmux_data->power_state);
+ return 0;
+}
+
static int __devinit gmux_probe(struct pnp_dev *pnp,
const struct pnp_device_id *id)
{
@@ -119,6 +418,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
struct backlight_device *bdev;
u8 ver_major, ver_minor, ver_release;
int ret = -ENXIO;
+ acpi_status status;
+ unsigned long long gpe;
+
+ if (apple_gmux_data)
+ return -EBUSY;
gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL);
if (!gmux_data)
@@ -147,22 +451,29 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
}
/*
- * On some machines the gmux is in ACPI even thought the machine
- * doesn't really have a gmux. Check for invalid version information
- * to detect this.
+ * Invalid version information may indicate either that the gmux
+ * device isn't present or that it's a new one that uses indexed
+ * I/O.
*/
+
ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR);
ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR);
ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE);
if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) {
- pr_info("gmux device not present\n");
- ret = -ENODEV;
- goto err_release;
+ if (gmux_is_indexed(gmux_data)) {
+ mutex_init(&gmux_data->index_lock);
+ gmux_data->indexed = true;
+ } else {
+ pr_info("gmux device not present\n");
+ ret = -ENODEV;
+ goto err_release;
+ }
+ pr_info("Found indexed gmux\n");
+ } else {
+ pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
+ ver_release);
}
- pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor,
- ver_release);
-
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -193,11 +504,68 @@ static int __devinit gmux_probe(struct pnp_dev *pnp,
* backlight control and supports more levels than other options.
* Disable the other backlight choices.
*/
+ acpi_video_dmi_promote_vendor();
+#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
acpi_video_unregister();
+#endif
apple_bl_unregister();
+ gmux_data->power_state = VGA_SWITCHEROO_ON;
+
+ gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev);
+ if (!gmux_data->dhandle) {
+ pr_err("Cannot find acpi handle for pnp device %s\n",
+ dev_name(&pnp->dev));
+ ret = -ENODEV;
+ goto err_notify;
+ }
+
+ status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe);
+ if (ACPI_SUCCESS(status)) {
+ gmux_data->gpe = (int)gpe;
+
+ status = acpi_install_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler, pnp);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Install notify handler failed: %s\n",
+ acpi_format_exception(status));
+ ret = -ENODEV;
+ goto err_notify;
+ }
+
+ status = acpi_enable_gpe(NULL, gmux_data->gpe);
+ if (ACPI_FAILURE(status)) {
+ pr_err("Cannot enable gpe: %s\n",
+ acpi_format_exception(status));
+ goto err_enable_gpe;
+ }
+ } else {
+ pr_warn("No GPE found for gmux\n");
+ gmux_data->gpe = -1;
+ }
+
+ if (vga_switcheroo_register_handler(&gmux_handler)) {
+ ret = -ENODEV;
+ goto err_register_handler;
+ }
+
+ init_completion(&gmux_data->powerchange_done);
+ apple_gmux_data = gmux_data;
+ gmux_enable_interrupts(gmux_data);
+
return 0;
+err_register_handler:
+ if (gmux_data->gpe >= 0)
+ acpi_disable_gpe(NULL, gmux_data->gpe);
+err_enable_gpe:
+ if (gmux_data->gpe >= 0)
+ acpi_remove_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler);
+err_notify:
+ backlight_device_unregister(bdev);
err_release:
release_region(gmux_data->iostart, gmux_data->iolen);
err_free:
@@ -209,11 +577,25 @@ static void __devexit gmux_remove(struct pnp_dev *pnp)
{
struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
+ vga_switcheroo_unregister_handler();
+ gmux_disable_interrupts(gmux_data);
+ if (gmux_data->gpe >= 0) {
+ acpi_disable_gpe(NULL, gmux_data->gpe);
+ acpi_remove_notify_handler(gmux_data->dhandle,
+ ACPI_DEVICE_NOTIFY,
+ &gmux_notify_handler);
+ }
+
backlight_device_unregister(gmux_data->bdev);
+
release_region(gmux_data->iostart, gmux_data->iolen);
+ apple_gmux_data = NULL;
kfree(gmux_data);
+ acpi_video_dmi_demote_vendor();
+#if defined (CONFIG_ACPI_VIDEO) || defined (CONFIG_ACPI_VIDEO_MODULE)
acpi_video_register();
+#endif
apple_bl_register();
}
@@ -227,6 +609,8 @@ static struct pnp_driver gmux_pnp_driver = {
.probe = gmux_probe,
.remove = __devexit_p(gmux_remove),
.id_table = gmux_device_ids,
+ .suspend = gmux_suspend,
+ .resume = gmux_resume
};
static int __init apple_gmux_init(void)
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 99a30b513137..6b0ebdeae916 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -26,6 +26,7 @@
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/fb.h>
+#include <linux/dmi.h>
#include "asus-wmi.h"
@@ -48,18 +49,115 @@ MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID);
* 1 | Hardware | Software
* 4 | Software | Software
*/
-static uint wapf;
+static int wapf = -1;
module_param(wapf, uint, 0444);
MODULE_PARM_DESC(wapf, "WAPF value");
+static struct quirk_entry *quirks;
+
static struct quirk_entry quirk_asus_unknown = {
+ .wapf = 0,
+};
+
+static struct quirk_entry quirk_asus_x401u = {
+ .wapf = 4,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 1;
+}
+
+static struct dmi_system_id asus_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401A1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401A1"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501A1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501A1"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55A"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55C"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55U",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X55VD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X55VD"),
+ },
+ .driver_data = &quirk_asus_x401u,
+ },
+ {},
};
static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
{
- driver->quirks = &quirk_asus_unknown;
- driver->quirks->wapf = wapf;
+ quirks = &quirk_asus_unknown;
+ dmi_check_system(asus_quirks);
+
+ driver->quirks = quirks;
driver->panel_power = FB_BLANK_UNBLANK;
+
+ /* Override the wapf setting if the wapf parameter is specified */
+ if (wapf != -1)
+ quirks->wapf = wapf;
+ else
+ wapf = quirks->wapf;
}
static const struct key_entry asus_nb_wmi_keymap[] = {
@@ -94,6 +192,10 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x8A, { KEY_PROG1 } },
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } },
+ { KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ { KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ { KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ { KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
{ KE_KEY, 0xb5, { KEY_CALC } },
{ KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 77aadde5281c..2eb9fe8e8efd 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -47,6 +47,9 @@
#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
+#ifdef CONFIG_ACPI_VIDEO
+#include <acpi/video.h>
+#endif
#include "asus-wmi.h"
@@ -98,6 +101,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002
#define ASUS_WMI_DEVID_CWAP 0x00010003
#define ASUS_WMI_DEVID_WLAN 0x00010011
+#define ASUS_WMI_DEVID_WLAN_LED 0x00010012
#define ASUS_WMI_DEVID_BLUETOOTH 0x00010013
#define ASUS_WMI_DEVID_GPS 0x00010015
#define ASUS_WMI_DEVID_WIMAX 0x00010017
@@ -136,6 +140,9 @@ MODULE_LICENSE("GPL");
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
+/* Deep S3 / Resume on LID open */
+#define ASUS_WMI_DEVID_LID_RESUME 0x00120031
+
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
#define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002
@@ -725,8 +732,21 @@ static int asus_rfkill_set(void *data, bool blocked)
{
struct asus_rfkill *priv = data;
u32 ctrl_param = !blocked;
+ u32 dev_id = priv->dev_id;
- return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL);
+ /*
+ * If the user bit is set, the BIOS can't set and record the wlan
+ * status; it reports the value read from ASUS_WMI_DEVID_WLAN_LED
+ * when we query the wlan status through WMI (ASUS_WMI_DEVID_WLAN).
+ * So we have to record the wlan status in ASUS_WMI_DEVID_WLAN_LED
+ * when setting it through WMI. This matches what the Windows app does.
+ */
+ if ((dev_id == ASUS_WMI_DEVID_WLAN) &&
+ priv->asus->driver->wlan_ctrl_by_user)
+ dev_id = ASUS_WMI_DEVID_WLAN_LED;
+
+ return asus_wmi_set_devstate(dev_id, ctrl_param, NULL);
}
static void asus_rfkill_query(struct rfkill *rfkill, void *data)
@@ -1365,6 +1385,7 @@ static ssize_t show_sys_wmi(struct asus_wmi *asus, int devid, char *buf)
ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
+ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1390,6 +1411,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
+ &dev_attr_lid_resume.attr,
NULL
};
@@ -1408,6 +1430,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_CARDREADER;
else if (attr == &dev_attr_touchpad.attr)
devid = ASUS_WMI_DEVID_TOUCHPAD;
+ else if (attr == &dev_attr_lid_resume.attr)
+ devid = ASUS_WMI_DEVID_LID_RESUME;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -1467,14 +1491,9 @@ static int asus_wmi_platform_init(struct asus_wmi *asus)
*/
if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, 0, 0, NULL))
asus->dsts_id = ASUS_WMI_METHODID_DSTS;
- else if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2, 0, 0, NULL))
+ else
asus->dsts_id = ASUS_WMI_METHODID_DSTS2;
- if (!asus->dsts_id) {
- pr_err("Can't find DSTS");
- return -ENODEV;
- }
-
/* CWAP allow to define the behavior of the Fn+F2 key,
* this method doesn't seems to be present on Eee PCs */
if (asus->driver->quirks->wapf >= 0)
@@ -1648,6 +1667,7 @@ static int asus_wmi_add(struct platform_device *pdev)
struct asus_wmi *asus;
acpi_status status;
int err;
+ u32 result;
asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL);
if (!asus)
@@ -1681,7 +1701,13 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_rfkill;
+ if (asus->driver->quirks->wmi_backlight_power)
+ acpi_video_dmi_promote_vendor();
if (!acpi_video_backlight_support()) {
+#ifdef CONFIG_ACPI_VIDEO
+ pr_info("Disabling ACPI video driver\n");
+ acpi_video_unregister();
+#endif
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
@@ -1700,6 +1726,10 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_debugfs;
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ asus->driver->wlan_ctrl_by_user = 1;
+
return 0;
fail_debugfs:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index d43b66742004..4c9bd38bb0a2 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,12 +39,14 @@ struct quirk_entry {
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
+ bool wmi_backlight_power;
int wapf;
};
struct asus_wmi_driver {
int brightness;
int panel_power;
+ int wlan_ctrl_by_user;
const char *name;
struct module *owner;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index e2230a2b2f8e..c87ff16873f9 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -31,15 +31,21 @@ MODULE_LICENSE("GPL");
struct cmpc_accel {
int sensitivity;
+ int g_select;
+ int inputdev_state;
};
-#define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
+#define CMPC_ACCEL_DEV_STATE_CLOSED 0
+#define CMPC_ACCEL_DEV_STATE_OPEN 1
+#define CMPC_ACCEL_SENSITIVITY_DEFAULT 5
+#define CMPC_ACCEL_G_SELECT_DEFAULT 0
#define CMPC_ACCEL_HID "ACCE0000"
+#define CMPC_ACCEL_HID_V4 "ACCE0001"
#define CMPC_TABLET_HID "TBLT0000"
#define CMPC_IPML_HID "IPML200"
-#define CMPC_KEYS_HID "FnBT0000"
+#define CMPC_KEYS_HID "FNBT0000"
/*
* Generic input device code.
@@ -76,7 +82,393 @@ static int cmpc_remove_acpi_notify_device(struct acpi_device *acpi)
}
/*
- * Accelerometer code.
+ * Accelerometer code for Classmate V4
+ */
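+
+/*
+ * Every V4 accelerometer operation goes through the ACPI "ACMD" method,
+ * which takes four integer arguments; the first selects the command
+ * (0x1 read xyz, 0x2 set sensitivity, 0x3 start, 0x4 stop, 0x5 g-select).
+ */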
+static acpi_status cmpc_start_accel_v4(acpi_handle handle)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x3;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_stop_accel_v4(acpi_handle handle)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x4;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, NULL);
+ return status;
+}
+
+static acpi_status cmpc_accel_set_sensitivity_v4(acpi_handle handle, int val)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x02;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
+
+static acpi_status cmpc_accel_set_g_select_v4(acpi_handle handle, int val)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x05;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = val;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ return acpi_evaluate_object(handle, "ACMD", &input, NULL);
+}
+
+static acpi_status cmpc_get_accel_v4(acpi_handle handle,
+ int16_t *x,
+ int16_t *y,
+ int16_t *z)
+{
+ union acpi_object param[4];
+ struct acpi_object_list input;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ int16_t *locs;
+ acpi_status status;
+
+ param[0].type = ACPI_TYPE_INTEGER;
+ param[0].integer.value = 0x01;
+ param[1].type = ACPI_TYPE_INTEGER;
+ param[1].integer.value = 0;
+ param[2].type = ACPI_TYPE_INTEGER;
+ param[2].integer.value = 0;
+ param[3].type = ACPI_TYPE_INTEGER;
+ param[3].integer.value = 0;
+ input.count = 4;
+ input.pointer = param;
+ status = acpi_evaluate_object(handle, "ACMD", &input, &output);
+ if (ACPI_SUCCESS(status)) {
+ union acpi_object *obj;
+ obj = output.pointer;
+ locs = (int16_t *) obj->buffer.pointer;
+ *x = locs[0];
+ *y = locs[1];
+ *z = locs[2];
+ kfree(output.pointer);
+ }
+ return status;
+}
+
+static void cmpc_accel_handler_v4(struct acpi_device *dev, u32 event)
+{
+ if (event == 0x81) {
+ int16_t x, y, z;
+ acpi_status status;
+
+ status = cmpc_get_accel_v4(dev->handle, &x, &y, &z);
+ if (ACPI_SUCCESS(status)) {
+ struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
+
+ input_report_abs(inputdev, ABS_X, x);
+ input_report_abs(inputdev, ABS_Y, y);
+ input_report_abs(inputdev, ABS_Z, z);
+ input_sync(inputdev);
+ }
+ }
+}
+
+static ssize_t cmpc_accel_sensitivity_show_v4(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->sensitivity);
+}
+
+static ssize_t cmpc_accel_sensitivity_store_v4(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long sensitivity;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = kstrtoul(buf, 0, &sensitivity);
+ if (r)
+ return r;
+
+ /* sensitivity must be between 1 and 127 */
+ if (sensitivity < 1 || sensitivity > 127)
+ return -EINVAL;
+
+ accel->sensitivity = sensitivity;
+ cmpc_accel_set_sensitivity_v4(acpi->handle, sensitivity);
+
+ return strnlen(buf, count);
+}
+
+static struct device_attribute cmpc_accel_sensitivity_attr_v4 = {
+ .attr = { .name = "sensitivity", .mode = 0660 },
+ .show = cmpc_accel_sensitivity_show_v4,
+ .store = cmpc_accel_sensitivity_store_v4
+};
+
+static ssize_t cmpc_accel_g_select_show_v4(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ return sprintf(buf, "%d\n", accel->g_select);
+}
+
+static ssize_t cmpc_accel_g_select_store_v4(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct acpi_device *acpi;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+ unsigned long g_select;
+ int r;
+
+ acpi = to_acpi_device(dev);
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ r = kstrtoul(buf, 0, &g_select);
+ if (r)
+ return r;
+
+ /* 0 means 1.5g, 1 means 6g, everything else is wrong */
+ if (g_select != 0 && g_select != 1)
+ return -EINVAL;
+
+ accel->g_select = g_select;
+ cmpc_accel_set_g_select_v4(acpi->handle, g_select);
+
+ return strnlen(buf, count);
+}
+
+static struct device_attribute cmpc_accel_g_select_attr_v4 = {
+ .attr = { .name = "g_select", .mode = 0660 },
+ .show = cmpc_accel_g_select_show_v4,
+ .store = cmpc_accel_g_select_store_v4
+};
+
+static int cmpc_accel_open_v4(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(input->dev.parent);
+ accel = dev_get_drvdata(&input->dev);
+
+ cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity);
+ cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select);
+
+ if (ACPI_SUCCESS(cmpc_start_accel_v4(acpi->handle))) {
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_OPEN;
+ return 0;
+ }
+ return -EIO;
+}
+
+static void cmpc_accel_close_v4(struct input_dev *input)
+{
+ struct acpi_device *acpi;
+ struct cmpc_accel *accel;
+
+ acpi = to_acpi_device(input->dev.parent);
+ accel = dev_get_drvdata(&input->dev);
+
+ cmpc_stop_accel_v4(acpi->handle);
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_CLOSED;
+}
+
+static void cmpc_accel_idev_init_v4(struct input_dev *inputdev)
+{
+ set_bit(EV_ABS, inputdev->evbit);
+ input_set_abs_params(inputdev, ABS_X, -255, 255, 16, 0);
+ input_set_abs_params(inputdev, ABS_Y, -255, 255, 16, 0);
+ input_set_abs_params(inputdev, ABS_Z, -255, 255, 16, 0);
+ inputdev->open = cmpc_accel_open_v4;
+ inputdev->close = cmpc_accel_close_v4;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cmpc_accel_suspend_v4(struct device *dev)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ if (accel->inputdev_state == CMPC_ACCEL_DEV_STATE_OPEN)
+ return cmpc_stop_accel_v4(to_acpi_device(dev)->handle);
+
+ return 0;
+}
+
+static int cmpc_accel_resume_v4(struct device *dev)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ if (accel->inputdev_state == CMPC_ACCEL_DEV_STATE_OPEN) {
+ cmpc_accel_set_sensitivity_v4(to_acpi_device(dev)->handle,
+ accel->sensitivity);
+ cmpc_accel_set_g_select_v4(to_acpi_device(dev)->handle,
+ accel->g_select);
+
+ if (ACPI_FAILURE(cmpc_start_accel_v4(to_acpi_device(dev)->handle)))
+ return -EIO;
+ }
+
+ return 0;
+}
+#endif
+
+static int cmpc_accel_add_v4(struct acpi_device *acpi)
+{
+ int error;
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ accel = kmalloc(sizeof(*accel), GFP_KERNEL);
+ if (!accel)
+ return -ENOMEM;
+
+ accel->inputdev_state = CMPC_ACCEL_DEV_STATE_CLOSED;
+
+ accel->sensitivity = CMPC_ACCEL_SENSITIVITY_DEFAULT;
+ cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+ if (error)
+ goto failed_sensitivity;
+
+ accel->g_select = CMPC_ACCEL_G_SELECT_DEFAULT;
+ cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select);
+
+ error = device_create_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+ if (error)
+ goto failed_g_select;
+
+ error = cmpc_add_acpi_notify_device(acpi, "cmpc_accel_v4",
+ cmpc_accel_idev_init_v4);
+ if (error)
+ goto failed_input;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ dev_set_drvdata(&inputdev->dev, accel);
+
+ return 0;
+
+failed_input:
+ device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+failed_g_select:
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+failed_sensitivity:
+ kfree(accel);
+ return error;
+}
+
+static int cmpc_accel_remove_v4(struct acpi_device *acpi, int type)
+{
+ struct input_dev *inputdev;
+ struct cmpc_accel *accel;
+
+ inputdev = dev_get_drvdata(&acpi->dev);
+ accel = dev_get_drvdata(&inputdev->dev);
+
+ device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
+ device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
+ return cmpc_remove_acpi_notify_device(acpi);
+}
+
+static SIMPLE_DEV_PM_OPS(cmpc_accel_pm, cmpc_accel_suspend_v4,
+ cmpc_accel_resume_v4);
+
+static const struct acpi_device_id cmpc_accel_device_ids_v4[] = {
+ {CMPC_ACCEL_HID_V4, 0},
+ {"", 0}
+};
+
+static struct acpi_driver cmpc_accel_acpi_driver_v4 = {
+ .owner = THIS_MODULE,
+ .name = "cmpc_accel_v4",
+ .class = "cmpc_accel_v4",
+ .ids = cmpc_accel_device_ids_v4,
+ .ops = {
+ .add = cmpc_accel_add_v4,
+ .remove = cmpc_accel_remove_v4,
+ .notify = cmpc_accel_handler_v4,
+ },
+ .drv.pm = &cmpc_accel_pm,
+};
+
+
+/*
+ * Accelerometer code for Classmate versions prior to V4
*/
static acpi_status cmpc_start_accel(acpi_handle handle)
{
@@ -333,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event)
struct input_dev *inputdev = dev_get_drvdata(&dev->dev);
if (event == 0x81) {
- if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val)))
+ if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) {
input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
}
}
@@ -347,8 +741,10 @@ static void cmpc_tablet_idev_init(struct input_dev *inputdev)
set_bit(SW_TABLET_MODE, inputdev->swbit);
acpi = to_acpi_device(inputdev->dev.parent);
- if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+ if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) {
input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
}
static int cmpc_tablet_add(struct acpi_device *acpi)
@@ -362,15 +758,19 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
return cmpc_remove_acpi_notify_device(acpi);
}
+#ifdef CONFIG_PM_SLEEP
static int cmpc_tablet_resume(struct device *dev)
{
struct input_dev *inputdev = dev_get_drvdata(dev);
unsigned long long val = 0;
- if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val)))
+ if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) {
input_report_switch(inputdev, SW_TABLET_MODE, !val);
+ input_sync(inputdev);
+ }
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
@@ -726,8 +1126,15 @@ static int cmpc_init(void)
if (r)
goto failed_accel;
+ r = acpi_bus_register_driver(&cmpc_accel_acpi_driver_v4);
+ if (r)
+ goto failed_accel_v4;
+
return r;
+failed_accel_v4:
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
+
failed_accel:
acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
@@ -743,6 +1150,7 @@ failed_keys:
static void cmpc_exit(void)
{
+ acpi_bus_unregister_driver(&cmpc_accel_acpi_driver_v4);
acpi_bus_unregister_driver(&cmpc_accel_acpi_driver);
acpi_bus_unregister_driver(&cmpc_tablet_acpi_driver);
acpi_bus_unregister_driver(&cmpc_ipml_acpi_driver);
@@ -754,6 +1162,7 @@ module_exit(cmpc_exit);
static const struct acpi_device_id cmpc_device_ids[] = {
{CMPC_ACCEL_HID, 0},
+ {CMPC_ACCEL_HID_V4, 0},
{CMPC_TABLET_HID, 0},
{CMPC_IPML_HID, 0},
{CMPC_KEYS_HID, 0},
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 5f78aac9b163..927c33af67ec 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -206,6 +206,60 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 5720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron 7720",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"),
+ },
+ .driver_data = &quirk_dell_vostro_v130,
+ },
{ }
};
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 656761380342..5838332ea5bd 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -79,7 +79,7 @@ static const struct key_entry eeepc_wmi_keymap[] = {
{ KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */
{ KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */
{ KE_KEY, 0xe8, { KEY_SCREENLOCK } },
- { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } },
+ { KE_KEY, 0xe9, { KEY_DISPLAYTOGGLE } },
{ KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } },
{ KE_KEY, 0xec, { KEY_CAMERA_UP } },
{ KE_KEY, 0xed, { KEY_CAMERA_DOWN } },
@@ -107,6 +107,11 @@ static struct quirk_entry quirk_asus_et2012_type3 = {
.store_backlight_power = true,
};
+static struct quirk_entry quirk_asus_x101ch = {
+ /* Needed when the ACPI backlight function doesn't handle this well */
+ .wmi_backlight_power = true,
+};
+
static struct quirk_entry *quirks;
static void et2012_quirks(void)
@@ -157,6 +162,24 @@ static struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_unknown,
},
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK Computer INC. X101CH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X101CH"),
+ },
+ .driver_data = &quirk_asus_x101ch,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK Computer INC. 1015CX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015CX"),
+ },
+ .driver_data = &quirk_asus_x101ch,
+ },
{},
};
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index d2e41735a47b..7acae3f85f3b 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int acpi_fujitsu_resume(struct device *dev)
{
fujitsu_reset();
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index d9ab6f64dcec..777c7e3dda51 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int hdaps_resume(struct device *dev)
{
return hdaps_device_init();
}
+#endif
static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index f4d91154ad67..6b9af989632b 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int lis3lv02d_suspend(struct device *dev)
{
/* make sure the device is off when we suspend */
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 17f6dfd8dbfb..dae7abe1d711 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -36,6 +36,7 @@
#include <linux/fb.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/i8042.h>
#define IDEAPAD_RFKILL_DEV_NUM (3)
@@ -63,8 +64,11 @@ enum {
VPCCMD_R_3G,
VPCCMD_W_3G,
VPCCMD_R_ODD, /* 0x21 */
- VPCCMD_R_RF = 0x23,
+ VPCCMD_W_FAN,
+ VPCCMD_R_RF,
VPCCMD_W_RF,
+ VPCCMD_R_FAN = 0x2B,
+ VPCCMD_R_SPECIAL_BUTTONS = 0x31,
VPCCMD_W_BL_POWER = 0x33,
};
@@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev,
return -EINVAL;
ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
if (ret < 0)
- return ret;
+ return -EIO;
return count;
}
static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam);
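+
+/* fan_mode sysfs attribute: valid values are 0, 1, 2 and 4 (3 is rejected) */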
+static ssize_t show_ideapad_fan(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long result;
+
+ if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+ return sprintf(buf, "-1\n");
+ return sprintf(buf, "%lu\n", result);
+}
+
+static ssize_t store_ideapad_fan(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, state;
+
+ if (!count)
+ return 0;
+ if (sscanf(buf, "%i", &state) != 1)
+ return -EINVAL;
+ if (state < 0 || state > 4 || state == 3)
+ return -EINVAL;
+ ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+ if (ret < 0)
+ return -EIO;
+ return count;
+}
+
+static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan);
+
static struct attribute *ideapad_attributes[] = {
&dev_attr_camera_power.attr,
+ &dev_attr_fan_mode.attr,
NULL
};
@@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
if (attr == &dev_attr_camera_power.attr)
supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
- else
+ else if (attr == &dev_attr_fan_mode.attr) {
+ unsigned long value;
+ supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+ } else
supported = true;
return supported ? attr->mode : 0;
@@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv)
*/
static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 11, { KEY_F16 } },
{ KE_KEY, 13, { KEY_WLAN } },
{ KE_KEY, 16, { KEY_PROG1 } },
{ KE_KEY, 17, { KEY_PROG2 } },
+ { KE_KEY, 64, { KEY_PROG3 } },
+ { KE_KEY, 65, { KEY_PROG4 } },
+ { KE_KEY, 66, { KEY_TOUCHPAD_OFF } },
+ { KE_KEY, 67, { KEY_TOUCHPAD_ON } },
{ KE_END, 0 },
};
@@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
ideapad_input_report(priv, 16);
}
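+
+/*
+ * Report presses of the special buttons signalled through the
+ * VPCCMD_R_SPECIAL_BUTTONS bitmask: bit 1 is the OneKey Theater
+ * button, bit 6 the Thermal Management button.
+ */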
+static void ideapad_check_special_buttons(struct ideapad_private *priv)
+{
+ unsigned long bit, value;
+
+ read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+
+ for (bit = 0; bit < 16; bit++) {
+ if (test_bit(bit, &value)) {
+ switch (bit) {
+ case 6:
+ /* Thermal Management button */
+ ideapad_input_report(priv, 65);
+ break;
+ case 1:
+ /* OneKey Theater button */
+ ideapad_input_report(priv, 64);
+ break;
+ }
+ }
+ }
+}
+
/*
* backlight
*/
@@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+{
+ struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+ unsigned long value;
+
+ /* The touchpad LED doesn't switch state unless we read from the EC */
+ if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+ /* Some IdeaPads don't really turn off the touchpad - they only
+ * switch the LED state. We (de)activate the KBC AUX port to turn
+ * the touchpad off and on, and send KEY_TOUCHPAD_OFF and
+ * KEY_TOUCHPAD_ON so we don't get out of sync with the LED. */
+ unsigned char param;
+ i8042_command(&param, value ? I8042_CMD_AUX_ENABLE :
+ I8042_CMD_AUX_DISABLE);
+ ideapad_input_report(priv, value ? 67 : 66);
+ }
+}
+
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
@@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
priv->rfk[i] = NULL;
}
ideapad_sync_rfk_state(priv);
+ ideapad_sync_touchpad_state(adevice);
if (!acpi_video_backlight_support()) {
ret = ideapad_backlight_init(priv);
@@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
ideapad_sync_rfk_state(priv);
break;
case 13:
+ case 11:
+ case 7:
case 6:
ideapad_input_report(priv, vpc_bit);
break;
+ case 5:
+ ideapad_sync_touchpad_state(adevice);
+ break;
case 4:
ideapad_backlight_notify_brightness(priv);
break;
@@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
case 2:
ideapad_backlight_notify_power(priv);
break;
+ case 0:
+ ideapad_check_special_buttons(priv);
+ break;
default:
pr_info("Unknown event: %lu\n", vpc_bit);
}
@@ -804,6 +898,15 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
}
}
+static int ideapad_acpi_resume(struct device *device)
+{
+ ideapad_sync_rfk_state(ideapad_priv);
+ ideapad_sync_touchpad_state(to_acpi_device(device));
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
+
static struct acpi_driver ideapad_acpi_driver = {
.name = "ideapad_acpi",
.class = "IdeaPad",
@@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = {
.ops.add = ideapad_acpi_add,
.ops.remove = ideapad_acpi_remove,
.ops.notify = ideapad_acpi_notify,
+ .drv.pm = &ideapad_pm,
.owner = THIS_MODULE,
};
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index ea7422f6fa03..3a27113deda9 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -502,7 +502,7 @@ static int mid_thermal_probe(struct platform_device *pdev)
goto err;
}
pinfo->tzd[i] = thermal_zone_device_register(name[i],
- 0, td_info, &tzd_ops, 0, 0, 0, 0);
+ 0, 0, td_info, &tzd_ops, 0, 0, 0, 0);
if (IS_ERR(pinfo->tzd[i])) {
kfree(td_info);
ret = PTR_ERR(pinfo->tzd[i]);
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f64441844317..2111dbb7e1e3 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,9 @@
#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
+#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device);
+#endif
static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -753,6 +755,7 @@ err_bluetooth:
return retval;
}
+#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device)
{
u8 data;
@@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device)
return 0;
}
+#endif
static int __init msi_laptop_input_setup(void)
{
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 24480074bcf0..8e8caa767d6a 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int acpi_pcc_hotkey_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
static struct acpi_driver acpi_pcc_driver = {
@@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
/* kernel module interface */
+#ifdef CONFIG_PM_SLEEP
static int acpi_pcc_hotkey_resume(struct device *dev)
{
struct pcc_acpi *pcc;
@@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode);
}
+#endif
static int acpi_pcc_hotkey_add(struct acpi_device *device)
{
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index e2a34b42ddc1..c1ca7bcebb66 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -26,7 +26,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
-#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+#ifdef CONFIG_ACPI_VIDEO
#include <acpi/video.h>
#endif
@@ -1465,6 +1465,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
},
},
+ /* DMI ids for laptops with bad Chassis Type */
+ {
+ .ident = "R40/R41",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R40/R41"),
+ DMI_MATCH(DMI_BOARD_NAME, "R40/R41"),
+ },
+ },
/* Specific DMI ids for laptop with quirks */
{
.callback = samsung_dmi_matched,
@@ -1506,6 +1515,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
},
.driver_data = &samsung_broken_acpi_video,
},
+ {
+ .callback = samsung_dmi_matched,
+ .ident = "X360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X360"),
+ DMI_MATCH(DMI_BOARD_NAME, "X360"),
+ },
+ .driver_data = &samsung_broken_acpi_video,
+ },
{ },
};
MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1530,15 +1549,18 @@ static int __init samsung_init(void)
samsung->quirks = quirks;
-#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+#ifdef CONFIG_ACPI
+ if (samsung->quirks->broken_acpi_video)
+ acpi_video_dmi_promote_vendor();
+
/* Don't handle backlight here if the acpi video already handle it */
if (acpi_video_backlight_support()) {
- if (samsung->quirks->broken_acpi_video) {
- pr_info("Disabling ACPI video driver\n");
- acpi_video_unregister();
- } else {
- samsung->handle_backlight = false;
- }
+ samsung->handle_backlight = false;
+ } else if (samsung->quirks->broken_acpi_video) {
+ pr_info("Disabling ACPI video driver\n");
+#ifdef CONFIG_ACPI_VIDEO
+ acpi_video_unregister();
+#endif
}
#endif
@@ -1552,8 +1574,7 @@ static int __init samsung_init(void)
#ifdef CONFIG_ACPI
/* Only log that if we are really on a sabi platform */
- if (acpi_video_backlight_support() &&
- !samsung->quirks->broken_acpi_video)
+ if (acpi_video_backlight_support())
pr_info("Backlight controlled by ACPI video driver\n");
#endif
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 9363969ad07a..daaddec68def 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
"(default: 0)");
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_kbd_backlight_resume(void);
+static void sony_nc_thermal_resume(void);
+#endif
static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
unsigned int handle);
static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
@@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
static int sony_nc_thermal_setup(struct platform_device *pd);
static void sony_nc_thermal_cleanup(struct platform_device *pd);
-static void sony_nc_thermal_resume(void);
static int sony_nc_lid_resume_setup(struct platform_device *pd);
static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
@@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
sony_nc_handles_cleanup(pd);
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_function_resume(void)
{
unsigned int i, result, bitmask, arg;
@@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
@@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
}
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_kbd_backlight_resume(void)
{
int ignore = 0;
@@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void)
(kbdbl_ctl->base + 0x200) |
(kbdbl_ctl->timeout << 0x10), &ignore);
}
+#endif
struct battery_care_control {
struct device_attribute attrs[2];
@@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd)
}
}
+#ifdef CONFIG_PM_SLEEP
static void sony_nc_thermal_resume(void)
{
unsigned int status = sony_nc_thermal_mode_get();
@@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void)
if (status != th_handle->mode)
sony_nc_thermal_mode_set(th_handle->mode);
}
+#endif
/* resume on LID open */
struct snc_lid_resume_control {
@@ -4287,6 +4295,7 @@ err_free_resources:
return result;
}
+#ifdef CONFIG_PM_SLEEP
static int sony_pic_suspend(struct device *dev)
{
if (sony_pic_disable(to_acpi_device(dev)))
@@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev)
spic_dev.cur_ioport, spic_dev.cur_irq);
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d5fd4a1193f8..80e377949314 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
+#ifdef CONFIG_PM_SLEEP
static int tpacpi_suspend_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(tpacpi_pm,
tpacpi_suspend_handler, tpacpi_resume_handler);
@@ -3015,8 +3017,6 @@ static void hotkey_exit(void)
if (hotkey_dev_attributes)
delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
- kfree(hotkey_keycode_map);
-
dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
"restoring original HKEY status and mask\n");
/* yes, there is a bitwise or below, we want the
@@ -5217,6 +5217,7 @@ static void led_exit(void)
led_classdev_unregister(&tpacpi_leds[i].led_classdev);
}
+ flush_workqueue(tpacpi_wq);
kfree(tpacpi_leds);
}
@@ -8663,6 +8664,13 @@ static int __must_check __init get_thinkpad_model_data(
tp->model_str = kstrdup(s, GFP_KERNEL);
if (!tp->model_str)
return -ENOMEM;
+ } else {
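+ /* Fall back to the BIOS vendor string when it identifies a Lenovo machine */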
+ s = dmi_get_system_info(DMI_BIOS_VENDOR);
+ if (s && !(strnicmp(s, "Lenovo", 6))) {
+ tp->model_str = kstrdup(s, GFP_KERNEL);
+ if (!tp->model_str)
+ return -ENOMEM;
+ }
}
s = dmi_get_system_info(DMI_PRODUCT_NAME);
@@ -8936,6 +8944,7 @@ static void thinkpad_acpi_module_exit(void)
input_unregister_device(tpacpi_inputdev);
else
input_free_device(tpacpi_inputdev);
+ kfree(hotkey_keycode_map);
}
if (tpacpi_hwmon)
@@ -8969,6 +8978,7 @@ static void thinkpad_acpi_module_exit(void)
kfree(thinkpad_id.bios_version_str);
kfree(thinkpad_id.ec_version_str);
kfree(thinkpad_id.model_str);
+ kfree(thinkpad_id.nummodel_str);
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index c13ba5bac93f..5f1256d5e933 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int toshiba_acpi_suspend(struct device *device)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
@@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
toshiba_acpi_suspend, toshiba_acpi_resume);
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 715a43cb5e3c..5e5d6317d690 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+#ifdef CONFIG_PM_SLEEP
static int toshiba_bt_resume(struct device *dev);
+#endif
static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
static struct acpi_driver toshiba_bt_rfkill_driver = {
@@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
toshiba_bluetooth_enable(device->handle);
}
+#ifdef CONFIG_PM_SLEEP
static int toshiba_bt_resume(struct device *dev)
{
return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
}
+#endif
static int toshiba_bt_rfkill_add(struct acpi_device *device)
{
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index b57ad8641480..1da13ed34b04 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -12,8 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rfkill.h>
-
-#include <asm/olpc.h>
+#include <linux/olpc-ec.h>
static bool card_blocked;
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index 849c07c13bf6..38ba39d7ca7d 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
}
}
+#ifdef CONFIG_PM_SLEEP
static int ebook_switch_resume(struct device *dev)
{
return ebook_send_state(to_acpi_device(dev));
}
+#endif
static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index aa764ecc4e60..c1892f321c46 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -268,6 +268,7 @@ config CHARGER_GPIO
config CHARGER_MANAGER
bool "Battery charger manager for multiple chargers"
depends on REGULATOR && RTC_CLASS
+ select EXTCON
help
Say Y to enable charger-manager support, which allows multiple
chargers attached to a battery and multiple batteries attached to a
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index f5d6d379f2fb..181ddece5181 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -22,6 +22,7 @@
* Datasheets:
* http://focus.ti.com/docs/prod/folders/print/bq27000.html
* http://focus.ti.com/docs/prod/folders/print/bq27500.html
+ * http://www.ti.com/product/bq27425-g1
*/
#include <linux/module.h>
@@ -51,6 +52,7 @@
#define BQ27x00_REG_LMD 0x12 /* Last measured discharge */
#define BQ27x00_REG_CYCT 0x2A /* Cycle count total */
#define BQ27x00_REG_AE 0x22 /* Available energy */
+#define BQ27x00_POWER_AVG 0x24
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
@@ -66,15 +68,21 @@
#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
#define BQ27500_FLAG_FC BIT(9)
+#define BQ27500_FLAG_OTC BIT(15)
+
+/* bq27425 register addresses are the same as the bq27x00 addresses minus 4 */
+#define BQ27425_REG_OFFSET 0x04
+#define BQ27425_REG_SOC 0x18 /* Register address plus offset */
#define BQ27000_RS 20 /* Resistor sense */
+#define BQ27x00_POWER_CONSTANT (256 * 29200 / 1000)
struct bq27x00_device_info;
struct bq27x00_access_methods {
int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
};
-enum bq27x00_chip { BQ27000, BQ27500 };
+enum bq27x00_chip { BQ27000, BQ27500, BQ27425};
struct bq27x00_reg_cache {
int temperature;
@@ -86,6 +94,8 @@ struct bq27x00_reg_cache {
int capacity;
int energy;
int flags;
+ int power_avg;
+ int health;
};
struct bq27x00_device_info {
@@ -123,6 +133,22 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static enum power_supply_property bq27425_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};
static unsigned int poll_interval = 360;
@@ -137,10 +163,24 @@ MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
bool single)
{
+ if (di->chip == BQ27425)
+ return di->bus.read(di, reg - BQ27425_REG_OFFSET, single);
return di->bus.read(di, reg, single);
}
/*
+ * Newer chips such as the BQ27425 and BQ27500 calculate certain
+ * parameters differently from the BQ27000 and BQ27200, so we
+ * need to check the chip type.
+ */
+static bool bq27xxx_is_chip_version_higher(struct bq27x00_device_info *di)
+{
+ if (di->chip == BQ27425 || di->chip == BQ27500)
+ return true;
+ return false;
+}
+
+/*
* Return the battery Relative State-of-Charge
* Or < 0 if something fails.
*/
@@ -150,6 +190,8 @@ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
if (di->chip == BQ27500)
rsoc = bq27x00_read(di, BQ27500_REG_SOC, false);
+ else if (di->chip == BQ27425)
+ rsoc = bq27x00_read(di, BQ27425_REG_SOC, false);
else
rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
@@ -174,7 +216,7 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
return charge;
}
- if (di->chip == BQ27500)
+ if (bq27xxx_is_chip_version_higher(di))
charge *= 1000;
else
charge = charge * 3570 / BQ27000_RS;
@@ -208,7 +250,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
{
int ilmd;
- if (di->chip == BQ27500)
+ if (bq27xxx_is_chip_version_higher(di))
ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
else
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
@@ -218,7 +260,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
return ilmd;
}
- if (di->chip == BQ27500)
+ if (bq27xxx_is_chip_version_higher(di))
ilmd *= 1000;
else
ilmd = ilmd * 256 * 3570 / BQ27000_RS;
@@ -262,7 +304,7 @@ static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
return temp;
}
- if (di->chip == BQ27500)
+ if (bq27xxx_is_chip_version_higher(di))
temp -= 2731;
else
temp = ((temp * 5) - 5463) / 2;
@@ -306,14 +348,70 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
return tval * 60;
}
+/*
+ * Read a power avg register.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_read_pwr_avg(struct bq27x00_device_info *di, u8 reg)
+{
+ int tval;
+
+ tval = bq27x00_read(di, reg, false);
+ if (tval < 0) {
+ dev_err(di->dev, "error reading power avg rgister %02x: %d\n",
+ reg, tval);
+ return tval;
+ }
+
+ if (di->chip == BQ27500)
+ return tval;
+ else
+ return (tval * BQ27x00_POWER_CONSTANT) / BQ27000_RS;
+}
+
+/*
+ * Read the flags register and map it to a POWER_SUPPLY_HEALTH_* value.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_read_health(struct bq27x00_device_info *di)
+{
+ int tval;
+
+ tval = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+ if (tval < 0) {
+ dev_err(di->dev, "error reading flag register:%d\n", tval);
+ return tval;
+ }
+
+ if (di->chip == BQ27500) {
+ if (tval & BQ27500_FLAG_SOCF)
+ tval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (tval & BQ27500_FLAG_OTC)
+ tval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ tval = POWER_SUPPLY_HEALTH_GOOD;
+ return tval;
+ } else {
+ if (tval & BQ27000_FLAG_EDV1)
+ tval = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ tval = POWER_SUPPLY_HEALTH_GOOD;
+ return tval;
+ }
+
+ return -1;
+}
+
static void bq27x00_update(struct bq27x00_device_info *di)
{
struct bq27x00_reg_cache cache = {0, };
bool is_bq27500 = di->chip == BQ27500;
+ bool is_bq27425 = di->chip == BQ27425;
cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
if (cache.flags >= 0) {
- if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) {
+ if (!is_bq27500 && !is_bq27425
+ && (cache.flags & BQ27000_FLAG_CI)) {
dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
cache.capacity = -ENODATA;
cache.energy = -ENODATA;
@@ -321,16 +419,30 @@ static void bq27x00_update(struct bq27x00_device_info *di)
cache.time_to_empty_avg = -ENODATA;
cache.time_to_full = -ENODATA;
cache.charge_full = -ENODATA;
+ cache.health = -ENODATA;
} else {
cache.capacity = bq27x00_battery_read_rsoc(di);
- cache.energy = bq27x00_battery_read_energy(di);
- cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
- cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
- cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
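+ /* The energy and time-to-empty/full registers are not read on the bq27425 */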
+ if (!is_bq27425) {
+ cache.energy = bq27x00_battery_read_energy(di);
+ cache.time_to_empty =
+ bq27x00_battery_read_time(di,
+ BQ27x00_REG_TTE);
+ cache.time_to_empty_avg =
+ bq27x00_battery_read_time(di,
+ BQ27x00_REG_TTECP);
+ cache.time_to_full =
+ bq27x00_battery_read_time(di,
+ BQ27x00_REG_TTF);
+ }
cache.charge_full = bq27x00_battery_read_lmd(di);
+ cache.health = bq27x00_battery_read_health(di);
}
cache.temperature = bq27x00_battery_read_temperature(di);
- cache.cycle_count = bq27x00_battery_read_cyct(di);
+ if (!is_bq27425)
+ cache.cycle_count = bq27x00_battery_read_cyct(di);
+ cache.power_avg =
+ bq27x00_battery_read_pwr_avg(di, BQ27x00_POWER_AVG);
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
@@ -376,7 +488,7 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
return curr;
}
- if (di->chip == BQ27500) {
+ if (bq27xxx_is_chip_version_higher(di)) {
/* bq27500 returns signed value */
val->intval = (int)((s16)curr) * 1000;
} else {
@@ -397,7 +509,7 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
{
int status;
- if (di->chip == BQ27500) {
+ if (bq27xxx_is_chip_version_higher(di)) {
if (di->cache.flags & BQ27500_FLAG_FC)
status = POWER_SUPPLY_STATUS_FULL;
else if (di->cache.flags & BQ27500_FLAG_DSC)
@@ -425,7 +537,7 @@ static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
{
int level;
- if (di->chip == BQ27500) {
+ if (bq27xxx_is_chip_version_higher(di)) {
if (di->cache.flags & BQ27500_FLAG_FC)
level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
else if (di->cache.flags & BQ27500_FLAG_SOC1)
@@ -550,6 +662,12 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ENERGY_NOW:
ret = bq27x00_simple_value(di->cache.energy, val);
break;
+ case POWER_SUPPLY_PROP_POWER_AVG:
+ ret = bq27x00_simple_value(di->cache.power_avg, val);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = bq27x00_simple_value(di->cache.health, val);
+ break;
default:
return -EINVAL;
}
@@ -570,8 +688,14 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
int ret;
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
- di->bat.properties = bq27x00_battery_props;
- di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
+ if (di->chip == BQ27425) {
+ di->bat.properties = bq27425_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(bq27425_battery_props);
+ } else {
+ di->bat.properties = bq27x00_battery_props;
+ di->bat.num_properties = ARRAY_SIZE(bq27x00_battery_props);
+ }
di->bat.get_property = bq27x00_battery_get_property;
di->bat.external_power_changed = bq27x00_external_power_changed;
@@ -729,6 +853,7 @@ static int bq27x00_battery_remove(struct i2c_client *client)
static const struct i2c_device_id bq27x00_id[] = {
{ "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
{ "bq27500", BQ27500 },
+ { "bq27425", BQ27425 },
{},
};
MODULE_DEVICE_TABLE(i2c, bq27x00_id);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 86935ec18954..526e5c931294 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -271,16 +271,13 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
if (enable) {
if (cm->emergency_stop)
return -EAGAIN;
- err = regulator_bulk_enable(desc->num_charger_regulators,
- desc->charger_regulators);
+ for (i = 0 ; i < desc->num_charger_regulators ; i++)
+ regulator_enable(desc->charger_regulators[i].consumer);
} else {
/*
* Abnormal battery state - Stop charging forcibly,
* even if charger was enabled at the other places
*/
- err = regulator_bulk_disable(desc->num_charger_regulators,
- desc->charger_regulators);
-
for (i = 0; i < desc->num_charger_regulators; i++) {
if (regulator_is_enabled(
desc->charger_regulators[i].consumer)) {
@@ -288,7 +285,7 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
desc->charger_regulators[i].consumer);
dev_warn(cm->dev,
"Disable regulator(%s) forcibly.\n",
- desc->charger_regulators[i].supply);
+ desc->charger_regulators[i].regulator_name);
}
}
}
@@ -994,11 +991,92 @@ int setup_charger_manager(struct charger_global_desc *gd)
}
EXPORT_SYMBOL_GPL(setup_charger_manager);
+/**
+ * charger_extcon_work - enable/disable the charger according to the
+ * state of the charger cable
+ *
+ * @work: work_struct of the function charger_extcon_work.
+ */
+static void charger_extcon_work(struct work_struct *work)
+{
+ struct charger_cable *cable =
+ container_of(work, struct charger_cable, wq);
+ int ret;
+
+ if (cable->attached && cable->min_uA != 0 && cable->max_uA != 0) {
+ ret = regulator_set_current_limit(cable->charger->consumer,
+ cable->min_uA, cable->max_uA);
+ if (ret < 0) {
+ pr_err("Cannot set current limit of %s (%s)\n",
+ cable->charger->regulator_name, cable->name);
+ return;
+ }
+
+ pr_info("Set current limit of %s : %duA ~ %duA\n",
+ cable->charger->regulator_name,
+ cable->min_uA, cable->max_uA);
+ }
+
+ try_charger_enable(cable->cm, cable->attached);
+}
+
+/**
+ * charger_extcon_notifier - receive the state of the charger cable
+ * when a registered cable is attached or detached.
+ *
+ * @self: the notifier block of the charger_extcon_notifier.
+ * @event: the cable state.
+ * @ptr: the data pointer of notifier block.
+ */
+static int charger_extcon_notifier(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct charger_cable *cable =
+ container_of(self, struct charger_cable, nb);
+
+ cable->attached = event;
+ schedule_work(&cable->wq);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * charger_extcon_init - register external connector to use it
+ * as the charger cable
+ *
+ * @cm: the Charger Manager representing the battery.
+ * @cable: the Charger cable representing the external connector.
+ */
+static int charger_extcon_init(struct charger_manager *cm,
+ struct charger_cable *cable)
+{
+ int ret = 0;
+
+ /*
+ * Charger manager uses the Extcon framework to identify
+ * the charger cable among the various external connector
+ * cables (e.g., TA, USB, MHL, Dock).
+ */
+ INIT_WORK(&cable->wq, charger_extcon_work);
+ cable->nb.notifier_call = charger_extcon_notifier;
+ ret = extcon_register_interest(&cable->extcon_dev,
+ cable->extcon_name, cable->name, &cable->nb);
+ if (ret < 0) {
+ pr_info("Cannot register extcon_dev for %s(cable: %s).\n",
+ cable->extcon_name,
+ cable->name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int charger_manager_probe(struct platform_device *pdev)
{
struct charger_desc *desc = dev_get_platdata(&pdev->dev);
struct charger_manager *cm;
int ret = 0, i = 0;
+ int j = 0;
union power_supply_propval val;
if (g_desc && !rtc_dev && g_desc->rtc_name) {
@@ -1167,11 +1245,31 @@ static int charger_manager_probe(struct platform_device *pdev)
goto err_register;
}
- ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators,
- desc->charger_regulators);
- if (ret) {
- dev_err(&pdev->dev, "Cannot get charger regulators.\n");
- goto err_bulk_get;
+ for (i = 0 ; i < desc->num_charger_regulators ; i++) {
+ struct charger_regulator *charger
+ = &desc->charger_regulators[i];
+
+ charger->consumer = regulator_get(&pdev->dev,
+ charger->regulator_name);
+ if (charger->consumer == NULL) {
+ dev_err(&pdev->dev, "Cannot find charger(%s)n",
+ charger->regulator_name);
+ ret = -EINVAL;
+ goto err_chg_get;
+ }
+
+ for (j = 0 ; j < charger->num_cables ; j++) {
+ struct charger_cable *cable = &charger->cables[j];
+
+ ret = charger_extcon_init(cm, cable);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Cannot find charger(%s)n",
+ charger->regulator_name);
+ goto err_extcon;
+ }
+ cable->charger = charger;
+ cable->cm = cm;
+ }
}
ret = try_charger_enable(cm, true);
@@ -1197,9 +1295,19 @@ static int charger_manager_probe(struct platform_device *pdev)
return 0;
err_chg_enable:
- regulator_bulk_free(desc->num_charger_regulators,
- desc->charger_regulators);
-err_bulk_get:
+err_extcon:
+ for (i = 0 ; i < desc->num_charger_regulators ; i++) {
+ struct charger_regulator *charger
+ = &desc->charger_regulators[i];
+ for (j = 0 ; j < charger->num_cables ; j++) {
+ struct charger_cable *cable = &charger->cables[j];
+ extcon_unregister_interest(&cable->extcon_dev);
+ }
+ }
+err_chg_get:
+ for (i = 0 ; i < desc->num_charger_regulators ; i++)
+ regulator_put(desc->charger_regulators[i].consumer);
+
power_supply_unregister(&cm->charger_psy);
err_register:
kfree(cm->charger_psy.properties);
@@ -1218,6 +1326,8 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
{
struct charger_manager *cm = platform_get_drvdata(pdev);
struct charger_desc *desc = cm->desc;
+ int i = 0;
+ int j = 0;
/* Remove from the list */
mutex_lock(&cm_list_mtx);
@@ -1229,8 +1339,18 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
if (delayed_work_pending(&cm_monitor_work))
cancel_delayed_work_sync(&cm_monitor_work);
- regulator_bulk_free(desc->num_charger_regulators,
- desc->charger_regulators);
+ for (i = 0 ; i < desc->num_charger_regulators ; i++) {
+ struct charger_regulator *charger
+ = &desc->charger_regulators[i];
+ for (j = 0 ; j < charger->num_cables ; j++) {
+ struct charger_cable *cable = &charger->cables[j];
+ extcon_unregister_interest(&cable->extcon_dev);
+ }
+ }
+
+ for (i = 0 ; i < desc->num_charger_regulators ; i++)
+ regulator_put(desc->charger_regulators[i].consumer);
+
power_supply_unregister(&cm->charger_psy);
try_charger_enable(cm, false);
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index 5f92a4bb33f9..7a1ff4e4cf9a 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -64,7 +64,7 @@ static inline int ds2781_battery_io(struct ds2781_device_info *dev_info,
return w1_ds2781_io(dev_info->w1_dev, buf, addr, count, io);
}
-int w1_ds2781_read(struct ds2781_device_info *dev_info, char *buf,
+static int w1_ds2781_read(struct ds2781_device_info *dev_info, char *buf,
int addr, size_t count)
{
return ds2781_battery_io(dev_info, buf, addr, count, 0);
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index 8672c9177dd7..cb2aa3195687 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -54,7 +54,7 @@ static int gpio_charger_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = gpio_get_value(pdata->gpio);
+ val->intval = gpio_get_value_cansleep(pdata->gpio);
val->intval ^= pdata->gpio_active_low;
break;
default:
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
index d8b75780bfef..6a364f4798f7 100644
--- a/drivers/power/lp8727_charger.c
+++ b/drivers/power/lp8727_charger.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/power_supply.h>
-#include <linux/lp8727.h>
+#include <linux/platform_data/lp8727.h>
#define DEBOUNCE_MSEC 270
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 140788b309f8..74abc6c755b4 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -113,6 +113,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -201,6 +202,13 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = ret * 1000 / 2;
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ ret = max17042_read_reg(chip->client, MAX17042_QH);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 1000 / 2;
+ break;
case POWER_SUPPLY_PROP_TEMP:
ret = max17042_read_reg(chip->client, MAX17042_TEMP);
if (ret < 0)
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 7385092f9bc8..a89a41acf9c5 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -17,6 +17,7 @@
#include <linux/power_supply.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
+#include <linux/olpc-ec.h>
#include <asm/olpc.h>
@@ -231,11 +232,9 @@ static int olpc_bat_get_charge_full_design(union power_supply_propval *val)
case POWER_SUPPLY_TECHNOLOGY_LiFe:
switch (mfr) {
- case 1: /* Gold Peak */
- val->intval = 2800000;
- break;
+ case 1: /* Gold Peak, fall through */
case 2: /* BYD */
- val->intval = 3100000;
+ val->intval = 2800000;
break;
default:
return -EIO;
@@ -267,6 +266,55 @@ static int olpc_bat_get_charge_now(union power_supply_propval *val)
return 0;
}
+static int olpc_bat_get_voltage_max_design(union power_supply_propval *val)
+{
+ uint8_t ec_byte;
+ union power_supply_propval tech;
+ int mfr;
+ int ret;
+
+ ret = olpc_bat_get_tech(&tech);
+ if (ret)
+ return ret;
+
+ ec_byte = BAT_ADDR_MFR_TYPE;
+ ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &ec_byte, 1);
+ if (ret)
+ return ret;
+
+ mfr = ec_byte >> 4;
+
+ switch (tech.intval) {
+ case POWER_SUPPLY_TECHNOLOGY_NiMH:
+ switch (mfr) {
+ case 1: /* Gold Peak */
+ val->intval = 6000000;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+
+ case POWER_SUPPLY_TECHNOLOGY_LiFe:
+ switch (mfr) {
+ case 1: /* Gold Peak */
+ val->intval = 6400000;
+ break;
+ case 2: /* BYD */
+ val->intval = 6500000;
+ break;
+ default:
+ return -EIO;
+ }
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ return ret;
+}
+
/*********************************************************************
* Battery properties
*********************************************************************/
@@ -401,6 +449,11 @@ static int olpc_bat_get_property(struct power_supply *psy,
sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
val->strval = bat_serial;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ ret = olpc_bat_get_voltage_max_design(val);
+ if (ret)
+ return ret;
+ break;
default:
ret = -EINVAL;
break;
@@ -428,6 +481,7 @@ static enum power_supply_property olpc_xo1_bat_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
};
/* XO-1.5 does not have ambient temperature property */
@@ -449,6 +503,7 @@ static enum power_supply_property olpc_xo15_bat_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
};
/* EEPROM reading goes completely around the power_supply API, sadly */
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 8dbcd53c5e67..7312f2651647 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -24,11 +24,7 @@
static inline unsigned int get_irq_flags(struct resource *res)
{
- unsigned int flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
-
- flags |= res->flags & IRQF_TRIGGER_MASK;
-
- return flags;
+ return IRQF_SHARED | (res->flags & IRQF_TRIGGER_MASK);
}
static struct device *dev;
@@ -134,13 +130,13 @@ static void update_charger(void)
regulator_set_current_limit(ac_draw, max_uA, max_uA);
if (!regulator_enabled) {
dev_dbg(dev, "charger on (AC)\n");
- regulator_enable(ac_draw);
+ WARN_ON(regulator_enable(ac_draw));
regulator_enabled = 1;
}
} else {
if (regulator_enabled) {
dev_dbg(dev, "charger off\n");
- regulator_disable(ac_draw);
+ WARN_ON(regulator_disable(ac_draw));
regulator_enabled = 0;
}
}
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 6ad612726785..08cc8a3c15af 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/power_supply.h>
+#include <linux/thermal.h>
#include "power_supply.h"
/* exported for the APM Power driver, APM emulation */
@@ -169,6 +170,63 @@ static void power_supply_dev_release(struct device *dev)
kfree(dev);
}
+#ifdef CONFIG_THERMAL
+static int power_supply_read_temp(struct thermal_zone_device *tzd,
+ unsigned long *temp)
+{
+ struct power_supply *psy;
+ union power_supply_propval val;
+ int ret;
+
+ WARN_ON(tzd == NULL);
+ psy = tzd->devdata;
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+
+ /* Convert tenths of degree Celsius to milli degree Celsius. */
+ if (!ret)
+ *temp = val.intval * 100;
+
+ return ret;
+}
+
+static struct thermal_zone_device_ops psy_tzd_ops = {
+ .get_temp = power_supply_read_temp,
+};
+
+static int psy_register_thermal(struct power_supply *psy)
+{
+ int i;
+
+ /* Register battery zone device if psy reports temperature */
+ for (i = 0; i < psy->num_properties; i++) {
+ if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
+ psy->tzd = thermal_zone_device_register(psy->name, 0, 0,
+ psy, &psy_tzd_ops, 0, 0, 0, 0);
+ if (IS_ERR(psy->tzd))
+ return PTR_ERR(psy->tzd);
+ break;
+ }
+ }
+ return 0;
+}
+
+static void psy_unregister_thermal(struct power_supply *psy)
+{
+ if (IS_ERR_OR_NULL(psy->tzd))
+ return;
+ thermal_zone_device_unregister(psy->tzd);
+}
+#else
+static int psy_register_thermal(struct power_supply *psy)
+{
+ return 0;
+}
+
+static void psy_unregister_thermal(struct power_supply *psy)
+{
+}
+#endif
+
int power_supply_register(struct device *parent, struct power_supply *psy)
{
struct device *dev;
@@ -197,6 +255,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto device_add_failed;
+ rc = psy_register_thermal(psy);
+ if (rc)
+ goto register_thermal_failed;
+
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
@@ -206,6 +268,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto success;
create_triggers_failed:
+ psy_unregister_thermal(psy);
+register_thermal_failed:
device_del(dev);
kobject_set_name_failed:
device_add_failed:
@@ -220,6 +284,7 @@ void power_supply_unregister(struct power_supply *psy)
cancel_work_sync(&psy->changed_work);
sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
+ psy_unregister_thermal(psy);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4150747f9186..1d96614a17a4 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -159,6 +159,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(charge_now),
POWER_SUPPLY_ATTR(charge_avg),
POWER_SUPPLY_ATTR(charge_counter),
+ POWER_SUPPLY_ATTR(constant_charge_current),
+ POWER_SUPPLY_ATTR(constant_charge_voltage),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
POWER_SUPPLY_ATTR(energy_full),
@@ -166,9 +168,15 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
POWER_SUPPLY_ATTR(capacity),
+ POWER_SUPPLY_ATTR(capacity_alert_min),
+ POWER_SUPPLY_ATTR(capacity_alert_max),
POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
+ POWER_SUPPLY_ATTR(temp_alert_min),
+ POWER_SUPPLY_ATTR(temp_alert_max),
POWER_SUPPLY_ATTR(temp_ambient),
+ POWER_SUPPLY_ATTR(temp_ambient_alert_min),
+ POWER_SUPPLY_ATTR(temp_ambient_alert_max),
POWER_SUPPLY_ATTR(time_to_empty_now),
POWER_SUPPLY_ATTR(time_to_empty_avg),
POWER_SUPPLY_ATTR(time_to_full_now),
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index a5b6849d4123..a65e8f54157e 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -469,7 +469,7 @@ static int sbs_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
- break;
+ goto done; /* don't trigger power_supply_changed()! */
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index f8eedd8a676f..332dd0110bda 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -196,6 +196,14 @@ static const unsigned int ccc_tbl[] = {
1200000,
};
+/* Convert register value to current using lookup table */
+static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
+{
+ if (val >= size)
+ return -EINVAL;
+ return tbl[val];
+}
+
/* Convert current to register value using lookup table */
static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
{
@@ -841,22 +849,101 @@ fail:
return ret;
}
+/*
+ * Returns the constant charge current programmed
+ * into the charger in uA.
+ */
+static int get_const_charge_current(struct smb347_charger *smb)
+{
+ int ret, intval;
+ unsigned int v;
+
+ if (!smb347_is_ps_online(smb))
+ return -ENODATA;
+
+ ret = regmap_read(smb->regmap, STAT_B, &v);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The current value is a composition of the FCC and PCC values,
+ * and bit 5 tells us which table to use.
+ */
+ if (v & 0x20) {
+ intval = hw_to_current(fcc_tbl, ARRAY_SIZE(fcc_tbl), v & 7);
+ } else {
+ v >>= 3;
+ intval = hw_to_current(pcc_tbl, ARRAY_SIZE(pcc_tbl), v & 7);
+ }
+
+ return intval;
+}
+
+/*
+ * Returns the constant charge voltage programmed
+ * into the charger in uV.
+ */
+static int get_const_charge_voltage(struct smb347_charger *smb)
+{
+ int ret, intval;
+ unsigned int v;
+
+ if (!smb347_is_ps_online(smb))
+ return -ENODATA;
+
+ ret = regmap_read(smb->regmap, STAT_A, &v);
+ if (ret < 0)
+ return ret;
+
+ v &= STAT_A_FLOAT_VOLTAGE_MASK;
+ if (v > 0x3d)
+ v = 0x3d;
+
+ intval = 3500000 + v * 20000;
+
+ return intval;
+}
+
static int smb347_mains_get_property(struct power_supply *psy,
enum power_supply_property prop,
union power_supply_propval *val)
{
struct smb347_charger *smb =
container_of(psy, struct smb347_charger, mains);
+ int ret;
- if (prop == POWER_SUPPLY_PROP_ONLINE) {
+ switch (prop) {
+ case POWER_SUPPLY_PROP_ONLINE:
val->intval = smb->mains_online;
- return 0;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = get_const_charge_voltage(smb);
+ if (ret < 0)
+ return ret;
+ else
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = get_const_charge_current(smb);
+ if (ret < 0)
+ return ret;
+ else
+ val->intval = ret;
+ break;
+
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+
+ return 0;
}
static enum power_supply_property smb347_mains_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
};
static int smb347_usb_get_property(struct power_supply *psy,
@@ -865,16 +952,40 @@ static int smb347_usb_get_property(struct power_supply *psy,
{
struct smb347_charger *smb =
container_of(psy, struct smb347_charger, usb);
+ int ret;
- if (prop == POWER_SUPPLY_PROP_ONLINE) {
+ switch (prop) {
+ case POWER_SUPPLY_PROP_ONLINE:
val->intval = smb->usb_online;
- return 0;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ ret = get_const_charge_voltage(smb);
+ if (ret < 0)
+ return ret;
+ else
+ val->intval = ret;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = get_const_charge_current(smb);
+ if (ret < 0)
+ return ret;
+ else
+ val->intval = ret;
+ break;
+
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+
+ return 0;
}
static enum power_supply_property smb347_usb_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
};
static int smb347_battery_get_property(struct power_supply *psy,
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index b527c93bf2f3..b99a452a4fda 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -22,11 +22,13 @@
#include <linux/vermagic.h>
static int ac_online = 1;
+static int usb_online = 1;
static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
static int battery_health = POWER_SUPPLY_HEALTH_GOOD;
static int battery_present = 1; /* true */
static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION;
static int battery_capacity = 50;
+static int battery_voltage = 3300;
static int test_power_get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
@@ -42,6 +44,20 @@ static int test_power_get_ac_property(struct power_supply *psy,
return 0;
}
+static int test_power_get_usb_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = usb_online;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int test_power_get_battery_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -86,6 +102,12 @@ static int test_power_get_battery_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
val->intval = 3600;
break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = 26;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = battery_voltage;
+ break;
default:
pr_info("%s: some properties deliberately report errors.\n",
__func__);
@@ -114,6 +136,8 @@ static enum power_supply_property test_power_battery_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
static char *test_power_ac_supplied_to[] = {
@@ -135,6 +159,14 @@ static struct power_supply test_power_supplies[] = {
.properties = test_power_battery_props,
.num_properties = ARRAY_SIZE(test_power_battery_props),
.get_property = test_power_get_battery_property,
+ }, {
+ .name = "test_usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .supplied_to = test_power_ac_supplied_to,
+ .num_supplicants = ARRAY_SIZE(test_power_ac_supplied_to),
+ .properties = test_power_ac_props,
+ .num_properties = ARRAY_SIZE(test_power_ac_props),
+ .get_property = test_power_get_usb_property,
},
};
@@ -167,6 +199,7 @@ static void __exit test_power_exit(void)
/* Let's see how we handle changes... */
ac_online = 0;
+ usb_online = 0;
battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
power_supply_changed(&test_power_supplies[i]);
@@ -275,6 +308,19 @@ static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
return strlen(buffer);
}
+static int param_set_usb_online(const char *key, const struct kernel_param *kp)
+{
+ usb_online = map_get_value(map_ac_online, key, usb_online);
+ power_supply_changed(&test_power_supplies[2]);
+ return 0;
+}
+
+static int param_get_usb_online(char *buffer, const struct kernel_param *kp)
+{
+ strcpy(buffer, map_get_key(map_ac_online, usb_online, "unknown"));
+ return strlen(buffer);
+}
+
static int param_set_battery_status(const char *key,
const struct kernel_param *kp)
{
@@ -350,13 +396,31 @@ static int param_set_battery_capacity(const char *key,
#define param_get_battery_capacity param_get_int
+static int param_set_battery_voltage(const char *key,
+ const struct kernel_param *kp)
+{
+ int tmp;
+
+ if (1 != sscanf(key, "%d", &tmp))
+ return -EINVAL;
+
+ battery_voltage = tmp;
+ power_supply_changed(&test_power_supplies[1]);
+ return 0;
+}
+#define param_get_battery_voltage param_get_int
static struct kernel_param_ops param_ops_ac_online = {
.set = param_set_ac_online,
.get = param_get_ac_online,
};
+static struct kernel_param_ops param_ops_usb_online = {
+ .set = param_set_usb_online,
+ .get = param_get_usb_online,
+};
+
static struct kernel_param_ops param_ops_battery_status = {
.set = param_set_battery_status,
.get = param_get_battery_status,
@@ -382,18 +446,27 @@ static struct kernel_param_ops param_ops_battery_capacity = {
.get = param_get_battery_capacity,
};
+static struct kernel_param_ops param_ops_battery_voltage = {
+ .set = param_set_battery_voltage,
+ .get = param_get_battery_voltage,
+};
#define param_check_ac_online(name, p) __param_check(name, p, void);
+#define param_check_usb_online(name, p) __param_check(name, p, void);
#define param_check_battery_status(name, p) __param_check(name, p, void);
#define param_check_battery_present(name, p) __param_check(name, p, void);
#define param_check_battery_technology(name, p) __param_check(name, p, void);
#define param_check_battery_health(name, p) __param_check(name, p, void);
#define param_check_battery_capacity(name, p) __param_check(name, p, void);
+#define param_check_battery_voltage(name, p) __param_check(name, p, void);
module_param(ac_online, ac_online, 0644);
MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
+module_param(usb_online, usb_online, 0644);
+MODULE_PARM_DESC(usb_online, "USB charging state <on|off>");
+
module_param(battery_status, battery_status, 0644);
MODULE_PARM_DESC(battery_status,
"battery status <charging|discharging|not-charging|full>");
@@ -413,6 +486,8 @@ MODULE_PARM_DESC(battery_health,
module_param(battery_capacity, battery_capacity, 0644);
MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
+module_param(battery_voltage, battery_voltage, 0644);
+MODULE_PARM_DESC(battery_voltage, "battery voltage (millivolts)");
MODULE_DESCRIPTION("Power supply driver for testing");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 7cacbaa68efe..15f4d5d8611b 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -22,6 +22,7 @@
#include <linux/power_supply.h>
#include <linux/notifier.h>
#include <linux/usb/otg.h>
+#include <linux/regulator/machine.h>
#define TWL4030_BCIMSTATEC 0x02
#define TWL4030_BCIICHG 0x08
@@ -29,6 +30,7 @@
#define TWL4030_BCIVBUS 0x0c
#define TWL4030_BCIMFSTS4 0x10
#define TWL4030_BCICTL1 0x23
+#define TWL4030_BB_CFG 0x12
#define TWL4030_BCIAUTOWEN BIT(5)
#define TWL4030_CONFIG_DONE BIT(4)
@@ -38,6 +40,17 @@
#define TWL4030_USBFASTMCHG BIT(2)
#define TWL4030_STS_VBUS BIT(7)
#define TWL4030_STS_USB_ID BIT(2)
+#define TWL4030_BBCHEN BIT(4)
+#define TWL4030_BBSEL_MASK 0b1100
+#define TWL4030_BBSEL_2V5 0b0000
+#define TWL4030_BBSEL_3V0 0b0100
+#define TWL4030_BBSEL_3V1 0b1000
+#define TWL4030_BBSEL_3V2 0b1100
+#define TWL4030_BBISEL_MASK 0b11
+#define TWL4030_BBISEL_25uA 0b00
+#define TWL4030_BBISEL_150uA 0b01
+#define TWL4030_BBISEL_500uA 0b10
+#define TWL4030_BBISEL_1000uA 0b11
/* BCI interrupts */
#define TWL4030_WOVF BIT(0) /* Watchdog overflow */
@@ -75,6 +88,8 @@ struct twl4030_bci {
struct work_struct work;
int irq_chg;
int irq_bci;
+ struct regulator *usb_reg;
+ int usb_enabled;
unsigned long event;
};
@@ -104,7 +119,7 @@ static int twl4030_bci_read(u8 reg, u8 *val)
static int twl4030_clear_set_boot_bci(u8 clear, u8 set)
{
- return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, 0,
+ return twl4030_clear_set(TWL4030_MODULE_PM_MASTER, clear,
TWL4030_CONFIG_DONE | TWL4030_BCIAUTOWEN | set,
TWL4030_PM_MASTER_BOOT_BCI);
}
@@ -152,14 +167,14 @@ static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
}
/*
- * Enable/Disable USB Charge funtionality.
+ * Enable/Disable USB Charge functionality.
*/
static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
if (enable) {
- /* Check for USB charger conneted */
+ /* Check for USB charger connected */
if (!twl4030_bci_have_vbus(bci))
return -ENODEV;
@@ -172,6 +187,12 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
return -EACCES;
}
+ /* Need to keep regulator on */
+ if (!bci->usb_enabled) {
+ regulator_enable(bci->usb_reg);
+ bci->usb_enabled = 1;
+ }
+
/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
if (ret < 0)
@@ -182,6 +203,10 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
} else {
ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+ if (bci->usb_enabled) {
+ regulator_disable(bci->usb_reg);
+ bci->usb_enabled = 0;
+ }
}
return ret;
@@ -203,6 +228,49 @@ static int twl4030_charger_enable_ac(bool enable)
}
/*
+ * Enable/Disable charging of Backup Battery.
+ */
+static int twl4030_charger_enable_backup(int uvolt, int uamp)
+{
+ int ret;
+ u8 flags;
+
+ if (uvolt < 2500000 ||
+ uamp < 25) {
+ /* disable charging of backup battery */
+ ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ TWL4030_BBCHEN, 0, TWL4030_BB_CFG);
+ return ret;
+ }
+
+ flags = TWL4030_BBCHEN;
+ if (uvolt >= 3200000)
+ flags |= TWL4030_BBSEL_3V2;
+ else if (uvolt >= 3100000)
+ flags |= TWL4030_BBSEL_3V1;
+ else if (uvolt >= 3000000)
+ flags |= TWL4030_BBSEL_3V0;
+ else
+ flags |= TWL4030_BBSEL_2V5;
+
+ if (uamp >= 1000)
+ flags |= TWL4030_BBISEL_1000uA;
+ else if (uamp >= 500)
+ flags |= TWL4030_BBISEL_500uA;
+ else if (uamp >= 150)
+ flags |= TWL4030_BBISEL_150uA;
+ else
+ flags |= TWL4030_BBISEL_25uA;
+
+ ret = twl4030_clear_set(TWL4030_MODULE_PM_RECEIVER,
+ TWL4030_BBSEL_MASK | TWL4030_BBISEL_MASK,
+ flags,
+ TWL4030_BB_CFG);
+
+ return ret;
+}
+
+/*
* TWL4030 CHG_PRES (AC charger presence) events
*/
static irqreturn_t twl4030_charger_interrupt(int irq, void *arg)
@@ -425,6 +493,7 @@ static enum power_supply_property twl4030_charger_props[] = {
static int __init twl4030_bci_probe(struct platform_device *pdev)
{
struct twl4030_bci *bci;
+ struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
int ret;
u32 reg;
@@ -456,6 +525,8 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
bci->usb.num_properties = ARRAY_SIZE(twl4030_charger_props);
bci->usb.get_property = twl4030_bci_get_property;
+ bci->usb_reg = regulator_get(bci->dev, "bci3v1");
+
ret = power_supply_register(&pdev->dev, &bci->usb);
if (ret) {
dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
@@ -504,6 +575,8 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
twl4030_charger_enable_ac(true);
twl4030_charger_enable_usb(bci, true);
+ twl4030_charger_enable_backup(pdata->bb_uvolt,
+ pdata->bb_uamp);
return 0;
@@ -532,6 +605,7 @@ static int __exit twl4030_bci_remove(struct platform_device *pdev)
twl4030_charger_enable_ac(false);
twl4030_charger_enable_usb(bci, false);
+ twl4030_charger_enable_backup(0, 0);
/* mask interrupts */
twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 98fbe62694d4..e771487132f7 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -327,8 +327,10 @@ int pps_register_cdev(struct pps_device *pps)
}
pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
"pps%d", pps->id);
- if (IS_ERR(pps->dev))
+ if (IS_ERR(pps->dev)) {
+ err = PTR_ERR(pps->dev);
goto del_cdev;
+ }
pps->dev->release = pps_device_destruct;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
new file mode 100644
index 000000000000..90c5c7357a50
--- /dev/null
+++ b/drivers/pwm/Kconfig
@@ -0,0 +1,127 @@
+menuconfig PWM
+ bool "Pulse-Width Modulation (PWM) Support"
+ depends on !MACH_JZ4740 && !PUV3_PWM
+ help
+ Generic Pulse-Width Modulation (PWM) support.
+
+ In Pulse-Width Modulation, a variation of the width of pulses
+ in a rectangular pulse signal is used as a means to alter the
+ average power of the signal. Applications include efficient
+ power delivery and voltage regulation. In computer systems,
+ PWMs are commonly used to control fans or the brightness of
+ display backlights.
+
+ This framework provides a generic interface to PWM devices
+ within the Linux kernel. On the driver side it provides an API
+ to register and unregister a PWM chip, an abstraction of a PWM
+ controller, that supports one or more PWM devices. Client
+ drivers can request PWM devices and use the generic framework
+ to configure as well as enable and disable them.
+
+ This generic framework replaces the legacy PWM framework which
+ allows only a single driver implementing the required API. Not
+ all legacy implementations have been ported to the framework
+ yet. The framework provides an API that is backward compatible
+ with the legacy framework so that existing client drivers
+ continue to work as expected.
+
+ If unsure, say no.
+
+if PWM
+
+config PWM_BFIN
+ tristate "Blackfin PWM support"
+ depends on BFIN_GPTIMERS
+ help
+ Generic PWM framework driver for Blackfin.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-bfin.
+
+config PWM_IMX
+ tristate "i.MX pwm support"
+ depends on ARCH_MXC
+ help
+ Generic PWM framework driver for i.MX.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-imx.
+
+config PWM_LPC32XX
+ tristate "LPC32XX PWM support"
+ depends on ARCH_LPC32XX
+ help
+ Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two
+ PWM controllers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-lpc32xx.
+
+config PWM_MXS
+ tristate "Freescale MXS PWM support"
+ depends on ARCH_MXS && OF
+ select STMP_DEVICE
+ help
+ Generic PWM framework driver for Freescale MXS.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-mxs.
+
+config PWM_PXA
+ tristate "PXA PWM support"
+ depends on ARCH_PXA
+ help
+ Generic PWM framework driver for PXA.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-pxa.
+
+config PWM_SAMSUNG
+ tristate "Samsung pwm support"
+ depends on PLAT_SAMSUNG
+ help
+ Generic PWM framework driver for Samsung.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-samsung.
+
+config PWM_TEGRA
+ tristate "NVIDIA Tegra PWM support"
+ depends on ARCH_TEGRA
+ help
+ Generic PWM framework driver for the PWFM controller found on NVIDIA
+ Tegra SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-tegra.
+
+config PWM_TIECAP
+ tristate "ECAP PWM support"
+ depends on SOC_AM33XX
+ help
+ PWM driver support for the ECAP APWM controller found on AM33XX
+ TI SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-tiecap.
+
+config PWM_TIEHRPWM
+ tristate "EHRPWM PWM support"
+ depends on SOC_AM33XX
+ help
+ PWM driver support for the EHRPWM controller found on AM33XX
+ TI SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-tiehrpwm.
+
+config PWM_VT8500
+ tristate "vt8500 pwm support"
+ depends on ARCH_VT8500
+ help
+ Generic PWM framework driver for vt8500.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-vt8500.
+
+endif
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
new file mode 100644
index 000000000000..e4b2c898964d
--- /dev/null
+++ b/drivers/pwm/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_PWM) += core.o
+obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o
+obj-$(CONFIG_PWM_IMX) += pwm-imx.o
+obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
+obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
+obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
+obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
+obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
+obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
+obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
+obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
new file mode 100644
index 000000000000..c6e05078d3ad
--- /dev/null
+++ b/drivers/pwm/core.c
@@ -0,0 +1,713 @@
+/*
+ * Generic pwmlib implementation
+ *
+ * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2011-2012 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pwm.h>
+#include <linux/radix-tree.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define MAX_PWMS 1024
+
+static DEFINE_MUTEX(pwm_lookup_lock);
+static LIST_HEAD(pwm_lookup_list);
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_chips);
+static DECLARE_BITMAP(allocated_pwms, MAX_PWMS);
+static RADIX_TREE(pwm_tree, GFP_KERNEL);
+
+static struct pwm_device *pwm_to_device(unsigned int pwm)
+{
+ return radix_tree_lookup(&pwm_tree, pwm);
+}
+
+static int alloc_pwms(int pwm, unsigned int count)
+{
+ unsigned int from = 0;
+ unsigned int start;
+
+ if (pwm >= MAX_PWMS)
+ return -EINVAL;
+
+ if (pwm >= 0)
+ from = pwm;
+
+ start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from,
+ count, 0);
+
+ if (pwm >= 0 && start != pwm)
+ return -EEXIST;
+
+ if (start + count > MAX_PWMS)
+ return -ENOSPC;
+
+ return start;
+}
+
+static void free_pwms(struct pwm_chip *chip)
+{
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+ radix_tree_delete(&pwm_tree, pwm->pwm);
+ }
+
+ bitmap_clear(allocated_pwms, chip->base, chip->npwm);
+
+ kfree(chip->pwms);
+ chip->pwms = NULL;
+}
+
+static struct pwm_chip *pwmchip_find_by_name(const char *name)
+{
+ struct pwm_chip *chip;
+
+ if (!name)
+ return NULL;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list) {
+ const char *chip_name = dev_name(chip->dev);
+
+ if (chip_name && strcmp(chip_name, name) == 0) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return NULL;
+}
+
+static int pwm_device_request(struct pwm_device *pwm, const char *label)
+{
+ int err;
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ return -EBUSY;
+
+ if (!try_module_get(pwm->chip->ops->owner))
+ return -ENODEV;
+
+ if (pwm->chip->ops->request) {
+ err = pwm->chip->ops->request(pwm->chip, pwm);
+ if (err) {
+ module_put(pwm->chip->ops->owner);
+ return err;
+ }
+ }
+
+ set_bit(PWMF_REQUESTED, &pwm->flags);
+ pwm->label = label;
+
+ return 0;
+}
+
+static struct pwm_device *
+of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+{
+ struct pwm_device *pwm;
+
+ if (pc->of_pwm_n_cells < 2)
+ return ERR_PTR(-EINVAL);
+
+ if (args->args[0] >= pc->npwm)
+ return ERR_PTR(-EINVAL);
+
+ pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ if (IS_ERR(pwm))
+ return pwm;
+
+ pwm_set_period(pwm, args->args[1]);
+
+ return pwm;
+}
+
+static void of_pwmchip_add(struct pwm_chip *chip)
+{
+ if (!chip->dev || !chip->dev->of_node)
+ return;
+
+ if (!chip->of_xlate) {
+ chip->of_xlate = of_pwm_simple_xlate;
+ chip->of_pwm_n_cells = 2;
+ }
+
+ of_node_get(chip->dev->of_node);
+}
+
+static void of_pwmchip_remove(struct pwm_chip *chip)
+{
+ if (chip->dev && chip->dev->of_node)
+ of_node_put(chip->dev->of_node);
+}
+
+/**
+ * pwm_set_chip_data() - set private chip data for a PWM
+ * @pwm: PWM device
+ * @data: pointer to chip-specific data
+ */
+int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+{
+ if (!pwm)
+ return -EINVAL;
+
+ pwm->chip_data = data;
+
+ return 0;
+}
+
+/**
+ * pwm_get_chip_data() - get private chip data for a PWM
+ * @pwm: PWM device
+ */
+void *pwm_get_chip_data(struct pwm_device *pwm)
+{
+ return pwm ? pwm->chip_data : NULL;
+}
+
+/**
+ * pwmchip_add() - register a new PWM chip
+ * @chip: the PWM chip to add
+ *
+ * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
+ * will be used.
+ */
+int pwmchip_add(struct pwm_chip *chip)
+{
+ struct pwm_device *pwm;
+ unsigned int i;
+ int ret;
+
+ if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
+ !chip->ops->enable || !chip->ops->disable)
+ return -EINVAL;
+
+ mutex_lock(&pwm_lock);
+
+ ret = alloc_pwms(chip->base, chip->npwm);
+ if (ret < 0)
+ goto out;
+
+ chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
+ if (!chip->pwms) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ chip->base = ret;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm = &chip->pwms[i];
+
+ pwm->chip = chip;
+ pwm->pwm = chip->base + i;
+ pwm->hwpwm = i;
+
+ radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
+ }
+
+ bitmap_set(allocated_pwms, chip->base, chip->npwm);
+
+ INIT_LIST_HEAD(&chip->list);
+ list_add(&chip->list, &pwm_chips);
+
+ ret = 0;
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_add(chip);
+
+out:
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_add);
+
+/**
+ * pwmchip_remove() - remove a PWM chip
+ * @chip: the PWM chip to remove
+ *
+ * Removes a PWM chip. This function may return -EBUSY if any PWM device
+ * provided by the chip is still requested.
+ */
+int pwmchip_remove(struct pwm_chip *chip)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&pwm_lock);
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ list_del_init(&chip->list);
+
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
+ free_pwms(chip);
+
+out:
+ mutex_unlock(&pwm_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pwmchip_remove);
+
+/**
+ * pwm_request() - request a PWM device
+ * @pwm: global PWM device index
+ * @label: PWM device label
+ *
+ * This function is deprecated, use pwm_get() instead.
+ */
+struct pwm_device *pwm_request(int pwm, const char *label)
+{
+ struct pwm_device *dev;
+ int err;
+
+ if (pwm < 0 || pwm >= MAX_PWMS)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+
+ dev = pwm_to_device(pwm);
+ if (!dev) {
+ dev = ERR_PTR(-EPROBE_DEFER);
+ goto out;
+ }
+
+ err = pwm_device_request(dev, label);
+ if (err < 0)
+ dev = ERR_PTR(err);
+
+out:
+ mutex_unlock(&pwm_lock);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(pwm_request);
+
+/**
+ * pwm_request_from_chip() - request a PWM device relative to a PWM chip
+ * @chip: PWM chip
+ * @index: per-chip index of the PWM to request
+ * @label: a literal description string of this PWM
+ *
+ * Returns the PWM at the given index of the given PWM chip. A negative error
+ * code is returned if the index is not valid for the specified PWM chip or
+ * if the PWM device cannot be requested.
+ */
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ struct pwm_device *pwm;
+ int err;
+
+ if (!chip || index >= chip->npwm)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&pwm_lock);
+ pwm = &chip->pwms[index];
+
+ err = pwm_device_request(pwm, label);
+ if (err < 0)
+ pwm = ERR_PTR(err);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_request_from_chip);
+
+/**
+ * pwm_free() - free a PWM device
+ * @pwm: PWM device
+ *
+ * This function is deprecated, use pwm_put() instead.
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+ pwm_put(pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_free);
+
+/**
+ * pwm_config() - change a PWM device configuration
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ if (!pwm || period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
+}
+EXPORT_SYMBOL_GPL(pwm_config);
+
+/**
+ * pwm_enable() - start a PWM output toggling
+ * @pwm: PWM device
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+ if (pwm && !test_and_set_bit(PWMF_ENABLED, &pwm->flags))
+ return pwm->chip->ops->enable(pwm->chip, pwm);
+
+ return pwm ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(pwm_enable);
+
+/**
+ * pwm_disable() - stop a PWM output toggling
+ * @pwm: PWM device
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+ if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
+ pwm->chip->ops->disable(pwm->chip, pwm);
+}
+EXPORT_SYMBOL_GPL(pwm_disable);
+
+static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
+{
+ struct pwm_chip *chip;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(chip, &pwm_chips, list)
+ if (chip->dev && chip->dev->of_node == np) {
+ mutex_unlock(&pwm_lock);
+ return chip;
+ }
+
+ mutex_unlock(&pwm_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * of_pwm_request() - request a PWM via the PWM framework
+ * @np: device node to get the PWM from
+ * @con_id: consumer name
+ *
+ * Returns the PWM device parsed from the phandle and index specified in the
+ * "pwms" property of a device tree node or a negative error-code on failure.
+ * Values parsed from the device tree are stored in the returned PWM device
+ * object.
+ *
+ * If con_id is NULL, the first PWM device listed in the "pwms" property will
+ * be requested. Otherwise the "pwm-names" property is used to do a reverse
+ * lookup of the PWM index. This also means that the "pwm-names" property
+ * becomes mandatory for devices that look up the PWM device via the con_id
+ * parameter.
+ */
+static struct pwm_device *of_pwm_request(struct device_node *np,
+ const char *con_id)
+{
+ struct pwm_device *pwm = NULL;
+ struct of_phandle_args args;
+ struct pwm_chip *pc;
+ int index = 0;
+ int err;
+
+ if (con_id) {
+ index = of_property_match_string(np, "pwm-names", con_id);
+ if (index < 0)
+ return ERR_PTR(index);
+ }
+
+ err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index,
+ &args);
+ if (err) {
+ pr_debug("%s(): can't parse \"pwms\" property\n", __func__);
+ return ERR_PTR(err);
+ }
+
+ pc = of_node_to_pwmchip(args.np);
+ if (IS_ERR(pc)) {
+ pr_debug("%s(): PWM chip not found\n", __func__);
+ pwm = ERR_CAST(pc);
+ goto put;
+ }
+
+ if (args.args_count != pc->of_pwm_n_cells) {
+ pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name,
+ args.np->full_name);
+ pwm = ERR_PTR(-EINVAL);
+ goto put;
+ }
+
+ pwm = pc->of_xlate(pc, &args);
+ if (IS_ERR(pwm))
+ goto put;
+
+ /*
+ * If a consumer name was not given, try to look it up from the
+ * "pwm-names" property if it exists. Otherwise use the name of
+ * the user device node.
+ */
+ if (!con_id) {
+ err = of_property_read_string_index(np, "pwm-names", index,
+ &con_id);
+ if (err < 0)
+ con_id = np->name;
+ }
+
+ pwm->label = con_id;
+
+put:
+ of_node_put(args.np);
+
+ return pwm;
+}
+
+/**
+ * pwm_add_table() - register PWM device consumers
+ * @table: array of consumers to register
+ * @num: number of consumers in table
+ */
+void __init pwm_add_table(struct pwm_lookup *table, size_t num)
+{
+ mutex_lock(&pwm_lookup_lock);
+
+ while (num--) {
+ list_add_tail(&table->list, &pwm_lookup_list);
+ table++;
+ }
+
+ mutex_unlock(&pwm_lookup_lock);
+}
+
+/**
+ * pwm_get() - look up and request a PWM device
+ * @dev: device for PWM consumer
+ * @con_id: consumer name
+ *
+ * Lookup is first attempted using DT. If the device was not instantiated from
+ * a device tree, a PWM chip and a relative index is looked up via a table
+ * supplied by board setup code (see pwm_add_table()).
+ *
+ * Once a PWM chip has been found the specified PWM device will be requested
+ * and is ready to be used.
+ */
+struct pwm_device *pwm_get(struct device *dev, const char *con_id)
+{
+ struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
+ const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct pwm_chip *chip = NULL;
+ unsigned int index = 0;
+ unsigned int best = 0;
+ struct pwm_lookup *p;
+ unsigned int match;
+
+ /* look up via DT first */
+ if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
+ return of_pwm_request(dev->of_node, con_id);
+
+ /*
+ * We look up the provider in the static table typically provided by
+ * board setup code. We first try to lookup the consumer device by
+ * name. If the consumer device was passed in as NULL or if no match
+ * was found, we try to find the consumer by directly looking it up
+ * by name.
+ *
+ * If a match is found, the provider PWM chip is looked up by name
+ * and a PWM device is requested using the PWM device per-chip index.
+ *
+ * The lookup algorithm was shamelessly taken from the clock
+ * framework:
+ *
+ * We do slightly fuzzy matching here:
+ * An entry with a NULL ID is assumed to be a wildcard.
+ * If an entry has a device ID, it must match
+ * If an entry has a connection ID, it must match
+ * Then we take the most specific entry - with the following order
+ * of precedence: dev+con > dev only > con only.
+ */
+ mutex_lock(&pwm_lookup_lock);
+
+ list_for_each_entry(p, &pwm_lookup_list, list) {
+ match = 0;
+
+ if (p->dev_id) {
+ if (!dev_id || strcmp(p->dev_id, dev_id))
+ continue;
+
+ match += 2;
+ }
+
+ if (p->con_id) {
+ if (!con_id || strcmp(p->con_id, con_id))
+ continue;
+
+ match += 1;
+ }
+
+ if (match > best) {
+ chip = pwmchip_find_by_name(p->provider);
+ index = p->index;
+
+ if (match != 3)
+ best = match;
+ else
+ break;
+ }
+ }
+
+ if (chip)
+ pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id);
+
+ mutex_unlock(&pwm_lookup_lock);
+
+ return pwm;
+}
+EXPORT_SYMBOL_GPL(pwm_get);
+
+/**
+ * pwm_put() - release a PWM device
+ * @pwm: PWM device
+ */
+void pwm_put(struct pwm_device *pwm)
+{
+ if (!pwm)
+ return;
+
+ mutex_lock(&pwm_lock);
+
+ if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) {
+ pr_warn("PWM device already freed\n");
+ goto out;
+ }
+
+ if (pwm->chip->ops->free)
+ pwm->chip->ops->free(pwm->chip, pwm);
+
+ pwm->label = NULL;
+
+ module_put(pwm->chip->ops->owner);
+out:
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL_GPL(pwm_put);
+
+#ifdef CONFIG_DEBUG_FS
+static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
+{
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ struct pwm_device *pwm = &chip->pwms[i];
+
+ seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
+
+ if (test_bit(PWMF_REQUESTED, &pwm->flags))
+ seq_printf(s, " requested");
+
+ if (test_bit(PWMF_ENABLED, &pwm->flags))
+ seq_printf(s, " enabled");
+
+ seq_printf(s, "\n");
+ }
+}
+
+static void *pwm_seq_start(struct seq_file *s, loff_t *pos)
+{
+ mutex_lock(&pwm_lock);
+ s->private = "";
+
+ return seq_list_start(&pwm_chips, *pos);
+}
+
+static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ s->private = "\n";
+
+ return seq_list_next(v, &pwm_chips, pos);
+}
+
+static void pwm_seq_stop(struct seq_file *s, void *v)
+{
+ mutex_unlock(&pwm_lock);
+}
+
+static int pwm_seq_show(struct seq_file *s, void *v)
+{
+ struct pwm_chip *chip = list_entry(v, struct pwm_chip, list);
+
+ seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private,
+ chip->dev->bus ? chip->dev->bus->name : "no-bus",
+ dev_name(chip->dev), chip->npwm,
+ (chip->npwm != 1) ? "s" : "");
+
+ if (chip->ops->dbg_show)
+ chip->ops->dbg_show(chip, s);
+ else
+ pwm_dbg_show(chip, s);
+
+ return 0;
+}
+
+static const struct seq_operations pwm_seq_ops = {
+ .start = pwm_seq_start,
+ .next = pwm_seq_next,
+ .stop = pwm_seq_stop,
+ .show = pwm_seq_show,
+};
+
+static int pwm_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &pwm_seq_ops);
+}
+
+static const struct file_operations pwm_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = pwm_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init pwm_debugfs_init(void)
+{
+ debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
+ &pwm_debugfs_ops);
+
+ return 0;
+}
+
+subsys_initcall(pwm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c
new file mode 100644
index 000000000000..d53c4e7941ef
--- /dev/null
+++ b/drivers/pwm/pwm-bfin.c
@@ -0,0 +1,162 @@
+/*
+ * Blackfin Pulse Width Modulation (PWM) core
+ *
+ * Copyright (c) 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+#include <asm/gptimers.h>
+#include <asm/portmux.h>
+
+struct bfin_pwm_chip {
+ struct pwm_chip chip;
+};
+
+struct bfin_pwm {
+ unsigned short pin;
+};
+
+static const unsigned short pwm_to_gptimer_per[] = {
+ P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5,
+ P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11,
+};
+
+static int bfin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bfin_pwm *priv;
+ int ret;
+
+ if (pwm->hwpwm >= ARRAY_SIZE(pwm_to_gptimer_per))
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pin = pwm_to_gptimer_per[pwm->hwpwm];
+
+ ret = peripheral_request(priv->pin, NULL);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ pwm_set_chip_data(pwm, priv);
+
+ return 0;
+}
+
+static void bfin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bfin_pwm *priv = pwm_get_chip_data(pwm);
+
+ if (priv) {
+ peripheral_free(priv->pin);
+ kfree(priv);
+ }
+}
+
+static int bfin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct bfin_pwm *priv = pwm_get_chip_data(pwm);
+ unsigned long period, duty;
+ unsigned long long val;
+
+ if (duty_ns < 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ val = (unsigned long long)get_sclk() * period_ns;
+ do_div(val, NSEC_PER_SEC);
+ period = val;
+
+ val = (unsigned long long)period * duty_ns;
+ do_div(val, period_ns);
+ duty = period - val;
+
+ if (duty >= period)
+ duty = period - 1;
+
+ set_gptimer_config(priv->pin, TIMER_MODE_PWM | TIMER_PERIOD_CNT);
+ set_gptimer_pwidth(priv->pin, duty);
+ set_gptimer_period(priv->pin, period);
+
+ return 0;
+}
+
+static int bfin_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bfin_pwm *priv = pwm_get_chip_data(pwm);
+
+ enable_gptimer(priv->pin);
+
+ return 0;
+}
+
+static void bfin_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct bfin_pwm *priv = pwm_get_chip_data(pwm);
+
+ disable_gptimer(priv->pin);
+}
+
+static struct pwm_ops bfin_pwm_ops = {
+ .request = bfin_pwm_request,
+ .free = bfin_pwm_free,
+ .config = bfin_pwm_config,
+ .enable = bfin_pwm_enable,
+ .disable = bfin_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int bfin_pwm_probe(struct platform_device *pdev)
+{
+ struct bfin_pwm_chip *pwm;
+ int ret;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, pwm);
+
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.ops = &bfin_pwm_ops;
+ pwm->chip.base = -1;
+ pwm->chip.npwm = 12;
+
+ ret = pwmchip_add(&pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit bfin_pwm_remove(struct platform_device *pdev)
+{
+ struct bfin_pwm_chip *pwm = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&pwm->chip);
+}
+
+static struct platform_driver bfin_pwm_driver = {
+ .driver = {
+ .name = "bfin-pwm",
+ },
+ .probe = bfin_pwm_probe,
+ .remove = __devexit_p(bfin_pwm_remove),
+};
+
+module_platform_driver(bfin_pwm_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/plat-mxc/pwm.c b/drivers/pwm/pwm-imx.c
index c0cab2270dd1..2a0b35333972 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/drivers/pwm/pwm-imx.c
@@ -39,33 +39,28 @@
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
-
-
-struct pwm_device {
- struct list_head node;
- struct platform_device *pdev;
-
- const char *label;
+struct imx_chip {
struct clk *clk;
int clk_enabled;
void __iomem *mmio_base;
- unsigned int use_count;
- unsigned int pwm_id;
+ struct pwm_chip chip;
};
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip)
+
+static int imx_pwm_config(struct pwm_chip *chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
{
- if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
- return -EINVAL;
+ struct imx_chip *imx = to_imx_chip(chip);
if (!(cpu_is_mx1() || cpu_is_mx21())) {
unsigned long long c;
unsigned long period_cycles, duty_cycles, prescale;
u32 cr;
- c = clk_get_rate(pwm->clk);
+ c = clk_get_rate(imx->clk);
c = c * period_ns;
do_div(c, 1000000000);
period_cycles = c;
@@ -86,8 +81,8 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
else
period_cycles = 0;
- writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
- writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
+ writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
+ writel(period_cycles, imx->mmio_base + MX3_PWMPR);
cr = MX3_PWMCR_PRESCALER(prescale) |
MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
@@ -98,7 +93,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
else
cr |= MX3_PWMCR_CLKSRC_IPG_HIGH;
- writel(cr, pwm->mmio_base + MX3_PWMCR);
+ writel(cr, imx->mmio_base + MX3_PWMCR);
} else if (cpu_is_mx1() || cpu_is_mx21()) {
/* The PWM subsystem allows for exact frequencies. However,
* I cannot connect a scope on my device to the PWM line and
@@ -116,191 +111,120 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
* both the prescaler (/1 .. /128) and then by CLKSEL
* (/2 .. /16).
*/
- u32 max = readl(pwm->mmio_base + MX1_PWMP);
+ u32 max = readl(imx->mmio_base + MX1_PWMP);
u32 p = max * duty_ns / period_ns;
- writel(max - p, pwm->mmio_base + MX1_PWMS);
+ writel(max - p, imx->mmio_base + MX1_PWMS);
} else {
BUG();
}
return 0;
}
-EXPORT_SYMBOL(pwm_config);
-int pwm_enable(struct pwm_device *pwm)
+static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct imx_chip *imx = to_imx_chip(chip);
int rc = 0;
- if (!pwm->clk_enabled) {
- rc = clk_prepare_enable(pwm->clk);
+ if (!imx->clk_enabled) {
+ rc = clk_prepare_enable(imx->clk);
if (!rc)
- pwm->clk_enabled = 1;
+ imx->clk_enabled = 1;
}
return rc;
}
-EXPORT_SYMBOL(pwm_enable);
-void pwm_disable(struct pwm_device *pwm)
+static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- writel(0, pwm->mmio_base + MX3_PWMCR);
+ struct imx_chip *imx = to_imx_chip(chip);
- if (pwm->clk_enabled) {
- clk_disable_unprepare(pwm->clk);
- pwm->clk_enabled = 0;
- }
-}
-EXPORT_SYMBOL(pwm_disable);
+ writel(0, imx->mmio_base + MX3_PWMCR);
-static DEFINE_MUTEX(pwm_lock);
-static LIST_HEAD(pwm_list);
-
-struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- struct pwm_device *pwm;
- int found = 0;
-
- mutex_lock(&pwm_lock);
-
- list_for_each_entry(pwm, &pwm_list, node) {
- if (pwm->pwm_id == pwm_id) {
- found = 1;
- break;
- }
+ if (imx->clk_enabled) {
+ clk_disable_unprepare(imx->clk);
+ imx->clk_enabled = 0;
}
-
- if (found) {
- if (pwm->use_count == 0) {
- pwm->use_count++;
- pwm->label = label;
- } else
- pwm = ERR_PTR(-EBUSY);
- } else
- pwm = ERR_PTR(-ENOENT);
-
- mutex_unlock(&pwm_lock);
- return pwm;
}
-EXPORT_SYMBOL(pwm_request);
-void pwm_free(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
-
- if (pwm->use_count) {
- pwm->use_count--;
- pwm->label = NULL;
- } else
- pr_warning("PWM device already freed\n");
-
- mutex_unlock(&pwm_lock);
-}
-EXPORT_SYMBOL(pwm_free);
+static struct pwm_ops imx_pwm_ops = {
+ .enable = imx_pwm_enable,
+ .disable = imx_pwm_disable,
+ .config = imx_pwm_config,
+ .owner = THIS_MODULE,
+};
-static int __devinit mxc_pwm_probe(struct platform_device *pdev)
+static int __devinit imx_pwm_probe(struct platform_device *pdev)
{
- struct pwm_device *pwm;
+ struct imx_chip *imx;
struct resource *r;
int ret = 0;
- pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
- if (pwm == NULL) {
+ imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
+ if (imx == NULL) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
- pwm->clk = clk_get(&pdev->dev, "pwm");
+ imx->clk = devm_clk_get(&pdev->dev, "pwm");
- if (IS_ERR(pwm->clk)) {
- ret = PTR_ERR(pwm->clk);
- goto err_free;
- }
+ if (IS_ERR(imx->clk))
+ return PTR_ERR(imx->clk);
- pwm->clk_enabled = 0;
+ imx->chip.ops = &imx_pwm_ops;
+ imx->chip.dev = &pdev->dev;
+ imx->chip.base = -1;
+ imx->chip.npwm = 1;
- pwm->use_count = 0;
- pwm->pwm_id = pdev->id;
- pwm->pdev = pdev;
+ imx->clk_enabled = 0;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(&pdev->dev, "no memory resource defined\n");
- ret = -ENODEV;
- goto err_free_clk;
- }
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (r == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto err_free_clk;
+ return -ENODEV;
}
- pwm->mmio_base = ioremap(r->start, resource_size(r));
- if (pwm->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -ENODEV;
- goto err_free_mem;
- }
+ imx->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (imx->mmio_base == NULL)
+ return -EADDRNOTAVAIL;
- mutex_lock(&pwm_lock);
- list_add_tail(&pwm->node, &pwm_list);
- mutex_unlock(&pwm_lock);
+ ret = pwmchip_add(&imx->chip);
+ if (ret < 0)
+ return ret;
- platform_set_drvdata(pdev, pwm);
+ platform_set_drvdata(pdev, imx);
return 0;
-
-err_free_mem:
- release_mem_region(r->start, resource_size(r));
-err_free_clk:
- clk_put(pwm->clk);
-err_free:
- kfree(pwm);
- return ret;
}
-static int __devexit mxc_pwm_remove(struct platform_device *pdev)
+static int __devexit imx_pwm_remove(struct platform_device *pdev)
{
- struct pwm_device *pwm;
- struct resource *r;
+ struct imx_chip *imx;
- pwm = platform_get_drvdata(pdev);
- if (pwm == NULL)
+ imx = platform_get_drvdata(pdev);
+ if (imx == NULL)
return -ENODEV;
- mutex_lock(&pwm_lock);
- list_del(&pwm->node);
- mutex_unlock(&pwm_lock);
-
- iounmap(pwm->mmio_base);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
- clk_put(pwm->clk);
-
- kfree(pwm);
- return 0;
+ return pwmchip_remove(&imx->chip);
}
-static struct platform_driver mxc_pwm_driver = {
+static struct platform_driver imx_pwm_driver = {
.driver = {
.name = "mxc_pwm",
},
- .probe = mxc_pwm_probe,
- .remove = __devexit_p(mxc_pwm_remove),
+ .probe = imx_pwm_probe,
+ .remove = __devexit_p(imx_pwm_remove),
};
-static int __init mxc_pwm_init(void)
+static int __init imx_pwm_init(void)
{
- return platform_driver_register(&mxc_pwm_driver);
+ return platform_driver_register(&imx_pwm_driver);
}
-arch_initcall(mxc_pwm_init);
+arch_initcall(imx_pwm_init);
-static void __exit mxc_pwm_exit(void)
+static void __exit imx_pwm_exit(void)
{
- platform_driver_unregister(&mxc_pwm_driver);
+ platform_driver_unregister(&imx_pwm_driver);
}
-module_exit(mxc_pwm_exit);
+module_exit(imx_pwm_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
new file mode 100644
index 000000000000..adb87f0c1633
--- /dev/null
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2012 Alexandre Pereira da Silva <aletes.xgr@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+
+struct lpc32xx_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ void __iomem *base;
+};
+
+#define PWM_ENABLE (1 << 31)
+#define PWM_RELOADV(x) (((x) & 0xFF) << 8)
+#define PWM_DUTY(x) ((x) & 0xFF)
+
+#define to_lpc32xx_pwm_chip(_chip) \
+ container_of(_chip, struct lpc32xx_pwm_chip, chip)
+
+static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+ unsigned long long c;
+ int period_cycles, duty_cycles;
+
+ c = clk_get_rate(lpc32xx->clk) / 256;
+ c = c * period_ns;
+ do_div(c, NSEC_PER_SEC);
+
+ /* Handle high and low extremes */
+ if (c == 0)
+ c = 1;
+ if (c > 255)
+ c = 0; /* 0 selects division by 256 */
+ period_cycles = c;
+
+ c = 256 * duty_ns;
+ do_div(c, period_ns);
+ duty_cycles = c;
+
+ writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles),
+ lpc32xx->base + (pwm->hwpwm << 2));
+
+ return 0;
+}
+
+static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+
+ return clk_enable(lpc32xx->clk);
+}
+
+static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
+
+ writel(0, lpc32xx->base + (pwm->hwpwm << 2));
+ clk_disable(lpc32xx->clk);
+}
+
+static const struct pwm_ops lpc32xx_pwm_ops = {
+ .config = lpc32xx_pwm_config,
+ .enable = lpc32xx_pwm_enable,
+ .disable = lpc32xx_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int lpc32xx_pwm_probe(struct platform_device *pdev)
+{
+ struct lpc32xx_pwm_chip *lpc32xx;
+ struct resource *res;
+ int ret;
+
+ lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL);
+ if (!lpc32xx)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ lpc32xx->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!lpc32xx->base)
+ return -EADDRNOTAVAIL;
+
+ lpc32xx->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lpc32xx->clk))
+ return PTR_ERR(lpc32xx->clk);
+
+ lpc32xx->chip.dev = &pdev->dev;
+ lpc32xx->chip.ops = &lpc32xx_pwm_ops;
+ lpc32xx->chip.npwm = 2;
+
+ ret = pwmchip_add(&lpc32xx->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, lpc32xx);
+
+ return 0;
+}
+
+static int __devexit lpc32xx_pwm_remove(struct platform_device *pdev)
+{
+ struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev);
+
+ clk_disable(lpc32xx->clk);
+ return pwmchip_remove(&lpc32xx->chip);
+}
+
+static struct of_device_id lpc32xx_pwm_dt_ids[] = {
+ { .compatible = "nxp,lpc3220-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_pwm_dt_ids);
+
+static struct platform_driver lpc32xx_pwm_driver = {
+ .driver = {
+ .name = "lpc32xx-pwm",
+ .of_match_table = of_match_ptr(lpc32xx_pwm_dt_ids),
+ },
+ .probe = lpc32xx_pwm_probe,
+ .remove = __devexit_p(lpc32xx_pwm_remove),
+};
+module_platform_driver(lpc32xx_pwm_driver);
+
+MODULE_ALIAS("platform:lpc32xx-pwm");
+MODULE_AUTHOR("Alexandre Pereira da Silva <aletes.xgr@gmail.com>");
+MODULE_DESCRIPTION("LPC32XX PWM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
new file mode 100644
index 000000000000..e5852646f082
--- /dev/null
+++ b/drivers/pwm/pwm-mxs.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
+#include <linux/stmp_device.h>
+
+#define SET 0x4
+#define CLR 0x8
+#define TOG 0xc
+
+#define PWM_CTRL 0x0
+#define PWM_ACTIVE0 0x10
+#define PWM_PERIOD0 0x20
+#define PERIOD_PERIOD(p) ((p) & 0xffff)
+#define PERIOD_PERIOD_MAX 0x10000
+#define PERIOD_ACTIVE_HIGH (3 << 16)
+#define PERIOD_INACTIVE_LOW (2 << 18)
+#define PERIOD_CDIV(div) (((div) & 0x7) << 20)
+#define PERIOD_CDIV_MAX 8
+
+struct mxs_pwm_chip {
+ struct pwm_chip chip;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+};
+
+#define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip)
+
+static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
+ int ret, div = 0;
+ unsigned int period_cycles, duty_cycles;
+ unsigned long rate;
+ unsigned long long c;
+
+ rate = clk_get_rate(mxs->clk);
+ while (1) {
+ c = rate / (1 << div);
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ if (c < PERIOD_PERIOD_MAX)
+ break;
+ div++;
+ if (div > PERIOD_CDIV_MAX)
+ return -EINVAL;
+ }
+
+ period_cycles = c;
+ c *= duty_ns;
+ do_div(c, period_ns);
+ duty_cycles = c;
+
+ /*
+ * If the PWM channel is disabled, make sure to turn on the clock
+ * before writing the register. Otherwise, keep it enabled.
+ */
+ if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ ret = clk_prepare_enable(mxs->clk);
+ if (ret)
+ return ret;
+ }
+
+ writel(duty_cycles << 16,
+ mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20);
+ writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH |
+ PERIOD_INACTIVE_LOW | PERIOD_CDIV(div),
+ mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20);
+
+ /*
+ * If the PWM is not enabled, turn the clock off again to save power.
+ */
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ clk_disable_unprepare(mxs->clk);
+
+ return 0;
+}
+
+static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
+ int ret;
+
+ ret = clk_prepare_enable(mxs->clk);
+ if (ret)
+ return ret;
+
+ writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET);
+
+ return 0;
+}
+
+static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip);
+
+ writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR);
+
+ clk_disable_unprepare(mxs->clk);
+}
+
+static const struct pwm_ops mxs_pwm_ops = {
+ .config = mxs_pwm_config,
+ .enable = mxs_pwm_enable,
+ .disable = mxs_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int mxs_pwm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mxs_pwm_chip *mxs;
+ struct resource *res;
+ struct pinctrl *pinctrl;
+ int ret;
+
+ mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL);
+ if (!mxs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mxs->base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!mxs->base)
+ return -EADDRNOTAVAIL;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ mxs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mxs->clk))
+ return PTR_ERR(mxs->clk);
+
+ mxs->chip.dev = &pdev->dev;
+ mxs->chip.ops = &mxs_pwm_ops;
+ mxs->chip.base = -1;
+ ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret);
+ return ret;
+ }
+
+ ret = pwmchip_add(&mxs->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret);
+ return ret;
+ }
+
+ mxs->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mxs);
+
+ stmp_reset_block(mxs->base);
+
+ return 0;
+}
+
+static int __devexit mxs_pwm_remove(struct platform_device *pdev)
+{
+ struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&mxs->chip);
+}
+
+static struct of_device_id mxs_pwm_dt_ids[] = {
+ { .compatible = "fsl,imx23-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_pwm_dt_ids);
+
+static struct platform_driver mxs_pwm_driver = {
+ .driver = {
+ .name = "mxs-pwm",
+ .of_match_table = of_match_ptr(mxs_pwm_dt_ids),
+ },
+ .probe = mxs_pwm_probe,
+ .remove = __devexit_p(mxs_pwm_remove),
+};
+module_platform_driver(mxs_pwm_driver);
+
+MODULE_ALIAS("platform:mxs-pwm");
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Freescale MXS PWM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
new file mode 100644
index 000000000000..bd5867a1c700
--- /dev/null
+++ b/drivers/pwm/pwm-pxa.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/pwm/pwm-pxa.c
+ *
+ * simple driver for PWM (Pulse Width Modulator) controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 2008-02-13 initial version
+ * eric miao <eric.miao@marvell.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+
+#include <asm/div64.h>
+
+#define HAS_SECONDARY_PWM 0x10
+#define PWM_ID_BASE(d) ((d) & 0xf)
+
+static const struct platform_device_id pwm_id_table[] = {
+ /* PWM has_secondary_pwm? */
+ { "pxa25x-pwm", 0 },
+ { "pxa27x-pwm", 0 | HAS_SECONDARY_PWM },
+ { "pxa168-pwm", 1 },
+ { "pxa910-pwm", 1 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, pwm_id_table);
+
+/* PWM registers and bits definitions */
+#define PWMCR (0x00)
+#define PWMDCR (0x04)
+#define PWMPCR (0x08)
+
+#define PWMCR_SD (1 << 6)
+#define PWMDCR_FD (1 << 10)
+
+struct pxa_pwm_chip {
+ struct pwm_chip chip;
+ struct device *dev;
+
+ struct clk *clk;
+ int clk_enabled;
+ void __iomem *mmio_base;
+};
+
+static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct pxa_pwm_chip, chip);
+}
+
+/*
+ * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ */
+static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
+ unsigned long long c;
+ unsigned long period_cycles, prescale, pv, dc;
+ unsigned long offset;
+ int rc;
+
+ if (period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ offset = pwm->hwpwm ? 0x10 : 0;
+
+ c = clk_get_rate(pc->clk);
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ if (period_cycles < 1)
+ period_cycles = 1;
+ prescale = (period_cycles - 1) / 1024;
+ pv = period_cycles / (prescale + 1) - 1;
+
+ if (prescale > 63)
+ return -EINVAL;
+
+ if (duty_ns == period_ns)
+ dc = PWMDCR_FD;
+ else
+ dc = (pv + 1) * duty_ns / period_ns;
+
+ /* NOTE: the clock to PWM has to be enabled first
+ * before writing to the registers
+ */
+ rc = clk_prepare_enable(pc->clk);
+ if (rc < 0)
+ return rc;
+
+ writel(prescale, pc->mmio_base + offset + PWMCR);
+ writel(dc, pc->mmio_base + offset + PWMDCR);
+ writel(pv, pc->mmio_base + offset + PWMPCR);
+
+ clk_disable_unprepare(pc->clk);
+ return 0;
+}
+
+static int pxa_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
+ int rc = 0;
+
+ if (!pc->clk_enabled) {
+ rc = clk_prepare_enable(pc->clk);
+ if (!rc)
+ pc->clk_enabled++;
+ }
+ return rc;
+}
+
+static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip);
+
+ if (pc->clk_enabled) {
+ clk_disable_unprepare(pc->clk);
+ pc->clk_enabled--;
+ }
+}
+
+static struct pwm_ops pxa_pwm_ops = {
+ .config = pxa_pwm_config,
+ .enable = pxa_pwm_enable,
+ .disable = pxa_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit pwm_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct pxa_pwm_chip *pwm;
+ struct resource *r;
+ int ret = 0;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (pwm == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm->clk))
+ return PTR_ERR(pwm->clk);
+
+ pwm->clk_enabled = 0;
+
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.ops = &pxa_pwm_ops;
+ pwm->chip.base = -1;
+ pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (pwm->mmio_base == NULL)
+ return -EADDRNOTAVAIL;
+
+ ret = pwmchip_add(&pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pwm);
+ return 0;
+}
+
+static int __devexit pwm_remove(struct platform_device *pdev)
+{
+ struct pxa_pwm_chip *chip;
+
+ chip = platform_get_drvdata(pdev);
+ if (chip == NULL)
+ return -ENODEV;
+
+ return pwmchip_remove(&chip->chip);
+}
+
+static struct platform_driver pwm_driver = {
+ .driver = {
+ .name = "pxa25x-pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = pwm_probe,
+ .remove = __devexit_p(pwm_remove),
+ .id_table = pwm_id_table,
+};
+
+static int __init pwm_init(void)
+{
+ return platform_driver_register(&pwm_driver);
+}
+arch_initcall(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-samsung/pwm.c b/drivers/pwm/pwm-samsung.c
index d3583050fb05..e5187c0ade9f 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -1,4 +1,4 @@
-/* arch/arm/plat-s3c/pwm.c
+/* drivers/pwm/pwm-samsung.c
*
* Copyright (c) 2007 Ben Dooks
* Copyright (c) 2008 Simtec Electronics
@@ -11,6 +11,8 @@
* the Free Software Foundation; either version 2 of the License.
*/
+#define pr_fmt(fmt) "pwm-samsung: " fmt
+
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -24,8 +26,7 @@
#include <plat/regs-timer.h>
-struct pwm_device {
- struct list_head list;
+struct s3c_chip {
struct platform_device *pdev;
struct clk *clk_div;
@@ -36,81 +37,36 @@ struct pwm_device {
unsigned int duty_ns;
unsigned char tcon_base;
- unsigned char use_count;
unsigned char pwm_id;
+ struct pwm_chip chip;
};
+#define to_s3c_chip(chip) container_of(chip, struct s3c_chip, chip)
+
#define pwm_dbg(_pwm, msg...) dev_dbg(&(_pwm)->pdev->dev, msg)
static struct clk *clk_scaler[2];
-static inline int pwm_is_tdiv(struct pwm_device *pwm)
+static inline int pwm_is_tdiv(struct s3c_chip *chip)
{
- return clk_get_parent(pwm->clk) == pwm->clk_div;
+ return clk_get_parent(chip->clk) == chip->clk_div;
}
-static DEFINE_MUTEX(pwm_lock);
-static LIST_HEAD(pwm_list);
-
-struct pwm_device *pwm_request(int pwm_id, const char *label)
-{
- struct pwm_device *pwm;
- int found = 0;
-
- mutex_lock(&pwm_lock);
-
- list_for_each_entry(pwm, &pwm_list, list) {
- if (pwm->pwm_id == pwm_id) {
- found = 1;
- break;
- }
- }
-
- if (found) {
- if (pwm->use_count == 0) {
- pwm->use_count = 1;
- pwm->label = label;
- } else
- pwm = ERR_PTR(-EBUSY);
- } else
- pwm = ERR_PTR(-ENOENT);
-
- mutex_unlock(&pwm_lock);
- return pwm;
-}
-
-EXPORT_SYMBOL(pwm_request);
-
-
-void pwm_free(struct pwm_device *pwm)
-{
- mutex_lock(&pwm_lock);
-
- if (pwm->use_count) {
- pwm->use_count--;
- pwm->label = NULL;
- } else
- printk(KERN_ERR "PWM%d device already freed\n", pwm->pwm_id);
-
- mutex_unlock(&pwm_lock);
-}
-
-EXPORT_SYMBOL(pwm_free);
-
#define pwm_tcon_start(pwm) (1 << (pwm->tcon_base + 0))
#define pwm_tcon_invert(pwm) (1 << (pwm->tcon_base + 2))
#define pwm_tcon_autoreload(pwm) (1 << (pwm->tcon_base + 3))
#define pwm_tcon_manulupdate(pwm) (1 << (pwm->tcon_base + 1))
-int pwm_enable(struct pwm_device *pwm)
+static int s3c_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct s3c_chip *s3c = to_s3c_chip(chip);
unsigned long flags;
unsigned long tcon;
local_irq_save(flags);
tcon = __raw_readl(S3C2410_TCON);
- tcon |= pwm_tcon_start(pwm);
+ tcon |= pwm_tcon_start(s3c);
__raw_writel(tcon, S3C2410_TCON);
local_irq_restore(flags);
@@ -118,31 +74,28 @@ int pwm_enable(struct pwm_device *pwm)
return 0;
}
-EXPORT_SYMBOL(pwm_enable);
-
-void pwm_disable(struct pwm_device *pwm)
+static void s3c_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct s3c_chip *s3c = to_s3c_chip(chip);
unsigned long flags;
unsigned long tcon;
local_irq_save(flags);
tcon = __raw_readl(S3C2410_TCON);
- tcon &= ~pwm_tcon_start(pwm);
+ tcon &= ~pwm_tcon_start(s3c);
__raw_writel(tcon, S3C2410_TCON);
local_irq_restore(flags);
}
-EXPORT_SYMBOL(pwm_disable);
-
-static unsigned long pwm_calc_tin(struct pwm_device *pwm, unsigned long freq)
+static unsigned long pwm_calc_tin(struct s3c_chip *s3c, unsigned long freq)
{
unsigned long tin_parent_rate;
unsigned int div;
- tin_parent_rate = clk_get_rate(clk_get_parent(pwm->clk_div));
- pwm_dbg(pwm, "tin parent at %lu\n", tin_parent_rate);
+ tin_parent_rate = clk_get_rate(clk_get_parent(s3c->clk_div));
+ pwm_dbg(s3c, "tin parent at %lu\n", tin_parent_rate);
for (div = 2; div <= 16; div *= 2) {
if ((tin_parent_rate / (div << 16)) < freq)
@@ -154,8 +107,10 @@ static unsigned long pwm_calc_tin(struct pwm_device *pwm, unsigned long freq)
#define NS_IN_HZ (1000000000UL)
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+static int s3c_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
{
+ struct s3c_chip *s3c = to_s3c_chip(chip);
unsigned long tin_rate;
unsigned long tin_ns;
unsigned long period;
@@ -174,38 +129,38 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
if (duty_ns > period_ns)
return -EINVAL;
- if (period_ns == pwm->period_ns &&
- duty_ns == pwm->duty_ns)
+ if (period_ns == s3c->period_ns &&
+ duty_ns == s3c->duty_ns)
return 0;
/* The TCMP and TCNT can be read without a lock, they're not
* shared between the timers. */
- tcmp = __raw_readl(S3C2410_TCMPB(pwm->pwm_id));
- tcnt = __raw_readl(S3C2410_TCNTB(pwm->pwm_id));
+ tcmp = __raw_readl(S3C2410_TCMPB(s3c->pwm_id));
+ tcnt = __raw_readl(S3C2410_TCNTB(s3c->pwm_id));
period = NS_IN_HZ / period_ns;
- pwm_dbg(pwm, "duty_ns=%d, period_ns=%d (%lu)\n",
+ pwm_dbg(s3c, "duty_ns=%d, period_ns=%d (%lu)\n",
duty_ns, period_ns, period);
/* Check to see if we are changing the clock rate of the PWM */
- if (pwm->period_ns != period_ns) {
- if (pwm_is_tdiv(pwm)) {
- tin_rate = pwm_calc_tin(pwm, period);
- clk_set_rate(pwm->clk_div, tin_rate);
+ if (s3c->period_ns != period_ns) {
+ if (pwm_is_tdiv(s3c)) {
+ tin_rate = pwm_calc_tin(s3c, period);
+ clk_set_rate(s3c->clk_div, tin_rate);
} else
- tin_rate = clk_get_rate(pwm->clk);
+ tin_rate = clk_get_rate(s3c->clk);
- pwm->period_ns = period_ns;
+ s3c->period_ns = period_ns;
- pwm_dbg(pwm, "tin_rate=%lu\n", tin_rate);
+ pwm_dbg(s3c, "tin_rate=%lu\n", tin_rate);
tin_ns = NS_IN_HZ / tin_rate;
tcnt = period_ns / tin_ns;
} else
- tin_ns = NS_IN_HZ / clk_get_rate(pwm->clk);
+ tin_ns = NS_IN_HZ / clk_get_rate(s3c->clk);
/* Note, counters count down */
@@ -216,7 +171,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
if (tcmp == tcnt)
tcmp--;
- pwm_dbg(pwm, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt);
+ pwm_dbg(s3c, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt);
if (tcmp < 0)
tcmp = 0;
@@ -225,15 +180,15 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
local_irq_save(flags);
- __raw_writel(tcmp, S3C2410_TCMPB(pwm->pwm_id));
- __raw_writel(tcnt, S3C2410_TCNTB(pwm->pwm_id));
+ __raw_writel(tcmp, S3C2410_TCMPB(s3c->pwm_id));
+ __raw_writel(tcnt, S3C2410_TCNTB(s3c->pwm_id));
tcon = __raw_readl(S3C2410_TCON);
- tcon |= pwm_tcon_manulupdate(pwm);
- tcon |= pwm_tcon_autoreload(pwm);
+ tcon |= pwm_tcon_manulupdate(s3c);
+ tcon |= pwm_tcon_autoreload(s3c);
__raw_writel(tcon, S3C2410_TCON);
- tcon &= ~pwm_tcon_manulupdate(pwm);
+ tcon &= ~pwm_tcon_manulupdate(s3c);
__raw_writel(tcon, S3C2410_TCON);
local_irq_restore(flags);
@@ -241,24 +196,17 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
return 0;
}
-EXPORT_SYMBOL(pwm_config);
-
-static int pwm_register(struct pwm_device *pwm)
-{
- pwm->duty_ns = -1;
- pwm->period_ns = -1;
-
- mutex_lock(&pwm_lock);
- list_add_tail(&pwm->list, &pwm_list);
- mutex_unlock(&pwm_lock);
-
- return 0;
-}
+static struct pwm_ops s3c_pwm_ops = {
+ .enable = s3c_pwm_enable,
+ .disable = s3c_pwm_disable,
+ .config = s3c_pwm_config,
+ .owner = THIS_MODULE,
+};
static int s3c_pwm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct pwm_device *pwm;
+ struct s3c_chip *s3c;
unsigned long flags;
unsigned long tcon;
unsigned int id = pdev->id;
@@ -269,83 +217,76 @@ static int s3c_pwm_probe(struct platform_device *pdev)
return -ENXIO;
}
- pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
- if (pwm == NULL) {
+ s3c = devm_kzalloc(&pdev->dev, sizeof(*s3c), GFP_KERNEL);
+ if (s3c == NULL) {
dev_err(dev, "failed to allocate pwm_device\n");
return -ENOMEM;
}
- pwm->pdev = pdev;
- pwm->pwm_id = id;
-
/* calculate base of control bits in TCON */
- pwm->tcon_base = id == 0 ? 0 : (id * 4) + 4;
-
- pwm->clk = clk_get(dev, "pwm-tin");
- if (IS_ERR(pwm->clk)) {
+ s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4;
+ s3c->chip.dev = &pdev->dev;
+ s3c->chip.ops = &s3c_pwm_ops;
+ s3c->chip.base = -1;
+ s3c->chip.npwm = 1;
+
+ s3c->clk = devm_clk_get(dev, "pwm-tin");
+ if (IS_ERR(s3c->clk)) {
dev_err(dev, "failed to get pwm tin clk\n");
- ret = PTR_ERR(pwm->clk);
- goto err_alloc;
+ return PTR_ERR(s3c->clk);
}
- pwm->clk_div = clk_get(dev, "pwm-tdiv");
- if (IS_ERR(pwm->clk_div)) {
+ s3c->clk_div = devm_clk_get(dev, "pwm-tdiv");
+ if (IS_ERR(s3c->clk_div)) {
dev_err(dev, "failed to get pwm tdiv clk\n");
- ret = PTR_ERR(pwm->clk_div);
- goto err_clk_tin;
+ return PTR_ERR(s3c->clk_div);
}
- clk_enable(pwm->clk);
- clk_enable(pwm->clk_div);
+ clk_enable(s3c->clk);
+ clk_enable(s3c->clk_div);
local_irq_save(flags);
tcon = __raw_readl(S3C2410_TCON);
- tcon |= pwm_tcon_invert(pwm);
+ tcon |= pwm_tcon_invert(s3c);
__raw_writel(tcon, S3C2410_TCON);
local_irq_restore(flags);
-
- ret = pwm_register(pwm);
- if (ret) {
+ ret = pwmchip_add(&s3c->chip);
+ if (ret < 0) {
dev_err(dev, "failed to register pwm\n");
goto err_clk_tdiv;
}
- pwm_dbg(pwm, "config bits %02x\n",
- (__raw_readl(S3C2410_TCON) >> pwm->tcon_base) & 0x0f);
+ pwm_dbg(s3c, "config bits %02x\n",
+ (__raw_readl(S3C2410_TCON) >> s3c->tcon_base) & 0x0f);
dev_info(dev, "tin at %lu, tdiv at %lu, tin=%sclk, base %d\n",
- clk_get_rate(pwm->clk),
- clk_get_rate(pwm->clk_div),
- pwm_is_tdiv(pwm) ? "div" : "ext", pwm->tcon_base);
+ clk_get_rate(s3c->clk),
+ clk_get_rate(s3c->clk_div),
+ pwm_is_tdiv(s3c) ? "div" : "ext", s3c->tcon_base);
- platform_set_drvdata(pdev, pwm);
+ platform_set_drvdata(pdev, s3c);
return 0;
err_clk_tdiv:
- clk_disable(pwm->clk_div);
- clk_disable(pwm->clk);
- clk_put(pwm->clk_div);
-
- err_clk_tin:
- clk_put(pwm->clk);
-
- err_alloc:
- kfree(pwm);
+ clk_disable(s3c->clk_div);
+ clk_disable(s3c->clk);
return ret;
}
static int __devexit s3c_pwm_remove(struct platform_device *pdev)
{
- struct pwm_device *pwm = platform_get_drvdata(pdev);
+ struct s3c_chip *s3c = platform_get_drvdata(pdev);
+ int err;
+
+ err = pwmchip_remove(&s3c->chip);
+ if (err < 0)
+ return err;
- clk_disable(pwm->clk_div);
- clk_disable(pwm->clk);
- clk_put(pwm->clk_div);
- clk_put(pwm->clk);
- kfree(pwm);
+ clk_disable(s3c->clk_div);
+ clk_disable(s3c->clk);
return 0;
}
@@ -353,26 +294,26 @@ static int __devexit s3c_pwm_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct pwm_device *pwm = platform_get_drvdata(pdev);
+ struct s3c_chip *s3c = platform_get_drvdata(pdev);
/* No one preserves these values during suspend, so reset them.
* Otherwise the driver leaves the PWM unconfigured if the same
* values are passed to pwm_config.
*/
- pwm->period_ns = 0;
- pwm->duty_ns = 0;
+ s3c->period_ns = 0;
+ s3c->duty_ns = 0;
return 0;
}
static int s3c_pwm_resume(struct platform_device *pdev)
{
- struct pwm_device *pwm = platform_get_drvdata(pdev);
+ struct s3c_chip *s3c = platform_get_drvdata(pdev);
unsigned long tcon;
/* Restore inversion */
tcon = __raw_readl(S3C2410_TCON);
- tcon |= pwm_tcon_invert(pwm);
+ tcon |= pwm_tcon_invert(s3c);
__raw_writel(tcon, S3C2410_TCON);
return 0;
@@ -402,13 +343,13 @@ static int __init pwm_init(void)
clk_scaler[1] = clk_get(NULL, "pwm-scaler1");
if (IS_ERR(clk_scaler[0]) || IS_ERR(clk_scaler[1])) {
- printk(KERN_ERR "%s: failed to get scaler clocks\n", __func__);
+ pr_err("failed to get scaler clocks\n");
return -EINVAL;
}
ret = platform_driver_register(&s3c_pwm_driver);
if (ret)
- printk(KERN_ERR "%s: failed to add pwm driver\n", __func__);
+ pr_err("failed to add pwm driver\n");
return ret;
}
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
new file mode 100644
index 000000000000..057465e0553c
--- /dev/null
+++ b/drivers/pwm/pwm-tegra.c
@@ -0,0 +1,259 @@
+/*
+ * drivers/pwm/pwm-tegra.c
+ *
+ * Tegra pulse-width-modulation controller driver
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ * Based on arch/arm/plat-mxc/pwm.c by Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pwm.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PWM_ENABLE (1 << 31)
+#define PWM_DUTY_WIDTH 8
+#define PWM_DUTY_SHIFT 16
+#define PWM_SCALE_WIDTH 13
+#define PWM_SCALE_SHIFT 0
+
+#define NUM_PWM 4
+
+struct tegra_pwm_chip {
+ struct pwm_chip chip;
+ struct device *dev;
+
+ struct clk *clk;
+
+ void __iomem *mmio_base;
+};
+
+static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct tegra_pwm_chip, chip);
+}
+
+static inline u32 pwm_readl(struct tegra_pwm_chip *chip, unsigned int num)
+{
+ return readl(chip->mmio_base + (num << 4));
+}
+
+static inline void pwm_writel(struct tegra_pwm_chip *chip, unsigned int num,
+ unsigned long val)
+{
+ writel(val, chip->mmio_base + (num << 4));
+}
+
+static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
+ unsigned long long c;
+ unsigned long rate, hz;
+ u32 val = 0;
+ int err;
+
+ /*
+ * Convert from duty_ns / period_ns to a fixed number of duty ticks
+ * per (1 << PWM_DUTY_WIDTH) cycles and make sure to round to the
+ * nearest integer during division.
+ */
+ c = duty_ns * ((1 << PWM_DUTY_WIDTH) - 1) + period_ns / 2;
+ do_div(c, period_ns);
+
+ val = (u32)c << PWM_DUTY_SHIFT;
+
+ /*
+ * Compute the prescaler value for which (1 << PWM_DUTY_WIDTH)
+ * cycles at the PWM clock rate will take period_ns nanoseconds.
+ */
+ rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
+ hz = 1000000000ul / period_ns;
+
+ rate = (rate + (hz / 2)) / hz;
+
+ /*
+ * Since the actual PWM divider is the register's frequency divider
+ * field minus 1, we need to decrement to get the correct value to
+ * write to the register.
+ */
+ if (rate > 0)
+ rate--;
+
+ /*
+ * Make sure that the rate will fit in the register's frequency
+ * divider field.
+ */
+ if (rate >> PWM_SCALE_WIDTH)
+ return -EINVAL;
+
+ val |= rate << PWM_SCALE_SHIFT;
+
+ /*
+ * If the PWM channel is disabled, make sure to turn on the clock
+ * before writing the register. Otherwise, keep it enabled.
+ */
+ if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ err = clk_prepare_enable(pc->clk);
+ if (err < 0)
+ return err;
+ } else
+ val |= PWM_ENABLE;
+
+ pwm_writel(pc, pwm->hwpwm, val);
+
+ /*
+ * If the PWM is not enabled, turn the clock off again to save power.
+ */
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ clk_disable_unprepare(pc->clk);
+
+ return 0;
+}
+
+static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
+ int rc = 0;
+ u32 val;
+
+ rc = clk_prepare_enable(pc->clk);
+ if (rc < 0)
+ return rc;
+
+ val = pwm_readl(pc, pwm->hwpwm);
+ val |= PWM_ENABLE;
+ pwm_writel(pc, pwm->hwpwm, val);
+
+ return 0;
+}
+
+static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
+ u32 val;
+
+ val = pwm_readl(pc, pwm->hwpwm);
+ val &= ~PWM_ENABLE;
+ pwm_writel(pc, pwm->hwpwm, val);
+
+ clk_disable_unprepare(pc->clk);
+}
+
+static const struct pwm_ops tegra_pwm_ops = {
+ .config = tegra_pwm_config,
+ .enable = tegra_pwm_enable,
+ .disable = tegra_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int tegra_pwm_probe(struct platform_device *pdev)
+{
+ struct tegra_pwm_chip *pwm;
+ struct resource *r;
+ int ret;
+
+ pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
+ if (!pwm) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ pwm->dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resources defined\n");
+ return -ENODEV;
+ }
+
+ pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!pwm->mmio_base)
+ return -EADDRNOTAVAIL;
+
+ platform_set_drvdata(pdev, pwm);
+
+ pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pwm->clk))
+ return PTR_ERR(pwm->clk);
+
+ pwm->chip.dev = &pdev->dev;
+ pwm->chip.ops = &tegra_pwm_ops;
+ pwm->chip.base = -1;
+ pwm->chip.npwm = NUM_PWM;
+
+ ret = pwmchip_add(&pwm->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devexit tegra_pwm_remove(struct platform_device *pdev)
+{
+ struct tegra_pwm_chip *pc = platform_get_drvdata(pdev);
+ int i;
+
+ if (WARN_ON(!pc))
+ return -ENODEV;
+
+ for (i = 0; i < NUM_PWM; i++) {
+ struct pwm_device *pwm = &pc->chip.pwms[i];
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags))
+ if (clk_prepare_enable(pc->clk) < 0)
+ continue;
+
+ pwm_writel(pc, i, 0);
+
+ clk_disable_unprepare(pc->clk);
+ }
+
+ return pwmchip_remove(&pc->chip);
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tegra_pwm_of_match[] = {
+ { .compatible = "nvidia,tegra20-pwm" },
+ { .compatible = "nvidia,tegra30-pwm" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
+#endif
+
+static struct platform_driver tegra_pwm_driver = {
+ .driver = {
+ .name = "tegra-pwm",
+ .of_match_table = of_match_ptr(tegra_pwm_of_match),
+ },
+ .probe = tegra_pwm_probe,
+ .remove = __devexit_p(tegra_pwm_remove),
+};
+
+module_platform_driver(tegra_pwm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("NVIDIA Corporation");
+MODULE_ALIAS("platform:tegra-pwm");
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
new file mode 100644
index 000000000000..0b66d0f25922
--- /dev/null
+++ b/drivers/pwm/pwm-tiecap.c
@@ -0,0 +1,230 @@
+/*
+ * ECAP PWM driver
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+
+/* ECAP registers and bits definitions */
+#define CAP1 0x08
+#define CAP2 0x0C
+#define CAP3 0x10
+#define CAP4 0x14
+#define ECCTL2 0x2A
+#define ECCTL2_APWM_MODE BIT(9)
+#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6))
+#define ECCTL2_TSCTR_FREERUN BIT(4)
+
+struct ecap_pwm_chip {
+ struct pwm_chip chip;
+ unsigned int clk_rate;
+ void __iomem *mmio_base;
+};
+
+static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ecap_pwm_chip, chip);
+}
+
+/*
+ * period_ns = 10^9 * period_cycles / PWM_CLK_RATE
+ * duty_ns = 10^9 * duty_cycles / PWM_CLK_RATE
+ */
+static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+ unsigned long long c;
+ unsigned long period_cycles, duty_cycles;
+ unsigned int reg_val;
+
+ if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC)
+ return -ERANGE;
+
+ c = pc->clk_rate;
+ c = c * period_ns;
+ do_div(c, NSEC_PER_SEC);
+ period_cycles = (unsigned long)c;
+
+ if (period_cycles < 1) {
+ period_cycles = 1;
+ duty_cycles = 1;
+ } else {
+ c = pc->clk_rate;
+ c = c * duty_ns;
+ do_div(c, NSEC_PER_SEC);
+ duty_cycles = (unsigned long)c;
+ }
+
+ pm_runtime_get_sync(pc->chip.dev);
+
+ reg_val = readw(pc->mmio_base + ECCTL2);
+
+ /* Configure APWM mode & disable sync option */
+ reg_val |= ECCTL2_APWM_MODE | ECCTL2_SYNC_SEL_DISA;
+
+ writew(reg_val, pc->mmio_base + ECCTL2);
+
+ if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+ /* Update active registers if not running */
+ writel(duty_cycles, pc->mmio_base + CAP2);
+ writel(period_cycles, pc->mmio_base + CAP1);
+ } else {
+ /*
+ * Update shadow registers to configure period and
+ * compare values, so that the current PWM period can
+ * complete before the new settings take effect.
+ */
+ writel(duty_cycles, pc->mmio_base + CAP4);
+ writel(period_cycles, pc->mmio_base + CAP3);
+ }
+
+ pm_runtime_put_sync(pc->chip.dev);
+ return 0;
+}
+
+static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+ unsigned int reg_val;
+
+ /* Leave clock enabled on enabling PWM */
+ pm_runtime_get_sync(pc->chip.dev);
+
+ /*
+ * Enable 'Free run Time stamp counter mode' to start counter
+ * and 'APWM mode' to enable APWM output
+ */
+ reg_val = readw(pc->mmio_base + ECCTL2);
+ reg_val |= ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE;
+ writew(reg_val, pc->mmio_base + ECCTL2);
+ return 0;
+}
+
+static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip);
+ unsigned int reg_val;
+
+ /*
+ * Disable 'Free run Time stamp counter mode' to stop counter
+ * and 'APWM mode' to put APWM output to low
+ */
+ reg_val = readw(pc->mmio_base + ECCTL2);
+ reg_val &= ~(ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE);
+ writew(reg_val, pc->mmio_base + ECCTL2);
+
+ /* Disable clock on PWM disable */
+ pm_runtime_put_sync(pc->chip.dev);
+}
+
+static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ dev_warn(chip->dev, "Removing PWM device without disabling\n");
+ pm_runtime_put_sync(chip->dev);
+ }
+}
+
+static const struct pwm_ops ecap_pwm_ops = {
+ .free = ecap_pwm_free,
+ .config = ecap_pwm_config,
+ .enable = ecap_pwm_enable,
+ .disable = ecap_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit ecap_pwm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct clk *clk;
+ struct ecap_pwm_chip *pc;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ pc->clk_rate = clk_get_rate(clk);
+ if (!pc->clk_rate) {
+ dev_err(&pdev->dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &ecap_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = 1;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!pc->mmio_base)
+ return -EADDRNOTAVAIL;
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ platform_set_drvdata(pdev, pc);
+ return 0;
+}
+
+static int __devexit ecap_pwm_remove(struct platform_device *pdev)
+{
+ struct ecap_pwm_chip *pc = platform_get_drvdata(pdev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return pwmchip_remove(&pc->chip);
+}
+
+static struct platform_driver ecap_pwm_driver = {
+ .driver = {
+ .name = "ecap",
+ },
+ .probe = ecap_pwm_probe,
+ .remove = __devexit_p(ecap_pwm_remove),
+};
+
+module_platform_driver(ecap_pwm_driver);
+
+MODULE_DESCRIPTION("ECAP PWM driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
new file mode 100644
index 000000000000..c3756d1be194
--- /dev/null
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -0,0 +1,409 @@
+/*
+ * EHRPWM PWM driver
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
+/* EHRPWM registers and bits definitions */
+
+/* Time base module registers */
+#define TBCTL 0x00
+#define TBPRD 0x0A
+
+#define TBCTL_RUN_MASK (BIT(15) | BIT(14))
+#define TBCTL_STOP_NEXT 0
+#define TBCTL_STOP_ON_CYCLE BIT(14)
+#define TBCTL_FREE_RUN (BIT(15) | BIT(14))
+#define TBCTL_PRDLD_MASK BIT(3)
+#define TBCTL_PRDLD_SHDW 0
+#define TBCTL_PRDLD_IMDT BIT(3)
+#define TBCTL_CLKDIV_MASK (BIT(12) | BIT(11) | BIT(10) | BIT(9) | \
+ BIT(8) | BIT(7))
+#define TBCTL_CTRMODE_MASK (BIT(1) | BIT(0))
+#define TBCTL_CTRMODE_UP 0
+#define TBCTL_CTRMODE_DOWN BIT(0)
+#define TBCTL_CTRMODE_UPDOWN BIT(1)
+#define TBCTL_CTRMODE_FREEZE (BIT(1) | BIT(0))
+
+#define TBCTL_HSPCLKDIV_SHIFT 7
+#define TBCTL_CLKDIV_SHIFT 10
+
+#define CLKDIV_MAX 7
+#define HSPCLKDIV_MAX 7
+#define PERIOD_MAX 0xFFFF
+
+/* compare module registers */
+#define CMPA 0x12
+#define CMPB 0x14
+
+/* Action qualifier module registers */
+#define AQCTLA 0x16
+#define AQCTLB 0x18
+#define AQSFRC 0x1A
+#define AQCSFRC 0x1C
+
+#define AQCTL_CBU_MASK (BIT(9) | BIT(8))
+#define AQCTL_CBU_FRCLOW BIT(8)
+#define AQCTL_CBU_FRCHIGH BIT(9)
+#define AQCTL_CBU_FRCTOGGLE (BIT(9) | BIT(8))
+#define AQCTL_CAU_MASK (BIT(5) | BIT(4))
+#define AQCTL_CAU_FRCLOW BIT(4)
+#define AQCTL_CAU_FRCHIGH BIT(5)
+#define AQCTL_CAU_FRCTOGGLE (BIT(5) | BIT(4))
+#define AQCTL_PRD_MASK (BIT(3) | BIT(2))
+#define AQCTL_PRD_FRCLOW BIT(2)
+#define AQCTL_PRD_FRCHIGH BIT(3)
+#define AQCTL_PRD_FRCTOGGLE (BIT(3) | BIT(2))
+#define AQCTL_ZRO_MASK (BIT(1) | BIT(0))
+#define AQCTL_ZRO_FRCLOW BIT(0)
+#define AQCTL_ZRO_FRCHIGH BIT(1)
+#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0))
+
+#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6))
+#define AQSFRC_RLDCSF_ZRO 0
+#define AQSFRC_RLDCSF_PRD BIT(6)
+#define AQSFRC_RLDCSF_ZROPRD BIT(7)
+#define AQSFRC_RLDCSF_IMDT (BIT(7) | BIT(6))
+
+#define AQCSFRC_CSFB_MASK (BIT(3) | BIT(2))
+#define AQCSFRC_CSFB_FRCDIS 0
+#define AQCSFRC_CSFB_FRCLOW BIT(2)
+#define AQCSFRC_CSFB_FRCHIGH BIT(3)
+#define AQCSFRC_CSFB_DISSWFRC (BIT(3) | BIT(2))
+#define AQCSFRC_CSFA_MASK (BIT(1) | BIT(0))
+#define AQCSFRC_CSFA_FRCDIS 0
+#define AQCSFRC_CSFA_FRCLOW BIT(0)
+#define AQCSFRC_CSFA_FRCHIGH BIT(1)
+#define AQCSFRC_CSFA_DISSWFRC (BIT(1) | BIT(0))
+
+#define NUM_PWM_CHANNEL 2 /* EHRPWM channels */
+
+struct ehrpwm_pwm_chip {
+ struct pwm_chip chip;
+ unsigned int clk_rate;
+ void __iomem *mmio_base;
+};
+
+static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct ehrpwm_pwm_chip, chip);
+}
+
+static void ehrpwm_write(void *base, int offset, unsigned int val)
+{
+ writew(val & 0xFFFF, base + offset);
+}
+
+static void ehrpwm_modify(void *base, int offset,
+ unsigned short mask, unsigned short val)
+{
+ unsigned short regval;
+
+ regval = readw(base + offset);
+ regval &= ~mask;
+ regval |= val & mask;
+ writew(regval, base + offset);
+}
+
+/**
+ * set_prescale_div - Set up the prescaler divider function
+ * @rqst_prescaler: minimum prescaler value requested
+ * @prescale_div: prescaler value that was chosen
+ * @tb_clk_div: Time Base Control prescaler bits
+ */
+static int set_prescale_div(unsigned long rqst_prescaler,
+ unsigned short *prescale_div, unsigned short *tb_clk_div)
+{
+ unsigned int clkdiv, hspclkdiv;
+
+ for (clkdiv = 0; clkdiv <= CLKDIV_MAX; clkdiv++) {
+ for (hspclkdiv = 0; hspclkdiv <= HSPCLKDIV_MAX; hspclkdiv++) {
+
+ /*
+ * calculations for prescaler value :
+ * prescale_div = HSPCLKDIVIDER * CLKDIVIDER.
+ * HSPCLKDIVIDER = 2 ** hspclkdiv
+ * CLKDIVIDER = (1), if clkdiv == 0 *OR*
+ * (2 * clkdiv), if clkdiv != 0
+ *
+ * Configure prescale_div value such that period
+ * register value is less than 65535.
+ */
+
+ *prescale_div = (1 << clkdiv) *
+ (hspclkdiv ? (hspclkdiv * 2) : 1);
+ if (*prescale_div > rqst_prescaler) {
+ *tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) |
+ (hspclkdiv << TBCTL_HSPCLKDIV_SHIFT);
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
+
+static void configure_chans(struct ehrpwm_pwm_chip *pc, int chan,
+ unsigned long duty_cycles)
+{
+ int cmp_reg, aqctl_reg;
+ unsigned short aqctl_val, aqctl_mask;
+
+ /*
+ * Channels can be configured from action qualifier module.
+ * Channel 0 configured with compare A register and for
+ * up-counter mode.
+ * Channel 1 configured with compare B register and for
+ * up-counter mode.
+ */
+ if (chan == 1) {
+ aqctl_reg = AQCTLB;
+ cmp_reg = CMPB;
+ /* Configure PWM Low from compare B value */
+ aqctl_val = AQCTL_CBU_FRCLOW;
+ aqctl_mask = AQCTL_CBU_MASK;
+ } else {
+ cmp_reg = CMPA;
+ aqctl_reg = AQCTLA;
+ /* Configure PWM Low from compare A value */
+ aqctl_val = AQCTL_CAU_FRCLOW;
+ aqctl_mask = AQCTL_CAU_MASK;
+ }
+
+ /* Configure PWM High from period value and zero value */
+ aqctl_val |= AQCTL_PRD_FRCHIGH | AQCTL_ZRO_FRCHIGH;
+ aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK;
+ ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val);
+
+ ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles);
+}
+
+/*
+ * period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE
+ * duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE
+ */
+static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ unsigned long long c;
+ unsigned long period_cycles, duty_cycles;
+ unsigned short ps_divval, tb_divval;
+
+ if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC)
+ return -ERANGE;
+
+ c = pc->clk_rate;
+ c = c * period_ns;
+ do_div(c, NSEC_PER_SEC);
+ period_cycles = (unsigned long)c;
+
+ if (period_cycles < 1) {
+ period_cycles = 1;
+ duty_cycles = 1;
+ } else {
+ c = pc->clk_rate;
+ c = c * duty_ns;
+ do_div(c, NSEC_PER_SEC);
+ duty_cycles = (unsigned long)c;
+ }
+
+ /* Configure clock prescaler to support Low frequency PWM wave */
+ if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval,
+ &tb_divval)) {
+ dev_err(chip->dev, "Unsupported values\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get_sync(chip->dev);
+
+ /* Update clock prescaler values */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval);
+
+ /* Update period & duty cycle with presacler division */
+ period_cycles = period_cycles / ps_divval;
+ duty_cycles = duty_cycles / ps_divval;
+
+ /* Configure shadow loading on Period register */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW);
+
+ ehrpwm_write(pc->mmio_base, TBPRD, period_cycles);
+
+ /* Configure ehrpwm counter for up-count mode */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK,
+ TBCTL_CTRMODE_UP);
+
+ /* Configure the channel for duty cycle */
+ configure_chans(pc, pwm->hwpwm, duty_cycles);
+ pm_runtime_put_sync(chip->dev);
+ return 0;
+}
+
+static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ unsigned short aqcsfrc_val, aqcsfrc_mask;
+
+ /* Leave clock enabled on enabling PWM */
+ pm_runtime_get_sync(chip->dev);
+
+ /* Disabling Action Qualifier on PWM output */
+ if (pwm->hwpwm) {
+ aqcsfrc_val = AQCSFRC_CSFB_FRCDIS;
+ aqcsfrc_mask = AQCSFRC_CSFB_MASK;
+ } else {
+ aqcsfrc_val = AQCSFRC_CSFA_FRCDIS;
+ aqcsfrc_mask = AQCSFRC_CSFA_MASK;
+ }
+
+ /* Changes to shadow mode */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_ZRO);
+
+ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+
+ /* Enable time counter for free_run */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
+ return 0;
+}
+
+static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
+ unsigned short aqcsfrc_val, aqcsfrc_mask;
+
+ /* Action Qualifier puts PWM output low forcefully */
+ if (pwm->hwpwm) {
+ aqcsfrc_val = AQCSFRC_CSFB_FRCLOW;
+ aqcsfrc_mask = AQCSFRC_CSFB_MASK;
+ } else {
+ aqcsfrc_val = AQCSFRC_CSFA_FRCLOW;
+ aqcsfrc_mask = AQCSFRC_CSFA_MASK;
+ }
+
+ /*
+ * Changes to immediate action on Action Qualifier. This puts
+ * Action Qualifier control on PWM output from next TBCLK
+ */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_IMDT);
+
+ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+
+ /* Stop Time base counter */
+ ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
+
+ /* Disable clock on PWM disable */
+ pm_runtime_put_sync(chip->dev);
+}
+
+static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+ dev_warn(chip->dev, "Removing PWM device without disabling\n");
+ pm_runtime_put_sync(chip->dev);
+ }
+}
+
+static const struct pwm_ops ehrpwm_pwm_ops = {
+ .free = ehrpwm_pwm_free,
+ .config = ehrpwm_pwm_config,
+ .enable = ehrpwm_pwm_enable,
+ .disable = ehrpwm_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct clk *clk;
+ struct ehrpwm_pwm_chip *pc;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ pc->clk_rate = clk_get_rate(clk);
+ if (!pc->clk_rate) {
+ dev_err(&pdev->dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &ehrpwm_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = NUM_PWM_CHANNEL;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r);
+ if (!pc->mmio_base)
+ return -EADDRNOTAVAIL;
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ platform_set_drvdata(pdev, pc);
+ return 0;
+}
+
+static int __devexit ehrpwm_pwm_remove(struct platform_device *pdev)
+{
+ struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return pwmchip_remove(&pc->chip);
+}
+
+static struct platform_driver ehrpwm_pwm_driver = {
+ .driver = {
+ .name = "ehrpwm",
+ },
+ .probe = ehrpwm_pwm_probe,
+ .remove = __devexit_p(ehrpwm_pwm_remove),
+};
+
+module_platform_driver(ehrpwm_pwm_driver);
+
+MODULE_DESCRIPTION("EHRPWM PWM driver");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
new file mode 100644
index 000000000000..ad14389b7144
--- /dev/null
+++ b/drivers/pwm/pwm-vt8500.c
@@ -0,0 +1,177 @@
+/*
+ * drivers/pwm/pwm-vt8500.c
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+#include <linux/delay.h>
+
+#include <asm/div64.h>
+
+#define VT8500_NR_PWMS 4
+
+struct vt8500_chip {
+ struct pwm_chip chip;
+ void __iomem *base;
+};
+
+#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip)
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask)
+{
+ int loops = msecs_to_loops(10);
+ while ((readb(reg) & bitmask) && --loops)
+ cpu_relax();
+
+ if (unlikely(!loops))
+ pr_warn("Waiting for status bits 0x%x to clear timed out\n",
+ bitmask);
+}
+
+static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+ unsigned long long c;
+ unsigned long period_cycles, prescale, pv, dc;
+
+ c = 25000000/2; /* wild guess --- need to implement clocks */
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ if (period_cycles < 1)
+ period_cycles = 1;
+ prescale = (period_cycles - 1) / 4096;
+ pv = period_cycles / (prescale + 1) - 1;
+ if (pv > 4095)
+ pv = 4095;
+
+ if (prescale > 1023)
+ return -EINVAL;
+
+ c = (unsigned long long)pv * duty_ns;
+ do_div(c, period_ns);
+ dc = c;
+
+ pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 1));
+ writel(prescale, vt8500->base + 0x4 + (pwm->hwpwm << 4));
+
+ pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 2));
+ writel(pv, vt8500->base + 0x8 + (pwm->hwpwm << 4));
+
+ pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3));
+ writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4));
+
+ return 0;
+}
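For reference (not part of the patch), the divider arithmetic above worked through once, under the hard-coded 25 MHz / 2 clock assumption:

	/*
	 * Worked example (assumed 12.5 MHz input, i.e. 25000000/2):
	 *   period_ns = 1000000  ->  period_cycles = 12500
	 *   prescale  = (12500 - 1) / 4096      = 3      (<= 1023, ok)
	 *   pv        = 12500 / (3 + 1) - 1     = 3124   (<= 4095, ok)
	 *   duty_ns   = 250000   ->  dc = 3124 * 250000 / 1000000 = 781
	 * giving an effective period of 4 * 3125 / 12.5 MHz = 1 ms.
	 */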
+
+static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+
+ pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
+ writel(5, vt8500->base + (pwm->hwpwm << 4));
+ return 0;
+}
+
+static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
+
+ pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0));
+ writel(0, vt8500->base + (pwm->hwpwm << 4));
+}
+
+static struct pwm_ops vt8500_pwm_ops = {
+ .enable = vt8500_pwm_enable,
+ .disable = vt8500_pwm_disable,
+ .config = vt8500_pwm_config,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit pwm_probe(struct platform_device *pdev)
+{
+ struct vt8500_chip *chip;
+ struct resource *r;
+ int ret;
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (chip == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->chip.dev = &pdev->dev;
+ chip->chip.ops = &vt8500_pwm_ops;
+ chip->chip.base = -1;
+ chip->chip.npwm = VT8500_NR_PWMS;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ chip->base = devm_request_and_ioremap(&pdev->dev, r);
+ if (chip->base == NULL)
+ return -EADDRNOTAVAIL;
+
+ ret = pwmchip_add(&chip->chip);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, chip);
+ return ret;
+}
+
+static int __devexit pwm_remove(struct platform_device *pdev)
+{
+ struct vt8500_chip *chip;
+
+ chip = platform_get_drvdata(pdev);
+ if (chip == NULL)
+ return -ENODEV;
+
+ return pwmchip_remove(&chip->chip);
+}
+
+static struct platform_driver pwm_driver = {
+ .driver = {
+ .name = "vt8500-pwm",
+ .owner = THIS_MODULE,
+ },
+ .probe = pwm_probe,
+ .remove = __devexit_p(pwm_remove),
+};
+
+static int __init pwm_init(void)
+{
+ return platform_driver_register(&pwm_driver);
+}
+arch_initcall(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_LICENSE("GPL");
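For reference (not part of the patch): the hard-coded offsets used above imply roughly the following per-channel register layout; the macro names are hypothetical:

	#define PWM_CTRL(ch)	((ch) << 4)		/* enable: write 5, disable: write 0 */
	#define PWM_SCALAR(ch)	(0x4 + ((ch) << 4))	/* 10-bit prescaler */
	#define PWM_PERIOD(ch)	(0x8 + ((ch) << 4))	/* 12-bit period value */
	#define PWM_DUTY(ch)	(0xc + ((ch) << 4))	/* duty value */
	#define PWM_STATUS(ch)	(0x40 + (ch))		/* busy bits polled by pwm_busy_wait() */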
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 722246cf20ab..5d44252b7342 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work)
" info %4.4x\n", DBELL_SID(idb.bytes),
DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
}
+
+ wr_ptr = ioread32(priv->regs +
+ TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
}
iowrite32(rd_ptr & (IDB_QSIZE - 1),
@@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work)
regval |= TSI721_SR_CHINT_IDBQRCV;
iowrite32(regval,
priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
+
+ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+ if (wr_ptr != rd_ptr)
+ schedule_work(&priv->idb_work);
}
/**
@@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i, cap;
+ int cap;
int err;
u32 regval;
@@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
priv->pdev = pdev;
#ifdef DEBUG
+ {
+ int i;
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
i, (unsigned long long)pci_resource_start(pdev, i),
(unsigned long)pci_resource_len(pdev, i),
pci_resource_flags(pdev, i));
}
+ }
#endif
/*
* Verify BAR configuration
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f34c3be6c9fe..4e932cc695e9 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -272,7 +272,7 @@ config REGULATOR_S2MPS11
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
- depends on MFD_S5M_CORE
+ depends on MFD_SEC_CORE
help
This driver supports a Samsung S5M8767A voltage output regulator
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 182b553059c9..c151fd5d8c97 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -486,6 +486,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.id = AB3100_BUCK,
.ops = &regulator_ops_variable_sleepable,
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
+ .volt_table = ldo_e_buck_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_time = 1000,
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 13d424fc1c14..10f2f4d4d190 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -848,18 +848,12 @@ static __devexit int ab8500_regulator_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ab8500_regulator_match[] = {
- { .compatible = "stericsson,ab8500-regulator", },
- {}
-};
-
static struct platform_driver ab8500_regulator_driver = {
.probe = ab8500_regulator_probe,
.remove = __devexit_p(ab8500_regulator_remove),
.driver = {
.name = "ab8500-regulator",
.owner = THIS_MODULE,
- .of_match_table = ab8500_regulator_match,
},
};
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index e9c2085f9dfb..ce0fe72a428e 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector)
static int anatop_get_voltage_sel(struct regulator_dev *reg)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val;
+ u32 val, mask;
if (!anatop_reg->control_reg)
return -ENOTSUPP;
val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
- val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+ mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
anatop_reg->vol_bit_shift;
+ val = (val & mask) >> anatop_reg->vol_bit_shift;
return val - anatop_reg->min_bit_val;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f092588a078c..48385318175a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3217,7 +3217,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
- if (config->ena_gpio) {
+ if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) {
ret = gpio_request_one(config->ena_gpio,
GPIOF_DIR_OUT | config->ena_gpio_flags,
rdev_get_name(rdev));
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 9dbb491b6efa..359f8d18fc3f 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -547,16 +547,10 @@ static int __exit db8500_regulator_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id db8500_prcmu_regulator_match[] = {
- { .compatible = "stericsson,db8500-prcmu-regulator", },
- {}
-};
-
static struct platform_driver db8500_regulator_driver = {
.driver = {
.name = "db8500-prcmu-regulators",
.owner = THIS_MODULE,
- .of_match_table = db8500_prcmu_regulator_match,
},
.probe = db8500_regulator_probe,
.remove = __exit_p(db8500_regulator_remove),
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 34b67bee9323..8b5944f2d7d1 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
return -EINVAL;
}
-static int gpio_regulator_set_value(struct regulator_dev *dev,
- int min, int max, unsigned *selector)
+static int gpio_regulator_set_voltage(struct regulator_dev *dev,
+ int min_uV, int max_uV,
+ unsigned *selector)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
int ptr, target = 0, state, best_val = INT_MAX;
for (ptr = 0; ptr < data->nr_states; ptr++)
if (data->states[ptr].value < best_val &&
- data->states[ptr].value >= min &&
- data->states[ptr].value <= max) {
+ data->states[ptr].value >= min_uV &&
+ data->states[ptr].value <= max_uV) {
target = data->states[ptr].gpios;
best_val = data->states[ptr].value;
if (selector)
@@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev,
return 0;
}
-static int gpio_regulator_set_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
-}
-
static int gpio_regulator_list_voltage(struct regulator_dev *dev,
unsigned selector)
{
@@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
int min_uA, int max_uA)
{
- return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
+ struct gpio_regulator_data *data = rdev_get_drvdata(dev);
+ int ptr, target = 0, state, best_val = 0;
+
+ for (ptr = 0; ptr < data->nr_states; ptr++)
+ if (data->states[ptr].value > best_val &&
+ data->states[ptr].value >= min_uA &&
+ data->states[ptr].value <= max_uA) {
+ target = data->states[ptr].gpios;
+ best_val = data->states[ptr].value;
+ }
+
+ if (best_val == 0)
+ return -EINVAL;
+
+ for (ptr = 0; ptr < data->nr_gpios; ptr++) {
+ state = (target & (1 << ptr)) >> ptr;
+ gpio_set_value(data->gpios[ptr].gpio, state);
+ }
+ data->state = target;
+
+ return 0;
}
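For reference (not part of the patch), how the open-coded selection behaves against a hypothetical states table:

	/*
	 * Example: with min_uA = 500000 and max_uA = 900000, the loop above
	 * picks the 800000 entry (the largest value inside the window) and
	 * drives its gpios pattern onto the GPIOs:
	 *
	 *   { .value =  400000, .gpios = 0x0 },
	 *   { .value =  800000, .gpios = 0x1 },
	 *   { .value = 1200000, .gpios = 0x3 },
	 */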
static struct regulator_ops gpio_regulator_voltage_ops = {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 17d19fbbc490..46c7e88f8381 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -486,9 +486,12 @@ static int palmas_map_voltage_ldo(struct regulator_dev *rdev,
{
int ret, voltage;
- ret = ((min_uV - 900000) / 50000) + 1;
- if (ret < 0)
- return ret;
+ if (min_uV == 0)
+ return 0;
+
+ if (min_uV < 900000)
+ min_uV = 900000;
+ ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1;
/* Map back into a voltage to verify we're still in bounds */
voltage = palmas_list_voltage_ldo(rdev, ret);
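For reference (not part of the patch), a worked example of the new rounding:

	/*
	 * Example (assuming the linear map implied here, selector n being
	 * 900000 + (n - 1) * 50000 uV): min_uV = 1230000 now gives
	 *   DIV_ROUND_UP(330000, 50000) + 1 = 7 + 1 = 8   (1250000 uV >= min_uV)
	 * whereas the old "(min_uV - 900000) / 50000 + 1" rounded down to
	 * selector 7 (1200000 uV), i.e. below the requested minimum.
	 */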
@@ -586,7 +589,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
addr = palmas_regs_info[id].ctrl_addr;
- ret = palmas_smps_read(palmas, addr, &reg);
+ ret = palmas_ldo_read(palmas, addr, &reg);
if (ret)
return ret;
@@ -596,7 +599,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id,
if (reg_init->mode_sleep)
reg |= PALMAS_LDO1_CTRL_MODE_SLEEP;
- ret = palmas_smps_write(palmas, addr, reg);
+ ret = palmas_ldo_write(palmas, addr, reg);
if (ret)
return ret;
@@ -630,7 +633,7 @@ static __devinit int palmas_probe(struct platform_device *pdev)
ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
if (ret)
- goto err_unregister_regulator;
+ return ret;
if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN)
pmic->smps123 = 1;
@@ -676,7 +679,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
case PALMAS_REG_SMPS10:
pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
pmic->desc[id].ops = &palmas_ops_smps10;
- pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
+ pmic->desc[id].vsel_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ PALMAS_SMPS10_CTRL);
pmic->desc[id].vsel_mask = SMPS10_VSEL;
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
@@ -778,8 +783,10 @@ static __devinit int palmas_probe(struct platform_device *pdev)
reg_init = pdata->reg_init[id];
if (reg_init) {
ret = palmas_ldo_init(palmas, id, reg_init);
- if (ret)
+ if (ret) {
+ regulator_unregister(pmic->rdev[id]);
goto err_unregister_regulator;
+ }
}
}
}
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 102287fa7ecb..abe64a32aedf 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -19,15 +19,15 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/mfd/s5m87xx/s5m-core.h>
-#include <linux/mfd/s5m87xx/s5m-pmic.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s5m8767.h>
struct s5m8767_info {
struct device *dev;
- struct s5m87xx_dev *iodev;
+ struct sec_pmic_dev *iodev;
int num_regulators;
struct regulator_dev **rdev;
- struct s5m_opmode_data *opmode;
+ struct sec_opmode_data *opmode;
int ramp_delay;
bool buck2_ramp;
@@ -45,43 +45,43 @@ struct s5m8767_info {
int buck_gpioindex;
};
-struct s5m_voltage_desc {
+struct sec_voltage_desc {
int max;
int min;
int step;
};
-static const struct s5m_voltage_desc buck_voltage_val1 = {
+static const struct sec_voltage_desc buck_voltage_val1 = {
.max = 2225000,
.min = 650000,
.step = 6250,
};
-static const struct s5m_voltage_desc buck_voltage_val2 = {
+static const struct sec_voltage_desc buck_voltage_val2 = {
.max = 1600000,
.min = 600000,
.step = 6250,
};
-static const struct s5m_voltage_desc buck_voltage_val3 = {
+static const struct sec_voltage_desc buck_voltage_val3 = {
.max = 3000000,
.min = 750000,
.step = 12500,
};
-static const struct s5m_voltage_desc ldo_voltage_val1 = {
+static const struct sec_voltage_desc ldo_voltage_val1 = {
.max = 3950000,
.min = 800000,
.step = 50000,
};
-static const struct s5m_voltage_desc ldo_voltage_val2 = {
+static const struct sec_voltage_desc ldo_voltage_val2 = {
.max = 2375000,
.min = 800000,
.step = 25000,
};
-static const struct s5m_voltage_desc *reg_voltage_map[] = {
+static const struct sec_voltage_desc *reg_voltage_map[] = {
[S5M8767_LDO1] = &ldo_voltage_val2,
[S5M8767_LDO2] = &ldo_voltage_val2,
[S5M8767_LDO3] = &ldo_voltage_val1,
@@ -213,7 +213,7 @@ static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
else if (ret)
return ret;
- ret = s5m_reg_read(s5m8767->iodev, reg, &val);
+ ret = sec_reg_read(s5m8767->iodev, reg, &val);
if (ret)
return ret;
@@ -230,7 +230,7 @@ static int s5m8767_reg_enable(struct regulator_dev *rdev)
if (ret)
return ret;
- return s5m_reg_update(s5m8767->iodev, reg, enable_ctrl, mask);
+ return sec_reg_update(s5m8767->iodev, reg, enable_ctrl, mask);
}
static int s5m8767_reg_disable(struct regulator_dev *rdev)
@@ -243,7 +243,7 @@ static int s5m8767_reg_disable(struct regulator_dev *rdev)
if (ret)
return ret;
- return s5m_reg_update(s5m8767->iodev, reg, ~mask, mask);
+ return sec_reg_update(s5m8767->iodev, reg, ~mask, mask);
}
static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
@@ -305,7 +305,7 @@ static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
mask = (reg_id < S5M8767_BUCK1) ? 0x3f : 0xff;
- ret = s5m_reg_read(s5m8767->iodev, reg, &val);
+ ret = sec_reg_read(s5m8767->iodev, reg, &val);
if (ret)
return ret;
@@ -315,7 +315,7 @@ static int s5m8767_get_voltage_sel(struct regulator_dev *rdev)
}
static int s5m8767_convert_voltage_to_sel(
- const struct s5m_voltage_desc *desc,
+ const struct sec_voltage_desc *desc,
int min_vol, int max_vol)
{
int selector = 0;
@@ -407,7 +407,7 @@ static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
if (ret)
return ret;
- return s5m_reg_update(s5m8767->iodev, reg, selector, mask);
+ return sec_reg_update(s5m8767->iodev, reg, selector, mask);
}
}
@@ -416,7 +416,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- const struct s5m_voltage_desc *desc;
+ const struct sec_voltage_desc *desc;
int reg_id = rdev_get_id(rdev);
desc = reg_voltage_map[reg_id];
@@ -501,8 +501,8 @@ static struct regulator_desc regulators[] = {
static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
{
- struct s5m87xx_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct s5m_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
struct regulator_config config = { };
struct regulator_dev **rdev;
struct s5m8767_info *s5m8767;
@@ -572,21 +572,21 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
pdata->buck2_init +
buck_voltage_val2.step);
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck3_init,
pdata->buck3_init +
buck_voltage_val2.step);
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
pdata->buck4_init,
pdata->buck4_init +
buck_voltage_val2.step);
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
@@ -671,13 +671,13 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
pdata->buck4_gpiodvs) {
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
(pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1),
1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
(pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1),
1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
(pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1),
1 << 1);
}
@@ -685,61 +685,61 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS1 + i,
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS1 + i,
s5m8767->buck2_vol[i]);
}
if (s5m8767->buck3_gpiodvs) {
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS1 + i,
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS1 + i,
s5m8767->buck3_vol[i]);
}
if (s5m8767->buck4_gpiodvs) {
- s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS1 + i,
+ sec_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS1 + i,
s5m8767->buck4_vol[i]);
}
}
if (s5m8767->buck2_ramp)
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
if (s5m8767->buck3_ramp)
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x04, 0x04);
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x04, 0x04);
if (s5m8767->buck4_ramp)
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x02, 0x02);
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x02, 0x02);
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
switch (s5m8767->ramp_delay) {
case 5:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0x40, 0xf0);
break;
case 10:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0x90, 0xf0);
break;
case 25:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0xd0, 0xf0);
break;
case 50:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0xe0, 0xf0);
break;
case 100:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0xf0, 0xf0);
break;
default:
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ sec_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
0x90, 0xf0);
}
}
for (i = 0; i < pdata->num_regulators; i++) {
- const struct s5m_voltage_desc *desc;
+ const struct sec_voltage_desc *desc;
int id = pdata->regulators[i].id;
desc = reg_voltage_map[id];
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index e6da90ab5153..19241fc30050 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -240,14 +240,16 @@ static struct tps6586x_regulator tps6586x_regulator[] = {
TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
+ TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
ENB, 3, VCC2, 6),
TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
END, 3, VCC1, 6),
- TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
- TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+ TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1,
+ ENB, 1, VCC1, 2),
+ TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0,
+ ENB, 0, VCC1, 0),
};
/*
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 242fe90dc565..77a71a5c17c3 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1037,7 +1037,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300);
TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);
TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);
TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300);
-TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08);
+TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08);
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08);
@@ -1048,7 +1048,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0);
TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0);
TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0);
TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0);
-TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0);
TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34);
TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10);
TWL6025_ADJUSTABLE_SMPS(VIO, 0x16);
@@ -1117,7 +1116,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = {
TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6),
TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN),
TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB),
- TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2),
+ TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1),
TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG),
TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5),
TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8),
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 5445d9b23294..934ce6e2c66b 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_REMOTEPROC) += remoteproc.o
remoteproc-y := remoteproc_core.o
remoteproc-y += remoteproc_debugfs.o
remoteproc-y += remoteproc_virtio.o
+remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index de138e30d3e6..a1f7ac1f8cf6 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -66,7 +66,7 @@ static int omap_rproc_mbox_callback(struct notifier_block *this,
{
mbox_msg_t msg = (mbox_msg_t) data;
struct omap_rproc *oproc = container_of(this, struct omap_rproc, nb);
- struct device *dev = oproc->rproc->dev;
+ struct device *dev = oproc->rproc->dev.parent;
const char *name = oproc->rproc->name;
dev_dbg(dev, "mbox msg: 0x%x\n", msg);
@@ -92,12 +92,13 @@ static int omap_rproc_mbox_callback(struct notifier_block *this,
static void omap_rproc_kick(struct rproc *rproc, int vqid)
{
struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
int ret;
/* send the index of the triggered virtqueue in the mailbox payload */
ret = omap_mbox_msg_send(oproc->mbox, vqid);
if (ret)
- dev_err(rproc->dev, "omap_mbox_msg_send failed: %d\n", ret);
+ dev_err(dev, "omap_mbox_msg_send failed: %d\n", ret);
}
/*
@@ -110,7 +111,8 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
static int omap_rproc_start(struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
- struct platform_device *pdev = to_platform_device(rproc->dev);
+ struct device *dev = rproc->dev.parent;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
int ret;
@@ -120,7 +122,7 @@ static int omap_rproc_start(struct rproc *rproc)
oproc->mbox = omap_mbox_get(pdata->mbox_name, &oproc->nb);
if (IS_ERR(oproc->mbox)) {
ret = PTR_ERR(oproc->mbox);
- dev_err(rproc->dev, "omap_mbox_get failed: %d\n", ret);
+ dev_err(dev, "omap_mbox_get failed: %d\n", ret);
return ret;
}
@@ -133,13 +135,13 @@ static int omap_rproc_start(struct rproc *rproc)
*/
ret = omap_mbox_msg_send(oproc->mbox, RP_MBOX_ECHO_REQUEST);
if (ret) {
- dev_err(rproc->dev, "omap_mbox_get failed: %d\n", ret);
+ dev_err(dev, "omap_mbox_get failed: %d\n", ret);
goto put_mbox;
}
ret = pdata->device_enable(pdev);
if (ret) {
- dev_err(rproc->dev, "omap_device_enable failed: %d\n", ret);
+ dev_err(dev, "omap_device_enable failed: %d\n", ret);
goto put_mbox;
}
@@ -153,7 +155,8 @@ put_mbox:
/* power off the remote processor */
static int omap_rproc_stop(struct rproc *rproc)
{
- struct platform_device *pdev = to_platform_device(rproc->dev);
+ struct device *dev = rproc->dev.parent;
+ struct platform_device *pdev = to_platform_device(dev);
struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
struct omap_rproc *oproc = rproc->priv;
int ret;
@@ -196,14 +199,14 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rproc);
- ret = rproc_register(rproc);
+ ret = rproc_add(rproc);
if (ret)
goto free_rproc;
return 0;
free_rproc:
- rproc_free(rproc);
+ rproc_put(rproc);
return ret;
}
@@ -211,7 +214,10 @@ static int __devexit omap_rproc_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
- return rproc_unregister(rproc);
+ rproc_del(rproc);
+ rproc_put(rproc);
+
+ return 0;
}
static struct platform_driver omap_rproc_driver = {
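For reference (not part of the patch): the conversion above shows the lifecycle the reworked core expects from platform drivers. A minimal sketch with hypothetical names and trimmed error handling:

	static int example_rproc_probe(struct platform_device *pdev)
	{
		struct rproc *rproc;
		int ret;

		rproc = rproc_alloc(&pdev->dev, "example", &example_rproc_ops,
				    "example-fw.elf", 0);
		if (!rproc)
			return -ENOMEM;

		ret = rproc_add(rproc);
		if (ret) {
			rproc_put(rproc);	/* unroll rproc_alloc() */
			return ret;
		}

		platform_set_drvdata(pdev, rproc);
		return 0;
	}

	static int example_rproc_remove(struct platform_device *pdev)
	{
		struct rproc *rproc = platform_get_drvdata(pdev);

		rproc_del(rproc);	/* unregister ... */
		rproc_put(rproc);	/* ... then drop the rproc_alloc() reference */
		return 0;
	}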
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 66324ee4678f..d5c2dbfc7443 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -35,7 +35,7 @@
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
-#include <linux/klist.h>
+#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
@@ -43,29 +43,13 @@
#include "remoteproc_internal.h"
-static void klist_rproc_get(struct klist_node *n);
-static void klist_rproc_put(struct klist_node *n);
-
-/*
- * klist of the available remote processors.
- *
- * We need this in order to support name-based lookups (needed by the
- * rproc_get_by_name()).
- *
- * That said, we don't use rproc_get_by_name() at this point.
- * The use cases that do require its existence should be
- * scrutinized, and hopefully migrated to rproc_boot() using device-based
- * binding.
- *
- * If/when this materializes, we could drop the klist (and the by_name
- * API).
- */
-static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put);
-
typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
struct resource_table *table, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
+/* Unique indices for remoteproc devices */
+static DEFINE_IDA(rproc_dev_index);
+
/*
* This is the IOMMU fault handler we register with the IOMMU API
* (when relevant; not all remote processors access memory through
@@ -92,7 +76,7 @@ static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
static int rproc_enable_iommu(struct rproc *rproc)
{
struct iommu_domain *domain;
- struct device *dev = rproc->dev;
+ struct device *dev = rproc->dev.parent;
int ret;
/*
@@ -137,7 +121,7 @@ free_domain:
static void rproc_disable_iommu(struct rproc *rproc)
{
struct iommu_domain *domain = rproc->domain;
- struct device *dev = rproc->dev;
+ struct device *dev = rproc->dev.parent;
if (!domain)
return;
@@ -165,7 +149,7 @@ static void rproc_disable_iommu(struct rproc *rproc)
* but only on kernel direct mapped RAM memory. Instead, we're just using
* here the output of the DMA API, which should be more correct.
*/
-static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
@@ -188,125 +172,19 @@ static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
return ptr;
}
+EXPORT_SYMBOL(rproc_da_to_va);
-/**
- * rproc_load_segments() - load firmware segments to memory
- * @rproc: remote processor which will be booted using these fw segments
- * @elf_data: the content of the ELF firmware image
- * @len: firmware size (in bytes)
- *
- * This function loads the firmware segments to memory, where the remote
- * processor expects them.
- *
- * Some remote processors will expect their code and data to be placed
- * in specific device addresses, and can't have them dynamically assigned.
- *
- * We currently support only those kind of remote processors, and expect
- * the program header's paddr member to contain those addresses. We then go
- * through the physically contiguous "carveout" memory regions which we
- * allocated (and mapped) earlier on behalf of the remote processor,
- * and "translate" device address to kernel addresses, so we can copy the
- * segments where they are expected.
- *
- * Currently we only support remote processors that required carveout
- * allocations and got them mapped onto their iommus. Some processors
- * might be different: they might not have iommus, and would prefer to
- * directly allocate memory for every segment/resource. This is not yet
- * supported, though.
- */
-static int
-rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
-{
- struct device *dev = rproc->dev;
- struct elf32_hdr *ehdr;
- struct elf32_phdr *phdr;
- int i, ret = 0;
-
- ehdr = (struct elf32_hdr *)elf_data;
- phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
-
- /* go through the available ELF segments */
- for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u32 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
- void *ptr;
-
- if (phdr->p_type != PT_LOAD)
- continue;
-
- dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
-
- if (filesz > memsz) {
- dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
- filesz, memsz);
- ret = -EINVAL;
- break;
- }
-
- if (offset + filesz > len) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
- offset + filesz, len);
- ret = -EINVAL;
- break;
- }
-
- /* grab the kernel address for this device address */
- ptr = rproc_da_to_va(rproc, da, memsz);
- if (!ptr) {
- dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
- ret = -EINVAL;
- break;
- }
-
- /* put the segment where the remote processor expects it */
- if (phdr->p_filesz)
- memcpy(ptr, elf_data + phdr->p_offset, filesz);
-
- /*
- * Zero out remaining memory for this segment.
- *
- * This isn't strictly required since dma_alloc_coherent already
- * did this for us. albeit harmless, we may consider removing
- * this.
- */
- if (memsz > filesz)
- memset(ptr + filesz, 0, memsz - filesz);
- }
-
- return ret;
-}
-
-static int
-__rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = rproc->dev;
- struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+ struct device *dev = &rproc->dev;
+ struct rproc_vring *rvring = &rvdev->vring[i];
dma_addr_t dma;
void *va;
int ret, size, notifyid;
- dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
- i, vring->da, vring->num, vring->align);
-
- /* make sure reserved bytes are zeroes */
- if (vring->reserved) {
- dev_err(dev, "vring rsc has non zero reserved bytes\n");
- return -EINVAL;
- }
-
- /* verify queue size and vring alignment are sane */
- if (!vring->num || !vring->align) {
- dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
- vring->num, vring->align);
- return -EINVAL;
- }
-
/* actual size of vring (in bytes) */
- size = PAGE_ALIGN(vring_size(vring->num, vring->align));
+ size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
if (!idr_pre_get(&rproc->notifyids, GFP_KERNEL)) {
dev_err(dev, "idr_pre_get failed\n");
@@ -316,51 +194,75 @@ __rproc_handle_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
/*
* Allocate non-cacheable memory for the vring. In the future
* this call will also configure the IOMMU for us
+ * TODO: let the rproc know the da of this vring
*/
- va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
if (!va) {
- dev_err(dev, "dma_alloc_coherent failed\n");
+ dev_err(dev->parent, "dma_alloc_coherent failed\n");
return -EINVAL;
}
- /* assign an rproc-wide unique index for this vring */
- /* TODO: assign a notifyid for rvdev updates as well */
- ret = idr_get_new(&rproc->notifyids, &rvdev->vring[i], &notifyid);
+ /*
+ * Assign an rproc-wide unique index for this vring
+ * TODO: assign a notifyid for rvdev updates as well
+ * TODO: let the rproc know the notifyid of this vring
+ * TODO: support predefined notifyids (via resource table)
+ */
+ ret = idr_get_new(&rproc->notifyids, rvring, &notifyid);
if (ret) {
dev_err(dev, "idr_get_new failed: %d\n", ret);
- dma_free_coherent(dev, size, va, dma);
+ dma_free_coherent(dev->parent, size, va, dma);
return ret;
}
- /* let the rproc know the da and notifyid of this vring */
- /* TODO: expose this to remote processor */
- vring->da = dma;
- vring->notifyid = notifyid;
-
dev_dbg(dev, "vring%d: va %p dma %x size %x idr %d\n", i, va,
dma, size, notifyid);
- rvdev->vring[i].len = vring->num;
- rvdev->vring[i].align = vring->align;
- rvdev->vring[i].va = va;
- rvdev->vring[i].dma = dma;
- rvdev->vring[i].notifyid = notifyid;
- rvdev->vring[i].rvdev = rvdev;
+ rvring->va = va;
+ rvring->dma = dma;
+ rvring->notifyid = notifyid;
return 0;
}
-static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
+static int
+rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
struct rproc *rproc = rvdev->rproc;
+ struct device *dev = &rproc->dev;
+ struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
+ struct rproc_vring *rvring = &rvdev->vring[i];
+
+ dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
+ i, vring->da, vring->num, vring->align);
- for (i--; i >= 0; i--) {
- struct rproc_vring *rvring = &rvdev->vring[i];
- int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ /* make sure reserved bytes are zeroes */
+ if (vring->reserved) {
+ dev_err(dev, "vring rsc has non zero reserved bytes\n");
+ return -EINVAL;
+ }
- dma_free_coherent(rproc->dev, size, rvring->va, rvring->dma);
- idr_remove(&rproc->notifyids, rvring->notifyid);
+ /* verify queue size and vring alignment are sane */
+ if (!vring->num || !vring->align) {
+ dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
+ vring->num, vring->align);
+ return -EINVAL;
}
+
+ rvring->len = vring->num;
+ rvring->align = vring->align;
+ rvring->rvdev = rvdev;
+
+ return 0;
+}
+
+void rproc_free_vring(struct rproc_vring *rvring)
+{
+ int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+ struct rproc *rproc = rvring->rvdev->rproc;
+
+ dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
+ idr_remove(&rproc->notifyids, rvring->notifyid);
}
/**
@@ -393,14 +295,14 @@ static void __rproc_free_vrings(struct rproc_vdev *rvdev, int i)
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
int avail)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
/* make sure resource isn't truncated */
if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
+ rsc->config_len > avail) {
- dev_err(rproc->dev, "vdev rsc is truncated\n");
+ dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
@@ -425,11 +327,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
rvdev->rproc = rproc;
- /* allocate the vrings */
+ /* parse the vrings */
for (i = 0; i < rsc->num_of_vrings; i++) {
- ret = __rproc_handle_vring(rvdev, rsc, i);
+ ret = rproc_parse_vring(rvdev, rsc, i);
if (ret)
- goto free_vrings;
+ goto free_rvdev;
}
/* remember the device features */
@@ -440,12 +342,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* it is now safe to add the virtio device */
ret = rproc_add_virtio_dev(rvdev, rsc->id);
if (ret)
- goto free_vrings;
+ goto free_rvdev;
return 0;
-free_vrings:
- __rproc_free_vrings(rvdev, i);
+free_rvdev:
kfree(rvdev);
return ret;
}
@@ -470,12 +371,12 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
int avail)
{
struct rproc_mem_entry *trace;
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
void *ptr;
char name[15];
if (sizeof(*rsc) > avail) {
- dev_err(rproc->dev, "trace rsc is truncated\n");
+ dev_err(dev, "trace rsc is truncated\n");
return -EINVAL;
}
@@ -552,6 +453,7 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
int avail)
{
struct rproc_mem_entry *mapping;
+ struct device *dev = &rproc->dev;
int ret;
/* no point in handling this resource without a valid iommu domain */
@@ -559,25 +461,25 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
return -EINVAL;
if (sizeof(*rsc) > avail) {
- dev_err(rproc->dev, "devmem rsc is truncated\n");
+ dev_err(dev, "devmem rsc is truncated\n");
return -EINVAL;
}
/* make sure reserved bytes are zeroes */
if (rsc->reserved) {
- dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n");
+ dev_err(dev, "devmem rsc has non zero reserved bytes\n");
return -EINVAL;
}
mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping) {
- dev_err(rproc->dev, "kzalloc mapping failed\n");
+ dev_err(dev, "kzalloc mapping failed\n");
return -ENOMEM;
}
ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
if (ret) {
- dev_err(rproc->dev, "failed to map devmem: %d\n", ret);
+ dev_err(dev, "failed to map devmem: %d\n", ret);
goto out;
}
@@ -592,7 +494,7 @@ static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
mapping->len = rsc->len;
list_add_tail(&mapping->node, &rproc->mappings);
- dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
+ dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
rsc->pa, rsc->da, rsc->len);
return 0;
@@ -624,13 +526,13 @@ static int rproc_handle_carveout(struct rproc *rproc,
struct fw_rsc_carveout *rsc, int avail)
{
struct rproc_mem_entry *carveout, *mapping;
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
dma_addr_t dma;
void *va;
int ret;
if (sizeof(*rsc) > avail) {
- dev_err(rproc->dev, "carveout rsc is truncated\n");
+ dev_err(dev, "carveout rsc is truncated\n");
return -EINVAL;
}
@@ -656,9 +558,9 @@ static int rproc_handle_carveout(struct rproc *rproc,
goto free_mapping;
}
- va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL);
+ va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
if (!va) {
- dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len);
+ dev_err(dev->parent, "dma_alloc_coherent err: %d\n", rsc->len);
ret = -ENOMEM;
goto free_carv;
}
@@ -702,23 +604,27 @@ static int rproc_handle_carveout(struct rproc *rproc,
list_add_tail(&mapping->node, &rproc->mappings);
dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma);
-
- /*
- * Some remote processors might need to know the pa
- * even though they are behind an IOMMU. E.g., OMAP4's
- * remote M3 processor needs this so it can control
- * on-chip hardware accelerators that are not behind
- * the IOMMU, and therefor must know the pa.
- *
- * Generally we don't want to expose physical addresses
- * if we don't have to (remote processors are generally
- * _not_ trusted), so we might want to do this only for
- * remote processor that _must_ have this (e.g. OMAP4's
- * dual M3 subsystem).
- */
- rsc->pa = dma;
}
+ /*
+ * Some remote processors might need to know the pa
+ * even though they are behind an IOMMU. E.g., OMAP4's
+ * remote M3 processor needs this so it can control
+ * on-chip hardware accelerators that are not behind
+ * the IOMMU, and therefore must know the pa.
+ *
+ * Generally we don't want to expose physical addresses
+ * if we don't have to (remote processors are generally
+ * _not_ trusted), so we might want to do this only for
+ * remote processor that _must_ have this (e.g. OMAP4's
+ * dual M3 subsystem).
+ *
+ * Non-IOMMU processors might also want to have this info.
+ * In this case, the device address and the physical address
+ * are the same.
+ */
+ rsc->pa = dma;
+
carveout->va = va;
carveout->len = rsc->len;
carveout->dma = dma;
@@ -729,7 +635,7 @@ static int rproc_handle_carveout(struct rproc *rproc,
return 0;
dma_free:
- dma_free_coherent(dev, rsc->len, va, dma);
+ dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
kfree(carveout);
free_mapping:
@@ -752,7 +658,7 @@ static rproc_handle_resource_t rproc_handle_rsc[] = {
static int
rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
rproc_handle_resource_t handler;
int ret = 0, i;
@@ -791,7 +697,7 @@ rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len
static int
rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
int ret = 0, i;
for (i = 0; i < table->num; i++) {
@@ -822,85 +728,6 @@ rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int l
}
/**
- * rproc_find_rsc_table() - find the resource table
- * @rproc: the rproc handle
- * @elf_data: the content of the ELF firmware image
- * @len: firmware size (in bytes)
- * @tablesz: place holder for providing back the table size
- *
- * This function finds the resource table inside the remote processor's
- * firmware. It is used both upon the registration of @rproc (in order
- * to look for and register the supported virito devices), and when the
- * @rproc is booted.
- *
- * Returns the pointer to the resource table if it is found, and write its
- * size into @tablesz. If a valid table isn't found, NULL is returned
- * (and @tablesz isn't set).
- */
-static struct resource_table *
-rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len,
- int *tablesz)
-{
- struct elf32_hdr *ehdr;
- struct elf32_shdr *shdr;
- const char *name_table;
- struct device *dev = rproc->dev;
- struct resource_table *table = NULL;
- int i;
-
- ehdr = (struct elf32_hdr *)elf_data;
- shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
- name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
-
- /* look for the resource table and handle it */
- for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
- int size = shdr->sh_size;
- int offset = shdr->sh_offset;
-
- if (strcmp(name_table + shdr->sh_name, ".resource_table"))
- continue;
-
- table = (struct resource_table *)(elf_data + offset);
-
- /* make sure we have the entire table */
- if (offset + size > len) {
- dev_err(dev, "resource table truncated\n");
- return NULL;
- }
-
- /* make sure table has at least the header */
- if (sizeof(struct resource_table) > size) {
- dev_err(dev, "header-less resource table\n");
- return NULL;
- }
-
- /* we don't support any version beyond the first */
- if (table->ver != 1) {
- dev_err(dev, "unsupported fw ver: %d\n", table->ver);
- return NULL;
- }
-
- /* make sure reserved bytes are zeroes */
- if (table->reserved[0] || table->reserved[1]) {
- dev_err(dev, "non zero reserved bytes\n");
- return NULL;
- }
-
- /* make sure the offsets array isn't truncated */
- if (table->num * sizeof(table->offset[0]) +
- sizeof(struct resource_table) > size) {
- dev_err(dev, "resource table incomplete\n");
- return NULL;
- }
-
- *tablesz = shdr->sh_size;
- break;
- }
-
- return table;
-}
-
-/**
* rproc_resource_cleanup() - clean up and free all acquired resources
* @rproc: rproc handle
*
@@ -910,7 +737,7 @@ rproc_find_rsc_table(struct rproc *rproc, const u8 *elf_data, size_t len,
static void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
/* clean up debugfs trace entries */
list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
@@ -922,7 +749,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
/* clean up carveout allocations */
list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
- dma_free_coherent(dev, entry->len, entry->va, entry->dma);
+ dma_free_coherent(dev->parent, entry->len, entry->va, entry->dma);
list_del(&entry->node);
kfree(entry);
}
@@ -943,74 +770,13 @@ static void rproc_resource_cleanup(struct rproc *rproc)
}
}
-/* make sure this fw image is sane */
-static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
-{
- const char *name = rproc->firmware;
- struct device *dev = rproc->dev;
- struct elf32_hdr *ehdr;
- char class;
-
- if (!fw) {
- dev_err(dev, "failed to load %s\n", name);
- return -EINVAL;
- }
-
- if (fw->size < sizeof(struct elf32_hdr)) {
- dev_err(dev, "Image is too small\n");
- return -EINVAL;
- }
-
- ehdr = (struct elf32_hdr *)fw->data;
-
- /* We only support ELF32 at this point */
- class = ehdr->e_ident[EI_CLASS];
- if (class != ELFCLASS32) {
- dev_err(dev, "Unsupported class: %d\n", class);
- return -EINVAL;
- }
-
- /* We assume the firmware has the same endianess as the host */
-# ifdef __LITTLE_ENDIAN
- if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
-# else /* BIG ENDIAN */
- if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
-# endif
- dev_err(dev, "Unsupported firmware endianess\n");
- return -EINVAL;
- }
-
- if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
- dev_err(dev, "Image is too small\n");
- return -EINVAL;
- }
-
- if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
- dev_err(dev, "Image is corrupted (bad magic)\n");
- return -EINVAL;
- }
-
- if (ehdr->e_phnum == 0) {
- dev_err(dev, "No loadable segments\n");
- return -EINVAL;
- }
-
- if (ehdr->e_phoff > fw->size) {
- dev_err(dev, "Firmware size is too small\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/*
* take a firmware and boot a remote processor with it.
*/
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
const char *name = rproc->firmware;
- struct elf32_hdr *ehdr;
struct resource_table *table;
int ret, tablesz;
@@ -1018,8 +784,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
if (ret)
return ret;
- ehdr = (struct elf32_hdr *)fw->data;
-
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -1032,15 +796,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
return ret;
}
- /*
- * The ELF entry point is the rproc's boot addr (though this is not
- * a configurable property of all remote processors: some will always
- * boot at a specific hardcoded address).
- */
- rproc->bootaddr = ehdr->e_entry;
+ rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
/* look for the resource table */
- table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
+ table = rproc_find_rsc_table(rproc, fw, &tablesz);
if (!table) {
ret = -EINVAL;
goto clean_up;
@@ -1054,7 +813,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
}
/* load the ELF segments to memory */
- ret = rproc_load_segments(rproc, fw->data, fw->size);
+ ret = rproc_load_segments(rproc, fw);
if (ret) {
dev_err(dev, "Failed to load program segments: %d\n", ret);
goto clean_up;
@@ -1097,7 +856,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
goto out;
/* look for the resource table */
- table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
+ table = rproc_find_rsc_table(rproc, fw, &tablesz);
if (!table)
goto out;
@@ -1108,7 +867,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
out:
release_firmware(fw);
- /* allow rproc_unregister() contexts, if any, to proceed */
+ /* allow rproc_del() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
}
@@ -1134,7 +893,7 @@ int rproc_boot(struct rproc *rproc)
return -EINVAL;
}
- dev = rproc->dev;
+ dev = &rproc->dev;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
@@ -1150,7 +909,7 @@ int rproc_boot(struct rproc *rproc)
}
/* prevent underlying implementation from being removed */
- if (!try_module_get(dev->driver->owner)) {
+ if (!try_module_get(dev->parent->driver->owner)) {
dev_err(dev, "%s: can't get owner\n", __func__);
ret = -EINVAL;
goto unlock_mutex;
@@ -1177,7 +936,7 @@ int rproc_boot(struct rproc *rproc)
downref_rproc:
if (ret) {
- module_put(dev->driver->owner);
+ module_put(dev->parent->driver->owner);
atomic_dec(&rproc->power);
}
unlock_mutex:
@@ -1204,14 +963,10 @@ EXPORT_SYMBOL(rproc_boot);
* which means that the @rproc handle stays valid even after rproc_shutdown()
* returns, and users can still use it with a subsequent rproc_boot(), if
* needed.
- * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
- * because rproc_shutdown() _does not_ decrement the refcount of @rproc.
- * To decrement the refcount of @rproc, use rproc_put() (but _only_ if
- * you acquired @rproc using rproc_get_by_name()).
*/
void rproc_shutdown(struct rproc *rproc)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
int ret;
ret = mutex_lock_interruptible(&rproc->lock);
@@ -1244,148 +999,12 @@ void rproc_shutdown(struct rproc *rproc)
out:
mutex_unlock(&rproc->lock);
if (!ret)
- module_put(dev->driver->owner);
+ module_put(dev->parent->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);
/**
- * rproc_release() - completely deletes the existence of a remote processor
- * @kref: the rproc's kref
- *
- * This function should _never_ be called directly.
- *
- * The only reasonable location to use it is as an argument when kref_put'ing
- * @rproc's refcount.
- *
- * This way it will be called when no one holds a valid pointer to this @rproc
- * anymore (and obviously after it is removed from the rprocs klist).
- *
- * Note: this function is not static because rproc_vdev_release() needs it when
- * it decrements @rproc's refcount.
- */
-void rproc_release(struct kref *kref)
-{
- struct rproc *rproc = container_of(kref, struct rproc, refcount);
- struct rproc_vdev *rvdev, *rvtmp;
-
- dev_info(rproc->dev, "removing %s\n", rproc->name);
-
- rproc_delete_debug_dir(rproc);
-
- /* clean up remote vdev entries */
- list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node) {
- __rproc_free_vrings(rvdev, RVDEV_NUM_VRINGS);
- list_del(&rvdev->node);
- }
-
- /*
- * At this point no one holds a reference to rproc anymore,
- * so we can directly unroll rproc_alloc()
- */
- rproc_free(rproc);
-}
-
-/* will be called when an rproc is added to the rprocs klist */
-static void klist_rproc_get(struct klist_node *n)
-{
- struct rproc *rproc = container_of(n, struct rproc, node);
-
- kref_get(&rproc->refcount);
-}
-
-/* will be called when an rproc is removed from the rprocs klist */
-static void klist_rproc_put(struct klist_node *n)
-{
- struct rproc *rproc = container_of(n, struct rproc, node);
-
- kref_put(&rproc->refcount, rproc_release);
-}
-
-static struct rproc *next_rproc(struct klist_iter *i)
-{
- struct klist_node *n;
-
- n = klist_next(i);
- if (!n)
- return NULL;
-
- return container_of(n, struct rproc, node);
-}
-
-/**
- * rproc_get_by_name() - find a remote processor by name and boot it
- * @name: name of the remote processor
- *
- * Finds an rproc handle using the remote processor's name, and then
- * boot it. If it's already powered on, then just immediately return
- * (successfully).
- *
- * Returns the rproc handle on success, and NULL on failure.
- *
- * This function increments the remote processor's refcount, so always
- * use rproc_put() to decrement it back once rproc isn't needed anymore.
- *
- * Note: currently this function (and its counterpart rproc_put()) are not
- * being used. We need to scrutinize the use cases
- * that still need them, and see if we can migrate them to use the non
- * name-based boot/shutdown interface.
- */
-struct rproc *rproc_get_by_name(const char *name)
-{
- struct rproc *rproc;
- struct klist_iter i;
- int ret;
-
- /* find the remote processor, and upref its refcount */
- klist_iter_init(&rprocs, &i);
- while ((rproc = next_rproc(&i)) != NULL)
- if (!strcmp(rproc->name, name)) {
- kref_get(&rproc->refcount);
- break;
- }
- klist_iter_exit(&i);
-
- /* can't find this rproc ? */
- if (!rproc) {
- pr_err("can't find remote processor %s\n", name);
- return NULL;
- }
-
- ret = rproc_boot(rproc);
- if (ret < 0) {
- kref_put(&rproc->refcount, rproc_release);
- return NULL;
- }
-
- return rproc;
-}
-EXPORT_SYMBOL(rproc_get_by_name);
-
-/**
- * rproc_put() - decrement the refcount of a remote processor, and shut it down
- * @rproc: the remote processor
- *
- * This function tries to shutdown @rproc, and it then decrements its
- * refcount.
- *
- * After this function returns, @rproc may _not_ be used anymore, and its
- * handle should be considered invalid.
- *
- * This function should be called _iff_ the @rproc handle was grabbed by
- * calling rproc_get_by_name().
- */
-void rproc_put(struct rproc *rproc)
-{
- /* try to power off the remote processor */
- rproc_shutdown(rproc);
-
- /* downref rproc's refcount */
- kref_put(&rproc->refcount, rproc_release);
-}
-EXPORT_SYMBOL(rproc_put);
-
-/**
- * rproc_register() - register a remote processor
+ * rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
*
* Registers @rproc with the remoteproc framework, after it has been
@@ -1404,15 +1023,16 @@ EXPORT_SYMBOL(rproc_put);
* of registering this remote processor, additional virtio drivers might be
* probed.
*/
-int rproc_register(struct rproc *rproc)
+int rproc_add(struct rproc *rproc)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
int ret = 0;
- /* expose to rproc_get_by_name users */
- klist_add_tail(&rproc->node, &rprocs);
+ ret = device_add(dev);
+ if (ret < 0)
+ return ret;
- dev_info(rproc->dev, "%s is available\n", rproc->name);
+ dev_info(dev, "%s is available\n", rproc->name);
dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");
@@ -1420,7 +1040,7 @@ int rproc_register(struct rproc *rproc)
/* create debugfs entries */
rproc_create_debug_dir(rproc);
- /* rproc_unregister() calls must wait until async loader completes */
+ /* rproc_del() calls must wait until async loader completes */
init_completion(&rproc->firmware_loading_complete);
/*
@@ -1437,12 +1057,42 @@ int rproc_register(struct rproc *rproc)
if (ret < 0) {
dev_err(dev, "request_firmware_nowait failed: %d\n", ret);
complete_all(&rproc->firmware_loading_complete);
- klist_remove(&rproc->node);
}
return ret;
}
-EXPORT_SYMBOL(rproc_register);
+EXPORT_SYMBOL(rproc_add);
+
+/**
+ * rproc_type_release() - release a remote processor instance
+ * @dev: the rproc's device
+ *
+ * This function should _never_ be called directly.
+ *
+ * It will be called by the driver core when no one holds a valid pointer
+ * to @dev anymore.
+ */
+static void rproc_type_release(struct device *dev)
+{
+ struct rproc *rproc = container_of(dev, struct rproc, dev);
+
+ dev_info(&rproc->dev, "releasing %s\n", rproc->name);
+
+ rproc_delete_debug_dir(rproc);
+
+ idr_remove_all(&rproc->notifyids);
+ idr_destroy(&rproc->notifyids);
+
+ if (rproc->index >= 0)
+ ida_simple_remove(&rproc_dev_index, rproc->index);
+
+ kfree(rproc);
+}
+
+static struct device_type rproc_type = {
+ .name = "remoteproc",
+ .release = rproc_type_release,
+};
/**
* rproc_alloc() - allocate a remote processor handle
@@ -1459,13 +1109,13 @@ EXPORT_SYMBOL(rproc_register);
* of the remote processor.
*
* After creating an rproc handle using this function, and when ready,
- * implementations should then call rproc_register() to complete
+ * implementations should then call rproc_add() to complete
* the registration of the remote processor.
*
* On success the new rproc is returned, and on failure, NULL.
*
* Note: _never_ directly deallocate @rproc, even if it was not registered
- * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free().
+ * yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
*/
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
@@ -1482,15 +1132,29 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
return NULL;
}
- rproc->dev = dev;
rproc->name = name;
rproc->ops = ops;
rproc->firmware = firmware;
rproc->priv = &rproc[1];
+ device_initialize(&rproc->dev);
+ rproc->dev.parent = dev;
+ rproc->dev.type = &rproc_type;
+
+ /* Assign a unique device index and name */
+ rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
+ if (rproc->index < 0) {
+ dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
+ put_device(&rproc->dev);
+ return NULL;
+ }
+
+ dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
+
atomic_set(&rproc->power, 0);
- kref_init(&rproc->refcount);
+ /* Set ELF as the default fw_ops handler */
+ rproc->fw_ops = &rproc_elf_fw_ops;
mutex_init(&rproc->lock);
@@ -1508,47 +1172,38 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
EXPORT_SYMBOL(rproc_alloc);
/**
- * rproc_free() - free an rproc handle that was allocated by rproc_alloc
+ * rproc_put() - unroll rproc_alloc()
* @rproc: the remote processor handle
*
- * This function should _only_ be used if @rproc was only allocated,
- * but not registered yet.
+ * This function decrements the rproc dev refcount.
*
- * If @rproc was already successfully registered (by calling rproc_register()),
- * then use rproc_unregister() instead.
+ * If no one holds any reference to rproc anymore, then its refcount would
+ * now drop to zero, and it would be freed.
*/
-void rproc_free(struct rproc *rproc)
+void rproc_put(struct rproc *rproc)
{
- idr_remove_all(&rproc->notifyids);
- idr_destroy(&rproc->notifyids);
-
- kfree(rproc);
+ put_device(&rproc->dev);
}
-EXPORT_SYMBOL(rproc_free);
+EXPORT_SYMBOL(rproc_put);
/**
- * rproc_unregister() - unregister a remote processor
+ * rproc_del() - unregister a remote processor
* @rproc: rproc handle to unregister
*
- * Unregisters a remote processor, and decrements its refcount.
- * If its refcount drops to zero, then @rproc will be freed. If not,
- * it will be freed later once the last reference is dropped.
- *
* This function should be called when the platform specific rproc
 * implementation decides to remove the rproc device. It should
- * _only_ be called if a previous invocation of rproc_register()
+ * _only_ be called if a previous invocation of rproc_add()
* has completed successfully.
*
- * After rproc_unregister() returns, @rproc is _not_ valid anymore and
- * it shouldn't be used. More specifically, don't call rproc_free()
- * or try to directly free @rproc after rproc_unregister() returns;
- * none of these are needed, and calling them is a bug.
+ * After rproc_del() returns, @rproc isn't freed yet, because
+ * of the outstanding reference created by rproc_alloc. To decrement that
+ * one last refcount, one still needs to call rproc_put().
*
* Returns 0 on success and -EINVAL if @rproc isn't valid.
*/
-int rproc_unregister(struct rproc *rproc)
+int rproc_del(struct rproc *rproc)
{
- struct rproc_vdev *rvdev;
+ struct rproc_vdev *rvdev, *tmp;
if (!rproc)
return -EINVAL;
@@ -1557,22 +1212,19 @@ int rproc_unregister(struct rproc *rproc)
wait_for_completion(&rproc->firmware_loading_complete);
/* clean up remote vdev entries */
- list_for_each_entry(rvdev, &rproc->rvdevs, node)
+ list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
rproc_remove_virtio_dev(rvdev);
- /* the rproc is downref'ed as soon as it's removed from the klist */
- klist_del(&rproc->node);
-
- /* the rproc will only be released after its refcount drops to zero */
- kref_put(&rproc->refcount, rproc_release);
+ device_del(&rproc->dev);
return 0;
}
-EXPORT_SYMBOL(rproc_unregister);
+EXPORT_SYMBOL(rproc_del);
static int __init remoteproc_init(void)
{
rproc_init_debugfs();
+
return 0;
}
module_init(remoteproc_init);
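For reference, a minimal sketch (not part of this patch) of how a platform-specific driver might use the reworked lifecycle: rproc_alloc() creates the device, rproc_add() registers it, and teardown pairs rproc_del() with rproc_put() to drop the reference taken at allocation time. The "acme" names and the firmware filename are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>

/* hypothetical start/stop/kick handlers are assumed to exist elsewhere */
extern const struct rproc_ops acme_rproc_ops;

static int acme_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
			    &acme_rproc_ops, "acme-fw.elf", 0);
	if (!rproc)
		return -ENOMEM;

	ret = rproc_add(rproc);
	if (ret) {
		rproc_put(rproc);	/* unrolls rproc_alloc() */
		return ret;
	}

	platform_set_drvdata(pdev, rproc);
	return 0;
}

static int acme_rproc_remove(struct platform_device *pdev)
{
	struct rproc *rproc = platform_get_drvdata(pdev);

	rproc_del(rproc);	/* unregister; @rproc is not freed yet */
	rproc_put(rproc);	/* drop the last reference, freeing it */
	return 0;
}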
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index 85d31a69e117..03833850f214 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -124,7 +124,7 @@ struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
tfile = debugfs_create_file(name, 0400, rproc->dbg_dir,
trace, &trace_rproc_ops);
if (!tfile) {
- dev_err(rproc->dev, "failed to create debugfs trace entry\n");
+ dev_err(&rproc->dev, "failed to create debugfs trace entry\n");
return NULL;
}
@@ -141,7 +141,7 @@ void rproc_delete_debug_dir(struct rproc *rproc)
void rproc_create_debug_dir(struct rproc *rproc)
{
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
if (!rproc_dbg)
return;
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
new file mode 100644
index 000000000000..e1f89d649733
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -0,0 +1,295 @@
+/*
+ * Remote Processor Framework Elf loader
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Ohad Ben-Cohen <ohad@wizery.com>
+ * Brian Swetland <swetland@google.com>
+ * Mark Grosen <mgrosen@ti.com>
+ * Fernando Guzman Lugo <fernando.lugo@ti.com>
+ * Suman Anna <s-anna@ti.com>
+ * Robert Tivy <rtivy@ti.com>
+ * Armando Uribe De Leon <x0095078@ti.com>
+ * Sjur Brændeland <sjur.brandeland@stericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/elf.h>
+
+#include "remoteproc_internal.h"
+
+/**
+ * rproc_elf_sanity_check() - Sanity check an ELF firmware image
+ * @rproc: the remote processor handle
+ * @fw: the ELF firmware image
+ *
+ * Make sure this fw image is sane.
+ */
+static int
+rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ const char *name = rproc->firmware;
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ char class;
+
+ if (!fw) {
+ dev_err(dev, "failed to load %s\n", name);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+
+ /* We only support ELF32 at this point */
+ class = ehdr->e_ident[EI_CLASS];
+ if (class != ELFCLASS32) {
+ dev_err(dev, "Unsupported class: %d\n", class);
+ return -EINVAL;
+ }
+
+ /* We assume the firmware has the same endianness as the host */
+# ifdef __LITTLE_ENDIAN
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
+# else /* BIG ENDIAN */
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+# endif
+ dev_err(dev, "Unsupported firmware endianess\n");
+ return -EINVAL;
+ }
+
+ if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
+ dev_err(dev, "Image is too small\n");
+ return -EINVAL;
+ }
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phnum == 0) {
+ dev_err(dev, "No loadable segments\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phoff > fw->size) {
+ dev_err(dev, "Firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * rproc_elf_get_boot_addr() - Get rproc's boot address.
+ * @rproc: the remote processor handle
+ * @fw: the ELF firmware image
+ *
+ * This function returns the entry point address of the ELF
+ * image.
+ *
+ * Note that the boot address is not a configurable property of all remote
+ * processors. Some will always boot at a specific hard-coded address.
+ */
+static
+u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+
+ return ehdr->e_entry;
+}
+
+/**
+ * rproc_elf_load_segments() - load firmware segments to memory
+ * @rproc: remote processor which will be booted using these fw segments
+ * @fw: the ELF firmware image
+ *
+ * This function loads the firmware segments to memory, where the remote
+ * processor expects them.
+ *
+ * Some remote processors will expect their code and data to be placed
+ * in specific device addresses, and can't have them dynamically assigned.
+ *
+ * We currently support only that kind of remote processor, and expect
+ * the program header's paddr member to contain those addresses. We then go
+ * through the physically contiguous "carveout" memory regions which we
+ * allocated (and mapped) earlier on behalf of the remote processor,
+ * and "translate" device addresses to kernel addresses, so we can copy the
+ * segments where they are expected.
+ *
+ * Currently we only support remote processors that require carveout
+ * allocations and have them mapped onto their iommus. Some processors
+ * might be different: they might not have iommus, and would prefer to
+ * directly allocate memory for every segment/resource. This is not yet
+ * supported, though.
+ */
+static int
+rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = rproc_da_to_va(rproc, da, memsz);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ if (phdr->p_filesz)
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+
+ /*
+ * Zero out remaining memory for this segment.
+ *
+ * This isn't strictly required since dma_alloc_coherent already
+ * did this for us. Albeit harmless, we may consider removing
+ * this.
+ */
+ if (memsz > filesz)
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
+
+ return ret;
+}
+
+/**
+ * rproc_elf_find_rsc_table() - find the resource table
+ * @rproc: the rproc handle
+ * @fw: the ELF firmware image
+ * @tablesz: placeholder for passing back the table size
+ *
+ * This function finds the resource table inside the remote processor's
+ * firmware. It is used both upon the registration of @rproc (in order
+ * to look for and register the supported virtio devices), and when the
+ * @rproc is booted.
+ *
+ * Returns the pointer to the resource table if it is found, and writes its
+ * size into @tablesz. If a valid table isn't found, NULL is returned
+ * (and @tablesz isn't set).
+ */
+static struct resource_table *
+rproc_elf_find_rsc_table(struct rproc *rproc, const struct firmware *fw,
+ int *tablesz)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr;
+ const char *name_table;
+ struct device *dev = &rproc->dev;
+ struct resource_table *table = NULL;
+ int i;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
+
+ /* look for the resource table and handle it */
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ int size = shdr->sh_size;
+ int offset = shdr->sh_offset;
+
+ if (strcmp(name_table + shdr->sh_name, ".resource_table"))
+ continue;
+
+ table = (struct resource_table *)(elf_data + offset);
+
+ /* make sure we have the entire table */
+ if (offset + size > fw->size) {
+ dev_err(dev, "resource table truncated\n");
+ return NULL;
+ }
+
+ /* make sure table has at least the header */
+ if (sizeof(struct resource_table) > size) {
+ dev_err(dev, "header-less resource table\n");
+ return NULL;
+ }
+
+ /* we don't support any version beyond the first */
+ if (table->ver != 1) {
+ dev_err(dev, "unsupported fw ver: %d\n", table->ver);
+ return NULL;
+ }
+
+ /* make sure reserved bytes are zeroes */
+ if (table->reserved[0] || table->reserved[1]) {
+ dev_err(dev, "non zero reserved bytes\n");
+ return NULL;
+ }
+
+ /* make sure the offsets array isn't truncated */
+ if (table->num * sizeof(table->offset[0]) +
+ sizeof(struct resource_table) > size) {
+ dev_err(dev, "resource table incomplete\n");
+ return NULL;
+ }
+
+ *tablesz = shdr->sh_size;
+ break;
+ }
+
+ return table;
+}
+
+const struct rproc_fw_ops rproc_elf_fw_ops = {
+ .load = rproc_elf_load_segments,
+ .find_rsc_table = rproc_elf_find_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr
+};
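The per-segment rule applied by rproc_elf_load_segments() can be stated in isolation: for every PT_LOAD program header, p_filesz bytes are copied from the image to the device address in p_paddr, and the remaining p_memsz - p_filesz bytes (typically .bss) are zeroed. A standalone sketch of just that rule, assuming the core's rproc_da_to_va() translator as above:

/* sketch only: load one PT_LOAD segment, given a da -> va translator */
static int load_one_segment(struct rproc *rproc, const u8 *elf_data,
			    size_t fw_size, const struct elf32_phdr *phdr)
{
	void *ptr;

	if (phdr->p_filesz > phdr->p_memsz ||
	    phdr->p_offset + phdr->p_filesz > fw_size)
		return -EINVAL;

	ptr = rproc_da_to_va(rproc, phdr->p_paddr, phdr->p_memsz);
	if (!ptr)
		return -EINVAL;

	memcpy(ptr, elf_data + phdr->p_offset, phdr->p_filesz);
	/* e.g. filesz 0x1000, memsz 0x1800: the trailing 0x800 bytes are zeroed */
	memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
	return 0;
}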
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 9f336d6bdef3..a690ebe7aa51 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -21,9 +21,27 @@
#define REMOTEPROC_INTERNAL_H
#include <linux/irqreturn.h>
+#include <linux/firmware.h>
struct rproc;
+/**
+ * struct rproc_fw_ops - firmware format specific operations.
+ * @find_rsc_table: finds the resource table inside the firmware image
+ * @load: load firmware to memory, where the remote processor
+ * expects to find it
+ * @sanity_check: sanity check the fw image
+ * @get_boot_addr: get the boot address (entry point) specified in the firmware
+ */
+struct rproc_fw_ops {
+ struct resource_table *(*find_rsc_table) (struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz);
+ int (*load)(struct rproc *rproc, const struct firmware *fw);
+ int (*sanity_check)(struct rproc *rproc, const struct firmware *fw);
+ u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw);
+};
+
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
@@ -41,4 +59,48 @@ void rproc_create_debug_dir(struct rproc *rproc);
void rproc_init_debugfs(void);
void rproc_exit_debugfs(void);
+void rproc_free_vring(struct rproc_vring *rvring);
+int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
+
+void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
+
+static inline
+int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->fw_ops->sanity_check)
+ return rproc->fw_ops->sanity_check(rproc, fw);
+
+ return 0;
+}
+
+static inline
+u32 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->fw_ops->get_boot_addr)
+ return rproc->fw_ops->get_boot_addr(rproc, fw);
+
+ return 0;
+}
+
+static inline
+int rproc_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc->fw_ops->load)
+ return rproc->fw_ops->load(rproc, fw);
+
+ return -EINVAL;
+}
+
+static inline
+struct resource_table *rproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw, int *tablesz)
+{
+ if (rproc->fw_ops->find_rsc_table)
+ return rproc->fw_ops->find_rsc_table(rproc, fw, tablesz);
+
+ return NULL;
+}
+
+extern const struct rproc_fw_ops rproc_elf_fw_ops;
+
#endif /* REMOTEPROC_INTERNAL_H */
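The rproc_fw_ops indirection is what allows a non-ELF image format to plug in without touching the core: as the inline wrappers above show, only .load is strictly required (its wrapper returns -EINVAL when missing), while sanity_check, get_boot_addr and find_rsc_table fall back to harmless defaults. A hypothetical raw-binary loader, purely illustrative and not part of this patch (whether the core tolerates a missing resource table is decided elsewhere, in the boot path):

/* hypothetical: copy a flat binary image to a fixed, assumed device address */
static int rawbin_load(struct rproc *rproc, const struct firmware *fw)
{
	void *va = rproc_da_to_va(rproc, 0 /* assumed load address */, fw->size);

	if (!va)
		return -EINVAL;

	memcpy(va, fw->data, fw->size);
	return 0;
}

static const struct rproc_fw_ops rawbin_fw_ops = {
	.load = rawbin_load,
	/* no sanity_check/get_boot_addr/find_rsc_table: defaults apply */
};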
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index ecf612130750..3541b4492f64 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -36,7 +36,7 @@ static void rproc_virtio_notify(struct virtqueue *vq)
struct rproc *rproc = rvring->rvdev->rproc;
int notifyid = rvring->notifyid;
- dev_dbg(rproc->dev, "kicking vq index: %d\n", notifyid);
+ dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);
rproc->ops->kick(rproc, notifyid);
}
@@ -57,7 +57,7 @@ irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
struct rproc_vring *rvring;
- dev_dbg(rproc->dev, "vq index %d is interrupted\n", notifyid);
+ dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);
rvring = idr_find(&rproc->notifyids, notifyid);
if (!rvring || !rvring->vq)
@@ -74,17 +74,21 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
+ struct device *dev = &rproc->dev;
struct rproc_vring *rvring;
struct virtqueue *vq;
void *addr;
- int len, size;
+ int len, size, ret;
/* we're temporarily limited to two virtqueues per rvdev */
if (id >= ARRAY_SIZE(rvdev->vring))
return ERR_PTR(-EINVAL);
- rvring = &rvdev->vring[id];
+ ret = rproc_alloc_vring(rvdev, id);
+ if (ret)
+ return ERR_PTR(ret);
+ rvring = &rvdev->vring[id];
addr = rvring->va;
len = rvring->len;
@@ -92,7 +96,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
size = vring_size(len, rvring->align);
memset(addr, 0, size);
- dev_dbg(rproc->dev, "vring%d: va %p qsz %d notifyid %d\n",
+ dev_dbg(dev, "vring%d: va %p qsz %d notifyid %d\n",
id, addr, len, rvring->notifyid);
/*
@@ -102,7 +106,8 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
vq = vring_new_virtqueue(len, rvring->align, vdev, false, addr,
rproc_virtio_notify, callback, name);
if (!vq) {
- dev_err(rproc->dev, "vring_new_virtqueue %s failed\n", name);
+ dev_err(dev, "vring_new_virtqueue %s failed\n", name);
+ rproc_free_vring(rvring);
return ERR_PTR(-ENOMEM);
}
@@ -125,6 +130,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev)
rvring = vq->priv;
rvring->vq = NULL;
vring_del_virtqueue(vq);
+ rproc_free_vring(rvring);
}
}
@@ -147,7 +153,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
/* now that the vqs are all set, boot the remote processor */
ret = rproc_boot(rproc);
if (ret) {
- dev_err(rproc->dev, "rproc_boot() failed %d\n", ret);
+ dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
goto error;
}
@@ -219,7 +225,7 @@ static struct virtio_config_ops rproc_virtio_config_ops = {
/*
* This function is called whenever vdev is released, and is responsible
- * to decrement the remote processor's refcount taken when vdev was
+ * for decrementing the remote processor's refcount, which was taken when vdev was
* added.
*
* Never call this function directly; it will be called by the driver
@@ -228,9 +234,13 @@ static struct virtio_config_ops rproc_virtio_config_ops = {
static void rproc_vdev_release(struct device *dev)
{
struct virtio_device *vdev = dev_to_virtio(dev);
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
struct rproc *rproc = vdev_to_rproc(vdev);
- kref_put(&rproc->refcount, rproc_release);
+ list_del(&rvdev->node);
+ kfree(rvdev);
+
+ put_device(&rproc->dev);
}
/**
@@ -245,7 +255,7 @@ static void rproc_vdev_release(struct device *dev)
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
struct rproc *rproc = rvdev->rproc;
- struct device *dev = rproc->dev;
+ struct device *dev = &rproc->dev;
struct virtio_device *vdev = &rvdev->vdev;
int ret;
@@ -262,11 +272,11 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
* Therefore we must increment the rproc refcount here, and decrement
* it _only_ when the vdev is released.
*/
- kref_get(&rproc->refcount);
+ get_device(&rproc->dev);
ret = register_virtio_device(vdev);
if (ret) {
- kref_put(&rproc->refcount, rproc_release);
+ put_device(&rproc->dev);
dev_err(dev, "failed to register vdev: %d\n", ret);
goto out;
}
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index f56c8ba3a861..590cfafc7c17 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -956,7 +956,8 @@ static int rpmsg_probe(struct virtio_device *vdev)
vrp->svq = vqs[1];
/* allocate coherent memory for the buffers */
- bufs_va = dma_alloc_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
+ bufs_va = dma_alloc_coherent(vdev->dev.parent->parent,
+ RPMSG_TOTAL_BUF_SPACE,
&vrp->bufs_dma, GFP_KERNEL);
if (!bufs_va)
goto vqs_del;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 08cbdb900a18..fabc99a75c65 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -135,6 +135,16 @@ config RTC_DRV_88PM860X
This driver can also be built as a module. If so, the module
will be called rtc-88pm860x.
+config RTC_DRV_88PM80X
+ tristate "Marvell 88PM80x"
+ depends on RTC_CLASS && I2C && MFD_88PM800
+ help
+ If you say yes here you get support for RTC function in Marvell
+ 88PM80x chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-88pm80x.
+
config RTC_DRV_DS1307
tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
help
@@ -694,6 +704,7 @@ config RTC_DRV_AB3100
config RTC_DRV_AB8500
tristate "ST-Ericsson AB8500 RTC"
depends on AB8500_CORE
+ select RTC_INTF_DEV_UIE_EMUL
help
Select this to enable the ST-Ericsson AB8500 power management IC RTC
support. This chip contains a battery- and capacitor-backed RTC.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 2973921c30d8..0d5b2b66f90d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -16,6 +16,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
# Keep the list ordered.
obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o
+obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index eb415bd76494..9592b936b71b 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events)
{
+ pm_stay_awake(rtc->dev.parent);
schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
@@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work)
mutex_lock(&rtc->ops_lock);
again:
+ pm_relax(rtc->dev.parent);
__rtc_read_time(rtc, &tm);
now = rtc_tm_to_ktime(tm);
while ((next = timerqueue_getnext(&rtc->timerqueue))) {
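The pm_stay_awake()/pm_relax() pair added above keeps the system from suspending between the RTC interrupt and the deferred work that services it: the hold taken in rtc_update_irq() is released once rtc_timer_do_work() runs. A generic sketch of that bracket, with a hypothetical driver ("foo") owning the wakeup source:

struct foo {
	struct device *dev;		/* registered as wakeup-capable */
	struct work_struct work;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	struct foo *foo = data;

	pm_stay_awake(foo->dev);	/* block suspend until the work runs */
	schedule_work(&foo->work);
	return IRQ_HANDLED;
}

static void foo_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, work);

	pm_relax(foo->dev);		/* matching release */
	/* ... process the event ... */
}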
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
new file mode 100644
index 000000000000..6367984e0565
--- /dev/null
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -0,0 +1,369 @@
+/*
+ * Real Time Clock driver for Marvell 88PM80x PMIC
+ *
+ * Copyright (c) 2012 Marvell International Ltd.
+ * Wenzeng Chen<wzch@marvell.com>
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/rtc.h>
+
+#define PM800_RTC_COUNTER1 (0xD1)
+#define PM800_RTC_COUNTER2 (0xD2)
+#define PM800_RTC_COUNTER3 (0xD3)
+#define PM800_RTC_COUNTER4 (0xD4)
+#define PM800_RTC_EXPIRE1_1 (0xD5)
+#define PM800_RTC_EXPIRE1_2 (0xD6)
+#define PM800_RTC_EXPIRE1_3 (0xD7)
+#define PM800_RTC_EXPIRE1_4 (0xD8)
+#define PM800_RTC_TRIM1 (0xD9)
+#define PM800_RTC_TRIM2 (0xDA)
+#define PM800_RTC_TRIM3 (0xDB)
+#define PM800_RTC_TRIM4 (0xDC)
+#define PM800_RTC_EXPIRE2_1 (0xDD)
+#define PM800_RTC_EXPIRE2_2 (0xDE)
+#define PM800_RTC_EXPIRE2_3 (0xDF)
+#define PM800_RTC_EXPIRE2_4 (0xE0)
+
+#define PM800_POWER_DOWN_LOG1 (0xE5)
+#define PM800_POWER_DOWN_LOG2 (0xE6)
+
+struct pm80x_rtc_info {
+ struct pm80x_chip *chip;
+ struct regmap *map;
+ struct rtc_device *rtc_dev;
+ struct device *dev;
+ struct delayed_work calib_work;
+
+ int irq;
+ int vrtc;
+};
+
+static irqreturn_t rtc_update_handler(int irq, void *data)
+{
+ struct pm80x_rtc_info *info = (struct pm80x_rtc_info *)data;
+ int mask;
+
+ mask = PM800_ALARM | PM800_ALARM_WAKEUP;
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, mask | PM800_ALARM1_EN,
+ mask);
+ rtc_update_irq(info->rtc_dev, 1, RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int pm80x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct pm80x_rtc_info *info = dev_get_drvdata(dev);
+
+ if (enabled)
+ regmap_update_bits(info->map, PM800_RTC_CONTROL,
+ PM800_ALARM1_EN, PM800_ALARM1_EN);
+ else
+ regmap_update_bits(info->map, PM800_RTC_CONTROL,
+ PM800_ALARM1_EN, 0);
+ return 0;
+}
+
+/*
+ * Calculate the next alarm time given the requested alarm time
+ * and the current time.
+ */
+static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
+ struct rtc_time *alrm)
+{
+ unsigned long next_time;
+ unsigned long now_time;
+
+ next->tm_year = now->tm_year;
+ next->tm_mon = now->tm_mon;
+ next->tm_mday = now->tm_mday;
+ next->tm_hour = alrm->tm_hour;
+ next->tm_min = alrm->tm_min;
+ next->tm_sec = alrm->tm_sec;
+
+ rtc_tm_to_time(now, &now_time);
+ rtc_tm_to_time(next, &next_time);
+
+ if (next_time < now_time) {
+ /* Advance one day */
+ next_time += 60 * 60 * 24;
+ rtc_time_to_tm(next_time, next);
+ }
+}
+
+static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pm80x_rtc_info *info = dev_get_drvdata(dev);
+ unsigned char buf[4];
+ unsigned long ticks, base, data;
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+ base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+ data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+ rtc_time_to_tm(ticks, tm);
+ return 0;
+}
+
+static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct pm80x_rtc_info *info = dev_get_drvdata(dev);
+ unsigned char buf[4];
+ unsigned long ticks, base, data;
+ if ((tm->tm_year < 70) || (tm->tm_year > 138)) {
+ dev_dbg(info->dev,
+ "Set time %d out of range. Please set time between 1970 to 2038.\n",
+ 1900 + tm->tm_year);
+ return -EINVAL;
+ }
+ rtc_tm_to_time(tm, &ticks);
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+ data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ base = ticks - data;
+ dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+ buf[0] = base & 0xFF;
+ buf[1] = (base >> 8) & 0xFF;
+ buf[2] = (base >> 16) & 0xFF;
+ buf[3] = (base >> 24) & 0xFF;
+ regmap_raw_write(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+
+ return 0;
+}
+
+static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pm80x_rtc_info *info = dev_get_drvdata(dev);
+ unsigned char buf[4];
+ unsigned long ticks, base, data;
+ int ret;
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+ base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
+ data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+
+ rtc_time_to_tm(ticks, &alrm->time);
+ regmap_read(info->map, PM800_RTC_CONTROL, &ret);
+ alrm->enabled = (ret & PM800_ALARM1_EN) ? 1 : 0;
+ alrm->pending = (ret & (PM800_ALARM | PM800_ALARM_WAKEUP)) ? 1 : 0;
+ return 0;
+}
+
+static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct pm80x_rtc_info *info = dev_get_drvdata(dev);
+ struct rtc_time now_tm, alarm_tm;
+ unsigned long ticks, base, data;
+ unsigned char buf[4];
+ int mask;
+
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0);
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+ base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+ data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+
+ rtc_time_to_tm(ticks, &now_tm);
+ dev_dbg(info->dev, "%s, now time : %lu\n", __func__, ticks);
+ rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
+ /* get new ticks for alarm in 24 hours */
+ rtc_tm_to_time(&alarm_tm, &ticks);
+ dev_dbg(info->dev, "%s, alarm time: %lu\n", __func__, ticks);
+ data = ticks - base;
+
+ buf[0] = data & 0xff;
+ buf[1] = (data >> 8) & 0xff;
+ buf[2] = (data >> 16) & 0xff;
+ buf[3] = (data >> 24) & 0xff;
+ regmap_raw_write(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
+ if (alrm->enabled) {
+ mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN;
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, mask);
+ } else {
+ mask = PM800_ALARM | PM800_ALARM_WAKEUP | PM800_ALARM1_EN;
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, mask,
+ PM800_ALARM | PM800_ALARM_WAKEUP);
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops pm80x_rtc_ops = {
+ .read_time = pm80x_rtc_read_time,
+ .set_time = pm80x_rtc_set_time,
+ .read_alarm = pm80x_rtc_read_alarm,
+ .set_alarm = pm80x_rtc_set_alarm,
+ .alarm_irq_enable = pm80x_rtc_alarm_irq_enable,
+};
+
+#ifdef CONFIG_PM
+static int pm80x_rtc_suspend(struct device *dev)
+{
+ return pm80x_dev_suspend(dev);
+}
+
+static int pm80x_rtc_resume(struct device *dev)
+{
+ return pm80x_dev_resume(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pm80x_rtc_pm_ops, pm80x_rtc_suspend, pm80x_rtc_resume);
+
+static int __devinit pm80x_rtc_probe(struct platform_device *pdev)
+{
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm80x_platform_data *pm80x_pdata;
+ struct pm80x_rtc_pdata *pdata = NULL;
+ struct pm80x_rtc_info *info;
+ struct rtc_time tm;
+ unsigned long ticks = 0;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL)
+ dev_warn(&pdev->dev, "No platform data!\n");
+
+ info =
+ devm_kzalloc(&pdev->dev, sizeof(struct pm80x_rtc_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ info->irq = platform_get_irq(pdev, 0);
+ if (info->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ info->chip = chip;
+ info->map = chip->regmap;
+ if (!info->map) {
+ dev_err(&pdev->dev, "no regmap!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ info->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, info);
+
+ ret = pm80x_request_irq(chip, info->irq, rtc_update_handler,
+ IRQF_ONESHOT, "rtc", info);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+ info->irq, ret);
+ goto out;
+ }
+
+ ret = pm80x_rtc_read_time(&pdev->dev, &tm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read initial time.\n");
+ goto out_rtc;
+ }
+ if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
+ tm.tm_year = 70;
+ tm.tm_mon = 0;
+ tm.tm_mday = 1;
+ tm.tm_hour = 0;
+ tm.tm_min = 0;
+ tm.tm_sec = 0;
+ ret = pm80x_rtc_set_time(&pdev->dev, &tm);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set initial time.\n");
+ goto out_rtc;
+ }
+ }
+ rtc_tm_to_time(&tm, &ticks);
+
+ info->rtc_dev = rtc_device_register("88pm80x-rtc", &pdev->dev,
+ &pm80x_rtc_ops, THIS_MODULE);
+ if (IS_ERR(info->rtc_dev)) {
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+ goto out_rtc;
+ }
+ /*
+ * enable the internal XO instead of the internal 3.25MHz clock, since it
+ * can keep running in the PMIC power-down state.
+ */
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO,
+ PM800_RTC1_USE_XO);
+
+ if (pdev->dev.parent->platform_data) {
+ pm80x_pdata = pdev->dev.parent->platform_data;
+ pdata = pm80x_pdata->rtc;
+ if (pdata)
+ info->rtc_dev->dev.platform_data = &pdata->rtc_wakeup;
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+
+ return 0;
+out_rtc:
+ pm80x_free_irq(chip, info->irq, info);
+out:
+ return ret;
+}
+
+static int __devexit pm80x_rtc_remove(struct platform_device *pdev)
+{
+ struct pm80x_rtc_info *info = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(info->rtc_dev);
+ pm80x_free_irq(info->chip, info->irq, info);
+ return 0;
+}
+
+static struct platform_driver pm80x_rtc_driver = {
+ .driver = {
+ .name = "88pm80x-rtc",
+ .owner = THIS_MODULE,
+ .pm = &pm80x_rtc_pm_ops,
+ },
+ .probe = pm80x_rtc_probe,
+ .remove = __devexit_p(pm80x_rtc_remove),
+};
+
+module_platform_driver(pm80x_rtc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Marvell 88PM80x RTC driver");
+MODULE_AUTHOR("Qiao Zhou <zhouqiao@marvell.com>");
+MODULE_ALIAS("platform:88pm80x-rtc");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 370889d0489b..bf3c2f669c3c 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -89,22 +89,17 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (retval < 0)
return retval;
- /* Early AB8500 chips will not clear the rtc read request bit */
- if (abx500_get_chip_id(dev) == 0) {
- usleep_range(1000, 1000);
- } else {
- /* Wait for some cycles after enabling the rtc read in ab8500 */
- while (time_before(jiffies, timeout)) {
- retval = abx500_get_register_interruptible(dev,
- AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value);
- if (retval < 0)
- return retval;
-
- if (!(value & RTC_READ_REQUEST))
- break;
-
- usleep_range(1000, 5000);
- }
+ /* Wait for some cycles after enabling the rtc read in ab8500 */
+ while (time_before(jiffies, timeout)) {
+ retval = abx500_get_register_interruptible(dev,
+ AB8500_RTC, AB8500_RTC_READ_REQ_REG, &value);
+ if (retval < 0)
+ return retval;
+
+ if (!(value & RTC_READ_REQUEST))
+ break;
+
+ usleep_range(1000, 5000);
}
/* Read the Watchtime registers */
@@ -225,7 +220,8 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
int retval, i;
unsigned char buf[ARRAY_SIZE(ab8500_rtc_alarm_regs)];
- unsigned long mins, secs = 0;
+ unsigned long mins, secs = 0, cursec = 0;
+ struct rtc_time curtm;
if (alarm->time.tm_year < (AB8500_RTC_EPOCH - 1900)) {
dev_dbg(dev, "year should be equal to or greater than %d\n",
@@ -237,6 +233,18 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
rtc_tm_to_time(&alarm->time, &secs);
/*
+ * Check whether the alarm is set less than one minute from now.
+ * Since our RTC doesn't support alarm resolution below one minute,
+ * return -EINVAL so the UIE emulation can take over in case of UIE_ON
+ */
+ ab8500_rtc_read_time(dev, &curtm); /* Read current time */
+ rtc_tm_to_time(&curtm, &cursec);
+ if ((secs - cursec) < 59) {
+ dev_dbg(dev, "Alarm less than 1 minute not supported\r\n");
+ return -EINVAL;
+ }
+
+ /*
* Convert it to the number of seconds since 01-01-2000 00:00:00, since
* we only have a small counter in the RTC.
*/
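A quick worked example of the sub-minute check added above, which pairs with the RTC_INTF_DEV_UIE_EMUL selection in Kconfig: a request expiring less than a minute from now is rejected here and emulated in software instead. Numbers are illustrative only.

/* sketch: why alarms under one minute bounce back to the UIE emulation */
static int ab8500_alarm_resolution_example(void)
{
	unsigned long cursec = 1000000000UL;	/* current RTC time, seconds */
	unsigned long secs   = 1000000030UL;	/* requested alarm, 30s away  */

	if ((secs - cursec) < 59)
		return -EINVAL;	/* rtc-dev's UIE emulation takes over */
	return 0;
}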
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 831868904e02..1dd61f402b04 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -58,6 +58,7 @@ struct sam9_rtc {
struct rtc_device *rtcdev;
u32 imr;
void __iomem *gpbr;
+ int irq;
};
#define rtt_readl(rtc, field) \
@@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
{
struct resource *r, *r_gpbr;
struct sam9_rtc *rtc;
- int ret;
+ int ret, irq;
u32 mr;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource\n");
+ return irq;
+ }
+
rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ rtc->irq = irq;
+
/* platform setup code should have handled this; sigh */
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
@@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
}
/* register irq handler after we know what name we'll use */
- ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
- IRQF_SHARED,
+ ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED,
dev_name(&rtc->rtcdev->dev), rtc);
if (ret) {
- dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
+ dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
rtc_device_unregister(rtc->rtcdev);
goto fail_register;
}
@@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev)
/* disable all interrupts */
rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
- free_irq(AT91_ID_SYS, rtc);
+ free_irq(rtc->irq, rtc);
rtc_device_unregister(rtc->rtcdev);
@@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev,
rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
if (rtc->imr) {
if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
- enable_irq_wake(AT91_ID_SYS);
+ enable_irq_wake(rtc->irq);
/* don't let RTTINC cause wakeups */
if (mr & AT91_RTT_RTTINCIEN)
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev)
if (rtc->imr) {
if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(AT91_ID_SYS);
+ disable_irq_wake(rtc->irq);
mr = rtt_readl(rtc, MR);
rtt_writel(rtc, MR, mr | rtc->imr);
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 132333d75408..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
- pm_wakeup_event(cmos_rtc.dev, 0);
}
spin_unlock(&rtc_lock);
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index a5b8a0c4ea84..76b2156d3c62 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -155,13 +155,10 @@ static int __exit coh901331_remove(struct platform_device *pdev)
struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
if (rtap) {
- free_irq(rtap->irq, rtap);
rtc_device_unregister(rtap->rtc);
+ clk_unprepare(rtap->clk);
clk_put(rtap->clk);
- iounmap(rtap->virtbase);
- release_mem_region(rtap->phybase, rtap->physize);
platform_set_drvdata(pdev, NULL);
- kfree(rtap);
}
return 0;
@@ -174,49 +171,43 @@ static int __init coh901331_probe(struct platform_device *pdev)
struct coh901331_port *rtap;
struct resource *res;
- rtap = kzalloc(sizeof(struct coh901331_port), GFP_KERNEL);
+ rtap = devm_kzalloc(&pdev->dev,
+ sizeof(struct coh901331_port), GFP_KERNEL);
if (!rtap)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENOENT;
- goto out_no_resource;
- }
+ if (!res)
+ return -ENOENT;
+
rtap->phybase = res->start;
rtap->physize = resource_size(res);
- if (request_mem_region(rtap->phybase, rtap->physize,
- "rtc-coh901331") == NULL) {
- ret = -EBUSY;
- goto out_no_memregion;
- }
+ if (devm_request_mem_region(&pdev->dev, rtap->phybase, rtap->physize,
+ "rtc-coh901331") == NULL)
+ return -EBUSY;
- rtap->virtbase = ioremap(rtap->phybase, rtap->physize);
- if (!rtap->virtbase) {
- ret = -ENOMEM;
- goto out_no_remap;
- }
+ rtap->virtbase = devm_ioremap(&pdev->dev, rtap->phybase, rtap->physize);
+ if (!rtap->virtbase)
+ return -ENOMEM;
rtap->irq = platform_get_irq(pdev, 0);
- if (request_irq(rtap->irq, coh901331_interrupt, 0,
- "RTC COH 901 331 Alarm", rtap)) {
- ret = -EIO;
- goto out_no_irq;
- }
+ if (devm_request_irq(&pdev->dev, rtap->irq, coh901331_interrupt, 0,
+ "RTC COH 901 331 Alarm", rtap))
+ return -EIO;
rtap->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(rtap->clk)) {
ret = PTR_ERR(rtap->clk);
dev_err(&pdev->dev, "could not get clock\n");
- goto out_no_clk;
+ return ret;
}
/* We enable/disable the clock only to assure it works */
- ret = clk_enable(rtap->clk);
+ ret = clk_prepare_enable(rtap->clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clock\n");
- goto out_no_clk_enable;
+ goto out_no_clk_prepenable;
}
clk_disable(rtap->clk);
@@ -232,18 +223,9 @@ static int __init coh901331_probe(struct platform_device *pdev)
out_no_rtc:
platform_set_drvdata(pdev, NULL);
- out_no_clk_enable:
+ clk_unprepare(rtap->clk);
+ out_no_clk_prepenable:
clk_put(rtap->clk);
- out_no_clk:
- free_irq(rtap->irq, rtap);
- out_no_irq:
- iounmap(rtap->virtbase);
- out_no_remap:
- platform_set_drvdata(pdev, NULL);
- out_no_memregion:
- release_mem_region(rtap->phybase, SZ_4K);
- out_no_resource:
- kfree(rtap);
return ret;
}
@@ -265,6 +247,7 @@ static int coh901331_suspend(struct platform_device *pdev, pm_message_t state)
writel(0, rtap->virtbase + COH901331_IRQ_MASK);
clk_disable(rtap->clk);
}
+ clk_unprepare(rtap->clk);
return 0;
}
@@ -272,6 +255,7 @@ static int coh901331_resume(struct platform_device *pdev)
{
struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
+ clk_prepare(rtap->clk);
if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(rtap->irq);
} else {
@@ -293,6 +277,7 @@ static void coh901331_shutdown(struct platform_device *pdev)
clk_enable(rtap->clk);
writel(0, rtap->virtbase + COH901331_IRQ_MASK);
clk_disable(rtap->clk);
+ clk_unprepare(rtap->clk);
}
static struct platform_driver coh901331_driver = {
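The coh901331 conversion above follows the usual devm_* pattern: memory, the MMIO region, the mapping and the IRQ are now owned by &pdev->dev, so the error paths and coh901331_remove() no longer free them by hand. A generic sketch of that pattern, with hypothetical names:

#include <linux/platform_device.h>
#include <linux/io.h>

struct foo_port {
	void __iomem *base;
};

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct foo_port *port;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	port->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->base)
		return -ENOMEM;

	/*
	 * Everything above is released automatically on probe failure or
	 * after the device is removed, so no explicit unwinding is needed.
	 */
	return 0;
}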
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index da6ab5291a41..78070255bd3f 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -245,7 +245,7 @@ static int __devinit da9052_rtc_probe(struct platform_device *pdev)
"ALM", rtc);
if (ret != 0) {
rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
- goto err_mem;
+ return ret;
}
rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
@@ -259,8 +259,6 @@ static int __devinit da9052_rtc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(rtc->irq, rtc);
-err_mem:
- devm_kfree(&pdev->dev, rtc);
return ret;
}
@@ -271,7 +269,6 @@ static int __devexit da9052_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(rtc->rtc);
free_irq(rtc->irq, rtc);
platform_set_drvdata(pdev, NULL);
- devm_kfree(&pdev->dev, rtc);
return 0;
}
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 1459055a83aa..34e4349611db 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -69,6 +69,7 @@ struct max8925_rtc_info {
struct max8925_chip *chip;
struct i2c_client *rtc;
struct device *dev;
+ int irq;
};
static irqreturn_t rtc_update_handler(int irq, void *data)
@@ -250,7 +251,7 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct max8925_rtc_info *info;
- int irq, ret;
+ int ret;
info = kzalloc(sizeof(struct max8925_rtc_info), GFP_KERNEL);
if (!info)
@@ -258,13 +259,13 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
info->chip = chip;
info->rtc = chip->rtc;
info->dev = &pdev->dev;
- irq = chip->irq_base + MAX8925_IRQ_RTC_ALARM0;
+ info->irq = platform_get_irq(pdev, 0);
- ret = request_threaded_irq(irq, NULL, rtc_update_handler,
+ ret = request_threaded_irq(info->irq, NULL, rtc_update_handler,
IRQF_ONESHOT, "rtc-alarm0", info);
if (ret < 0) {
dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
- irq, ret);
+ info->irq, ret);
goto out_irq;
}
@@ -285,7 +286,7 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
return 0;
out_rtc:
platform_set_drvdata(pdev, NULL);
- free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
+ free_irq(info->irq, info);
out_irq:
kfree(info);
return ret;
@@ -296,7 +297,7 @@ static int __devexit max8925_rtc_remove(struct platform_device *pdev)
struct max8925_rtc_info *info = platform_get_drvdata(pdev);
if (info) {
- free_irq(info->chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
+ free_irq(info->irq, info);
rtc_device_unregister(info->rtc_dev);
kfree(info);
}
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 546f6850bffb..2643d8874925 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -404,9 +404,12 @@ static const struct platform_device_id mc13xxx_rtc_idtable[] = {
.name = "mc13783-rtc",
}, {
.name = "mc13892-rtc",
+ }, {
+ .name = "mc34708-rtc",
},
- { }
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, mc13xxx_rtc_idtable);
static struct platform_driver mc13xxx_rtc_driver = {
.id_table = mc13xxx_rtc_idtable,
@@ -432,4 +435,3 @@ module_exit(mc13xxx_rtc_exit);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("RTC driver for Freescale MC13XXX PMIC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index b2185f4255aa..ebc1649d45d6 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -297,7 +297,7 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static struct of_device_id rtc_mv_of_match_table[] = {
- { .compatible = "mrvl,orion-rtc", },
+ { .compatible = "marvell,orion-rtc", },
{}
};
#endif
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 836118795c0b..13e4df63974f 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -43,6 +43,7 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#define DRV_VERSION "0.6"
@@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi)
pdata->rtc = rtc;
for (i = 0; i < 16; i++) {
+ sysfs_attr_init(&pdata->regs[i].attr.attr);
sprintf(pdata->regs[i].name, "%1x", i);
pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
pdata->regs[i].attr.attr.name = pdata->regs[i].name;
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 97a3284bb7c6..c2fe426a6ef2 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
#define DRV_VERSION "0.4.3"
@@ -285,9 +286,19 @@ static const struct i2c_device_id pcf8563_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pcf8563_id);
+#ifdef CONFIG_OF
+static const struct of_device_id pcf8563_of_match[] __devinitconst = {
+ { .compatible = "nxp,pcf8563" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pcf8563_of_match);
+#endif
+
static struct i2c_driver pcf8563_driver = {
.driver = {
.name = "rtc-pcf8563",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf8563_of_match),
},
.probe = pcf8563_probe,
.remove = pcf8563_remove,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index cc0533994f6e..08378e3cc21c 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -68,11 +68,26 @@
#define RTC_TIMER_FREQ 32768
+/**
+ * struct pl031_vendor_data - per-vendor variations
+ * @ops: the vendor-specific operations used on this silicon version
+ * @clockwatch: if this is an ST Microelectronics silicon version with a
+ * clockwatch function
+ * @st_weekday: if this is an ST Microelectronics silicon version that need
+ * the weekday fix
+ * @irqflags: special IRQ flags per variant
+ */
+struct pl031_vendor_data {
+ struct rtc_class_ops ops;
+ bool clockwatch;
+ bool st_weekday;
+ unsigned long irqflags;
+};
+
struct pl031_local {
+ struct pl031_vendor_data *vendor;
struct rtc_device *rtc;
void __iomem *base;
- u8 hw_designer;
- u8 hw_revision:4;
};
static int pl031_alarm_irq_enable(struct device *dev,
@@ -303,7 +318,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;
- struct rtc_class_ops *ops = id->data;
+ struct pl031_vendor_data *vendor = id->data;
+ struct rtc_class_ops *ops = &vendor->ops;
unsigned long time;
ret = amba_request_regions(adev, NULL);
@@ -315,6 +331,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
ret = -ENOMEM;
goto out;
}
+ ldata->vendor = vendor;
ldata->base = ioremap(adev->res.start, resource_size(&adev->res));
@@ -325,14 +342,11 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, ldata);
- ldata->hw_designer = amba_manf(adev);
- ldata->hw_revision = amba_rev(adev);
-
- dev_dbg(&adev->dev, "designer ID = 0x%02x\n", ldata->hw_designer);
- dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
+ dev_dbg(&adev->dev, "designer ID = 0x%02x\n", amba_manf(adev));
+ dev_dbg(&adev->dev, "revision = 0x%01x\n", amba_rev(adev));
/* Enable the clockwatch on ST Variants */
- if (ldata->hw_designer == AMBA_VENDOR_ST)
+ if (vendor->clockwatch)
writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
ldata->base + RTC_CR);
@@ -340,7 +354,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
* On ST PL031 variants, the RTC reset value does not provide correct
* weekday for 2000-01-01. Correct the erroneous sunday to saturday.
*/
- if (ldata->hw_designer == AMBA_VENDOR_ST) {
+ if (vendor->st_weekday) {
if (readl(ldata->base + RTC_YDR) == 0x2000) {
time = readl(ldata->base + RTC_DR);
if ((time &
@@ -361,7 +375,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
}
if (request_irq(adev->irq[0], pl031_interrupt,
- 0, "rtc-pl031", ldata)) {
+ vendor->irqflags, "rtc-pl031", ldata)) {
ret = -EIO;
goto out_no_irq;
}
@@ -383,48 +397,65 @@ err_req:
}
/* Operations for the original ARM version */
-static struct rtc_class_ops arm_pl031_ops = {
- .read_time = pl031_read_time,
- .set_time = pl031_set_time,
- .read_alarm = pl031_read_alarm,
- .set_alarm = pl031_set_alarm,
- .alarm_irq_enable = pl031_alarm_irq_enable,
+static struct pl031_vendor_data arm_pl031 = {
+ .ops = {
+ .read_time = pl031_read_time,
+ .set_time = pl031_set_time,
+ .read_alarm = pl031_read_alarm,
+ .set_alarm = pl031_set_alarm,
+ .alarm_irq_enable = pl031_alarm_irq_enable,
+ },
+ .irqflags = IRQF_NO_SUSPEND,
};
/* The First ST derivative */
-static struct rtc_class_ops stv1_pl031_ops = {
- .read_time = pl031_read_time,
- .set_time = pl031_set_time,
- .read_alarm = pl031_read_alarm,
- .set_alarm = pl031_set_alarm,
- .alarm_irq_enable = pl031_alarm_irq_enable,
+static struct pl031_vendor_data stv1_pl031 = {
+ .ops = {
+ .read_time = pl031_read_time,
+ .set_time = pl031_set_time,
+ .read_alarm = pl031_read_alarm,
+ .set_alarm = pl031_set_alarm,
+ .alarm_irq_enable = pl031_alarm_irq_enable,
+ },
+ .clockwatch = true,
+ .st_weekday = true,
+ .irqflags = IRQF_NO_SUSPEND,
};
/* And the second ST derivative */
-static struct rtc_class_ops stv2_pl031_ops = {
- .read_time = pl031_stv2_read_time,
- .set_time = pl031_stv2_set_time,
- .read_alarm = pl031_stv2_read_alarm,
- .set_alarm = pl031_stv2_set_alarm,
- .alarm_irq_enable = pl031_alarm_irq_enable,
+static struct pl031_vendor_data stv2_pl031 = {
+ .ops = {
+ .read_time = pl031_stv2_read_time,
+ .set_time = pl031_stv2_set_time,
+ .read_alarm = pl031_stv2_read_alarm,
+ .set_alarm = pl031_stv2_set_alarm,
+ .alarm_irq_enable = pl031_alarm_irq_enable,
+ },
+ .clockwatch = true,
+ .st_weekday = true,
+ /*
+ * This variant shares the IRQ with another block and must not
+ * suspend that IRQ line.
+ */
+ .irqflags = IRQF_SHARED | IRQF_NO_SUSPEND,
};
static struct amba_id pl031_ids[] = {
{
.id = 0x00041031,
.mask = 0x000fffff,
- .data = &arm_pl031_ops,
+ .data = &arm_pl031,
},
/* ST Micro variants */
{
.id = 0x00180031,
.mask = 0x00ffffff,
- .data = &stv1_pl031_ops,
+ .data = &stv1_pl031,
},
{
.id = 0x00280031,
.mask = 0x00ffffff,
- .data = &stv2_pl031_ops,
+ .data = &stv2_pl031,
},
{0, 0},
};
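With the class ops folded into struct pl031_vendor_data, supporting another silicon revision becomes a data-only change: add one descriptor and point a new amba_id at it. A hypothetical example (the ID value is made up):

/* hypothetical further ST derivative, illustrative only */
static struct pl031_vendor_data stv3_pl031 = {
	.ops = {
		.read_time	= pl031_stv2_read_time,
		.set_time	= pl031_stv2_set_time,
		.read_alarm	= pl031_stv2_read_alarm,
		.set_alarm	= pl031_stv2_set_alarm,
		.alarm_irq_enable = pl031_alarm_irq_enable,
	},
	.clockwatch	= true,
	.st_weekday	= true,
	.irqflags	= IRQF_SHARED | IRQF_NO_SUSPEND,
};

/* matching table entry, added before the {0, 0} terminator:
 *	{ .id = 0x00380031, .mask = 0x00ffffff, .data = &stv3_pl031 },
 */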
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 33b6ba0afa0d..2c183ebff715 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -138,8 +138,7 @@ static int __devinit r9701_probe(struct spi_device *spi)
* contain invalid values. If so, try to write a default date:
* 2000/1/1 00:00:00
*/
- r9701_get_datetime(&spi->dev, &dt);
- if (rtc_valid_tm(&dt)) {
+ if (r9701_get_datetime(&spi->dev, &dt)) {
dev_info(&spi->dev, "trying to repair invalid date/time\n");
dt.tm_sec = 0;
dt.tm_min = 0;
@@ -148,7 +147,8 @@ static int __devinit r9701_probe(struct spi_device *spi)
dt.tm_mon = 0;
dt.tm_year = 100;
- if (r9701_set_datetime(&spi->dev, &dt)) {
+ if (r9701_set_datetime(&spi->dev, &dt) ||
+ r9701_get_datetime(&spi->dev, &dt)) {
dev_err(&spi->dev, "cannot repair RTC register\n");
return -ENODEV;
}
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 77074ccd2850..fd5c7af04ae5 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm)
tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK);
tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK);
if (!pdata->rtc_24h) {
- tm->tm_hour %= 12;
- if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM)
+ if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) {
+ tm->tm_hour -= 20;
+ tm->tm_hour %= 12;
tm->tm_hour += 12;
+ } else
+ tm->tm_hour %= 12;
}
tm->tm_wday = bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK);
tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK);
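The rs5c348 fix above only makes sense if the PM flag sits inside RS5C348_HOURS_MASK, so that bcd2bin() of a PM reading comes back offset by 20; that assumption is what the subtraction encodes. A worked example under that assumption, with illustrative register values:

#include <linux/bcd.h>

/* sketch of the 12-hour conversion performed in rs5c348_rtc_read_time() */
static int rs5c348_hours_example(u8 reg, bool pm)
{
	int hour = bcd2bin(reg);	/* e.g. 0x32 -> 32 when PM is set */

	if (pm) {
		hour -= 20;		/* 32 -> 12 */
		hour %= 12;		/* 12 -> 0  */
		hour += 12;		/* 0  -> 12, i.e. noon */
	} else {
		hour %= 12;		/* 0x12 -> 12 -> 0, i.e. midnight */
	}
	return hour;
}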
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7e6af0b22f17..bfbd92c8d1c9 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -26,10 +26,10 @@
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <mach/hardware.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
#include <asm/irq.h>
#include <plat/regs-rtc.h>
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 59c6245e0421..ea5c6f857ca5 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -24,7 +24,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-
+#include <linux/random.h>
/*
* R16416 (0x4020) - RTC Write Counter
@@ -96,6 +96,26 @@ struct wm831x_rtc {
unsigned int alarm_enabled:1;
};
+static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
+{
+ int ret;
+ u16 reg;
+
+ /*
+ * The write counter contains a pseudo-random number which is
+ * regenerated every time we set the RTC so it should be a
+ * useful per-system source of entropy.
+ */
+ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
+ if (ret >= 0) {
+ reg = ret;
+ add_device_randomness(&reg, sizeof(reg));
+ } else {
+ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
+ ret);
+ }
+}
+
/*
* Read current time and date in RTC
*/
@@ -431,6 +451,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
alm_irq, ret);
}
+ wm831x_rtc_add_randomness(wm831x);
+
return 0;
err:
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 40a826a7295f..2fb2b9ea97ec 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
}
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index cceae70279f6..654c6921a6d4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
break;
default:
/* if the discipline has an ioctl method try it. */
- if (base->discipline->ioctl) {
+ rc = -ENOTTY;
+ if (base->discipline->ioctl)
rc = base->discipline->ioctl(block, cmd, argp);
- if (rc == -ENOIOCTLCMD)
- rc = -EINVAL;
- } else
- rc = -EINVAL;
}
dasd_put_device(base);
return rc;
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 6a6f76bf6e3d..b1032931a1c4 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
switch (sdias_evbuf.event_status) {
case EVSTATE_ALL_STORED:
TRACE("all stored\n");
+ break;
case EVSTATE_PART_STORED:
TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
break;
case EVSTATE_NO_DATA:
TRACE("no data\n");
+ /* fall through */
default:
pr_err("Error from SCLP while copying hsa. "
"Event status = %x\n",
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 8160591913f9..4ffa66c87ea5 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1854,26 +1854,11 @@ static struct attribute_group netiucv_stat_attr_group = {
.attrs = netiucv_stat_attrs,
};
-static int netiucv_add_files(struct device *dev)
-{
- int ret;
-
- IUCV_DBF_TEXT(trace, 3, __func__);
- ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
- if (ret)
- return ret;
- ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
- if (ret)
- sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
- return ret;
-}
-
-static void netiucv_remove_files(struct device *dev)
-{
- IUCV_DBF_TEXT(trace, 3, __func__);
- sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
- sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
-}
+static const struct attribute_group *netiucv_attr_groups[] = {
+ &netiucv_stat_attr_group,
+ &netiucv_attr_group,
+ NULL,
+};
static int netiucv_register_device(struct net_device *ndev)
{
@@ -1887,6 +1872,7 @@ static int netiucv_register_device(struct net_device *ndev)
dev_set_name(dev, "net%s", ndev->name);
dev->bus = &iucv_bus;
dev->parent = iucv_root;
+ dev->groups = netiucv_attr_groups;
/*
* The release function could be called after the
* module has been unloaded. Its _only_ task is to
@@ -1904,22 +1890,14 @@ static int netiucv_register_device(struct net_device *ndev)
put_device(dev);
return ret;
}
- ret = netiucv_add_files(dev);
- if (ret)
- goto out_unreg;
priv->dev = dev;
dev_set_drvdata(dev, priv);
return 0;
-
-out_unreg:
- device_unregister(dev);
- return ret;
}
static void netiucv_unregister_device(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __func__);
- netiucv_remove_files(dev);
device_unregister(dev);
}
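The conversion above works because the driver core creates and removes any attribute groups listed in dev->groups around device_register()/device_unregister(), which is what lets the manual sysfs_create_group()/sysfs_remove_group() calls and their unwind label go away. A minimal, non-buildable sketch of that pattern, with invented names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "demo\n");
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL,
};

static int demo_register(struct device *dev)
{
	dev->groups = demo_groups;	/* must be set before device_register() */
	return device_register(dev);	/* the core adds and later removes the groups */
}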
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0cf706699a04..c5f03fa70fba 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1758,6 +1758,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "frvaddr4");
netdev = __vlan_find_dev_deep(card->dev, vid);
+ if (!netdev)
+ return;
in_dev = in_dev_get(netdev);
if (!in_dev)
return;
@@ -1786,6 +1788,8 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "frvaddr6");
netdev = __vlan_find_dev_deep(card->dev, vid);
+ if (!netdev)
+ return;
in6_dev = in6_dev_get(netdev);
if (!in6_dev)
return;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2d1e68db9b3f..e894ca7b54c0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -4146,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
static void
fc_bsg_remove(struct request_queue *q)
{
- struct request *req; /* block request */
- int counts; /* totals for request_list count and starved */
-
if (q) {
- /* Stop taking in new requests */
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
-
- /* drain all requests in the queue */
- while (1) {
- /* need the lock to fetch a request
- * this may fetch the same request as the previous pass
- */
- req = blk_fetch_request(q);
- /* save requests in use and starved */
- counts = q->rq.count[0] + q->rq.count[1] +
- q->rq.starved[0] + q->rq.starved[1];
- spin_unlock_irq(q->queue_lock);
- /* any requests still outstanding? */
- if (counts == 0)
- break;
-
- /* This may be the same req as the previous iteration,
- * always send the blk_end_request_all after a prefetch.
- * It is not okay to not end the request because the
- * prefetch started the request.
- */
- if (req) {
- /* return -ENXIO to indicate that this queue is
- * going away
- */
- req->errors = -ENXIO;
- blk_end_request_all(req, -ENXIO);
- }
-
- msleep(200); /* allow bsg to possibly finish */
- spin_lock_irq(q->queue_lock);
- }
-
bsg_unregister_queue(q);
blk_cleanup_queue(q);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 09809d06eccb..fa1dfaa83e32 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc,
struct iscsi_cls_host *ihost = shost->shost_data;
if (ihost->bsg_q) {
- bsg_remove_queue(ihost->bsg_q);
+ bsg_unregister_queue(ihost->bsg_q);
blk_cleanup_queue(ihost->bsg_q);
}
return 0;
diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index c88cbccc62b0..a305731742a9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -1,3 +1,7 @@
+config SH_INTC
+ def_bool y
+ select IRQ_DOMAIN
+
comment "Interrupt controller options"
config INTC_USERIMASK
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
index 44f006d09471..54ec2a0643df 100644
--- a/drivers/sh/intc/Makefile
+++ b/drivers/sh/intc/Makefile
@@ -1,4 +1,4 @@
-obj-y := access.o chip.o core.o handle.o virq.o
+obj-y := access.o chip.o core.o handle.o irqdomain.o virq.o
obj-$(CONFIG_INTC_BALANCING) += balancing.o
obj-$(CONFIG_INTC_USERIMASK) += userimask.o
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 7e562ccb6997..32c26d795ed0 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,6 +25,7 @@
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
+#include <linux/irqdomain.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
@@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc)
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
+ intc_irq_domain_init(d, hw);
+
/* register the vectors one by one */
for (i = 0; i < hw->nr_vectors; i++) {
struct intc_vect *vect = hw->vectors + i;
@@ -319,10 +322,18 @@ int __init register_intc_controller(struct intc_desc *desc)
if (!vect->enum_id)
continue;
- res = irq_alloc_desc_at(irq, numa_node_id());
- if (res != irq && res != -EEXIST) {
- pr_err("can't get irq_desc for %d\n", irq);
- continue;
+ res = irq_create_identity_mapping(d->domain, irq);
+ if (unlikely(res)) {
+ if (res == -EEXIST) {
+ res = irq_domain_associate(d->domain, irq, irq);
+ if (unlikely(res)) {
+ pr_err("domain association failure\n");
+ continue;
+ }
+ } else {
+ pr_err("can't identity map IRQ %d\n", irq);
+ continue;
+ }
}
intc_irq_xlate_set(irq, vect->enum_id, d);
@@ -340,10 +351,21 @@ int __init register_intc_controller(struct intc_desc *desc)
* IRQ support, each vector still needs to have
* its own backing irq_desc.
*/
- res = irq_alloc_desc_at(irq2, numa_node_id());
- if (res != irq2 && res != -EEXIST) {
- pr_err("can't get irq_desc for %d\n", irq2);
- continue;
+ res = irq_create_identity_mapping(d->domain, irq2);
+ if (unlikely(res)) {
+ if (res == -EEXIST) {
+ res = irq_domain_associate(d->domain,
+ irq, irq);
+ if (unlikely(res)) {
+ pr_err("domain association "
+ "failure\n");
+ continue;
+ }
+ } else {
+ pr_err("can't identity map IRQ %d\n",
+ irq);
+ continue;
+ }
}
vect2->enum_id = 0;
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index f034a979a16f..7dff08e2a071 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -1,5 +1,6 @@
#include <linux/sh_intc.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -66,6 +67,7 @@ struct intc_desc_int {
unsigned int nr_sense;
struct intc_window *window;
unsigned int nr_windows;
+ struct irq_domain *domain;
struct irq_chip chip;
bool skip_suspend;
};
@@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq);
void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
intc_enum enum_id, int enable);
+/* irqdomain.c */
+void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw);
+
/* virq.c */
void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c
new file mode 100644
index 000000000000..3968f1c3c5c3
--- /dev/null
+++ b/drivers/sh/intc/irqdomain.c
@@ -0,0 +1,68 @@
+/*
+ * IRQ domain support for SH INTC subsystem
+ *
+ * Copyright (C) 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include <linux/irqdomain.h>
+#include <linux/sh_intc.h>
+#include <linux/export.h>
+#include "internals.h"
+
+/**
+ * intc_evt_xlate() - Generic xlate for vectored IRQs.
+ *
+ * This takes care of exception vector to hwirq translation by way of
+ * the evt2irq() helper.
+ *
+ * Note: For platforms that use a flat vector space without INTEVT this
+ * basically just mimics irq_domain_xlate_onecell() by way of a nopped
+ * out evt2irq() implementation.
+ */
+static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ *out_hwirq = evt2irq(intspec[0]);
+ *out_type = IRQ_TYPE_NONE;
+
+ return 0;
+}
+
+static const struct irq_domain_ops intc_evt_ops = {
+ .xlate = intc_evt_xlate,
+};
+
+void __init intc_irq_domain_init(struct intc_desc_int *d,
+ struct intc_hw_desc *hw)
+{
+ unsigned int irq_base, irq_end;
+
+ /*
+ * Quick linear revmap check
+ */
+ irq_base = evt2irq(hw->vectors[0].vect);
+ irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect);
+
+ /*
+ * Linear domains have a hard-wired assertion that IRQs start at
+ * 0 in order to make some performance optimizations. Lamely
+ * restrict the linear case to these conditions here, taking the
+ * tree penalty for linear cases with non-zero hwirq bases.
+ */
+ if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
+ d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
+ &intc_evt_ops, NULL);
+ else
+ d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
+
+ BUG_ON(!d->domain);
+}
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index 0802b6c0d653..2804eaae804e 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -276,7 +276,6 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long config)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct sh_pfc *pfc = pmx->pfc;
/* Validate the new type */
if (config >= PINMUX_FLAG_TYPE)
@@ -326,20 +325,6 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = {
.confops = &sh_pfc_pinconf_ops,
};
-int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
-{
- sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
- if (unlikely(!sh_pfc_pmx))
- return -ENOMEM;
-
- spin_lock_init(&sh_pfc_pmx->lock);
-
- sh_pfc_pmx->pfc = pfc;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
-
static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
struct sh_pfc_pinctrl *pmx,
struct pinmux_gpio *gpio,
@@ -481,7 +466,6 @@ static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev)
{
struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
- pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
pinctrl_unregister(pmx->pctl);
platform_set_drvdata(pdev, NULL);
@@ -507,7 +491,7 @@ static struct platform_device sh_pfc_pinctrl_device = {
.id = -1,
};
-static int __init sh_pfc_pinctrl_init(void)
+static int sh_pfc_pinctrl_init(void)
{
int rc;
@@ -521,10 +505,22 @@ static int __init sh_pfc_pinctrl_init(void)
return rc;
}
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
+{
+ sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
+ if (unlikely(!sh_pfc_pmx))
+ return -ENOMEM;
+
+ spin_lock_init(&sh_pfc_pmx->lock);
+
+ sh_pfc_pmx->pfc = pfc;
+
+ return sh_pfc_pinctrl_init();
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
+
static void __exit sh_pfc_pinctrl_exit(void)
{
platform_driver_unregister(&sh_pfc_pinctrl_driver);
}
-
-subsys_initcall(sh_pfc_pinctrl_init);
module_exit(sh_pfc_pinctrl_exit);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4cde4fb0cd6c..5f84b5563c2d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -144,6 +144,15 @@ config SPI_EP93XX
This enables using the Cirrus EP93xx SPI controller in master
mode.
+config SPI_FALCON
+ tristate "Falcon SPI controller support"
+ depends on SOC_FALCON
+ help
+ The external bus unit (EBU) found on the FALC-ON SoC has SPI
+ emulation that is designed for serial flash access. This driver
+ has only been tested with m25p80 type chips. The hardware has no
+ support for other types of SPI peripherals.
+
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GENERIC_GPIO
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 273f50d1127a..3920dcf4c740 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
+obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 6e25ef1bce91..a9f4049c6769 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -47,6 +47,8 @@ struct bcm63xx_spi {
/* Platform data */
u32 speed_hz;
unsigned fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
/* Data buffers */
const unsigned char *tx_ptr;
@@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
if (t->rx_buf && t->tx_buf)
- msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT);
+ msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
else if (t->rx_buf)
- msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT);
+ msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
else if (t->tx_buf)
- msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT);
-
- bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+ msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
+
+ switch (bs->msg_ctl_width) {
+ case 8:
+ bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ case 16:
+ bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ }
/* Issue the transfer */
cmd = SPI_CMD_START_IMMEDIATE;
@@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
bs->speed_hz = pdata->speed_hz;
+ bs->msg_type_shift = pdata->msg_type_shift;
+ bs->msg_ctl_width = pdata->msg_ctl_width;
bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
+ switch (bs->msg_ctl_width) {
+ case 8:
+ case 16:
+ break;
+ default:
+ dev_err(dev, "unsupported MSG_CTL width: %d\n",
+ bs->msg_ctl_width);
+ goto out_clk_disable;
+ }
+
/* Initialize hardware */
clk_enable(bs->clk);
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
@@ -438,7 +459,7 @@ out:
static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
spi_unregister_master(master);
@@ -452,6 +473,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, 0);
+ spi_master_put(master);
+
return 0;
}
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index b2d4b9e4e010..764bfee75920 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
iounmap(mcfqspi->iobase);
release_mem_region(res->start, resource_size(res));
spi_unregister_master(master);
- spi_master_put(master);
return 0;
}
@@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int mcfqspi_suspend(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
spi_master_suspend(master);
@@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev)
static int mcfqspi_resume(struct device *dev)
{
- struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
spi_master_resume(master);
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
new file mode 100644
index 000000000000..8f6aa735a24c
--- /dev/null
+++ b/drivers/spi/spi-falcon.c
@@ -0,0 +1,469 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+#define DRV_NAME "sflash-falcon"
+
+#define FALCON_SPI_XFER_BEGIN (1 << 0)
+#define FALCON_SPI_XFER_END (1 << 1)
+
+/* Bus Read Configuration Register0 */
+#define BUSRCON0 0x00000010
+/* Bus Write Configuration Register0 */
+#define BUSWCON0 0x00000018
+/* Serial Flash Configuration Register */
+#define SFCON 0x00000080
+/* Serial Flash Time Register */
+#define SFTIME 0x00000084
+/* Serial Flash Status Register */
+#define SFSTAT 0x00000088
+/* Serial Flash Command Register */
+#define SFCMD 0x0000008C
+/* Serial Flash Address Register */
+#define SFADDR 0x00000090
+/* Serial Flash Data Register */
+#define SFDATA 0x00000094
+/* Serial Flash I/O Control Register */
+#define SFIO 0x00000098
+/* EBU Clock Control Register */
+#define EBUCC 0x000000C4
+
+/* Dummy Phase Length */
+#define SFCMD_DUMLEN_OFFSET 16
+#define SFCMD_DUMLEN_MASK 0x000F0000
+/* Chip Select */
+#define SFCMD_CS_OFFSET 24
+#define SFCMD_CS_MASK 0x07000000
+/* Address Length */
+#define SFCMD_ALEN_OFFSET 20
+#define SFCMD_ALEN_MASK 0x00700000
+/* SCK Rise-edge Position */
+#define SFTIME_SCKR_POS_OFFSET 8
+#define SFTIME_SCKR_POS_MASK 0x00000F00
+/* SCK Period */
+#define SFTIME_SCK_PER_OFFSET 0
+#define SFTIME_SCK_PER_MASK 0x0000000F
+/* SCK Fall-edge Position */
+#define SFTIME_SCKF_POS_OFFSET 12
+#define SFTIME_SCKF_POS_MASK 0x0000F000
+/* Device Size */
+#define SFCON_DEV_SIZE_A23_0 0x03000000
+#define SFCON_DEV_SIZE_MASK 0x0F000000
+/* Read Data Position */
+#define SFTIME_RD_POS_MASK 0x000F0000
+/* Data Output */
+#define SFIO_UNUSED_WD_MASK 0x0000000F
+/* Command Opcode mask */
+#define SFCMD_OPC_MASK 0x000000FF
+/* dlen bytes of data to write */
+#define SFCMD_DIR_WRITE 0x00000100
+/* Data Length offset */
+#define SFCMD_DLEN_OFFSET 9
+/* Command Error */
+#define SFSTAT_CMD_ERR 0x20000000
+/* Access Command Pending */
+#define SFSTAT_CMD_PEND 0x00400000
+/* Frequency set to 100MHz. */
+#define EBUCC_EBUDIV_SELF100 0x00000001
+/* Serial Flash */
+#define BUSRCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* 8-bit multiplexed */
+#define BUSRCON0_PORTW_8_BIT_MUX 0x00000000
+/* Serial Flash */
+#define BUSWCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* Chip Select after opcode */
+#define SFCMD_KEEP_CS_KEEP_SELECTED 0x00008000
+
+#define CLOCK_100M 100000000
+#define CLOCK_50M 50000000
+
+struct falcon_sflash {
+ u32 sfcmd; /* for caching of opcode, direction, ... */
+ struct spi_master *master;
+};
+
+int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
+ unsigned long flags)
+{
+ struct device *dev = &spi->dev;
+ struct falcon_sflash *priv = spi_master_get_devdata(spi->master);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int bytelen = ((8 * t->len + 7) / 8);
+ unsigned int len, alen, dumlen;
+ u32 val;
+ enum {
+ state_init,
+ state_command_prepare,
+ state_write,
+ state_read,
+ state_disable_cs,
+ state_end
+ } state = state_init;
+
+ do {
+ switch (state) {
+ case state_init: /* detect phase of upper layer sequence */
+ {
+ /* initial write ? */
+ if (flags & FALCON_SPI_XFER_BEGIN) {
+ if (!txp) {
+ dev_err(dev,
+ "BEGIN without tx data!\n");
+ return -ENODATA;
+ }
+ /*
+ * Prepare the parts of the sfcmd register,
+ * which should not change during a sequence!
+ * The only exceptions are the length fields,
+ * especially alen and dumlen.
+ */
+
+ priv->sfcmd = ((spi->chip_select
+ << SFCMD_CS_OFFSET)
+ & SFCMD_CS_MASK);
+ priv->sfcmd |= SFCMD_KEEP_CS_KEEP_SELECTED;
+ priv->sfcmd |= *txp;
+ txp++;
+ bytelen--;
+ if (bytelen) {
+ /*
+ * more data:
+ * maybe address and/or dummy
+ */
+ state = state_command_prepare;
+ break;
+ } else {
+ dev_dbg(dev, "write cmd %02X\n",
+ priv->sfcmd & SFCMD_OPC_MASK);
+ }
+ }
+ /* continued write ? */
+ if (txp && bytelen) {
+ state = state_write;
+ break;
+ }
+ /* read data? */
+ if (rxp && bytelen) {
+ state = state_read;
+ break;
+ }
+ /* end of sequence? */
+ if (flags & FALCON_SPI_XFER_END)
+ state = state_disable_cs;
+ else
+ state = state_end;
+ break;
+ }
+ /* collect tx data for address and dummy phase */
+ case state_command_prepare:
+ {
+ /* txp is valid, already checked */
+ val = 0;
+ alen = 0;
+ dumlen = 0;
+ while (bytelen > 0) {
+ if (alen < 3) {
+ val = (val << 8) | (*txp++);
+ alen++;
+ } else if ((dumlen < 15) && (*txp == 0)) {
+ /*
+ * assume dummy bytes are set to 0
+ * by the upper layer
+ */
+ dumlen++;
+ txp++;
+ } else {
+ break;
+ }
+ bytelen--;
+ }
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK | SFCMD_DUMLEN_MASK);
+ priv->sfcmd |= (alen << SFCMD_ALEN_OFFSET) |
+ (dumlen << SFCMD_DUMLEN_OFFSET);
+ if (alen > 0)
+ ltq_ebu_w32(val, SFADDR);
+
+ dev_dbg(dev, "wr %02X, alen=%d (addr=%06X) dlen=%d\n",
+ priv->sfcmd & SFCMD_OPC_MASK,
+ alen, val, dumlen);
+
+ if (bytelen > 0) {
+ /* continue with write */
+ state = state_write;
+ } else if (flags & FALCON_SPI_XFER_END) {
+ /* end of sequence? */
+ state = state_disable_cs;
+ } else {
+ /*
+ * go to end and expect another
+ * call (read or write)
+ */
+ state = state_end;
+ }
+ break;
+ }
+ case state_write:
+ {
+ /* txp still valid */
+ priv->sfcmd |= SFCMD_DIR_WRITE;
+ len = 0;
+ val = 0;
+ do {
+ if (bytelen--)
+ val |= (*txp++) << (8 * len++);
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen == 0)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ if ((len == 4) || (bytelen == 0)) {
+ ltq_ebu_w32(val, SFDATA);
+ ltq_ebu_w32(priv->sfcmd
+ | (len<<SFCMD_DLEN_OFFSET),
+ SFCMD);
+ len = 0;
+ val = 0;
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ }
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_read:
+ {
+ /* read data */
+ priv->sfcmd &= ~SFCMD_DIR_WRITE;
+ do {
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen <= 4)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ len = (bytelen > 4) ? 4 : bytelen;
+ bytelen -= len;
+ ltq_ebu_w32(priv->sfcmd
+ | (len << SFCMD_DLEN_OFFSET), SFCMD);
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ do {
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR");
+ dev_err(dev, " (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR,
+ SFSTAT);
+ return -EBADE;
+ }
+ } while (val & SFSTAT_CMD_PEND);
+ val = ltq_ebu_r32(SFDATA);
+ do {
+ *rxp = (val & 0xFF);
+ rxp++;
+ val >>= 8;
+ len--;
+ } while (len);
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_disable_cs:
+ {
+ priv->sfcmd &= ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ ltq_ebu_w32(priv->sfcmd | (0 << SFCMD_DLEN_OFFSET),
+ SFCMD);
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR, SFSTAT);
+ return -EBADE;
+ }
+ state = state_end;
+ break;
+ }
+ case state_end:
+ break;
+ }
+ } while (state != state_end);
+
+ return 0;
+}
+
+static int falcon_sflash_setup(struct spi_device *spi)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ if (spi->chip_select > 0)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+
+ if (spi->max_speed_hz >= CLOCK_100M) {
+ /* set EBU clock to 100 MHz */
+ ltq_sys1_w32_mask(0, EBUCC_EBUDIV_SELF100, EBUCC);
+ i = 1; /* divider */
+ } else {
+ /* set EBU clock to 50 MHz */
+ ltq_sys1_w32_mask(EBUCC_EBUDIV_SELF100, 0, EBUCC);
+
+ /* search for suitable divider */
+ for (i = 1; i < 7; i++) {
+ if (CLOCK_50M / i <= spi->max_speed_hz)
+ break;
+ }
+ }
+
+ /* setup period of serial clock */
+ ltq_ebu_w32_mask(SFTIME_SCKF_POS_MASK
+ | SFTIME_SCKR_POS_MASK
+ | SFTIME_SCK_PER_MASK,
+ (i << SFTIME_SCKR_POS_OFFSET)
+ | (i << (SFTIME_SCK_PER_OFFSET + 1)),
+ SFTIME);
+
+ /*
+ * set some bits of unused_wd, to not trigger HOLD/WP
+ * signals on non QUAD flashes
+ */
+ ltq_ebu_w32((SFIO_UNUSED_WD_MASK & (0x8 | 0x4)), SFIO);
+
+ ltq_ebu_w32(BUSRCON0_AGEN_SERIAL_FLASH | BUSRCON0_PORTW_8_BIT_MUX,
+ BUSRCON0);
+ ltq_ebu_w32(BUSWCON0_AGEN_SERIAL_FLASH, BUSWCON0);
+ /* set address wrap around to maximum for 24-bit addresses */
+ ltq_ebu_w32_mask(SFCON_DEV_SIZE_MASK, SFCON_DEV_SIZE_A23_0, SFCON);
+
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return 0;
+}
+
+static int falcon_sflash_prepare_xfer(struct spi_master *master)
+{
+ return 0;
+}
+
+static int falcon_sflash_unprepare_xfer(struct spi_master *master)
+{
+ return 0;
+}
+
+static int falcon_sflash_xfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct falcon_sflash *priv = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long spi_flags;
+ unsigned long flags;
+ int ret = 0;
+
+ priv->sfcmd = 0;
+ m->actual_length = 0;
+
+ spi_flags = FALCON_SPI_XFER_BEGIN;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (list_is_last(&t->transfer_list, &m->transfers))
+ spi_flags |= FALCON_SPI_XFER_END;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = falcon_sflash_xfer(m->spi, t, spi_flags);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ if (ret)
+ break;
+
+ m->actual_length += t->len;
+
+ WARN_ON(t->delay_usecs || t->cs_change);
+ spi_flags = 0;
+ }
+
+ m->status = ret;
+ m->complete(m->context);
+
+ return 0;
+}
+
+static int __devinit falcon_sflash_probe(struct platform_device *pdev)
+{
+ struct falcon_sflash *priv;
+ struct spi_master *master;
+ int ret;
+
+ if (ltq_boot_select() != BS_SPI) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+
+ master->mode_bits = SPI_MODE_3;
+ master->num_chipselect = 1;
+ master->bus_num = -1;
+ master->setup = falcon_sflash_setup;
+ master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
+ master->transfer_one_message = falcon_sflash_xfer_one;
+ master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
+ master->dev.of_node = pdev->dev.of_node;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = spi_register_master(master);
+ if (ret)
+ spi_master_put(master);
+ return ret;
+}
+
+static int __devexit falcon_sflash_remove(struct platform_device *pdev)
+{
+ struct falcon_sflash *priv = platform_get_drvdata(pdev);
+
+ spi_unregister_master(priv->master);
+
+ return 0;
+}
+
+static const struct of_device_id falcon_sflash_match[] = {
+ { .compatible = "lantiq,sflash-falcon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, falcon_sflash_match);
+
+static struct platform_driver falcon_sflash_driver = {
+ .probe = falcon_sflash_probe,
+ .remove = __devexit_p(falcon_sflash_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = falcon_sflash_match,
+ }
+};
+
+module_platform_driver(falcon_sflash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lantiq Falcon SPI/SFLASH controller driver");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7d46b15e1520..b2fb141da375 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -28,6 +28,8 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -39,7 +41,6 @@
#include <linux/spi/spi.h>
-#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>
@@ -93,8 +94,8 @@
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
- int dma_tx_channel;
- int dma_rx_channel;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
int dma_tx_sync_dev;
int dma_rx_sync_dev;
@@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
return 0;
}
+static void omap2_mcspi_rx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_rx_completion);
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ complete(&mcspi_dma->dma_tx_completion);
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+}
+
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi;
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi_dma *mcspi_dma;
- unsigned int count, c;
- unsigned long base, tx_reg, rx_reg;
- int word_len, data_type, element_count;
+ unsigned int count;
+ int word_len, element_count;
int elements = 0;
u32 l;
u8 * rx;
const u8 * tx;
void __iomem *chstat_reg;
+ struct dma_slave_config cfg;
+ enum dma_slave_buswidth width;
+ unsigned es;
mcspi = spi_master_get_devdata(spi->master);
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (cs->word_len <= 8) {
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ es = 1;
+ } else if (cs->word_len <= 16) {
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ es = 2;
+ } else {
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ es = 4;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+ cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+ cfg.src_addr_width = width;
+ cfg.dst_addr_width = width;
+ cfg.src_maxburst = 1;
+ cfg.dst_maxburst = 1;
+
+ if (xfer->tx_buf && mcspi_dma->dma_tx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->tx_dma;
+ sg_dma_len(&sg) = xfer->len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
+ if (xfer->rx_buf && mcspi_dma->dma_rx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+ size_t len = xfer->len - es;
+
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO)
+ len -= es;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->rx_dma;
+ sg_dma_len(&sg) = len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
count = xfer->len;
- c = count;
word_len = cs->word_len;
- base = cs->phys;
- tx_reg = base + OMAP2_MCSPI_TX0;
- rx_reg = base + OMAP2_MCSPI_RX0;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
if (word_len <= 8) {
- data_type = OMAP_DMA_DATA_TYPE_S8;
element_count = count;
} else if (word_len <= 16) {
- data_type = OMAP_DMA_DATA_TYPE_S16;
element_count = count >> 1;
} else /* word_len <= 32 */ {
- data_type = OMAP_DMA_DATA_TYPE_S32;
element_count = count >> 2;
}
if (tx != NULL) {
- omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
- data_type, element_count, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_tx_sync_dev, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- tx_reg, 0, 0);
-
- omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->tx_dma, 0, 0);
- }
-
- if (rx != NULL) {
- elements = element_count - 1;
- if (l & OMAP2_MCSPI_CHCONF_TURBO)
- elements--;
-
- omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
- data_type, elements, 1,
- OMAP_DMA_SYNC_ELEMENT,
- mcspi_dma->dma_rx_sync_dev, 1);
-
- omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_CONSTANT,
- rx_reg, 0, 0);
-
- omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
- OMAP_DMA_AMODE_POST_INC,
- xfer->rx_dma, 0, 0);
- }
-
- if (tx != NULL) {
- omap_start_dma(mcspi_dma->dma_tx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 1);
}
if (rx != NULL) {
- omap_start_dma(mcspi_dma->dma_rx_channel);
+ dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
}
@@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
DMA_FROM_DEVICE);
omap2_mcspi_set_enable(spi, 0);
+ elements = element_count - 1;
+
if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ elements--;
if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
& OMAP2_MCSPI_CHSTAT_RXS)) {
@@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
return 0;
}
-static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_rx_completion);
-
- /* We must disable the DMA RX request */
- omap2_mcspi_set_dma_req(spi, 1, 0);
-}
-
-static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
-{
- struct spi_device *spi = data;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
-
- mcspi = spi_master_get_devdata(spi->master);
- mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
-
- complete(&mcspi_dma->dma_tx_completion);
-
- /* We must disable the DMA TX request */
- omap2_mcspi_set_dma_req(spi, 0, 0);
-}
-
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
struct spi_master *master = spi->master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
+ dma_cap_mask_t mask;
+ unsigned sig;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
- if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
- omap2_mcspi_dma_rx_callback, spi,
- &mcspi_dma->dma_rx_channel)) {
- dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = mcspi_dma->dma_rx_sync_dev;
+ mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_rx) {
+ dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
return -EAGAIN;
}
- if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
- omap2_mcspi_dma_tx_callback, spi,
- &mcspi_dma->dma_tx_channel)) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
- dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
+ sig = mcspi_dma->dma_tx_sync_dev;
+ mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+ if (!mcspi_dma->dma_tx) {
+ dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
return -EAGAIN;
}
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
return 0;
}
@@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
list_add_tail(&cs->node, &ctx->cs);
}
- if (mcspi_dma->dma_rx_channel == -1
- || mcspi_dma->dma_tx_channel == -1) {
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
if (ret < 0)
return ret;
@@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
if (spi->chip_select < spi->master->num_chipselect) {
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
- if (mcspi_dma->dma_rx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_rx_channel);
- mcspi_dma->dma_rx_channel = -1;
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
}
- if (mcspi_dma->dma_tx_channel != -1) {
- omap_free_dma(mcspi_dma->dma_tx_channel);
- mcspi_dma->dma_tx_channel = -1;
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
}
}
}
@@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_rx_channel = -1;
mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
sprintf(dma_ch_name, "tx%d", i);
dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
@@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
break;
}
- mcspi->dma_channels[i].dma_tx_channel = -1;
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
@@ -1203,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
status = spi_register_master(master);
if (status < 0)
- goto err_spi_register;
+ goto disable_pm;
return status;
-err_spi_register:
- spi_master_put(master);
disable_pm:
pm_runtime_disable(&pdev->dev);
dma_chnl_free:
kfree(mcspi->dma_channels);
free_master:
- kfree(master);
+ spi_master_put(master);
platform_set_drvdata(pdev, NULL);
return status;
}
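The McSPI conversion above follows the generic dmaengine slave pattern that replaces the OMAP-private DMA calls: grab a DMA_SLAVE channel via dma_request_channel() and a filter, describe the peripheral FIFO with dmaengine_slave_config(), prepare a slave scatter-gather descriptor, hook a completion callback, then dmaengine_submit() and dma_async_issue_pending(). A condensed, non-buildable sketch of the receive side (the FIFO address and buffer are placeholders, and the buffer is assumed to be DMA-mapped already):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

static void demo_dma_done(void *param)
{
	complete(param);			/* wake whoever waits on the transfer */
}

/* One DEV_TO_MEM slave transfer of 'len' bytes from 'fifo' into 'buf'. */
static int demo_dma_rx(struct dma_chan *chan, dma_addr_t buf, size_t len,
		       dma_addr_t fifo, struct completion *done)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo,		/* peripheral FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst	= 1,
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = demo_dma_done;
	desc->callback_param = done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);		/* actually start the transfer */
	return 0;
}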
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index aab518ec2bbc..6abbe23c39b4 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
- pm_runtime_enable(dev);
pm_runtime_resume(dev);
pl022->clk = clk_get(&adev->dev, NULL);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 646a7657fe62..d1c8441f638c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -826,7 +826,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs;
- struct device_node *slave_np, *data_np;
+ struct device_node *slave_np, *data_np = NULL;
u32 fb_delay = 0;
slave_np = spi->dev.of_node;
@@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
s3c64xx_spi_runtime_resume, NULL)
};
-struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
.fifo_lvl_mask = { 0x7f },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.high_speed = true,
};
-struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
};
-struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
+static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
};
-struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
+static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
.fifo_lvl_mask = { 0x7f, 0x7F },
.rx_lvl_offset = 13,
.tx_st_done = 21,
.high_speed = true,
};
-struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.high_speed = true,
};
-struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
.rx_lvl_offset = 15,
.tx_st_done = 25,
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index 9a60d4cd2184..f545716c666d 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -157,12 +157,7 @@ static int create_worker_threads(struct bcm_mini_adapter *psAdapter)
static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path)
{
- struct file *flp = NULL;
- mm_segment_t oldfs;
- oldfs = get_fs();
- set_fs(get_ds());
- flp = filp_open(path, O_RDONLY, S_IRWXU);
- set_fs(oldfs);
+ struct file *flp = filp_open(path, O_RDONLY, S_IRWXU);
if (IS_ERR(flp)) {
pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp));
flp = NULL;
@@ -183,14 +178,12 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
{
int errorno = 0;
struct file *flp = NULL;
- mm_segment_t oldfs;
struct timeval tv = {0};
flp = open_firmware_file(Adapter, path);
if (!flp) {
- errorno = -ENOENT;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path);
- goto exit_download;
+ return -ENOENT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc);
do_gettimeofday(&tv);
@@ -201,10 +194,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
errorno = -EIO;
goto exit_download;
}
- oldfs = get_fs();
- set_fs(get_ds());
vfs_llseek(flp, 0, 0);
- set_fs(oldfs);
if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!");
errorno = -EIO;
@@ -212,12 +202,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u
}
exit_download:
- oldfs = get_fs();
- set_fs(get_ds());
- if (flp && !(IS_ERR(flp)))
- filp_close(flp, current->files);
- set_fs(oldfs);
-
+ filp_close(flp, NULL);
return errorno;
}
@@ -1056,10 +1041,8 @@ OUT:
static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
{
struct file *flp = NULL;
- mm_segment_t oldfs = {0};
char *buff;
int len = 0;
- loff_t pos = 0;
buff = kmalloc(BUFFER_1K, GFP_KERNEL);
if (!buff)
@@ -1079,20 +1062,16 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter)
Adapter->pstargetparams = NULL;
return -ENOENT;
}
- oldfs = get_fs();
- set_fs(get_ds());
- len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos);
- set_fs(oldfs);
+ len = kernel_read(flp, 0, buff, BUFFER_1K);
+ filp_close(flp, NULL);
if (len != sizeof(STARGETPARAMS)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n");
kfree(buff);
kfree(Adapter->pstargetparams);
Adapter->pstargetparams = NULL;
- filp_close(flp, current->files);
return -ENOENT;
}
- filp_close(flp, current->files);
/* Check for autolink in config params */
/*
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index c0fdb00783ed..2359151af7e1 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -168,7 +168,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->board_ptr = comedi_recognize(driv, it->board_name);
if (dev->board_ptr)
break;
- } else if (strcmp(driv->driver_name, it->board_name))
+ } else if (strcmp(driv->driver_name, it->board_name) == 0)
break;
module_put(driv->module);
}
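The one-character fix above hinges on strcmp() returning 0 for equal strings, so the old "if (strcmp(...))" broke out of the driver loop on every *mismatching* name. For the record:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* strcmp() == 0 means "equal" */
	printf("strcmp(\"dummy\", \"dummy\") = %d\n", strcmp("dummy", "dummy")); /* 0 */
	printf("strcmp(\"dummy\", \"other\") = %d\n", strcmp("dummy", "other")); /* non-zero */

	/* so the old test matched only when the names differed */
	if (strcmp("dummy", "dummy") == 0)
		printf("fixed test: driver name matches board name\n");
	return 0;
}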
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 31986608eaf1..6b4d0d68e637 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1349,9 +1349,6 @@ static struct pci_dev *pci1710_find_pci_dev(struct comedi_device *dev,
}
if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)
continue;
- if (pci_is_enabled(pcidev))
- continue;
-
if (strcmp(this_board->name, DRV_NAME) == 0) {
for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
if (pcidev->device == boardtypes[i].device_id) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index da5ee69d2c9d..dfde0f6328dd 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -301,8 +301,6 @@ static struct pci_dev *pci1723_find_pci_dev(struct comedi_device *dev,
}
if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH)
continue;
- if (pci_is_enabled(pcidev))
- continue;
return pcidev;
}
dev_err(dev->class_dev,
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 97f06dc8e48d..2d4cb7f638b2 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1064,8 +1064,6 @@ static struct pci_dev *pci_dio_find_pci_dev(struct comedi_device *dev,
slot != PCI_SLOT(pcidev->devfn))
continue;
}
- if (pci_is_enabled(pcidev))
- continue;
for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) {
if (boardtypes[i].vendor_id != pcidev->vendor)
continue;
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index ef28385c1482..cad559a1a730 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -718,7 +718,8 @@ static struct pci_dev *daqboard2000_find_pci_dev(struct comedi_device *dev,
continue;
}
if (pcidev->vendor != PCI_VENDOR_ID_IOTECH ||
- pcidev->device != 0x0409)
+ pcidev->device != 0x0409 ||
+ pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
continue;
for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
@@ -739,6 +740,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
{
struct pci_dev *pcidev;
struct comedi_subdevice *s;
+ resource_size_t pci_base;
void *aux_data;
unsigned int aux_len;
int result;
@@ -758,11 +760,12 @@ static int daqboard2000_attach(struct comedi_device *dev,
"failed to enable PCI device and request regions\n");
return -EIO;
}
- dev->iobase = pci_resource_start(pcidev, 2);
+ dev->iobase = 1; /* the "detach" needs this */
- devpriv->plx =
- ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE);
- devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE);
+ pci_base = pci_resource_start(pcidev, 0);
+ devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE);
+ pci_base = pci_resource_start(pcidev, 2);
+ devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE);
if (!devpriv->plx || !devpriv->daq)
return -ENOMEM;
@@ -799,8 +802,6 @@ static int daqboard2000_attach(struct comedi_device *dev,
printk("Interrupt after is: %x\n", interrupt);
*/
- dev->iobase = (unsigned long)devpriv->daq;
-
dev->board_name = this_board->name;
s = dev->subdevices + 0;
@@ -824,7 +825,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
s = dev->subdevices + 2;
result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
- (unsigned long)(dev->iobase + 0x40));
+ (unsigned long)(devpriv->daq + 0x40));
out:
return result;
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index a6fe6c9be87e..3476cda0fff0 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -804,6 +804,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct pci_dev *pcidev;
struct comedi_subdevice *s;
+ resource_size_t pci_base;
int ret = 0;
dev_dbg(dev->class_dev, "dt3000:\n");
@@ -820,9 +821,10 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = comedi_pci_enable(pcidev, "dt3000");
if (ret < 0)
return ret;
+ dev->iobase = 1; /* the "detach" needs this */
- dev->iobase = pci_resource_start(pcidev, 0);
- devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE);
+ pci_base = pci_resource_start(pcidev, 0);
+ devpriv->io_addr = ioremap(pci_base, DT3000_SIZE);
if (!devpriv->io_addr)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 112fdc3e9c69..5aa8be1e7b92 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -1619,9 +1619,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct rtdPrivate *devpriv;
struct pci_dev *pcidev;
struct comedi_subdevice *s;
+ resource_size_t pci_base;
int ret;
- resource_size_t physLas1; /* data area */
- resource_size_t physLcfg; /* PLX9080 */
#ifdef USE_DMA
int index;
#endif
@@ -1655,20 +1654,15 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it)
printk(KERN_INFO "Failed to enable PCI device and request regions.\n");
return ret;
}
-
- /*
- * Initialize base addresses
- */
- /* Get the physical address from PCI config */
- dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX);
- physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX);
- physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX);
- /* Now have the kernel map this into memory */
- /* ASSUME page aligned */
- devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE);
- devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE);
- devpriv->lcfg = ioremap_nocache(physLcfg, LCFG_PCISIZE);
-
+ dev->iobase = 1; /* the "detach" needs this */
+
+ /* Initialize the base addresses */
+ pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX);
+ devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE);
+ pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX);
+ devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE);
+ pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX);
+ devpriv->lcfg = ioremap_nocache(pci_base, LCFG_PCISIZE);
if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 848c7ec06976..11ee83681da7 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -102,6 +102,7 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#define BULK_TIMEOUT 1000
/* constants for "firmware" upload and download */
+#define FIRMWARE "usbdux_firmware.bin"
#define USBDUXSUB_FIRMWARE 0xA0
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
@@ -2791,7 +2792,7 @@ static int usbdux_usb_probe(struct usb_interface *uinterf,
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG,
- "usbdux_firmware.bin",
+ FIRMWARE,
&udev->dev,
GFP_KERNEL,
usbduxsub + index,
@@ -2850,3 +2851,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index d9911588c10a..8eb41257c6ce 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -57,6 +57,7 @@
/*
* constants for "firmware" upload and download
*/
+#define FIRMWARE "usbduxfast_firmware.bin"
#define USBDUXFASTSUB_FIRMWARE 0xA0
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
@@ -1706,7 +1707,7 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf,
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG,
- "usbduxfast_firmware.bin",
+ FIRMWARE,
&udev->dev,
GFP_KERNEL,
usbduxfastsub + index,
@@ -1774,3 +1775,4 @@ module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver);
MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 543e604791e2..f54ab8c2fcfd 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -63,6 +63,7 @@ Status: testing
#define BULK_TIMEOUT 1000
/* constants for "firmware" upload and download */
+#define FIRMWARE "usbduxsigma_firmware.bin"
#define USBDUXSUB_FIRMWARE 0xA0
#define VENDOR_DIR_IN 0xC0
#define VENDOR_DIR_OUT 0x40
@@ -2780,7 +2781,7 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf,
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG,
- "usbduxsigma_firmware.bin",
+ FIRMWARE,
&udev->dev,
GFP_KERNEL,
usbduxsub + index,
@@ -2845,3 +2846,4 @@ module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);
MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE);
diff --git a/drivers/staging/csr/Kconfig b/drivers/staging/csr/Kconfig
index cee8d48d2af9..ad2a1096e920 100644
--- a/drivers/staging/csr/Kconfig
+++ b/drivers/staging/csr/Kconfig
@@ -1,6 +1,6 @@
config CSR_WIFI
tristate "CSR wireless driver"
- depends on MMC && CFG80211_WEXT
+ depends on MMC && CFG80211_WEXT && INET
select WIRELESS_EXT
select WEXT_PRIV
help
diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c
index 760efee23d4a..65624bca8b3a 100644
--- a/drivers/staging/gdm72xx/sdio_boot.c
+++ b/drivers/staging/gdm72xx/sdio_boot.c
@@ -66,9 +66,8 @@ static int download_image(struct sdio_func *func, char *img_name)
return -ENOENT;
}
- if (filp->f_dentry)
- inode = filp->f_dentry->d_inode;
- if (!inode || !S_ISREG(inode->i_mode)) {
+ inode = filp->f_dentry->d_inode;
+ if (!S_ISREG(inode->i_mode)) {
printk(KERN_ERR "Invalid file type: %s\n", img_name);
ret = -EINVAL;
goto out;
@@ -123,7 +122,7 @@ static int download_image(struct sdio_func *func, char *img_name)
pno++;
}
out:
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
return ret;
}
diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c
index fef290c38db6..e3dbd5a552ca 100644
--- a/drivers/staging/gdm72xx/usb_boot.c
+++ b/drivers/staging/gdm72xx/usb_boot.c
@@ -173,14 +173,12 @@ int usb_boot(struct usb_device *usbdev, u16 pid)
filp = filp_open(img_name, O_RDONLY | O_LARGEFILE, 0);
if (IS_ERR(filp)) {
printk(KERN_ERR "Can't find %s.\n", img_name);
- set_fs(fs);
ret = PTR_ERR(filp);
goto restore_fs;
}
- if (filp->f_dentry)
- inode = filp->f_dentry->d_inode;
- if (!inode || !S_ISREG(inode->i_mode)) {
+ inode = filp->f_dentry->d_inode;
+ if (!S_ISREG(inode->i_mode)) {
printk(KERN_ERR "Invalid file type: %s\n", img_name);
ret = -EINVAL;
goto out;
@@ -262,7 +260,7 @@ int usb_boot(struct usb_device *usbdev, u16 pid)
ret = -EINVAL;
}
out:
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
restore_fs:
set_fs(fs);
@@ -322,13 +320,11 @@ static int em_download_image(struct usb_device *usbdev, char *path,
goto restore_fs;
}
- if (filp->f_dentry) {
- inode = filp->f_dentry->d_inode;
- if (!inode || !S_ISREG(inode->i_mode)) {
- printk(KERN_ERR "Invalid file type: %s\n", path);
- ret = -EINVAL;
- goto out;
- }
+ inode = filp->f_dentry->d_inode;
+ if (!S_ISREG(inode->i_mode)) {
+ printk(KERN_ERR "Invalid file type: %s\n", path);
+ ret = -EINVAL;
+ goto out;
}
buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL);
@@ -364,7 +360,7 @@ static int em_download_image(struct usb_device *usbdev, char *path,
goto out;
out:
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
restore_fs:
set_fs(fs);
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 22c3923d55eb..095837285f4f 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -754,7 +754,7 @@ static ssize_t ad7192_set(struct device *dev,
else
st->mode &= ~AD7192_MODE_ACX;
- ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode);
+ ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode);
break;
default:
ret = -EINVAL;
@@ -798,6 +798,11 @@ static const struct attribute_group ad7195_attribute_group = {
.attrs = ad7195_attributes,
};
+static unsigned int ad7192_get_temp_scale(bool unipolar)
+{
+ return unipolar ? 2815 * 2 : 2815;
+}
+
static int ad7192_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -824,19 +829,6 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
*val = (smpl >> chan->scan_type.shift) &
((1 << (chan->scan_type.realbits)) - 1);
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (!unipolar)
- *val -= (1 << (chan->scan_type.realbits - 1));
- break;
- case IIO_TEMP:
- *val -= 0x800000;
- *val /= 2815; /* temp Kelvin */
- *val -= 273; /* temp Celsius */
- break;
- default:
- return -EINVAL;
- }
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -848,11 +840,21 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT_PLUS_NANO;
case IIO_TEMP:
- *val = 1000;
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = 1000000000 / ad7192_get_temp_scale(unipolar);
+ return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_OFFSET:
+ if (!unipolar)
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ else
+ *val = 0;
+ /* Kelvin to Celsius */
+ if (chan->type == IIO_TEMP)
+ *val -= 273 * ad7192_get_temp_scale(unipolar);
+ return IIO_VAL_INT;
}
return -EINVAL;
@@ -890,7 +892,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
}
ret = 0;
}
-
+ break;
default:
ret = -EINVAL;
}
@@ -942,20 +944,22 @@ static const struct iio_info ad7195_info = {
.channel = _chan, \
.channel2 = _chan2, \
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
- IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
- .scan_type = IIO_ST('s', 24, 32, 0)}
+ .scan_type = IIO_ST('u', 24, 32, 0)}
#define AD7192_CHAN(_chan, _address, _si) \
{ .type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
- IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
- .scan_type = IIO_ST('s', 24, 32, 0)}
+ .scan_type = IIO_ST('u', 24, 32, 0)}
#define AD7192_CHAN_TEMP(_chan, _address, _si) \
{ .type = IIO_TEMP, \
@@ -965,7 +969,7 @@ static const struct iio_info ad7195_info = {
IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = _address, \
.scan_index = _si, \
- .scan_type = IIO_ST('s', 24, 32, 0)}
+ .scan_type = IIO_ST('u', 24, 32, 0)}
static struct iio_chan_spec ad7192_channels[] = {
AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0),
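The ad7192 hunks above (and the matching ad7780/ad7793 changes further down) stop massaging the raw sample in the driver: the channels become unsigned, and the bipolar half-range and Kelvin-to-Celsius corrections are exported through IIO_CHAN_INFO_OFFSET, leaving the reader to apply value = (raw + offset) * scale. For the bipolar temperature channel that means offset = -(1 << 23) - 273 * 2815 and scale = 10^9 / 2815 nano-units per code, which reproduces the arithmetic the raw path used to perform ((raw - 0x800000) / 2815 - 273). A consumer-side sketch of that combination (hypothetical helper, not driver code):

/* Combine the raw, offset and scale sysfs attributes the way an IIO
 * consumer is expected to: processed = (raw + offset) * scale. */
static double iio_processed_value(long raw, long offset, double scale)
{
        /* e.g. AD7192 bipolar temperature: offset = -(1 << 23) - 273 * 2815,
         * scale = 1.0 / 2815 */
        return ((double)raw + (double)offset) * scale;
}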
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index fd1d855ff57a..506016f01593 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -76,7 +76,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ad7298_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- s64 time_ns;
+ s64 time_ns = 0;
__u16 buf[16];
int b_sent, i;
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1ece2ac8de56..19ee49c95de4 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -131,9 +131,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_type = {
- .sign = 's',
+ .sign = 'u',
.realbits = 24,
.storagebits = 32,
.shift = 8,
@@ -146,9 +147,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_type = {
- .sign = 's',
+ .sign = 'u',
.realbits = 20,
.storagebits = 32,
.shift = 12,
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 76fdd7145fc5..112e2b7b5bc4 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -563,8 +563,9 @@ static ssize_t ad7793_show_scale_available(struct device *dev,
return len;
}
-static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available,
- S_IRUGO, ad7793_show_scale_available, NULL, 0);
+static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available,
+ in_voltage-voltage_scale_available, S_IRUGO,
+ ad7793_show_scale_available, NULL, 0);
static struct attribute *ad7793_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
@@ -604,9 +605,6 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
*val = (smpl >> chan->scan_type.shift) &
((1 << (chan->scan_type.realbits)) - 1);
- if (!unipolar)
- *val -= (1 << (chan->scan_type.realbits - 1));
-
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -620,25 +618,38 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_NANO;
} else {
/* 1170mV / 2^23 * 6 */
- scale_uv = (1170ULL * 100000000ULL * 6ULL)
- >> (chan->scan_type.realbits -
- (unipolar ? 0 : 1));
+ scale_uv = (1170ULL * 100000000ULL * 6ULL);
}
break;
case IIO_TEMP:
- /* Always uses unity gain and internal ref */
- scale_uv = (2500ULL * 100000000ULL)
- >> (chan->scan_type.realbits -
- (unipolar ? 0 : 1));
+ /* 1170mV / 0.81 mV/C / 2^23 */
+ scale_uv = 1444444444444ULL;
break;
default:
return -EINVAL;
}
- *val2 = do_div(scale_uv, 100000000) * 10;
- *val = scale_uv;
-
+ scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1));
+ *val = 0;
+ *val2 = scale_uv;
return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ if (!unipolar)
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ else
+ *val = 0;
+
+ /* Kelvin to Celsius */
+ if (chan->type == IIO_TEMP) {
+ unsigned long long offset;
+ unsigned int shift;
+
+ shift = chan->scan_type.realbits - (unipolar ? 0 : 1);
+ offset = 273ULL << shift;
+ do_div(offset, 1444);
+ *val -= offset;
+ }
+ return IIO_VAL_INT;
}
return -EINVAL;
}
@@ -676,7 +687,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
}
ret = 0;
}
-
+ break;
default:
ret = -EINVAL;
}
@@ -720,9 +731,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 0,
- .scan_type = IIO_ST('s', 24, 32, 0)
+ .scan_type = IIO_ST('u', 24, 32, 0)
},
.channel[1] = {
.type = IIO_VOLTAGE,
@@ -732,9 +744,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 1,
- .scan_type = IIO_ST('s', 24, 32, 0)
+ .scan_type = IIO_ST('u', 24, 32, 0)
},
.channel[2] = {
.type = IIO_VOLTAGE,
@@ -744,9 +757,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 2,
- .scan_type = IIO_ST('s', 24, 32, 0)
+ .scan_type = IIO_ST('u', 24, 32, 0)
},
.channel[3] = {
.type = IIO_VOLTAGE,
@@ -757,9 +771,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 3,
- .scan_type = IIO_ST('s', 24, 32, 0)
+ .scan_type = IIO_ST('u', 24, 32, 0)
},
.channel[4] = {
.type = IIO_TEMP,
@@ -769,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
- .scan_type = IIO_ST('s', 24, 32, 0),
+ .scan_type = IIO_ST('u', 24, 32, 0),
},
.channel[5] = {
.type = IIO_VOLTAGE,
@@ -778,9 +793,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 5,
- .scan_type = IIO_ST('s', 24, 32, 0),
+ .scan_type = IIO_ST('u', 24, 32, 0),
},
.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
},
@@ -793,9 +809,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 0,
- .scan_type = IIO_ST('s', 16, 32, 0)
+ .scan_type = IIO_ST('u', 16, 32, 0)
},
.channel[1] = {
.type = IIO_VOLTAGE,
@@ -805,9 +822,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 1,
- .scan_type = IIO_ST('s', 16, 32, 0)
+ .scan_type = IIO_ST('u', 16, 32, 0)
},
.channel[2] = {
.type = IIO_VOLTAGE,
@@ -817,9 +835,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 2,
- .scan_type = IIO_ST('s', 16, 32, 0)
+ .scan_type = IIO_ST('u', 16, 32, 0)
},
.channel[3] = {
.type = IIO_VOLTAGE,
@@ -830,9 +849,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SHARED_BIT,
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 3,
- .scan_type = IIO_ST('s', 16, 32, 0)
+ .scan_type = IIO_ST('u', 16, 32, 0)
},
.channel[4] = {
.type = IIO_TEMP,
@@ -842,7 +862,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
- .scan_type = IIO_ST('s', 16, 32, 0),
+ .scan_type = IIO_ST('u', 16, 32, 0),
},
.channel[5] = {
.type = IIO_VOLTAGE,
@@ -851,9 +871,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT |
- IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT,
.scan_index = 5,
- .scan_type = IIO_ST('s', 16, 32, 0),
+ .scan_type = IIO_ST('u', 16, 32, 0),
},
.channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6),
},
@@ -901,7 +922,7 @@ static int __devinit ad7793_probe(struct spi_device *spi)
else if (voltage_uv)
st->int_vref_mv = voltage_uv / 1000;
else
- st->int_vref_mv = 2500; /* Build-in ref */
+ st->int_vref_mv = 1170; /* Built-in ref */
spi_set_drvdata(spi, indio_dev);
st->spi = spi;
diff --git a/drivers/staging/media/dt3155v4l/dt3155v4l.c b/drivers/staging/media/dt3155v4l/dt3155v4l.c
index c365cdf714ea..ebe5a27c06f5 100644
--- a/drivers/staging/media/dt3155v4l/dt3155v4l.c
+++ b/drivers/staging/media/dt3155v4l/dt3155v4l.c
@@ -971,20 +971,7 @@ static struct pci_driver pci_driver = {
.remove = __devexit_p(dt3155_remove),
};
-static int __init
-dt3155_init_module(void)
-{
- return pci_register_driver(&pci_driver);
-}
-
-static void __exit
-dt3155_exit_module(void)
-{
- pci_unregister_driver(&pci_driver);
-}
-
-module_init(dt3155_init_module);
-module_exit(dt3155_exit_module);
+module_pci_driver(pci_driver);
MODULE_DESCRIPTION("video4linux pci-driver for dt3155 frame grabber");
MODULE_AUTHOR("Marin Mitov <mitov@issp.bas.bg>");
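The dt3155v4l change (and the identical solo6x10/core.c change below) replaces the hand-written init/exit pair with module_pci_driver() from <linux/pci.h>, which generates the same boilerplate. Roughly, the macro expands to:

static int __init pci_driver_init(void)
{
        return pci_register_driver(&pci_driver);
}
module_init(pci_driver_init);

static void __exit pci_driver_exit(void)
{
        pci_unregister_driver(&pci_driver);
}
module_exit(pci_driver_exit);

(The generated function names are derived from the driver variable; the names shown here are illustrative.)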
diff --git a/drivers/staging/media/easycap/easycap_main.c b/drivers/staging/media/easycap/easycap_main.c
index a1c45e4dcdce..8269c77dbf7d 100644
--- a/drivers/staging/media/easycap/easycap_main.c
+++ b/drivers/staging/media/easycap/easycap_main.c
@@ -3083,6 +3083,7 @@ static int create_video_urbs(struct easycap *peasycap)
peasycap->allocation_video_urb += 1;
pdata_urb = kzalloc(sizeof(struct data_urb), GFP_KERNEL);
if (!pdata_urb) {
+ usb_free_urb(purb);
SAM("ERROR: Could not allocate struct data_urb.\n");
return -ENOMEM;
}
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index 4d20e9f74118..951007a3fc96 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -171,7 +171,7 @@ static void cycle_delay(int cycle)
}
-static int poll_main()
+static int poll_main(void)
{
unsigned char status_high, status_low;
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 945d9623550b..4afc3b419738 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -52,6 +52,7 @@
#include <linux/io.h>
#include <asm/irq.h>
#include <linux/fcntl.h>
+#include <linux/platform_device.h>
#ifdef LIRC_ON_SA1100
#include <asm/hardware.h>
#ifdef CONFIG_SA1100_COLLIE
@@ -487,9 +488,11 @@ static struct lirc_driver driver = {
.owner = THIS_MODULE,
};
+static struct platform_device *lirc_sir_dev;
static int init_chrdev(void)
{
+ driver.dev = &lirc_sir_dev->dev;
driver.minor = lirc_register_driver(&driver);
if (driver.minor < 0) {
printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
@@ -1215,20 +1218,71 @@ static int init_lirc_sir(void)
return 0;
}
+static int __devinit lirc_sir_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static int __devexit lirc_sir_remove(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver lirc_sir_driver = {
+ .probe = lirc_sir_probe,
+ .remove = __devexit_p(lirc_sir_remove),
+ .driver = {
+ .name = "lirc_sir",
+ .owner = THIS_MODULE,
+ },
+};
static int __init lirc_sir_init(void)
{
int retval;
+ retval = platform_driver_register(&lirc_sir_driver);
+ if (retval) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
+ "failed!\n");
+ return -ENODEV;
+ }
+
+ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
+ if (!lirc_sir_dev) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
+ "failed!\n");
+ retval = -ENOMEM;
+ goto pdev_alloc_fail;
+ }
+
+ retval = platform_device_add(lirc_sir_dev);
+ if (retval) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
+ "failed!\n");
+ retval = -ENODEV;
+ goto pdev_add_fail;
+ }
+
retval = init_chrdev();
if (retval < 0)
- return retval;
+ goto fail;
+
retval = init_lirc_sir();
if (retval) {
drop_chrdev();
- return retval;
+ goto fail;
}
+
return 0;
+
+fail:
+ platform_device_del(lirc_sir_dev);
+pdev_add_fail:
+ platform_device_put(lirc_sir_dev);
+pdev_alloc_fail:
+ platform_driver_unregister(&lirc_sir_driver);
+ return retval;
}
static void __exit lirc_sir_exit(void)
@@ -1236,6 +1290,8 @@ static void __exit lirc_sir_exit(void)
drop_hardware();
drop_chrdev();
drop_port();
+ platform_device_unregister(lirc_sir_dev);
+ platform_driver_unregister(&lirc_sir_driver);
printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
}
diff --git a/drivers/staging/media/solo6x10/TODO b/drivers/staging/media/solo6x10/TODO
index 7e6c4fa130df..539f739fe9e6 100644
--- a/drivers/staging/media/solo6x10/TODO
+++ b/drivers/staging/media/solo6x10/TODO
@@ -20,5 +20,5 @@ TODO (general):
- implement loopback of external sound jack with incoming audio?
- implement pause/resume
-Plase send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc Ben Collins
+Please send patches to Mauro Carvalho Chehab <mchehab@redhat.com> and Cc Ben Collins
<bcollins@bluecherry.net>
diff --git a/drivers/staging/media/solo6x10/core.c b/drivers/staging/media/solo6x10/core.c
index d2fd842e37cf..3ee9b125797f 100644
--- a/drivers/staging/media/solo6x10/core.c
+++ b/drivers/staging/media/solo6x10/core.c
@@ -318,15 +318,4 @@ static struct pci_driver solo_pci_driver = {
.remove = solo_pci_remove,
};
-static int __init solo_module_init(void)
-{
- return pci_register_driver(&solo_pci_driver);
-}
-
-static void __exit solo_module_exit(void)
-{
- pci_unregister_driver(&solo_pci_driver);
-}
-
-module_init(solo_module_init);
-module_exit(solo_module_exit);
+module_pci_driver(solo_pci_driver);
diff --git a/drivers/staging/media/solo6x10/i2c.c b/drivers/staging/media/solo6x10/i2c.c
index ef95a500b4da..398070a3d293 100644
--- a/drivers/staging/media/solo6x10/i2c.c
+++ b/drivers/staging/media/solo6x10/i2c.c
@@ -175,7 +175,7 @@ int solo_i2c_isr(struct solo_dev *solo_dev)
solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_IIC);
- if (status & (SOLO_IIC_STATE_TRNS & SOLO_IIC_STATE_SIG_ERR) ||
+ if (status & (SOLO_IIC_STATE_TRNS | SOLO_IIC_STATE_SIG_ERR) ||
solo_dev->i2c_id < 0) {
solo_i2c_stop(solo_dev);
return -ENXIO;
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index e31949c9c87e..f15b31b37ca5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -28,6 +28,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/ratelimit.h>
+#include <linux/of_mdio.h>
#include <net/dst.h>
@@ -161,22 +162,23 @@ static void cvm_oct_adjust_link(struct net_device *dev)
int cvm_oct_phy_setup_device(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
+ struct device_node *phy_node;
- int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
- if (phy_addr != -1) {
- char phy_id[MII_BUS_ID_SIZE + 3];
+ if (!priv->of_node)
+ return 0;
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", phy_addr);
+ phy_node = of_parse_phandle(priv->of_node, "phy-handle", 0);
+ if (!phy_node)
+ return 0;
- priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ priv->phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
+
+ if (priv->phydev == NULL)
+ return -ENODEV;
+
+ priv->last_link = 0;
+ phy_start_aneg(priv->phydev);
- if (IS_ERR(priv->phydev)) {
- priv->phydev = NULL;
- return -1;
- }
- priv->last_link = 0;
- phy_start_aneg(priv->phydev);
- }
return 0;
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 18f7a790f73d..683bedc74dde 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -24,6 +24,7 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
**********************************************************************/
+#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -32,6 +33,7 @@
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/of_net.h>
#include <net/dst.h>
@@ -113,15 +115,6 @@ int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
-/*
- * The offset from mac_addr_base that should be used for the next port
- * that is configured. By convention, if any mgmt ports exist on the
- * chip, they get the first mac addresses, The ports controlled by
- * this driver are numbered sequencially following any mgmt addresses
- * that may exist.
- */
-static unsigned int cvm_oct_mac_addr_offset;
-
/**
* cvm_oct_poll_queue - Workqueue for polling operations.
*/
@@ -176,7 +169,7 @@ static void cvm_oct_periodic_worker(struct work_struct *work)
queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
}
-static __init void cvm_oct_configure_common_hw(void)
+static __devinit void cvm_oct_configure_common_hw(void)
{
/* Setup the FPA */
cvmx_fpa_enable();
@@ -396,23 +389,21 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
* Returns Zero on success
*/
-static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+static int cvm_oct_set_mac_filter(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
union cvmx_gmxx_prtx_cfg gmx_cfg;
int interface = INTERFACE(priv->port);
int index = INDEX(priv->port);
- memcpy(dev->dev_addr, addr + 2, 6);
-
if ((interface < 2)
&& (cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
int i;
- uint8_t *ptr = addr;
+ uint8_t *ptr = dev->dev_addr;
uint64_t mac = 0;
for (i = 0; i < 6; i++)
- mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
+ mac = (mac << 8) | (uint64_t)ptr[i];
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
@@ -421,17 +412,17 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
- ptr[2]);
+ ptr[0]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
- ptr[3]);
+ ptr[1]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
- ptr[4]);
+ ptr[2]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
- ptr[5]);
+ ptr[3]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
- ptr[6]);
+ ptr[4]);
cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
- ptr[7]);
+ ptr[5]);
cvm_oct_common_set_multicast_list(dev);
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
gmx_cfg.u64);
@@ -439,6 +430,15 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
+{
+ int r = eth_mac_addr(dev, addr);
+
+ if (r)
+ return r;
+ return cvm_oct_set_mac_filter(dev);
+}
+
/**
* cvm_oct_common_init - per network device initialization
* @dev: Device to initialize
@@ -448,26 +448,17 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
int cvm_oct_common_init(struct net_device *dev)
{
struct octeon_ethernet *priv = netdev_priv(dev);
- struct sockaddr sa;
- u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
- ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
- ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
- ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
- ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
- (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);
-
- mac += cvm_oct_mac_addr_offset;
- sa.sa_data[0] = (mac >> 40) & 0xff;
- sa.sa_data[1] = (mac >> 32) & 0xff;
- sa.sa_data[2] = (mac >> 24) & 0xff;
- sa.sa_data[3] = (mac >> 16) & 0xff;
- sa.sa_data[4] = (mac >> 8) & 0xff;
- sa.sa_data[5] = mac & 0xff;
-
- if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
- printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
- " %pM\n", dev->name, sa.sa_data);
- cvm_oct_mac_addr_offset++;
+ const u8 *mac = NULL;
+
+ if (priv->of_node)
+ mac = of_get_mac_address(priv->of_node);
+
+ if (mac && is_valid_ether_addr(mac)) {
+ memcpy(dev->dev_addr, mac, ETH_ALEN);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+ } else {
+ eth_hw_addr_random(dev);
+ }
/*
* Force the interface to use the POW send if always_use_pow
@@ -488,7 +479,7 @@ int cvm_oct_common_init(struct net_device *dev)
SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
cvm_oct_phy_setup_device(dev);
- dev->netdev_ops->ndo_set_mac_address(dev, &sa);
+ cvm_oct_set_mac_filter(dev);
dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
/*
@@ -595,22 +586,55 @@ static const struct net_device_ops cvm_oct_pow_netdev_ops = {
extern void octeon_mdiobus_force_mod_depencency(void);
-static int __init cvm_oct_init_module(void)
+static struct device_node * __devinit cvm_oct_of_get_child(const struct device_node *parent,
+ int reg_val)
+{
+ struct device_node *node = NULL;
+ int size;
+ const __be32 *addr;
+
+ for (;;) {
+ node = of_get_next_child(parent, node);
+ if (!node)
+ break;
+ addr = of_get_property(node, "reg", &size);
+ if (addr && (be32_to_cpu(*addr) == reg_val))
+ break;
+ }
+ return node;
+}
+
+static struct device_node * __devinit cvm_oct_node_for_port(struct device_node *pip,
+ int interface, int port)
+{
+ struct device_node *ni, *np;
+
+ ni = cvm_oct_of_get_child(pip, interface);
+ if (!ni)
+ return NULL;
+
+ np = cvm_oct_of_get_child(ni, port);
+ of_node_put(ni);
+
+ return np;
+}
+
+static int __devinit cvm_oct_probe(struct platform_device *pdev)
{
int num_interfaces;
int interface;
int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
int qos;
+ struct device_node *pip;
octeon_mdiobus_force_mod_depencency();
pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
- if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
- else
- cvm_oct_mac_addr_offset = 0;
+ pip = pdev->dev.of_node;
+ if (!pip) {
+ pr_err("Error: No 'pip' in /aliases\n");
+ return -EINVAL;
+ }
cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
if (cvm_oct_poll_queue == NULL) {
@@ -689,10 +713,11 @@ static int __init cvm_oct_init_module(void)
cvmx_helper_interface_get_mode(interface);
int num_ports = cvmx_helper_ports_on_interface(interface);
int port;
+ int port_index;
- for (port = cvmx_helper_get_ipd_port(interface, 0);
+ for (port_index = 0, port = cvmx_helper_get_ipd_port(interface, 0);
port < cvmx_helper_get_ipd_port(interface, num_ports);
- port++) {
+ port_index++, port++) {
struct octeon_ethernet *priv;
struct net_device *dev =
alloc_etherdev(sizeof(struct octeon_ethernet));
@@ -703,6 +728,7 @@ static int __init cvm_oct_init_module(void)
/* Initialize the device private structure. */
priv = netdev_priv(dev);
+ priv->of_node = cvm_oct_node_for_port(pip, interface, port_index);
INIT_DELAYED_WORK(&priv->port_periodic_work,
cvm_oct_periodic_worker);
@@ -787,7 +813,7 @@ static int __init cvm_oct_init_module(void)
return 0;
}
-static void __exit cvm_oct_cleanup_module(void)
+static int __devexit cvm_oct_remove(struct platform_device *pdev)
{
int port;
@@ -835,10 +861,29 @@ static void __exit cvm_oct_cleanup_module(void)
if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+ return 0;
}
+static struct of_device_id cvm_oct_match[] = {
+ {
+ .compatible = "cavium,octeon-3860-pip",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, cvm_oct_match);
+
+static struct platform_driver cvm_oct_driver = {
+ .probe = cvm_oct_probe,
+ .remove = __devexit_p(cvm_oct_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .of_match_table = cvm_oct_match,
+ },
+};
+
+module_platform_driver(cvm_oct_driver);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
-module_init(cvm_oct_init_module);
-module_exit(cvm_oct_cleanup_module);
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d58192563552..9360e22e0739 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -31,6 +31,8 @@
#ifndef OCTEON_ETHERNET_H
#define OCTEON_ETHERNET_H
+#include <linux/of.h>
+
/**
* This is the definition of the Ethernet driver's private
* driver state stored in netdev_priv(dev).
@@ -59,6 +61,7 @@ struct octeon_ethernet {
void (*poll) (struct net_device *dev);
struct delayed_work port_periodic_work;
struct work_struct port_work; /* may be unused. */
+ struct device_node *of_node;
};
int cvm_oct_free_work(void *work_queue_entry);
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 992275c0d87c..2c4bd746715a 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
+#include <linux/olpc-ec.h>
#include <asm/tsc.h>
#include <asm/olpc.h>
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
index 8b864afb40b6..62e0022561bc 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -60,7 +60,7 @@ static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
}
static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
index 06c52cb62d2f..31c735d39217 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -48,7 +48,7 @@ static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index b06fd5b723fa..d536756549e6 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode");
// Static vars definitions
//
-static struct usb_device_id vt6656_table[] __devinitdata = {
+static struct usb_device_id vt6656_table[] = {
{USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)},
{}
};
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index ef360547ecec..0ca857ac473e 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
-static const struct usb_device_id wb35_table[] __devinitconst = {
+static const struct usb_device_id wb35_table[] = {
{ USB_DEVICE(0x0416, 0x0035) },
{ USB_DEVICE(0x18E8, 0x6201) },
{ USB_DEVICE(0x18E8, 0x6206) },
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9e2100551c78..cbb5aaf3e567 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -109,46 +109,29 @@ static struct se_device *fd_create_virtdevice(
struct se_subsystem_dev *se_dev,
void *p)
{
- char *dev_p = NULL;
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
struct fd_dev *fd_dev = p;
struct fd_host *fd_host = hba->hba_ptr;
- mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
int dev_flags = 0, flags, ret = -EINVAL;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
- old_fs = get_fs();
- set_fs(get_ds());
- dev_p = getname(fd_dev->fd_dev_name);
- set_fs(old_fs);
-
- if (IS_ERR(dev_p)) {
- pr_err("getname(%s) failed: %lu\n",
- fd_dev->fd_dev_name, IS_ERR(dev_p));
- ret = PTR_ERR(dev_p);
- goto fail;
- }
/*
* Use O_DSYNC by default instead of O_SYNC to forgo syncing
* of pure timestamp updates.
*/
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
- file = filp_open(dev_p, flags, 0600);
+ file = filp_open(fd_dev->fd_dev_name, flags, 0600);
if (IS_ERR(file)) {
- pr_err("filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
ret = PTR_ERR(file);
goto fail;
}
- if (!file || !file->f_dentry) {
- pr_err("filp_open(%s) failed\n", dev_p);
- goto fail;
- }
fd_dev->fd_file = file;
/*
* If using a block backend with this struct file, we extract
@@ -212,14 +195,12 @@ static struct se_device *fd_create_virtdevice(
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
- putname(dev_p);
return dev;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
- putname(dev_p);
return ERR_PTR(ret);
}
@@ -452,14 +433,11 @@ static ssize_t fd_set_configfs_dev_params(
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_fd_dev_name:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
+ if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
+ FD_MAX_DEV_NAME) == 0) {
+ ret = -EINVAL;
break;
}
- snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
- "%s", arg_p);
- kfree(arg_p);
pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
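The Opt_fd_dev_name branch above swaps match_strdup() + snprintf() + kfree() for a single match_strlcpy(), which copies the matched token straight into the fixed-size buffer, NUL-terminates it, and returns the token length, so a zero return flags an empty path. A minimal sketch of the idiom (hypothetical buffer names, not the target code):

#include <linux/parser.h>

static int copy_option_token(char *dst, size_t dst_len, substring_t *arg)
{
        /* match_strlcpy() behaves like strlcpy(): it returns the length of
         * the source token, truncating the copy to dst_len if needed. */
        if (match_strlcpy(dst, arg, dst_len) == 0)
                return -EINVAL; /* empty token */
        return 0;
}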
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 6e32ff6f2fa0..5552fa7426bc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -673,8 +673,15 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg)
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = cmd->priv;
- unsigned char *cdb = &pt->pscsi_cdb[0];
+ unsigned char *cdb;
+ /*
+ * Special case for REPORT_LUNs handling where pscsi_plugin_task has
+ * not been allocated because TCM is handling the emulation directly.
+ */
+ if (!pt)
+ return 0;
+ cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0eaae23d12b5..4de3186dc44e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1165,8 +1165,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);
- cmd->cmd_spdtl = size;
-
if (cmd->data_direction == DMA_TO_DEVICE) {
pr_err("Rejecting underflow/overflow"
" WRITE data\n");
@@ -2294,9 +2292,9 @@ transport_generic_get_mem(struct se_cmd *cmd)
return 0;
out:
- while (i >= 0) {
- __free_page(sg_page(&cmd->t_data_sg[i]));
+ while (i > 0) {
i--;
+ __free_page(sg_page(&cmd->t_data_sg[i]));
}
kfree(cmd->t_data_sg);
cmd->t_data_sg = NULL;
@@ -2323,9 +2321,12 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
if (ret < 0)
goto out_fail;
}
-
- /* Workaround for handling zero-length control CDBs */
- if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
+ /*
+ * If this command doesn't have any payload and we don't have to call
+ * into the fabric for data transfers, go ahead and complete it right
+ * away.
+ */
+ if (!cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c5eb3c33c3db..eea69358ced3 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -131,6 +131,7 @@ extern struct list_head ft_lport_list;
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
extern struct target_fabric_configfs *ft_configfs;
+extern unsigned int ft_debug_logging;
/*
* Fabric methods.
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index b9cb5006177e..823e6922249d 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -48,7 +48,7 @@
/*
* Dump cmd state for debugging.
*/
-void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
struct fc_exch *ep;
struct fc_seq *sp;
@@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
}
}
+void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
+{
+ if (unlikely(ft_debug_logging))
+ _ft_dump_cmd(cmd, caller);
+}
+
static void ft_free_cmd(struct ft_cmd *cmd)
{
struct fc_frame *fp;
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 87901fa74dd7..3c9e5b57caab 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata)
struct ft_tport *tport;
mutex_lock(&ft_lport_lock);
- tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
+
if (!tport) {
mutex_unlock(&ft_lport_lock);
return;
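The tfc_sess.c hunk is a lockdep-RCU annotation rather than a behavioural change: ft_prlo() dereferences the provider pointer on the update side while holding ft_lport_lock, so the plain rcu_dereference() (which expects an RCU read-side critical section) becomes rcu_dereference_protected() with the lockdep expression documenting what actually protects the pointer. A generic sketch of the idiom, with assumed names:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct rcu_head rcu;
        int val;
};

static struct foo __rcu *gp;
static DEFINE_MUTEX(g_lock);

static void replace_foo(struct foo *newp)
{
        struct foo *old;

        mutex_lock(&g_lock);
        /* Update side: the mutex, not rcu_read_lock(), protects gp here. */
        old = rcu_dereference_protected(gp, lockdep_is_held(&g_lock));
        rcu_assign_pointer(gp, newp);
        mutex_unlock(&g_lock);

        if (old)
                kfree_rcu(old, rcu);
}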
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 514a691abea0..3ab2bd540b54 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -23,6 +23,7 @@ config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
depends on THERMAL
depends on PLAT_SPEAR
+ depends on OF
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
thermal framework
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index c2e32df3b164..5f8ee39f2000 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -20,9 +20,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/spear_thermal.h>
#include <linux/thermal.h>
#define MD_FACTOR 1000
@@ -103,21 +103,20 @@ static int spear_thermal_probe(struct platform_device *pdev)
{
struct thermal_zone_device *spear_thermal = NULL;
struct spear_thermal_dev *stdev;
- struct spear_thermal_pdata *pdata;
- int ret = 0;
+ struct device_node *np = pdev->dev.of_node;
struct resource *stres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int ret = 0, val;
+
+ if (!np || !of_property_read_u32(np, "st,thermal-flags", &val)) {
+ dev_err(&pdev->dev, "Failed: DT Pdata not passed\n");
+ return -EINVAL;
+ }
if (!stres) {
dev_err(&pdev->dev, "memory resource missing\n");
return -ENODEV;
}
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "platform data is NULL\n");
- return -EINVAL;
- }
-
stdev = devm_kzalloc(&pdev->dev, sizeof(*stdev), GFP_KERNEL);
if (!stdev) {
dev_err(&pdev->dev, "kzalloc fail\n");
@@ -144,10 +143,10 @@ static int spear_thermal_probe(struct platform_device *pdev)
goto put_clk;
}
- stdev->flags = pdata->thermal_flags;
+ stdev->flags = val;
writel_relaxed(stdev->flags, stdev->thermal_base);
- spear_thermal = thermal_zone_device_register("spear_thermal", 0,
+ spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
stdev, &ops, 0, 0, 0, 0);
if (IS_ERR(spear_thermal)) {
dev_err(&pdev->dev, "thermal zone device is NULL\n");
@@ -189,6 +188,12 @@ static int spear_thermal_exit(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id spear_thermal_id_table[] = {
+ { .compatible = "st,thermal-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_thermal_id_table);
+
static struct platform_driver spear_thermal_driver = {
.probe = spear_thermal_probe,
.remove = spear_thermal_exit,
@@ -196,6 +201,7 @@ static struct platform_driver spear_thermal_driver = {
.name = "spear_thermal",
.owner = THIS_MODULE,
.pm = &spear_thermal_pm_ops,
+ .of_match_table = of_match_ptr(spear_thermal_id_table),
},
};
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 022bacb71a7e..2ab31e4f02cc 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -196,6 +196,28 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+trip_point_temp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ unsigned long temperature;
+
+ if (!tz->ops->set_trip_temp)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
+ return -EINVAL;
+
+ if (kstrtoul(buf, 10, &temperature))
+ return -EINVAL;
+
+ ret = tz->ops->set_trip_temp(tz, trip, temperature);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
trip_point_temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -218,6 +240,52 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ unsigned long temperature;
+
+ if (!tz->ops->set_trip_hyst)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
+ return -EINVAL;
+
+ if (kstrtoul(buf, 10, &temperature))
+ return -EINVAL;
+
+ /*
+ * We are not doing any check on the 'temperature' value
+ * here. The driver implementing 'set_trip_hyst' has to
+ * take care of this.
+ */
+ ret = tz->ops->set_trip_hyst(tz, trip, temperature);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip, ret;
+ unsigned long temperature;
+
+ if (!tz->ops->get_trip_hyst)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_hyst", &trip))
+ return -EINVAL;
+
+ ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
+
+ return ret ? ret : sprintf(buf, "%ld\n", temperature);
+}
+
+static ssize_t
passive_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -283,33 +351,6 @@ static DEVICE_ATTR(temp, 0444, temp_show, NULL);
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
static DEVICE_ATTR(passive, S_IRUGO | S_IWUSR, passive_show, passive_store);
-static struct device_attribute trip_point_attrs[] = {
- __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_4_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_10_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_10_temp, 0444, trip_point_temp_show, NULL),
- __ATTR(trip_point_11_type, 0444, trip_point_type_show, NULL),
- __ATTR(trip_point_11_temp, 0444, trip_point_temp_show, NULL),
-};
-
/* sys I/F for cooling device */
#define to_cooling_device(_dev) \
container_of(_dev, struct thermal_cooling_device, device)
@@ -1089,9 +1130,113 @@ leave:
EXPORT_SYMBOL(thermal_zone_device_update);
/**
+ * create_trip_attrs - create attributes for trip points
+ * @tz: the thermal zone device
+ * @mask: Writeable trip point bitmap.
+ */
+static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
+{
+ int indx;
+ int size = sizeof(struct thermal_attr) * tz->trips;
+
+ tz->trip_type_attrs = kzalloc(size, GFP_KERNEL);
+ if (!tz->trip_type_attrs)
+ return -ENOMEM;
+
+ tz->trip_temp_attrs = kzalloc(size, GFP_KERNEL);
+ if (!tz->trip_temp_attrs) {
+ kfree(tz->trip_type_attrs);
+ return -ENOMEM;
+ }
+
+ if (tz->ops->get_trip_hyst) {
+ tz->trip_hyst_attrs = kzalloc(size, GFP_KERNEL);
+ if (!tz->trip_hyst_attrs) {
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ return -ENOMEM;
+ }
+ }
+
+
+ for (indx = 0; indx < tz->trips; indx++) {
+ /* create trip type attribute */
+ snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_type", indx);
+
+ sysfs_attr_init(&tz->trip_type_attrs[indx].attr.attr);
+ tz->trip_type_attrs[indx].attr.attr.name =
+ tz->trip_type_attrs[indx].name;
+ tz->trip_type_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_type_attrs[indx].attr.show = trip_point_type_show;
+
+ device_create_file(&tz->device,
+ &tz->trip_type_attrs[indx].attr);
+
+ /* create trip temp attribute */
+ snprintf(tz->trip_temp_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_temp", indx);
+
+ sysfs_attr_init(&tz->trip_temp_attrs[indx].attr.attr);
+ tz->trip_temp_attrs[indx].attr.attr.name =
+ tz->trip_temp_attrs[indx].name;
+ tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show;
+ if (mask & (1 << indx)) {
+ tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR;
+ tz->trip_temp_attrs[indx].attr.store =
+ trip_point_temp_store;
+ }
+
+ device_create_file(&tz->device,
+ &tz->trip_temp_attrs[indx].attr);
+
+ /* create optional trip hyst attribute */
+ if (!tz->ops->get_trip_hyst)
+ continue;
+ snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
+ "trip_point_%d_hyst", indx);
+
+ sysfs_attr_init(&tz->trip_hyst_attrs[indx].attr.attr);
+ tz->trip_hyst_attrs[indx].attr.attr.name =
+ tz->trip_hyst_attrs[indx].name;
+ tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO;
+ tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show;
+ if (tz->ops->set_trip_hyst) {
+ tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR;
+ tz->trip_hyst_attrs[indx].attr.store =
+ trip_point_hyst_store;
+ }
+
+ device_create_file(&tz->device,
+ &tz->trip_hyst_attrs[indx].attr);
+ }
+ return 0;
+}
+
+static void remove_trip_attrs(struct thermal_zone_device *tz)
+{
+ int indx;
+
+ for (indx = 0; indx < tz->trips; indx++) {
+ device_remove_file(&tz->device,
+ &tz->trip_type_attrs[indx].attr);
+ device_remove_file(&tz->device,
+ &tz->trip_temp_attrs[indx].attr);
+ if (tz->ops->get_trip_hyst)
+ device_remove_file(&tz->device,
+ &tz->trip_hyst_attrs[indx].attr);
+ }
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ kfree(tz->trip_hyst_attrs);
+}
+
+/**
* thermal_zone_device_register - register a new thermal zone device
* @type: the thermal zone device type
* @trips: the number of trip points the thermal zone support
+ * @mask: a bit string indicating the writeability of trip points
* @devdata: private device data
* @ops: standard thermal zone device callbacks
* @tc1: thermal coefficient 1 for passive calculations
@@ -1106,8 +1251,8 @@ EXPORT_SYMBOL(thermal_zone_device_update);
* longer needed. The passive cooling formula uses tc1 and tc2 as described in
* section 11.1.5.1 of the ACPI specification 3.0.
*/
-struct thermal_zone_device *thermal_zone_device_register(char *type,
- int trips, void *devdata,
+struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ int trips, int mask, void *devdata,
const struct thermal_zone_device_ops *ops,
int tc1, int tc2, int passive_delay, int polling_delay)
{
@@ -1121,7 +1266,7 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
if (strlen(type) >= THERMAL_NAME_LENGTH)
return ERR_PTR(-EINVAL);
- if (trips > THERMAL_MAX_TRIPS || trips < 0)
+ if (trips > THERMAL_MAX_TRIPS || trips < 0 || mask >> trips)
return ERR_PTR(-EINVAL);
if (!ops || !ops->get_temp)
@@ -1175,15 +1320,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
goto unregister;
}
+ result = create_trip_attrs(tz, mask);
+ if (result)
+ goto unregister;
+
for (count = 0; count < trips; count++) {
- result = device_create_file(&tz->device,
- &trip_point_attrs[count * 2]);
- if (result)
- break;
- result = device_create_file(&tz->device,
- &trip_point_attrs[count * 2 + 1]);
- if (result)
- goto unregister;
tz->ops->get_trip_type(tz, count, &trip_type);
if (trip_type == THERMAL_TRIP_PASSIVE)
passive = 1;
@@ -1232,7 +1373,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
- int count;
if (!tz)
return;
@@ -1259,13 +1399,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
device_remove_file(&tz->device, &dev_attr_temp);
if (tz->ops->get_mode)
device_remove_file(&tz->device, &dev_attr_mode);
+ remove_trip_attrs(tz);
- for (count = 0; count < tz->trips; count++) {
- device_remove_file(&tz->device,
- &trip_point_attrs[count * 2]);
- device_remove_file(&tz->device,
- &trip_point_attrs[count * 2 + 1]);
- }
thermal_remove_hwmon_sysfs(tz);
release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
idr_destroy(&tz->idr);
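With these thermal_sys.c changes, thermal_zone_device_register() gains a writable-trip bitmap and allocates the per-trip sysfs attributes dynamically instead of drawing on the fixed 12-entry table: bit n of the mask makes trip_point_n_temp writable (S_IWUSR) and routes writes to ops->set_trip_temp, and trip_point_n_hyst appears whenever get_trip_hyst is provided. A caller sketch under the new signature (hypothetical driver names):

#include <linux/err.h>
#include <linux/thermal.h>

static int example_register_zone(void *drvdata,
                                 const struct thermal_zone_device_ops *ops)
{
        struct thermal_zone_device *tz;

        /* 3 trip points; mask 0x1 makes only trip_point_0_temp writable. */
        tz = thermal_zone_device_register("example_thermal", 3, 0x1,
                                          drvdata, ops, 0, 0, 0, 1000);
        if (IS_ERR(tz))
                return PTR_ERR(tz);
        return 0;
}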
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 6e1958a325bd..8123f784bcda 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -282,6 +282,14 @@ static const struct serial8250_config uart_config[] = {
.fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
.flags = UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR,
},
+ [PORT_LPC3220] = {
+ .name = "LPC3220",
+ .fifo_size = 64,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
+ UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
+ .flags = UART_CAP_FIFO,
+ },
};
/* Uart divisor latch read */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 070b442c1f81..4720b4ba096a 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -160,10 +160,12 @@ config SERIAL_KS8695_CONSOLE
config SERIAL_CLPS711X
tristate "CLPS711X serial port support"
- depends on ARM && ARCH_CLPS711X
+ depends on ARCH_CLPS711X
select SERIAL_CORE
+ default y
help
- ::: To be written :::
+ This enables the driver for the on-chip UARTs of the Cirrus
+ Logic EP711x/EP721x/EP731x processors.
config SERIAL_CLPS711X_CONSOLE
bool "Support for console on CLPS711X serial port"
@@ -173,9 +175,7 @@ config SERIAL_CLPS711X_CONSOLE
Even if you say Y here, the currently visible virtual console
(/dev/tty0) will still be used as the system console by default, but
you can alter that using a kernel command line option such as
- "console=ttyCL1". (Try "man bootparam" or see the documentation of
- your boot loader (lilo or loadlin) about how to pass options to the
- kernel at boot time.)
+ "console=ttyCL1".
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c17923ec6e95..d3553b5d3fca 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -53,9 +53,9 @@
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/sizes.h>
#include <asm/io.h>
-#include <asm/sizes.h>
#define UART_NR 14
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 144cd3987d4c..3ad079ffd049 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1331,7 +1331,7 @@ static const struct spi_device_id ifx_id_table[] = {
MODULE_DEVICE_TABLE(spi, ifx_id_table);
/* spi operations */
-static const struct spi_driver ifx_spi_driver = {
+static struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
.pm = &ifx_spi_pm,
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 2e341b81ff89..3a667eed63d6 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -73,6 +73,7 @@
#define AUART_CTRL0_CLKGATE (1 << 30)
#define AUART_CTRL2_CTSEN (1 << 15)
+#define AUART_CTRL2_RTSEN (1 << 14)
#define AUART_CTRL2_RTS (1 << 11)
#define AUART_CTRL2_RXE (1 << 9)
#define AUART_CTRL2_TXE (1 << 8)
@@ -259,9 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
u32 ctrl = readl(u->membase + AUART_CTRL2);
- ctrl &= ~AUART_CTRL2_RTS;
- if (mctrl & TIOCM_RTS)
- ctrl |= AUART_CTRL2_RTS;
+ ctrl &= ~AUART_CTRL2_RTSEN;
+ if (mctrl & TIOCM_RTS) {
+ if (u->state->port.flags & ASYNC_CTS_FLOW)
+ ctrl |= AUART_CTRL2_RTSEN;
+ }
+
s->ctrl = mctrl;
writel(ctrl, u->membase + AUART_CTRL2);
}
@@ -359,9 +363,9 @@ static void mxs_auart_settermios(struct uart_port *u,
/* figure out the hardware flow control settings */
if (cflag & CRTSCTS)
- ctrl2 |= AUART_CTRL2_CTSEN;
+ ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN;
else
- ctrl2 &= ~AUART_CTRL2_CTSEN;
+ ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN);
/* set baud rate */
baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5410c0637266..34e71874a892 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -208,6 +208,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
{ .compatible = "ns16750", .data = (void *)PORT_16750, },
{ .compatible = "ns16850", .data = (void *)PORT_16850, },
{ .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
+ { .compatible = "nxp,lpc3220-uart", .data = (void *)PORT_LPC3220, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 4fdec6a6b758..558ce8509a9a 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -253,6 +253,9 @@ struct eg20t_port {
dma_addr_t rx_buf_dma;
struct dentry *debugfs;
+
+ /* protect the eg20t_port private structure and io access to membase */
+ spinlock_t lock;
};
/**
@@ -754,7 +757,8 @@ static void pch_dma_rx_complete(void *arg)
tty_flip_buffer_push(tty);
tty_kref_put(tty);
async_tx_ack(priv->desc_rx);
- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
}
static void pch_dma_tx_complete(void *arg)
@@ -809,7 +813,8 @@ static int handle_rx_to(struct eg20t_port *priv)
int rx_size;
int ret;
if (!priv->start_rx) {
- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
return 0;
}
buf = &priv->rxbuf;
@@ -1058,7 +1063,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
int next = 1;
u8 msr;
- spin_lock_irqsave(&priv->port.lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
handled = 0;
while (next) {
iid = pch_uart_hal_get_iid(priv);
@@ -1078,11 +1083,13 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
case PCH_UART_IID_RDR: /* Received Data Ready */
if (priv->use_dma) {
pch_uart_hal_disable_interrupt(priv,
- PCH_UART_HAL_RX_INT);
+ PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
ret = dma_handle_rx(priv);
if (!ret)
pch_uart_hal_enable_interrupt(priv,
- PCH_UART_HAL_RX_INT);
+ PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
} else {
ret = handle_rx(priv);
}
@@ -1116,7 +1123,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
handled |= (unsigned int)ret;
}
- spin_unlock_irqrestore(&priv->port.lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return IRQ_RETVAL(handled);
}
@@ -1208,7 +1215,8 @@ static void pch_uart_stop_rx(struct uart_port *port)
struct eg20t_port *priv;
priv = container_of(port, struct eg20t_port, port);
priv->start_rx = 0;
- pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
}
/* Enable the modem status interrupts. */
@@ -1226,9 +1234,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
priv = container_of(port, struct eg20t_port, port);
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
pch_uart_hal_set_break(priv, ctl);
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
/* Grab any interrupt resources and initialise any low level driver state. */
@@ -1263,6 +1271,7 @@ static int pch_uart_startup(struct uart_port *port)
break;
case 16:
fifo_size = PCH_UART_HAL_FIFO16;
+ break;
case 1:
default:
fifo_size = PCH_UART_HAL_FIFO_DIS;
@@ -1300,7 +1309,8 @@ static int pch_uart_startup(struct uart_port *port)
pch_request_dma(port);
priv->start_rx = 1;
- pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_RX_INT |
+ PCH_UART_HAL_RX_ERR_INT);
uart_update_timeout(port, CS8, default_baud);
return 0;
@@ -1358,7 +1368,7 @@ static void pch_uart_set_termios(struct uart_port *port,
stb = PCH_UART_HAL_STB1;
if (termios->c_cflag & PARENB) {
- if (!(termios->c_cflag & PARODD))
+ if (termios->c_cflag & PARODD)
parity = PCH_UART_HAL_PARITY_ODD;
else
parity = PCH_UART_HAL_PARITY_EVEN;
@@ -1376,7 +1386,8 @@ static void pch_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
- spin_lock_irqsave(&port->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
+ spin_lock(&port->lock);
uart_update_timeout(port, termios->c_cflag, baud);
rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
@@ -1389,7 +1400,8 @@ static void pch_uart_set_termios(struct uart_port *port,
tty_termios_encode_baud_rate(termios, baud, baud);
out:
- spin_unlock_irqrestore(&port->lock, flags);
+ spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
static const char *pch_uart_type(struct uart_port *port)
@@ -1538,8 +1550,9 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
{
struct eg20t_port *priv;
unsigned long flags;
+ int priv_locked = 1;
+ int port_locked = 1;
u8 ier;
- int locked = 1;
priv = pch_uart_ports[co->index];
@@ -1547,12 +1560,16 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (priv->port.sysrq) {
- /* serial8250_handle_port() already took the lock */
- locked = 0;
+ spin_lock(&priv->lock);
+ /* serial8250_handle_port() already took the port lock */
+ port_locked = 0;
} else if (oops_in_progress) {
- locked = spin_trylock(&priv->port.lock);
- } else
+ priv_locked = spin_trylock(&priv->lock);
+ port_locked = spin_trylock(&priv->port.lock);
+ } else {
+ spin_lock(&priv->lock);
spin_lock(&priv->port.lock);
+ }
/*
* First save the IER then disable the interrupts
@@ -1570,8 +1587,10 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
wait_for_xmitr(priv, BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
- if (locked)
+ if (port_locked)
spin_unlock(&priv->port.lock);
+ if (priv_locked)
+ spin_unlock(&priv->lock);
local_irq_restore(flags);
}
@@ -1669,6 +1688,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_enable_msi(pdev);
pci_set_master(pdev);
+ spin_lock_init(&priv->lock);
+
iobase = pci_resource_start(pdev, 0);
mapbase = pci_resource_start(pdev, 1);
priv->mapbase = mapbase;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 654755a990df..333c8d012b0e 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1348,10 +1348,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
static int pmz_poll_get_char(struct uart_port *port)
{
struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+ int tries = 2;
- while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
- udelay(5);
- return read_zsdata(uap);
+ while (tries) {
+ if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0)
+ return read_zsdata(uap);
+ if (tries--)
+ udelay(5);
+ }
+
+ return NO_POLL_CHAR;
}
static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index d8b0aee35632..02d07bfcfa8a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1014,10 +1014,10 @@ static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
* a disturbance in the clock-rate over the change.
*/
- if (IS_ERR(port->clk))
+ if (IS_ERR(port->baudclk))
goto exit;
- if (port->baudclk_rate == clk_get_rate(port->clk))
+ if (port->baudclk_rate == clk_get_rate(port->baudclk))
goto exit;
if (val == CPUFREQ_PRECHANGE) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 246b823c1b27..a21dc8e3b7c0 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2527,14 +2527,16 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
struct tty_struct *tty = port->state->port.tty;
if ((status & port->ignore_status_mask & ~overrun) == 0)
- tty_insert_flip_char(tty, ch, flag);
+ if (tty_insert_flip_char(tty, ch, flag) == 0)
+ ++port->icount.buf_overrun;
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (status & ~port->ignore_status_mask & overrun)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ if (tty_insert_flip_char(tty, 0, TTY_OVERRUN) == 0)
+ ++port->icount.buf_overrun;
}
EXPORT_SYMBOL_GPL(uart_insert_char);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index d4d8c9453cd8..9be296cf7295 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/sh_dma.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
@@ -1410,8 +1411,8 @@ static void work_fn_rx(struct work_struct *work)
/* Handle incomplete DMA receive */
struct tty_struct *tty = port->state->port.tty;
struct dma_chan *chan = s->chan_rx;
- struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
- async_tx);
+ struct shdma_desc *sh_desc = container_of(desc,
+ struct shdma_desc, async_tx);
unsigned long flags;
int count;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 6cd414341d5e..6579ffdd8e9b 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -216,8 +216,7 @@ static int ulite_startup(struct uart_port *port)
{
int ret;
- ret = request_irq(port->irq, ulite_isr,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM, "uartlite", port);
+ ret = request_irq(port->irq, ulite_isr, IRQF_SHARED, "uartlite", port);
if (ret)
return ret;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 9911eb6b34cd..6f99c9959f0c 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -659,7 +659,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
goto enable;
}
- if (test_bit(TTY_HUPPED, &tty->flags)) {
+ if (test_bit(TTY_HUPPING, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 64618547be11..b841f56d2e66 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -110,16 +110,7 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
wake_up_interruptible(&vt_event_waitqueue);
}
-/**
- * vt_event_wait - wait for an event
- * @vw: our event
- *
- * Waits for an event to occur which completes our vt_event_wait
- * structure. On return the structure has wv->done set to 1 for success
- * or 0 if some event such as a signal ended the wait.
- */
-
-static void vt_event_wait(struct vt_event_wait *vw)
+static void __vt_event_queue(struct vt_event_wait *vw)
{
unsigned long flags;
/* Prepare the event */
@@ -129,8 +120,18 @@ static void vt_event_wait(struct vt_event_wait *vw)
spin_lock_irqsave(&vt_event_lock, flags);
list_add(&vw->list, &vt_events);
spin_unlock_irqrestore(&vt_event_lock, flags);
+}
+
+static void __vt_event_wait(struct vt_event_wait *vw)
+{
/* Wait for it to pass */
wait_event_interruptible(vt_event_waitqueue, vw->done);
+}
+
+static void __vt_event_dequeue(struct vt_event_wait *vw)
+{
+ unsigned long flags;
+
/* Dequeue it */
spin_lock_irqsave(&vt_event_lock, flags);
list_del(&vw->list);
@@ -138,6 +139,22 @@ static void vt_event_wait(struct vt_event_wait *vw)
}
/**
+ * vt_event_wait - wait for an event
+ * @vw: our event
+ *
+ * Waits for an event to occur which completes our vt_event_wait
+ * structure. On return the structure has vw->done set to 1 for success
+ * or 0 if some event such as a signal ended the wait.
+ */
+
+static void vt_event_wait(struct vt_event_wait *vw)
+{
+ __vt_event_queue(vw);
+ __vt_event_wait(vw);
+ __vt_event_dequeue(vw);
+}
+
+/**
* vt_event_wait_ioctl - event ioctl handler
* @arg: argument to ioctl
*
@@ -177,10 +194,14 @@ int vt_waitactive(int n)
{
struct vt_event_wait vw;
do {
- if (n == fg_console + 1)
- break;
vw.event.event = VT_EVENT_SWITCH;
- vt_event_wait(&vw);
+ __vt_event_queue(&vw);
+ if (n == fg_console + 1) {
+ __vt_event_dequeue(&vw);
+ break;
+ }
+ __vt_event_wait(&vw);
+ __vt_event_dequeue(&vw);
if (vw.done == 0)
return -EINTR;
} while (vw.event.newev != n);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index a7773a3e02b1..7065df6036ca 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI
default y if PXA3xx
default y if ARCH_EP93XX
default y if ARCH_AT91
- default y if ARCH_PNX4008 && I2C
+ default y if ARCH_PNX4008
default y if MFD_TC6393XB
default y if ARCH_W90X900
default y if ARCH_DAVINCI_DA8XX
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 8337fb5d988d..47e499c9c0b6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,9 +1,9 @@
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
- depends on USB
+ depends on USB || USB_GADGET
help
- Say Y here if your system has a dual role high speed USB
- controller based on ChipIdea silicon IP. Currently, only the
+ Say Y here if your system has a dual role high speed USB
+ controller based on ChipIdea silicon IP. Currently, only the
peripheral mode is supported.
When compiled dynamically, the module will be called ci-hdrc.ko.
@@ -12,7 +12,7 @@ if USB_CHIPIDEA
config USB_CHIPIDEA_UDC
bool "ChipIdea device controller"
- depends on USB_GADGET
+ depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA
select USB_GADGET_DUALSPEED
help
Say Y here to enable device controller functionality of the
@@ -20,6 +20,7 @@ config USB_CHIPIDEA_UDC
config USB_CHIPIDEA_HOST
bool "ChipIdea host controller"
+ depends on USB=y || USB=USB_CHIPIDEA
select USB_EHCI_ROOT_HUB_TT
help
Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 56d6bf668488..f763ed7ba91e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1104,7 +1104,8 @@ skip_normal_probe:
}
- if (data_interface->cur_altsetting->desc.bNumEndpoints < 2)
+ if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
+ control_interface->cur_altsetting->desc.bNumEndpoints == 0)
return -EINVAL;
epctrl = &control_interface->cur_altsetting->endpoint[0].desc;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 821126eb8176..128a804c42f4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -2181,6 +2182,14 @@ int usb_new_device(struct usb_device *udev)
/* Tell the world! */
announce_device(udev);
+ if (udev->serial)
+ add_device_randomness(udev->serial, strlen(udev->serial));
+ if (udev->product)
+ add_device_randomness(udev->product, strlen(udev->product));
+ if (udev->manufacturer)
+ add_device_randomness(udev->manufacturer,
+ strlen(udev->manufacturer));
+
device_enable_async_suspend(&udev->dev);
/*
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ee0ebacf8227..89dcf155d57e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
- loop = 10;
+ loop = 1000;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 965a6293206a..8ee9268fe253 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -301,7 +301,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = alloc_page(gfp_flags);
+ page = __skb_alloc_page(gfp_flags | __GFP_NOMEMALLOC, NULL);
if (!page)
return -ENOMEM;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 3d28fb976c78..9fd7886cfa9a 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1836,7 +1836,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* init to known state, then setup irqs */
udc_reset(dev);
udc_reinit (dev);
- if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
+ if (request_irq(pdev->irq, goku_irq, IRQF_SHARED,
driver_name, dev) != 0) {
DBG(dev, "request interrupt %d failed\n", pdev->irq);
retval = -EBUSY;
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 8981fbb5748c..cf6bd626f3fe 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1583,12 +1583,10 @@ static int __exit m66592_remove(struct platform_device *pdev)
iounmap(m66592->reg);
free_irq(platform_get_irq(pdev, 0), m66592);
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
-#ifdef CONFIG_HAVE_CLK
if (m66592->pdata->on_chip) {
clk_disable(m66592->clk);
clk_put(m66592->clk);
}
-#endif
kfree(m66592);
return 0;
}
@@ -1602,9 +1600,7 @@ static int __init m66592_probe(struct platform_device *pdev)
struct resource *res, *ires;
void __iomem *reg = NULL;
struct m66592 *m66592 = NULL;
-#ifdef CONFIG_HAVE_CLK
char clk_name[8];
-#endif
int ret = 0;
int i;
@@ -1671,7 +1667,6 @@ static int __init m66592_probe(struct platform_device *pdev)
goto clean_up;
}
-#ifdef CONFIG_HAVE_CLK
if (m66592->pdata->on_chip) {
snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id);
m66592->clk = clk_get(&pdev->dev, clk_name);
@@ -1683,7 +1678,7 @@ static int __init m66592_probe(struct platform_device *pdev)
}
clk_enable(m66592->clk);
}
-#endif
+
INIT_LIST_HEAD(&m66592->gadget.ep_list);
m66592->gadget.ep0 = &m66592->ep[0].ep;
INIT_LIST_HEAD(&m66592->gadget.ep0->ep_list);
@@ -1731,13 +1726,11 @@ err_add_udc:
m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req);
clean_up3:
-#ifdef CONFIG_HAVE_CLK
if (m66592->pdata->on_chip) {
clk_disable(m66592->clk);
clk_put(m66592->clk);
}
clean_up2:
-#endif
free_irq(ires->start, m66592);
clean_up:
if (m66592) {
diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h
index 88c85b4116a2..16c7e14678b8 100644
--- a/drivers/usb/gadget/m66592-udc.h
+++ b/drivers/usb/gadget/m66592-udc.h
@@ -13,10 +13,7 @@
#ifndef __M66592_UDC_H__
#define __M66592_UDC_H__
-#ifdef CONFIG_HAVE_CLK
#include <linux/clk.h>
-#endif
-
#include <linux/usb/m66592.h>
#define M66592_SYSCFG 0x00
@@ -468,9 +465,7 @@ struct m66592_ep {
struct m66592 {
spinlock_t lock;
void __iomem *reg;
-#ifdef CONFIG_HAVE_CLK
struct clk *clk;
-#endif
struct m66592_platdata *pdata;
unsigned long irq_trigger;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 53c093b941e5..907ad3ecb341 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2201,19 +2201,15 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_LUBBOCK
if (machine_is_lubbock()) {
- retval = request_irq(LUBBOCK_USB_DISC_IRQ,
- lubbock_vbus_irq,
- IRQF_SAMPLE_RANDOM,
- driver_name, dev);
+ retval = request_irq(LUBBOCK_USB_DISC_IRQ, lubbock_vbus_irq,
+ 0, driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_DISC_IRQ, retval);
goto err_irq_lub;
}
- retval = request_irq(LUBBOCK_USB_IRQ,
- lubbock_vbus_irq,
- IRQF_SAMPLE_RANDOM,
- driver_name, dev);
+ retval = request_irq(LUBBOCK_USB_IRQ, lubbock_vbus_irq,
+ 0, driver_name, dev);
if (retval != 0) {
pr_err("%s: can't get irq %i, err %d\n",
driver_name, LUBBOCK_USB_IRQ, retval);
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index f3ac2a20c27c..5a80751accb7 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1831,12 +1831,12 @@ static int __exit r8a66597_remove(struct platform_device *pdev)
iounmap(r8a66597->sudmac_reg);
free_irq(platform_get_irq(pdev, 0), r8a66597);
r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
-#ifdef CONFIG_HAVE_CLK
+
if (r8a66597->pdata->on_chip) {
clk_disable(r8a66597->clk);
clk_put(r8a66597->clk);
}
-#endif
+
device_unregister(&r8a66597->gadget.dev);
kfree(r8a66597);
return 0;
@@ -1868,9 +1868,7 @@ static int __init r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
static int __init r8a66597_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_HAVE_CLK
char clk_name[8];
-#endif
struct resource *res, *ires;
int irq;
void __iomem *reg = NULL;
@@ -1934,7 +1932,6 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->timer.data = (unsigned long)r8a66597;
r8a66597->reg = reg;
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip) {
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
r8a66597->clk = clk_get(&pdev->dev, clk_name);
@@ -1946,7 +1943,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
}
clk_enable(r8a66597->clk);
}
-#endif
+
if (r8a66597->pdata->sudmac) {
ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
if (ret < 0)
@@ -2006,13 +2003,11 @@ err_add_udc:
clean_up3:
free_irq(irq, r8a66597);
clean_up2:
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip) {
clk_disable(r8a66597->clk);
clk_put(r8a66597->clk);
}
clean_up_dev:
-#endif
device_unregister(&r8a66597->gadget.dev);
clean_up:
if (r8a66597) {
diff --git a/drivers/usb/gadget/r8a66597-udc.h b/drivers/usb/gadget/r8a66597-udc.h
index 99908c76ccd1..45c4b2df1785 100644
--- a/drivers/usb/gadget/r8a66597-udc.h
+++ b/drivers/usb/gadget/r8a66597-udc.h
@@ -13,10 +13,7 @@
#ifndef __R8A66597_H__
#define __R8A66597_H__
-#ifdef CONFIG_HAVE_CLK
#include <linux/clk.h>
-#endif
-
#include <linux/usb/r8a66597.h>
#define R8A66597_MAX_SAMPLING 10
@@ -92,9 +89,7 @@ struct r8a66597 {
void __iomem *reg;
void __iomem *sudmac_reg;
-#ifdef CONFIG_HAVE_CLK
struct clk *clk;
-#endif
struct r8a66597_platdata *pdata;
struct usb_gadget gadget;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index ae8b18869b8c..8d9bcd8207c8 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -656,9 +656,8 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
- if (filp->f_path.dentry)
- inode = filp->f_path.dentry->d_inode;
- if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
+ inode = filp->f_path.dentry->d_inode;
+ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
}
@@ -667,7 +666,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
* If we can't read the file, it's no good.
* If we can't write the file, use it read-only.
*/
- if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
+ if (!(filp->f_op->read || filp->f_op->aio_read)) {
LINFO(curlun, "file not readable: %s\n", filename);
goto out;
}
@@ -712,7 +711,6 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
if (fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
- get_file(filp);
curlun->blksize = blksize;
curlun->blkbits = blkbits;
curlun->ro = ro;
@@ -720,10 +718,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
curlun->file_length = size;
curlun->num_sectors = num_sectors;
LDBG(curlun, "open backing file: %s\n", filename);
- rc = 0;
+ return 0;
out:
- filp_close(filp, current->files);
+ fput(filp);
return rc;
}
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 90e82e288eb9..0e5230926154 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net)
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb) {
struct gether *link = dev->port_usb;
+ const struct usb_endpoint_descriptor *in;
+ const struct usb_endpoint_descriptor *out;
if (link->close)
link->close(link);
@@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net)
* their own pace; the network stack can handle old packets.
* For the moment we leave this here, since it works.
*/
+ in = link->in_ep->desc;
+ out = link->out_ep->desc;
usb_ep_disable(link->in_ep);
usb_ep_disable(link->out_ep);
if (netif_carrier_ok(net)) {
DBG(dev, "host still using in/out endpoints\n");
+ link->in_ep->desc = in;
+ link->out_ep->desc = out;
usb_ep_enable(link->in_ep);
usb_ep_enable(link->out_ep);
}
diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
index af9898982059..e0c5e88e03ed 100644
--- a/drivers/usb/gadget/u_uac1.c
+++ b/drivers/usb/gadget/u_uac1.c
@@ -275,17 +275,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau)
/* Close control device */
snd = &gau->control;
if (snd->filp)
- filp_close(snd->filp, current->files);
+ filp_close(snd->filp, NULL);
/* Close PCM playback device and setup substream */
snd = &gau->playback;
if (snd->filp)
- filp_close(snd->filp, current->files);
+ filp_close(snd->filp, NULL);
/* Close PCM capture device and setup substream */
snd = &gau->capture;
if (snd->filp)
- filp_close(snd->filp, current->files);
+ filp_close(snd->filp, NULL);
return 0;
}
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index ec21f4a4a056..d7fe287d0678 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -56,15 +56,6 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
-/* Errata i693 */
-static struct clk *utmi_p1_fck;
-static struct clk *utmi_p2_fck;
-static struct clk *xclk60mhsp1_ck;
-static struct clk *xclk60mhsp2_ck;
-static struct clk *usbhost_p1_fck;
-static struct clk *usbhost_p2_fck;
-static struct clk *init_60m_fclk;
-
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver;
@@ -80,40 +71,6 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
return __raw_readl(base + reg);
}
-/* Erratum i693 workaround sequence */
-static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
-{
- int ret = 0;
-
- /* Switch to the internal 60 MHz clock */
- ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
- if (ret != 0)
- ehci_err(ehci, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
-
- ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
- if (ret != 0)
- ehci_err(ehci, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
-
- clk_enable(usbhost_p1_fck);
- clk_enable(usbhost_p2_fck);
-
- /* Wait 1ms and switch back to the external clock */
- mdelay(1);
- ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
- if (ret != 0)
- ehci_err(ehci, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
-
- ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
- if (ret != 0)
- ehci_err(ehci, "xclk60mhsp2_ck set parent"
- "failed error:%d\n", ret);
-
- clk_disable(usbhost_p1_fck);
- clk_disable(usbhost_p2_fck);
-}
static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port)
{
@@ -152,14 +109,14 @@ static int omap_ehci_init(struct usb_hcd *hcd)
struct ehci_hcd_omap_platform_data *pdata;
pdata = hcd->self.controller->platform_data;
+
+ /* Hold PHYs in reset while initializing EHCI controller */
if (pdata->phy_reset) {
if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_request_one(pdata->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_request_one(pdata->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
/* Hold the PHY in RESET for enough time till DIR is high */
udelay(10);
@@ -195,50 +152,6 @@ static int omap_ehci_init(struct usb_hcd *hcd)
return rc;
}
-static int omap_ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- u32 __iomem *status_reg = &ehci->regs->port_status[
- (wIndex & 0xff) - 1];
- u32 temp;
- unsigned long flags;
- int retval = 0;
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
- temp = ehci_readl(ehci, status_reg);
- if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
- retval = -EPIPE;
- goto done;
- }
-
- temp &= ~PORT_WKCONN_E;
- temp |= PORT_WKDISC_E | PORT_WKOC_E;
- ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
-
- omap_ehci_erratum_i693(ehci);
-
- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
- goto done;
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- /* Handle the hub control events here */
- return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-done:
- spin_unlock_irqrestore(&ehci->lock, flags);
- return retval;
-}
-
static void disable_put_regulator(
struct ehci_hcd_omap_platform_data *pdata)
{
@@ -351,79 +264,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
goto err_pm_runtime;
}
- /* get clocks */
- utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(utmi_p1_fck)) {
- ret = PTR_ERR(utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_add_hcd;
- }
-
- xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
- if (IS_ERR(xclk60mhsp1_ck)) {
- ret = PTR_ERR(xclk60mhsp1_ck);
- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(utmi_p2_fck)) {
- ret = PTR_ERR(utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
- }
-
- xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
- if (IS_ERR(xclk60mhsp2_ck)) {
- ret = PTR_ERR(xclk60mhsp2_ck);
- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(usbhost_p1_fck)) {
- ret = PTR_ERR(usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(usbhost_p2_fck)) {
- ret = PTR_ERR(usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
- }
-
- init_60m_fclk = clk_get(dev, "init_60m_fclk");
- if (IS_ERR(init_60m_fclk)) {
- ret = PTR_ERR(init_60m_fclk);
- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
- }
return 0;
-err_usbhost_p2_fck:
- clk_put(usbhost_p2_fck);
-
-err_usbhost_p1_fck:
- clk_put(usbhost_p1_fck);
-
-err_xclk60mhsp2_ck:
- clk_put(xclk60mhsp2_ck);
-
-err_utmi_p2_fck:
- clk_put(utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
- clk_put(xclk60mhsp1_ck);
-
-err_utmi_p1_fck:
- clk_put(utmi_p1_fck);
-
-err_add_hcd:
- usb_remove_hcd(hcd);
-
err_pm_runtime:
disable_put_regulator(pdata);
pm_runtime_put_sync(dev);
@@ -454,14 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
iounmap(hcd->regs);
usb_put_hcd(hcd);
- clk_put(utmi_p1_fck);
- clk_put(utmi_p2_fck);
- clk_put(xclk60mhsp1_ck);
- clk_put(xclk60mhsp2_ck);
- clk_put(usbhost_p1_fck);
- clk_put(usbhost_p2_fck);
- clk_put(init_60m_fclk);
-
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -532,7 +367,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
- .hub_control = omap_ehci_hub_control,
+ .hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 9408da83eaf1..8892d3642cef 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -283,6 +283,10 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
err4:
usb_put_hcd(hcd);
err3:
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
iounmap(regs);
err2:
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index 58c96bd50d22..0c9e43cfaff5 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -40,7 +40,7 @@ static int ehci_sead3_setup(struct usb_hcd *hcd)
ehci->need_io_watchdog = 0;
/* Set burst length to 16 words. */
- ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]);
+ ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
return ret;
}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 950e95efa381..26dedb30ad0b 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -799,11 +799,12 @@ static int tegra_ehci_remove(struct platform_device *pdev)
#endif
usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
tegra_usb_phy_close(tegra->phy);
iounmap(hcd->regs);
+ usb_put_hcd(hcd);
+
clk_disable_unprepare(tegra->clk);
clk_put(tegra->clk);
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 2ed112d3e159..256326322cfd 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -543,12 +543,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
short_ok ? "" : "not_",
PTD_GET_COUNT(ptd), ep->maxpacket, len);
+ /* save the data underrun error code for later and
+ * proceed with the status stage
+ */
+ urb->actual_length += PTD_GET_COUNT(ptd);
if (usb_pipecontrol(urb->pipe)) {
ep->nextpid = USB_PID_ACK;
- /* save the data underrun error code for later and
- * proceed with the status stage
- */
- urb->actual_length += PTD_GET_COUNT(ptd);
BUG_ON(urb->actual_length > urb->transfer_buffer_length);
if (urb->status == -EINPROGRESS)
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index e7d75d295988..f8b2d91851f7 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -403,8 +403,6 @@ err0:
static inline void
usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-
usb_remove_hcd(hcd);
if (!IS_ERR_OR_NULL(hcd->phy)) {
(void) otg_set_host(hcd->phy->otg, 0);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index df0828cb2aa3..c5e9e4a76f14 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -800,6 +800,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
}
EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+
/**
* PCI Quirks for xHCI.
*
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index b1002a8ef96f..ef004a5de20f 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -10,6 +10,7 @@ void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index c868be65e763..4c634eb56358 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -95,9 +95,7 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597)
int i = 0;
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
clk_enable(r8a66597->clk);
-#endif
do {
r8a66597_write(r8a66597, SCKE, SYSCFG0);
tmp = r8a66597_read(r8a66597, SYSCFG0);
@@ -141,9 +139,7 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597)
udelay(1);
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
clk_disable(r8a66597->clk);
-#endif
} else {
r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
@@ -2406,19 +2402,15 @@ static int __devexit r8a66597_remove(struct platform_device *pdev)
del_timer_sync(&r8a66597->rh_timer);
usb_remove_hcd(hcd);
iounmap(r8a66597->reg);
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
-#endif
usb_put_hcd(hcd);
return 0;
}
static int __devinit r8a66597_probe(struct platform_device *pdev)
{
-#ifdef CONFIG_HAVE_CLK
char clk_name[8];
-#endif
struct resource *res = NULL, *ires;
int irq = -1;
void __iomem *reg = NULL;
@@ -2482,7 +2474,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
if (r8a66597->pdata->on_chip) {
-#ifdef CONFIG_HAVE_CLK
snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
r8a66597->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(r8a66597->clk)) {
@@ -2491,7 +2482,6 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
ret = PTR_ERR(r8a66597->clk);
goto clean_up2;
}
-#endif
r8a66597->max_root_hub = 1;
} else
r8a66597->max_root_hub = 2;
@@ -2531,11 +2521,9 @@ static int __devinit r8a66597_probe(struct platform_device *pdev)
return 0;
clean_up3:
-#ifdef CONFIG_HAVE_CLK
if (r8a66597->pdata->on_chip)
clk_put(r8a66597->clk);
clean_up2:
-#endif
usb_put_hcd(hcd);
clean_up:
diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h
index f28782d20eef..672cea307abb 100644
--- a/drivers/usb/host/r8a66597.h
+++ b/drivers/usb/host/r8a66597.h
@@ -26,10 +26,7 @@
#ifndef __R8A66597_H__
#define __R8A66597_H__
-#ifdef CONFIG_HAVE_CLK
#include <linux/clk.h>
-#endif
-
#include <linux/usb/r8a66597.h>
#define R8A66597_MAX_NUM_PIPE 10
@@ -113,9 +110,7 @@ struct r8a66597_root_hub {
struct r8a66597 {
spinlock_t lock;
void __iomem *reg;
-#ifdef CONFIG_HAVE_CLK
struct clk *clk;
-#endif
struct r8a66597_platdata *pdata;
struct r8a66597_device device0;
struct r8a66597_root_hub root_hub[R8A66597_MAX_ROOT_HUB];
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 18b231b0c5d3..9bfd4ca1153c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -94,11 +94,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;
+ /*
+ * PPT desktop boards DH77EB and DH77DF will power back on after
+ * a few seconds of being shutdown. The fix for this is to
+ * switch the ports from xHCI to EHCI on shutdown. We can't use
+ * DMI information to find those particular boards (since each
+ * vendor will change the board name), so we have to key off all
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8275645889da..643c2f3f3e73 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -145,29 +145,37 @@ static void next_trb(struct xhci_hcd *xhci,
*/
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- union xhci_trb *next;
unsigned long long addr;
ring->deq_updates++;
- /* If this is not event ring, there is one more usable TRB */
+ /*
+ * If this is not event ring, and the dequeue pointer
+ * is not on a link TRB, there is one more usable TRB
+ */
if (ring->type != TYPE_EVENT &&
!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
ring->num_trbs_free++;
- next = ++(ring->dequeue);
- /* Update the dequeue pointer further if that was a link TRB or we're at
- * the end of an event ring segment (which doesn't have link TRBS)
- */
- while (last_trb(xhci, ring, ring->deq_seg, next)) {
- if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
- ring, ring->deq_seg, next)) {
- ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ do {
+ /*
+ * Update the dequeue pointer further if that was a link TRB or
+ * we're at the end of an event ring segment (which doesn't have
+ * link TRBS)
+ */
+ if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
+ if (ring->type == TYPE_EVENT &&
+ last_trb_on_last_seg(xhci, ring,
+ ring->deq_seg, ring->dequeue)) {
+ ring->cycle_state = (ring->cycle_state ? 0 : 1);
+ }
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ } else {
+ ring->dequeue++;
}
- ring->deq_seg = ring->deq_seg->next;
- ring->dequeue = ring->deq_seg->trbs;
- next = ring->dequeue;
- }
+ } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+
addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
@@ -2073,8 +2081,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
trb_comp_code = COMP_SHORT_TX;
else
- xhci_warn(xhci, "WARN Successful completion on short TX: "
- "needs XHCI_TRUST_TX_LENGTH quirk?\n");
+ xhci_warn_ratelimited(xhci,
+ "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n");
case COMP_SHORT_TX:
break;
case COMP_STOP:
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7648b2d4b268..c59d5b5b6c7d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -166,7 +166,7 @@ int xhci_reset(struct xhci_hcd *xhci)
xhci_writel(xhci, command, &xhci->op_regs->command);
ret = handshake(xhci, &xhci->op_regs->command,
- CMD_RESET, 0, 250 * 1000);
+ CMD_RESET, 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -175,7 +175,8 @@ int xhci_reset(struct xhci_hcd *xhci)
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
- ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+ ret = handshake(xhci, &xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
for (i = 0; i < 2; ++i) {
xhci->bus_state[i].port_c_suspend = 0;
@@ -658,6 +659,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
spin_unlock_irq(&xhci->lock);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 55c0785810c9..c713256297ac 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1494,6 +1494,7 @@ struct xhci_hcd {
#define XHCI_TRUST_TX_LENGTH (1 << 10)
#define XHCI_LPM_SUPPORT (1 << 11)
#define XHCI_INTEL_HOST (1 << 12)
+#define XHCI_SPURIOUS_REBOOT (1 << 13)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
@@ -1537,6 +1538,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+#define xhci_warn_ratelimited(xhci, fmt, args...) \
+ dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
/* TODO: copied from ehci.h - can be refactored? */
/* xHCI spec says all registers are little endian */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index ff08015b230c..ae794b90766b 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -232,7 +232,7 @@ wraperr:
return err;
}
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index ef0c3f9f0947..6259f0d99324 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -8,7 +8,7 @@ config USB_MUSB_HDRC
tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
depends on USB && USB_GADGET
select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
- select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX)
+ select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX)
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
@@ -57,7 +57,7 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
- depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX
+ depends on SOC_TI81XX || SOC_AM33XX
config USB_MUSB_BLACKFIN
tristate "Blackfin"
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index dbcdeea30f09..586105b55a7c 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -81,14 +81,6 @@ struct musb_ep;
#define is_peripheral_active(m) (!(m)->is_host)
#define is_host_active(m) ((m)->is_host)
-#ifndef CONFIG_HAVE_CLK
-/* Dummy stub for clk framework */
-#define clk_get(dev, id) NULL
-#define clk_put(clock) do {} while (0)
-#define clk_enable(clock) do {} while (0)
-#define clk_disable(clock) do {} while (0)
-#endif
-
#ifdef CONFIG_PROC_FS
#include <linux/fs.h>
#define MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 217808d9fbe1..494772fc9e23 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -479,9 +479,9 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
ret = -ENODEV;
goto err0;
}
- strcpy((u8 *)res->name, "mc");
res->parent = NULL;
resources[1] = *res;
+ resources[1].name = "mc";
/* allocate the child platform device */
musb = platform_device_alloc("musb-hdrc", -1);
@@ -566,27 +566,28 @@ static int __devinit dsps_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, glue);
- /* create the child platform device for first instances of musb */
- ret = dsps_create_musb_pdev(glue, 0);
- if (ret != 0) {
- dev_err(&pdev->dev, "failed to create child pdev\n");
- goto err2;
- }
-
/* enable the usbss clocks */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
+ goto err2;
+ }
+
+ /* create the child platform device for first instances of musb */
+ ret = dsps_create_musb_pdev(glue, 0);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to create child pdev\n");
goto err3;
}
return 0;
err3:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
err2:
+ pm_runtime_disable(&pdev->dev);
kfree(glue->wrp);
err1:
kfree(glue);
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 575fc815c932..7a88667742b6 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1576,7 +1576,6 @@ isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
isp->irq_type = IRQF_TRIGGER_FALLING;
}
- isp->irq_type |= IRQF_SAMPLE_RANDOM;
status = request_irq(i2c->irq, isp1301_irq,
isp->irq_type, DRIVER_NAME, isp);
if (status < 0) {
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 8c9bb1ad3069..681da06170c2 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -603,12 +603,12 @@ static int usbhsc_resume(struct device *dev)
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- usbhs_platform_call(priv, phy_reset, pdev);
-
if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, 1);
- usbhsc_hotplug(priv);
+ usbhs_platform_call(priv, phy_reset, pdev);
+
+ usbhsc_drvcllbck_notify_hotplug(pdev);
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1834cf50888c..9b69a1323294 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1266,6 +1266,12 @@ static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return ret;
}
+static int usbhsh_bus_nop(struct usb_hcd *hcd)
+{
+ /* nothing to do */
+ return 0;
+}
+
static struct hc_driver usbhsh_driver = {
.description = usbhsh_hcd_name,
.hcd_priv_size = sizeof(struct usbhsh_hpriv),
@@ -1290,6 +1296,8 @@ static struct hc_driver usbhsh_driver = {
*/
.hub_status_data = usbhsh_hub_status_data,
.hub_control = usbhsh_hub_control,
+ .bus_suspend = usbhsh_bus_nop,
+ .bus_resume = usbhsh_bus_nop,
};
/*
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index f398d1e34474..c15f2e7cefc7 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -61,18 +61,23 @@ static int usb_serial_device_probe(struct device *dev)
goto exit;
}
+ /* make sure suspend/resume doesn't race against port_probe */
+ retval = usb_autopm_get_interface(port->serial->interface);
+ if (retval)
+ goto exit;
+
driver = port->serial->type;
if (driver->port_probe) {
retval = driver->port_probe(port);
if (retval)
- goto exit;
+ goto exit_with_autopm;
}
retval = device_create_file(dev, &dev_attr_port_number);
if (retval) {
if (driver->port_remove)
retval = driver->port_remove(port);
- goto exit;
+ goto exit_with_autopm;
}
minor = port->number;
@@ -81,6 +86,8 @@ static int usb_serial_device_probe(struct device *dev)
"%s converter now attached to ttyUSB%d\n",
driver->description, minor);
+exit_with_autopm:
+ usb_autopm_put_interface(port->serial->interface);
exit:
return retval;
}
@@ -96,6 +103,9 @@ static int usb_serial_device_remove(struct device *dev)
if (!port)
return -ENODEV;
+ /* make sure suspend/resume doesn't race against port_remove */
+ usb_autopm_get_interface(port->serial->interface);
+
device_remove_file(&port->dev, &dev_attr_port_number);
driver = port->serial->type;
@@ -107,6 +117,7 @@ static int usb_serial_device_remove(struct device *dev)
dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
driver->description, minor);
+ usb_autopm_put_interface(port->serial->interface);
return retval;
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bc912e5a3beb..5620db6469e5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -811,6 +811,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
{ USB_DEVICE(PI_VID, PI_E861_PID) },
+ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5661c7e2d415..5dd96ca6c380 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -795,6 +795,13 @@
#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
/*
+ * Kondo Kagaku Co.Ltd.
+ * http://www.kondo-robot.com/EN
+ */
+#define KONDO_VID 0x165c
+#define KONDO_USB_SERIAL_PID 0x0002
+
+/*
* Bayer Ascensia Contour blood glucose meter USB-converter cable.
* http://winglucofacts.com/cables/
*/
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 5811d34b6c6b..2cb30c535839 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -227,7 +227,6 @@ static void ipw_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *data = usb_get_serial_data(serial);
- usb_wwan_release(serial);
usb_set_serial_data(serial, NULL);
kfree(data);
}
@@ -309,12 +308,12 @@ static struct usb_serial_driver ipw_device = {
.description = "IPWireless converter",
.id_table = id_table,
.num_ports = 1,
- .disconnect = usb_wwan_disconnect,
.open = ipw_open,
.close = ipw_close,
.probe = ipw_probe,
.attach = usb_wwan_startup,
.release = ipw_release,
+ .port_remove = usb_wwan_port_remove,
.dtr_rts = ipw_dtr_rts,
.write = usb_wwan_write,
};
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 57eca2448424..2f6da1e89bfa 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -82,8 +82,7 @@
* Defines used for sending commands to port
*/
-#define WAIT_FOR_EVER (HZ * 0) /* timeout urb is wait for ever */
-#define MOS_WDR_TIMEOUT (HZ * 5) /* default urb timeout */
+#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */
#define MOS_PORT1 0x0200
#define MOS_PORT2 0x0300
@@ -1232,9 +1231,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
- for (i = 0; i < NUM_URBS; ++i)
- if (mos7840_port->busy[i])
- chars += URB_TRANSFER_BUFFER_SIZE;
+ for (i = 0; i < NUM_URBS; ++i) {
+ if (mos7840_port->busy[i]) {
+ struct urb *urb = mos7840_port->write_urb_pool[i];
+ chars += urb->transfer_buffer_length;
+ }
+ }
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
@@ -1344,7 +1346,7 @@ static void mos7840_close(struct usb_serial_port *port)
static void mos7840_block_until_chase_response(struct tty_struct *tty,
struct moschip_port *mos7840_port)
{
- int timeout = 1 * HZ;
+ int timeout = msecs_to_jiffies(1000);
int wait = 10;
int count;
@@ -2672,7 +2674,7 @@ static int mos7840_startup(struct usb_serial *serial)
/* setting configuration feature to one */
usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ);
+ (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT);
return 0;
error:
for (/* nothing */; i >= 0; i--) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 08ff9b862049..cc40f47ecea1 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb);
#define OPTION_PRODUCT_GTM380_MODEM 0x7201
#define HUAWEI_VENDOR_ID 0x12D1
-#define HUAWEI_PRODUCT_E600 0x1001
-#define HUAWEI_PRODUCT_E220 0x1003
-#define HUAWEI_PRODUCT_E220BIS 0x1004
-#define HUAWEI_PRODUCT_E1401 0x1401
-#define HUAWEI_PRODUCT_E1402 0x1402
-#define HUAWEI_PRODUCT_E1403 0x1403
-#define HUAWEI_PRODUCT_E1404 0x1404
-#define HUAWEI_PRODUCT_E1405 0x1405
-#define HUAWEI_PRODUCT_E1406 0x1406
-#define HUAWEI_PRODUCT_E1407 0x1407
-#define HUAWEI_PRODUCT_E1408 0x1408
-#define HUAWEI_PRODUCT_E1409 0x1409
-#define HUAWEI_PRODUCT_E140A 0x140A
-#define HUAWEI_PRODUCT_E140B 0x140B
-#define HUAWEI_PRODUCT_E140C 0x140C
-#define HUAWEI_PRODUCT_E140D 0x140D
-#define HUAWEI_PRODUCT_E140E 0x140E
-#define HUAWEI_PRODUCT_E140F 0x140F
-#define HUAWEI_PRODUCT_E1410 0x1410
-#define HUAWEI_PRODUCT_E1411 0x1411
-#define HUAWEI_PRODUCT_E1412 0x1412
-#define HUAWEI_PRODUCT_E1413 0x1413
-#define HUAWEI_PRODUCT_E1414 0x1414
-#define HUAWEI_PRODUCT_E1415 0x1415
-#define HUAWEI_PRODUCT_E1416 0x1416
-#define HUAWEI_PRODUCT_E1417 0x1417
-#define HUAWEI_PRODUCT_E1418 0x1418
-#define HUAWEI_PRODUCT_E1419 0x1419
-#define HUAWEI_PRODUCT_E141A 0x141A
-#define HUAWEI_PRODUCT_E141B 0x141B
-#define HUAWEI_PRODUCT_E141C 0x141C
-#define HUAWEI_PRODUCT_E141D 0x141D
-#define HUAWEI_PRODUCT_E141E 0x141E
-#define HUAWEI_PRODUCT_E141F 0x141F
-#define HUAWEI_PRODUCT_E1420 0x1420
-#define HUAWEI_PRODUCT_E1421 0x1421
-#define HUAWEI_PRODUCT_E1422 0x1422
-#define HUAWEI_PRODUCT_E1423 0x1423
-#define HUAWEI_PRODUCT_E1424 0x1424
-#define HUAWEI_PRODUCT_E1425 0x1425
-#define HUAWEI_PRODUCT_E1426 0x1426
-#define HUAWEI_PRODUCT_E1427 0x1427
-#define HUAWEI_PRODUCT_E1428 0x1428
-#define HUAWEI_PRODUCT_E1429 0x1429
-#define HUAWEI_PRODUCT_E142A 0x142A
-#define HUAWEI_PRODUCT_E142B 0x142B
-#define HUAWEI_PRODUCT_E142C 0x142C
-#define HUAWEI_PRODUCT_E142D 0x142D
-#define HUAWEI_PRODUCT_E142E 0x142E
-#define HUAWEI_PRODUCT_E142F 0x142F
-#define HUAWEI_PRODUCT_E1430 0x1430
-#define HUAWEI_PRODUCT_E1431 0x1431
-#define HUAWEI_PRODUCT_E1432 0x1432
-#define HUAWEI_PRODUCT_E1433 0x1433
-#define HUAWEI_PRODUCT_E1434 0x1434
-#define HUAWEI_PRODUCT_E1435 0x1435
-#define HUAWEI_PRODUCT_E1436 0x1436
-#define HUAWEI_PRODUCT_E1437 0x1437
-#define HUAWEI_PRODUCT_E1438 0x1438
-#define HUAWEI_PRODUCT_E1439 0x1439
-#define HUAWEI_PRODUCT_E143A 0x143A
-#define HUAWEI_PRODUCT_E143B 0x143B
-#define HUAWEI_PRODUCT_E143C 0x143C
-#define HUAWEI_PRODUCT_E143D 0x143D
-#define HUAWEI_PRODUCT_E143E 0x143E
-#define HUAWEI_PRODUCT_E143F 0x143F
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
-#define HUAWEI_PRODUCT_E14AC 0x14AC
-#define HUAWEI_PRODUCT_K3806 0x14AE
#define HUAWEI_PRODUCT_K4605 0x14C6
-#define HUAWEI_PRODUCT_K5005 0x14C8
-#define HUAWEI_PRODUCT_K3770 0x14C9
-#define HUAWEI_PRODUCT_K3771 0x14CA
-#define HUAWEI_PRODUCT_K4510 0x14CB
-#define HUAWEI_PRODUCT_K4511 0x14CC
-#define HUAWEI_PRODUCT_ETS1220 0x1803
-#define HUAWEI_PRODUCT_E353 0x1506
-#define HUAWEI_PRODUCT_E173S 0x1C05
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+
+
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -943,6 +886,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1297,8 +1242,8 @@ static struct usb_serial_driver option_1port_device = {
.tiocmset = usb_wwan_tiocmset,
.ioctl = usb_wwan_ioctl,
.attach = usb_wwan_startup,
- .disconnect = usb_wwan_disconnect,
.release = option_release,
+ .port_remove = usb_wwan_port_remove,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
@@ -1414,8 +1359,6 @@ static void option_release(struct usb_serial *serial)
struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial);
struct option_private *priv = intfdata->private;
- usb_wwan_release(serial);
-
kfree(priv);
kfree(intfdata);
}
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 8d103019d6aa..bfd50779f0c9 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -199,43 +199,49 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
/* default to enabling interface */
altsetting = 0;
- switch (ifnum) {
- /* Composite mode; don't bind to the QMI/net interface as that
- * gets handled by other drivers.
- */
+ /* Composite mode; don't bind to the QMI/net interface as that
+ * gets handled by other drivers.
+ */
+
+ if (is_gobi1k) {
/* Gobi 1K USB layout:
* 0: serial port (doesn't respond)
* 1: serial port (doesn't respond)
* 2: AT-capable modem port
* 3: QMI/net
- *
- * Gobi 2K+ USB layout:
+ */
+ if (ifnum == 2)
+ dev_dbg(dev, "Modem port found\n");
+ else
+ altsetting = -1;
+ } else {
+ /* Gobi 2K+ USB layout:
* 0: QMI/net
* 1: DM/DIAG (use libqcdm from ModemManager for communication)
* 2: AT-capable modem port
* 3: NMEA
*/
-
- case 1:
- if (is_gobi1k)
+ switch (ifnum) {
+ case 0:
+ /* Don't claim the QMI/net interface */
altsetting = -1;
- else
+ break;
+ case 1:
dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n");
- break;
- case 2:
- dev_dbg(dev, "Modem port found\n");
- break;
- case 3:
- if (is_gobi1k)
- altsetting = -1;
- else
+ break;
+ case 2:
+ dev_dbg(dev, "Modem port found\n");
+ break;
+ case 3:
/*
* NMEA (serial line 9600 8N1)
* # echo "\$GPS_START" > /dev/ttyUSBx
* # echo "\$GPS_STOP" > /dev/ttyUSBx
*/
dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n");
+ break;
+ }
}
done:
@@ -262,8 +268,7 @@ static void qc_release(struct usb_serial *serial)
{
struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
- /* Call usb_wwan release & free the private data allocated in qcprobe */
- usb_wwan_release(serial);
+ /* Free the private data allocated in qcprobe */
usb_set_serial_data(serial, NULL);
kfree(priv);
}
@@ -283,8 +288,8 @@ static struct usb_serial_driver qcdevice = {
.write_room = usb_wwan_write_room,
.chars_in_buffer = usb_wwan_chars_in_buffer,
.attach = usb_wwan_startup,
- .disconnect = usb_wwan_disconnect,
.release = qc_release,
+ .port_remove = usb_wwan_port_remove,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
.resume = usb_wwan_resume,
diff --git a/drivers/usb/serial/usb-wwan.h b/drivers/usb/serial/usb-wwan.h
index c47b6ec03063..1f034d2397c6 100644
--- a/drivers/usb/serial/usb-wwan.h
+++ b/drivers/usb/serial/usb-wwan.h
@@ -9,8 +9,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on);
extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port);
extern void usb_wwan_close(struct usb_serial_port *port);
extern int usb_wwan_startup(struct usb_serial *serial);
-extern void usb_wwan_disconnect(struct usb_serial *serial);
-extern void usb_wwan_release(struct usb_serial *serial);
+extern int usb_wwan_port_remove(struct usb_serial_port *port);
extern int usb_wwan_write_room(struct tty_struct *tty);
extern void usb_wwan_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index f35971dff4a5..6855d5ed0331 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -565,62 +565,52 @@ bail_out_error:
}
EXPORT_SYMBOL(usb_wwan_startup);
-static void stop_read_write_urbs(struct usb_serial *serial)
+int usb_wwan_port_remove(struct usb_serial_port *port)
{
- int i, j;
- struct usb_serial_port *port;
+ int i;
struct usb_wwan_port_private *portdata;
- /* Stop reading/writing urbs */
- for (i = 0; i < serial->num_ports; ++i) {
- port = serial->port[i];
- portdata = usb_get_serial_port_data(port);
- for (j = 0; j < N_IN_URB; j++)
- usb_kill_urb(portdata->in_urbs[j]);
- for (j = 0; j < N_OUT_URB; j++)
- usb_kill_urb(portdata->out_urbs[j]);
+ portdata = usb_get_serial_port_data(port);
+ usb_set_serial_port_data(port, NULL);
+
+ /* Stop reading/writing urbs and free them */
+ for (i = 0; i < N_IN_URB; i++) {
+ usb_kill_urb(portdata->in_urbs[i]);
+ usb_free_urb(portdata->in_urbs[i]);
+ free_page((unsigned long)portdata->in_buffer[i]);
+ }
+ for (i = 0; i < N_OUT_URB; i++) {
+ usb_kill_urb(portdata->out_urbs[i]);
+ usb_free_urb(portdata->out_urbs[i]);
+ kfree(portdata->out_buffer[i]);
}
-}
-void usb_wwan_disconnect(struct usb_serial *serial)
-{
- stop_read_write_urbs(serial);
+ /* Now free port private data */
+ kfree(portdata);
+ return 0;
}
-EXPORT_SYMBOL(usb_wwan_disconnect);
+EXPORT_SYMBOL(usb_wwan_port_remove);
-void usb_wwan_release(struct usb_serial *serial)
+#ifdef CONFIG_PM
+static void stop_read_write_urbs(struct usb_serial *serial)
{
int i, j;
struct usb_serial_port *port;
struct usb_wwan_port_private *portdata;
- /* Now free them */
+ /* Stop reading/writing urbs */
for (i = 0; i < serial->num_ports; ++i) {
port = serial->port[i];
portdata = usb_get_serial_port_data(port);
-
- for (j = 0; j < N_IN_URB; j++) {
- usb_free_urb(portdata->in_urbs[j]);
- free_page((unsigned long)
- portdata->in_buffer[j]);
- portdata->in_urbs[j] = NULL;
- }
- for (j = 0; j < N_OUT_URB; j++) {
- usb_free_urb(portdata->out_urbs[j]);
- kfree(portdata->out_buffer[j]);
- portdata->out_urbs[j] = NULL;
- }
- }
-
- /* Now free per port private data */
- for (i = 0; i < serial->num_ports; i++) {
- port = serial->port[i];
- kfree(usb_get_serial_port_data(port));
+ if (!portdata)
+ continue;
+ for (j = 0; j < N_IN_URB; j++)
+ usb_kill_urb(portdata->in_urbs[j]);
+ for (j = 0; j < N_OUT_URB; j++)
+ usb_kill_urb(portdata->out_urbs[j]);
}
}
-EXPORT_SYMBOL(usb_wwan_release);
-#ifdef CONFIG_PM
int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
{
struct usb_wwan_intf_private *intfdata = serial->private;
@@ -712,7 +702,7 @@ int usb_wwan_resume(struct usb_serial *serial)
/* skip closed ports */
spin_lock_irq(&intfdata->susp_lock);
- if (!portdata->opened) {
+ if (!portdata || !portdata->opened) {
spin_unlock_irq(&intfdata->susp_lock);
continue;
}
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
new file mode 100644
index 000000000000..7cd5dec0abd1
--- /dev/null
+++ b/drivers/vfio/Kconfig
@@ -0,0 +1,16 @@
+config VFIO_IOMMU_TYPE1
+ tristate
+ depends on VFIO
+ default n
+
+menuconfig VFIO
+ tristate "VFIO Non-Privileged userspace driver framework"
+ depends on IOMMU_API
+ select VFIO_IOMMU_TYPE1 if X86
+ help
+ VFIO provides a framework for secure userspace device drivers.
+ See Documentation/vfio.txt for more details.
+
+ If you don't know what to do here, say N.
+
+source "drivers/vfio/pci/Kconfig"
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
new file mode 100644
index 000000000000..2398d4a0e38b
--- /dev/null
+++ b/drivers/vfio/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_VFIO) += vfio.o
+obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
+obj-$(CONFIG_VFIO_PCI) += pci/
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
new file mode 100644
index 000000000000..5980758563eb
--- /dev/null
+++ b/drivers/vfio/pci/Kconfig
@@ -0,0 +1,8 @@
+config VFIO_PCI
+ tristate "VFIO support for PCI devices"
+ depends on VFIO && PCI && EVENTFD
+ help
+ Support for the PCI VFIO bus driver. This is required to make
+ use of PCI drivers using the VFIO framework.
+
+ If you don't know what to do here, say N.
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
new file mode 100644
index 000000000000..131079255fd9
--- /dev/null
+++ b/drivers/vfio/pci/Makefile
@@ -0,0 +1,4 @@
+
+vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
+
+obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
new file mode 100644
index 000000000000..6968b7232232
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/device.h>
+#include <linux/eventfd.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+#include "vfio_pci_private.h"
+
+#define DRIVER_VERSION "0.2"
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "VFIO PCI - User Level meta-driver"
+
+static bool nointxmask;
+module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nointxmask,
+ "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");
+
+static int vfio_pci_enable(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+ u16 cmd;
+ u8 msix_pos;
+
+ vdev->reset_works = (pci_reset_function(pdev) == 0);
+ pci_save_state(pdev);
+ vdev->pci_saved_state = pci_store_saved_state(pdev);
+ if (!vdev->pci_saved_state)
+ pr_debug("%s: Couldn't store %s saved state\n",
+ __func__, dev_name(&pdev->dev));
+
+ ret = vfio_config_init(vdev);
+ if (ret)
+ goto out;
+
+ if (likely(!nointxmask))
+ vdev->pci_2_3 = pci_intx_mask_supported(pdev);
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
+ cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ msix_pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (msix_pos) {
+ u16 flags;
+ u32 table;
+
+ pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
+ pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);
+
+ vdev->msix_bar = table & PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
+ vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
+ } else
+ vdev->msix_bar = 0xFF;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out;
+
+ return ret;
+
+out:
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
+ vfio_config_free(vdev);
+ return ret;
+}
+
+static void vfio_pci_disable(struct vfio_pci_device *vdev)
+{
+ int bar;
+
+ pci_disable_device(vdev->pdev);
+
+ vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
+ VFIO_IRQ_SET_ACTION_TRIGGER,
+ vdev->irq_type, 0, 0, NULL);
+
+ vdev->virq_disabled = false;
+
+ vfio_config_free(vdev);
+
+ pci_reset_function(vdev->pdev);
+
+ if (pci_load_and_free_saved_state(vdev->pdev,
+ &vdev->pci_saved_state) == 0)
+ pci_restore_state(vdev->pdev);
+ else
+ pr_info("%s: Couldn't reload %s saved state\n",
+ __func__, dev_name(&vdev->pdev->dev));
+
+ for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+ if (!vdev->barmap[bar])
+ continue;
+ pci_iounmap(vdev->pdev, vdev->barmap[bar]);
+ pci_release_selected_regions(vdev->pdev, 1 << bar);
+ vdev->barmap[bar] = NULL;
+ }
+}
+
+static void vfio_pci_release(void *device_data)
+{
+ struct vfio_pci_device *vdev = device_data;
+
+ if (atomic_dec_and_test(&vdev->refcnt))
+ vfio_pci_disable(vdev);
+
+ module_put(THIS_MODULE);
+}
+
+static int vfio_pci_open(void *device_data)
+{
+ struct vfio_pci_device *vdev = device_data;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ if (atomic_inc_return(&vdev->refcnt) == 1) {
+ int ret = vfio_pci_enable(vdev);
+ if (ret) {
+ module_put(THIS_MODULE);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
+{
+ if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
+ u8 pin;
+ pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
+ if (pin)
+ return 1;
+
+ } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSI_FLAGS, &flags);
+
+ return 1 << (flags & PCI_MSI_FLAGS_QMASK);
+ }
+ } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
+ u8 pos;
+ u16 flags;
+
+ pos = pci_find_capability(vdev->pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(vdev->pdev,
+ pos + PCI_MSIX_FLAGS, &flags);
+
+ return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
+ }
+ }
+
+ return 0;
+}
+
+static long vfio_pci_ioctl(void *device_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_pci_device *vdev = device_data;
+ unsigned long minsz;
+
+ if (cmd == VFIO_DEVICE_GET_INFO) {
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_PCI;
+
+ if (vdev->reset_works)
+ info.flags |= VFIO_DEVICE_FLAGS_RESET;
+
+ info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_irqs = VFIO_PCI_NUM_IRQS;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct pci_dev *pdev = vdev->pdev;
+ struct vfio_region_info info;
+
+ minsz = offsetofend(struct vfio_region_info, offset);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pdev->cfg_size;
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size) {
+ info.flags = 0;
+ break;
+ }
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ if (pci_resource_flags(pdev, info.index) &
+ IORESOURCE_MEM && info.size >= PAGE_SIZE)
+ info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
+ break;
+ case VFIO_PCI_ROM_REGION_INDEX:
+ {
+ void __iomem *io;
+ size_t size;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+
+ /* Report the BAR size, not the ROM size */
+ info.size = pci_resource_len(pdev, info.index);
+ if (!info.size)
+ break;
+
+ /* Is it really there? */
+ io = pci_map_rom(pdev, &size);
+ if (!io || !size) {
+ info.size = 0;
+ break;
+ }
+ pci_unmap_rom(pdev, io);
+
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
+ return -EINVAL;
+
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+
+ info.count = vfio_pci_get_irq_count(vdev, info.index);
+
+ if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
+ info.flags |= (VFIO_IRQ_INFO_MASKABLE |
+ VFIO_IRQ_INFO_AUTOMASKED);
+ else
+ info.flags |= VFIO_IRQ_INFO_NORESIZE;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ struct vfio_irq_set hdr;
+ u8 *data = NULL;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
+ VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
+ size_t size;
+
+ if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
+ size = sizeof(uint8_t);
+ else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
+ size = sizeof(int32_t);
+ else
+ return -EINVAL;
+
+ if (hdr.argsz - minsz < hdr.count * size ||
+ hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
+ return -EINVAL;
+
+ data = kmalloc(hdr.count * size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (copy_from_user(data, (void __user *)(arg + minsz),
+ hdr.count * size)) {
+ kfree(data);
+ return -EFAULT;
+ }
+ }
+
+ mutex_lock(&vdev->igate);
+
+ ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
+ hdr.start, hdr.count, data);
+
+ mutex_unlock(&vdev->igate);
+ kfree(data);
+
+ return ret;
+
+ } else if (cmd == VFIO_DEVICE_RESET)
+ return vdev->reset_works ?
+ pci_reset_function(vdev->pdev) : -EINVAL;
+
+ return -ENOTTY;
+}
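For reference, a minimal userspace sketch of the query path served by vfio_pci_ioctl() above. The device fd and the bare-bones error handling are illustrative assumptions; in practice the fd comes from VFIO_GROUP_GET_DEVICE_FD, which is handled outside this file.

#include <sys/ioctl.h>
#include <linux/vfio.h>

static int query_device(int device)
{
	struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
	struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };

	/* Served by the VFIO_DEVICE_GET_INFO branch above */
	if (ioctl(device, VFIO_DEVICE_GET_INFO, &dev_info))
		return -1;

	/* Served by the VFIO_DEVICE_GET_IRQ_INFO branch above */
	irq_info.index = VFIO_PCI_INTX_IRQ_INDEX;
	if (ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq_info))
		return -1;

	/* dev_info.num_regions/num_irqs and irq_info.count are now valid */
	return 0;
}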
+
+static ssize_t vfio_pci_read(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
+
+ if (index >= VFIO_PCI_NUM_REGIONS)
+ return -EINVAL;
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return vfio_pci_config_readwrite(vdev, buf, count, ppos, false);
+ else if (index == VFIO_PCI_ROM_REGION_INDEX)
+ return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+ else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
+ return vfio_pci_io_readwrite(vdev, buf, count, ppos, false);
+ else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM)
+ return vfio_pci_mem_readwrite(vdev, buf, count, ppos, false);
+
+ return -EINVAL;
+}
+
+static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
+
+ if (index >= VFIO_PCI_NUM_REGIONS)
+ return -EINVAL;
+
+ if (index == VFIO_PCI_CONFIG_REGION_INDEX)
+ return vfio_pci_config_readwrite(vdev, (char __user *)buf,
+ count, ppos, true);
+ else if (index == VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+ else if (pci_resource_flags(pdev, index) & IORESOURCE_IO)
+ return vfio_pci_io_readwrite(vdev, (char __user *)buf,
+ count, ppos, true);
+ else if (pci_resource_flags(pdev, index) & IORESOURCE_MEM) {
+ return vfio_pci_mem_readwrite(vdev, (char __user *)buf,
+ count, ppos, true);
+ }
+
+ return -EINVAL;
+}
+
+static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+{
+ struct vfio_pci_device *vdev = device_data;
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned int index;
+ u64 phys_len, req_len, pgoff, req_start, phys;
+ int ret;
+
+ index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
+
+ if (vma->vm_end < vma->vm_start)
+ return -EINVAL;
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+ if (index >= VFIO_PCI_ROM_REGION_INDEX)
+ return -EINVAL;
+ if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM))
+ return -EINVAL;
+
+ phys_len = pci_resource_len(pdev, index);
+ req_len = vma->vm_end - vma->vm_start;
+ pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ req_start = pgoff << PAGE_SHIFT;
+
+ if (phys_len < PAGE_SIZE || req_start + req_len > phys_len)
+ return -EINVAL;
+
+ if (index == vdev->msix_bar) {
+ /*
+ * Disallow mmaps overlapping the MSI-X table; users don't
+ * get to touch this directly. We could find somewhere
+ * else to map the overlap, but page granularity is only
+ * a recommendation, not a requirement, so the user needs
+ * to know which bits are real. Requiring them to mmap
+ * around the table makes that clear.
+ */
+
+ /* If neither entirely above nor below, then it overlaps */
+ if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
+ req_start + req_len <= vdev->msix_offset))
+ return -EINVAL;
+ }
+
+ /*
+ * Even though we don't make use of the barmap for the mmap,
+ * we need to request the region and the barmap tracks that.
+ */
+ if (!vdev->barmap[index]) {
+ ret = pci_request_selected_regions(pdev,
+ 1 << index, "vfio-pci");
+ if (ret)
+ return ret;
+
+ vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ }
+
+ vma->vm_private_data = vdev;
+ vma->vm_flags |= (VM_IO | VM_RESERVED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ phys = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;
+
+ return remap_pfn_range(vma, vma->vm_start, phys,
+ req_len, vma->vm_page_prot);
+}
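The region index/offset encoding that vfio_pci_mmap() validates above can be consumed from userspace roughly as in the following hedged sketch; the fd name and the error handling are assumptions for illustration, with size and offset taken from VFIO_DEVICE_GET_REGION_INFO.

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

static void *map_bar0(int device)
{
	struct vfio_region_info reg = { .argsz = sizeof(reg) };
	void *map;

	reg.index = VFIO_PCI_BAR0_REGION_INDEX;
	if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg))
		return NULL;

	/* I/O port or sub-page BARs are not mmap'able; use read()/write() */
	if (!(reg.flags & VFIO_REGION_INFO_FLAG_MMAP))
		return NULL;

	map = mmap(NULL, reg.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   device, (off_t)reg.offset);
	return map == MAP_FAILED ? NULL : map;
}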
+
+static const struct vfio_device_ops vfio_pci_ops = {
+ .name = "vfio-pci",
+ .open = vfio_pci_open,
+ .release = vfio_pci_release,
+ .ioctl = vfio_pci_ioctl,
+ .read = vfio_pci_read,
+ .write = vfio_pci_write,
+ .mmap = vfio_pci_mmap,
+};
+
+static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ u8 type;
+ struct vfio_pci_device *vdev;
+ struct iommu_group *group;
+ int ret;
+
+ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
+ if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+ return -EINVAL;
+
+ group = iommu_group_get(&pdev->dev);
+ if (!group)
+ return -EINVAL;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ iommu_group_put(group);
+ return -ENOMEM;
+ }
+
+ vdev->pdev = pdev;
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ mutex_init(&vdev->igate);
+ spin_lock_init(&vdev->irqlock);
+ atomic_set(&vdev->refcnt, 0);
+
+ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
+ if (ret) {
+ iommu_group_put(group);
+ kfree(vdev);
+ }
+
+ return ret;
+}
+
+static void vfio_pci_remove(struct pci_dev *pdev)
+{
+ struct vfio_pci_device *vdev;
+
+ vdev = vfio_del_group_dev(&pdev->dev);
+ if (!vdev)
+ return;
+
+ iommu_group_put(pdev->dev.iommu_group);
+ kfree(vdev);
+}
+
+static struct pci_driver vfio_pci_driver = {
+ .name = "vfio-pci",
+ .id_table = NULL, /* only dynamic ids */
+ .probe = vfio_pci_probe,
+ .remove = vfio_pci_remove,
+};
+
+static void __exit vfio_pci_cleanup(void)
+{
+ pci_unregister_driver(&vfio_pci_driver);
+ vfio_pci_virqfd_exit();
+ vfio_pci_uninit_perm_bits();
+}
+
+static int __init vfio_pci_init(void)
+{
+ int ret;
+
+ /* Allocate shared config space permission data used by all devices */
+ ret = vfio_pci_init_perm_bits();
+ if (ret)
+ return ret;
+
+ /* Start the virqfd cleanup handler */
+ ret = vfio_pci_virqfd_init();
+ if (ret)
+ goto out_virqfd;
+
+ /* Register and scan for devices */
+ ret = pci_register_driver(&vfio_pci_driver);
+ if (ret)
+ goto out_driver;
+
+ return 0;
+
+out_virqfd:
+ vfio_pci_virqfd_exit();
+out_driver:
+ vfio_pci_uninit_perm_bits();
+ return ret;
+}
+
+module_init(vfio_pci_init);
+module_exit(vfio_pci_cleanup);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
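To complete the picture, a hedged userspace sketch of arming an interrupt with an eventfd through the VFIO_DEVICE_SET_IRQS layout parsed by vfio_pci_ioctl() above; the helper name and minimal cleanup are assumptions made for illustration only.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int arm_intx(int device)
{
	struct vfio_irq_set *set;
	size_t sz = sizeof(*set) + sizeof(int32_t);
	int efd, ret;

	efd = eventfd(0, 0);
	if (efd < 0)
		return -1;

	set = malloc(sz);
	if (!set) {
		close(efd);
		return -1;
	}

	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_INTX_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(int32_t));

	/* On success, INTx assertions are signalled on 'efd' */
	ret = ioctl(device, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}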
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
new file mode 100644
index 000000000000..8b8f7d11e102
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -0,0 +1,1540 @@
+/*
+ * VFIO PCI config space virtualization
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+/*
+ * This code handles reading and writing of PCI configuration registers.
+ * This is hairy because we want to allow a lot of flexibility to the
+ * user driver, but cannot trust it with all of the config fields.
+ * Tables determine which fields can be read and written, as well as
+ * which fields are 'virtualized' - special actions and translations to
+ * make it appear to the user that he has control, when in fact things
+ * must be negotiated with the underlying OS.
+ */
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+
+#include "vfio_pci_private.h"
+
+#define PCI_CFG_SPACE_SIZE 256
+
+/* Useful "pseudo" capabilities */
+#define PCI_CAP_ID_BASIC 0
+#define PCI_CAP_ID_INVALID 0xFF
+
+#define is_bar(offset) \
+ ((offset >= PCI_BASE_ADDRESS_0 && offset < PCI_BASE_ADDRESS_5 + 4) || \
+ (offset >= PCI_ROM_ADDRESS && offset < PCI_ROM_ADDRESS + 4))
+
+/*
+ * Lengths of PCI Config Capabilities
+ * 0: Removed from the user visible capability list
+ * FF: Variable length
+ */
+static u8 pci_cap_length[] = {
+ [PCI_CAP_ID_BASIC] = PCI_STD_HEADER_SIZEOF, /* pci config header */
+ [PCI_CAP_ID_PM] = PCI_PM_SIZEOF,
+ [PCI_CAP_ID_AGP] = PCI_AGP_SIZEOF,
+ [PCI_CAP_ID_VPD] = PCI_CAP_VPD_SIZEOF,
+ [PCI_CAP_ID_SLOTID] = 0, /* bridge - don't care */
+ [PCI_CAP_ID_MSI] = 0xFF, /* 10, 14, 20, or 24 */
+ [PCI_CAP_ID_CHSWP] = 0, /* cpci - not yet */
+ [PCI_CAP_ID_PCIX] = 0xFF, /* 8 or 24 */
+ [PCI_CAP_ID_HT] = 0xFF, /* hypertransport */
+ [PCI_CAP_ID_VNDR] = 0xFF, /* variable */
+ [PCI_CAP_ID_DBG] = 0, /* debug - don't care */
+ [PCI_CAP_ID_CCRC] = 0, /* cpci - not yet */
+ [PCI_CAP_ID_SHPC] = 0, /* hotswap - not yet */
+ [PCI_CAP_ID_SSVID] = 0, /* bridge - don't care */
+ [PCI_CAP_ID_AGP3] = 0, /* AGP8x - not yet */
+ [PCI_CAP_ID_SECDEV] = 0, /* secure device not yet */
+ [PCI_CAP_ID_EXP] = 0xFF, /* 20 or 44 */
+ [PCI_CAP_ID_MSIX] = PCI_CAP_MSIX_SIZEOF,
+ [PCI_CAP_ID_SATA] = 0xFF,
+ [PCI_CAP_ID_AF] = PCI_CAP_AF_SIZEOF,
+};
+
+/*
+ * Lengths of PCIe/PCI-X Extended Config Capabilities
+ * 0: Removed or masked from the user visible capability list
+ * FF: Variable length
+ */
+static u16 pci_ext_cap_length[] = {
+ [PCI_EXT_CAP_ID_ERR] = PCI_ERR_ROOT_COMMAND,
+ [PCI_EXT_CAP_ID_VC] = 0xFF,
+ [PCI_EXT_CAP_ID_DSN] = PCI_EXT_CAP_DSN_SIZEOF,
+ [PCI_EXT_CAP_ID_PWR] = PCI_EXT_CAP_PWR_SIZEOF,
+ [PCI_EXT_CAP_ID_RCLD] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_RCILC] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_RCEC] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_MFVC] = 0xFF,
+ [PCI_EXT_CAP_ID_VC9] = 0xFF, /* same as CAP_ID_VC */
+ [PCI_EXT_CAP_ID_RCRB] = 0, /* root only - don't care */
+ [PCI_EXT_CAP_ID_VNDR] = 0xFF,
+ [PCI_EXT_CAP_ID_CAC] = 0, /* obsolete */
+ [PCI_EXT_CAP_ID_ACS] = 0xFF,
+ [PCI_EXT_CAP_ID_ARI] = PCI_EXT_CAP_ARI_SIZEOF,
+ [PCI_EXT_CAP_ID_ATS] = PCI_EXT_CAP_ATS_SIZEOF,
+ [PCI_EXT_CAP_ID_SRIOV] = PCI_EXT_CAP_SRIOV_SIZEOF,
+ [PCI_EXT_CAP_ID_MRIOV] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_MCAST] = PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF,
+ [PCI_EXT_CAP_ID_PRI] = PCI_EXT_CAP_PRI_SIZEOF,
+ [PCI_EXT_CAP_ID_AMD_XXX] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_REBAR] = 0xFF,
+ [PCI_EXT_CAP_ID_DPA] = 0xFF,
+ [PCI_EXT_CAP_ID_TPH] = 0xFF,
+ [PCI_EXT_CAP_ID_LTR] = PCI_EXT_CAP_LTR_SIZEOF,
+ [PCI_EXT_CAP_ID_SECPCI] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_PMUX] = 0, /* not yet */
+ [PCI_EXT_CAP_ID_PASID] = 0, /* not yet */
+};
+
+/*
+ * Read/Write Permission Bits - one bit for each bit in capability
+ * Any field can be read if it exists, but what is read depends on
+ * whether the field is 'virtualized', or just pass thru to the
+ * hardware. Any virtualized field is also virtualized for writes.
+ * Writes are only permitted if they have a 1 bit here.
+ */
+struct perm_bits {
+ u8 *virt; /* read/write virtual data, not hw */
+ u8 *write; /* writeable bits */
+ int (*readfn)(struct vfio_pci_device *vdev, int pos, int count,
+ struct perm_bits *perm, int offset, __le32 *val);
+ int (*writefn)(struct vfio_pci_device *vdev, int pos, int count,
+ struct perm_bits *perm, int offset, __le32 val);
+};
+
+#define NO_VIRT 0
+#define ALL_VIRT 0xFFFFFFFFU
+#define NO_WRITE 0
+#define ALL_WRITE 0xFFFFFFFFU
+
+static int vfio_user_config_read(struct pci_dev *pdev, int offset,
+ __le32 *val, int count)
+{
+ int ret = -EINVAL;
+ u32 tmp_val = 0;
+
+ switch (count) {
+ case 1:
+ {
+ u8 tmp;
+ ret = pci_user_read_config_byte(pdev, offset, &tmp);
+ tmp_val = tmp;
+ break;
+ }
+ case 2:
+ {
+ u16 tmp;
+ ret = pci_user_read_config_word(pdev, offset, &tmp);
+ tmp_val = tmp;
+ break;
+ }
+ case 4:
+ ret = pci_user_read_config_dword(pdev, offset, &tmp_val);
+ break;
+ }
+
+ *val = cpu_to_le32(tmp_val);
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int vfio_user_config_write(struct pci_dev *pdev, int offset,
+ __le32 val, int count)
+{
+ int ret = -EINVAL;
+ u32 tmp_val = le32_to_cpu(val);
+
+ switch (count) {
+ case 1:
+ ret = pci_user_write_config_byte(pdev, offset, tmp_val);
+ break;
+ case 2:
+ ret = pci_user_write_config_word(pdev, offset, tmp_val);
+ break;
+ case 4:
+ ret = pci_user_write_config_dword(pdev, offset, tmp_val);
+ break;
+ }
+
+ return pcibios_err_to_errno(ret);
+}
+
+static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ __le32 virt = 0;
+
+ memcpy(val, vdev->vconfig + pos, count);
+
+ memcpy(&virt, perm->virt + offset, count);
+
+ /* Any non-virtualized bits? */
+ if (cpu_to_le32(~0U >> (32 - (count * 8))) != virt) {
+ struct pci_dev *pdev = vdev->pdev;
+ __le32 phys_val = 0;
+ int ret;
+
+ ret = vfio_user_config_read(pdev, pos, &phys_val, count);
+ if (ret)
+ return ret;
+
+ *val = (phys_val & ~virt) | (*val & virt);
+ }
+
+ return count;
+}
+
+static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ __le32 virt = 0, write = 0;
+
+ memcpy(&write, perm->write + offset, count);
+
+ if (!write)
+ return count; /* drop, no writable bits */
+
+ memcpy(&virt, perm->virt + offset, count);
+
+ /* Virtualized and writable bits go to vconfig */
+ if (write & virt) {
+ __le32 virt_val = 0;
+
+ memcpy(&virt_val, vdev->vconfig + pos, count);
+
+ virt_val &= ~(write & virt);
+ virt_val |= (val & (write & virt));
+
+ memcpy(vdev->vconfig + pos, &virt_val, count);
+ }
+
+ /* Non-virtualized and writable bits go to hardware */
+ if (write & ~virt) {
+ struct pci_dev *pdev = vdev->pdev;
+ __le32 phys_val = 0;
+ int ret;
+
+ ret = vfio_user_config_read(pdev, pos, &phys_val, count);
+ if (ret)
+ return ret;
+
+ phys_val &= ~(write & ~virt);
+ phys_val |= (val & (write & ~virt));
+
+ ret = vfio_user_config_write(pdev, pos, phys_val, count);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+
+/* Allow direct read from hardware, except for capability next pointer */
+static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ int ret;
+
+ ret = vfio_user_config_read(vdev->pdev, pos, val, count);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (pos >= PCI_CFG_SPACE_SIZE) { /* Extended cap header mangling */
+ if (offset < 4)
+ memcpy(val, vdev->vconfig + pos, count);
+ } else if (pos >= PCI_STD_HEADER_SIZEOF) { /* Std cap mangling */
+ if (offset == PCI_CAP_LIST_ID && count > 1)
+ memcpy(val, vdev->vconfig + pos,
+ min(PCI_CAP_FLAGS, count));
+ else if (offset == PCI_CAP_LIST_NEXT)
+ memcpy(val, vdev->vconfig + pos, 1);
+ }
+
+ return count;
+}
+
+static int vfio_direct_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ int ret;
+
+ ret = vfio_user_config_write(vdev->pdev, pos, val, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/* Default all regions to read-only, no-virtualization */
+static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = {
+ [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
+};
+static struct perm_bits ecap_perms[PCI_EXT_CAP_ID_MAX + 1] = {
+ [0 ... PCI_EXT_CAP_ID_MAX] = { .readfn = vfio_direct_config_read }
+};
+
+static void free_perm_bits(struct perm_bits *perm)
+{
+ kfree(perm->virt);
+ kfree(perm->write);
+ perm->virt = NULL;
+ perm->write = NULL;
+}
+
+static int alloc_perm_bits(struct perm_bits *perm, int size)
+{
+ /*
+ * Round up all permission bits to the next dword, this lets us
+ * ignore whether a read/write exceeds the defined capability
+ * structure. We can do this because:
+ * - Standard config space is already dword aligned
+ * - Capabilities are all dword aligned (bits 0:1 of next reserved)
+ * - Express capabilities defined as dword aligned
+ */
+ size = round_up(size, 4);
+
+ /*
+ * Zero state is
+ * - All Readable, None Writeable, None Virtualized
+ */
+ perm->virt = kzalloc(size, GFP_KERNEL);
+ perm->write = kzalloc(size, GFP_KERNEL);
+ if (!perm->virt || !perm->write) {
+ free_perm_bits(perm);
+ return -ENOMEM;
+ }
+
+ perm->readfn = vfio_default_config_read;
+ perm->writefn = vfio_default_config_write;
+
+ return 0;
+}
+
+/*
+ * Helper functions for filling in permission tables
+ */
+static inline void p_setb(struct perm_bits *p, int off, u8 virt, u8 write)
+{
+ p->virt[off] = virt;
+ p->write[off] = write;
+}
+
+/* Handle endian-ness - pci and tables are little-endian */
+static inline void p_setw(struct perm_bits *p, int off, u16 virt, u16 write)
+{
+ *(__le16 *)(&p->virt[off]) = cpu_to_le16(virt);
+ *(__le16 *)(&p->write[off]) = cpu_to_le16(write);
+}
+
+/* Handle endian-ness - pci and tables are little-endian */
+static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
+{
+ *(__le32 *)(&p->virt[off]) = cpu_to_le32(virt);
+ *(__le32 *)(&p->write[off]) = cpu_to_le32(write);
+}
+
+/*
+ * Restore the *real* BARs after we detect a FLR or backdoor reset.
+ * (backdoor = some device specific technique that we didn't catch)
+ */
+static void vfio_bar_restore(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u32 *rbar = vdev->rbar;
+ int i;
+
+ if (pdev->is_virtfn)
+ return;
+
+ pr_info("%s: %s reset recovery - restoring bars\n",
+ __func__, dev_name(&pdev->dev));
+
+ for (i = PCI_BASE_ADDRESS_0; i <= PCI_BASE_ADDRESS_5; i += 4, rbar++)
+ pci_user_write_config_dword(pdev, i, *rbar);
+
+ pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
+}
+
+static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
+{
+ unsigned long flags = pci_resource_flags(pdev, bar);
+ u32 val;
+
+ if (flags & IORESOURCE_IO)
+ return cpu_to_le32(PCI_BASE_ADDRESS_SPACE_IO);
+
+ val = PCI_BASE_ADDRESS_SPACE_MEMORY;
+
+ if (flags & IORESOURCE_PREFETCH)
+ val |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+ if (flags & IORESOURCE_MEM_64)
+ val |= PCI_BASE_ADDRESS_MEM_TYPE_64;
+
+ return cpu_to_le32(val);
+}
+
+/*
+ * Pretend we're hardware and tweak the values of the *virtual* PCI BARs
+ * to reflect the hardware capabilities. This implements BAR sizing.
+ */
+static void vfio_bar_fixup(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int i;
+ __le32 *bar;
+ u64 mask;
+
+ bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+
+ for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
+ if (!pci_resource_start(pdev, i)) {
+ *bar = 0; /* Unmapped by host = unimplemented to user */
+ continue;
+ }
+
+ mask = ~(pci_resource_len(pdev, i) - 1);
+
+ *bar &= cpu_to_le32((u32)mask);
+ *bar |= vfio_generate_bar_flags(pdev, i);
+
+ if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+ bar++;
+ *bar &= cpu_to_le32((u32)(mask >> 32));
+ i++;
+ }
+ }
+
+ bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+
+ /*
+ * NB. we expose the actual BAR size here, regardless of whether
+ * we can read it. When we report the REGION_INFO for the ROM
+ * we report what PCI tells us is the actual ROM size.
+ */
+ if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
+ mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
+ mask |= PCI_ROM_ADDRESS_ENABLE;
+ *bar &= cpu_to_le32((u32)mask);
+ } else
+ *bar = 0;
+
+ vdev->bardirty = false;
+}
+
+static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ if (is_bar(offset)) /* pos == offset for basic config */
+ vfio_bar_fixup(vdev);
+
+ count = vfio_default_config_read(vdev, pos, count, perm, offset, val);
+
+ /* Mask in virtual memory enable for SR-IOV devices */
+ if (offset == PCI_COMMAND && vdev->pdev->is_virtfn) {
+ u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
+ u32 tmp_val = le32_to_cpu(*val);
+
+ tmp_val |= cmd & PCI_COMMAND_MEMORY;
+ *val = cpu_to_le32(tmp_val);
+ }
+
+ return count;
+}
+
+static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ __le16 *virt_cmd;
+ u16 new_cmd = 0;
+ int ret;
+
+ virt_cmd = (__le16 *)&vdev->vconfig[PCI_COMMAND];
+
+ if (offset == PCI_COMMAND) {
+ bool phys_mem, virt_mem, new_mem, phys_io, virt_io, new_io;
+ u16 phys_cmd;
+
+ ret = pci_user_read_config_word(pdev, PCI_COMMAND, &phys_cmd);
+ if (ret)
+ return ret;
+
+ new_cmd = le32_to_cpu(val);
+
+ phys_mem = !!(phys_cmd & PCI_COMMAND_MEMORY);
+ virt_mem = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_MEMORY);
+ new_mem = !!(new_cmd & PCI_COMMAND_MEMORY);
+
+ phys_io = !!(phys_cmd & PCI_COMMAND_IO);
+ virt_io = !!(le16_to_cpu(*virt_cmd) & PCI_COMMAND_IO);
+ new_io = !!(new_cmd & PCI_COMMAND_IO);
+
+ /*
+ * If the user is writing mem/io enable (new_mem/io) and we
+ * think it's already enabled (virt_mem/io), but the hardware
+ * shows it disabled (phys_mem/io), then the device has
+ * undergone some kind of backdoor reset and needs to be
+ * restored before we allow it to enable the bars.
+ * SR-IOV devices will trigger this, but we catch them later
+ */
+ if ((new_mem && virt_mem && !phys_mem) ||
+ (new_io && virt_io && !phys_io))
+ vfio_bar_restore(vdev);
+ }
+
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /*
+ * Save current memory/io enable bits in vconfig to allow for
+ * the test above next time.
+ */
+ if (offset == PCI_COMMAND) {
+ u16 mask = PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+
+ *virt_cmd &= cpu_to_le16(~mask);
+ *virt_cmd |= cpu_to_le16(new_cmd & mask);
+ }
+
+ /* Emulate INTx disable */
+ if (offset >= PCI_COMMAND && offset <= PCI_COMMAND + 1) {
+ bool virt_intx_disable;
+
+ virt_intx_disable = !!(le16_to_cpu(*virt_cmd) &
+ PCI_COMMAND_INTX_DISABLE);
+
+ if (virt_intx_disable && !vdev->virq_disabled) {
+ vdev->virq_disabled = true;
+ vfio_pci_intx_mask(vdev);
+ } else if (!virt_intx_disable && vdev->virq_disabled) {
+ vdev->virq_disabled = false;
+ vfio_pci_intx_unmask(vdev);
+ }
+ }
+
+ if (is_bar(offset))
+ vdev->bardirty = true;
+
+ return count;
+}
+
+/* Permissions for the Basic PCI Header */
+static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, PCI_STD_HEADER_SIZEOF))
+ return -ENOMEM;
+
+ perm->readfn = vfio_basic_config_read;
+ perm->writefn = vfio_basic_config_write;
+
+ /* Virtualized for SR-IOV functions, which just have FFFF */
+ p_setw(perm, PCI_VENDOR_ID, (u16)ALL_VIRT, NO_WRITE);
+ p_setw(perm, PCI_DEVICE_ID, (u16)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Virtualize INTx disable, we use it internally for interrupt
+ * control and can emulate it for non-PCI 2.3 devices.
+ */
+ p_setw(perm, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE, (u16)ALL_WRITE);
+
+ /* Virtualize capability list, we might want to skip/disable */
+ p_setw(perm, PCI_STATUS, PCI_STATUS_CAP_LIST, NO_WRITE);
+
+ /* No harm to write */
+ p_setb(perm, PCI_CACHE_LINE_SIZE, NO_VIRT, (u8)ALL_WRITE);
+ p_setb(perm, PCI_LATENCY_TIMER, NO_VIRT, (u8)ALL_WRITE);
+ p_setb(perm, PCI_BIST, NO_VIRT, (u8)ALL_WRITE);
+
+ /* Virtualize all bars, can't touch the real ones */
+ p_setd(perm, PCI_BASE_ADDRESS_0, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_1, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_2, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_3, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_4, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_BASE_ADDRESS_5, ALL_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_ROM_ADDRESS, ALL_VIRT, ALL_WRITE);
+
+ /* Allow us to adjust capability chain */
+ p_setb(perm, PCI_CAPABILITY_LIST, (u8)ALL_VIRT, NO_WRITE);
+
+ /* Sometimes used by sw, just virtualize */
+ p_setb(perm, PCI_INTERRUPT_LINE, (u8)ALL_VIRT, (u8)ALL_WRITE);
+ return 0;
+}
+
+/* Permissions for the Power Management capability */
+static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_PM]))
+ return -ENOMEM;
+
+ /*
+ * We always virtualize the next field so we can remove
+ * capabilities from the chain if we want to.
+ */
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Power management is defined *per function*,
+ * so we let the user write this
+ */
+ p_setd(perm, PCI_PM_CTRL, NO_VIRT, ALL_WRITE);
+ return 0;
+}
+
+/* Permissions for PCI-X capability */
+static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
+{
+ /* Alloc 24, but only 8 are used in v0 */
+ if (alloc_perm_bits(perm, PCI_CAP_PCIX_SIZEOF_V2))
+ return -ENOMEM;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ p_setw(perm, PCI_X_CMD, NO_VIRT, (u16)ALL_WRITE);
+ p_setd(perm, PCI_X_ECC_CSR, NO_VIRT, ALL_WRITE);
+ return 0;
+}
+
+/* Permissions for PCI Express capability */
+static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
+{
+ /* Alloc larger of two possible sizes */
+ if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
+ return -ENOMEM;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * Allow writes to device control fields (includes FLR!)
+ * but not to devctl_phantom which could confuse IOMMU
+ * or to the ARI bit in devctl2 which is set at probe time
+ */
+ p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
+ p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
+ return 0;
+}
+
+/* Permissions for Advanced Function capability */
+static int __init init_pci_cap_af_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
+ return -ENOMEM;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+ p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
+ return 0;
+}
+
+/* Permissions for Advanced Error Reporting extended capability */
+static int __init init_pci_ext_cap_err_perm(struct perm_bits *perm)
+{
+ u32 mask;
+
+ if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_ERR]))
+ return -ENOMEM;
+
+ /*
+ * Virtualize the first dword of all express capabilities
+ * because it includes the next pointer. This lets us later
+ * remove capabilities from the chain if we need to.
+ */
+ p_setd(perm, 0, ALL_VIRT, NO_WRITE);
+
+ /* Writable bits mask */
+ mask = PCI_ERR_UNC_TRAIN | /* Training */
+ PCI_ERR_UNC_DLP | /* Data Link Protocol */
+ PCI_ERR_UNC_SURPDN | /* Surprise Down */
+ PCI_ERR_UNC_POISON_TLP | /* Poisoned TLP */
+ PCI_ERR_UNC_FCP | /* Flow Control Protocol */
+ PCI_ERR_UNC_COMP_TIME | /* Completion Timeout */
+ PCI_ERR_UNC_COMP_ABORT | /* Completer Abort */
+ PCI_ERR_UNC_UNX_COMP | /* Unexpected Completion */
+ PCI_ERR_UNC_RX_OVER | /* Receiver Overflow */
+ PCI_ERR_UNC_MALF_TLP | /* Malformed TLP */
+ PCI_ERR_UNC_ECRC | /* ECRC Error Status */
+ PCI_ERR_UNC_UNSUP | /* Unsupported Request */
+ PCI_ERR_UNC_ACSV | /* ACS Violation */
+ PCI_ERR_UNC_INTN | /* internal error */
+ PCI_ERR_UNC_MCBTLP | /* MC blocked TLP */
+ PCI_ERR_UNC_ATOMEG | /* Atomic egress blocked */
+ PCI_ERR_UNC_TLPPRE; /* TLP prefix blocked */
+ p_setd(perm, PCI_ERR_UNCOR_STATUS, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_UNCOR_MASK, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_UNCOR_SEVER, NO_VIRT, mask);
+
+ mask = PCI_ERR_COR_RCVR | /* Receiver Error Status */
+ PCI_ERR_COR_BAD_TLP | /* Bad TLP Status */
+ PCI_ERR_COR_BAD_DLLP | /* Bad DLLP Status */
+ PCI_ERR_COR_REP_ROLL | /* REPLAY_NUM Rollover */
+ PCI_ERR_COR_REP_TIMER | /* Replay Timer Timeout */
+ PCI_ERR_COR_ADV_NFAT | /* Advisory Non-Fatal */
+ PCI_ERR_COR_INTERNAL | /* Corrected Internal */
+ PCI_ERR_COR_LOG_OVER; /* Header Log Overflow */
+ p_setd(perm, PCI_ERR_COR_STATUS, NO_VIRT, mask);
+ p_setd(perm, PCI_ERR_COR_MASK, NO_VIRT, mask);
+
+ mask = PCI_ERR_CAP_ECRC_GENE | /* ECRC Generation Enable */
+ PCI_ERR_CAP_ECRC_CHKE; /* ECRC Check Enable */
+ p_setd(perm, PCI_ERR_CAP, NO_VIRT, mask);
+ return 0;
+}
+
+/* Permissions for Power Budgeting extended capability */
+static int __init init_pci_ext_cap_pwr_perm(struct perm_bits *perm)
+{
+ if (alloc_perm_bits(perm, pci_ext_cap_length[PCI_EXT_CAP_ID_PWR]))
+ return -ENOMEM;
+
+ p_setd(perm, 0, ALL_VIRT, NO_WRITE);
+
+ /* Writing the data selector is OK, the info is still read-only */
+ p_setb(perm, PCI_PWR_DATA, NO_VIRT, (u8)ALL_WRITE);
+ return 0;
+}
+
+/*
+ * Initialize the shared permission tables
+ */
+void vfio_pci_uninit_perm_bits(void)
+{
+ free_perm_bits(&cap_perms[PCI_CAP_ID_BASIC]);
+
+ free_perm_bits(&cap_perms[PCI_CAP_ID_PM]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_PCIX]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_EXP]);
+ free_perm_bits(&cap_perms[PCI_CAP_ID_AF]);
+
+ free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
+ free_perm_bits(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
+}
+
+int __init vfio_pci_init_perm_bits(void)
+{
+ int ret;
+
+ /* Basic config space */
+ ret = init_pci_cap_basic_perm(&cap_perms[PCI_CAP_ID_BASIC]);
+
+ /* Capabilities */
+ ret |= init_pci_cap_pm_perm(&cap_perms[PCI_CAP_ID_PM]);
+ cap_perms[PCI_CAP_ID_VPD].writefn = vfio_direct_config_write;
+ ret |= init_pci_cap_pcix_perm(&cap_perms[PCI_CAP_ID_PCIX]);
+ cap_perms[PCI_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+ ret |= init_pci_cap_exp_perm(&cap_perms[PCI_CAP_ID_EXP]);
+ ret |= init_pci_cap_af_perm(&cap_perms[PCI_CAP_ID_AF]);
+
+ /* Extended capabilities */
+ ret |= init_pci_ext_cap_err_perm(&ecap_perms[PCI_EXT_CAP_ID_ERR]);
+ ret |= init_pci_ext_cap_pwr_perm(&ecap_perms[PCI_EXT_CAP_ID_PWR]);
+ ecap_perms[PCI_EXT_CAP_ID_VNDR].writefn = vfio_direct_config_write;
+
+ if (ret)
+ vfio_pci_uninit_perm_bits();
+
+ return ret;
+}
+
+static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
+{
+ u8 cap;
+ int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
+ PCI_STD_HEADER_SIZEOF;
+ base /= 4;
+ pos /= 4;
+
+ cap = vdev->pci_config_map[pos];
+
+ if (cap == PCI_CAP_ID_BASIC)
+ return 0;
+
+ /* XXX Can we have two abutting capabilities of the same type? */
+ while (pos - 1 >= base && vdev->pci_config_map[pos - 1] == cap)
+ pos--;
+
+ return pos * 4;
+}
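+
+/*
+ * Example with illustrative offsets: if the PM capability sits at 0x40
+ * and occupies two dwords, pci_config_map[0x10] and [0x11] both hold
+ * PCI_CAP_ID_PM. An access at pos 0x44 walks back one entry and
+ * returns 0x40, so perm_bits offsets stay relative to the cap start.
+ */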
+
+static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 *val)
+{
+ /* Update max available queue size from msi_qmax */
+ if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
+ __le16 *flags;
+ int start;
+
+ start = vfio_find_cap_start(vdev, pos);
+
+ flags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
+
+ *flags &= cpu_to_le16(~PCI_MSI_FLAGS_QMASK);
+ *flags |= cpu_to_le16(vdev->msi_qmax << 1);
+ }
+
+ return vfio_default_config_read(vdev, pos, count, perm, offset, val);
+}
+
+static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
+ int count, struct perm_bits *perm,
+ int offset, __le32 val)
+{
+ count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
+ if (count < 0)
+ return count;
+
+ /* Fixup and write configured queue size and enable to hardware */
+ if (offset <= PCI_MSI_FLAGS && offset + count >= PCI_MSI_FLAGS) {
+ __le16 *pflags;
+ u16 flags;
+ int start, ret;
+
+ start = vfio_find_cap_start(vdev, pos);
+
+ pflags = (__le16 *)&vdev->vconfig[start + PCI_MSI_FLAGS];
+
+ flags = le16_to_cpu(*pflags);
+
+ /* MSI is enabled via ioctl */
+ if (!is_msi(vdev))
+ flags &= ~PCI_MSI_FLAGS_ENABLE;
+
+ /* Check queue size */
+ if ((flags & PCI_MSI_FLAGS_QSIZE) >> 4 > vdev->msi_qmax) {
+ flags &= ~PCI_MSI_FLAGS_QSIZE;
+ flags |= vdev->msi_qmax << 4;
+ }
+
+ /* Write back to virt and to hardware */
+ *pflags = cpu_to_le16(flags);
+ ret = pci_user_write_config_word(vdev->pdev,
+ start + PCI_MSI_FLAGS,
+ flags);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+ }
+
+ return count;
+}
+
+/*
+ * MSI determination is per-device, so this routine gets used beyond
+ * initialization time. Don't add __init
+ */
+static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
+{
+ if (alloc_perm_bits(perm, len))
+ return -ENOMEM;
+
+ perm->readfn = vfio_msi_config_read;
+ perm->writefn = vfio_msi_config_write;
+
+ p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+
+ /*
+ * The upper byte of the control register is reserved,
+ * just set up the lower byte.
+ */
+ p_setb(perm, PCI_MSI_FLAGS, (u8)ALL_VIRT, (u8)ALL_WRITE);
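+
+ /*
+ * Roughly speaking, ALL_VIRT/ALL_WRITE below means the MSI address
+ * and data the user sees live entirely in vconfig; the physical
+ * registers are programmed by the host when MSI is enabled via ioctl.
+ */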
+ p_setd(perm, PCI_MSI_ADDRESS_LO, ALL_VIRT, ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_64BIT) {
+ p_setd(perm, PCI_MSI_ADDRESS_HI, ALL_VIRT, ALL_WRITE);
+ p_setw(perm, PCI_MSI_DATA_64, (u16)ALL_VIRT, (u16)ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_MASKBIT) {
+ p_setd(perm, PCI_MSI_MASK_64, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_MSI_PENDING_64, NO_VIRT, ALL_WRITE);
+ }
+ } else {
+ p_setw(perm, PCI_MSI_DATA_32, (u16)ALL_VIRT, (u16)ALL_WRITE);
+ if (flags & PCI_MSI_FLAGS_MASKBIT) {
+ p_setd(perm, PCI_MSI_MASK_32, NO_VIRT, ALL_WRITE);
+ p_setd(perm, PCI_MSI_PENDING_32, NO_VIRT, ALL_WRITE);
+ }
+ }
+ return 0;
+}
+
+/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
+static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int len, ret;
+ u16 flags;
+
+ ret = pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &flags);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ len = 10; /* Minimum size */
+ if (flags & PCI_MSI_FLAGS_64BIT)
+ len += 4;
+ if (flags & PCI_MSI_FLAGS_MASKBIT)
+ len += 10;
+
+ if (vdev->msi_perm)
+ return len;
+
+ vdev->msi_perm = kmalloc(sizeof(struct perm_bits), GFP_KERNEL);
+ if (!vdev->msi_perm)
+ return -ENOMEM;
+
+ ret = init_pci_cap_msi_perm(vdev->msi_perm, len, flags);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+/* Determine extended capability length for VC (2 & 9) and MFVC */
+static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u32 tmp;
+ int ret, evcc, phases, vc_arb;
+ int len = PCI_CAP_VC_BASE_SIZEOF;
+
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG1, &tmp);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ evcc = tmp & PCI_VC_REG1_EVCC; /* extended vc count */
+ ret = pci_read_config_dword(pdev, pos + PCI_VC_PORT_REG2, &tmp);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (tmp & PCI_VC_REG2_128_PHASE)
+ phases = 128;
+ else if (tmp & PCI_VC_REG2_64_PHASE)
+ phases = 64;
+ else if (tmp & PCI_VC_REG2_32_PHASE)
+ phases = 32;
+ else
+ phases = 0;
+
+ vc_arb = phases * 4;
+
+ /*
+ * Port arbitration tables are root & switch only;
+ * function arbitration tables are function 0 only.
+ * In either case, we'll never let user write them so
+ * we don't care how big they are
+ */
+ len += (1 + evcc) * PCI_CAP_VC_PER_VC_SIZEOF;
+ if (vc_arb) {
+ len = round_up(len, 16);
+ len += vc_arb / 8;
+ }
+ return len;
+}
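+
+/*
+ * Worked example with assumed register values: EVCC = 1 (two VC
+ * resources) and a 32-phase arbitration table give vc_arb = 128 bits,
+ * so len = PCI_CAP_VC_BASE_SIZEOF + 2 * PCI_CAP_VC_PER_VC_SIZEOF,
+ * rounded up to a 16-byte boundary, plus 128 / 8 bytes of table.
+ */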
+
+static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u16 word;
+ u8 byte;
+ int ret;
+
+ switch (cap) {
+ case PCI_CAP_ID_MSI:
+ return vfio_msi_cap_len(vdev, pos);
+ case PCI_CAP_ID_PCIX:
+ ret = pci_read_config_word(pdev, pos + PCI_X_CMD, &word);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (PCI_X_CMD_VERSION(word)) {
+ vdev->extended_caps = true;
+ return PCI_CAP_PCIX_SIZEOF_V2;
+ } else
+ return PCI_CAP_PCIX_SIZEOF_V0;
+ case PCI_CAP_ID_VNDR:
+ /* length follows next field */
+ ret = pci_read_config_byte(pdev, pos + PCI_CAP_FLAGS, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return byte;
+ case PCI_CAP_ID_EXP:
+ /* length based on version */
+ ret = pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &word);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if ((word & PCI_EXP_FLAGS_VERS) == 1)
+ return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
+ else {
+ vdev->extended_caps = true;
+ return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
+ }
+ case PCI_CAP_ID_HT:
+ ret = pci_read_config_byte(pdev, pos + 3, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return (byte & HT_3BIT_CAP_MASK) ?
+ HT_CAP_SIZEOF_SHORT : HT_CAP_SIZEOF_LONG;
+ case PCI_CAP_ID_SATA:
+ ret = pci_read_config_byte(pdev, pos + PCI_SATA_REGS, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_SATA_REGS_MASK;
+ if (byte == PCI_SATA_REGS_INLINE)
+ return PCI_SATA_SIZEOF_LONG;
+ else
+ return PCI_SATA_SIZEOF_SHORT;
+ default:
+ pr_warn("%s: %s unknown length for pci cap 0x%x@0x%x\n",
+ dev_name(&pdev->dev), __func__, cap, pos);
+ }
+
+ return 0;
+}
+
+static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 byte;
+ u32 dword;
+ int ret;
+
+ switch (ecap) {
+ case PCI_EXT_CAP_ID_VNDR:
+ ret = pci_read_config_dword(pdev, epos + PCI_VSEC_HDR, &dword);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ return dword >> PCI_VSEC_HDR_LEN_SHIFT;
+ case PCI_EXT_CAP_ID_VC:
+ case PCI_EXT_CAP_ID_VC9:
+ case PCI_EXT_CAP_ID_MFVC:
+ return vfio_vc_cap_len(vdev, epos);
+ case PCI_EXT_CAP_ID_ACS:
+ ret = pci_read_config_byte(pdev, epos + PCI_ACS_CAP, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if (byte & PCI_ACS_EC) {
+ int bits;
+
+ ret = pci_read_config_byte(pdev,
+ epos + PCI_ACS_EGRESS_BITS,
+ &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ bits = byte ? round_up(byte, 32) : 256;
+ return 8 + (bits / 8);
+ }
+ return 8;
+
+ case PCI_EXT_CAP_ID_REBAR:
+ ret = pci_read_config_byte(pdev, epos + PCI_REBAR_CTRL, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_REBAR_CTRL_NBAR_MASK;
+ byte >>= PCI_REBAR_CTRL_NBAR_SHIFT;
+
+ return 4 + (byte * 8);
+ case PCI_EXT_CAP_ID_DPA:
+ ret = pci_read_config_byte(pdev, epos + PCI_DPA_CAP, &byte);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ byte &= PCI_DPA_CAP_SUBSTATE_MASK;
+ byte = round_up(byte + 1, 4);
+ return PCI_DPA_BASE_SIZEOF + byte;
+ case PCI_EXT_CAP_ID_TPH:
+ ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
+ if (ret)
+ return pcibios_err_to_errno(ret);
+
+ if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
+ int sts;
+
+ sts = dword & PCI_TPH_CAP_ST_MASK;
+ sts >>= PCI_TPH_CAP_ST_SHIFT;
+ return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4);
+ }
+ return PCI_TPH_BASE_SIZEOF;
+ default:
+ pr_warn("%s: %s unknown length for pci ecap 0x%x@0x%x\n",
+ dev_name(&pdev->dev), __func__, ecap, epos);
+ }
+
+ return 0;
+}
+
+static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
+ int offset, int size)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret = 0;
+
+ /*
+ * We try to read physical config space in the largest chunks
+ * we can, assuming that all of the fields support dword access.
+ * pci_save_state() makes this same assumption and seems to do ok.
+ */
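+ /*
+ * For example, filling offset 0x41 with size 7 issues a byte read at
+ * 0x41, a word read at 0x42 and a dword read at 0x44, staying
+ * naturally aligned throughout (offsets are illustrative).
+ */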
+ while (size) {
+ int filled;
+
+ if (size >= 4 && !(offset % 4)) {
+ __le32 *dwordp = (__le32 *)&vdev->vconfig[offset];
+ u32 dword;
+
+ ret = pci_read_config_dword(pdev, offset, &dword);
+ if (ret)
+ return ret;
+ *dwordp = cpu_to_le32(dword);
+ filled = 4;
+ } else if (size >= 2 && !(offset % 2)) {
+ __le16 *wordp = (__le16 *)&vdev->vconfig[offset];
+ u16 word;
+
+ ret = pci_read_config_word(pdev, offset, &word);
+ if (ret)
+ return ret;
+ *wordp = cpu_to_le16(word);
+ filled = 2;
+ } else {
+ u8 *byte = &vdev->vconfig[offset];
+ ret = pci_read_config_byte(pdev, offset, byte);
+ if (ret)
+ return ret;
+ filled = 1;
+ }
+
+ offset += filled;
+ size -= filled;
+ }
+
+ return ret;
+}
+
+static int vfio_cap_init(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map = vdev->pci_config_map;
+ u16 status;
+ u8 pos, *prev, cap;
+ int loops, ret, caps = 0;
+
+ /* Any capabilities? */
+ ret = pci_read_config_word(pdev, PCI_STATUS, &status);
+ if (ret)
+ return ret;
+
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0; /* Done */
+
+ ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+ if (ret)
+ return ret;
+
+ /* Mark the previous position in case we want to skip a capability */
+ prev = &vdev->vconfig[PCI_CAPABILITY_LIST];
+
+ /* We can bound our loop, capabilities are dword aligned */
+ loops = (PCI_CFG_SPACE_SIZE - PCI_STD_HEADER_SIZEOF) / PCI_CAP_SIZEOF;
+ while (pos && loops--) {
+ u8 next;
+ int i, len = 0;
+
+ ret = pci_read_config_byte(pdev, pos, &cap);
+ if (ret)
+ return ret;
+
+ ret = pci_read_config_byte(pdev,
+ pos + PCI_CAP_LIST_NEXT, &next);
+ if (ret)
+ return ret;
+
+ if (cap <= PCI_CAP_ID_MAX) {
+ len = pci_cap_length[cap];
+ if (len == 0xFF) { /* Variable length */
+ len = vfio_cap_len(vdev, cap, pos);
+ if (len < 0)
+ return len;
+ }
+ }
+
+ if (!len) {
+ pr_info("%s: %s hiding cap 0x%x\n",
+ __func__, dev_name(&pdev->dev), cap);
+ *prev = next;
+ pos = next;
+ continue;
+ }
+
+ /* Sanity check, do we overlap other capabilities? */
+ for (i = 0; i < len; i += 4) {
+ if (likely(map[(pos + i) / 4] == PCI_CAP_ID_INVALID))
+ continue;
+
+ pr_warn("%s: %s pci config conflict @0x%x, was cap 0x%x now cap 0x%x\n",
+ __func__, dev_name(&pdev->dev),
+ pos + i, map[(pos + i) / 4], cap);
+ }
+
+ memset(map + (pos / 4), cap, len / 4);
+ ret = vfio_fill_vconfig_bytes(vdev, pos, len);
+ if (ret)
+ return ret;
+
+ prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT];
+ pos = next;
+ caps++;
+ }
+
+ /* If we didn't fill any capabilities, clear the status flag */
+ if (!caps) {
+ __le16 *vstatus = (__le16 *)&vdev->vconfig[PCI_STATUS];
+ *vstatus &= ~cpu_to_le16(PCI_STATUS_CAP_LIST);
+ }
+
+ return 0;
+}
+
+static int vfio_ecap_init(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map = vdev->pci_config_map;
+ u16 epos;
+ __le32 *prev = NULL;
+ int loops, ret, ecaps = 0;
+
+ if (!vdev->extended_caps)
+ return 0;
+
+ epos = PCI_CFG_SPACE_SIZE;
+
+ loops = (pdev->cfg_size - PCI_CFG_SPACE_SIZE) / PCI_CAP_SIZEOF;
+
+ while (loops-- && epos >= PCI_CFG_SPACE_SIZE) {
+ u32 header;
+ u16 ecap;
+ int i, len = 0;
+ bool hidden = false;
+
+ ret = pci_read_config_dword(pdev, epos, &header);
+ if (ret)
+ return ret;
+
+ ecap = PCI_EXT_CAP_ID(header);
+
+ if (ecap <= PCI_EXT_CAP_ID_MAX) {
+ len = pci_ext_cap_length[ecap];
+ if (len == 0xFF) {
+ len = vfio_ext_cap_len(vdev, ecap, epos);
+ if (len < 0)
+ return len;
+ }
+ }
+
+ if (!len) {
+ pr_info("%s: %s hiding ecap 0x%x@0x%x\n",
+ __func__, dev_name(&pdev->dev), ecap, epos);
+
+ /* If not the first in the chain, we can skip over it */
+ if (prev) {
+ u32 val = epos = PCI_EXT_CAP_NEXT(header);
+ *prev &= cpu_to_le32(~(0xffcU << 20));
+ *prev |= cpu_to_le32(val << 20);
+ continue;
+ }
+
+ /*
+ * Otherwise, fill in a placeholder, the direct
+ * readfn will virtualize this automatically
+ */
+ len = PCI_CAP_SIZEOF;
+ hidden = true;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ if (likely(map[(epos + i) / 4] == PCI_CAP_ID_INVALID))
+ continue;
+
+ pr_warn("%s: %s pci config conflict @0x%x, was ecap 0x%x now ecap 0x%x\n",
+ __func__, dev_name(&pdev->dev),
+ epos + i, map[(epos + i) / 4], ecap);
+ }
+
+ /*
+ * Even though ecap is 2 bytes, we're currently a long way
+ * from exceeding 1-byte capability IDs. If we ever reach 0xFF we'll
+ * need to widen the map entries to two bytes.
+ */
+ BUILD_BUG_ON(PCI_EXT_CAP_ID_MAX >= PCI_CAP_ID_INVALID);
+
+ memset(map + (epos / 4), ecap, len / 4);
+ ret = vfio_fill_vconfig_bytes(vdev, epos, len);
+ if (ret)
+ return ret;
+
+ /*
+ * If we're just using this capability to anchor the list,
+ * hide the real ID. Only count real ecaps. XXX PCI spec
+ * indicates to use cap id = 0, version = 0, next = 0 if
+ * ecaps are absent, hope users check all the way to next.
+ */
+ if (hidden)
+ *(__le32 *)&vdev->vconfig[epos] &=
+ cpu_to_le32((0xffcU << 20));
+ else
+ ecaps++;
+
+ prev = (__le32 *)&vdev->vconfig[epos];
+ epos = PCI_EXT_CAP_NEXT(header);
+ }
+
+ if (!ecaps)
+ *(u32 *)&vdev->vconfig[PCI_CFG_SPACE_SIZE] = 0;
+
+ return 0;
+}
+
+/*
+ * For each device we allocate a pci_config_map that indicates the
+ * capability occupying each dword and thus the struct perm_bits we
+ * use for read and write. We also allocate a virtualized config
+ * space which tracks reads and writes to bits that we emulate for
+ * the user. Initial values filled from device.
+ *
+ * Using shared struct perm_bits between all vfio-pci devices saves
+ * us from allocating cfg_size buffers for virt and write for every
+ * device. We could remove vconfig and allocate individual buffers
+ * for each area requiring emulated bits, but the array of pointers
+ * would be comparable in size (at least for standard config space).
+ */
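+/*
+ * Layout example (offsets are made up): with a 4096-byte config space
+ * the map is 1024 bytes, one byte per dword. If a 24-byte MSI cap sits
+ * at 0x50, map[0x14] through map[0x19] hold PCI_CAP_ID_MSI and any
+ * access in 0x50-0x67 is routed through vdev->msi_perm rather than the
+ * shared cap_perms table.
+ */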
+int vfio_config_init(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ u8 *map, *vconfig;
+ int ret;
+
+ /*
+ * Config space, caps and ecaps are all dword aligned, so we can
+ * use one byte per dword to record the type.
+ */
+ map = kmalloc(pdev->cfg_size / 4, GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ vconfig = kmalloc(pdev->cfg_size, GFP_KERNEL);
+ if (!vconfig) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ vdev->pci_config_map = map;
+ vdev->vconfig = vconfig;
+
+ memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF / 4);
+ memset(map + (PCI_STD_HEADER_SIZEOF / 4), PCI_CAP_ID_INVALID,
+ (pdev->cfg_size - PCI_STD_HEADER_SIZEOF) / 4);
+
+ ret = vfio_fill_vconfig_bytes(vdev, 0, PCI_STD_HEADER_SIZEOF);
+ if (ret)
+ goto out;
+
+ vdev->bardirty = true;
+
+ /*
+ * XXX can we just pci_load_saved_state/pci_restore_state?
+ * may need to rebuild vconfig after that
+ */
+
+ /* For restore after reset */
+ vdev->rbar[0] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_0]);
+ vdev->rbar[1] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_1]);
+ vdev->rbar[2] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_2]);
+ vdev->rbar[3] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_3]);
+ vdev->rbar[4] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_4]);
+ vdev->rbar[5] = le32_to_cpu(*(__le32 *)&vconfig[PCI_BASE_ADDRESS_5]);
+ vdev->rbar[6] = le32_to_cpu(*(__le32 *)&vconfig[PCI_ROM_ADDRESS]);
+
+ if (pdev->is_virtfn) {
+ *(__le16 *)&vconfig[PCI_VENDOR_ID] = cpu_to_le16(pdev->vendor);
+ *(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
+ }
+
+ ret = vfio_cap_init(vdev);
+ if (ret)
+ goto out;
+
+ ret = vfio_ecap_init(vdev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ kfree(map);
+ vdev->pci_config_map = NULL;
+ kfree(vconfig);
+ vdev->vconfig = NULL;
+ return pcibios_err_to_errno(ret);
+}
+
+void vfio_config_free(struct vfio_pci_device *vdev)
+{
+ kfree(vdev->vconfig);
+ vdev->vconfig = NULL;
+ kfree(vdev->pci_config_map);
+ vdev->pci_config_map = NULL;
+ kfree(vdev->msi_perm);
+ vdev->msi_perm = NULL;
+}
+
+static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ struct perm_bits *perm;
+ __le32 val = 0;
+ int cap_start = 0, offset;
+ u8 cap_id;
+ ssize_t ret = count;
+
+ if (*ppos < 0 || *ppos + count > pdev->cfg_size)
+ return -EFAULT;
+
+ /*
+ * gcc can't seem to figure out that we're a static function only called
+ * with count of 1/2/4, and it hits copy_from_user_overflow without this.
+ */
+ if (count > sizeof(val))
+ return -EINVAL;
+
+ cap_id = vdev->pci_config_map[*ppos / 4];
+
+ if (cap_id == PCI_CAP_ID_INVALID) {
+ if (iswrite)
+ return ret; /* drop */
+
+ /*
+ * Per PCI spec 3.0, section 6.1, reads from reserved and
+ * unimplemented registers return 0
+ */
+ if (copy_to_user(buf, &val, count))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ /*
+ * All capabilities are minimum 4 bytes and aligned on dword
+ * boundaries. Since we don't support unaligned accesses, we're
+ * only ever accessing a single capability.
+ */
+ if (*ppos >= PCI_CFG_SPACE_SIZE) {
+ WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX);
+
+ perm = &ecap_perms[cap_id];
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+
+ } else {
+ WARN_ON(cap_id > PCI_CAP_ID_MAX);
+
+ perm = &cap_perms[cap_id];
+
+ if (cap_id == PCI_CAP_ID_MSI)
+ perm = vdev->msi_perm;
+
+ if (cap_id > PCI_CAP_ID_BASIC)
+ cap_start = vfio_find_cap_start(vdev, *ppos);
+ }
+
+ WARN_ON(!cap_start && cap_id != PCI_CAP_ID_BASIC);
+ WARN_ON(cap_start > *ppos);
+
+ offset = *ppos - cap_start;
+
+ if (iswrite) {
+ if (!perm->writefn)
+ return ret;
+
+ if (copy_from_user(&val, buf, count))
+ return -EFAULT;
+
+ ret = perm->writefn(vdev, *ppos, count, perm, offset, val);
+ } else {
+ if (perm->readfn) {
+ ret = perm->readfn(vdev, *ppos, count,
+ perm, offset, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (copy_to_user(buf, &val, count))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite)
+{
+ size_t done = 0;
+ int ret = 0;
+ loff_t pos = *ppos;
+
+ pos &= VFIO_PCI_OFFSET_MASK;
+
+ /*
+ * We want to keep the access size the caller uses while also
+ * supporting reads of large chunks of config space in a single call.
+ * PCI doesn't support unaligned accesses, so we can safely break
+ * those apart.
+ */
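+ /*
+ * e.g. a 6-byte read at offset 0x3e (illustrative) becomes a 2-byte
+ * access at 0x3e followed by a 4-byte access at 0x40, so one call can
+ * cross from one capability's perm_bits into the next.
+ */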
+ while (count) {
+ if (count >= 4 && !(pos % 4))
+ ret = vfio_config_do_rw(vdev, buf, 4, &pos, iswrite);
+ else if (count >= 2 && !(pos % 2))
+ ret = vfio_config_do_rw(vdev, buf, 2, &pos, iswrite);
+ else
+ ret = vfio_config_do_rw(vdev, buf, 1, &pos, iswrite);
+
+ if (ret < 0)
+ return ret;
+
+ count -= ret;
+ done += ret;
+ buf += ret;
+ pos += ret;
+ }
+
+ *ppos += done;
+
+ return done;
+}
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
new file mode 100644
index 000000000000..211a4920b88a
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -0,0 +1,740 @@
+/*
+ * VFIO PCI interrupt handling
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/eventfd.h>
+#include <linux/pci.h>
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/vfio.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include "vfio_pci_private.h"
+
+/*
+ * IRQfd - generic
+ */
+struct virqfd {
+ struct vfio_pci_device *vdev;
+ struct eventfd_ctx *eventfd;
+ int (*handler)(struct vfio_pci_device *, void *);
+ void (*thread)(struct vfio_pci_device *, void *);
+ void *data;
+ struct work_struct inject;
+ wait_queue_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct virqfd **pvirqfd;
+};
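+
+/*
+ * Rough flow: virqfd_enable() hooks ->wait into the eventfd's wait
+ * queue, so virqfd_wakeup() runs in the signaller's context. Quick
+ * work happens there via ->handler, while ->thread is deferred to the
+ * inject work item. POLLHUP tears the virqfd down through the
+ * dedicated cleanup workqueue below.
+ */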
+
+static struct workqueue_struct *vfio_irqfd_cleanup_wq;
+
+int __init vfio_pci_virqfd_init(void)
+{
+ vfio_irqfd_cleanup_wq =
+ create_singlethread_workqueue("vfio-irqfd-cleanup");
+ if (!vfio_irqfd_cleanup_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void vfio_pci_virqfd_exit(void)
+{
+ destroy_workqueue(vfio_irqfd_cleanup_wq);
+}
+
+static void virqfd_deactivate(struct virqfd *virqfd)
+{
+ queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
+}
+
+static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+{
+ struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
+ unsigned long flags = (unsigned long)key;
+
+ if (flags & POLLIN) {
+ /* An event has been signaled, call function */
+ if ((!virqfd->handler ||
+ virqfd->handler(virqfd->vdev, virqfd->data)) &&
+ virqfd->thread)
+ schedule_work(&virqfd->inject);
+ }
+
+ if (flags & POLLHUP)
+ /* The eventfd is closing, detach from VFIO */
+ virqfd_deactivate(virqfd);
+
+ return 0;
+}
+
+static void virqfd_ptable_queue_proc(struct file *file,
+ wait_queue_head_t *wqh, poll_table *pt)
+{
+ struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
+ add_wait_queue(wqh, &virqfd->wait);
+}
+
+static void virqfd_shutdown(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
+ struct virqfd **pvirqfd = virqfd->pvirqfd;
+ u64 cnt;
+
+ eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
+ flush_work(&virqfd->inject);
+ eventfd_ctx_put(virqfd->eventfd);
+
+ kfree(virqfd);
+ *pvirqfd = NULL;
+}
+
+static void virqfd_inject(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, inject);
+ if (virqfd->thread)
+ virqfd->thread(virqfd->vdev, virqfd->data);
+}
+
+static int virqfd_enable(struct vfio_pci_device *vdev,
+ int (*handler)(struct vfio_pci_device *, void *),
+ void (*thread)(struct vfio_pci_device *, void *),
+ void *data, struct virqfd **pvirqfd, int fd)
+{
+ struct file *file = NULL;
+ struct eventfd_ctx *ctx = NULL;
+ struct virqfd *virqfd;
+ int ret = 0;
+ unsigned int events;
+
+ if (*pvirqfd)
+ return -EBUSY;
+
+ virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
+ if (!virqfd)
+ return -ENOMEM;
+
+ virqfd->pvirqfd = pvirqfd;
+ *pvirqfd = virqfd;
+ virqfd->vdev = vdev;
+ virqfd->handler = handler;
+ virqfd->thread = thread;
+ virqfd->data = data;
+
+ INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
+ INIT_WORK(&virqfd->inject, virqfd_inject);
+
+ file = eventfd_fget(fd);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto fail;
+ }
+
+ ctx = eventfd_ctx_fileget(file);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto fail;
+ }
+
+ virqfd->eventfd = ctx;
+
+ /*
+ * Install our own custom wake-up handling so we are notified via
+ * a callback whenever someone signals the underlying eventfd.
+ */
+ init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
+ init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
+
+ events = file->f_op->poll(file, &virqfd->pt);
+
+ /*
+ * Check if there was an event already pending on the eventfd
+ * before we registered and trigger it as if we didn't miss it.
+ */
+ if (events & POLLIN) {
+ if ((!handler || handler(vdev, data)) && thread)
+ schedule_work(&virqfd->inject);
+ }
+
+ /*
+ * Do not drop the file until the irqfd is fully initialized,
+ * otherwise we might race against the POLLHUP.
+ */
+ fput(file);
+
+ return 0;
+
+fail:
+ if (ctx && !IS_ERR(ctx))
+ eventfd_ctx_put(ctx);
+
+ if (file && !IS_ERR(file))
+ fput(file);
+
+ kfree(virqfd);
+ *pvirqfd = NULL;
+
+ return ret;
+}
+
+static void virqfd_disable(struct virqfd *virqfd)
+{
+ if (!virqfd)
+ return;
+
+ virqfd_deactivate(virqfd);
+
+ /* Block until we know all outstanding shutdown jobs have completed. */
+ flush_workqueue(vfio_irqfd_cleanup_wq);
+}
+
+/*
+ * INTx
+ */
+static void vfio_send_intx_eventfd(struct vfio_pci_device *vdev, void *unused)
+{
+ if (likely(is_intx(vdev) && !vdev->virq_disabled))
+ eventfd_signal(vdev->ctx[0].trigger, 1);
+}
+
+void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+ * Masking can come from interrupt, ioctl, or config space
+ * via INTx disable. The latter means this can get called
+ * even when not using intx delivery. In this case, just
+ * try to have the physical bit follow the virtual bit.
+ */
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 0);
+ } else if (!vdev->ctx[0].masked) {
+ /*
+ * Can't use check_and_mask here because we always want to
+ * mask, not just when something is pending.
+ */
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 0);
+ else
+ disable_irq_nosync(pdev->irq);
+
+ vdev->ctx[0].masked = true;
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+}
+
+/*
+ * If this is triggered by an eventfd, we can't call eventfd_signal
+ * or else we'll deadlock on the eventfd wait queue. Return >0 when
+ * a signal is necessary, which can then be handled via a work queue
+ * or directly depending on the caller.
+ */
+int vfio_pci_intx_unmask_handler(struct vfio_pci_device *vdev, void *unused)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ /*
+ * Unmasking comes from ioctl or config, so again, have the
+ * physical bit follow the virtual even when not using INTx.
+ */
+ if (unlikely(!is_intx(vdev))) {
+ if (vdev->pci_2_3)
+ pci_intx(pdev, 1);
+ } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
+ /*
+ * A pending interrupt here would immediately trigger,
+ * but we can avoid that overhead by just re-sending
+ * the interrupt to the user.
+ */
+ if (vdev->pci_2_3) {
+ if (!pci_check_and_unmask_intx(pdev))
+ ret = 1;
+ } else
+ enable_irq(pdev->irq);
+
+ vdev->ctx[0].masked = (ret > 0);
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return ret;
+}
+
+void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
+{
+ if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
+ vfio_send_intx_eventfd(vdev, NULL);
+}
+
+static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
+{
+ struct vfio_pci_device *vdev = dev_id;
+ unsigned long flags;
+ int ret = IRQ_NONE;
+
+ spin_lock_irqsave(&vdev->irqlock, flags);
+
+ if (!vdev->pci_2_3) {
+ disable_irq_nosync(vdev->pdev->irq);
+ vdev->ctx[0].masked = true;
+ ret = IRQ_HANDLED;
+ } else if (!vdev->ctx[0].masked && /* may be shared */
+ pci_check_and_mask_intx(vdev->pdev)) {
+ vdev->ctx[0].masked = true;
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ if (ret == IRQ_HANDLED)
+ vfio_send_intx_eventfd(vdev, NULL);
+
+ return ret;
+}
+
+static int vfio_intx_enable(struct vfio_pci_device *vdev)
+{
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+ if (!vdev->pdev->irq)
+ return -ENODEV;
+
+ vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+ vdev->num_ctx = 1;
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
+ return 0;
+}
+
+static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ unsigned long irqflags = IRQF_SHARED;
+ struct eventfd_ctx *trigger;
+ unsigned long flags;
+ int ret;
+
+ if (vdev->ctx[0].trigger) {
+ free_irq(pdev->irq, vdev);
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(vdev->ctx[0].trigger);
+ vdev->ctx[0].trigger = NULL;
+ }
+
+ if (fd < 0) /* Disable only */
+ return 0;
+
+ vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
+ pci_name(pdev));
+ if (!vdev->ctx[0].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(vdev->ctx[0].name);
+ return PTR_ERR(trigger);
+ }
+
+ if (!vdev->pci_2_3)
+ irqflags = 0;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, vdev->ctx[0].name, vdev);
+ if (ret) {
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+ vdev->ctx[0].trigger = trigger;
+
+ /*
+ * INTx disable will stick across the new irq setup,
+ * disable_irq won't.
+ */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+ if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
+ disable_irq_nosync(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+ return 0;
+}
+
+static void vfio_intx_disable(struct vfio_pci_device *vdev)
+{
+ vfio_intx_set_signal(vdev, -1);
+ virqfd_disable(vdev->ctx[0].unmask);
+ virqfd_disable(vdev->ctx[0].mask);
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+ kfree(vdev->ctx);
+}
+
+/*
+ * MSI/MSI-X
+ */
+static irqreturn_t vfio_msihandler(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = arg;
+
+ eventfd_signal(trigger, 1);
+ return IRQ_HANDLED;
+}
+
+static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int ret;
+
+ if (!is_irq_none(vdev))
+ return -EINVAL;
+
+ vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ if (!vdev->ctx)
+ return -ENOMEM;
+
+ if (msix) {
+ int i;
+
+ vdev->msix = kzalloc(nvec * sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!vdev->msix) {
+ kfree(vdev->ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nvec; i++)
+ vdev->msix[i].entry = i;
+
+ ret = pci_enable_msix(pdev, vdev->msix, nvec);
+ if (ret) {
+ kfree(vdev->msix);
+ kfree(vdev->ctx);
+ return ret;
+ }
+ } else {
+ ret = pci_enable_msi_block(pdev, nvec);
+ if (ret) {
+ kfree(vdev->ctx);
+ return ret;
+ }
+ }
+
+ vdev->num_ctx = nvec;
+ vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
+ VFIO_PCI_MSI_IRQ_INDEX;
+
+ if (!msix) {
+ /*
+ * Compute the virtual hardware field for max msi vectors -
+ * it is the log base 2 of the number of vectors.
+ */
+ vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
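+ /* e.g. nvec = 3: fls(5) - 1 = 2, so the user sees 2^2 = 4 vectors */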
+ }
+
+ return 0;
+}
+
+static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
+ int vector, int fd, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int irq = msix ? vdev->msix[vector].vector : pdev->irq + vector;
+ char *name = msix ? "vfio-msix" : "vfio-msi";
+ struct eventfd_ctx *trigger;
+ int ret;
+
+ if (vector >= vdev->num_ctx)
+ return -EINVAL;
+
+ if (vdev->ctx[vector].trigger) {
+ free_irq(irq, vdev->ctx[vector].trigger);
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(vdev->ctx[vector].trigger);
+ vdev->ctx[vector].trigger = NULL;
+ }
+
+ if (fd < 0)
+ return 0;
+
+ vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)",
+ name, vector, pci_name(pdev));
+ if (!vdev->ctx[vector].name)
+ return -ENOMEM;
+
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger)) {
+ kfree(vdev->ctx[vector].name);
+ return PTR_ERR(trigger);
+ }
+
+ ret = request_irq(irq, vfio_msihandler, 0,
+ vdev->ctx[vector].name, trigger);
+ if (ret) {
+ kfree(vdev->ctx[vector].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+ vdev->ctx[vector].trigger = trigger;
+
+ return 0;
+}
+
+static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
+ unsigned count, int32_t *fds, bool msix)
+{
+ int i, j, ret = 0;
+
+ if (start + count > vdev->num_ctx)
+ return -EINVAL;
+
+ for (i = 0, j = start; i < count && !ret; i++, j++) {
+ int fd = fds ? fds[i] : -1;
+ ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
+ }
+
+ if (ret) {
+ for (--j; j >= (int)start; j--)
+ vfio_msi_set_vector_signal(vdev, j, -1, msix);
+ }
+
+ return ret;
+}
+
+static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ int i;
+
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
+ for (i = 0; i < vdev->num_ctx; i++) {
+ virqfd_disable(vdev->ctx[i].unmask);
+ virqfd_disable(vdev->ctx[i].mask);
+ }
+
+ if (msix) {
+ pci_disable_msix(vdev->pdev);
+ kfree(vdev->msix);
+ } else
+ pci_disable_msi(pdev);
+
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ vdev->num_ctx = 0;
+ kfree(vdev->ctx);
+}
+
+/*
+ * IOCTL support
+ */
+static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (!is_intx(vdev) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t unmask = *(uint8_t *)data;
+ if (unmask)
+ vfio_pci_intx_unmask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+ if (fd >= 0)
+ return virqfd_enable(vdev, vfio_pci_intx_unmask_handler,
+ vfio_send_intx_eventfd, NULL,
+ &vdev->ctx[0].unmask, fd);
+
+ virqfd_disable(vdev->ctx[0].unmask);
+ }
+
+ return 0;
+}
+
+static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (!is_intx(vdev) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t mask = *(uint8_t *)data;
+ if (mask)
+ vfio_pci_intx_mask(vdev);
+ } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ return -ENOTTY; /* XXX implement me */
+ }
+
+ return 0;
+}
+
+static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+ vfio_intx_disable(vdev);
+ return 0;
+ }
+
+ if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t fd = *(int32_t *)data;
+ int ret;
+
+ if (is_intx(vdev))
+ return vfio_intx_set_signal(vdev, fd);
+
+ ret = vfio_intx_enable(vdev);
+ if (ret)
+ return ret;
+
+ ret = vfio_intx_set_signal(vdev, fd);
+ if (ret)
+ vfio_intx_disable(vdev);
+
+ return ret;
+ }
+
+ if (!is_intx(vdev))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ vfio_send_intx_eventfd(vdev, NULL);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t trigger = *(uint8_t *)data;
+ if (trigger)
+ vfio_send_intx_eventfd(vdev, NULL);
+ }
+ return 0;
+}
+
+static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
+ unsigned index, unsigned start,
+ unsigned count, uint32_t flags, void *data)
+{
+ int i;
+ bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;
+
+ if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
+ vfio_msi_disable(vdev, msix);
+ return 0;
+ }
+
+ if (!(irq_is(vdev, index) || is_irq_none(vdev)))
+ return -EINVAL;
+
+ if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ int32_t *fds = data;
+ int ret;
+
+ if (vdev->irq_type == index)
+ return vfio_msi_set_block(vdev, start, count,
+ fds, msix);
+
+ ret = vfio_msi_enable(vdev, start + count, msix);
+ if (ret)
+ return ret;
+
+ ret = vfio_msi_set_block(vdev, start, count, fds, msix);
+ if (ret)
+ vfio_msi_disable(vdev, msix);
+
+ return ret;
+ }
+
+ if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
+ return -EINVAL;
+
+ for (i = start; i < start + count; i++) {
+ if (!vdev->ctx[i].trigger)
+ continue;
+ if (flags & VFIO_IRQ_SET_DATA_NONE) {
+ eventfd_signal(vdev->ctx[i].trigger, 1);
+ } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
+ uint8_t *bools = data;
+ if (bools[i - start])
+ eventfd_signal(vdev->ctx[i].trigger, 1);
+ }
+ }
+ return 0;
+}
+
+int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
+ unsigned index, unsigned start, unsigned count,
+ void *data)
+{
+ int (*func)(struct vfio_pci_device *vdev, unsigned index,
+ unsigned start, unsigned count, uint32_t flags,
+ void *data) = NULL;
+
+ switch (index) {
+ case VFIO_PCI_INTX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ func = vfio_pci_set_intx_mask;
+ break;
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ func = vfio_pci_set_intx_unmask;
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_pci_set_intx_trigger;
+ break;
+ }
+ break;
+ case VFIO_PCI_MSI_IRQ_INDEX:
+ case VFIO_PCI_MSIX_IRQ_INDEX:
+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+ case VFIO_IRQ_SET_ACTION_MASK:
+ case VFIO_IRQ_SET_ACTION_UNMASK:
+ /* XXX Need masking support exported */
+ break;
+ case VFIO_IRQ_SET_ACTION_TRIGGER:
+ func = vfio_pci_set_msi_trigger;
+ break;
+ }
+ break;
+ }
+
+ if (!func)
+ return -ENOTTY;
+
+ return func(vdev, index, start, count, flags, data);
+}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
new file mode 100644
index 000000000000..611827cba8cd
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#ifndef VFIO_PCI_PRIVATE_H
+#define VFIO_PCI_PRIVATE_H
+
+#define VFIO_PCI_OFFSET_SHIFT 40
+
+#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
+#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
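+
+/*
+ * Example: region index 2 (BAR 2) starts at file offset
+ * 2ULL << VFIO_PCI_OFFSET_SHIFT, and (off & VFIO_PCI_OFFSET_MASK)
+ * recovers the offset within that BAR.
+ */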
+
+struct vfio_pci_irq_ctx {
+ struct eventfd_ctx *trigger;
+ struct virqfd *unmask;
+ struct virqfd *mask;
+ char *name;
+ bool masked;
+};
+
+struct vfio_pci_device {
+ struct pci_dev *pdev;
+ void __iomem *barmap[PCI_STD_RESOURCE_END + 1];
+ u8 *pci_config_map;
+ u8 *vconfig;
+ struct perm_bits *msi_perm;
+ spinlock_t irqlock;
+ struct mutex igate;
+ struct msix_entry *msix;
+ struct vfio_pci_irq_ctx *ctx;
+ int num_ctx;
+ int irq_type;
+ u8 msi_qmax;
+ u8 msix_bar;
+ u16 msix_size;
+ u32 msix_offset;
+ u32 rbar[7];
+ bool pci_2_3;
+ bool virq_disabled;
+ bool reset_works;
+ bool extended_caps;
+ bool bardirty;
+ struct pci_saved_state *pci_saved_state;
+ atomic_t refcnt;
+};
+
+#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
+#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX)
+#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX)
+#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
+#define irq_is(vdev, type) (vdev->irq_type == type)
+
+extern void vfio_pci_intx_mask(struct vfio_pci_device *vdev);
+extern void vfio_pci_intx_unmask(struct vfio_pci_device *vdev);
+
+extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
+ uint32_t flags, unsigned index,
+ unsigned start, unsigned count, void *data);
+
+extern ssize_t vfio_pci_config_readwrite(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+extern ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count,
+ loff_t *ppos, bool iswrite);
+
+extern int vfio_pci_init_perm_bits(void);
+extern void vfio_pci_uninit_perm_bits(void);
+
+extern int vfio_pci_virqfd_init(void);
+extern void vfio_pci_virqfd_exit(void);
+
+extern int vfio_config_init(struct vfio_pci_device *vdev);
+extern void vfio_config_free(struct vfio_pci_device *vdev);
+#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
new file mode 100644
index 000000000000..4362d9e7baa3
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -0,0 +1,269 @@
+/*
+ * VFIO PCI I/O Port & MMIO access
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "vfio_pci_private.h"
+
+/* I/O Port BAR access */
+ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ void __iomem *io;
+ size_t done = 0;
+
+ if (!pci_resource_start(pdev, bar))
+ return -EINVAL;
+
+ if (pos + count > pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!vdev->barmap[bar]) {
+ int ret;
+
+ ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
+ if (ret)
+ return ret;
+
+ vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+
+ if (!vdev->barmap[bar]) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -EINVAL;
+ }
+ }
+
+ io = vdev->barmap[bar];
+
+ while (count) {
+ int filled;
+
+ if (count >= 4 && !(pos % 4)) {
+ __le32 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 4))
+ return -EFAULT;
+
+ iowrite32(le32_to_cpu(val), io + pos);
+ } else {
+ val = cpu_to_le32(ioread32(io + pos));
+
+ if (copy_to_user(buf, &val, 4))
+ return -EFAULT;
+ }
+
+ filled = 4;
+
+ } else if ((pos % 2) == 0 && count >= 2) {
+ __le16 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 2))
+ return -EFAULT;
+
+ iowrite16(le16_to_cpu(val), io + pos);
+ } else {
+ val = cpu_to_le16(ioread16(io + pos));
+
+ if (copy_to_user(buf, &val, 2))
+ return -EFAULT;
+ }
+
+ filled = 2;
+ } else {
+ u8 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 1))
+ return -EFAULT;
+
+ iowrite8(val, io + pos);
+ } else {
+ val = ioread8(io + pos);
+
+ if (copy_to_user(buf, &val, 1))
+ return -EFAULT;
+ }
+
+ filled = 1;
+ }
+
+ count -= filled;
+ done += filled;
+ buf += filled;
+ pos += filled;
+ }
+
+ *ppos += done;
+
+ return done;
+}
+
+/*
+ * MMIO BAR access
+ * We also handle two excluded ranges: if the user accesses the ROM
+ * beyond what PCI reports as available, or touches the MSI-X table
+ * region, reads return 0xFF and writes are dropped.
+ */
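+/*
+ * Example with assumed offsets: with the MSI-X table at 0x1000-0x17ff,
+ * a 16-byte read at 0xff8 returns 8 real bytes followed by 8 bytes of
+ * 0xFF, and the matching write only reaches the first 8 bytes.
+ */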
+ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ struct pci_dev *pdev = vdev->pdev;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ void __iomem *io;
+ resource_size_t end;
+ size_t done = 0;
+ size_t x_start = 0, x_end = 0; /* excluded range */
+
+ if (!pci_resource_start(pdev, bar))
+ return -EINVAL;
+
+ end = pci_resource_len(pdev, bar);
+
+ if (pos > end)
+ return -EINVAL;
+
+ if (pos == end)
+ return 0;
+
+ if (pos + count > end)
+ count = end - pos;
+
+ if (bar == PCI_ROM_RESOURCE) {
+ io = pci_map_rom(pdev, &x_start);
+ x_end = end;
+ } else {
+ if (!vdev->barmap[bar]) {
+ int ret;
+
+ ret = pci_request_selected_regions(pdev, 1 << bar,
+ "vfio");
+ if (ret)
+ return ret;
+
+ vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
+
+ if (!vdev->barmap[bar]) {
+ pci_release_selected_regions(pdev, 1 << bar);
+ return -EINVAL;
+ }
+ }
+
+ io = vdev->barmap[bar];
+
+ if (bar == vdev->msix_bar) {
+ x_start = vdev->msix_offset;
+ x_end = vdev->msix_offset + vdev->msix_size;
+ }
+ }
+
+ if (!io)
+ return -EINVAL;
+
+ while (count) {
+ size_t fillable, filled;
+
+ if (pos < x_start)
+ fillable = x_start - pos;
+ else if (pos >= x_end)
+ fillable = end - pos;
+ else
+ fillable = 0;
+
+ if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
+ __le32 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 4))
+ goto out;
+
+ iowrite32(le32_to_cpu(val), io + pos);
+ } else {
+ val = cpu_to_le32(ioread32(io + pos));
+
+ if (copy_to_user(buf, &val, 4))
+ goto out;
+ }
+
+ filled = 4;
+ } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
+ __le16 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 2))
+ goto out;
+
+ iowrite16(le16_to_cpu(val), io + pos);
+ } else {
+ val = cpu_to_le16(ioread16(io + pos));
+
+ if (copy_to_user(buf, &val, 2))
+ goto out;
+ }
+
+ filled = 2;
+ } else if (fillable) {
+ u8 val;
+
+ if (iswrite) {
+ if (copy_from_user(&val, buf, 1))
+ goto out;
+
+ iowrite8(val, io + pos);
+ } else {
+ val = ioread8(io + pos);
+
+ if (copy_to_user(buf, &val, 1))
+ goto out;
+ }
+
+ filled = 1;
+ } else {
+ /* Drop writes, fill reads with FF, but never beyond the request */
+ filled = min((size_t)(x_end - pos), count);
+
+ if (!iswrite) {
+ char val = 0xFF;
+ size_t i;
+
+ for (i = 0; i < filled; i++) {
+ if (put_user(val, buf + i))
+ goto out;
+ }
+ }
+ }
+
+ count -= filled;
+ done += filled;
+ buf += filled;
+ pos += filled;
+ }
+
+ *ppos += done;
+
+out:
+ if (bar == PCI_ROM_RESOURCE)
+ pci_unmap_rom(pdev, io);
+
+ return count ? -EFAULT : done;
+}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
new file mode 100644
index 000000000000..17830c9c7cc6
--- /dev/null
+++ b/drivers/vfio/vfio.c
@@ -0,0 +1,1415 @@
+/*
+ * VFIO core
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/wait.h>
+
+#define DRIVER_VERSION "0.3"
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "VFIO - User Level meta-driver"
+
+static struct vfio {
+ struct class *class;
+ struct list_head iommu_drivers_list;
+ struct mutex iommu_drivers_lock;
+ struct list_head group_list;
+ struct idr group_idr;
+ struct mutex group_lock;
+ struct cdev group_cdev;
+ struct device *dev;
+ dev_t devt;
+ struct cdev cdev;
+ wait_queue_head_t release_q;
+} vfio;
+
+struct vfio_iommu_driver {
+ const struct vfio_iommu_driver_ops *ops;
+ struct list_head vfio_next;
+};
+
+struct vfio_container {
+ struct kref kref;
+ struct list_head group_list;
+ struct mutex group_lock;
+ struct vfio_iommu_driver *iommu_driver;
+ void *iommu_data;
+};
+
+struct vfio_group {
+ struct kref kref;
+ int minor;
+ atomic_t container_users;
+ struct iommu_group *iommu_group;
+ struct vfio_container *container;
+ struct list_head device_list;
+ struct mutex device_lock;
+ struct device *dev;
+ struct notifier_block nb;
+ struct list_head vfio_next;
+ struct list_head container_next;
+};
+
+struct vfio_device {
+ struct kref kref;
+ struct device *dev;
+ const struct vfio_device_ops *ops;
+ struct vfio_group *group;
+ struct list_head group_next;
+ void *device_data;
+};
+
+/**
+ * IOMMU driver registration
+ */
+int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver, *tmp;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
+
+ driver->ops = ops;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+
+ /* Check for duplicates */
+ list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
+ if (tmp->ops == ops) {
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return -EINVAL;
+ }
+ }
+
+ list_add(&driver->vfio_next, &vfio.iommu_drivers_list);
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);
+
+void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
+{
+ struct vfio_iommu_driver *driver;
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ if (driver->ops == ops) {
+ list_del(&driver->vfio_next);
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ kfree(driver);
+ return;
+ }
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
+
+/**
+ * Group minor allocation/free - both called with vfio.group_lock held
+ */
+static int vfio_alloc_group_minor(struct vfio_group *group)
+{
+ int ret, minor;
+
+again:
+ if (unlikely(idr_pre_get(&vfio.group_idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ /* index 0 is used by /dev/vfio/vfio */
+ ret = idr_get_new_above(&vfio.group_idr, group, 1, &minor);
+ if (ret == -EAGAIN)
+ goto again;
+ if (ret || minor > MINORMASK) {
+ if (minor > MINORMASK)
+ idr_remove(&vfio.group_idr, minor);
+ return -ENOSPC;
+ }
+
+ return minor;
+}
+
+static void vfio_free_group_minor(int minor)
+{
+ idr_remove(&vfio.group_idr, minor);
+}
+
+static int vfio_iommu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data);
+static void vfio_group_get(struct vfio_group *group);
+
+/**
+ * Container objects - containers are created when /dev/vfio/vfio is
+ * opened, but their lifecycle extends until the last user is done, so
+ * it's freed via kref. Must support container/group/device being
+ * closed in any order.
+ */
+static void vfio_container_get(struct vfio_container *container)
+{
+ kref_get(&container->kref);
+}
+
+static void vfio_container_release(struct kref *kref)
+{
+ struct vfio_container *container;
+ container = container_of(kref, struct vfio_container, kref);
+
+ kfree(container);
+}
+
+static void vfio_container_put(struct vfio_container *container)
+{
+ kref_put(&container->kref, vfio_container_release);
+}
+
+/**
+ * Group objects - create, release, get, put, search
+ */
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
+{
+ struct vfio_group *group, *tmp;
+ struct device *dev;
+ int ret, minor;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&group->kref);
+ INIT_LIST_HEAD(&group->device_list);
+ mutex_init(&group->device_lock);
+ atomic_set(&group->container_users, 0);
+ group->iommu_group = iommu_group;
+
+ group->nb.notifier_call = vfio_iommu_group_notifier;
+
+ /*
+ * blocking notifiers acquire a rwsem around registering and hold
+ * it around callback. Therefore, need to register outside of
+ * vfio.group_lock to avoid A-B/B-A contention. Our callback won't
+ * do anything unless it can find the group in vfio.group_list, so
+ * no harm in registering early.
+ */
+ ret = iommu_group_register_notifier(iommu_group, &group->nb);
+ if (ret) {
+ kfree(group);
+ return ERR_PTR(ret);
+ }
+
+ mutex_lock(&vfio.group_lock);
+
+ minor = vfio_alloc_group_minor(group);
+ if (minor < 0) {
+ mutex_unlock(&vfio.group_lock);
+ kfree(group);
+ return ERR_PTR(minor);
+ }
+
+ /* Did we race creating this group? */
+ list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
+ if (tmp->iommu_group == iommu_group) {
+ vfio_group_get(tmp);
+ vfio_free_group_minor(minor);
+ mutex_unlock(&vfio.group_lock);
+ kfree(group);
+ return tmp;
+ }
+ }
+
+ dev = device_create(vfio.class, NULL, MKDEV(MAJOR(vfio.devt), minor),
+ group, "%d", iommu_group_id(iommu_group));
+ if (IS_ERR(dev)) {
+ vfio_free_group_minor(minor);
+ mutex_unlock(&vfio.group_lock);
+ kfree(group);
+ return (struct vfio_group *)dev; /* ERR_PTR */
+ }
+
+ group->minor = minor;
+ group->dev = dev;
+
+ list_add(&group->vfio_next, &vfio.group_list);
+
+ mutex_unlock(&vfio.group_lock);
+
+ return group;
+}
+
+/* called with vfio.group_lock held */
+static void vfio_group_release(struct kref *kref)
+{
+ struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+
+ WARN_ON(!list_empty(&group->device_list));
+
+ device_destroy(vfio.class, MKDEV(MAJOR(vfio.devt), group->minor));
+ list_del(&group->vfio_next);
+ vfio_free_group_minor(group->minor);
+
+ mutex_unlock(&vfio.group_lock);
+
+ /*
+ * Unregister outside of lock. A spurious callback is harmless now
+ * that the group is no longer in vfio.group_list.
+ */
+ iommu_group_unregister_notifier(group->iommu_group, &group->nb);
+
+ kfree(group);
+}
+
+static void vfio_group_put(struct vfio_group *group)
+{
+ kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
+}
+
+/* Assume group_lock or group reference is held */
+static void vfio_group_get(struct vfio_group *group)
+{
+ kref_get(&group->kref);
+}
+
+/*
+ * Not really a try as we will sleep for mutex, but we need to make
+ * sure the group pointer is valid under lock and get a reference.
+ */
+static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
+{
+ struct vfio_group *target = group;
+
+ mutex_lock(&vfio.group_lock);
+ list_for_each_entry(group, &vfio.group_list, vfio_next) {
+ if (group == target) {
+ vfio_group_get(group);
+ mutex_unlock(&vfio.group_lock);
+ return group;
+ }
+ }
+ mutex_unlock(&vfio.group_lock);
+
+ return NULL;
+}
+
+static
+struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
+{
+ struct vfio_group *group;
+
+ mutex_lock(&vfio.group_lock);
+ list_for_each_entry(group, &vfio.group_list, vfio_next) {
+ if (group->iommu_group == iommu_group) {
+ vfio_group_get(group);
+ mutex_unlock(&vfio.group_lock);
+ return group;
+ }
+ }
+ mutex_unlock(&vfio.group_lock);
+
+ return NULL;
+}
+
+static struct vfio_group *vfio_group_get_from_minor(int minor)
+{
+ struct vfio_group *group;
+
+ mutex_lock(&vfio.group_lock);
+ group = idr_find(&vfio.group_idr, minor);
+ if (!group) {
+ mutex_unlock(&vfio.group_lock);
+ return NULL;
+ }
+ vfio_group_get(group);
+ mutex_unlock(&vfio.group_lock);
+
+ return group;
+}
+
+/**
+ * Device objects - create, release, get, put, search
+ */
+static
+struct vfio_device *vfio_group_create_device(struct vfio_group *group,
+ struct device *dev,
+ const struct vfio_device_ops *ops,
+ void *device_data)
+{
+ struct vfio_device *device;
+ int ret;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&device->kref);
+ device->dev = dev;
+ device->group = group;
+ device->ops = ops;
+ device->device_data = device_data;
+
+ ret = dev_set_drvdata(dev, device);
+ if (ret) {
+ kfree(device);
+ return ERR_PTR(ret);
+ }
+
+ /* No need to get group_lock, caller has group reference */
+ vfio_group_get(group);
+
+ mutex_lock(&group->device_lock);
+ list_add(&device->group_next, &group->device_list);
+ mutex_unlock(&group->device_lock);
+
+ return device;
+}
+
+static void vfio_device_release(struct kref *kref)
+{
+ struct vfio_device *device = container_of(kref,
+ struct vfio_device, kref);
+ struct vfio_group *group = device->group;
+
+ list_del(&device->group_next);
+ mutex_unlock(&group->device_lock);
+
+ dev_set_drvdata(device->dev, NULL);
+
+ kfree(device);
+
+ /* vfio_del_group_dev may be waiting for this device */
+ wake_up(&vfio.release_q);
+}
+
+/* Device reference always implies a group reference */
+static void vfio_device_put(struct vfio_device *device)
+{
+ struct vfio_group *group = device->group;
+ kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
+ vfio_group_put(group);
+}
+
+static void vfio_device_get(struct vfio_device *device)
+{
+ vfio_group_get(device->group);
+ kref_get(&device->kref);
+}
+
+static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
+ struct device *dev)
+{
+ struct vfio_device *device;
+
+ mutex_lock(&group->device_lock);
+ list_for_each_entry(device, &group->device_list, group_next) {
+ if (device->dev == dev) {
+ vfio_device_get(device);
+ mutex_unlock(&group->device_lock);
+ return device;
+ }
+ }
+ mutex_unlock(&group->device_lock);
+ return NULL;
+}
+
+/*
+ * Whitelist some drivers that we know are safe (no DMA) or just sit on
+ * a device. It's not always practical to leave a device within a group
+ * driverless as it could get re-bound to something unsafe.
+ */
+static const char * const vfio_driver_whitelist[] = { "pci-stub" };
+
+static bool vfio_whitelisted_driver(struct device_driver *drv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
+ if (!strcmp(drv->name, vfio_driver_whitelist[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * A vfio group is viable for use by userspace if all devices are either
+ * driver-less or bound to a vfio or whitelisted driver. We test the
+ * latter by the existence of a struct vfio_device matching the dev.
+ */
+static int vfio_dev_viable(struct device *dev, void *data)
+{
+ struct vfio_group *group = data;
+ struct vfio_device *device;
+
+ if (!dev->driver || vfio_whitelisted_driver(dev->driver))
+ return 0;
+
+ device = vfio_group_get_device(group, dev);
+ if (device) {
+ vfio_device_put(device);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * Async device support
+ */
+static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
+{
+ struct vfio_device *device;
+
+ /* Do we already know about it? We shouldn't */
+ device = vfio_group_get_device(group, dev);
+ if (WARN_ON_ONCE(device)) {
+ vfio_device_put(device);
+ return 0;
+ }
+
+ /* Nothing to do for idle groups */
+ if (!atomic_read(&group->container_users))
+ return 0;
+
+ /* TODO Prevent device auto probing */
+ WARN("Device %s added to live group %d!\n", dev_name(dev),
+ iommu_group_id(group->iommu_group));
+
+ return 0;
+}
+
+static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
+{
+ struct vfio_device *device;
+
+ /*
+ * Expect to fall out here. If a device was in use, it would
+ * have been bound to a vfio sub-driver, which would have blocked
+ * in .remove at vfio_del_group_dev. Sanity check that we no
+ * longer track the device, so it's safe to remove.
+ */
+ device = vfio_group_get_device(group, dev);
+ if (likely(!device))
+ return 0;
+
+ WARN("Device %s removed from live group %d!\n", dev_name(dev),
+ iommu_group_id(group->iommu_group));
+
+ vfio_device_put(device);
+ return 0;
+}
+
+static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
+{
+ /* We don't care what happens when the group isn't in use */
+ if (!atomic_read(&group->container_users))
+ return 0;
+
+ return vfio_dev_viable(dev, group);
+}
+
+static int vfio_iommu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct vfio_group *group = container_of(nb, struct vfio_group, nb);
+ struct device *dev = data;
+
+ /*
+ * Need to go through a group_lock lookup to get a reference or
+ * we risk racing a group being removed. Leave a WARN_ON for
+ * debugging, but if the group no longer exists, a spurious notify
+ * is harmless.
+ */
+ group = vfio_group_try_get(group);
+ if (WARN_ON(!group))
+ return NOTIFY_OK;
+
+ switch (action) {
+ case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
+ vfio_group_nb_add_dev(group, dev);
+ break;
+ case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
+ vfio_group_nb_del_dev(group, dev);
+ break;
+ case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
+ pr_debug("%s: Device %s, group %d binding to driver\n",
+ __func__, dev_name(dev),
+ iommu_group_id(group->iommu_group));
+ break;
+ case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
+ pr_debug("%s: Device %s, group %d bound to driver %s\n",
+ __func__, dev_name(dev),
+ iommu_group_id(group->iommu_group), dev->driver->name);
+ BUG_ON(vfio_group_nb_verify(group, dev));
+ break;
+ case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
+ pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
+ __func__, dev_name(dev),
+ iommu_group_id(group->iommu_group), dev->driver->name);
+ break;
+ case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
+ pr_debug("%s: Device %s, group %d unbound from driver\n",
+ __func__, dev_name(dev),
+ iommu_group_id(group->iommu_group));
+ /*
+ * XXX An unbound device in a live group is ok, but we'd
+ * really like to avoid the above BUG_ON by preventing other
+ * drivers from binding to it. Once that occurs, we have to
+ * stop the system to maintain isolation. At a minimum, we'd
+ * want a toggle to disable driver auto probe for this device.
+ */
+ break;
+ }
+
+ vfio_group_put(group);
+ return NOTIFY_OK;
+}
+
+/**
+ * VFIO driver API
+ */
+int vfio_add_group_dev(struct device *dev,
+ const struct vfio_device_ops *ops, void *device_data)
+{
+ struct iommu_group *iommu_group;
+ struct vfio_group *group;
+ struct vfio_device *device;
+
+ iommu_group = iommu_group_get(dev);
+ if (!iommu_group)
+ return -EINVAL;
+
+ group = vfio_group_get_from_iommu(iommu_group);
+ if (!group) {
+ group = vfio_create_group(iommu_group);
+ if (IS_ERR(group)) {
+ iommu_group_put(iommu_group);
+ return PTR_ERR(group);
+ }
+ }
+
+ device = vfio_group_get_device(group, dev);
+ if (device) {
+ WARN(1, "Device %s already exists on group %d\n",
+ dev_name(dev), iommu_group_id(iommu_group));
+ vfio_device_put(device);
+ vfio_group_put(group);
+ iommu_group_put(iommu_group);
+ return -EBUSY;
+ }
+
+ device = vfio_group_create_device(group, dev, ops, device_data);
+ if (IS_ERR(device)) {
+ vfio_group_put(group);
+ iommu_group_put(iommu_group);
+ return PTR_ERR(device);
+ }
+
+ /*
+ * Added device holds reference to iommu_group and vfio_device
+ * (which in turn holds reference to vfio_group). Drop extra
+ * group reference used while acquiring device.
+ */
+ vfio_group_put(group);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_add_group_dev);
+
+/* Test whether a struct device is present in our tracking */
+static bool vfio_dev_present(struct device *dev)
+{
+ struct iommu_group *iommu_group;
+ struct vfio_group *group;
+ struct vfio_device *device;
+
+ iommu_group = iommu_group_get(dev);
+ if (!iommu_group)
+ return false;
+
+ group = vfio_group_get_from_iommu(iommu_group);
+ if (!group) {
+ iommu_group_put(iommu_group);
+ return false;
+ }
+
+ device = vfio_group_get_device(group, dev);
+ if (!device) {
+ vfio_group_put(group);
+ iommu_group_put(iommu_group);
+ return false;
+ }
+
+ vfio_device_put(device);
+ vfio_group_put(group);
+ iommu_group_put(iommu_group);
+ return true;
+}
+
+/*
+ * Decrement the device reference count and wait for the device to be
+ * removed. Open file descriptors for the device...
+ */
+void *vfio_del_group_dev(struct device *dev)
+{
+ struct vfio_device *device = dev_get_drvdata(dev);
+ struct vfio_group *group = device->group;
+ struct iommu_group *iommu_group = group->iommu_group;
+ void *device_data = device->device_data;
+
+ vfio_device_put(device);
+
+ /* TODO send a signal to encourage this to be released */
+ wait_event(vfio.release_q, !vfio_dev_present(dev));
+
+ iommu_group_put(iommu_group);
+
+ return device_data;
+}
+EXPORT_SYMBOL_GPL(vfio_del_group_dev);
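+
+/*
+ * Illustrative sketch, not part of this patch: a vfio bus driver is
+ * expected to register each device from its probe path and tear it
+ * down from remove, roughly as below. The my_* callback names, ops
+ * structure instance and private data are hypothetical placeholders;
+ * only the two exported entry points come from this file.
+ *
+ *    static const struct vfio_device_ops my_vfio_ops = {
+ *        .open    = my_open,
+ *        .release = my_release,
+ *        .ioctl   = my_ioctl,
+ *        .read    = my_read,
+ *        .write   = my_write,
+ *        .mmap    = my_mmap,
+ *    };
+ *
+ *    probe:  vfio_add_group_dev(dev, &my_vfio_ops, my_data);
+ *    remove: my_data = vfio_del_group_dev(dev);
+ *
+ * vfio_del_group_dev() blocks until all references to the device are
+ * dropped, i.e. until any open device file descriptors are released.
+ */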
+
+/**
+ * VFIO base fd, /dev/vfio/vfio
+ */
+static long vfio_ioctl_check_extension(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+ long ret = 0;
+
+ switch (arg) {
+ /* No base extensions yet */
+ default:
+ /*
+ * If no driver is set, poll all registered drivers for
+ * extensions and return the first positive result. If
+ * a driver is already set, further queries will be passed
+ * only to that driver.
+ */
+ if (!driver) {
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list,
+ vfio_next) {
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ ret = driver->ops->ioctl(NULL,
+ VFIO_CHECK_EXTENSION,
+ arg);
+ module_put(driver->ops->owner);
+ if (ret > 0)
+ break;
+ }
+ mutex_unlock(&vfio.iommu_drivers_lock);
+ } else
+ ret = driver->ops->ioctl(container->iommu_data,
+ VFIO_CHECK_EXTENSION, arg);
+ }
+
+ return ret;
+}
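+
+/*
+ * Illustrative sketch, not part of this patch: before selecting a
+ * backend with VFIO_SET_IOMMU, userspace typically probes the
+ * container fd as below (error handling elided). With no iommu driver
+ * set yet, the extension check is answered by polling every registered
+ * backend, as implemented above.
+ *
+ *    int container = open("/dev/vfio/vfio", O_RDWR);
+ *
+ *    if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
+ *        return -1;
+ *
+ *    if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
+ *        return -1;
+ */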
+
+/* hold container->group_lock */
+static int __vfio_container_attach_groups(struct vfio_container *container,
+ struct vfio_iommu_driver *driver,
+ void *data)
+{
+ struct vfio_group *group;
+ int ret = -ENODEV;
+
+ list_for_each_entry(group, &container->group_list, container_next) {
+ ret = driver->ops->attach_group(data, group->iommu_group);
+ if (ret)
+ goto unwind;
+ }
+
+ return ret;
+
+unwind:
+ list_for_each_entry_continue_reverse(group, &container->group_list,
+ container_next) {
+ driver->ops->detach_group(data, group->iommu_group);
+ }
+
+ return ret;
+}
+
+static long vfio_ioctl_set_iommu(struct vfio_container *container,
+ unsigned long arg)
+{
+ struct vfio_iommu_driver *driver;
+ long ret = -ENODEV;
+
+ mutex_lock(&container->group_lock);
+
+ /*
+ * The container is designed to be an unprivileged interface while
+ * the group can be assigned to specific users. Therefore, only by
+ * adding a group to a container does the user get the privilege of
+ * enabling the iommu, which may allocate finite resources. There
+ * is no unset_iommu, but by removing all the groups from a container,
+ * the container is deprivileged and returns to an unset state.
+ */
+ if (list_empty(&container->group_list) || container->iommu_driver) {
+ mutex_unlock(&container->group_lock);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vfio.iommu_drivers_lock);
+ list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
+ void *data;
+
+ if (!try_module_get(driver->ops->owner))
+ continue;
+
+ /*
+ * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
+ * so test which iommu driver reports support for this
+ * extension and call open on the first one that does. We
+ * also pass it the magic, allowing a single driver to
+ * support multiple interfaces if it wishes.
+ */
+ if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
+ module_put(driver->ops->owner);
+ continue;
+ }
+
+ /* module reference holds the driver we're working on */
+ mutex_unlock(&vfio.iommu_drivers_lock);
+
+ data = driver->ops->open(arg);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ module_put(driver->ops->owner);
+ goto skip_drivers_unlock;
+ }
+
+ ret = __vfio_container_attach_groups(container, driver, data);
+ if (!ret) {
+ container->iommu_driver = driver;
+ container->iommu_data = data;
+ } else {
+ driver->ops->release(data);
+ module_put(driver->ops->owner);
+ }
+
+ goto skip_drivers_unlock;
+ }
+
+ mutex_unlock(&vfio.iommu_drivers_lock);
+skip_drivers_unlock:
+ mutex_unlock(&container->group_lock);
+
+ return ret;
+}
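+
+/*
+ * Illustrative sketch, not part of this patch: SET_IOMMU is rejected
+ * on an empty container, so userspace must attach at least one group
+ * first. The group number (26) is hypothetical; it is the iommu group
+ * id of the device being assigned.
+ *
+ *    int container = open("/dev/vfio/vfio", O_RDWR);
+ *    int group = open("/dev/vfio/26", O_RDWR);
+ *
+ *    ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
+ *    ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
+ */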
+
+static long vfio_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver;
+ void *data;
+ long ret = -EINVAL;
+
+ if (!container)
+ return ret;
+
+ driver = container->iommu_driver;
+ data = container->iommu_data;
+
+ switch (cmd) {
+ case VFIO_GET_API_VERSION:
+ ret = VFIO_API_VERSION;
+ break;
+ case VFIO_CHECK_EXTENSION:
+ ret = vfio_ioctl_check_extension(container, arg);
+ break;
+ case VFIO_SET_IOMMU:
+ ret = vfio_ioctl_set_iommu(container, arg);
+ break;
+ default:
+ if (driver) /* passthrough all unrecognized ioctls */
+ ret = driver->ops->ioctl(data, cmd, arg);
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long vfio_fops_compat_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+ return vfio_fops_unl_ioctl(filep, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static int vfio_fops_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container;
+
+ container = kzalloc(sizeof(*container), GFP_KERNEL);
+ if (!container)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&container->group_list);
+ mutex_init(&container->group_lock);
+ kref_init(&container->kref);
+
+ filep->private_data = container;
+
+ return 0;
+}
+
+static int vfio_fops_release(struct inode *inode, struct file *filep)
+{
+ struct vfio_container *container = filep->private_data;
+
+ filep->private_data = NULL;
+
+ vfio_container_put(container);
+
+ return 0;
+}
+
+/*
+ * Once an iommu driver is set, we optionally pass read/write/mmap
+ * on to the driver, allowing management interfaces beyond ioctl.
+ */
+static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (unlikely(!driver || !driver->ops->read))
+ return -EINVAL;
+
+ return driver->ops->read(container->iommu_data, buf, count, ppos);
+}
+
+static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (unlikely(!driver || !driver->ops->write))
+ return -EINVAL;
+
+ return driver->ops->write(container->iommu_data, buf, count, ppos);
+}
+
+static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct vfio_container *container = filep->private_data;
+ struct vfio_iommu_driver *driver = container->iommu_driver;
+
+ if (unlikely(!driver || !driver->ops->mmap))
+ return -EINVAL;
+
+ return driver->ops->mmap(container->iommu_data, vma);
+}
+
+static const struct file_operations vfio_fops = {
+ .owner = THIS_MODULE,
+ .open = vfio_fops_open,
+ .release = vfio_fops_release,
+ .read = vfio_fops_read,
+ .write = vfio_fops_write,
+ .unlocked_ioctl = vfio_fops_unl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vfio_fops_compat_ioctl,
+#endif
+ .mmap = vfio_fops_mmap,
+};
+
+/**
+ * VFIO Group fd, /dev/vfio/$GROUP
+ */
+static void __vfio_group_unset_container(struct vfio_group *group)
+{
+ struct vfio_container *container = group->container;
+ struct vfio_iommu_driver *driver;
+
+ mutex_lock(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (driver)
+ driver->ops->detach_group(container->iommu_data,
+ group->iommu_group);
+
+ group->container = NULL;
+ list_del(&group->container_next);
+
+ /* Detaching the last group deprivileges a container, remove iommu */
+ if (driver && list_empty(&container->group_list)) {
+ driver->ops->release(container->iommu_data);
+ module_put(driver->ops->owner);
+ container->iommu_driver = NULL;
+ container->iommu_data = NULL;
+ }
+
+ mutex_unlock(&container->group_lock);
+
+ vfio_container_put(container);
+}
+
+/*
+ * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
+ * if there was no container to unset. Since the ioctl is called on
+ * the group, we know it still exists, so the only valid
+ * transition here is 1->0.
+ */
+static int vfio_group_unset_container(struct vfio_group *group)
+{
+ int users = atomic_cmpxchg(&group->container_users, 1, 0);
+
+ if (!users)
+ return -EINVAL;
+ if (users != 1)
+ return -EBUSY;
+
+ __vfio_group_unset_container(group);
+
+ return 0;
+}
+
+/*
+ * When removing container users, anything that removes the last user
+ * implicitly removes the group from the container. That is, if the
+ * group file descriptor is closed, as well as any device file descriptors,
+ * the group is free.
+ */
+static void vfio_group_try_dissolve_container(struct vfio_group *group)
+{
+ if (0 == atomic_dec_if_positive(&group->container_users))
+ __vfio_group_unset_container(group);
+}
+
+static int vfio_group_set_container(struct vfio_group *group, int container_fd)
+{
+ struct file *filep;
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+ int ret = 0;
+
+ if (atomic_read(&group->container_users))
+ return -EINVAL;
+
+ filep = fget(container_fd);
+ if (!filep)
+ return -EBADF;
+
+ /* Sanity check, is this really our fd? */
+ if (filep->f_op != &vfio_fops) {
+ fput(filep);
+ return -EINVAL;
+ }
+
+ container = filep->private_data;
+ WARN_ON(!container); /* fget ensures we don't race vfio_release */
+
+ mutex_lock(&container->group_lock);
+
+ driver = container->iommu_driver;
+ if (driver) {
+ ret = driver->ops->attach_group(container->iommu_data,
+ group->iommu_group);
+ if (ret)
+ goto unlock_out;
+ }
+
+ group->container = container;
+ list_add(&group->container_next, &container->group_list);
+
+ /* Get a reference on the container and mark a user within the group */
+ vfio_container_get(container);
+ atomic_inc(&group->container_users);
+
+unlock_out:
+ mutex_unlock(&container->group_lock);
+ fput(filep);
+
+ return ret;
+}
+
+static bool vfio_group_viable(struct vfio_group *group)
+{
+ return (iommu_group_for_each_dev(group->iommu_group,
+ group, vfio_dev_viable) == 0);
+}
+
+static const struct file_operations vfio_device_fops;
+
+static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
+{
+ struct vfio_device *device;
+ struct file *filep;
+ int ret = -ENODEV;
+
+ if (0 == atomic_read(&group->container_users) ||
+ !group->container->iommu_driver || !vfio_group_viable(group))
+ return -EINVAL;
+
+ mutex_lock(&group->device_lock);
+ list_for_each_entry(device, &group->device_list, group_next) {
+ if (strcmp(dev_name(device->dev), buf))
+ continue;
+
+ ret = device->ops->open(device->device_data);
+ if (ret)
+ break;
+ /*
+ * We can't use anon_inode_getfd() because we need to modify
+ * the f_mode flags directly to allow more than just ioctls
+ */
+ ret = get_unused_fd();
+ if (ret < 0) {
+ device->ops->release(device->device_data);
+ break;
+ }
+
+ filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+ device, O_RDWR);
+ if (IS_ERR(filep)) {
+ put_unused_fd(ret);
+ ret = PTR_ERR(filep);
+ device->ops->release(device->device_data);
+ break;
+ }
+
+ /*
+ * TODO: add an anon_inode interface to do this.
+ * Appears to be missing by lack of need rather than
+ * explicitly prevented. Now there's need.
+ */
+ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+
+ vfio_device_get(device);
+ atomic_inc(&group->container_users);
+
+ fd_install(ret, filep);
+ break;
+ }
+ mutex_unlock(&group->device_lock);
+
+ return ret;
+}
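+
+/*
+ * Illustrative sketch, not part of this patch: once the group is bound
+ * to a container with an iommu backend, userspace requests a device fd
+ * by device name. The PCI address below is hypothetical.
+ *
+ *    int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
+ *
+ * The returned fd routes read, write, mmap and ioctl to the
+ * vfio_device_ops registered by the bus driver for that device.
+ */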
+
+static long vfio_group_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_group *group = filep->private_data;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case VFIO_GROUP_GET_STATUS:
+ {
+ struct vfio_group_status status;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_group_status, flags);
+
+ if (copy_from_user(&status, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (status.argsz < minsz)
+ return -EINVAL;
+
+ status.flags = 0;
+
+ if (vfio_group_viable(group))
+ status.flags |= VFIO_GROUP_FLAGS_VIABLE;
+
+ if (group->container)
+ status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
+
+ if (copy_to_user((void __user *)arg, &status, minsz))
+ return -EFAULT;
+
+ ret = 0;
+ break;
+ }
+ case VFIO_GROUP_SET_CONTAINER:
+ {
+ int fd;
+
+ if (get_user(fd, (int __user *)arg))
+ return -EFAULT;
+
+ if (fd < 0)
+ return -EINVAL;
+
+ ret = vfio_group_set_container(group, fd);
+ break;
+ }
+ case VFIO_GROUP_UNSET_CONTAINER:
+ ret = vfio_group_unset_container(group);
+ break;
+ case VFIO_GROUP_GET_DEVICE_FD:
+ {
+ char *buf;
+
+ buf = strndup_user((const char __user *)arg, PAGE_SIZE);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = vfio_group_get_device_fd(group, buf);
+ kfree(buf);
+ break;
+ }
+ }
+
+ return ret;
+}
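+
+/*
+ * Illustrative sketch, not part of this patch: userspace can confirm
+ * group viability before binding the group to a container, using the
+ * argsz convention handled above (error handling elided).
+ *
+ *    struct vfio_group_status status = { .argsz = sizeof(status) };
+ *
+ *    ioctl(group, VFIO_GROUP_GET_STATUS, &status);
+ *    if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
+ *        return -1;
+ */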
+
+#ifdef CONFIG_COMPAT
+static long vfio_group_fops_compat_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+ return vfio_group_fops_unl_ioctl(filep, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static int vfio_group_fops_open(struct inode *inode, struct file *filep)
+{
+ struct vfio_group *group;
+
+ group = vfio_group_get_from_minor(iminor(inode));
+ if (!group)
+ return -ENODEV;
+
+ if (group->container) {
+ vfio_group_put(group);
+ return -EBUSY;
+ }
+
+ filep->private_data = group;
+
+ return 0;
+}
+
+static int vfio_group_fops_release(struct inode *inode, struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ filep->private_data = NULL;
+
+ vfio_group_try_dissolve_container(group);
+
+ vfio_group_put(group);
+
+ return 0;
+}
+
+static const struct file_operations vfio_group_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = vfio_group_fops_unl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vfio_group_fops_compat_ioctl,
+#endif
+ .open = vfio_group_fops_open,
+ .release = vfio_group_fops_release,
+};
+
+/**
+ * VFIO Device fd
+ */
+static int vfio_device_fops_release(struct inode *inode, struct file *filep)
+{
+ struct vfio_device *device = filep->private_data;
+
+ device->ops->release(device->device_data);
+
+ vfio_group_try_dissolve_container(device->group);
+
+ vfio_device_put(device);
+
+ return 0;
+}
+
+static long vfio_device_fops_unl_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_device *device = filep->private_data;
+
+ if (unlikely(!device->ops->ioctl))
+ return -EINVAL;
+
+ return device->ops->ioctl(device->device_data, cmd, arg);
+}
+
+static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_device *device = filep->private_data;
+
+ if (unlikely(!device->ops->read))
+ return -EINVAL;
+
+ return device->ops->read(device->device_data, buf, count, ppos);
+}
+
+static ssize_t vfio_device_fops_write(struct file *filep,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct vfio_device *device = filep->private_data;
+
+ if (unlikely(!device->ops->write))
+ return -EINVAL;
+
+ return device->ops->write(device->device_data, buf, count, ppos);
+}
+
+static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct vfio_device *device = filep->private_data;
+
+ if (unlikely(!device->ops->mmap))
+ return -EINVAL;
+
+ return device->ops->mmap(device->device_data, vma);
+}
+
+#ifdef CONFIG_COMPAT
+static long vfio_device_fops_compat_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+ return vfio_device_fops_unl_ioctl(filep, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations vfio_device_fops = {
+ .owner = THIS_MODULE,
+ .release = vfio_device_fops_release,
+ .read = vfio_device_fops_read,
+ .write = vfio_device_fops_write,
+ .unlocked_ioctl = vfio_device_fops_unl_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vfio_device_fops_compat_ioctl,
+#endif
+ .mmap = vfio_device_fops_mmap,
+};
+
+/**
+ * Module/class support
+ */
+static char *vfio_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
+}
+
+static int __init vfio_init(void)
+{
+ int ret;
+
+ idr_init(&vfio.group_idr);
+ mutex_init(&vfio.group_lock);
+ mutex_init(&vfio.iommu_drivers_lock);
+ INIT_LIST_HEAD(&vfio.group_list);
+ INIT_LIST_HEAD(&vfio.iommu_drivers_list);
+ init_waitqueue_head(&vfio.release_q);
+
+ vfio.class = class_create(THIS_MODULE, "vfio");
+ if (IS_ERR(vfio.class)) {
+ ret = PTR_ERR(vfio.class);
+ goto err_class;
+ }
+
+ vfio.class->devnode = vfio_devnode;
+
+ ret = alloc_chrdev_region(&vfio.devt, 0, MINORMASK, "vfio");
+ if (ret)
+ goto err_base_chrdev;
+
+ cdev_init(&vfio.cdev, &vfio_fops);
+ ret = cdev_add(&vfio.cdev, vfio.devt, 1);
+ if (ret)
+ goto err_base_cdev;
+
+ vfio.dev = device_create(vfio.class, NULL, vfio.devt, NULL, "vfio");
+ if (IS_ERR(vfio.dev)) {
+ ret = PTR_ERR(vfio.dev);
+ goto err_base_dev;
+ }
+
+ /* /dev/vfio/$GROUP */
+ cdev_init(&vfio.group_cdev, &vfio_group_fops);
+ ret = cdev_add(&vfio.group_cdev,
+ MKDEV(MAJOR(vfio.devt), 1), MINORMASK - 1);
+ if (ret)
+ goto err_groups_cdev;
+
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+
+ /*
+ * Attempt to load known iommu-drivers. This gives us a working
+ * environment without the user needing to explicitly load iommu
+ * drivers.
+ */
+ request_module_nowait("vfio_iommu_type1");
+
+ return 0;
+
+err_groups_cdev:
+ device_destroy(vfio.class, vfio.devt);
+err_base_dev:
+ cdev_del(&vfio.cdev);
+err_base_cdev:
+ unregister_chrdev_region(vfio.devt, MINORMASK);
+err_base_chrdev:
+ class_destroy(vfio.class);
+ vfio.class = NULL;
+err_class:
+ return ret;
+}
+
+static void __exit vfio_cleanup(void)
+{
+ WARN_ON(!list_empty(&vfio.group_list));
+
+ idr_destroy(&vfio.group_idr);
+ cdev_del(&vfio.group_cdev);
+ device_destroy(vfio.class, vfio.devt);
+ cdev_del(&vfio.cdev);
+ unregister_chrdev_region(vfio.devt, MINORMASK);
+ class_destroy(vfio.class);
+ vfio.class = NULL;
+}
+
+module_init(vfio_init);
+module_exit(vfio_cleanup);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
new file mode 100644
index 000000000000..6f3fbc48a6c7
--- /dev/null
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -0,0 +1,753 @@
+/*
+ * VFIO: IOMMU DMA mapping support for Type1 IOMMU
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from original vfio:
+ * Copyright 2010 Cisco Systems, Inc. All rights reserved.
+ * Author: Tom Lyon, pugs@cisco.com
+ *
+ * We arbitrarily define a Type1 IOMMU as one matching the below code.
+ * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
+ * VT-d, but that makes it harder to re-use as theoretically anyone
+ * implementing a similar IOMMU could make use of this. We expect the
+ * IOMMU to support the IOMMU API and have few to no restrictions around
+ * the IOVA range that can be mapped. The Type1 IOMMU is currently
+ * optimized for relatively static mappings of a userspace process with
+ * userspace pages pinned into memory. We also assume devices and IOMMU
+ * domains are PCI based as the IOMMU API is still centered around a
+ * device/bus interface rather than a group interface.
+ */
+
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pci.h> /* pci_bus_type */
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/workqueue.h>
+
+#define DRIVER_VERSION "0.2"
+#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
+#define DRIVER_DESC "Type1 IOMMU driver for VFIO"
+
+static bool allow_unsafe_interrupts;
+module_param_named(allow_unsafe_interrupts,
+ allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(allow_unsafe_interrupts,
+ "Enable VFIO IOMMU support for on platforms without interrupt remapping support.");
+
+struct vfio_iommu {
+ struct iommu_domain *domain;
+ struct mutex lock;
+ struct list_head dma_list;
+ struct list_head group_list;
+ bool cache;
+};
+
+struct vfio_dma {
+ struct list_head next;
+ dma_addr_t iova; /* Device address */
+ unsigned long vaddr; /* Process virtual addr */
+ long npage; /* Number of pages */
+ int prot; /* IOMMU_READ/WRITE */
+};
+
+struct vfio_group {
+ struct iommu_group *iommu_group;
+ struct list_head next;
+};
+
+/*
+ * This code handles mapping and unmapping of user data buffers
+ * into DMA'ble space using the IOMMU
+ */
+
+#define NPAGE_TO_SIZE(npage) ((size_t)(npage) << PAGE_SHIFT)
+
+struct vwork {
+ struct mm_struct *mm;
+ long npage;
+ struct work_struct work;
+};
+
+/* delayed decrement/increment for locked_vm */
+static void vfio_lock_acct_bg(struct work_struct *work)
+{
+ struct vwork *vwork = container_of(work, struct vwork, work);
+ struct mm_struct *mm;
+
+ mm = vwork->mm;
+ down_write(&mm->mmap_sem);
+ mm->locked_vm += vwork->npage;
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ kfree(vwork);
+}
+
+static void vfio_lock_acct(long npage)
+{
+ struct vwork *vwork;
+ struct mm_struct *mm;
+
+ if (!current->mm)
+ return; /* process exited */
+
+ if (down_write_trylock(&current->mm->mmap_sem)) {
+ current->mm->locked_vm += npage;
+ up_write(&current->mm->mmap_sem);
+ return;
+ }
+
+ /*
+ * Couldn't get mmap_sem lock, so must set up to update
+ * mm->locked_vm later. If locked_vm were atomic, we
+ * wouldn't need this silliness.
+ */
+ vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
+ if (!vwork)
+ return;
+ mm = get_task_mm(current);
+ if (!mm) {
+ kfree(vwork);
+ return;
+ }
+ INIT_WORK(&vwork->work, vfio_lock_acct_bg);
+ vwork->mm = mm;
+ vwork->npage = npage;
+ schedule_work(&vwork->work);
+}
+
+/*
+ * Some mappings aren't backed by a struct page, for example an mmap'd
+ * MMIO range for our own or another device. These use a different
+ * pfn conversion and shouldn't be tracked as locked pages.
+ */
+static bool is_invalid_reserved_pfn(unsigned long pfn)
+{
+ if (pfn_valid(pfn)) {
+ bool reserved;
+ struct page *tail = pfn_to_page(pfn);
+ struct page *head = compound_trans_head(tail);
+ reserved = !!(PageReserved(head));
+ if (head != tail) {
+ /*
+ * "head" is not a dangling pointer
+ * (compound_trans_head takes care of that)
+ * but the hugepage may have been split
+ * from under us (and we may not hold a
+ * reference count on the head page so it can
+ * be reused before we run PageReferenced), so
+ * we have to check PageTail before returning
+ * what we just read.
+ */
+ smp_rmb();
+ if (PageTail(tail))
+ return reserved;
+ }
+ return PageReserved(tail);
+ }
+
+ return true;
+}
+
+static int put_pfn(unsigned long pfn, int prot)
+{
+ if (!is_invalid_reserved_pfn(pfn)) {
+ struct page *page = pfn_to_page(pfn);
+ if (prot & IOMMU_WRITE)
+ SetPageDirty(page);
+ put_page(page);
+ return 1;
+ }
+ return 0;
+}
+
+/* Unmap DMA region */
+static long __vfio_dma_do_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
+ long npage, int prot)
+{
+ long i, unlocked = 0;
+
+ for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
+ unsigned long pfn;
+
+ pfn = iommu_iova_to_phys(iommu->domain, iova) >> PAGE_SHIFT;
+ if (pfn) {
+ iommu_unmap(iommu->domain, iova, PAGE_SIZE);
+ unlocked += put_pfn(pfn, prot);
+ }
+ }
+ return unlocked;
+}
+
+static void vfio_dma_unmap(struct vfio_iommu *iommu, dma_addr_t iova,
+ long npage, int prot)
+{
+ long unlocked;
+
+ unlocked = __vfio_dma_do_unmap(iommu, iova, npage, prot);
+ vfio_lock_acct(-unlocked);
+}
+
+static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
+{
+ struct page *page[1];
+ struct vm_area_struct *vma;
+ int ret = -EFAULT;
+
+ if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
+ *pfn = page_to_pfn(page[0]);
+ return 0;
+ }
+
+ down_read(&current->mm->mmap_sem);
+
+ vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);
+
+ if (vma && vma->vm_flags & VM_PFNMAP) {
+ *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ if (is_invalid_reserved_pfn(*pfn))
+ ret = 0;
+ }
+
+ up_read(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+/* Map DMA region */
+static int __vfio_dma_map(struct vfio_iommu *iommu, dma_addr_t iova,
+ unsigned long vaddr, long npage, int prot)
+{
+ dma_addr_t start = iova;
+ long i, locked = 0;
+ int ret;
+
+ /* Verify that pages are not already mapped */
+ for (i = 0; i < npage; i++, iova += PAGE_SIZE)
+ if (iommu_iova_to_phys(iommu->domain, iova))
+ return -EBUSY;
+
+ iova = start;
+
+ if (iommu->cache)
+ prot |= IOMMU_CACHE;
+
+ /*
+ * XXX We break mappings into pages and use get_user_pages_fast to
+ * pin the pages in memory. It's been suggested that mlock might
+ * provide a more efficient mechanism, but nothing prevents the
+ * user from munlocking the pages, which could then allow the user
+ * access to random host memory. We also have no guarantee from the
+ * IOMMU API that the iommu driver can unmap sub-pages of previous
+ * mappings. This means we might lose an entire range if a single
+ * page within it is unmapped. Single page mappings are inefficient,
+ * but provide the most flexibility for now.
+ */
+ for (i = 0; i < npage; i++, iova += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ unsigned long pfn = 0;
+
+ ret = vaddr_get_pfn(vaddr, prot, &pfn);
+ if (ret) {
+ __vfio_dma_do_unmap(iommu, start, i, prot);
+ return ret;
+ }
+
+ /*
+ * Only add actual locked pages to accounting
+ * XXX We're effectively marking a page locked for every
+ * IOVA page even though it's possible the user could be
+ * backing multiple IOVAs with the same vaddr. This over-
+ * penalizes the user process, but we currently have no
+ * easy way to do this properly.
+ */
+ if (!is_invalid_reserved_pfn(pfn))
+ locked++;
+
+ ret = iommu_map(iommu->domain, iova,
+ (phys_addr_t)pfn << PAGE_SHIFT,
+ PAGE_SIZE, prot);
+ if (ret) {
+ /* Back out mappings on error */
+ put_pfn(pfn, prot);
+ __vfio_dma_do_unmap(iommu, start, i, prot);
+ return ret;
+ }
+ }
+ vfio_lock_acct(locked);
+ return 0;
+}
+
+static inline bool ranges_overlap(dma_addr_t start1, size_t size1,
+ dma_addr_t start2, size_t size2)
+{
+ if (start1 < start2)
+ return (start2 - start1 < size1);
+ else if (start2 < start1)
+ return (start1 - start2 < size2);
+ return (size1 > 0 && size2 > 0);
+}
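+
+/*
+ * Worked example of the predicate above: start1 = 0x1000, size1 =
+ * 0x2000 overlaps start2 = 0x2000, size2 = 0x1000 because
+ * start2 - start1 = 0x1000 < size1. Equal starts overlap only when
+ * both sizes are non-zero, which the final return handles.
+ */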
+
+static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
+ dma_addr_t start, size_t size)
+{
+ struct vfio_dma *dma;
+
+ list_for_each_entry(dma, &iommu->dma_list, next) {
+ if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
+ start, size))
+ return dma;
+ }
+ return NULL;
+}
+
+static long vfio_remove_dma_overlap(struct vfio_iommu *iommu, dma_addr_t start,
+ size_t size, struct vfio_dma *dma)
+{
+ struct vfio_dma *split;
+ long npage_lo, npage_hi;
+
+ /* Existing dma region is completely covered, unmap all */
+ if (start <= dma->iova &&
+ start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
+ vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
+ list_del(&dma->next);
+ npage_lo = dma->npage;
+ kfree(dma);
+ return npage_lo;
+ }
+
+ /* Overlap low address of existing range */
+ if (start <= dma->iova) {
+ size_t overlap;
+
+ overlap = start + size - dma->iova;
+ npage_lo = overlap >> PAGE_SHIFT;
+
+ vfio_dma_unmap(iommu, dma->iova, npage_lo, dma->prot);
+ dma->iova += overlap;
+ dma->vaddr += overlap;
+ dma->npage -= npage_lo;
+ return npage_lo;
+ }
+
+ /* Overlap high address of existing range */
+ if (start + size >= dma->iova + NPAGE_TO_SIZE(dma->npage)) {
+ size_t overlap;
+
+ overlap = dma->iova + NPAGE_TO_SIZE(dma->npage) - start;
+ npage_hi = overlap >> PAGE_SHIFT;
+
+ vfio_dma_unmap(iommu, start, npage_hi, dma->prot);
+ dma->npage -= npage_hi;
+ return npage_hi;
+ }
+
+ /* Split existing */
+ npage_lo = (start - dma->iova) >> PAGE_SHIFT;
+ npage_hi = dma->npage - (size >> PAGE_SHIFT) - npage_lo;
+
+ split = kzalloc(sizeof *split, GFP_KERNEL);
+ if (!split)
+ return -ENOMEM;
+
+ vfio_dma_unmap(iommu, start, size >> PAGE_SHIFT, dma->prot);
+
+ dma->npage = npage_lo;
+
+ split->npage = npage_hi;
+ split->iova = start + size;
+ split->vaddr = dma->vaddr + NPAGE_TO_SIZE(npage_lo) + size;
+ split->prot = dma->prot;
+ list_add(&split->next, &iommu->dma_list);
+ return size >> PAGE_SHIFT;
+}
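+
+/*
+ * Worked example of the split case above, assuming 4K pages: an
+ * existing region at iova 0x0 with npage = 16 hit by an unmap of
+ * start = 0x4000, size = 0x4000 keeps npage_lo = 4 pages, gains a new
+ * split entry of npage_hi = 8 pages at iova 0x8000, and reports 4
+ * pages of mappings removed.
+ */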
+
+static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
+ struct vfio_iommu_type1_dma_unmap *unmap)
+{
+ long ret = 0, npage = unmap->size >> PAGE_SHIFT;
+ struct vfio_dma *dma, *tmp;
+ uint64_t mask;
+
+ mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+
+ if (unmap->iova & mask)
+ return -EINVAL;
+ if (unmap->size & mask)
+ return -EINVAL;
+
+ /* XXX We still break these down into PAGE_SIZE */
+ WARN_ON(mask & PAGE_MASK);
+
+ mutex_lock(&iommu->lock);
+
+ list_for_each_entry_safe(dma, tmp, &iommu->dma_list, next) {
+ if (ranges_overlap(dma->iova, NPAGE_TO_SIZE(dma->npage),
+ unmap->iova, unmap->size)) {
+ ret = vfio_remove_dma_overlap(iommu, unmap->iova,
+ unmap->size, dma);
+ if (ret > 0)
+ npage -= ret;
+ if (ret < 0 || npage == 0)
+ break;
+ }
+ }
+ mutex_unlock(&iommu->lock);
+ return ret > 0 ? 0 : (int)ret;
+}
+
+static int vfio_dma_do_map(struct vfio_iommu *iommu,
+ struct vfio_iommu_type1_dma_map *map)
+{
+ struct vfio_dma *dma, *pdma = NULL;
+ dma_addr_t iova = map->iova;
+ unsigned long locked, lock_limit, vaddr = map->vaddr;
+ size_t size = map->size;
+ int ret = 0, prot = 0;
+ uint64_t mask;
+ long npage;
+
+ mask = ((uint64_t)1 << __ffs(iommu->domain->ops->pgsize_bitmap)) - 1;
+
+ /* READ/WRITE from device perspective */
+ if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
+ prot |= IOMMU_WRITE;
+ if (map->flags & VFIO_DMA_MAP_FLAG_READ)
+ prot |= IOMMU_READ;
+
+ if (!prot)
+ return -EINVAL; /* No READ/WRITE? */
+
+ if (vaddr & mask)
+ return -EINVAL;
+ if (iova & mask)
+ return -EINVAL;
+ if (size & mask)
+ return -EINVAL;
+
+ /* XXX We still break these down into PAGE_SIZE */
+ WARN_ON(mask & PAGE_MASK);
+
+ /* Don't allow IOVA wrap */
+ if (iova + size && iova + size < iova)
+ return -EINVAL;
+
+ /* Don't allow virtual address wrap */
+ if (vaddr + size && vaddr + size < vaddr)
+ return -EINVAL;
+
+ npage = size >> PAGE_SHIFT;
+ if (!npage)
+ return -EINVAL;
+
+ mutex_lock(&iommu->lock);
+
+ if (vfio_find_dma(iommu, iova, size)) {
+ ret = -EBUSY;
+ goto out_lock;
+ }
+
+ /* account for locked pages */
+ locked = current->mm->locked_vm + npage;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+ pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
+ __func__, rlimit(RLIMIT_MEMLOCK));
+ ret = -ENOMEM;
+ goto out_lock;
+ }
+
+ ret = __vfio_dma_map(iommu, iova, vaddr, npage, prot);
+ if (ret)
+ goto out_lock;
+
+ /* Check if we abut a region below - nothing below 0 */
+ if (iova) {
+ dma = vfio_find_dma(iommu, iova - 1, 1);
+ if (dma && dma->prot == prot &&
+ dma->vaddr + NPAGE_TO_SIZE(dma->npage) == vaddr) {
+
+ dma->npage += npage;
+ iova = dma->iova;
+ vaddr = dma->vaddr;
+ npage = dma->npage;
+ size = NPAGE_TO_SIZE(npage);
+
+ pdma = dma;
+ }
+ }
+
+ /* Check if we abut a region above - nothing above ~0 + 1 */
+ if (iova + size) {
+ dma = vfio_find_dma(iommu, iova + size, 1);
+ if (dma && dma->prot == prot &&
+ dma->vaddr == vaddr + size) {
+
+ dma->npage += npage;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+
+ /*
+ * If merged above and below, remove previously
+ * merged entry. New entry covers it.
+ */
+ if (pdma) {
+ list_del(&pdma->next);
+ kfree(pdma);
+ }
+ pdma = dma;
+ }
+ }
+
+ /* Isolated, new region */
+ if (!pdma) {
+ dma = kzalloc(sizeof *dma, GFP_KERNEL);
+ if (!dma) {
+ ret = -ENOMEM;
+ vfio_dma_unmap(iommu, iova, npage, prot);
+ goto out_lock;
+ }
+
+ dma->npage = npage;
+ dma->iova = iova;
+ dma->vaddr = vaddr;
+ dma->prot = prot;
+ list_add(&dma->next, &iommu->dma_list);
+ }
+
+out_lock:
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
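+
+/*
+ * Illustrative sketch, not part of this patch: the map path above is
+ * driven from userspace roughly as below. The iova, size and buffer
+ * are hypothetical; vaddr must be page aligned and the pages it covers
+ * count against RLIMIT_MEMLOCK.
+ *
+ *    struct vfio_iommu_type1_dma_map map = {
+ *        .argsz = sizeof(map),
+ *        .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
+ *        .vaddr = (__u64)(uintptr_t)buffer,
+ *        .iova  = 0x100000,
+ *        .size  = 1024 * 1024,
+ *    };
+ *
+ *    ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
+ */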
+
+static int vfio_iommu_type1_attach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ struct vfio_group *group, *tmp;
+ int ret;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ mutex_lock(&iommu->lock);
+
+ list_for_each_entry(tmp, &iommu->group_list, next) {
+ if (tmp->iommu_group == iommu_group) {
+ mutex_unlock(&iommu->lock);
+ kfree(group);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * TODO: Domains have capabilities that might change as we add
+ * groups (see iommu->cache, currently never set). Check for
+ * them and potentially disallow groups to be attached when it
+ * would change capabilities (ugh).
+ */
+ ret = iommu_attach_group(iommu->domain, iommu_group);
+ if (ret) {
+ mutex_unlock(&iommu->lock);
+ kfree(group);
+ return ret;
+ }
+
+ group->iommu_group = iommu_group;
+ list_add(&group->next, &iommu->group_list);
+
+ mutex_unlock(&iommu->lock);
+
+ return 0;
+}
+
+static void vfio_iommu_type1_detach_group(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ struct vfio_group *group;
+
+ mutex_lock(&iommu->lock);
+
+ list_for_each_entry(group, &iommu->group_list, next) {
+ if (group->iommu_group == iommu_group) {
+ iommu_detach_group(iommu->domain, iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ break;
+ }
+ }
+
+ mutex_unlock(&iommu->lock);
+}
+
+static void *vfio_iommu_type1_open(unsigned long arg)
+{
+ struct vfio_iommu *iommu;
+
+ if (arg != VFIO_TYPE1_IOMMU)
+ return ERR_PTR(-EINVAL);
+
+ iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&iommu->group_list);
+ INIT_LIST_HEAD(&iommu->dma_list);
+ mutex_init(&iommu->lock);
+
+ /*
+ * Wish we didn't have to know about bus_type here.
+ */
+ iommu->domain = iommu_domain_alloc(&pci_bus_type);
+ if (!iommu->domain) {
+ kfree(iommu);
+ return ERR_PTR(-EIO);
+ }
+
+ /*
+ * Wish we could specify required capabilities rather than create
+ * a domain, see what comes out and hope it doesn't change along
+ * the way. Fortunately we know interrupt remapping is global for
+ * our iommus.
+ */
+ if (!allow_unsafe_interrupts &&
+ !iommu_domain_has_cap(iommu->domain, IOMMU_CAP_INTR_REMAP)) {
+ pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
+ __func__);
+ iommu_domain_free(iommu->domain);
+ kfree(iommu);
+ return ERR_PTR(-EPERM);
+ }
+
+ return iommu;
+}
+
+static void vfio_iommu_type1_release(void *iommu_data)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ struct vfio_group *group, *group_tmp;
+ struct vfio_dma *dma, *dma_tmp;
+
+ list_for_each_entry_safe(group, group_tmp, &iommu->group_list, next) {
+ iommu_detach_group(iommu->domain, group->iommu_group);
+ list_del(&group->next);
+ kfree(group);
+ }
+
+ list_for_each_entry_safe(dma, dma_tmp, &iommu->dma_list, next) {
+ vfio_dma_unmap(iommu, dma->iova, dma->npage, dma->prot);
+ list_del(&dma->next);
+ kfree(dma);
+ }
+
+ iommu_domain_free(iommu->domain);
+ iommu->domain = NULL;
+ kfree(iommu);
+}
+
+static long vfio_iommu_type1_ioctl(void *iommu_data,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vfio_iommu *iommu = iommu_data;
+ unsigned long minsz;
+
+ if (cmd == VFIO_CHECK_EXTENSION) {
+ switch (arg) {
+ case VFIO_TYPE1_IOMMU:
+ return 1;
+ default:
+ return 0;
+ }
+ } else if (cmd == VFIO_IOMMU_GET_INFO) {
+ struct vfio_iommu_type1_info info;
+
+ minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = 0;
+
+ info.iova_pgsizes = iommu->domain->ops->pgsize_bitmap;
+
+ return copy_to_user((void __user *)arg, &info, minsz);
+
+ } else if (cmd == VFIO_IOMMU_MAP_DMA) {
+ struct vfio_iommu_type1_dma_map map;
+ uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
+ VFIO_DMA_MAP_FLAG_WRITE;
+
+ minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
+
+ if (copy_from_user(&map, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (map.argsz < minsz || map.flags & ~mask)
+ return -EINVAL;
+
+ return vfio_dma_do_map(iommu, &map);
+
+ } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
+ struct vfio_iommu_type1_dma_unmap unmap;
+
+ minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
+
+ if (copy_from_user(&unmap, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (unmap.argsz < minsz || unmap.flags)
+ return -EINVAL;
+
+ return vfio_dma_do_unmap(iommu, &unmap);
+ }
+
+ return -ENOTTY;
+}
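+
+/*
+ * Illustrative sketch, not part of this patch: the remaining type1
+ * ioctls follow the same argsz convention, e.g.
+ *
+ *    struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
+ *    struct vfio_iommu_type1_dma_unmap unmap = {
+ *        .argsz = sizeof(unmap),
+ *        .iova  = 0x100000,
+ *        .size  = 1024 * 1024,
+ *    };
+ *
+ *    ioctl(container, VFIO_IOMMU_GET_INFO, &info);
+ *    ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
+ *
+ * info.iova_pgsizes reports the iommu page sizes supported; the map
+ * and unmap ioctls require iova, vaddr and size aligned to the
+ * smallest of these. The iova and size shown are hypothetical.
+ */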
+
+static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
+ .name = "vfio-iommu-type1",
+ .owner = THIS_MODULE,
+ .open = vfio_iommu_type1_open,
+ .release = vfio_iommu_type1_release,
+ .ioctl = vfio_iommu_type1_ioctl,
+ .attach_group = vfio_iommu_type1_attach_group,
+ .detach_group = vfio_iommu_type1_detach_group,
+};
+
+static int __init vfio_iommu_type1_init(void)
+{
+ if (!iommu_present(&pci_bus_type))
+ return -ENODEV;
+
+ return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
+}
+
+static void __exit vfio_iommu_type1_cleanup(void)
+{
+ vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
+}
+
+module_init(vfio_iommu_type1_init);
+module_exit(vfio_iommu_type1_cleanup);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index e4e2fd1b5107..202bba6c997c 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -9,3 +9,6 @@ config VHOST_NET
To compile this driver as a module, choose M here: the module will
be called vhost_net.
+if STAGING
+source "drivers/vhost/Kconfig.tcm"
+endif
diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm
new file mode 100644
index 000000000000..a9c6f76e3208
--- /dev/null
+++ b/drivers/vhost/Kconfig.tcm
@@ -0,0 +1,6 @@
+config TCM_VHOST
+ tristate "TCM_VHOST fabric module (EXPERIMENTAL)"
+ depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m
+ default n
+ ---help---
+ Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 72dd02050bb9..a27b053bc9ab 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_VHOST_NET) += vhost_net.o
vhost_net-y := vhost.o net.o
+
+obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
new file mode 100644
index 000000000000..ed8e2e6c8df2
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.c
@@ -0,0 +1,1649 @@
+/*******************************************************************************
+ * Vhost kernel TCM fabric driver for virtio SCSI initiators
+ *
+ * (C) Copyright 2010-2012 RisingTide Systems LLC.
+ * (C) Copyright 2010-2012 IBM Corp.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
+ *
+ * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ ****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */
+#include <linux/virtio_scsi.h>
+
+#include "vhost.c"
+#include "vhost.h"
+#include "tcm_vhost.h"
+
+enum {
+ VHOST_SCSI_VQ_CTL = 0,
+ VHOST_SCSI_VQ_EVT = 1,
+ VHOST_SCSI_VQ_IO = 2,
+};
+
+struct vhost_scsi {
+ struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[3];
+
+ struct vhost_work vs_completion_work; /* cmd completion work item */
+ struct list_head vs_completion_list; /* cmd completion queue */
+ spinlock_t vs_completion_lock; /* protects vs_completion_list */
+};
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+
+static struct workqueue_struct *tcm_vhost_workqueue;
+
+/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(tcm_vhost_mutex);
+static LIST_HEAD(tcm_vhost_list);
+
+static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *tcm_vhost_get_fabric_name(void)
+{
+ return "vhost";
+}
+
+static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_fabric_proto_ident(se_tpg);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_fabric_proto_ident(se_tpg);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_fabric_proto_ident(se_tpg);
+}
+
+static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ return &tport->tport_name[0];
+}
+
+static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ return tpg->tport_tpgt;
+}
+
+static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+}
+
+static u32 tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_FCP:
+ return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+}
+
+static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport = tpg->tport;
+
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_FCP:
+ return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ case SCSI_PROTOCOL_ISCSI:
+ return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ default:
+ pr_err("Unknown tport_proto_id: 0x%02x, using"
+ " SAS emulation\n", tport->tport_proto_id);
+ break;
+ }
+
+ return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+}
+
+static struct se_node_acl *tcm_vhost_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to alocate struct tcm_vhost_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_vhost_nacl *nacl = container_of(se_nacl,
+ struct tcm_vhost_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+{
+ return;
+}
+
+static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void tcm_vhost_close_session(struct se_session *se_sess)
+{
+ return;
+}
+
+static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+{
+ /* Go ahead and process the write immediately */
+ target_execute_cmd(se_cmd);
+ return 0;
+}
+
+static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+ struct vhost_scsi *vs = tv_cmd->tvc_vhost;
+
+ spin_lock_bh(&vs->vs_completion_lock);
+ list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list);
+ spin_unlock_bh(&vs->vs_completion_lock);
+
+ vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+}
+
+static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+ struct tcm_vhost_cmd, tvc_se_cmd);
+ vhost_scsi_complete_cmd(tv_cmd);
+ return 0;
+}
+
+static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+{
+ struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
+ struct tcm_vhost_cmd, tvc_se_cmd);
+ vhost_scsi_complete_cmd(tv_cmd);
+ return 0;
+}
+
+static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd,
+ u32 sense_length)
+{
+ return 0;
+}
+
+static u16 tcm_vhost_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
+{
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+
+ /* TODO locking against target/backend threads? */
+ transport_generic_free_cmd(se_cmd, 1);
+
+ if (tv_cmd->tvc_sgl_count) {
+ u32 i;
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+
+ kfree(tv_cmd->tvc_sgl);
+ }
+
+ kfree(tv_cmd);
+}
+
+/* Dequeue a command from the completion list */
+static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion(
+ struct vhost_scsi *vs)
+{
+ struct tcm_vhost_cmd *tv_cmd = NULL;
+
+ spin_lock_bh(&vs->vs_completion_lock);
+ if (list_empty(&vs->vs_completion_list)) {
+ spin_unlock_bh(&vs->vs_completion_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(tv_cmd, &vs->vs_completion_list,
+ tvc_completion_list) {
+ list_del(&tv_cmd->tvc_completion_list);
+ break;
+ }
+ spin_unlock_bh(&vs->vs_completion_lock);
+ return tv_cmd;
+}
+
+/* Fill in status and signal that we are done processing this command
+ *
+ * This is scheduled in the vhost work queue so we are called with the owner
+ * process mm and can access the vring.
+ */
+static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+{
+ struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+ vs_completion_work);
+ struct tcm_vhost_cmd *tv_cmd;
+
+ while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) {
+ struct virtio_scsi_cmd_resp v_rsp;
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+ int ret;
+
+ pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
+ tv_cmd, se_cmd->residual_count, se_cmd->scsi_status);
+
+ memset(&v_rsp, 0, sizeof(v_rsp));
+ v_rsp.resid = se_cmd->residual_count;
+ /* TODO is status_qualifier field needed? */
+ v_rsp.status = se_cmd->scsi_status;
+ v_rsp.sense_len = se_cmd->scsi_sense_length;
+ memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf,
+ v_rsp.sense_len);
+ ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
+ if (likely(ret == 0))
+ vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
+ vhost_scsi_free_cmd(tv_cmd);
+ }
+
+ vhost_signal(&vs->dev, &vs->vqs[2]);
+}
+
+static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
+ struct tcm_vhost_tpg *tv_tpg,
+ struct virtio_scsi_cmd_req *v_req,
+ u32 exp_data_len,
+ int data_direction)
+{
+ struct tcm_vhost_cmd *tv_cmd;
+ struct tcm_vhost_nexus *tv_nexus;
+ struct se_portal_group *se_tpg = &tv_tpg->se_tpg;
+ struct se_session *se_sess;
+ struct se_cmd *se_cmd;
+ int sam_task_attr;
+
+ tv_nexus = tv_tpg->tpg_nexus;
+ if (!tv_nexus) {
+ pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+ return ERR_PTR(-EIO);
+ }
+ se_sess = tv_nexus->tvn_se_sess;
+
+ tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
+ if (!tv_cmd) {
+ pr_err("Unable to allocate struct tcm_vhost_cmd\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tv_cmd->tvc_completion_list);
+ tv_cmd->tvc_tag = v_req->tag;
+
+ se_cmd = &tv_cmd->tvc_se_cmd;
+ /*
+ * Locate the SAM Task Attr from virtio_scsi_cmd_req
+ */
+ sam_task_attr = v_req->task_attr;
+ /*
+ * Initialize struct se_cmd descriptor from TCM infrastructure
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len,
+ data_direction, sam_task_attr,
+ &tv_cmd->tvc_sense_buf[0]);
+
+#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */
+ if (bidi)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+#endif
+ return tv_cmd;
+}
+
+/*
+ * Map a user memory range into a scatterlist
+ *
+ * Returns the number of scatterlist entries used or -errno on error.
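+ *
+ * Pages are pinned with get_user_pages_fast() here and released again with
+ * put_page() in vhost_scsi_free_cmd() (or in the error path below) once the
+ * command is done with them.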
+ */
+static int vhost_scsi_map_to_sgl(struct scatterlist *sgl,
+ unsigned int sgl_count, void __user *ptr, size_t len, int write)
+{
+ struct scatterlist *sg = sgl;
+ unsigned int npages = 0;
+ int ret;
+
+ while (len > 0) {
+ struct page *page;
+ unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK;
+ unsigned int nbytes = min_t(unsigned int,
+ PAGE_SIZE - offset, len);
+
+ if (npages == sgl_count) {
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page);
+ BUG_ON(ret == 0); /* we should either get our page or fail */
+ if (ret < 0)
+ goto err;
+
+ sg_set_page(sg, page, nbytes, offset);
+ ptr += nbytes;
+ len -= nbytes;
+ sg++;
+ npages++;
+ }
+ return npages;
+
+err:
+ /* Put pages that we hold */
+ for (sg = sgl; sg != &sgl[npages]; sg++)
+ put_page(sg_page(sg));
+ return ret;
+}
+
+static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+ struct iovec *iov, unsigned int niov, int write)
+{
+ int ret;
+ unsigned int i;
+ u32 sgl_count;
+ struct scatterlist *sg;
+
+ /*
+ * Find out how long sglist needs to be
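+	 * by counting the pages each iovec touches: round its end up and
+	 * its start down to page boundaries and take the difference.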
+ */
+ sgl_count = 0;
+ for (i = 0; i < niov; i++) {
+ sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len +
+ PAGE_SIZE - 1) >> PAGE_SHIFT) -
+ ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT);
+ }
+ /* TODO overflow checking */
+
+ sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC);
+ if (!sg)
+ return -ENOMEM;
+ pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__,
+ sg, sgl_count, !sg);
+ sg_init_table(sg, sgl_count);
+
+ tv_cmd->tvc_sgl = sg;
+ tv_cmd->tvc_sgl_count = sgl_count;
+
+ pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+ for (i = 0; i < niov; i++) {
+ ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base,
+ iov[i].iov_len, write);
+ if (ret < 0) {
+ for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
+ put_page(sg_page(&tv_cmd->tvc_sgl[i]));
+ kfree(tv_cmd->tvc_sgl);
+ tv_cmd->tvc_sgl = NULL;
+ tv_cmd->tvc_sgl_count = 0;
+ return ret;
+ }
+
+ sg += ret;
+ sgl_count -= ret;
+ }
+ return 0;
+}
+
+static void tcm_vhost_submission_work(struct work_struct *work)
+{
+ struct tcm_vhost_cmd *tv_cmd =
+ container_of(work, struct tcm_vhost_cmd, work);
+ struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
+ struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
+ int rc, sg_no_bidi = 0;
+ /*
+ * Locate the struct se_lun pointer based on v_req->lun, and
+ * attach it to struct se_cmd
+ */
+ rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun);
+ if (rc < 0) {
+ pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun);
+ transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd,
+ tv_cmd->tvc_se_cmd.scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+
+ rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb);
+ if (rc == -ENOMEM) {
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ } else if (rc < 0) {
+ if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ tcm_vhost_queue_status(se_cmd);
+ else
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+
+ if (tv_cmd->tvc_sgl_count) {
+ sg_ptr = tv_cmd->tvc_sgl;
+ /*
+ * For BIDI commands, pass in the extra READ buffer
+ * to transport_generic_map_mem_to_cmd() below..
+ */
+/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
+#if 0
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ sg_bidi_ptr = NULL;
+ sg_no_bidi = 0;
+ }
+#endif
+ } else {
+ sg_ptr = NULL;
+ }
+
+ rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr,
+ tv_cmd->tvc_sgl_count, sg_bidi_ptr,
+ sg_no_bidi);
+ if (rc < 0) {
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ transport_generic_free_cmd(se_cmd, 0);
+ return;
+ }
+ transport_handle_cdb_direct(se_cmd);
+}
+
+static void vhost_scsi_handle_vq(struct vhost_scsi *vs)
+{
+ struct vhost_virtqueue *vq = &vs->vqs[2];
+ struct virtio_scsi_cmd_req v_req;
+ struct tcm_vhost_tpg *tv_tpg;
+ struct tcm_vhost_cmd *tv_cmd;
+ u32 exp_data_len, data_first, data_num, data_direction;
+ unsigned out, in, i;
+ int head, ret;
+
+ /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+ tv_tpg = vs->vs_tpg;
+ if (unlikely(!tv_tpg)) {
+ pr_err("%s endpoint not set\n", __func__);
+ return;
+ }
+
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(&vs->dev, vq);
+
+ for (;;) {
+ head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov), &out, &in,
+ NULL, NULL);
+ pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+ head, out, in);
+ /* On error, stop handling until the next kick. */
+ if (unlikely(head < 0))
+ break;
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+ vhost_disable_notify(&vs->dev, vq);
+ continue;
+ }
+ break;
+ }
+
+/* FIXME: BIDI operation */
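+		/*
+		 * Per the virtio-scsi queue layout, the first out descriptor
+		 * holds the request header and the first in descriptor the
+		 * response; any extra out descriptors carry write payload and
+		 * any extra in descriptors carry read payload, which is what
+		 * determines the DMA direction below.
+		 */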
+ if (out == 1 && in == 1) {
+ data_direction = DMA_NONE;
+ data_first = 0;
+ data_num = 0;
+ } else if (out == 1 && in > 1) {
+ data_direction = DMA_FROM_DEVICE;
+ data_first = out + 1;
+ data_num = in - 1;
+ } else if (out > 1 && in == 1) {
+ data_direction = DMA_TO_DEVICE;
+ data_first = 1;
+ data_num = out - 1;
+ } else {
+ vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
+ out, in);
+ break;
+ }
+
+ /*
+ * Check for a sane resp buffer so we can report errors to
+ * the guest.
+ */
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes\n", vq->iov[out].iov_len);
+ break;
+ }
+
+ if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
+ " bytes\n", vq->iov[0].iov_len);
+ break;
+ }
+ pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
+ " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
+ ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
+ sizeof(v_req));
+ if (unlikely(ret)) {
+ vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
+ break;
+ }
+
+ exp_data_len = 0;
+ for (i = 0; i < data_num; i++)
+ exp_data_len += vq->iov[data_first + i].iov_len;
+
+ tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req,
+ exp_data_len, data_direction);
+ if (IS_ERR(tv_cmd)) {
+ vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
+ PTR_ERR(tv_cmd));
+ break;
+ }
+ pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
+ ": %d\n", tv_cmd, exp_data_len, data_direction);
+
+ tv_cmd->tvc_vhost = vs;
+
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes, out: %d, in: %d\n",
+ vq->iov[out].iov_len, out, in);
+ break;
+ }
+
+ tv_cmd->tvc_resp = vq->iov[out].iov_base;
+
+ /*
+		 * Copy the received CDB descriptor into tv_cmd->tvc_cdb,
+		 * which tcm_vhost_submission_work() later passes down into
+		 * target_setup_cmd_from_cdb()
+ */
+ memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
+ /*
+		 * Check that the received CDB size does not exceed our
+ * hardcoded max for tcm_vhost
+ */
+ /* TODO what if cdb was too small for varlen cdb header? */
+ if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) >
+ TCM_VHOST_MAX_CDB_SIZE)) {
+ vq_err(vq, "Received SCSI CDB with command_size: %d that"
+				" exceeds TCM_VHOST_MAX_CDB_SIZE: %d\n",
+ scsi_command_size(tv_cmd->tvc_cdb),
+ TCM_VHOST_MAX_CDB_SIZE);
+ break; /* TODO */
+ }
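+		/*
+		 * Unpack the LUN: virtio-scsi encodes it in bytes 2-3 of the
+		 * 8-byte lun field using SAM flat addressing, so mask off the
+		 * two address-method bits to recover the 14-bit LUN value.
+		 */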
+ tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+
+ pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+ tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun);
+
+ if (data_direction != DMA_NONE) {
+ ret = vhost_scsi_map_iov_to_sgl(tv_cmd,
+ &vq->iov[data_first], data_num,
+ data_direction == DMA_TO_DEVICE);
+ if (unlikely(ret)) {
+ vq_err(vq, "Failed to map iov to sgl\n");
+ break; /* TODO */
+ }
+ }
+
+ /*
+ * Save the descriptor from vhost_get_vq_desc() to be used to
+ * complete the virtio-scsi request in TCM callback context via
+ * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+ */
+ tv_cmd->tvc_vq_desc = head;
+ /*
+ * Dispatch tv_cmd descriptor for cmwq execution in process
+ * context provided by tcm_vhost_workqueue. This also ensures
+ * tv_cmd is executed on the same kworker CPU as this vhost
+ * thread to gain positive L2 cache locality effects..
+ */
+ INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work);
+ queue_work(tcm_vhost_workqueue, &tv_cmd->work);
+ }
+
+ mutex_unlock(&vq->mutex);
+}
+
+static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
+{
+ pr_debug("%s: The handling func for control queue.\n", __func__);
+}
+
+static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
+{
+ pr_debug("%s: The handling func for event queue.\n", __func__);
+}
+
+static void vhost_scsi_handle_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+ vhost_scsi_handle_vq(vs);
+}
+
+/*
+ * Called from vhost_scsi_ioctl() context to walk the list of available
+ * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ */
+static int vhost_scsi_set_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ int index;
+
+ mutex_lock(&vs->dev.mutex);
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ }
+ mutex_unlock(&vs->dev.mutex);
+
+ mutex_lock(&tcm_vhost_mutex);
+ list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ if (!tv_tpg->tpg_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ continue;
+ }
+ if (tv_tpg->tv_tpg_vhost_count != 0) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ continue;
+ }
+ tv_tport = tv_tpg->tport;
+
+ if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) &&
+ (tv_tpg->tport_tpgt == t->vhost_tpgt)) {
+ tv_tpg->tv_tpg_vhost_count++;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ mutex_unlock(&tcm_vhost_mutex);
+
+ mutex_lock(&vs->dev.mutex);
+ if (vs->vs_tpg) {
+ mutex_unlock(&vs->dev.mutex);
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ tv_tpg->tv_tpg_vhost_count--;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ return -EEXIST;
+ }
+
+ vs->vs_tpg = tv_tpg;
+ smp_mb__after_atomic_inc();
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+ }
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ }
+ mutex_unlock(&tcm_vhost_mutex);
+ return -EINVAL;
+}
+
+static int vhost_scsi_clear_endpoint(
+ struct vhost_scsi *vs,
+ struct vhost_scsi_target *t)
+{
+ struct tcm_vhost_tport *tv_tport;
+ struct tcm_vhost_tpg *tv_tpg;
+ int index, ret;
+
+ mutex_lock(&vs->dev.mutex);
+ /* Verify that ring has been setup correctly. */
+ for (index = 0; index < vs->dev.nvqs; ++index) {
+ if (!vhost_vq_access_ok(&vs->vqs[index])) {
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+
+ if (!vs->vs_tpg) {
+ ret = -ENODEV;
+ goto err;
+ }
+ tv_tpg = vs->vs_tpg;
+ tv_tport = tv_tpg->tport;
+
+ if (strcmp(tv_tport->tport_name, t->vhost_wwpn) ||
+ (tv_tpg->tport_tpgt != t->vhost_tpgt)) {
+ pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu"
+ " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
+ tv_tport->tport_name, tv_tpg->tport_tpgt,
+ t->vhost_wwpn, t->vhost_tpgt);
+ ret = -EINVAL;
+ goto err;
+ }
+ tv_tpg->tv_tpg_vhost_count--;
+ vs->vs_tpg = NULL;
+ mutex_unlock(&vs->dev.mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&vs->dev.mutex);
+ return ret;
+}
+
+static int vhost_scsi_open(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *s;
+ int r;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+ INIT_LIST_HEAD(&s->vs_completion_list);
+ spin_lock_init(&s->vs_completion_lock);
+
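+	/*
+	 * Virtqueue layout follows virtio-scsi: vq 0 is the control queue,
+	 * vq 1 the event queue and vq 2 the request queue, matching the
+	 * VHOST_SCSI_VQ_CTL/EVT/IO indices and the vqs[2] use in
+	 * vhost_scsi_handle_vq() above.
+	 */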
+ s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
+ s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
+ s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick;
+ r = vhost_dev_init(&s->dev, s->vqs, 3);
+ if (r < 0) {
+ kfree(s);
+ return r;
+ }
+
+ f->private_data = s;
+ return 0;
+}
+
+static int vhost_scsi_release(struct inode *inode, struct file *f)
+{
+ struct vhost_scsi *s = f->private_data;
+
+ if (s->vs_tpg && s->vs_tpg->tport) {
+ struct vhost_scsi_target backend;
+
+ memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name,
+ sizeof(backend.vhost_wwpn));
+ backend.vhost_tpgt = s->vs_tpg->tport_tpgt;
+ vhost_scsi_clear_endpoint(s, &backend);
+ }
+
+ vhost_dev_cleanup(&s->dev, false);
+ kfree(s);
+ return 0;
+}
+
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+ vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+ vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL);
+ vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT);
+ vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO);
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ if (features & ~VHOST_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ vs->dev.acked_features = features;
+ smp_wmb();
+ vhost_scsi_flush(vs);
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
+static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_scsi *vs = f->private_data;
+ struct vhost_scsi_target backend;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ u64 features;
+ int r, abi_version = VHOST_SCSI_ABI_VERSION;
+
+ switch (ioctl) {
+ case VHOST_SCSI_SET_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ if (backend.reserved != 0)
+ return -EOPNOTSUPP;
+
+ return vhost_scsi_set_endpoint(vs, &backend);
+ case VHOST_SCSI_CLEAR_ENDPOINT:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ if (backend.reserved != 0)
+ return -EOPNOTSUPP;
+
+ return vhost_scsi_clear_endpoint(vs, &backend);
+ case VHOST_SCSI_GET_ABI_VERSION:
+ if (copy_to_user(argp, &abi_version, sizeof abi_version))
+ return -EFAULT;
+ return 0;
+ case VHOST_GET_FEATURES:
+ features = VHOST_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof features))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof features))
+ return -EFAULT;
+ return vhost_scsi_set_features(vs, features);
+ default:
+ mutex_lock(&vs->dev.mutex);
+ r = vhost_dev_ioctl(&vs->dev, ioctl, arg);
+ mutex_unlock(&vs->dev.mutex);
+ return r;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vhost_scsi_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_scsi_release,
+ .unlocked_ioctl = vhost_scsi_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vhost_scsi_compat_ioctl,
+#endif
+ .open = vhost_scsi_open,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice vhost_scsi_misc = {
+ MISC_DYNAMIC_MINOR,
+ "vhost-scsi",
+ &vhost_scsi_fops,
+};
+
+static int __init vhost_scsi_register(void)
+{
+ return misc_register(&vhost_scsi_misc);
+}
+
+static int vhost_scsi_deregister(void)
+{
+ return misc_deregister(&vhost_scsi_misc);
+}
+
+static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+{
+ switch (tport->tport_proto_id) {
+ case SCSI_PROTOCOL_SAS:
+ return "SAS";
+ case SCSI_PROTOCOL_FCP:
+ return "FCP";
+ case SCSI_PROTOCOL_ISCSI:
+ return "iSCSI";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+ struct se_lun *lun)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ tv_tpg->tv_tpg_port_count++;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+ return 0;
+}
+
+static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+ struct se_lun *se_lun)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ tv_tpg->tv_tpg_port_count--;
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+}
+
+static struct se_node_acl *tcm_vhost_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_vhost_nacl *nacl;
+ u64 wwpn = 0;
+ u32 nexus_depth;
+
+	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL); */
+ se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+
+ nexus_depth = 1;
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+	 * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+ nacl->iport_wwpn = wwpn;
+
+ return se_nacl;
+}
+
+static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct tcm_vhost_nacl *nacl = container_of(se_acl,
+ struct tcm_vhost_nacl, se_node_acl);
+ core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg,
+ const char *name)
+{
+ struct se_portal_group *se_tpg;
+ struct tcm_vhost_nexus *tv_nexus;
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ if (tv_tpg->tpg_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_debug("tv_tpg->tpg_nexus already exists\n");
+ return -EEXIST;
+ }
+ se_tpg = &tv_tpg->se_tpg;
+
+ tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+ if (!tv_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+ return -ENOMEM;
+ }
+ /*
+ * Initialize the struct se_session pointer
+ */
+ tv_nexus->tvn_se_sess = transport_init_session();
+ if (IS_ERR(tv_nexus->tvn_se_sess)) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ /*
+	 * Since we are running in 'demo mode' this call will generate a
+ * struct se_node_acl for the tcm_vhost struct se_portal_group with
+ * the SCSI Initiator port name of the passed configfs group 'name'.
+ */
+ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
+ se_tpg, (unsigned char *)name);
+ if (!tv_nexus->tvn_se_sess->se_node_acl) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ pr_debug("core_tpg_check_initiator_node_acl() failed"
+ " for %s\n", name);
+ transport_free_session(tv_nexus->tvn_se_sess);
+ kfree(tv_nexus);
+ return -ENOMEM;
+ }
+ /*
+ * Now register the TCM vhost virtual I_T Nexus as active with the
+ * call to __transport_register_session()
+ */
+ __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
+ tv_nexus->tvn_se_sess, tv_nexus);
+ tv_tpg->tpg_nexus = tv_nexus;
+
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ return 0;
+}
+
+static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+{
+ struct se_session *se_sess;
+ struct tcm_vhost_nexus *tv_nexus;
+
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ se_sess = tv_nexus->tvn_se_sess;
+ if (!se_sess) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+
+ if (tpg->tv_tpg_port_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vhost I_T Nexus with"
+ " active TPG port count: %d\n",
+ tpg->tv_tpg_port_count);
+ return -EBUSY;
+ }
+
+ if (tpg->tv_tpg_vhost_count != 0) {
+ mutex_unlock(&tpg->tv_tpg_mutex);
+ pr_err("Unable to remove TCM_vhost I_T Nexus with"
+ " active TPG vhost count: %d\n",
+ tpg->tv_tpg_vhost_count);
+ return -EBUSY;
+ }
+
+ pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
+ " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ /*
+ * Release the SCSI I_T Nexus to the emulated vhost Target Port
+ */
+ transport_deregister_session(tv_nexus->tvn_se_sess);
+ tpg->tpg_nexus = NULL;
+ mutex_unlock(&tpg->tv_tpg_mutex);
+
+ kfree(tv_nexus);
+ return 0;
+}
+
+static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_nexus *tv_nexus;
+ ssize_t ret;
+
+ mutex_lock(&tv_tpg->tv_tpg_mutex);
+ tv_nexus = tv_tpg->tpg_nexus;
+ if (!tv_nexus) {
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+ return -ENODEV;
+ }
+ ret = snprintf(page, PAGE_SIZE, "%s\n",
+ tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
+ mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+ return ret;
+}
+
+static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+ struct tcm_vhost_tport *tport_wwn = tv_tpg->tport;
+ unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+ int ret;
+ /*
+ * Shutdown the active I_T nexus if 'NULL' is passed..
+ */
+ if (!strncmp(page, "NULL", 4)) {
+ ret = tcm_vhost_drop_nexus(tv_tpg);
+ return (!ret) ? count : ret;
+ }
+ /*
+ * Otherwise make sure the passed virtual Initiator port WWN matches
+ * the fabric protocol_id set in tcm_vhost_make_tport(), and call
+ * tcm_vhost_make_nexus().
+ */
+ if (strlen(page) >= TCM_VHOST_NAMELEN) {
+		pr_err("Emulated NAA SAS Address: %s, exceeds"
+ " max: %d\n", page, TCM_VHOST_NAMELEN);
+ return -EINVAL;
+ }
+ snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+
+ ptr = strstr(i_port, "naa.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
+ pr_err("Passed SAS Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "fc.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
+ pr_err("Passed FCP Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[3]; /* Skip over "fc." */
+ goto check_newline;
+ }
+ ptr = strstr(i_port, "iqn.");
+ if (ptr) {
+ if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
+ pr_err("Passed iSCSI Initiator Port %s does not"
+ " match target port protoid: %s\n", i_port,
+ tcm_vhost_dump_proto_id(tport_wwn));
+ return -EINVAL;
+ }
+ port_ptr = &i_port[0];
+ goto check_newline;
+ }
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
+ " %s\n", i_port);
+ return -EINVAL;
+ /*
+ * Clear any trailing newline for the NAA WWN
+ */
+check_newline:
+ if (i_port[strlen(i_port)-1] == '\n')
+ i_port[strlen(i_port)-1] = '\0';
+
+ ret = tcm_vhost_make_nexus(tv_tpg, port_ptr);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
+ &tcm_vhost_tpg_nexus.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_vhost_tport *tport = container_of(wwn,
+ struct tcm_vhost_tport, tport_wwn);
+
+ struct tcm_vhost_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+ if (!tpg) {
+		pr_err("Unable to allocate struct tcm_vhost_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mutex_init(&tpg->tv_tpg_mutex);
+ INIT_LIST_HEAD(&tpg->tv_tpg_list);
+ tpg->tport = tport;
+ tpg->tport_tpgt = tpgt;
+
+ ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ mutex_lock(&tcm_vhost_mutex);
+ list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
+ mutex_unlock(&tcm_vhost_mutex);
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_vhost_tpg *tpg = container_of(se_tpg,
+ struct tcm_vhost_tpg, se_tpg);
+
+ mutex_lock(&tcm_vhost_mutex);
+ list_del(&tpg->tv_tpg_list);
+ mutex_unlock(&tcm_vhost_mutex);
+ /*
+ * Release the virtual I_T Nexus for this vhost TPG
+ */
+ tcm_vhost_drop_nexus(tpg);
+ /*
+ * Deregister the se_tpg from TCM..
+ */
+ core_tpg_deregister(se_tpg);
+ kfree(tpg);
+}
+
+static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_vhost_tport *tport;
+ char *ptr;
+ u64 wwpn = 0;
+ int off = 0;
+
+ /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL); */
+
+ tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+ if (!tport) {
+		pr_err("Unable to allocate struct tcm_vhost_tport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tport->tport_wwpn = wwpn;
+ /*
+ * Determine the emulated Protocol Identifier and Target Port Name
+ * based on the incoming configfs directory name.
+ */
+ ptr = strstr(name, "naa.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_SAS;
+ goto check_len;
+ }
+ ptr = strstr(name, "fc.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_FCP;
+ off = 3; /* Skip over "fc." */
+ goto check_len;
+ }
+ ptr = strstr(name, "iqn.");
+ if (ptr) {
+ tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
+ goto check_len;
+ }
+
+ pr_err("Unable to locate prefix for emulated Target Port:"
+ " %s\n", name);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+
+check_len:
+ if (strlen(name) >= TCM_VHOST_NAMELEN) {
+ pr_err("Emulated %s Address: %s, exceeds"
+			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
+ TCM_VHOST_NAMELEN);
+ kfree(tport);
+ return ERR_PTR(-EINVAL);
+ }
+ snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+
+ pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
+ " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+
+ return &tport->tport_wwn;
+}
+
+static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+{
+ struct tcm_vhost_tport *tport = container_of(wwn,
+ struct tcm_vhost_tport, tport_wwn);
+
+ pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
+ " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+ tport->tport_name);
+
+ kfree(tport);
+}
+
+static ssize_t tcm_vhost_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
+		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_vhost, version);
+
+static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
+ &tcm_vhost_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops tcm_vhost_ops = {
+ .get_fabric_name = tcm_vhost_get_fabric_name,
+ .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_vhost_get_fabric_wwn,
+ .tpg_get_tag = tcm_vhost_get_tag,
+ .tpg_get_default_depth = tcm_vhost_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_vhost_check_true,
+ .tpg_check_demo_mode_cache = tcm_vhost_check_true,
+ .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
+ .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
+ .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
+ .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
+ .release_cmd = tcm_vhost_release_cmd,
+ .shutdown_session = tcm_vhost_shutdown_session,
+ .close_session = tcm_vhost_close_session,
+ .sess_get_index = tcm_vhost_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_vhost_write_pending,
+ .write_pending_status = tcm_vhost_write_pending_status,
+ .set_default_node_attributes = tcm_vhost_set_default_node_attrs,
+ .get_task_tag = tcm_vhost_get_task_tag,
+ .get_cmd_state = tcm_vhost_get_cmd_state,
+ .queue_data_in = tcm_vhost_queue_data_in,
+ .queue_status = tcm_vhost_queue_status,
+ .queue_tm_rsp = tcm_vhost_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len,
+ /*
+ * Setup callers for generic logic in target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_vhost_make_tport,
+ .fabric_drop_wwn = tcm_vhost_drop_tport,
+ .fabric_make_tpg = tcm_vhost_make_tpg,
+ .fabric_drop_tpg = tcm_vhost_drop_tpg,
+ .fabric_post_link = tcm_vhost_port_link,
+ .fabric_pre_unlink = tcm_vhost_port_unlink,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_vhost_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
+};
+
+static int tcm_vhost_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric;
+ int ret;
+
+ pr_debug("TCM_VHOST fabric module %s on %s/%s"
+ " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+ utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_vhost_ops
+ */
+ fabric->tf_ops = tcm_vhost_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed"
+ " for TCM_VHOST\n");
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ tcm_vhost_fabric_configfs = fabric;
+ pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+ return 0;
+}
+
+static void tcm_vhost_deregister_configfs(void)
+{
+ if (!tcm_vhost_fabric_configfs)
+ return;
+
+ target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
+ tcm_vhost_fabric_configfs = NULL;
+ pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+}
+
+static int __init tcm_vhost_init(void)
+{
+ int ret = -ENOMEM;
+ /*
+ * Use our own dedicated workqueue for submitting I/O into
+ * target core to avoid contention within system_wq.
+ */
+ tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
+ if (!tcm_vhost_workqueue)
+ goto out;
+
+ ret = vhost_scsi_register();
+ if (ret < 0)
+ goto out_destroy_workqueue;
+
+ ret = tcm_vhost_register_configfs();
+ if (ret < 0)
+ goto out_vhost_scsi_deregister;
+
+ return 0;
+
+out_vhost_scsi_deregister:
+ vhost_scsi_deregister();
+out_destroy_workqueue:
+ destroy_workqueue(tcm_vhost_workqueue);
+out:
+ return ret;
+}
+
+static void tcm_vhost_exit(void)
+{
+ tcm_vhost_deregister_configfs();
+ vhost_scsi_deregister();
+ destroy_workqueue(tcm_vhost_workqueue);
+}
+
+MODULE_DESCRIPTION("TCM_VHOST series fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_vhost_init);
+module_exit(tcm_vhost_exit);
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
new file mode 100644
index 000000000000..d9e93557d669
--- /dev/null
+++ b/drivers/vhost/tcm_vhost.h
@@ -0,0 +1,103 @@
+#define TCM_VHOST_VERSION "v0.1"
+#define TCM_VHOST_NAMELEN 256
+#define TCM_VHOST_MAX_CDB_SIZE 32
+
+struct tcm_vhost_cmd {
+ /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
+ int tvc_vq_desc;
+ /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
+ u64 tvc_tag;
+ /* The number of scatterlists associated with this cmd */
+ u32 tvc_sgl_count;
+ /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+ u32 tvc_lun;
+ /* Pointer to the SGL formatted memory from virtio-scsi */
+ struct scatterlist *tvc_sgl;
+ /* Pointer to response */
+ struct virtio_scsi_cmd_resp __user *tvc_resp;
+ /* Pointer to vhost_scsi for our device */
+ struct vhost_scsi *tvc_vhost;
+ /* The TCM I/O descriptor that is accessed via container_of() */
+ struct se_cmd tvc_se_cmd;
+ /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+ struct work_struct work;
+ /* Copy of the incoming SCSI command descriptor block (CDB) */
+ unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
+ /* Completed commands list, serviced from vhost worker thread */
+ struct list_head tvc_completion_list;
+};
+
+struct tcm_vhost_nexus {
+ /* Pointer to TCM session for I_T Nexus */
+ struct se_session *tvn_se_sess;
+};
+
+struct tcm_vhost_nacl {
+ /* Binary World Wide unique Port Name for Vhost Initiator port */
+ u64 iport_wwpn;
+	/* ASCII formatted WWPN for SAS Initiator port */
+ char iport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_vhost_tpg {
+ /* Vhost port target portal group tag for TCM */
+ u16 tport_tpgt;
+	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
+ int tv_tpg_port_count;
+ /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
+ int tv_tpg_vhost_count;
+ /* list for tcm_vhost_list */
+ struct list_head tv_tpg_list;
+ /* Used to protect access for tpg_nexus */
+ struct mutex tv_tpg_mutex;
+ /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
+ struct tcm_vhost_nexus *tpg_nexus;
+ /* Pointer back to tcm_vhost_tport */
+ struct tcm_vhost_tport *tport;
+ /* Returned by tcm_vhost_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+struct tcm_vhost_tport {
+ /* SCSI protocol the tport is providing */
+ u8 tport_proto_id;
+ /* Binary World Wide unique Port Name for Vhost Target port */
+ u64 tport_wwpn;
+ /* ASCII formatted WWPN for Vhost Target port */
+ char tport_name[TCM_VHOST_NAMELEN];
+ /* Returned by tcm_vhost_make_tport() */
+ struct se_wwn tport_wwn;
+};
+
+/*
+ * As per request from MST, keep TCM_VHOST related ioctl defines out of
+ * linux/vhost.h (user-space) for now..
+ */
+
+#include <linux/vhost.h>
+
+/*
+ * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
+ *
+ * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
+ * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
+ */
+
+#define VHOST_SCSI_ABI_VERSION 0
+
+struct vhost_scsi_target {
+ int abi_version;
+ char vhost_wwpn[TRANSPORT_IQN_LEN];
+ unsigned short vhost_tpgt;
+ unsigned short reserved;
+};
+
+/* VHOST_SCSI specific defines */
+#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
+#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+/* Changing this breaks userspace. */
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index b0b2ac335347..747442d2c0f6 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -90,7 +90,8 @@
#undef DEBUG
#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args);
+#define DBG(fmt, args...) \
+ printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args);
#else
#define DBG(fmt, args...)
#endif
@@ -449,8 +450,9 @@ static int aty128_decode_var(struct fb_var_screeninfo *var,
struct aty128fb_par *par);
#if 0
static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
- void __iomem *bios);
-static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par);
+ void __iomem *bios);
+static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev,
+ const struct aty128fb_par *par);
#endif
static void aty128_timings(struct aty128fb_par *par);
static void aty128_init_engine(struct aty128fb_par *par);
@@ -779,7 +781,8 @@ static u32 depth_to_dst(u32 depth)
#ifndef __sparc__
-static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev)
+static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par,
+ struct pci_dev *dev)
{
u16 dptr;
u8 rom_type;
@@ -811,13 +814,14 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s
/* Look for the PCI data to check the ROM type */
dptr = BIOS_IN16(0x18);
- /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM
- * for now, until I've verified this works everywhere. The goal here is more
- * to phase out Open Firmware images.
+ /* Check the PCI data signature. If it's wrong, we still assume a normal
+ * x86 ROM for now, until I've verified this works everywhere.
+ * The goal here is more to phase out Open Firmware images.
*
- * Currently, we only look at the first PCI data, we could iteratre and deal with
- * them all, and we should use fb_bios_start relative to start of image and not
- * relative start of ROM, but so far, I never found a dual-image ATI card
+	 * Currently, we only look at the first PCI data, we could iterate and
+ * deal with them all, and we should use fb_bios_start relative to start
+ * of image and not relative start of ROM, but so far, I never found a
+ * dual-image ATI card.
*
* typedef struct {
* u32 signature; + 0x00
@@ -852,7 +856,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s
printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n");
goto failed;
default:
- printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type);
+ printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n",
+ rom_type);
goto failed;
}
anyway:
@@ -863,7 +868,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s
return NULL;
}
-static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios)
+static void __devinit aty128_get_pllinfo(struct aty128fb_par *par,
+ unsigned char __iomem *bios)
{
unsigned int bios_hdr;
unsigned int bios_pll;
@@ -1247,10 +1253,13 @@ static int aty128_crtc_to_var(const struct aty128_crtc *crtc,
static void aty128_set_crt_enable(struct aty128fb_par *par, int on)
{
if (on) {
- aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON);
- aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN));
+ aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) |
+ CRT_CRTC_ON);
+ aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) |
+ DAC_PALETTE2_SNOOP_EN));
} else
- aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON);
+ aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) &
+ ~CRT_CRTC_ON);
}
static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
@@ -1281,7 +1290,8 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on)
}
}
-static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par)
+static void aty128_set_pll(struct aty128_pll *pll,
+ const struct aty128fb_par *par)
{
u32 div3;
@@ -1366,7 +1376,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll,
}
-static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var)
+static int aty128_pll_to_var(const struct aty128_pll *pll,
+ struct fb_var_screeninfo *var)
{
var->pixclock = 100000000 / pll->vclk;
@@ -1512,7 +1523,8 @@ static int aty128fb_set_par(struct fb_info *info)
* encode/decode the User Defined Part of the Display
*/
-static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par)
+static int aty128_decode_var(struct fb_var_screeninfo *var,
+ struct aty128fb_par *par)
{
int err;
struct aty128_crtc crtc;
@@ -1559,7 +1571,8 @@ static int aty128_encode_var(struct fb_var_screeninfo *var,
}
-static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+static int aty128fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct aty128fb_par par;
int err;
@@ -1575,7 +1588,8 @@ static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
/*
* Pan or Wrap the Display
*/
-static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb)
+static int aty128fb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fb)
{
struct aty128fb_par *par = fb->par;
u32 xoffset, yoffset;
@@ -1594,7 +1608,8 @@ static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *f
par->crtc.xoffset = xoffset;
par->crtc.yoffset = yoffset;
- offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7;
+ offset = ((yoffset * par->crtc.vxres + xoffset) * (par->crtc.bpp >> 3))
+ & ~7;
if (par->crtc.bpp == 24)
offset += 8 * (offset % 3); /* Must be multiple of 8 and 3 */
@@ -1620,11 +1635,13 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue,
* do mirroring
*/
- aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL);
+ aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) |
+ DAC_PALETTE_ACCESS_CNTL);
aty_st_8(PALETTE_INDEX, regno);
aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue);
#endif
- aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL);
+ aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) &
+ ~DAC_PALETTE_ACCESS_CNTL);
}
aty_st_8(PALETTE_INDEX, regno);
@@ -1753,7 +1770,8 @@ static int aty128_bl_update_status(struct backlight_device *bd)
aty_st_le32(LVDS_GEN_CNTL, reg);
}
reg &= ~LVDS_BL_MOD_LEVEL_MASK;
- reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT);
+ reg |= (aty128_bl_get_level_brightness(par, level) <<
+ LVDS_BL_MOD_LEVEL_SHIFT);
#ifdef BACKLIGHT_LVDS_OFF
reg |= LVDS_ON | LVDS_EN;
reg &= ~LVDS_DISPLAY_DIS;
@@ -1764,7 +1782,8 @@ static int aty128_bl_update_status(struct backlight_device *bd)
#endif
} else {
reg &= ~LVDS_BL_MOD_LEVEL_MASK;
- reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT);
+ reg |= (aty128_bl_get_level_brightness(par, 0) <<
+ LVDS_BL_MOD_LEVEL_SHIFT);
#ifdef BACKLIGHT_LVDS_OFF
reg |= LVDS_DISPLAY_DIS;
aty_st_le32(LVDS_GEN_CNTL, reg);
@@ -1869,7 +1888,8 @@ static void aty128_early_resume(void *data)
}
#endif /* CONFIG_PPC_PMAC */
-static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct aty128fb_par *par = info->par;
@@ -1887,7 +1907,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
/* range check to make sure */
if (ent->driver_data < ARRAY_SIZE(r128_family))
- strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card));
+ strlcat(video_card, r128_family[ent->driver_data],
+ sizeof(video_card));
printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);
@@ -1911,11 +1932,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
/* Indicate sleep capability */
if (par->chip_gen == rage_M3) {
pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1);
-#if 0 /* Disable the early video resume hack for now as it's causing problems, among
- * others we now rely on the PCI core restoring the config space for us, which
- * isn't the case with that hack, and that code path causes various things to
- * be called with interrupts off while they shouldn't. I'm leaving the code in
- * as it can be useful for debugging purposes
+#if 0 /* Disable the early video resume hack for now as it's causing problems,
+ * among others we now rely on the PCI core restoring the config space
+ * for us, which isn't the case with that hack, and that code path causes
+ * various things to be called with interrupts off while they shouldn't.
+ * I'm leaving the code in as it can be useful for debugging purposes
*/
pmac_set_early_video_resume(aty128_early_resume, par);
#endif
@@ -1953,11 +1974,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
default_vmode = VMODE_1152_768_60;
if (default_cmode > 16)
- default_cmode = CMODE_32;
+ default_cmode = CMODE_32;
else if (default_cmode > 8)
- default_cmode = CMODE_16;
+ default_cmode = CMODE_16;
else
- default_cmode = CMODE_8;
+ default_cmode = CMODE_8;
if (mac_vmode_to_var(default_vmode, default_cmode, &var))
var = default_var;
@@ -2018,7 +2039,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
#ifdef CONFIG_PCI
/* register a card ++ajoshi */
-static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit aty128_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
unsigned long fb_addr, reg_addr;
struct aty128fb_par *par;
@@ -2318,39 +2340,39 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
u_int width, u_int height,
struct fb_info_aty128 *par)
{
- u32 save_dp_datatype, save_dp_cntl, dstval;
-
- if (!width || !height)
- return;
-
- dstval = depth_to_dst(par->current_par.crtc.depth);
- if (dstval == DST_24BPP) {
- srcx *= 3;
- dstx *= 3;
- width *= 3;
- } else if (dstval == -EINVAL) {
- printk("aty128fb: invalid depth or RGBA\n");
- return;
- }
-
- wait_for_fifo(2, par);
- save_dp_datatype = aty_ld_le32(DP_DATATYPE);
- save_dp_cntl = aty_ld_le32(DP_CNTL);
-
- wait_for_fifo(6, par);
- aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
- aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
- aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
- aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);
-
- aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
- aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);
-
- par->blitter_may_be_busy = 1;
-
- wait_for_fifo(2, par);
- aty_st_le32(DP_DATATYPE, save_dp_datatype);
- aty_st_le32(DP_CNTL, save_dp_cntl);
+ u32 save_dp_datatype, save_dp_cntl, dstval;
+
+ if (!width || !height)
+ return;
+
+ dstval = depth_to_dst(par->current_par.crtc.depth);
+ if (dstval == DST_24BPP) {
+ srcx *= 3;
+ dstx *= 3;
+ width *= 3;
+ } else if (dstval == -EINVAL) {
+ printk("aty128fb: invalid depth or RGBA\n");
+ return;
+ }
+
+ wait_for_fifo(2, par);
+ save_dp_datatype = aty_ld_le32(DP_DATATYPE);
+ save_dp_cntl = aty_ld_le32(DP_CNTL);
+
+ wait_for_fifo(6, par);
+ aty_st_le32(SRC_Y_X, (srcy << 16) | srcx);
+ aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT);
+ aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM);
+ aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR);
+
+ aty_st_le32(DST_Y_X, (dsty << 16) | dstx);
+ aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width);
+
+ par->blitter_may_be_busy = 1;
+
+ wait_for_fifo(2, par);
+ aty_st_le32(DP_DATATYPE, save_dp_datatype);
+ aty_st_le32(DP_CNTL, save_dp_cntl);
}
@@ -2358,17 +2380,17 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty,
* Text mode accelerated functions
*/
-static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx,
- int height, int width)
+static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy,
+ int dx, int height, int width)
{
- sx *= fontwidth(p);
- sy *= fontheight(p);
- dx *= fontwidth(p);
- dy *= fontheight(p);
- width *= fontwidth(p);
- height *= fontheight(p);
-
- aty128_rectcopy(sx, sy, dx, dy, width, height,
+ sx *= fontwidth(p);
+ sy *= fontheight(p);
+ dx *= fontwidth(p);
+ dy *= fontheight(p);
+ width *= fontwidth(p);
+ height *= fontheight(p);
+
+ aty128_rectcopy(sx, sy, dx, dy, width, height,
(struct fb_info_aty128 *)p->fb_info);
}
#endif /* 0 */
diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c
index 9261c918fde8..5c23eac0eb9a 100644
--- a/drivers/video/aty/radeon_monitor.c
+++ b/drivers/video/aty/radeon_monitor.c
@@ -730,6 +730,25 @@ static void radeon_videomode_to_var(struct fb_var_screeninfo *var,
var->vmode = mode->vmode;
}
+#ifdef CONFIG_PPC_PSERIES
+static int is_powerblade(const char *model)
+{
+ struct device_node *root;
+ const char* cp;
+ int len, l, rc = 0;
+
+ root = of_find_node_by_path("/");
+ if (root && model) {
+ l = strlen(model);
+ cp = of_get_property(root, "model", &len);
+ if (cp)
+ rc = memcmp(model, cp, min(len, l)) == 0;
+ of_node_put(root);
+ }
+ return rc;
+}
+#endif
+
/*
* Build the modedb for head 1 (head 2 will come later), check panel infos
* from either BIOS or EDID, and pick up the default mode
@@ -865,6 +884,22 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
has_default_mode = 1;
}
+#ifdef CONFIG_PPC_PSERIES
+ if (!has_default_mode && (
+ is_powerblade("IBM,8842") || /* JS20 */
+ is_powerblade("IBM,8844") || /* JS21 */
+ is_powerblade("IBM,7998") || /* JS12/JS21/JS22 */
+ is_powerblade("IBM,0792") || /* QS21 */
+ is_powerblade("IBM,0793") /* QS22 */
+ )) {
+		printk(KERN_INFO "Falling back to 800x600 on JSxx hardware\n");
+ if (fb_find_mode(&info->var, info, "800x600@60",
+ info->monspecs.modedb,
+ info->monspecs.modedb_len, NULL, 8) != 0)
+ has_default_mode = 1;
+ }
+#endif
+
/*
* Still no mode, let's pick up a default from the db
*/
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
index 77da6a2f43dc..c03ecdd31e4c 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/auo_k190x.c
@@ -987,7 +987,6 @@ err_regfb:
fb_dealloc_cmap(&info->cmap);
err_cmap:
fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
err_defio:
vfree((void *)info->screen_base);
err_irq:
@@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev)
fb_dealloc_cmap(&info->cmap);
fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
vfree((void *)info->screen_base);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2979292650d6..cf282763a8dc 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -245,7 +245,7 @@ config BACKLIGHT_CARILLO_RANCH
config BACKLIGHT_PWM
tristate "Generic PWM based Backlight Driver"
- depends on HAVE_PWM
+ depends on PWM
help
If you have a LCD backlight adjustable by PWM, say Y to enable
this driver.
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index 0443a4f71858..df1cbb7ef6ca 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -127,7 +127,8 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
struct atmel_pwm_bl *pwmbl;
int retval;
- pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
+ pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl),
+ GFP_KERNEL);
if (!pwmbl)
return -ENOMEM;
@@ -154,7 +155,8 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
goto err_free_mem;
if (pwmbl->gpio_on != -1) {
- retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
+ retval = devm_gpio_request(&pdev->dev, pwmbl->gpio_on,
+ "gpio_atmel_pwm_bl");
if (retval) {
pwmbl->gpio_on = -1;
goto err_free_pwm;
@@ -164,7 +166,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
retval = gpio_direction_output(pwmbl->gpio_on,
0 ^ pdata->on_active_low);
if (retval)
- goto err_free_gpio;
+ goto err_free_pwm;
}
memset(&props, 0, sizeof(struct backlight_properties));
@@ -174,7 +176,7 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
&atmel_pwm_bl_ops, &props);
if (IS_ERR(bldev)) {
retval = PTR_ERR(bldev);
- goto err_free_gpio;
+ goto err_free_pwm;
}
pwmbl->bldev = bldev;
@@ -196,13 +198,9 @@ static int atmel_pwm_bl_probe(struct platform_device *pdev)
err_free_bl_dev:
platform_set_drvdata(pdev, NULL);
backlight_device_unregister(bldev);
-err_free_gpio:
- if (pwmbl->gpio_on != -1)
- gpio_free(pwmbl->gpio_on);
err_free_pwm:
pwm_channel_free(&pwmbl->pwmc);
err_free_mem:
- kfree(pwmbl);
return retval;
}
@@ -210,15 +208,12 @@ static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
{
struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
- if (pwmbl->gpio_on != -1) {
+ if (pwmbl->gpio_on != -1)
gpio_set_value(pwmbl->gpio_on, 0);
- gpio_free(pwmbl->gpio_on);
- }
pwm_channel_disable(&pwmbl->pwmc);
pwm_channel_free(&pwmbl->pwmc);
backlight_device_unregister(pwmbl->bldev);
platform_set_drvdata(pdev, NULL);
- kfree(pwmbl);
return 0;
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 23d732677ba1..c781768ba892 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -492,7 +492,8 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
lcd->gpio_backlight_cont = -1;
if (gpio_is_valid(pdata->gpio_backlight_on)) {
- err = gpio_request(pdata->gpio_backlight_on, "BL_ON");
+ err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on,
+ "BL_ON");
if (err) {
dev_err(&spi->dev, "failed to request GPIO%d for "
"backlight_on\n", pdata->gpio_backlight_on);
@@ -504,11 +505,12 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
}
if (gpio_is_valid(pdata->gpio_backlight_cont)) {
- err = gpio_request(pdata->gpio_backlight_cont, "BL_CONT");
+ err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont,
+ "BL_CONT");
if (err) {
dev_err(&spi->dev, "failed to request GPIO%d for "
"backlight_cont\n", pdata->gpio_backlight_cont);
- goto err_free_backlight_on;
+ return err;
}
lcd->gpio_backlight_cont = pdata->gpio_backlight_cont;
@@ -525,11 +527,6 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
}
}
return 0;
-
-err_free_backlight_on:
- if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_free(lcd->gpio_backlight_on);
- return err;
}
static int __devinit corgi_lcd_probe(struct spi_device *spi)
@@ -602,12 +599,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
backlight_update_status(lcd->bl_dev);
backlight_device_unregister(lcd->bl_dev);
- if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_free(lcd->gpio_backlight_on);
-
- if (gpio_is_valid(lcd->gpio_backlight_cont))
- gpio_free(lcd->gpio_backlight_cont);
-
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 40f606a86093..2d90c0648aa0 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -175,28 +175,27 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
priv->spi = spi;
- ret = gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH,
- "lcd l4f00242t03 reset");
+ ret = devm_gpio_request_one(&spi->dev, pdata->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "lcd l4f00242t03 reset");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
return ret;
}
- ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
- "lcd l4f00242t03 data enable");
+ ret = devm_gpio_request_one(&spi->dev, pdata->data_enable_gpio,
+ GPIOF_OUT_INIT_LOW, "lcd l4f00242t03 data enable");
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- goto err;
+ return ret;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
- ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
- goto err2;
+ return PTR_ERR(priv->io_reg);
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -204,14 +203,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err3;
+ goto err1;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
- goto err4;
+ goto err2;
}
/* Init the LCD */
@@ -223,14 +222,10 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
-err4:
+err2:
regulator_put(priv->core_reg);
-err3:
+err1:
regulator_put(priv->io_reg);
-err2:
- gpio_free(pdata->data_enable_gpio);
-err:
- gpio_free(pdata->reset_gpio);
return ret;
}
@@ -238,16 +233,12 @@ err:
static int __devexit l4f00242t03_remove(struct spi_device *spi)
{
struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
- struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
lcd_device_unregister(priv->ld);
dev_set_drvdata(&spi->dev, NULL);
- gpio_free(pdata->data_enable_gpio);
- gpio_free(pdata->reset_gpio);
-
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
index bebeb63607db..18dca0c29c68 100644
--- a/drivers/video/backlight/lm3533_bl.c
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -295,7 +295,7 @@ static int __devinit lm3533_bl_probe(struct platform_device *pdev)
return -EINVAL;
}
- bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
if (!bl) {
dev_err(&pdev->dev,
"failed to allocate memory for backlight\n");
@@ -317,8 +317,7 @@ static int __devinit lm3533_bl_probe(struct platform_device *pdev)
&lm3533_bl_ops, &props);
if (IS_ERR(bd)) {
dev_err(&pdev->dev, "failed to register backlight device\n");
- ret = PTR_ERR(bd);
- goto err_free;
+ return PTR_ERR(bd);
}
bl->bd = bd;
@@ -348,8 +347,6 @@ err_sysfs_remove:
sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
err_unregister:
backlight_device_unregister(bd);
-err_free:
- kfree(bl);
return ret;
}
@@ -367,7 +364,6 @@ static int __devexit lm3533_bl_remove(struct platform_device *pdev)
lm3533_ctrlbank_disable(&bl->cb);
sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
backlight_device_unregister(bd);
- kfree(bl);
return 0;
}
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index a9f2c36966f1..ea43f2254196 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -158,29 +158,27 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
int ret = 0;
if (pdata != NULL) {
- ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET");
+ ret = devm_gpio_request(&spi->dev, pdata->reset_gpio,
+ "LMS285GF05 RESET");
if (ret)
return ret;
ret = gpio_direction_output(pdata->reset_gpio,
!pdata->reset_inverted);
if (ret)
- goto err;
+ return ret;
}
st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto err;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
st->spi = spi;
st->ld = ld;
@@ -193,24 +191,14 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
return 0;
-
-err:
- if (pdata != NULL)
- gpio_free(pdata->reset_gpio);
-
- return ret;
}
static int __devexit lms283gf05_remove(struct spi_device *spi)
{
struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
- struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;
lcd_device_unregister(st->ld);
- if (pdata != NULL)
- gpio_free(pdata->reset_gpio);
-
return 0;
}
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 72a0e0c917cf..aa6d4f71131f 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -14,11 +14,15 @@
#include <linux/i2c.h>
#include <linux/backlight.h>
#include <linux/err.h>
-#include <linux/lp855x.h>
+#include <linux/platform_data/lp855x.h>
/* Registers */
-#define BRIGHTNESS_CTRL (0x00)
-#define DEVICE_CTRL (0x01)
+#define BRIGHTNESS_CTRL 0x00
+#define DEVICE_CTRL 0x01
+#define EEPROM_START 0xA0
+#define EEPROM_END 0xA7
+#define EPROM_START 0xA0
+#define EPROM_END 0xAF
#define BUF_SIZE 20
#define DEFAULT_BL_NAME "lcd-backlight"
diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c
index f519d55a294c..469cf0f109d2 100644
--- a/drivers/video/backlight/ot200_bl.c
+++ b/drivers/video/backlight/ot200_bl.c
@@ -84,7 +84,8 @@ static int ot200_backlight_probe(struct platform_device *pdev)
int retval = 0;
/* request gpio */
- if (gpio_request(GPIO_DIMM, "ot200 backlight dimmer") < 0) {
+ if (devm_gpio_request(&pdev->dev, GPIO_DIMM,
+ "ot200 backlight dimmer") < 0) {
dev_err(&pdev->dev, "failed to request GPIO %d\n", GPIO_DIMM);
return -ENODEV;
}
@@ -93,14 +94,13 @@ static int ot200_backlight_probe(struct platform_device *pdev)
pwm_timer = cs5535_mfgpt_alloc_timer(7, MFGPT_DOMAIN_ANY);
if (!pwm_timer) {
dev_err(&pdev->dev, "MFGPT 7 not available\n");
- retval = -ENODEV;
- goto error_mfgpt_alloc;
+ return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
retval = -ENOMEM;
- goto error_kzalloc;
+ goto error_devm_kzalloc;
}
/* setup gpio */
@@ -122,26 +122,21 @@ static int ot200_backlight_probe(struct platform_device *pdev)
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
retval = PTR_ERR(bl);
- goto error_backlight_device_register;
+ goto error_devm_kzalloc;
}
platform_set_drvdata(pdev, bl);
return 0;
-error_backlight_device_register:
- kfree(data);
-error_kzalloc:
+error_devm_kzalloc:
cs5535_mfgpt_free_timer(pwm_timer);
-error_mfgpt_alloc:
- gpio_free(GPIO_DIMM);
return retval;
}
static int ot200_backlight_remove(struct platform_device *pdev)
{
struct backlight_device *bl = platform_get_drvdata(pdev);
- struct ot200_backlight_data *data = bl_get_data(bl);
backlight_device_unregister(bl);
@@ -152,9 +147,7 @@ static int ot200_backlight_remove(struct platform_device *pdev)
MAX_COMP2 - dim_table[100]);
cs5535_mfgpt_free_timer(pwm_timer);
- gpio_free(GPIO_DIMM);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 342b7d7cbb63..995f0164c9b0 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -26,11 +26,13 @@ struct pwm_bl_data {
struct device *dev;
unsigned int period;
unsigned int lth_brightness;
+ unsigned int *levels;
int (*notify)(struct device *,
int brightness);
void (*notify_after)(struct device *,
int brightness);
int (*check_fb)(struct device *, struct fb_info *);
+ void (*exit)(struct device *);
};
static int pwm_backlight_update_status(struct backlight_device *bl)
@@ -52,9 +54,18 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
} else {
- brightness = pb->lth_brightness +
- (brightness * (pb->period - pb->lth_brightness) / max);
- pwm_config(pb->pwm, brightness, pb->period);
+ int duty_cycle;
+
+ if (pb->levels) {
+ duty_cycle = pb->levels[brightness];
+ max = pb->levels[max];
+ } else {
+ duty_cycle = brightness;
+ }
+
+ duty_cycle = pb->lth_brightness +
+ (duty_cycle * (pb->period - pb->lth_brightness) / max);
+ pwm_config(pb->pwm, duty_cycle, pb->period);
pwm_enable(pb->pwm);
}
@@ -83,17 +94,98 @@ static const struct backlight_ops pwm_backlight_ops = {
.check_fb = pwm_backlight_check_fb,
};
+#ifdef CONFIG_OF
+static int pwm_backlight_parse_dt(struct device *dev,
+ struct platform_pwm_backlight_data *data)
+{
+ struct device_node *node = dev->of_node;
+ struct property *prop;
+ int length;
+ u32 value;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ memset(data, 0, sizeof(*data));
+
+ /* determine the number of brightness levels */
+ prop = of_find_property(node, "brightness-levels", &length);
+ if (!prop)
+ return -EINVAL;
+
+ data->max_brightness = length / sizeof(u32);
+
+ /* read brightness levels from DT property */
+ if (data->max_brightness > 0) {
+ size_t size = sizeof(*data->levels) * data->max_brightness;
+
+ data->levels = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!data->levels)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(node, "brightness-levels",
+ data->levels,
+ data->max_brightness);
+ if (ret < 0)
+ return ret;
+
+ ret = of_property_read_u32(node, "default-brightness-level",
+ &value);
+ if (ret < 0)
+ return ret;
+
+ if (value >= data->max_brightness) {
+ dev_warn(dev, "invalid default brightness level: %u, using %u\n",
+ value, data->max_brightness - 1);
+ value = data->max_brightness - 1;
+ }
+
+ data->dft_brightness = value;
+ data->max_brightness--;
+ }
+
+ /*
+ * TODO: Most users of this driver use a number of GPIOs to control
+ * backlight power. Support for specifying these needs to be
+ * added.
+ */
+
+ return 0;
+}
+
+static struct of_device_id pwm_backlight_of_match[] = {
+ { .compatible = "pwm-backlight" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, pwm_backlight_of_match);
+#else
+static int pwm_backlight_parse_dt(struct device *dev,
+ struct platform_pwm_backlight_data *data)
+{
+ return -ENODEV;
+}
+#endif
+
static int pwm_backlight_probe(struct platform_device *pdev)
{
- struct backlight_properties props;
struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
+ struct platform_pwm_backlight_data defdata;
+ struct backlight_properties props;
struct backlight_device *bl;
struct pwm_bl_data *pb;
+ unsigned int max;
int ret;
if (!data) {
- dev_err(&pdev->dev, "failed to find platform data\n");
- return -EINVAL;
+ ret = pwm_backlight_parse_dt(&pdev->dev, &defdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to find platform data\n");
+ return ret;
+ }
+
+ data = &defdata;
}
if (data->init) {
@@ -109,21 +201,42 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- pb->period = data->pwm_period_ns;
+ if (data->levels) {
+ max = data->levels[data->max_brightness];
+ pb->levels = data->levels;
+ } else
+ max = data->max_brightness;
+
pb->notify = data->notify;
pb->notify_after = data->notify_after;
pb->check_fb = data->check_fb;
- pb->lth_brightness = data->lth_brightness *
- (data->pwm_period_ns / data->max_brightness);
+ pb->exit = data->exit;
pb->dev = &pdev->dev;
- pb->pwm = pwm_request(data->pwm_id, "backlight");
+ pb->pwm = pwm_get(&pdev->dev, NULL);
if (IS_ERR(pb->pwm)) {
- dev_err(&pdev->dev, "unable to request PWM for backlight\n");
- ret = PTR_ERR(pb->pwm);
- goto err_alloc;
- } else
- dev_dbg(&pdev->dev, "got pwm for backlight\n");
+ dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
+
+ pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
+ if (IS_ERR(pb->pwm)) {
+ dev_err(&pdev->dev, "unable to request legacy PWM\n");
+ ret = PTR_ERR(pb->pwm);
+ goto err_alloc;
+ }
+ }
+
+ dev_dbg(&pdev->dev, "got pwm for backlight\n");
+
+ /*
+ * The DT case will set the pwm_period_ns field to 0 and store the
+ * period, parsed from the DT, in the PWM device. For the non-DT case,
+ * set the period from platform data.
+ */
+ if (data->pwm_period_ns > 0)
+ pwm_set_period(pb->pwm, data->pwm_period_ns);
+
+ pb->period = pwm_get_period(pb->pwm);
+ pb->lth_brightness = data->lth_brightness * (pb->period / max);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
@@ -143,7 +256,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
return 0;
err_bl:
- pwm_free(pb->pwm);
+ pwm_put(pb->pwm);
err_alloc:
if (data->exit)
data->exit(&pdev->dev);
@@ -152,16 +265,15 @@ err_alloc:
static int pwm_backlight_remove(struct platform_device *pdev)
{
- struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
struct backlight_device *bl = platform_get_drvdata(pdev);
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
backlight_device_unregister(bl);
pwm_config(pb->pwm, 0, pb->period);
pwm_disable(pb->pwm);
- pwm_free(pb->pwm);
- if (data->exit)
- data->exit(&pdev->dev);
+ pwm_put(pb->pwm);
+ if (pb->exit)
+ pb->exit(&pdev->dev);
return 0;
}
@@ -195,11 +307,12 @@ static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
static struct platform_driver pwm_backlight_driver = {
.driver = {
- .name = "pwm-backlight",
- .owner = THIS_MODULE,
+ .name = "pwm-backlight",
+ .owner = THIS_MODULE,
#ifdef CONFIG_PM
- .pm = &pwm_backlight_pm_ops,
+ .pm = &pwm_backlight_pm_ops,
#endif
+ .of_match_table = of_match_ptr(pwm_backlight_of_match),
},
.probe = pwm_backlight_probe,
.remove = pwm_backlight_remove,
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 0d54e607e82d..49342e1d20be 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -92,14 +92,14 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
- ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
+ ret = devm_gpio_request(&client->dev, TOSA_GPIO_BL_C20MA, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
return ret;
}
ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
if (ret)
- goto err_gpio_dir;
+ return ret;
i2c_set_clientdata(client, data);
data->i2c = client;
@@ -123,8 +123,6 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
err_reg:
data->bl = NULL;
-err_gpio_dir:
- gpio_free(TOSA_GPIO_BL_C20MA);
return ret;
}
@@ -135,8 +133,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
backlight_device_unregister(data->bl);
data->bl = NULL;
- gpio_free(TOSA_GPIO_BL_C20MA);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 47823b8efff0..33047a66cc24 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -193,7 +193,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
- ret = gpio_request(TOSA_GPIO_TG_ON, "tg #pwr");
+ ret = devm_gpio_request(&spi->dev, TOSA_GPIO_TG_ON, "tg #pwr");
if (ret < 0)
goto err_gpio_tg;
@@ -201,7 +201,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
ret = gpio_direction_output(TOSA_GPIO_TG_ON, 0);
if (ret < 0)
- goto err_gpio_dir;
+ goto err_gpio_tg;
mdelay(60);
tosa_lcd_tg_init(data);
@@ -221,8 +221,6 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
err_register:
tosa_lcd_tg_off(data);
-err_gpio_dir:
- gpio_free(TOSA_GPIO_TG_ON);
err_gpio_tg:
dev_set_drvdata(&spi->dev, NULL);
return ret;
@@ -239,7 +237,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
tosa_lcd_tg_off(data);
- gpio_free(TOSA_GPIO_TG_ON);
dev_set_drvdata(&spi->dev, NULL);
return 0;
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 28b1a834906b..61b182bf32a2 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
image.depth = 1;
if (attribute) {
- buf = kmalloc(cellsize, GFP_KERNEL);
+ buf = kmalloc(cellsize, GFP_ATOMIC);
if (!buf)
return;
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 2e471c22abf5..fdefa8fd72c4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work)
struct vc_data *vc = NULL;
int c;
int mode;
+ int ret;
+
+ /* FIXME: we should sort out the unbind locking instead; for now we
+ * just fail to flash the cursor if we can't take the console lock,
+ * rather than blocking fbcon deinit */
+ ret = console_trylock();
+ if (ret == 0)
+ return;
- console_lock();
if (ops && ops->currcon != -1)
vc = vc_cons[ops->currcon].d;
@@ -442,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5))
- strcpy(fontname, options + 5);
+ strlcpy(fontname, options + 5, sizeof(fontname));
if (!strncmp(options, "scrollback:", 11)) {
options += 11;
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 47118c75a4c0..7ae9d53f2bf1 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -30,7 +30,10 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/console.h>
+#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/lcm.h>
#include <video/da8xx-fb.h>
#include <asm/div64.h>
@@ -160,6 +163,13 @@ struct da8xx_fb_par {
wait_queue_head_t vsync_wait;
int vsync_flag;
int vsync_timeout;
+ spinlock_t lock_for_chan_update;
+
+ /*
+ * LCDC has 2 ping pong DMA channels, channel 0
+ * and channel 1.
+ */
+ unsigned int which_dma_channel_done;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
unsigned int lcd_fck_rate;
@@ -260,10 +270,18 @@ static inline void lcd_enable_raster(void)
{
u32 reg;
+ /* Put LCDC in reset for several cycles */
+ if (lcd_revision == LCD_VERSION_2)
+ /* Write 1 to reset LCDC */
+ lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
+ mdelay(1);
+
/* Bring LCDC out of reset */
if (lcd_revision == LCD_VERSION_2)
lcdc_write(0, LCD_CLK_RESET_REG);
+ mdelay(1);
+ /* Above reset sequence does not reset register context */
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (!(reg & LCD_RASTER_ENABLE))
lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
@@ -277,10 +295,6 @@ static inline void lcd_disable_raster(void)
reg = lcdc_read(LCD_RASTER_CTRL_REG);
if (reg & LCD_RASTER_ENABLE)
lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG);
-
- if (lcd_revision == LCD_VERSION_2)
- /* Write 1 to reset LCDC */
- lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG);
}
static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
@@ -344,8 +358,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
lcd_enable_raster();
}
-/* Configure the Burst Size of DMA */
-static int lcd_cfg_dma(int burst_size)
+/* Configure the Burst Size and fifo threshold of DMA */
+static int lcd_cfg_dma(int burst_size, int fifo_th)
{
u32 reg;
@@ -369,6 +383,9 @@ static int lcd_cfg_dma(int burst_size)
default:
return -EINVAL;
}
+
+ reg |= (fifo_th << 8);
+
lcdc_write(reg, LCD_DMA_CTRL_REG);
return 0;
@@ -670,8 +687,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) &
~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG);
- /* Configure the DMA burst size. */
- ret = lcd_cfg_dma(cfg->dma_burst_sz);
+ /* Configure the DMA burst size and fifo threshold. */
+ ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th);
if (ret < 0)
return ret;
@@ -715,7 +732,6 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
{
struct da8xx_fb_par *par = arg;
u32 stat = lcdc_read(LCD_MASKED_STAT_REG);
- u32 reg_int;
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
lcd_disable_raster();
@@ -732,10 +748,8 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
lcdc_write(stat, LCD_MASKED_STAT_REG);
- /* Disable PL completion inerrupt */
- reg_int = lcdc_read(LCD_INT_ENABLE_CLR_REG) |
- (LCD_V2_PL_INT_ENA);
- lcdc_write(reg_int, LCD_INT_ENABLE_CLR_REG);
+ /* Disable PL completion interrupt */
+ lcdc_write(LCD_V2_PL_INT_ENA, LCD_INT_ENABLE_CLR_REG);
/* Setup and start data loading mode */
lcd_blit(LOAD_DATA, par);
@@ -743,6 +757,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
lcdc_write(stat, LCD_MASKED_STAT_REG);
if (stat & LCD_END_OF_FRAME0) {
+ par->which_dma_channel_done = 0;
lcdc_write(par->dma_start,
LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
lcdc_write(par->dma_end,
@@ -752,6 +767,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
}
if (stat & LCD_END_OF_FRAME1) {
+ par->which_dma_channel_done = 1;
lcdc_write(par->dma_start,
LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
lcdc_write(par->dma_end,
@@ -798,6 +814,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
lcdc_write(stat, LCD_STAT_REG);
if (stat & LCD_END_OF_FRAME0) {
+ par->which_dma_channel_done = 0;
lcdc_write(par->dma_start,
LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
lcdc_write(par->dma_end,
@@ -807,6 +824,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
}
if (stat & LCD_END_OF_FRAME1) {
+ par->which_dma_channel_done = 1;
lcdc_write(par->dma_start,
LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
lcdc_write(par->dma_end,
@@ -1021,11 +1039,14 @@ static int cfb_blank(int blank, struct fb_info *info)
par->blank = blank;
switch (blank) {
case FB_BLANK_UNBLANK:
+ lcd_enable_raster();
+
if (par->panel_power_ctrl)
par->panel_power_ctrl(1);
-
- lcd_enable_raster();
break;
+ case FB_BLANK_NORMAL:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
if (par->panel_power_ctrl)
par->panel_power_ctrl(0);
@@ -1052,6 +1073,7 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
struct fb_fix_screeninfo *fix = &fbi->fix;
unsigned int end;
unsigned int start;
+ unsigned long irq_flags;
if (var->xoffset != fbi->var.xoffset ||
var->yoffset != fbi->var.yoffset) {
@@ -1069,6 +1091,21 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
end = start + fbi->var.yres * fix->line_length - 1;
par->dma_start = start;
par->dma_end = end;
+ spin_lock_irqsave(&par->lock_for_chan_update,
+ irq_flags);
+ if (par->which_dma_channel_done == 0) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ } else if (par->which_dma_channel_done == 1) {
+ lcdc_write(par->dma_start,
+ LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(par->dma_end,
+ LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+ }
+ spin_unlock_irqrestore(&par->lock_for_chan_update,
+ irq_flags);
}
}
@@ -1114,6 +1151,7 @@ static int __devinit fb_probe(struct platform_device *device)
struct da8xx_fb_par *par;
resource_size_t len;
int ret, i;
+ unsigned long ulcm;
if (fb_pdata == NULL) {
dev_err(&device->dev, "Can not get platform data\n");
@@ -1209,7 +1247,8 @@ static int __devinit fb_probe(struct platform_device *device)
/* allocate frame buffer */
par->vram_size = lcdc_info->width * lcdc_info->height * lcd_cfg->bpp;
- par->vram_size = PAGE_ALIGN(par->vram_size/8);
+ ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE);
+ par->vram_size = roundup(par->vram_size/8, ulcm);
par->vram_size = par->vram_size * LCD_NUM_BUFFERS;
par->vram_virt = dma_alloc_coherent(NULL,
@@ -1296,6 +1335,8 @@ static int __devinit fb_probe(struct platform_device *device)
/* initialize the vsync wait queue */
init_waitqueue_head(&par->vsync_wait);
par->vsync_timeout = HZ / 5;
+ par->which_dma_channel_done = -1;
+ spin_lock_init(&par->lock_for_chan_update);
/* Register the Frame Buffer */
if (register_framebuffer(da8xx_fb_info) < 0) {
@@ -1382,11 +1423,12 @@ static int fb_resume(struct platform_device *dev)
struct da8xx_fb_par *par = info->par;
console_lock();
+ clk_enable(par->lcdc_clk);
+ lcd_enable_raster();
+
if (par->panel_power_ctrl)
par->panel_power_ctrl(1);
- clk_enable(par->lcdc_clk);
- lcd_enable_raster();
fb_set_suspend(info, 0);
console_unlock();
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index a268cbf1cbea..68b9b511ce80 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -477,11 +477,11 @@ static __init unsigned int get_fb_size(struct fb_info *info)
return size;
}
-static int epson1355_width_tab[2][4] __initdata =
+static int epson1355_width_tab[2][4] __devinitdata =
{ {4, 8, 16, -1}, {9, 12, 16, -1} };
-static int epson1355_bpp_tab[8] __initdata = { 1, 2, 4, 8, 15, 16 };
+static int epson1355_bpp_tab[8] __devinitdata = { 1, 2, 4, 8, 15, 16 };
-static void __init fetch_hw_state(struct fb_info *info, struct epson1355_par *par)
+static void __devinit fetch_hw_state(struct fb_info *info, struct epson1355_par *par)
{
struct fb_var_screeninfo *var = &info->var;
struct fb_fix_screeninfo *fix = &info->fix;
@@ -601,7 +601,7 @@ static int epson1355fb_remove(struct platform_device *dev)
return 0;
}
-int __devinit epson1355fb_probe(struct platform_device *dev)
+static int __devinit epson1355fb_probe(struct platform_device *dev)
{
struct epson1355_par *default_par;
struct fb_info *info;
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index a36b2d28280e..c6c016a506ce 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -47,7 +47,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
exynos_dp_init_hpd(dp);
- udelay(200);
+ usleep_range(200, 210);
while (exynos_dp_get_plug_in_status(dp) != 0) {
timeout_loop++;
@@ -55,7 +55,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
dev_err(dp->dev, "failed to get hpd plug status\n");
return -ETIMEDOUT;
}
- udelay(10);
+ usleep_range(10, 11);
}
return 0;
@@ -304,7 +304,7 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp)
buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 |
DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0;
exynos_dp_write_bytes_to_dpcd(dp,
- DPCD_ADDR_TRAINING_PATTERN_SET,
+ DPCD_ADDR_TRAINING_LANE0_SET,
lane_count, buf);
}
@@ -336,7 +336,7 @@ static int exynos_dp_channel_eq_ok(u8 link_status[6], int lane_count)
u8 lane_status;
lane_align = link_status[2];
- if ((lane_align == DPCD_INTERLANE_ALIGN_DONE) == 0)
+ if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0)
return -EINVAL;
for (lane = 0; lane < lane_count; lane++) {
@@ -407,6 +407,9 @@ static unsigned int exynos_dp_get_lane_link_training(
case 3:
reg = exynos_dp_get_lane3_link_training(dp);
break;
+ default:
+ WARN_ON(1);
+ return 0;
}
return reg;
@@ -483,7 +486,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
u8 pre_emphasis;
u8 training_lane;
- udelay(100);
+ usleep_range(100, 101);
exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
6, link_status);
@@ -501,7 +504,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
buf[0] = DPCD_SCRAMBLING_DISABLED |
DPCD_TRAINING_PATTERN_2;
exynos_dp_write_byte_to_dpcd(dp,
- DPCD_ADDR_TRAINING_LANE0_SET,
+ DPCD_ADDR_TRAINING_PATTERN_SET,
buf[0]);
for (lane = 0; lane < lane_count; lane++) {
@@ -568,7 +571,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
u8 adjust_request[2];
- udelay(400);
+ usleep_range(400, 401);
exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS,
6, link_status);
@@ -736,7 +739,7 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
if (retval == 0)
break;
- udelay(100);
+ usleep_range(100, 110);
}
return retval;
@@ -770,7 +773,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- udelay(1);
+ usleep_range(1, 2);
}
/* Set to use the register calculated M/N video */
@@ -804,7 +807,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- mdelay(1);
+ usleep_range(1000, 1001);
}
if (retval != 0)
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 1e0f998e0c9f..8526e548c385 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -85,10 +85,6 @@ void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype);
void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype);
void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count);
-void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype);
-void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype);
-void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
-void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count);
void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable);
void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
enum pattern_set pattern);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index bcb0e3ae1e9d..2db5b9aa250a 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -122,7 +122,7 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
LS_CLK_DOMAIN_FUNC_EN_N;
writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
- udelay(20);
+ usleep_range(20, 30);
exynos_dp_lane_swap(dp, 0);
@@ -988,7 +988,7 @@ void exynos_dp_reset_macro(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
/* 10 us is the minimum reset time. */
- udelay(10);
+ usleep_range(10, 20);
reg &= ~MACRO_RST;
writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 9908e75ae761..4bc2b8a5dd8b 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -154,7 +154,7 @@ static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power)
if (client_drv && client_drv->power_on)
client_drv->power_on(client_dev, 1);
- exynos_mipi_regulator_disable(dsim);
+ exynos_mipi_regulator_enable(dsim);
/* enable MIPI-DSI PHY. */
if (dsim->pd->phy_enable)
diff --git a/drivers/video/exynos/s6e8ax0.h b/drivers/video/exynos/s6e8ax0.h
deleted file mode 100644
index 1f1b270484b0..000000000000
--- a/drivers/video/exynos/s6e8ax0.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* linux/drivers/video/backlight/s6e8ax0.h
- *
- * MIPI-DSI based s6e8ax0 AMOLED LCD Panel definitions.
- *
- * Copyright (c) 2011 Samsung Electronics
- *
- * Inki Dae, <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _S6E8AX0_H
-#define _S6E8AX0_H
-
-extern void s6e8ax0_init(void);
-
-#endif
-
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 1ddeb11659d4..64cda560c488 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -104,6 +104,8 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
deferred framebuffer IO. then if userspace touches a page
again, we repeat the same scheme */
+ file_update_time(vma->vm_file);
+
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h
index 04c01faaf772..624ee115f129 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fb_draw.h
@@ -3,6 +3,7 @@
#include <asm/types.h>
#include <linux/fb.h>
+#include <linux/bug.h>
/*
* Compose two values, using a bitmask as decision value
@@ -41,7 +42,8 @@ pixel_to_pat( u32 bpp, u32 pixel)
case 32:
return 0x0000000100000001ul*pixel;
default:
- panic("pixel_to_pat(): unsupported pixelformat\n");
+ WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp);
+ return 0;
}
}
#else
@@ -66,7 +68,8 @@ pixel_to_pat( u32 bpp, u32 pixel)
case 32:
return 0x00000001ul*pixel;
default:
- panic("pixel_to_pat(): unsupported pixelformat\n");
+ WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp);
+ return 0;
}
}
#endif
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index da066c210923..5245f9a71892 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -354,7 +354,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
*/
if (fb_get_options("grvga", &options)) {
retval = -ENODEV;
- goto err;
+ goto free_fb;
}
if (!options || !*options)
@@ -370,7 +370,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
if (grvga_parse_custom(this_opt, &info->var) < 0) {
dev_err(&dev->dev, "Failed to parse custom mode (%s).\n", this_opt);
retval = -EINVAL;
- goto err1;
+ goto free_fb;
}
} else if (!strncmp(this_opt, "addr", 4))
grvga_fix_addr = simple_strtoul(this_opt + 5, NULL, 16);
@@ -387,10 +387,11 @@ static int __devinit grvga_probe(struct platform_device *dev)
info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN;
info->fix.smem_len = grvga_mem_size;
- if (!request_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]), "grlib-svgactrl regs")) {
+ if (!devm_request_mem_region(&dev->dev, dev->resource[0].start,
+ resource_size(&dev->resource[0]), "grlib-svgactrl regs")) {
dev_err(&dev->dev, "registers already mapped\n");
retval = -EBUSY;
- goto err;
+ goto free_fb;
}
par->regs = of_ioremap(&dev->resource[0], 0,
@@ -400,14 +401,14 @@ static int __devinit grvga_probe(struct platform_device *dev)
if (!par->regs) {
dev_err(&dev->dev, "failed to map registers\n");
retval = -ENOMEM;
- goto err1;
+ goto free_fb;
}
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
dev_err(&dev->dev, "failed to allocate mem with fb_alloc_cmap\n");
retval = -ENOMEM;
- goto err2;
+ goto unmap_regs;
}
if (mode_opt) {
@@ -415,7 +416,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
grvga_modedb, sizeof(grvga_modedb), &grvga_modedb[0], 8);
if (!retval || retval == 4) {
retval = -EINVAL;
- goto err3;
+ goto dealloc_cmap;
}
}
@@ -427,10 +428,11 @@ static int __devinit grvga_probe(struct platform_device *dev)
physical_start = grvga_fix_addr;
- if (!request_mem_region(physical_start, grvga_mem_size, dev->name)) {
+ if (!devm_request_mem_region(&dev->dev, physical_start,
+ grvga_mem_size, dev->name)) {
dev_err(&dev->dev, "failed to request memory region\n");
retval = -ENOMEM;
- goto err3;
+ goto dealloc_cmap;
}
virtual_start = (unsigned long) ioremap(physical_start, grvga_mem_size);
@@ -438,7 +440,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
if (!virtual_start) {
dev_err(&dev->dev, "error mapping framebuffer memory\n");
retval = -ENOMEM;
- goto err4;
+ goto dealloc_cmap;
}
} else { /* Allocate framebuffer memory */
@@ -451,7 +453,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
"unable to allocate framebuffer memory (%lu bytes)\n",
grvga_mem_size);
retval = -ENOMEM;
- goto err3;
+ goto dealloc_cmap;
}
physical_start = dma_map_single(&dev->dev, (void *)virtual_start, grvga_mem_size, DMA_TO_DEVICE);
@@ -484,7 +486,7 @@ static int __devinit grvga_probe(struct platform_device *dev)
retval = register_framebuffer(info);
if (retval < 0) {
dev_err(&dev->dev, "failed to register framebuffer\n");
- goto err4;
+ goto free_mem;
}
__raw_writel(physical_start, &par->regs->fb_pos);
@@ -493,21 +495,18 @@ static int __devinit grvga_probe(struct platform_device *dev)
return 0;
-err4:
+free_mem:
dev_set_drvdata(&dev->dev, NULL);
- if (grvga_fix_addr) {
- release_mem_region(physical_start, grvga_mem_size);
+ if (grvga_fix_addr)
iounmap((void *)virtual_start);
- } else
+ else
kfree((void *)virtual_start);
-err3:
+dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
-err2:
+unmap_regs:
of_iounmap(&dev->resource[0], par->regs,
resource_size(&dev->resource[0]));
-err1:
- release_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]));
-err:
+free_fb:
framebuffer_release(info);
return retval;
@@ -524,12 +523,10 @@ static int __devexit grvga_remove(struct platform_device *device)
of_iounmap(&device->resource[0], par->regs,
resource_size(&device->resource[0]));
- release_mem_region(device->resource[0].start, resource_size(&device->resource[0]));
- if (!par->fb_alloced) {
- release_mem_region(info->fix.smem_start, info->fix.smem_len);
+ if (!par->fb_alloced)
iounmap(info->screen_base);
- } else
+ else
kfree((void *)info->screen_base);
framebuffer_release(info);
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 00ce1f34b496..57d940be5f3d 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
case MB862XX_L1_SET_CFG:
if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
return -EFAULT;
+ if (l1_cfg->dh == 0 || l1_cfg->dw == 0)
+ return -EINVAL;
if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
/* downscaling */
outreg(cap, GC_CAP_CSC,
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index eec0d7b748eb..c89f8a8d36d2 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -269,7 +269,7 @@ struct mx3fb_info {
dma_cookie_t cookie;
struct scatterlist sg[2];
- u32 sync; /* preserve var->sync flags */
+ struct fb_var_screeninfo cur_var; /* current var info */
};
static void mx3fb_dma_done(void *);
@@ -698,9 +698,29 @@ static void mx3fb_dma_done(void *arg)
complete(&mx3_fbi->flip_cmpl);
}
+static bool mx3fb_must_set_par(struct fb_info *fbi)
+{
+ struct mx3fb_info *mx3_fbi = fbi->par;
+ struct fb_var_screeninfo old_var = mx3_fbi->cur_var;
+ struct fb_var_screeninfo new_var = fbi->var;
+
+ if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
+ (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
+ return true;
+
+ /*
+ * Ignore xoffset and yoffset update,
+ * because pan display handles this case.
+ */
+ old_var.xoffset = new_var.xoffset;
+ old_var.yoffset = new_var.yoffset;
+
+ return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo));
+}
+
static int __set_par(struct fb_info *fbi, bool lock)
{
- u32 mem_len;
+ u32 mem_len, cur_xoffset, cur_yoffset;
struct ipu_di_signal_cfg sig_cfg;
enum ipu_panel mode = IPU_PANEL_TFT;
struct mx3fb_info *mx3_fbi = fbi->par;
@@ -780,8 +800,25 @@ static int __set_par(struct fb_info *fbi, bool lock)
video->out_height = fbi->var.yres;
video->out_stride = fbi->var.xres_virtual;
- if (mx3_fbi->blank == FB_BLANK_UNBLANK)
+ if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
sdc_enable_channel(mx3_fbi);
+ /*
+ * sg[0] points to the fb smem_start address
+ * and is the entry actually active in the controller.
+ */
+ mx3_fbi->cur_var.xoffset = 0;
+ mx3_fbi->cur_var.yoffset = 0;
+ }
+
+ /*
+ * Preserve xoffset and yoffset in case they are
+ * inactive in the controller because the fb is blanked.
+ */
+ cur_xoffset = mx3_fbi->cur_var.xoffset;
+ cur_yoffset = mx3_fbi->cur_var.yoffset;
+ mx3_fbi->cur_var = fbi->var;
+ mx3_fbi->cur_var.xoffset = cur_xoffset;
+ mx3_fbi->cur_var.yoffset = cur_yoffset;
return 0;
}
@@ -802,7 +839,7 @@ static int mx3fb_set_par(struct fb_info *fbi)
mutex_lock(&mx3_fbi->mutex);
- ret = __set_par(fbi, true);
+ ret = mx3fb_must_set_par(fbi) ? __set_par(fbi, true) : 0;
mutex_unlock(&mx3_fbi->mutex);
@@ -901,8 +938,8 @@ static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
var->grayscale = 0;
/* Preserve sync flags */
- var->sync |= mx3_fbi->sync;
- mx3_fbi->sync |= var->sync;
+ var->sync |= mx3_fbi->cur_var.sync;
+ mx3_fbi->cur_var.sync |= var->sync;
return 0;
}
@@ -1043,8 +1080,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
return -EINVAL;
}
- if (fbi->var.xoffset == var->xoffset &&
- fbi->var.yoffset == var->yoffset)
+ if (mx3_fbi->cur_var.xoffset == var->xoffset &&
+ mx3_fbi->cur_var.yoffset == var->yoffset)
return 0; /* No change, do nothing */
y_bottom = var->yoffset;
@@ -1127,6 +1164,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
else
fbi->var.vmode &= ~FB_VMODE_YWRAP;
+ mx3_fbi->cur_var = fbi->var;
+
mutex_unlock(&mx3_fbi->mutex);
dev_dbg(fbi->device, "Update complete\n");
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index ad741c3d1ae1..eaeed4340e04 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -487,6 +487,13 @@ static struct omap_video_timings acx_panel_timings = {
.vfp = 3,
.vsw = 3,
.vbp = 4,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
static int acx_panel_probe(struct omap_dss_device *dssdev)
@@ -498,8 +505,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
struct backlight_properties props;
dev_dbg(&dssdev->dev, "%s\n", __func__);
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS;
+
/* FIXME AC bias ? */
dssdev->panel.timings = acx_panel_timings;
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index e42f9dc22123..bc5af2500eb9 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -40,12 +40,6 @@
struct panel_config {
struct omap_video_timings timings;
- int acbi; /* ac-bias pin transitions per interrupt */
- /* Unit: line clocks */
- int acb; /* ac-bias pin frequency */
-
- enum omap_panel_config config;
-
int power_on_delay;
int power_off_delay;
@@ -73,11 +67,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 11,
.vfp = 3,
.vbp = 2,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
.power_on_delay = 50,
.power_off_delay = 100,
.name = "sharp_lq",
@@ -98,11 +94,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 1,
.vfp = 1,
.vbp = 1,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x28,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.power_on_delay = 50,
.power_off_delay = 100,
.name = "sharp_ls",
@@ -123,12 +121,13 @@ static struct panel_config generic_dpi_panels[] = {
.vfp = 4,
.vsw = 2,
.vbp = 2,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC |
- OMAP_DSS_LCD_ONOFF,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "toppoly_tdo35s",
@@ -149,11 +148,13 @@ static struct panel_config generic_dpi_panels[] = {
.vfp = 4,
.vsw = 10,
.vbp = 12 - 10,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "samsung_lte430wq_f0c",
@@ -174,11 +175,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 2,
.vfp = 4,
.vbp = 11,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "seiko_70wvw1tz3",
@@ -199,11 +202,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 10,
.vfp = 2,
.vbp = 2,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "powertip_ph480272t",
@@ -224,11 +229,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 3,
.vfp = 12,
.vbp = 25,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x28,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "innolux_at070tn83",
@@ -249,9 +256,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 1,
.vfp = 2,
.vbp = 7,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.name = "nec_nl2432dr22-11b",
},
@@ -270,9 +281,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 1,
.vfp = 1,
.vbp = 1,
- },
- .config = OMAP_DSS_LCD_TFT,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ },
.name = "h4",
},
@@ -291,10 +306,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 10,
.vfp = 2,
.vbp = 2,
- },
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ },
.name = "apollon",
},
/* FocalTech ETM070003DH6 */
@@ -312,9 +330,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 3,
.vfp = 13,
.vbp = 29,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS,
.name = "focaltech_etm070003dh6",
},
@@ -333,11 +355,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 23,
.vfp = 1,
.vbp = 1,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .acbi = 0x0,
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
.power_on_delay = 0,
.power_off_delay = 0,
.name = "microtips_umsh_8173md",
@@ -358,9 +382,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 10,
.vfp = 4,
.vbp = 2,
- },
- .config = OMAP_DSS_LCD_TFT,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ },
.name = "ortustech_com43h4m10xtc",
},
@@ -379,11 +407,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 10,
.vfp = 12,
.vbp = 23,
- },
- .acb = 0x0,
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ },
.name = "innolux_at080tn52",
},
@@ -401,8 +431,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 1,
.vfp = 26,
.vbp = 1,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT,
.name = "mitsubishi_aa084sb01",
},
/* EDT ET0500G0DH6 */
@@ -419,8 +454,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 2,
.vfp = 35,
.vbp = 10,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT,
.name = "edt_et0500g0dh6",
},
@@ -439,9 +479,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 2,
.vfp = 10,
.vbp = 33,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
.name = "primeview_pd050vl1",
},
@@ -460,9 +504,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 2,
.vfp = 10,
.vbp = 33,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
.name = "primeview_pm070wl4",
},
@@ -481,9 +529,13 @@ static struct panel_config generic_dpi_panels[] = {
.vsw = 4,
.vfp = 1,
.vbp = 23,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
},
- .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
.name = "primeview_pd104slf",
},
};
@@ -573,10 +625,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
if (!panel_config)
return -EINVAL;
- dssdev->panel.config = panel_config->config;
dssdev->panel.timings = panel_config->timings;
- dssdev->panel.acb = panel_config->acb;
- dssdev->panel.acbi = panel_config->acbi;
drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
if (!drv_data)
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
index 0841cc2b3f77..802807798846 100644
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
@@ -40,6 +40,12 @@ static struct omap_video_timings lb035q02_timings = {
.vsw = 2,
.vfp = 4,
.vbp = 18,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
@@ -82,8 +88,6 @@ static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
struct lb035q02_data *ld;
int r;
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS;
dssdev->panel.timings = lb035q02_timings;
ld = kzalloc(sizeof(*ld), GFP_KERNEL);
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index 4a34cdc1371b..e6c115373c00 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -473,7 +473,6 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev)
mutex_init(&ddata->lock);
- dssdev->panel.config = OMAP_DSS_LCD_TFT;
dssdev->panel.timings.x_res = 800;
dssdev->panel.timings.y_res = 480;
dssdev->ctrl.pixel_size = 16;
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 8b38b39213f4..b122b0f31c43 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -76,6 +76,12 @@ static struct omap_video_timings nec_8048_panel_timings = {
.vfp = 3,
.vsw = 1,
.vbp = 4,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
};
static int nec_8048_bl_update_status(struct backlight_device *bl)
@@ -116,9 +122,6 @@ static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
struct backlight_properties props;
int r;
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF |
- OMAP_DSS_LCD_ONOFF;
dssdev->panel.timings = nec_8048_panel_timings;
necd = kzalloc(sizeof(*necd), GFP_KERNEL);
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
index 98ebdaddab5a..2d35bd388860 100644
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ b/drivers/video/omap2/displays/panel-picodlp.c
@@ -69,6 +69,12 @@ static struct omap_video_timings pico_ls_timings = {
.vsw = 2,
.vfp = 3,
.vbp = 14,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
static inline struct picodlp_panel_data
@@ -414,9 +420,6 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev)
struct i2c_client *picodlp_i2c_client;
int r = 0, picodlp_adapter_id;
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_ONOFF |
- OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS;
- dssdev->panel.acb = 0x0;
dssdev->panel.timings = pico_ls_timings;
picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL);
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index ba38b3ad17d6..bd86ba9ccf76 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -44,6 +44,12 @@ static struct omap_video_timings sharp_ls_timings = {
.vsw = 1,
.vfp = 1,
.vbp = 1,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
static int sharp_ls_bl_update_status(struct backlight_device *bl)
@@ -86,9 +92,6 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
struct sharp_data *sd;
int r;
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
- OMAP_DSS_LCD_IHS;
- dssdev->panel.acb = 0x28;
dssdev->panel.timings = sharp_ls_timings;
sd = kzalloc(sizeof(*sd), GFP_KERNEL);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 901576eb5a84..3f5acc7771da 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -882,7 +882,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
goto err;
}
- dssdev->panel.config = OMAP_DSS_LCD_TFT;
dssdev->panel.timings = panel_config->timings;
dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index bff306e041ca..40cc0cfa5d17 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -39,6 +39,12 @@ static const struct omap_video_timings tfp410_default_timings = {
.vfp = 3,
.vsw = 4,
.vbp = 7,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
struct panel_drv_data {
@@ -95,7 +101,6 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
return -ENOMEM;
dssdev->panel.timings = tfp410_default_timings;
- dssdev->panel.config = OMAP_DSS_LCD_TFT;
ddata->dssdev = dssdev;
mutex_init(&ddata->lock);
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 4b6448b3c31f..fa7baa650ae0 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -267,6 +267,12 @@ static const struct omap_video_timings tpo_td043_timings = {
.vsw = 1,
.vfp = 39,
.vbp = 34,
+
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
};
static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
@@ -423,8 +429,6 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
return -ENODEV;
}
- dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS |
- OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IPC;
dssdev->panel.timings = tpo_td043_timings;
dssdev->ctrl.pixel_size = 24;
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 43324e5ed25f..b337a8469fd8 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -52,7 +52,7 @@ config OMAP2_DSS_RFBI
DBI is a bus between the host processor and a peripheral,
such as a display or a framebuffer chip.
- See http://www.mipi.org/ for DBI spesifications.
+ See http://www.mipi.org/ for DBI specifications.
config OMAP2_DSS_VENC
bool "VENC support"
@@ -92,7 +92,7 @@ config OMAP2_DSS_DSI
DSI is a high speed half-duplex serial interface between the host
processor and a peripheral, such as a display or a framebuffer chip.
- See http://www.mipi.org/ for DSI spesifications.
+ See http://www.mipi.org/ for DSI specifications.
config OMAP2_DSS_MIN_FCK_PER_PCK
int "Minimum FCK/PCK ratio (for scaling)"
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index ab22cc224f3e..0fefc68372b9 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -104,6 +104,7 @@ struct mgr_priv_data {
bool shadow_extra_info_dirty;
struct omap_video_timings timings;
+ struct dss_lcd_mgr_config lcd_config;
};
static struct {
@@ -137,6 +138,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
void dss_apply_init(void)
{
const int num_ovls = dss_feat_get_num_ovls();
+ struct mgr_priv_data *mp;
int i;
spin_lock_init(&data_lock);
@@ -168,16 +170,35 @@ void dss_apply_init(void)
op->user_info = op->info;
}
+
+ /*
+ * Initialize some of the lcd_config fields for the TV manager; this lets
+ * us avoid checking whether the manager is LCD or TV in some places
+ */
+ mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT];
+
+ mp->lcd_config.video_port_width = 24;
+ mp->lcd_config.clock_info.lck_div = 1;
+ mp->lcd_config.clock_info.pck_div = 1;
}
+/*
+ * An LCD manager's stallmode decides whether it is in manual or auto update
+ * mode. The TV manager is always in auto update mode; its stallmode field is
+ * false by default.
+ */
static bool ovl_manual_update(struct omap_overlay *ovl)
{
- return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+ struct mgr_priv_data *mp = get_mgr_priv(ovl->manager);
+
+ return mp->lcd_config.stallmode;
}
static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
- return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ return mp->lcd_config.stallmode;
}
static int dss_check_settings_low(struct omap_overlay_manager *mgr,
@@ -214,7 +235,7 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
ois[ovl->id] = oi;
}
- return dss_mgr_check(mgr, mi, &mp->timings, ois);
+ return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois);
}
/*
@@ -537,7 +558,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
struct omap_overlay_info *oi;
- bool ilace, replication;
+ bool replication;
struct mgr_priv_data *mp;
int r;
@@ -550,11 +571,9 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
mp = get_mgr_priv(ovl->manager);
- replication = dss_use_replication(ovl->manager->device, oi->color_mode);
-
- ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
+ replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode);
- r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
+ r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings);
if (r) {
/*
* We can't do much here, as this function can be called from
@@ -635,6 +654,24 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
dispc_mgr_set_timings(mgr->id, &mp->timings);
+ /* lcd_config parameters */
+ if (dss_mgr_is_lcd(mgr->id)) {
+ dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode);
+
+ dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode);
+ dispc_mgr_enable_fifohandcheck(mgr->id,
+ mp->lcd_config.fifohandcheck);
+
+ dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info);
+
+ dispc_mgr_set_tft_data_lines(mgr->id,
+ mp->lcd_config.video_port_width);
+
+ dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity);
+
+ dispc_mgr_set_lcd_type_tft(mgr->id);
+ }
+
mp->extra_info_dirty = false;
if (mp->updating)
mp->shadow_extra_info_dirty = true;
@@ -1294,6 +1331,44 @@ void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
mutex_unlock(&apply_lock);
}
+static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ mp->lcd_config = *config;
+ mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ unsigned long flags;
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ mutex_lock(&apply_lock);
+
+ if (mp->enabled) {
+ DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n",
+ mgr->name);
+ goto out;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ dss_apply_mgr_lcd_config(mgr, config);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ wait_pending_extra_info_updates();
+
+out:
+ mutex_unlock(&apply_lock);
+}
+
int dss_ovl_set_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
{
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 397d4eee11bb..5b289c5f695b 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -119,6 +119,97 @@ enum omap_color_component {
DISPC_COLOR_COMPONENT_UV = 1 << 1,
};
+enum mgr_reg_fields {
+ DISPC_MGR_FLD_ENABLE,
+ DISPC_MGR_FLD_STNTFT,
+ DISPC_MGR_FLD_GO,
+ DISPC_MGR_FLD_TFTDATALINES,
+ DISPC_MGR_FLD_STALLMODE,
+ DISPC_MGR_FLD_TCKENABLE,
+ DISPC_MGR_FLD_TCKSELECTION,
+ DISPC_MGR_FLD_CPR,
+ DISPC_MGR_FLD_FIFOHANDCHECK,
+ /* used to maintain a count of the above fields */
+ DISPC_MGR_FLD_NUM,
+};
+
+static const struct {
+ const char *name;
+ u32 vsync_irq;
+ u32 framedone_irq;
+ u32 sync_lost_irq;
+ struct reg_field reg_desc[DISPC_MGR_FLD_NUM];
+} mgr_desc[] = {
+ [OMAP_DSS_CHANNEL_LCD] = {
+ .name = "LCD",
+ .vsync_irq = DISPC_IRQ_VSYNC,
+ .framedone_irq = DISPC_IRQ_FRAMEDONE,
+ .sync_lost_irq = DISPC_IRQ_SYNC_LOST,
+ .reg_desc = {
+ [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
+ [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
+ [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 },
+ [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 },
+ [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 },
+ [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 },
+ [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 },
+ [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 },
+ [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 },
+ },
+ },
+ [OMAP_DSS_CHANNEL_DIGIT] = {
+ .name = "DIGIT",
+ .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
+ .framedone_irq = 0,
+ .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
+ .reg_desc = {
+ [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
+ [DISPC_MGR_FLD_STNTFT] = { },
+ [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 },
+ [DISPC_MGR_FLD_TFTDATALINES] = { },
+ [DISPC_MGR_FLD_STALLMODE] = { },
+ [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 },
+ [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 },
+ [DISPC_MGR_FLD_CPR] = { },
+ [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 },
+ },
+ },
+ [OMAP_DSS_CHANNEL_LCD2] = {
+ .name = "LCD2",
+ .vsync_irq = DISPC_IRQ_VSYNC2,
+ .framedone_irq = DISPC_IRQ_FRAMEDONE2,
+ .sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
+ .reg_desc = {
+ [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
+ [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
+ [DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 },
+ [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 },
+ [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 },
+ [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG2, 10, 10 },
+ [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 },
+ [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 },
+ [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 },
+ },
+ },
+ [OMAP_DSS_CHANNEL_LCD3] = {
+ .name = "LCD3",
+ .vsync_irq = DISPC_IRQ_VSYNC3,
+ .framedone_irq = DISPC_IRQ_FRAMEDONE3,
+ .sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
+ .reg_desc = {
+ [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
+ [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
+ [DISPC_MGR_FLD_GO] = { DISPC_CONTROL3, 5, 5 },
+ [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 },
+ [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 },
+ [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 },
+ [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 },
+ [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 },
+ [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 },
+ },
+ },
+};
+
static void _omap_dispc_set_irqs(void);
static inline void dispc_write_reg(const u16 idx, u32 val)
@@ -131,6 +222,18 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
+static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
+{
+ const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ return REG_GET(rfld.reg, rfld.high, rfld.low);
+}
+
+static void mgr_fld_write(enum omap_channel channel,
+ enum mgr_reg_fields regfld, int val)
+{
+ const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld];
+ REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
+}
+
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
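The mgr_desc[] table added above replaces the per-channel switch statements with a data-driven lookup: each manager channel carries a reg_field descriptor (register offset plus high/low bit positions) for every field it owns, and mgr_fld_read()/mgr_fld_write() do the shift-and-mask work. A minimal user-space sketch of the same technique follows; the channel names, field names and register offsets here are hypothetical, not the real DISPC layout.

#include <stdint.h>
#include <stdio.h>

/* descriptor for one bit field inside a 32-bit register */
struct reg_field {
	uint16_t reg;	/* register offset */
	uint8_t high;	/* most significant bit of the field */
	uint8_t low;	/* least significant bit of the field */
};

enum channel { CH_A, CH_B, NUM_CH };
enum field { FLD_ENABLE, FLD_GO, NUM_FLD };

/* hypothetical offsets; the real table uses DISPC_CONTROL/DISPC_CONFIG etc. */
static const struct reg_field desc[NUM_CH][NUM_FLD] = {
	[CH_A] = {
		[FLD_ENABLE] = { 0x00, 0, 0 },
		[FLD_GO]     = { 0x00, 5, 5 },
	},
	[CH_B] = {
		[FLD_ENABLE] = { 0x04, 0, 0 },
		[FLD_GO]     = { 0x04, 6, 6 },
	},
};

static uint32_t regs[2];	/* stands in for the memory-mapped register file */

static uint32_t fld_read(enum channel ch, enum field f)
{
	struct reg_field rf = desc[ch][f];
	uint32_t mask = ((1u << (rf.high - rf.low + 1)) - 1) << rf.low;

	return (regs[rf.reg / 4] & mask) >> rf.low;
}

static void fld_write(enum channel ch, enum field f, uint32_t val)
{
	struct reg_field rf = desc[ch][f];
	uint32_t mask = ((1u << (rf.high - rf.low + 1)) - 1) << rf.low;

	regs[rf.reg / 4] = (regs[rf.reg / 4] & ~mask) | ((val << rf.low) & mask);
}

int main(void)
{
	fld_write(CH_B, FLD_GO, 1);	/* same shape as mgr_fld_write() */
	printf("CH_B GO = %u\n", (unsigned)fld_read(CH_B, FLD_GO));
	return 0;
}

Adding a manager (as the LCD3 entries do in this patch) then only means adding one table entry instead of extending every switch.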
@@ -153,6 +256,10 @@ static void dispc_save_context(void)
SR(CONTROL2);
SR(CONFIG2);
}
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ SR(CONTROL3);
+ SR(CONFIG3);
+ }
for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
SR(DEFAULT_COLOR(i));
@@ -266,6 +373,8 @@ static void dispc_restore_context(void)
RR(GLOBAL_ALPHA);
if (dss_has_feature(FEAT_MGR_LCD2))
RR(CONFIG2);
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ RR(CONFIG3);
for (i = 0; i < dss_feat_get_num_mgrs(); i++) {
RR(DEFAULT_COLOR(i));
@@ -351,6 +460,8 @@ static void dispc_restore_context(void)
RR(CONTROL);
if (dss_has_feature(FEAT_MGR_LCD2))
RR(CONTROL2);
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ RR(CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
@@ -387,101 +498,41 @@ void dispc_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
-{
- if (channel == OMAP_DSS_CHANNEL_LCD ||
- channel == OMAP_DSS_CHANNEL_LCD2)
- return true;
- else
- return false;
-}
-
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
- switch (channel) {
- case OMAP_DSS_CHANNEL_LCD:
- return DISPC_IRQ_VSYNC;
- case OMAP_DSS_CHANNEL_LCD2:
- return DISPC_IRQ_VSYNC2;
- case OMAP_DSS_CHANNEL_DIGIT:
- return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- default:
- BUG();
- return 0;
- }
+ return mgr_desc[channel].vsync_irq;
}
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
{
- switch (channel) {
- case OMAP_DSS_CHANNEL_LCD:
- return DISPC_IRQ_FRAMEDONE;
- case OMAP_DSS_CHANNEL_LCD2:
- return DISPC_IRQ_FRAMEDONE2;
- case OMAP_DSS_CHANNEL_DIGIT:
- return 0;
- default:
- BUG();
- return 0;
- }
+ return mgr_desc[channel].framedone_irq;
}
bool dispc_mgr_go_busy(enum omap_channel channel)
{
- int bit;
-
- if (dispc_mgr_is_lcd(channel))
- bit = 5; /* GOLCD */
- else
- bit = 6; /* GODIGIT */
-
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- return REG_GET(DISPC_CONTROL2, bit, bit) == 1;
- else
- return REG_GET(DISPC_CONTROL, bit, bit) == 1;
+ return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
}
void dispc_mgr_go(enum omap_channel channel)
{
- int bit;
bool enable_bit, go_bit;
- if (dispc_mgr_is_lcd(channel))
- bit = 0; /* LCDENABLE */
- else
- bit = 1; /* DIGITALENABLE */
-
/* if the channel is not enabled, we don't need GO */
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
- else
- enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+ enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1;
if (!enable_bit)
return;
- if (dispc_mgr_is_lcd(channel))
- bit = 5; /* GOLCD */
- else
- bit = 6; /* GODIGIT */
-
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1;
- else
- go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1;
+ go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
if (go_bit) {
DSSERR("GO bit not down for channel %d\n", channel);
return;
}
- DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" :
- (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT"));
+ DSSDBG("GO %s\n", mgr_desc[channel].name);
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit);
- else
- REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
+ mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
}
static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value)
@@ -832,6 +883,15 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
chan = 0;
chan2 = 1;
break;
+ case OMAP_DSS_CHANNEL_LCD3:
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ chan = 0;
+ chan2 = 2;
+ } else {
+ BUG();
+ return;
+ }
+ break;
default:
BUG();
return;
@@ -867,7 +927,14 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ if (FLD_GET(val, 31, 30) == 0)
+ channel = FLD_GET(val, shift, shift);
+ else if (FLD_GET(val, 31, 30) == 1)
+ channel = OMAP_DSS_CHANNEL_LCD2;
+ else
+ channel = OMAP_DSS_CHANNEL_LCD3;
+ } else if (dss_has_feature(FEAT_MGR_LCD2)) {
if (FLD_GET(val, 31, 30) == 0)
channel = FLD_GET(val, shift, shift);
else
@@ -922,16 +989,10 @@ void dispc_enable_gamma_table(bool enable)
static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
- u16 reg;
-
- if (channel == OMAP_DSS_CHANNEL_LCD)
- reg = DISPC_CONFIG;
- else if (channel == OMAP_DSS_CHANNEL_LCD2)
- reg = DISPC_CONFIG2;
- else
+ if (channel == OMAP_DSS_CHANNEL_DIGIT)
return;
- REG_FLD_MOD(reg, enable, 15, 15);
+ mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable);
}
static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
@@ -939,7 +1000,7 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
{
u32 coef_r, coef_g, coef_b;
- if (!dispc_mgr_is_lcd(channel))
+ if (!dss_mgr_is_lcd(channel))
return;
coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) |
@@ -1798,7 +1859,7 @@ static int check_horiz_timing_omap3(enum omap_channel channel,
nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
pclk = dispc_mgr_pclk_rate(channel);
- if (dispc_mgr_is_lcd(channel))
+ if (dss_mgr_is_lcd(channel))
lclk = dispc_mgr_lclk_rate(channel);
else
lclk = dispc_fclk_rate();
@@ -2086,8 +2147,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication,
- const struct omap_video_timings *mgr_timings)
+ bool replication, const struct omap_video_timings *mgr_timings)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
bool five_taps = true;
@@ -2103,6 +2163,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
u16 out_width, out_height;
enum omap_channel channel;
int x_predecim = 1, y_predecim = 1;
+ bool ilace = mgr_timings->interlace;
channel = dispc_ovl_get_channel_out(plane);
@@ -2254,14 +2315,9 @@ static void dispc_disable_isr(void *data, u32 mask)
static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD2) {
- REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
- /* flush posted write */
- dispc_read_reg(DISPC_CONTROL2);
- } else {
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
- dispc_read_reg(DISPC_CONTROL);
- }
+ mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
+ /* flush posted write */
+ mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
@@ -2274,12 +2330,9 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
/* When we disable LCD output, we need to wait until frame is done.
* Otherwise the DSS is still working, and turning off the clocks
* prevents DSS from going to OFF mode */
- is_on = channel == OMAP_DSS_CHANNEL_LCD2 ?
- REG_GET(DISPC_CONTROL2, 0, 0) :
- REG_GET(DISPC_CONTROL, 0, 0);
+ is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
- irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 :
- DISPC_IRQ_FRAMEDONE;
+ irq = mgr_desc[channel].framedone_irq;
if (!enable && is_on) {
init_completion(&frame_done_completion);
@@ -2384,21 +2437,12 @@ static void dispc_mgr_enable_digit_out(bool enable)
bool dispc_mgr_is_enabled(enum omap_channel channel)
{
- if (channel == OMAP_DSS_CHANNEL_LCD)
- return !!REG_GET(DISPC_CONTROL, 0, 0);
- else if (channel == OMAP_DSS_CHANNEL_DIGIT)
- return !!REG_GET(DISPC_CONTROL, 1, 1);
- else if (channel == OMAP_DSS_CHANNEL_LCD2)
- return !!REG_GET(DISPC_CONTROL2, 0, 0);
- else {
- BUG();
- return false;
- }
+ return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
}
void dispc_mgr_enable(enum omap_channel channel, bool enable)
{
- if (dispc_mgr_is_lcd(channel))
+ if (dss_mgr_is_lcd(channel))
dispc_mgr_enable_lcd_out(channel, enable);
else if (channel == OMAP_DSS_CHANNEL_DIGIT)
dispc_mgr_enable_digit_out(enable);
@@ -2432,36 +2476,13 @@ void dispc_pck_free_enable(bool enable)
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16);
- else
- REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
+ mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
-void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
- enum omap_lcd_display_type type)
+void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
{
- int mode;
-
- switch (type) {
- case OMAP_DSS_LCD_DISPLAY_STN:
- mode = 0;
- break;
-
- case OMAP_DSS_LCD_DISPLAY_TFT:
- mode = 1;
- break;
-
- default:
- BUG();
- return;
- }
-
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3);
- else
- REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
+ mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1);
}
void dispc_set_loadmode(enum omap_dss_load_mode mode)
@@ -2479,24 +2500,14 @@ static void dispc_mgr_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
- if (ch == OMAP_DSS_CHANNEL_LCD)
- REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
- else /* OMAP_DSS_CHANNEL_LCD2 */
- REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11);
+ mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type);
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
}
static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
{
- if (ch == OMAP_DSS_CHANNEL_LCD)
- REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
- else /* OMAP_DSS_CHANNEL_LCD2 */
- REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
+ mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable);
}
static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
@@ -2547,10 +2558,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
return;
}
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8);
- else
- REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
+ mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
@@ -2584,10 +2592,7 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD2)
- REG_FLD_MOD(DISPC_CONTROL2, enable, 11, 11);
- else
- REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
+ mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable);
}
static bool _dispc_mgr_size_ok(u16 width, u16 height)
@@ -2627,7 +2632,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
- if (dispc_mgr_is_lcd(channel))
+ if (dss_mgr_is_lcd(channel))
timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
timings->hfp, timings->hbp,
timings->vsw, timings->vfp,
@@ -2637,9 +2642,16 @@ bool dispc_mgr_timings_ok(enum omap_channel channel,
}
static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
- int hfp, int hbp, int vsw, int vfp, int vbp)
+ int hfp, int hbp, int vsw, int vfp, int vbp,
+ enum omap_dss_signal_level vsync_level,
+ enum omap_dss_signal_level hsync_level,
+ enum omap_dss_signal_edge data_pclk_edge,
+ enum omap_dss_signal_level de_level,
+ enum omap_dss_signal_edge sync_pclk_edge)
+
{
- u32 timing_h, timing_v;
+ u32 timing_h, timing_v, l;
+ bool onoff, rf, ipc;
if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
@@ -2657,6 +2669,44 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
+
+ switch (data_pclk_edge) {
+ case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ ipc = false;
+ break;
+ case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ ipc = true;
+ break;
+ case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES:
+ default:
+ BUG();
+ }
+
+ switch (sync_pclk_edge) {
+ case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES:
+ onoff = false;
+ rf = false;
+ break;
+ case OMAPDSS_DRIVE_SIG_FALLING_EDGE:
+ onoff = true;
+ rf = false;
+ break;
+ case OMAPDSS_DRIVE_SIG_RISING_EDGE:
+ onoff = true;
+ rf = true;
+ break;
+ default:
+ BUG();
+ }
+
+ l = dispc_read_reg(DISPC_POL_FREQ(channel));
+ l |= FLD_VAL(onoff, 17, 17);
+ l |= FLD_VAL(rf, 16, 16);
+ l |= FLD_VAL(de_level, 15, 15);
+ l |= FLD_VAL(ipc, 14, 14);
+ l |= FLD_VAL(hsync_level, 13, 13);
+ l |= FLD_VAL(vsync_level, 12, 12);
+ dispc_write_reg(DISPC_POL_FREQ(channel), l);
}
/* change name to mode? */
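The two switch statements above fold the new signal-edge enums into the DISPC_POL_FREQ bits that the deleted _dispc_mgr_set_pol_freq() took as raw booleans: IPC (bit 14) comes from data_pclk_edge, and ONOFF/RF (bits 17/16) come from sync_pclk_edge. A standalone sketch of that mapping follows; the local enum stands in for the OMAPDSS_DRIVE_SIG_* values, the bit positions are copied from the hunk above, and the BUG() paths are dropped.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stands in for the omapdss drive-edge enum used by the patch */
enum sig_edge { RISING_EDGE, FALLING_EDGE, OPPOSITE_EDGES };

/* ONOFF/RF/IPC contribution to DISPC_POL_FREQ (bits 17, 16, 14) */
static uint32_t pol_freq_edge_bits(enum sig_edge data_pclk_edge,
				   enum sig_edge sync_pclk_edge)
{
	bool onoff, rf, ipc;

	/* IPC: invert pixel clock, i.e. drive data on the falling edge */
	ipc = (data_pclk_edge == FALLING_EDGE);

	/* ONOFF/RF: which pclk edge the sync signals are driven on */
	switch (sync_pclk_edge) {
	case OPPOSITE_EDGES:
		onoff = false;	/* rf is ignored in this case */
		rf = false;
		break;
	case FALLING_EDGE:
		onoff = true;
		rf = false;
		break;
	case RISING_EDGE:
	default:
		onoff = true;
		rf = true;
		break;
	}

	return ((uint32_t)onoff << 17) | ((uint32_t)rf << 16) |
	       ((uint32_t)ipc << 14);
}

int main(void)
{
	/* e.g. the panel-picodlp timings added earlier in this patch:
	 * data on the rising edge, syncs driven on the falling edge */
	printf("POL_FREQ edge bits: 0x%05x\n",
	       (unsigned)pol_freq_edge_bits(RISING_EDGE, FALLING_EDGE));
	return 0;
}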
@@ -2674,9 +2724,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
return;
}
- if (dispc_mgr_is_lcd(channel)) {
+ if (dss_mgr_is_lcd(channel)) {
_dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
- t.vfp, t.vbp);
+ t.vfp, t.vbp, t.vsync_level, t.hsync_level,
+ t.data_pclk_edge, t.de_level, t.sync_pclk_edge);
xtot = t.x_res + t.hfp + t.hsw + t.hbp;
ytot = t.y_res + t.vfp + t.vsw + t.vbp;
@@ -2687,14 +2738,13 @@ void dispc_mgr_set_timings(enum omap_channel channel,
DSSDBG("pck %u\n", timings->pixel_clock);
DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
+ DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n",
+ t.vsync_level, t.hsync_level, t.data_pclk_edge,
+ t.de_level, t.sync_pclk_edge);
DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
} else {
- enum dss_hdmi_venc_clk_source_select source;
-
- source = dss_get_hdmi_venc_clk_source();
-
- if (source == DSS_VENC_TV_CLK)
+ if (t.interlace == true)
t.y_res /= 2;
}
@@ -2780,7 +2830,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
{
unsigned long r;
- if (dispc_mgr_is_lcd(channel)) {
+ if (dss_mgr_is_lcd(channel)) {
int pcd;
u32 l;
@@ -2821,12 +2871,32 @@ unsigned long dispc_core_clk_rate(void)
return fclk / lcd;
}
-void dispc_dump_clocks(struct seq_file *s)
+static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
{
int lcd, pcd;
+ enum omap_dss_clk_source lcd_clk_src;
+
+ seq_printf(s, "- %s -\n", mgr_desc[channel].name);
+
+ lcd_clk_src = dss_get_lcd_clk_source(channel);
+
+ seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name,
+ dss_get_generic_clk_source_name(lcd_clk_src),
+ dss_feat_get_clk_source_name(lcd_clk_src));
+
+ dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
+
+ seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
+ dispc_mgr_lclk_rate(channel), lcd);
+ seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
+ dispc_mgr_pclk_rate(channel), pcd);
+}
+
+void dispc_dump_clocks(struct seq_file *s)
+{
+ int lcd;
u32 l;
enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
- enum omap_dss_clk_source lcd_clk_src;
if (dispc_runtime_get())
return;
@@ -2847,36 +2917,13 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
(dispc_fclk_rate()/lcd), lcd);
}
- seq_printf(s, "- LCD1 -\n");
-
- lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD);
-
- seq_printf(s, "lcd1_clk source = %s (%s)\n",
- dss_get_generic_clk_source_name(lcd_clk_src),
- dss_feat_get_clk_source_name(lcd_clk_src));
-
- dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd);
-
- seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd);
- seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd);
- if (dss_has_feature(FEAT_MGR_LCD2)) {
- seq_printf(s, "- LCD2 -\n");
-
- lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2);
- seq_printf(s, "lcd2_clk source = %s (%s)\n",
- dss_get_generic_clk_source_name(lcd_clk_src),
- dss_feat_get_clk_source_name(lcd_clk_src));
+ dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD);
- dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd);
-
- seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd);
- seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd);
- }
+ if (dss_has_feature(FEAT_MGR_LCD2))
+ dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2);
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3);
dispc_runtime_put();
}
@@ -2929,6 +2976,12 @@ void dispc_dump_irqs(struct seq_file *s)
PIS(ACBIAS_COUNT_STAT2);
PIS(SYNC_LOST2);
}
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ PIS(FRAMEDONE3);
+ PIS(VSYNC3);
+ PIS(ACBIAS_COUNT_STAT3);
+ PIS(SYNC_LOST3);
+ }
#undef PIS
}
#endif
@@ -2940,6 +2993,7 @@ static void dispc_dump_regs(struct seq_file *s)
[OMAP_DSS_CHANNEL_LCD] = "LCD",
[OMAP_DSS_CHANNEL_DIGIT] = "TV",
[OMAP_DSS_CHANNEL_LCD2] = "LCD2",
+ [OMAP_DSS_CHANNEL_LCD3] = "LCD3",
};
const char *ovl_names[] = {
[OMAP_DSS_GFX] = "GFX",
@@ -2972,6 +3026,10 @@ static void dispc_dump_regs(struct seq_file *s)
DUMPREG(DISPC_CONTROL2);
DUMPREG(DISPC_CONFIG2);
}
+ if (dss_has_feature(FEAT_MGR_LCD3)) {
+ DUMPREG(DISPC_CONTROL3);
+ DUMPREG(DISPC_CONFIG3);
+ }
#undef DUMPREG
@@ -3093,41 +3151,8 @@ static void dispc_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff,
- bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi,
- u8 acb)
-{
- u32 l = 0;
-
- DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n",
- onoff, rf, ieo, ipc, ihs, ivs, acbi, acb);
-
- l |= FLD_VAL(onoff, 17, 17);
- l |= FLD_VAL(rf, 16, 16);
- l |= FLD_VAL(ieo, 15, 15);
- l |= FLD_VAL(ipc, 14, 14);
- l |= FLD_VAL(ihs, 13, 13);
- l |= FLD_VAL(ivs, 12, 12);
- l |= FLD_VAL(acbi, 11, 8);
- l |= FLD_VAL(acb, 7, 0);
-
- dispc_write_reg(DISPC_POL_FREQ(channel), l);
-}
-
-void dispc_mgr_set_pol_freq(enum omap_channel channel,
- enum omap_panel_config config, u8 acbi, u8 acb)
-{
- _dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0,
- (config & OMAP_DSS_LCD_RF) != 0,
- (config & OMAP_DSS_LCD_IEO) != 0,
- (config & OMAP_DSS_LCD_IPC) != 0,
- (config & OMAP_DSS_LCD_IHS) != 0,
- (config & OMAP_DSS_LCD_IVS) != 0,
- acbi, acb);
-}
-
/* with fck as input clock rate, find dispc dividers that produce req_pck */
-void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo)
{
u16 pcd_min, pcd_max;
@@ -3138,9 +3163,6 @@ void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD);
pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD);
- if (!is_tft)
- pcd_min = 3;
-
best_pck = 0;
best_ld = 0;
best_pd = 0;
@@ -3192,15 +3214,13 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-int dispc_mgr_set_clock_div(enum omap_channel channel,
+void dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
-
- return 0;
}
int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -3354,6 +3374,8 @@ static void print_irq_status(u32 status)
PIS(SYNC_LOST_DIGIT);
if (dss_has_feature(FEAT_MGR_LCD2))
PIS(SYNC_LOST2);
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ PIS(SYNC_LOST3);
#undef PIS
printk("\n");
@@ -3450,12 +3472,6 @@ static void dispc_error_worker(struct work_struct *work)
DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};
- static const unsigned sync_lost_bits[] = {
- DISPC_IRQ_SYNC_LOST,
- DISPC_IRQ_SYNC_LOST_DIGIT,
- DISPC_IRQ_SYNC_LOST2,
- };
-
spin_lock_irqsave(&dispc.irq_lock, flags);
errors = dispc.error_irqs;
dispc.error_irqs = 0;
@@ -3484,7 +3500,7 @@ static void dispc_error_worker(struct work_struct *work)
unsigned bit;
mgr = omap_dss_get_overlay_manager(i);
- bit = sync_lost_bits[i];
+ bit = mgr_desc[i].sync_lost_irq;
if (bit & errors) {
struct omap_dss_device *dssdev = mgr->device;
@@ -3603,6 +3619,8 @@ static void _omap_dispc_initialize_irq(void)
dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
if (dss_has_feature(FEAT_MGR_LCD2))
dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2;
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3;
if (dss_feat_get_num_ovls() > 3)
dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW;
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index f278080e1063..92d8a9be86fc 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -36,6 +36,8 @@
#define DISPC_CONTROL2 0x0238
#define DISPC_CONFIG2 0x0620
#define DISPC_DIVISOR 0x0804
+#define DISPC_CONTROL3 0x0848
+#define DISPC_CONFIG3 0x084C
/* DISPC overlay registers */
#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
@@ -118,6 +120,8 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
return 0x0050;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03AC;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0814;
default:
BUG();
return 0;
@@ -133,6 +137,8 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
return 0x0058;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B0;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0818;
default:
BUG();
return 0;
@@ -149,6 +155,8 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0400;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0840;
default:
BUG();
return 0;
@@ -165,6 +173,8 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0404;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0844;
default:
BUG();
return 0;
@@ -181,6 +191,8 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0408;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x083C;
default:
BUG();
return 0;
@@ -197,6 +209,8 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x040C;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0838;
default:
BUG();
return 0;
@@ -213,6 +227,8 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
return 0x0078;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03CC;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0834;
default:
BUG();
return 0;
@@ -229,6 +245,8 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C0;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0828;
default:
BUG();
return 0;
@@ -245,6 +263,8 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C4;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x082C;
default:
BUG();
return 0;
@@ -261,6 +281,8 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C8;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0830;
default:
BUG();
return 0;
@@ -277,6 +299,8 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03BC;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0824;
default:
BUG();
return 0;
@@ -293,6 +317,8 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B8;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x0820;
default:
BUG();
return 0;
@@ -309,6 +335,8 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B4;
+ case OMAP_DSS_CHANNEL_LCD3:
+ return 0x081C;
default:
BUG();
return 0;
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 249010630370..5bd957e85505 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -116,7 +116,7 @@ static ssize_t display_timings_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_video_timings t;
+ struct omap_video_timings t = dssdev->panel.timings;
int r, found;
if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
@@ -316,44 +316,6 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omapdss_default_get_timings);
-/* Checks if replication logic should be used. Only use for active matrix,
- * when overlay is in RGB12U or RGB16 mode, and LCD interface is
- * 18bpp or 24bpp */
-bool dss_use_replication(struct omap_dss_device *dssdev,
- enum omap_color_mode mode)
-{
- int bpp;
-
- if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
- return false;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_DPI &&
- (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0)
- return false;
-
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- bpp = dssdev->phy.dpi.data_lines;
- break;
- case OMAP_DISPLAY_TYPE_HDMI:
- case OMAP_DISPLAY_TYPE_VENC:
- case OMAP_DISPLAY_TYPE_SDI:
- bpp = 24;
- break;
- case OMAP_DISPLAY_TYPE_DBI:
- bpp = dssdev->ctrl.pixel_size;
- break;
- case OMAP_DISPLAY_TYPE_DSI:
- bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
- break;
- default:
- BUG();
- return false;
- }
-
- return bpp > 16;
-}
-
void dss_init_device(struct platform_device *pdev,
struct omap_dss_device *dssdev)
{
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 8c2056c9537b..3266be23fc0d 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -38,6 +38,8 @@
static struct {
struct regulator *vdds_dsi_reg;
struct platform_device *dsidev;
+
+ struct dss_lcd_mgr_config mgr_config;
} dpi;
static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk)
@@ -64,7 +66,7 @@ static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev)
return false;
}
-static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
+static int dpi_set_dsi_clk(struct omap_dss_device *dssdev,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
@@ -72,8 +74,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
struct dispc_clock_info dispc_cinfo;
int r;
- r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req,
- &dsi_cinfo, &dispc_cinfo);
+ r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo,
+ &dispc_cinfo);
if (r)
return r;
@@ -83,11 +85,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
- r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
- if (r) {
- dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
- return r;
- }
+ dpi.mgr_config.clock_info = dispc_cinfo;
*fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
*lck_div = dispc_cinfo.lck_div;
@@ -96,7 +94,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft,
return 0;
}
-static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
+static int dpi_set_dispc_clk(struct omap_dss_device *dssdev,
unsigned long pck_req, unsigned long *fck, int *lck_div,
int *pck_div)
{
@@ -104,7 +102,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
struct dispc_clock_info dispc_cinfo;
int r;
- r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo);
+ r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo);
if (r)
return r;
@@ -112,9 +110,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft,
if (r)
return r;
- r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
- if (r)
- return r;
+ dpi.mgr_config.clock_info = dispc_cinfo;
*fck = dss_cinfo.fck;
*lck_div = dispc_cinfo.lck_div;
@@ -129,20 +125,14 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
unsigned long pck;
- bool is_tft;
int r = 0;
- dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
- dssdev->panel.acbi, dssdev->panel.acb);
-
- is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
-
if (dpi_use_dsi_pll(dssdev))
- r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000,
- &fck, &lck_div, &pck_div);
+ r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck,
+ &lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000,
- &fck, &lck_div, &pck_div);
+ r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck,
+ &lck_div, &pck_div);
if (r)
return r;
@@ -161,19 +151,18 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
return 0;
}
-static void dpi_basic_init(struct omap_dss_device *dssdev)
+static void dpi_config_lcd_manager(struct omap_dss_device *dssdev)
{
- bool is_tft;
+ dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+
+ dpi.mgr_config.stallmode = false;
+ dpi.mgr_config.fifohandcheck = false;
- is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
+ dpi.mgr_config.video_port_width = dssdev->phy.dpi.data_lines;
- dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS);
- dispc_mgr_enable_stallmode(dssdev->manager->id, false);
+ dpi.mgr_config.lcden_sig_polarity = 0;
- dispc_mgr_set_lcd_display_type(dssdev->manager->id, is_tft ?
- OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN);
- dispc_mgr_set_tft_data_lines(dssdev->manager->id,
- dssdev->phy.dpi.data_lines);
+ dss_mgr_set_lcd_config(dssdev->manager, &dpi.mgr_config);
}
int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
@@ -206,8 +195,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- dpi_basic_init(dssdev);
-
if (dpi_use_dsi_pll(dssdev)) {
r = dsi_runtime_get(dpi.dsidev);
if (r)
@@ -222,6 +209,8 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_set_mode;
+ dpi_config_lcd_manager(dssdev);
+
mdelay(2);
r = dss_mgr_enable(dssdev->manager);
@@ -292,7 +281,6 @@ EXPORT_SYMBOL(dpi_set_timings);
int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- bool is_tft;
int r;
int lck_div, pck_div;
unsigned long fck;
@@ -305,11 +293,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
if (timings->pixel_clock == 0)
return -EINVAL;
- is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
-
if (dpi_use_dsi_pll(dssdev)) {
struct dsi_clock_info dsi_cinfo;
- r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft,
+ r = dsi_pll_calc_clock_div_pck(dpi.dsidev,
timings->pixel_clock * 1000,
&dsi_cinfo, &dispc_cinfo);
@@ -319,7 +305,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk;
} else {
struct dss_clock_info dss_cinfo;
- r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
+ r = dss_calc_clock_div(timings->pixel_clock * 1000,
&dss_cinfo, &dispc_cinfo);
if (r)
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 14ce8cc079e3..b07e8864f82f 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -331,6 +331,8 @@ struct dsi_data {
unsigned num_lanes_used;
unsigned scp_clk_refcount;
+
+ struct dss_lcd_mgr_config mgr_config;
};
struct dsi_packet_sent_handler_data {
@@ -1085,9 +1087,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (enable)
- clk_enable(dsi->sys_clk);
+ clk_prepare_enable(dsi->sys_clk);
else
- clk_disable(dsi->sys_clk);
+ clk_disable_unprepare(dsi->sys_clk);
if (enable && dsi->pll_locked) {
if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
@@ -1316,7 +1318,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev,
return 0;
}
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
+int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
@@ -1335,8 +1337,8 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
dsi->cache_cinfo.clkin == dss_sys_clk) {
DSSDBG("DSI clock info found from cache\n");
*dsi_cinfo = dsi->cache_cinfo;
- dispc_find_clk_divs(is_tft, req_pck,
- dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
+ dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk,
+ dispc_cinfo);
return 0;
}
@@ -1402,7 +1404,7 @@ retry:
match = 1;
- dispc_find_clk_divs(is_tft, req_pck,
+ dispc_find_clk_divs(req_pck,
cur.dsi_pll_hsdiv_dispc_clk,
&cur_dispc);
@@ -3631,17 +3633,14 @@ static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev)
static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- int de_pol = dssdev->panel.dsi_vm_data.vp_de_pol;
- int hsync_pol = dssdev->panel.dsi_vm_data.vp_hsync_pol;
- int vsync_pol = dssdev->panel.dsi_vm_data.vp_vsync_pol;
bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end;
bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end;
u32 r;
r = dsi_read_reg(dsidev, DSI_CTRL);
- r = FLD_MOD(r, de_pol, 9, 9); /* VP_DE_POL */
- r = FLD_MOD(r, hsync_pol, 10, 10); /* VP_HSYNC_POL */
- r = FLD_MOD(r, vsync_pol, 11, 11); /* VP_VSYNC_POL */
+ r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
+ r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
+ r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */
r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
@@ -4340,52 +4339,101 @@ EXPORT_SYMBOL(omap_dsi_update);
/* Display funcs */
+static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dispc_clock_info dispc_cinfo;
+ int r;
+ unsigned long long fck;
+
+ fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+
+ dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
+ dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
+
+ r = dispc_calc_clock_rates(fck, &dispc_cinfo);
+ if (r) {
+ DSSERR("Failed to calc dispc clocks\n");
+ return r;
+ }
+
+ dsi->mgr_config.clock_info = dispc_cinfo;
+
+ return 0;
+}
+
static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct omap_video_timings timings;
int r;
+ u32 irq = 0;
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
u16 dw, dh;
- u32 irq;
- struct omap_video_timings timings = {
- .hsw = 1,
- .hfp = 1,
- .hbp = 1,
- .vsw = 1,
- .vfp = 0,
- .vbp = 0,
- };
dssdev->driver->get_resolution(dssdev, &dw, &dh);
+
timings.x_res = dw;
timings.y_res = dh;
+ timings.hsw = 1;
+ timings.hfp = 1;
+ timings.hbp = 1;
+ timings.vsw = 1;
+ timings.vfp = 0;
+ timings.vbp = 0;
- irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
+ irq = dispc_mgr_get_framedone_irq(dssdev->manager->id);
r = omap_dispc_register_isr(dsi_framedone_irq_callback,
(void *) dssdev, irq);
if (r) {
DSSERR("can't get FRAMEDONE irq\n");
- return r;
+ goto err;
}
- dispc_mgr_enable_stallmode(dssdev->manager->id, true);
- dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
-
- dss_mgr_set_timings(dssdev->manager, &timings);
+ dsi->mgr_config.stallmode = true;
+ dsi->mgr_config.fifohandcheck = true;
} else {
- dispc_mgr_enable_stallmode(dssdev->manager->id, false);
- dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
+ timings = dssdev->panel.timings;
- dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
+ dsi->mgr_config.stallmode = false;
+ dsi->mgr_config.fifohandcheck = false;
}
- dispc_mgr_set_lcd_display_type(dssdev->manager->id,
- OMAP_DSS_LCD_DISPLAY_TFT);
- dispc_mgr_set_tft_data_lines(dssdev->manager->id,
- dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt));
+ /*
+ * override interlace, logic level and edge related parameters in
+ * omap_video_timings with default values
+ */
+ timings.interlace = false;
+ timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+
+ dss_mgr_set_timings(dssdev->manager, &timings);
+
+ r = dsi_configure_dispc_clocks(dssdev);
+ if (r)
+ goto err1;
+
+ dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+ dsi->mgr_config.video_port_width =
+ dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ dsi->mgr_config.lcden_sig_polarity = 0;
+
+ dss_mgr_set_lcd_config(dssdev->manager, &dsi->mgr_config);
+
return 0;
+err1:
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE)
+ omap_dispc_unregister_isr(dsi_framedone_irq_callback,
+ (void *) dssdev, irq);
+err:
+ return r;
}
static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
@@ -4393,8 +4441,7 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
u32 irq;
- irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
+ irq = dispc_mgr_get_framedone_irq(dssdev->manager->id);
omap_dispc_unregister_isr(dsi_framedone_irq_callback,
(void *) dssdev, irq);
@@ -4426,33 +4473,6 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
return 0;
}
-static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
-{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dispc_clock_info dispc_cinfo;
- int r;
- unsigned long long fck;
-
- fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
-
- dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div;
- dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div;
-
- r = dispc_calc_clock_rates(fck, &dispc_cinfo);
- if (r) {
- DSSERR("Failed to calc dispc clocks\n");
- return r;
- }
-
- r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
- if (r) {
- DSSERR("Failed to set dispc clocks\n");
- return r;
- }
-
- return 0;
-}
-
static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4474,10 +4494,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
DSSDBG("PLL OK\n");
- r = dsi_configure_dispc_clocks(dssdev);
- if (r)
- goto err2;
-
r = dsi_cio_init(dssdev);
if (r)
goto err2;
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index d2b57197b292..04b4586113e3 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -388,7 +388,8 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
dsi_wait_pll_hsdiv_dispc_active(dsidev);
break;
case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
- BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2);
+ BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
+ channel != OMAP_DSS_CHANNEL_LCD3);
b = 1;
dsidev = dsi_get_dsidev_from_id(1);
dsi_wait_pll_hsdiv_dispc_active(dsidev);
@@ -398,10 +399,12 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
return;
}
- pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
+ pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
+ (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
- ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
+ (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
dss.lcd_clk_source[ix] = clk_src;
}
@@ -418,7 +421,8 @@ enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module)
enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
{
if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
- int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1;
+ int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
+ (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
return dss.lcd_clk_source[ix];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
@@ -502,8 +506,7 @@ unsigned long dss_get_dpll4_rate(void)
return 0;
}
-int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
- struct dss_clock_info *dss_cinfo,
+int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
unsigned long prate;
@@ -551,7 +554,7 @@ retry:
fck = clk_get_rate(dss.dss_clk);
fck_div = 1;
- dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+ dispc_find_clk_divs(req_pck, fck, &cur_dispc);
match = 1;
best_dss.fck = fck;
@@ -581,7 +584,7 @@ retry:
match = 1;
- dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
+ dispc_find_clk_divs(req_pck, fck, &cur_dispc);
if (abs(cur_dispc.pck - req_pck) <
abs(best_dispc.pck - req_pck)) {
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index dd1092ceaeef..f67afe76f217 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -152,6 +152,25 @@ struct dsi_clock_info {
u16 lp_clk_div;
};
+struct reg_field {
+ u16 reg;
+ u8 high;
+ u8 low;
+};
+
+struct dss_lcd_mgr_config {
+ enum dss_io_pad_mode io_pad_mode;
+
+ bool stallmode;
+ bool fifohandcheck;
+
+ struct dispc_clock_info clock_info;
+
+ int video_port_width;
+
+ int lcden_sig_polarity;
+};
+
struct seq_file;
struct platform_device;
@@ -188,6 +207,8 @@ int dss_mgr_set_device(struct omap_overlay_manager *mgr,
int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
struct omap_video_timings *timings);
+void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config);
const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
bool dss_ovl_is_enabled(struct omap_overlay *ovl);
@@ -210,8 +231,6 @@ void dss_init_device(struct platform_device *pdev,
struct omap_dss_device *dssdev);
void dss_uninit_device(struct platform_device *pdev,
struct omap_dss_device *dssdev);
-bool dss_use_replication(struct omap_dss_device *dssdev,
- enum omap_color_mode mode);
/* manager */
int dss_init_overlay_managers(struct platform_device *pdev);
@@ -223,8 +242,18 @@ int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
int dss_mgr_check(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info,
const struct omap_video_timings *mgr_timings,
+ const struct dss_lcd_mgr_config *config,
struct omap_overlay_info **overlay_infos);
+static inline bool dss_mgr_is_lcd(enum omap_channel id)
+{
+ if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 ||
+ id == OMAP_DSS_CHANNEL_LCD3)
+ return true;
+ else
+ return false;
+}
+
/* overlay */
void dss_init_overlays(struct platform_device *pdev);
void dss_uninit_overlays(struct platform_device *pdev);
@@ -234,6 +263,8 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
const struct omap_overlay_info *info);
int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
const struct omap_video_timings *mgr_timings);
+bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
+ enum omap_color_mode mode);
/* DSS */
int dss_init_platform_driver(void) __init;
@@ -268,8 +299,7 @@ unsigned long dss_get_dpll4_rate(void);
int dss_calc_clock_rates(struct dss_clock_info *cinfo);
int dss_set_clock_div(struct dss_clock_info *cinfo);
int dss_get_clock_div(struct dss_clock_info *cinfo);
-int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
- struct dss_clock_info *dss_cinfo,
+int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo,
struct dispc_clock_info *dispc_cinfo);
/* SDI */
@@ -296,7 +326,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev);
int dsi_pll_set_clock_div(struct platform_device *dsidev,
struct dsi_clock_info *cinfo);
-int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
+int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
unsigned long req_pck, struct dsi_clock_info *cinfo,
struct dispc_clock_info *dispc_cinfo);
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
@@ -330,7 +360,7 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev,
return -ENODEV;
}
static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev,
- bool is_tft, unsigned long req_pck,
+ unsigned long req_pck,
struct dsi_clock_info *dsi_cinfo,
struct dispc_clock_info *dispc_cinfo)
{
@@ -387,7 +417,7 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode);
bool dispc_mgr_timings_ok(enum omap_channel channel,
const struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
-void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
+void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
@@ -398,8 +428,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
bool manual_update);
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication,
- const struct omap_video_timings *mgr_timings);
+ bool replication, const struct omap_video_timings *mgr_timings);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
@@ -415,16 +444,13 @@ bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
-void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
- enum omap_lcd_display_type type);
+void dispc_mgr_set_lcd_type_tft(enum omap_channel channel);
void dispc_mgr_set_timings(enum omap_channel channel,
struct omap_video_timings *timings);
-void dispc_mgr_set_pol_freq(enum omap_channel channel,
- enum omap_panel_config config, u8 acbi, u8 acb);
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
unsigned long dispc_core_clk_rate(void);
-int dispc_mgr_set_clock_div(enum omap_channel channel,
+void dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index bdf469f080e7..996ffcbfed58 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -24,9 +24,9 @@
#include "ti_hdmi.h"
#endif
-#define MAX_DSS_MANAGERS 3
+#define MAX_DSS_MANAGERS 4
#define MAX_DSS_OVERLAYS 4
-#define MAX_DSS_LCD_MANAGERS 2
+#define MAX_DSS_LCD_MANAGERS 3
#define MAX_NUM_DSI 2
/* DSS has feature id */
@@ -36,6 +36,7 @@ enum dss_feat_id {
FEAT_PCKFREEENABLE,
FEAT_FUNCGATED,
FEAT_MGR_LCD2,
+ FEAT_MGR_LCD3,
FEAT_LINEBUFFERSPLIT,
FEAT_ROWREPEATENABLE,
FEAT_RESIZECONF,
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 26a2430a7028..060216fdc578 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -78,43 +78,214 @@ static struct {
*/
static const struct hdmi_config cea_timings[] = {
-{ {640, 480, 25200, 96, 16, 48, 2, 10, 33, 0, 0, 0}, {1, HDMI_HDMI} },
-{ {720, 480, 27027, 62, 16, 60, 6, 9, 30, 0, 0, 0}, {2, HDMI_HDMI} },
-{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {4, HDMI_HDMI} },
-{ {1920, 540, 74250, 44, 88, 148, 5, 2, 15, 1, 1, 1}, {5, HDMI_HDMI} },
-{ {1440, 240, 27027, 124, 38, 114, 3, 4, 15, 0, 0, 1}, {6, HDMI_HDMI} },
-{ {1920, 1080, 148500, 44, 88, 148, 5, 4, 36, 1, 1, 0}, {16, HDMI_HDMI} },
-{ {720, 576, 27000, 64, 12, 68, 5, 5, 39, 0, 0, 0}, {17, HDMI_HDMI} },
-{ {1280, 720, 74250, 40, 440, 220, 5, 5, 20, 1, 1, 0}, {19, HDMI_HDMI} },
-{ {1920, 540, 74250, 44, 528, 148, 5, 2, 15, 1, 1, 1}, {20, HDMI_HDMI} },
-{ {1440, 288, 27000, 126, 24, 138, 3, 2, 19, 0, 0, 1}, {21, HDMI_HDMI} },
-{ {1440, 576, 54000, 128, 24, 136, 5, 5, 39, 0, 0, 0}, {29, HDMI_HDMI} },
-{ {1920, 1080, 148500, 44, 528, 148, 5, 4, 36, 1, 1, 0}, {31, HDMI_HDMI} },
-{ {1920, 1080, 74250, 44, 638, 148, 5, 4, 36, 1, 1, 0}, {32, HDMI_HDMI} },
-{ {2880, 480, 108108, 248, 64, 240, 6, 9, 30, 0, 0, 0}, {35, HDMI_HDMI} },
-{ {2880, 576, 108000, 256, 48, 272, 5, 5, 39, 0, 0, 0}, {37, HDMI_HDMI} },
+ {
+ { 640, 480, 25200, 96, 16, 48, 2, 10, 33,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 1, HDMI_HDMI },
+ },
+ {
+ { 720, 480, 27027, 62, 16, 60, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 2, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 4, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 88, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 5, HDMI_HDMI },
+ },
+ {
+ { 1440, 240, 27027, 124, 38, 114, 3, 4, 15,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 6, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 16, HDMI_HDMI },
+ },
+ {
+ { 720, 576, 27000, 64, 12, 68, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 17, HDMI_HDMI },
+ },
+ {
+ { 1280, 720, 74250, 40, 440, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 19, HDMI_HDMI },
+ },
+ {
+ { 1920, 540, 74250, 44, 528, 148, 5, 2, 15,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ true, },
+ { 20, HDMI_HDMI },
+ },
+ {
+ { 1440, 288, 27000, 126, 24, 138, 3, 2, 19,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ true, },
+ { 21, HDMI_HDMI },
+ },
+ {
+ { 1440, 576, 54000, 128, 24, 136, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 29, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 31, HDMI_HDMI },
+ },
+ {
+ { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 32, HDMI_HDMI },
+ },
+ {
+ { 2880, 480, 108108, 248, 64, 240, 6, 9, 30,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 35, HDMI_HDMI },
+ },
+ {
+ { 2880, 576, 108000, 256, 48, 272, 5, 5, 39,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 37, HDMI_HDMI },
+ },
};
+
static const struct hdmi_config vesa_timings[] = {
/* VESA From Here */
-{ {640, 480, 25175, 96, 16, 48, 2 , 11, 31, 0, 0, 0}, {4, HDMI_DVI} },
-{ {800, 600, 40000, 128, 40, 88, 4 , 1, 23, 1, 1, 0}, {9, HDMI_DVI} },
-{ {848, 480, 33750, 112, 16, 112, 8 , 6, 23, 1, 1, 0}, {0xE, HDMI_DVI} },
-{ {1280, 768, 79500, 128, 64, 192, 7 , 3, 20, 1, 0, 0}, {0x17, HDMI_DVI} },
-{ {1280, 800, 83500, 128, 72, 200, 6 , 3, 22, 1, 0, 0}, {0x1C, HDMI_DVI} },
-{ {1360, 768, 85500, 112, 64, 256, 6 , 3, 18, 1, 1, 0}, {0x27, HDMI_DVI} },
-{ {1280, 960, 108000, 112, 96, 312, 3 , 1, 36, 1, 1, 0}, {0x20, HDMI_DVI} },
-{ {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38, 1, 1, 0}, {0x23, HDMI_DVI} },
-{ {1024, 768, 65000, 136, 24, 160, 6, 3, 29, 0, 0, 0}, {0x10, HDMI_DVI} },
-{ {1400, 1050, 121750, 144, 88, 232, 4, 3, 32, 1, 0, 0}, {0x2A, HDMI_DVI} },
-{ {1440, 900, 106500, 152, 80, 232, 6, 3, 25, 1, 0, 0}, {0x2F, HDMI_DVI} },
-{ {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, 1, 0, 0}, {0x3A, HDMI_DVI} },
-{ {1366, 768, 85500, 143, 70, 213, 3, 3, 24, 1, 1, 0}, {0x51, HDMI_DVI} },
-{ {1920, 1080, 148500, 44, 148, 80, 5, 4, 36, 1, 1, 0}, {0x52, HDMI_DVI} },
-{ {1280, 768, 68250, 32, 48, 80, 7, 3, 12, 0, 1, 0}, {0x16, HDMI_DVI} },
-{ {1400, 1050, 101000, 32, 48, 80, 4, 3, 23, 0, 1, 0}, {0x29, HDMI_DVI} },
-{ {1680, 1050, 119000, 32, 48, 80, 6, 3, 21, 0, 1, 0}, {0x39, HDMI_DVI} },
-{ {1280, 800, 79500, 32, 48, 80, 6, 3, 14, 0, 1, 0}, {0x1B, HDMI_DVI} },
-{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {0x55, HDMI_DVI} }
+ {
+ { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 4, HDMI_DVI },
+ },
+ {
+ { 800, 600, 40000, 128, 40, 88, 4, 1, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 9, HDMI_DVI },
+ },
+ {
+ { 848, 480, 33750, 112, 16, 112, 8, 6, 23,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0xE, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 79500, 128, 64, 192, 7, 3, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x17, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 83500, 128, 72, 200, 6, 3, 22,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x1C, HDMI_DVI },
+ },
+ {
+ { 1360, 768, 85500, 112, 64, 256, 6, 3, 18,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x27, HDMI_DVI },
+ },
+ {
+ { 1280, 960, 108000, 112, 96, 312, 3, 1, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x20, HDMI_DVI },
+ },
+ {
+ { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x23, HDMI_DVI },
+ },
+ {
+ { 1024, 768, 65000, 136, 24, 160, 6, 3, 29,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x10, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2A, HDMI_DVI },
+ },
+ {
+ { 1440, 900, 106500, 152, 80, 232, 6, 3, 25,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x2F, HDMI_DVI },
+ },
+ {
+ { 1680, 1050, 146250, 176, 104, 280, 6, 3, 30,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
+ false, },
+ { 0x3A, HDMI_DVI },
+ },
+ {
+ { 1366, 768, 85500, 143, 70, 213, 3, 3, 24,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x51, HDMI_DVI },
+ },
+ {
+ { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x52, HDMI_DVI },
+ },
+ {
+ { 1280, 768, 68250, 32, 48, 80, 7, 3, 12,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x16, HDMI_DVI },
+ },
+ {
+ { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x29, HDMI_DVI },
+ },
+ {
+ { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x39, HDMI_DVI },
+ },
+ {
+ { 1280, 800, 79500, 32, 48, 80, 6, 3, 14,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x1B, HDMI_DVI },
+ },
+ {
+ { 1280, 720, 74250, 40, 110, 220, 5, 5, 20,
+ OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
+ false, },
+ { 0x55, HDMI_DVI },
+ },
};
static int hdmi_runtime_get(void)
@@ -179,7 +350,7 @@ static const struct hdmi_config *hdmi_get_timings(void)
}
static bool hdmi_timings_compare(struct omap_video_timings *timing1,
- const struct hdmi_video_timings *timing2)
+ const struct omap_video_timings *timing2)
{
int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
@@ -758,6 +929,7 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi.ip_data.core_av_offset = HDMI_CORE_AV;
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
+ mutex_init(&hdmi.ip_data.lock);
hdmi_panel_init();
@@ -785,7 +957,7 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- clk_disable(hdmi.sys_clk);
+ clk_disable_unprepare(hdmi.sys_clk);
dispc_runtime_put();
@@ -800,7 +972,7 @@ static int hdmi_runtime_resume(struct device *dev)
if (r < 0)
return r;
- clk_enable(hdmi.sys_clk);
+ clk_prepare_enable(hdmi.sys_clk);
return 0;
}
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 1179e3c4b1c7..e10844faadf9 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -43,10 +43,11 @@ static int hdmi_panel_probe(struct omap_dss_device *dssdev)
{
DSSDBG("ENTER hdmi_panel_probe\n");
- dssdev->panel.config = OMAP_DSS_LCD_TFT |
- OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS;
-
- dssdev->panel.timings = (struct omap_video_timings){640, 480, 25175, 96, 16, 48, 2 , 11, 31};
+ dssdev->panel.timings = (struct omap_video_timings)
+ { 640, 480, 25175, 96, 16, 48, 2, 11, 31,
+ OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
+ false,
+ };
DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
dssdev->panel.timings.x_res,
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 0cbcde4c688a..53710fadc82d 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -500,16 +500,12 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
if (r)
return r;
- if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC)
irq = DISPC_IRQ_EVSYNC_ODD;
- } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) {
+ else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
irq = DISPC_IRQ_EVSYNC_EVEN;
- } else {
- if (mgr->id == OMAP_DSS_CHANNEL_LCD)
- irq = DISPC_IRQ_VSYNC;
- else
- irq = DISPC_IRQ_VSYNC2;
- }
+ else
+ irq = dispc_mgr_get_vsync_irq(mgr->id);
r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
@@ -545,6 +541,10 @@ int dss_init_overlay_managers(struct platform_device *pdev)
mgr->name = "lcd2";
mgr->id = OMAP_DSS_CHANNEL_LCD2;
break;
+ case 3:
+ mgr->name = "lcd3";
+ mgr->id = OMAP_DSS_CHANNEL_LCD3;
+ break;
}
mgr->set_device = &dss_mgr_set_device;
@@ -665,9 +665,40 @@ int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
return 0;
}
+static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr,
+ const struct dss_lcd_mgr_config *config)
+{
+ struct dispc_clock_info cinfo = config->clock_info;
+ int dl = config->video_port_width;
+ bool stallmode = config->stallmode;
+ bool fifohandcheck = config->fifohandcheck;
+
+ if (cinfo.lck_div < 1 || cinfo.lck_div > 255)
+ return -EINVAL;
+
+ if (cinfo.pck_div < 1 || cinfo.pck_div > 255)
+ return -EINVAL;
+
+ if (dl != 12 && dl != 16 && dl != 18 && dl != 24)
+ return -EINVAL;
+
+ /* fifohandcheck should be used only with stallmode */
+ if (stallmode == false && fifohandcheck == true)
+ return -EINVAL;
+
+ /*
+ * The io pad mode can only be checked via the dssdev connected to the
+ * manager. Skip these checks for now and add them once the manager can
+ * hold information about the connected interface.
+ */
+
+ return 0;
+}
+
int dss_mgr_check(struct omap_overlay_manager *mgr,
struct omap_overlay_manager_info *info,
const struct omap_video_timings *mgr_timings,
+ const struct dss_lcd_mgr_config *lcd_config,
struct omap_overlay_info **overlay_infos)
{
struct omap_overlay *ovl;
@@ -683,6 +714,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
if (r)
return r;
+ r = dss_mgr_check_lcd_config(mgr, lcd_config);
+ if (r)
+ return r;
+
list_for_each_entry(ovl, &mgr->overlays, list) {
struct omap_overlay_info *oi;
int r;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index b0ba60f88dd2..952c6fad9a81 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -528,14 +528,24 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
struct omap_overlay_manager *lcd_mgr;
struct omap_overlay_manager *tv_mgr;
struct omap_overlay_manager *lcd2_mgr = NULL;
+ struct omap_overlay_manager *lcd3_mgr = NULL;
struct omap_overlay_manager *mgr = NULL;
- lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
- tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
+ lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD);
+ tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_DIGIT);
+ if (dss_has_feature(FEAT_MGR_LCD3))
+ lcd3_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD3);
if (dss_has_feature(FEAT_MGR_LCD2))
- lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2);
-
- if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
+ lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD2);
+
+ if (dssdev->channel == OMAP_DSS_CHANNEL_LCD3) {
+ if (!lcd3_mgr->device || force) {
+ if (lcd3_mgr->device)
+ lcd3_mgr->unset_device(lcd3_mgr);
+ lcd3_mgr->set_device(lcd3_mgr, dssdev);
+ mgr = lcd3_mgr;
+ }
+ } else if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) {
if (!lcd2_mgr->device || force) {
if (lcd2_mgr->device)
lcd2_mgr->unset_device(lcd2_mgr);
@@ -677,3 +687,16 @@ int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
return 0;
}
+
+/*
+ * Checks if replication logic should be used. Only use it when the overlay is
+ * in RGB12U or RGB16 mode and the video port width is 18 or 24 bits.
+ */
+bool dss_ovl_use_replication(struct dss_lcd_mgr_config config,
+ enum omap_color_mode mode)
+{
+ if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
+ return false;
+
+ return config.video_port_width > 16;
+}
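
For readers skimming this hunk, the replication decision above can be tried outside the kernel. The following is a minimal standalone sketch of the same predicate; the enum values and struct fields are simplified stand-ins, not the omapdss definitions, and the whole program is illustrative rather than part of this patch.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the omapdss types used by dss_ovl_use_replication(). */
enum color_mode { COLOR_RGB12U, COLOR_RGB16, COLOR_RGB24U };

struct lcd_mgr_config {
	int video_port_width;	/* interface width in bits: 12/16/18/24 */
};

/* Replication helps only when a 12/16-bit overlay feeds an 18/24-bit port. */
static bool use_replication(struct lcd_mgr_config config, enum color_mode mode)
{
	if (mode != COLOR_RGB12U && mode != COLOR_RGB16)
		return false;

	return config.video_port_width > 16;
}

int main(void)
{
	struct lcd_mgr_config port18 = { .video_port_width = 18 };
	struct lcd_mgr_config port16 = { .video_port_width = 16 };

	printf("RGB16 on 18-bit port: %d\n", use_replication(port18, COLOR_RGB16));
	printf("RGB16 on 16-bit port: %d\n", use_replication(port16, COLOR_RGB16));
	printf("RGB24 on 18-bit port: %d\n", use_replication(port18, COLOR_RGB24U));
	return 0;
}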
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 7985fa12b9b4..7c087424b634 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -300,10 +300,11 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
}
EXPORT_SYMBOL(omap_rfbi_write_pixels);
-static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
+static int rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
u16 height, void (*callback)(void *data), void *data)
{
u32 l;
+ int r;
struct omap_video_timings timings = {
.hsw = 1,
.hfp = 1,
@@ -322,7 +323,9 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
dss_mgr_set_timings(dssdev->manager, &timings);
- dispc_mgr_enable(dssdev->manager->id, true);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ return r;
rfbi.framedone_callback = callback;
rfbi.framedone_callback_data = data;
@@ -335,6 +338,8 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
l = FLD_MOD(l, 1, 4, 4); /* ITE */
rfbi_write_reg(RFBI_CONTROL, l);
+
+ return 0;
}
static void framedone_callback(void *data, u32 mask)
@@ -814,8 +819,11 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h,
void (*callback)(void *), void *data)
{
- rfbi_transfer_area(dssdev, w, h, callback, data);
- return 0;
+ int r;
+
+ r = rfbi_transfer_area(dssdev, w, h, callback, data);
+
+ return r;
}
EXPORT_SYMBOL(omap_rfbi_update);
@@ -859,6 +867,22 @@ static void rfbi_dump_regs(struct seq_file *s)
#undef DUMPREG
}
+static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
+{
+ struct dss_lcd_mgr_config mgr_config;
+
+ mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI;
+
+ mgr_config.stallmode = true;
+ /* Do we need fifohandcheck for RFBI? */
+ mgr_config.fifohandcheck = false;
+
+ mgr_config.video_port_width = dssdev->ctrl.pixel_size;
+ mgr_config.lcden_sig_polarity = 0;
+
+ dss_mgr_set_lcd_config(dssdev->manager, &mgr_config);
+}
+
int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -885,13 +909,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
goto err1;
}
- dispc_mgr_set_lcd_display_type(dssdev->manager->id,
- OMAP_DSS_LCD_DISPLAY_TFT);
-
- dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_RFBI);
- dispc_mgr_enable_stallmode(dssdev->manager->id, true);
-
- dispc_mgr_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size);
+ rfbi_config_lcd_manager(dssdev);
rfbi_configure(dssdev->phy.rfbi.channel,
dssdev->ctrl.pixel_size,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 3a43dc2a9b46..f43bfe17b3b6 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -32,19 +32,21 @@
static struct {
bool update_enabled;
struct regulator *vdds_sdi_reg;
-} sdi;
-static void sdi_basic_init(struct omap_dss_device *dssdev)
+ struct dss_lcd_mgr_config mgr_config;
+} sdi;
+static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
{
- dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS);
- dispc_mgr_enable_stallmode(dssdev->manager->id, false);
+ sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+
+ sdi.mgr_config.stallmode = false;
+ sdi.mgr_config.fifohandcheck = false;
- dispc_mgr_set_lcd_display_type(dssdev->manager->id,
- OMAP_DSS_LCD_DISPLAY_TFT);
+ sdi.mgr_config.video_port_width = 24;
+ sdi.mgr_config.lcden_sig_polarity = 1;
- dispc_mgr_set_tft_data_lines(dssdev->manager->id, 24);
- dispc_lcd_enable_signal_polarity(1);
+ dss_mgr_set_lcd_config(dssdev->manager, &sdi.mgr_config);
}
int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
@@ -52,8 +54,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
struct omap_video_timings *t = &dssdev->panel.timings;
struct dss_clock_info dss_cinfo;
struct dispc_clock_info dispc_cinfo;
- u16 lck_div, pck_div;
- unsigned long fck;
unsigned long pck;
int r;
@@ -76,24 +76,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- sdi_basic_init(dssdev);
-
/* 15.5.9.1.2 */
- dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
-
- dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config,
- dssdev->panel.acbi, dssdev->panel.acb);
+ dssdev->panel.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ dssdev->panel.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
- r = dss_calc_clock_div(1, t->pixel_clock * 1000,
- &dss_cinfo, &dispc_cinfo);
+ r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
- fck = dss_cinfo.fck;
- lck_div = dispc_cinfo.lck_div;
- pck_div = dispc_cinfo.pck_div;
+ sdi.mgr_config.clock_info = dispc_cinfo;
- pck = fck / lck_div / pck_div / 1000;
+ pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000;
if (pck != t->pixel_clock) {
DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
@@ -110,9 +103,21 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_set_dss_clock_div;
- r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo);
- if (r)
- goto err_set_dispc_clock_div;
+ sdi_config_lcd_manager(dssdev);
+
+ /*
+ * LCLK and PCLK divisors are located in shadow registers, and we
+ * normally write them to DISPC registers when enabling the output.
+ * However, SDI uses pck-free as source clock for its PLL, and pck-free
+ * is affected by the divisors. And as we need the PLL before enabling
+ * the output, we need to write the divisors early.
+ *
+ * It seems just writing to the DISPC register is enough, and we don't
+ * need to care about the shadow register mechanism for pck-free. The
+ * exact reason for this is unknown.
+ */
+ dispc_mgr_set_clock_div(dssdev->manager->id,
+ &sdi.mgr_config.clock_info);
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
@@ -129,7 +134,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
err_mgr_enable:
dss_sdi_disable();
err_sdi_enable:
-err_set_dispc_clock_div:
err_set_dss_clock_div:
err_calc_clock_div:
dispc_runtime_put();
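
The SDI hunk above derives the achieved pixel clock as fck / lck_div / pck_div / 1000 (kHz) and warns when it differs from the requested rate. Below is a standalone sketch of that arithmetic only; the clock values are made-up examples, not taken from any OMAP datasheet.

#include <stdio.h>

/* Mirror of the SDI pixel clock check: pck = fck / lck_div / pck_div / 1000. */
int main(void)
{
	unsigned long fck = 172800000;		/* example DSS functional clock, Hz */
	unsigned int lck_div = 1;
	unsigned int pck_div = 4;
	unsigned long requested_pck = 43200;	/* requested pixel clock, kHz */

	unsigned long pck = fck / lck_div / pck_div / 1000;

	if (pck != requested_pck)
		printf("Could not find exact pixel clock. Requested %lu kHz, got %lu kHz\n",
		       requested_pck, pck);
	else
		printf("Exact pixel clock found: %lu kHz\n", pck);

	return 0;
}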
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index e734cb444bc7..b046c208cb97 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -42,30 +42,13 @@ enum hdmi_clk_refsel {
HDMI_REFSEL_SYSCLK = 3
};
-/* HDMI timing structure */
-struct hdmi_video_timings {
- u16 x_res;
- u16 y_res;
- /* Unit: KHz */
- u32 pixel_clock;
- u16 hsw;
- u16 hfp;
- u16 hbp;
- u16 vsw;
- u16 vfp;
- u16 vbp;
- bool vsync_pol;
- bool hsync_pol;
- bool interlace;
-};
-
struct hdmi_cm {
int code;
int mode;
};
struct hdmi_config {
- struct hdmi_video_timings timings;
+ struct omap_video_timings timings;
struct hdmi_cm cm;
};
@@ -177,7 +160,7 @@ struct hdmi_ip_data {
/* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
int hpd_gpio;
- bool phy_tx_enabled;
+ struct mutex lock;
};
int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index 4dae1b291079..c23b85a20cdc 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -157,6 +157,10 @@ static int hdmi_pll_init(struct hdmi_ip_data *ip_data)
/* PHY_PWR_CMD */
static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val)
{
+ /* Return if the PHY is already in the requested power state */
+ if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val)
+ return 0;
+
/* Command for power control of HDMI PHY */
REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6);
@@ -231,21 +235,13 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
{
- unsigned long flags;
bool hpd;
int r;
- /* this should be in ti_hdmi_4xxx_ip private data */
- static DEFINE_SPINLOCK(phy_tx_lock);
- spin_lock_irqsave(&phy_tx_lock, flags);
+ mutex_lock(&ip_data->lock);
hpd = gpio_get_value(ip_data->hpd_gpio);
- if (hpd == ip_data->phy_tx_enabled) {
- spin_unlock_irqrestore(&phy_tx_lock, flags);
- return 0;
- }
-
if (hpd)
r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
else
@@ -257,9 +253,8 @@ static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
goto err;
}
- ip_data->phy_tx_enabled = hpd;
err:
- spin_unlock_irqrestore(&phy_tx_lock, flags);
+ mutex_unlock(&ip_data->lock);
return r;
}
@@ -327,7 +322,6 @@ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
- ip_data->phy_tx_enabled = false;
}
static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
@@ -747,11 +741,15 @@ static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data,
static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data)
{
u32 r;
+ bool vsync_pol, hsync_pol;
pr_debug("Enter hdmi_wp_video_config_interface\n");
+ vsync_pol = ip_data->cfg.timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+ hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH;
+
r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG);
- r = FLD_MOD(r, ip_data->cfg.timings.vsync_pol, 7, 7);
- r = FLD_MOD(r, ip_data->cfg.timings.hsync_pol, 6, 6);
+ r = FLD_MOD(r, vsync_pol, 7, 7);
+ r = FLD_MOD(r, hsync_pol, 6, 6);
r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3);
r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */
hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r);
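
The ti_hdmi_4xxx_ip.c hunks above replace a static spinlock and the phy_tx_enabled flag with a per-device mutex, since the hot-plug handler may sleep while switching PHY power. The sketch below is a userspace analogy of that pattern only: pthreads stand in for the kernel mutex, and the GPIO/PHY helpers are stubs, not kernel APIs. Compile with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for gpio_get_value() and hdmi_set_phy_pwr(). */
static bool read_hpd_gpio(void) { return true; }
static int set_phy_power(bool tx_on)
{
	printf("PHY power -> %s\n", tx_on ? "TX ON" : "LDO ON");
	return 0;
}

struct hpd_ctx {
	pthread_mutex_t lock;	/* serializes HPD handling, like ip_data->lock */
};

/* Read the hot-plug line and apply the matching PHY power state under the lock. */
static int check_hpd_state(struct hpd_ctx *ctx)
{
	bool hpd;
	int r;

	pthread_mutex_lock(&ctx->lock);
	hpd = read_hpd_gpio();
	r = set_phy_power(hpd);
	pthread_mutex_unlock(&ctx->lock);

	return r;
}

int main(void)
{
	struct hpd_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
	return check_hpd_state(&ctx);
}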
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 3907c8b6ecbc..3a220877461a 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -272,6 +272,8 @@ const struct omap_video_timings omap_dss_pal_timings = {
.vsw = 5,
.vfp = 5,
.vbp = 41,
+
+ .interlace = true,
};
EXPORT_SYMBOL(omap_dss_pal_timings);
@@ -285,6 +287,8 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
.vsw = 6,
.vfp = 6,
.vbp = 31,
+
+ .interlace = true,
};
EXPORT_SYMBOL(omap_dss_ntsc_timings);
@@ -930,7 +934,7 @@ static int __exit omap_venchw_remove(struct platform_device *pdev)
static int venc_runtime_suspend(struct device *dev)
{
if (venc.tv_dac_clk)
- clk_disable(venc.tv_dac_clk);
+ clk_disable_unprepare(venc.tv_dac_clk);
dispc_runtime_put();
@@ -946,7 +950,7 @@ static int venc_runtime_resume(struct device *dev)
return r;
if (venc.tv_dac_clk)
- clk_enable(venc.tv_dac_clk);
+ clk_prepare_enable(venc.tv_dac_clk);
return 0;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 3450ea0966c9..fc671d3d8004 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -733,6 +733,12 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
var->lower_margin = timings.vfp;
var->hsync_len = timings.hsw;
var->vsync_len = timings.vsw;
+ var->sync |= timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH ?
+ FB_SYNC_HOR_HIGH_ACT : 0;
+ var->sync |= timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH ?
+ FB_SYNC_VERT_HIGH_ACT : 0;
+ var->vmode = timings.interlace ?
+ FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED;
} else {
var->pixclock = 0;
var->left_margin = 0;
@@ -741,12 +747,10 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
var->lower_margin = 0;
var->hsync_len = 0;
var->vsync_len = 0;
+ var->sync = 0;
+ var->vmode = FB_VMODE_NONINTERLACED;
}
- /* TODO: get these from panel->config */
- var->vmode = FB_VMODE_NONINTERLACED;
- var->sync = 0;
-
return 0;
}
@@ -1188,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
break;
if (regno < 16) {
- u16 pal;
+ u32 pal;
pal = ((red >> (16 - var->red.length)) <<
var->red.offset) |
((green >> (16 - var->green.length)) <<
@@ -1993,6 +1997,7 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
}
static int omapfb_mode_to_timings(const char *mode_str,
+ struct omap_dss_device *display,
struct omap_video_timings *timings, u8 *bpp)
{
struct fb_info *fbi;
@@ -2046,6 +2051,14 @@ static int omapfb_mode_to_timings(const char *mode_str,
goto err;
}
+ if (display->driver->get_timings) {
+ display->driver->get_timings(display, timings);
+ } else {
+ timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ }
+
timings->pixel_clock = PICOS2KHZ(var->pixclock);
timings->hbp = var->left_margin;
timings->hfp = var->right_margin;
@@ -2055,6 +2068,13 @@ static int omapfb_mode_to_timings(const char *mode_str,
timings->vsw = var->vsync_len;
timings->x_res = var->xres;
timings->y_res = var->yres;
+ timings->hsync_level = var->sync & FB_SYNC_HOR_HIGH_ACT ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ timings->vsync_level = var->sync & FB_SYNC_VERT_HIGH_ACT ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ timings->interlace = var->vmode & FB_VMODE_INTERLACED;
switch (var->bits_per_pixel) {
case 16:
@@ -2085,7 +2105,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
struct omap_video_timings timings, temp_timings;
struct omapfb_display_data *d;
- r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
+ r = omapfb_mode_to_timings(mode_str, display, &timings, &bpp);
if (r)
return r;
@@ -2178,8 +2198,17 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
}
static void fb_videomode_to_omap_timings(struct fb_videomode *m,
+ struct omap_dss_device *display,
struct omap_video_timings *t)
{
+ if (display->driver->get_timings) {
+ display->driver->get_timings(display, t);
+ } else {
+ t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
+ t->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
+ t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
+ }
+
t->x_res = m->xres;
t->y_res = m->yres;
t->pixel_clock = PICOS2KHZ(m->pixclock);
@@ -2189,6 +2218,13 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m,
t->vsw = m->vsync_len;
t->vfp = m->lower_margin;
t->vbp = m->upper_margin;
+ t->hsync_level = m->sync & FB_SYNC_HOR_HIGH_ACT ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ t->vsync_level = m->sync & FB_SYNC_VERT_HIGH_ACT ?
+ OMAPDSS_SIG_ACTIVE_HIGH :
+ OMAPDSS_SIG_ACTIVE_LOW;
+ t->interlace = m->vmode & FB_VMODE_INTERLACED;
}
static int omapfb_find_best_mode(struct omap_dss_device *display,
@@ -2231,7 +2267,7 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
if (m->xres == 2880 || m->xres == 1440)
continue;
- fb_videomode_to_omap_timings(m, &t);
+ fb_videomode_to_omap_timings(m, display, &t);
r = display->driver->check_timings(display, &t);
if (r == 0 && best_xres < m->xres) {
@@ -2245,7 +2281,8 @@ static int omapfb_find_best_mode(struct omap_dss_device *display,
goto err2;
}
- fb_videomode_to_omap_timings(&specs->modedb[best_idx], timings);
+ fb_videomode_to_omap_timings(&specs->modedb[best_idx], display,
+ timings);
r = 0;
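
The omapfb-main.c hunks above translate between fbdev's var->sync / var->vmode bitfields and the new omap_video_timings signal-level fields. The following is a compact standalone illustration of that mapping with simplified stand-in types; the FB_* values match the fbdev ABI, everything else is a placeholder for this sketch.

#include <stdbool.h>
#include <stdio.h>

#define FB_SYNC_HOR_HIGH_ACT	1
#define FB_SYNC_VERT_HIGH_ACT	2
#define FB_VMODE_INTERLACED	1

/* Simplified stand-ins for the omapdss signal-level enum and timings fields. */
enum sig_level { SIG_ACTIVE_LOW, SIG_ACTIVE_HIGH };

struct timings {
	enum sig_level hsync_level;
	enum sig_level vsync_level;
	bool interlace;
};

static struct timings from_fb(unsigned int sync, unsigned int vmode)
{
	struct timings t;

	t.hsync_level = (sync & FB_SYNC_HOR_HIGH_ACT) ?
			SIG_ACTIVE_HIGH : SIG_ACTIVE_LOW;
	t.vsync_level = (sync & FB_SYNC_VERT_HIGH_ACT) ?
			SIG_ACTIVE_HIGH : SIG_ACTIVE_LOW;
	t.interlace = vmode & FB_VMODE_INTERLACED;

	return t;
}

int main(void)
{
	struct timings t = from_fb(FB_SYNC_HOR_HIGH_ACT, 0);

	printf("hsync high: %d, vsync high: %d, interlace: %d\n",
	       t.hsync_level == SIG_ACTIVE_HIGH,
	       t.vsync_level == SIG_ACTIVE_HIGH, t.interlace);
	return 0;
}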
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 2c80246b18b8..1d007366b917 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -84,7 +84,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
"S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX",
"S3 Virge/GX2", "S3 Virge/GX2+", "",
"S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X",
- "S3 Trio3D"};
+ "S3 Trio3D", "S3 Virge/MX"};
#define CHIP_UNKNOWN 0x00
#define CHIP_732_TRIO32 0x01
@@ -105,6 +105,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64",
#define CHIP_362_TRIO3D_2X 0x11
#define CHIP_368_TRIO3D_2X 0x12
#define CHIP_365_TRIO3D 0x13
+#define CHIP_260_VIRGE_MX 0x14
#define CHIP_XXX_TRIO 0x80
#define CHIP_XXX_TRIO64V2_DXGX 0x81
@@ -280,7 +281,8 @@ static int __devinit s3fb_setup_ddc_bus(struct fb_info *info)
*/
/* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */
if (par->chip == CHIP_357_VIRGE_GX2 ||
- par->chip == CHIP_359_VIRGE_GX2P)
+ par->chip == CHIP_359_VIRGE_GX2P ||
+ par->chip == CHIP_260_VIRGE_MX)
svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03);
else
svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03);
@@ -487,7 +489,8 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock)
par->chip == CHIP_359_VIRGE_GX2P ||
par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
- par->chip == CHIP_368_TRIO3D_2X) {
+ par->chip == CHIP_368_TRIO3D_2X ||
+ par->chip == CHIP_260_VIRGE_MX) {
vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */
vga_wseq(par->state.vgabase, 0x29, r >> 2); /* remaining highest bit of r */
} else
@@ -690,7 +693,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip != CHIP_359_VIRGE_GX2P &&
par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
- par->chip != CHIP_368_TRIO3D_2X) {
+ par->chip != CHIP_368_TRIO3D_2X &&
+ par->chip != CHIP_260_VIRGE_MX) {
vga_wcrt(par->state.vgabase, 0x54, 0x18); /* M parameter */
vga_wcrt(par->state.vgabase, 0x60, 0xff); /* N parameter */
vga_wcrt(par->state.vgabase, 0x61, 0xff); /* L parameter */
@@ -739,7 +743,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip == CHIP_368_TRIO3D_2X ||
par->chip == CHIP_365_TRIO3D ||
par->chip == CHIP_375_VIRGE_DX ||
- par->chip == CHIP_385_VIRGE_GX) {
+ par->chip == CHIP_385_VIRGE_GX ||
+ par->chip == CHIP_260_VIRGE_MX) {
dbytes = info->var.xres * ((bpp+7)/8);
vga_wcrt(par->state.vgabase, 0x91, (dbytes + 7) / 8);
vga_wcrt(par->state.vgabase, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80);
@@ -751,7 +756,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip == CHIP_359_VIRGE_GX2P ||
par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
- par->chip == CHIP_368_TRIO3D_2X)
+ par->chip == CHIP_368_TRIO3D_2X ||
+ par->chip == CHIP_260_VIRGE_MX)
vga_wcrt(par->state.vgabase, 0x34, 0x00);
else /* enable Data Transfer Position Control (DTPC) */
vga_wcrt(par->state.vgabase, 0x34, 0x10);
@@ -807,7 +813,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip == CHIP_359_VIRGE_GX2P ||
par->chip == CHIP_360_TRIO3D_1X ||
par->chip == CHIP_362_TRIO3D_2X ||
- par->chip == CHIP_368_TRIO3D_2X)
+ par->chip == CHIP_368_TRIO3D_2X ||
+ par->chip == CHIP_260_VIRGE_MX)
svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0);
else {
svga_wcrt_mask(par->state.vgabase, 0x67, 0x10, 0xF0);
@@ -837,7 +844,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip != CHIP_359_VIRGE_GX2P &&
par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
- par->chip != CHIP_368_TRIO3D_2X)
+ par->chip != CHIP_368_TRIO3D_2X &&
+ par->chip != CHIP_260_VIRGE_MX)
hmul = 2;
}
break;
@@ -864,7 +872,8 @@ static int s3fb_set_par(struct fb_info *info)
par->chip != CHIP_359_VIRGE_GX2P &&
par->chip != CHIP_360_TRIO3D_1X &&
par->chip != CHIP_362_TRIO3D_2X &&
- par->chip != CHIP_368_TRIO3D_2X)
+ par->chip != CHIP_368_TRIO3D_2X &&
+ par->chip != CHIP_260_VIRGE_MX)
hmul = 2;
}
break;
@@ -1208,7 +1217,8 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
break;
}
} else if (par->chip == CHIP_357_VIRGE_GX2 ||
- par->chip == CHIP_359_VIRGE_GX2P) {
+ par->chip == CHIP_359_VIRGE_GX2P ||
+ par->chip == CHIP_260_VIRGE_MX) {
switch ((regval & 0xC0) >> 6) {
case 1: /* 4MB */
info->screen_size = 4 << 20;
@@ -1515,6 +1525,7 @@ static struct pci_device_id s3_devices[] __devinitdata = {
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X},
{PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8904), .driver_data = CHIP_365_TRIO3D},
+ {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8C01), .driver_data = CHIP_260_VIRGE_MX},
{0, 0, 0, 0, 0, 0, 0}
};
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 4c6b84488561..3951fdae5f68 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -127,8 +127,7 @@ static void sh_mipi_shutdown(struct platform_device *pdev)
sh_mipi_dsi_enable(mipi, false);
}
-static int __init sh_mipi_setup(struct sh_mipi *mipi,
- struct sh_mipi_dsi_info *pdata)
+static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata)
{
void __iomem *base = mipi->base;
struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
@@ -551,7 +550,7 @@ efindslot:
return ret;
}
-static int __exit sh_mipi_remove(struct platform_device *pdev)
+static int __devexit sh_mipi_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -592,7 +591,7 @@ static int __exit sh_mipi_remove(struct platform_device *pdev)
}
static struct platform_driver sh_mipi_driver = {
- .remove = __exit_p(sh_mipi_remove),
+ .remove = __devexit_p(sh_mipi_remove),
.shutdown = sh_mipi_shutdown,
.driver = {
.name = "sh-mipi-dsi",
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index e672698bd820..699487c287b2 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -12,6 +12,7 @@
#include <linux/backlight.h>
#include <linux/clk.h>
#include <linux/console.h>
+#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/gpio.h>
@@ -32,12 +33,176 @@
#include "sh_mobile_lcdcfb.h"
+/* ----------------------------------------------------------------------------
+ * Overlay register definitions
+ */
+
+#define LDBCR 0xb00
+#define LDBCR_UPC(n) (1 << ((n) + 16))
+#define LDBCR_UPF(n) (1 << ((n) + 8))
+#define LDBCR_UPD(n) (1 << ((n) + 0))
+#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00)
+#define LDBBSIFR_EN (1 << 31)
+#define LDBBSIFR_VS (1 << 29)
+#define LDBBSIFR_BRSEL (1 << 28)
+#define LDBBSIFR_MX (1 << 27)
+#define LDBBSIFR_MY (1 << 26)
+#define LDBBSIFR_CV3 (3 << 24)
+#define LDBBSIFR_CV2 (2 << 24)
+#define LDBBSIFR_CV1 (1 << 24)
+#define LDBBSIFR_CV0 (0 << 24)
+#define LDBBSIFR_CV_MASK (3 << 24)
+#define LDBBSIFR_LAY_MASK (0xff << 16)
+#define LDBBSIFR_LAY_SHIFT 16
+#define LDBBSIFR_ROP3_MASK (0xff << 16)
+#define LDBBSIFR_ROP3_SHIFT 16
+#define LDBBSIFR_AL_PL8 (3 << 14)
+#define LDBBSIFR_AL_PL1 (2 << 14)
+#define LDBBSIFR_AL_PK (1 << 14)
+#define LDBBSIFR_AL_1 (0 << 14)
+#define LDBBSIFR_AL_MASK (3 << 14)
+#define LDBBSIFR_SWPL (1 << 10)
+#define LDBBSIFR_SWPW (1 << 9)
+#define LDBBSIFR_SWPB (1 << 8)
+#define LDBBSIFR_RY (1 << 7)
+#define LDBBSIFR_CHRR_420 (2 << 0)
+#define LDBBSIFR_CHRR_422 (1 << 0)
+#define LDBBSIFR_CHRR_444 (0 << 0)
+#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0)
+#define LDBBSIFR_RPKF_RGB16 (0x03 << 0)
+#define LDBBSIFR_RPKF_RGB24 (0x0b << 0)
+#define LDBBSIFR_RPKF_MASK (0x1f << 0)
+#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04)
+#define LDBBSSZR_BVSS_MASK (0xfff << 16)
+#define LDBBSSZR_BVSS_SHIFT 16
+#define LDBBSSZR_BHSS_MASK (0xfff << 0)
+#define LDBBSSZR_BHSS_SHIFT 0
+#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08)
+#define LDBBLOCR_CVLC_MASK (0xfff << 16)
+#define LDBBLOCR_CVLC_SHIFT 16
+#define LDBBLOCR_CHLC_MASK (0xfff << 0)
+#define LDBBLOCR_CHLC_SHIFT 0
+#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c)
+#define LDBBSMWR_BSMWA_MASK (0xffff << 16)
+#define LDBBSMWR_BSMWA_SHIFT 16
+#define LDBBSMWR_BSMW_MASK (0xffff << 0)
+#define LDBBSMWR_BSMW_SHIFT 0
+#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10)
+#define LDBBSAYR_FG1A_MASK (0xff << 24)
+#define LDBBSAYR_FG1A_SHIFT 24
+#define LDBBSAYR_FG1R_MASK (0xff << 16)
+#define LDBBSAYR_FG1R_SHIFT 16
+#define LDBBSAYR_FG1G_MASK (0xff << 8)
+#define LDBBSAYR_FG1G_SHIFT 8
+#define LDBBSAYR_FG1B_MASK (0xff << 0)
+#define LDBBSAYR_FG1B_SHIFT 0
+#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14)
+#define LDBBSACR_FG2A_MASK (0xff << 24)
+#define LDBBSACR_FG2A_SHIFT 24
+#define LDBBSACR_FG2R_MASK (0xff << 16)
+#define LDBBSACR_FG2R_SHIFT 16
+#define LDBBSACR_FG2G_MASK (0xff << 8)
+#define LDBBSACR_FG2G_SHIFT 8
+#define LDBBSACR_FG2B_MASK (0xff << 0)
+#define LDBBSACR_FG2B_SHIFT 0
+#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18)
+#define LDBBSAAR_AP_MASK (0xff << 24)
+#define LDBBSAAR_AP_SHIFT 24
+#define LDBBSAAR_R_MASK (0xff << 16)
+#define LDBBSAAR_R_SHIFT 16
+#define LDBBSAAR_GY_MASK (0xff << 8)
+#define LDBBSAAR_GY_SHIFT 8
+#define LDBBSAAR_B_MASK (0xff << 0)
+#define LDBBSAAR_B_SHIFT 0
+#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c)
+#define LDBBPPCR_AP_MASK (0xff << 24)
+#define LDBBPPCR_AP_SHIFT 24
+#define LDBBPPCR_R_MASK (0xff << 16)
+#define LDBBPPCR_R_SHIFT 16
+#define LDBBPPCR_GY_MASK (0xff << 8)
+#define LDBBPPCR_GY_SHIFT 8
+#define LDBBPPCR_B_MASK (0xff << 0)
+#define LDBBPPCR_B_SHIFT 0
+#define LDBnBBGCL(n) (0xb10 + (n) * 0x04)
+#define LDBBBGCL_BGA_MASK (0xff << 24)
+#define LDBBBGCL_BGA_SHIFT 24
+#define LDBBBGCL_BGR_MASK (0xff << 16)
+#define LDBBBGCL_BGR_SHIFT 16
+#define LDBBBGCL_BGG_MASK (0xff << 8)
+#define LDBBBGCL_BGG_SHIFT 8
+#define LDBBBGCL_BGB_MASK (0xff << 0)
+#define LDBBBGCL_BGB_SHIFT 0
+
#define SIDE_B_OFFSET 0x1000
#define MIRROR_OFFSET 0x2000
#define MAX_XRES 1920
#define MAX_YRES 1080
+enum sh_mobile_lcdc_overlay_mode {
+ LCDC_OVERLAY_BLEND,
+ LCDC_OVERLAY_ROP3,
+};
+
+/*
+ * struct sh_mobile_lcdc_overlay - LCDC display overlay
+ *
+ * @channel: LCDC channel this overlay belongs to
+ * @cfg: Overlay configuration
+ * @info: Frame buffer device
+ * @index: Overlay index (0-3)
+ * @base: Overlay registers base address
+ * @enabled: True if the overlay is enabled
+ * @mode: Overlay blending mode (alpha blend or ROP3)
+ * @alpha: Global alpha blending value (0-255, for alpha blending mode)
+ * @rop3: Raster operation (for ROP3 mode)
+ * @fb_mem: Frame buffer virtual memory address
+ * @fb_size: Frame buffer size in bytes
+ * @dma_handle: Frame buffer DMA address
+ * @base_addr_y: Overlay base address (RGB or luma component)
+ * @base_addr_c: Overlay base address (chroma component)
+ * @pan_y_offset: Panning linear offset in bytes (luma component)
+ * @format: Current pixel format
+ * @xres: Horizontal visible resolution
+ * @xres_virtual: Horizontal total resolution
+ * @yres: Vertical visible resolution
+ * @yres_virtual: Vertical total resolution
+ * @pitch: Overlay line pitch
+ * @pos_x: Horizontal overlay position
+ * @pos_y: Vertical overlay position
+ */
+struct sh_mobile_lcdc_overlay {
+ struct sh_mobile_lcdc_chan *channel;
+
+ const struct sh_mobile_lcdc_overlay_cfg *cfg;
+ struct fb_info *info;
+
+ unsigned int index;
+ unsigned long base;
+
+ bool enabled;
+ enum sh_mobile_lcdc_overlay_mode mode;
+ unsigned int alpha;
+ unsigned int rop3;
+
+ void *fb_mem;
+ unsigned long fb_size;
+
+ dma_addr_t dma_handle;
+ unsigned long base_addr_y;
+ unsigned long base_addr_c;
+ unsigned long pan_y_offset;
+
+ const struct sh_mobile_lcdc_format_info *format;
+ unsigned int xres;
+ unsigned int xres_virtual;
+ unsigned int yres;
+ unsigned int yres_virtual;
+ unsigned int pitch;
+ int pos_x;
+ int pos_y;
+};
+
struct sh_mobile_lcdc_priv {
void __iomem *base;
int irq;
@@ -45,7 +210,10 @@ struct sh_mobile_lcdc_priv {
struct device *dev;
struct clk *dot_clk;
unsigned long lddckr;
+
struct sh_mobile_lcdc_chan ch[2];
+ struct sh_mobile_lcdc_overlay overlays[4];
+
struct notifier_block notifier;
int started;
int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
@@ -141,6 +309,13 @@ static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan,
return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]);
}
+static void lcdc_write_overlay(struct sh_mobile_lcdc_overlay *ovl,
+ int reg, unsigned long data)
+{
+ iowrite32(data, ovl->channel->lcdc->base + reg);
+ iowrite32(data, ovl->channel->lcdc->base + reg + SIDE_B_OFFSET);
+}
+
static void lcdc_write(struct sh_mobile_lcdc_priv *priv,
unsigned long reg_offs, unsigned long data)
{
@@ -384,8 +559,8 @@ sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch,
return true;
}
-static int sh_mobile_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
+static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info);
static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
enum sh_mobile_lcdc_entity_event event,
@@ -439,7 +614,7 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch,
fb_videomode_to_var(&var, mode);
var.bits_per_pixel = info->var.bits_per_pixel;
var.grayscale = info->var.grayscale;
- ret = sh_mobile_check_var(&var, info);
+ ret = sh_mobile_lcdc_check_var(&var, info);
break;
}
@@ -585,7 +760,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int sh_mobile_wait_for_vsync(struct sh_mobile_lcdc_chan *ch)
+static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch)
{
unsigned long ldintr;
int ret;
@@ -685,8 +860,98 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
lcdc_write_chan(ch, LDHAJR, tmp);
}
+static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl)
+{
+ u32 format = 0;
+
+ if (!ovl->enabled) {
+ lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index));
+ lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), 0);
+ lcdc_write(ovl->channel->lcdc, LDBCR,
+ LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index));
+ return;
+ }
+
+ ovl->base_addr_y = ovl->dma_handle;
+ ovl->base_addr_c = ovl->dma_handle
+ + ovl->xres_virtual * ovl->yres_virtual;
+
+ switch (ovl->mode) {
+ case LCDC_OVERLAY_BLEND:
+ format = LDBBSIFR_EN | (ovl->alpha << LDBBSIFR_LAY_SHIFT);
+ break;
+
+ case LCDC_OVERLAY_ROP3:
+ format = LDBBSIFR_EN | LDBBSIFR_BRSEL
+ | (ovl->rop3 << LDBBSIFR_ROP3_SHIFT);
+ break;
+ }
+
+ switch (ovl->format->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV42:
+ format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ format |= LDBBSIFR_SWPL;
+ break;
+ }
+
+ switch (ovl->format->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
+ break;
+ }
+
+ lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index));
+
+ lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), format);
+
+ lcdc_write_overlay(ovl, LDBnBSSZR(ovl->index),
+ (ovl->yres << LDBBSSZR_BVSS_SHIFT) |
+ (ovl->xres << LDBBSSZR_BHSS_SHIFT));
+ lcdc_write_overlay(ovl, LDBnBLOCR(ovl->index),
+ (ovl->pos_y << LDBBLOCR_CVLC_SHIFT) |
+ (ovl->pos_x << LDBBLOCR_CHLC_SHIFT));
+ lcdc_write_overlay(ovl, LDBnBSMWR(ovl->index),
+ ovl->pitch << LDBBSMWR_BSMW_SHIFT);
+
+ lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y);
+ lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c);
+
+ lcdc_write(ovl->channel->lcdc, LDBCR,
+ LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index));
+}
+
/*
- * __sh_mobile_lcdc_start - Configure and tart the LCDC
+ * __sh_mobile_lcdc_start - Configure and start the LCDC
* @priv: LCDC device
*
* Configure all enabled channels and start the LCDC device. All external
@@ -839,27 +1104,25 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* Compute frame buffer base address and pitch for each channel. */
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
int pixelformat;
- void *meram;
+ void *cache;
ch = &priv->ch[k];
if (!ch->enabled)
continue;
ch->base_addr_y = ch->dma_handle;
- ch->base_addr_c = ch->base_addr_y + ch->xres * ch->yres_virtual;
+ ch->base_addr_c = ch->dma_handle
+ + ch->xres_virtual * ch->yres_virtual;
ch->line_size = ch->pitch;
/* Enable MERAM if possible. */
- if (mdev == NULL || mdev->ops == NULL ||
- ch->cfg->meram_cfg == NULL)
+ if (mdev == NULL || ch->cfg->meram_cfg == NULL)
continue;
- /* we need to de-init configured ICBs before we can
- * re-initialize them.
- */
- if (ch->meram) {
- mdev->ops->meram_unregister(mdev, ch->meram);
- ch->meram = NULL;
+ /* Free the allocated MERAM cache. */
+ if (ch->cache) {
+ sh_mobile_meram_cache_free(mdev, ch->cache);
+ ch->cache = NULL;
}
switch (ch->format->fourcc) {
@@ -881,17 +1144,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
break;
}
- meram = mdev->ops->meram_register(mdev, ch->cfg->meram_cfg,
+ cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg,
ch->pitch, ch->yres, pixelformat,
&ch->line_size);
- if (!IS_ERR(meram)) {
- mdev->ops->meram_update(mdev, meram,
+ if (!IS_ERR(cache)) {
+ sh_mobile_meram_cache_update(mdev, cache,
ch->base_addr_y, ch->base_addr_c,
&ch->base_addr_y, &ch->base_addr_c);
- ch->meram = meram;
+ ch->cache = cache;
}
}
+ for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) {
+ struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[k];
+ sh_mobile_lcdc_overlay_setup(ovl);
+ }
+
/* Start the LCDC. */
__sh_mobile_lcdc_start(priv);
@@ -953,12 +1221,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_display_off(ch);
- /* disable the meram */
- if (ch->meram) {
- struct sh_mobile_meram_info *mdev;
- mdev = priv->meram_dev;
- mdev->ops->meram_unregister(mdev, ch->meram);
- ch->meram = 0;
+ /* Free the MERAM cache. */
+ if (ch->cache) {
+ sh_mobile_meram_cache_free(priv->meram_dev, ch->cache);
+ ch->cache = 0;
}
}
@@ -975,8 +1241,511 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_clk_off(priv);
}
+static int __sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (var->xres > MAX_XRES || var->yres > MAX_YRES)
+ return -EINVAL;
+
+ /* Make sure the virtual resolution is at least as big as the visible
+ * resolution.
+ */
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
+
+ if (sh_mobile_format_is_fourcc(var)) {
+ const struct sh_mobile_lcdc_format_info *format;
+
+ format = sh_mobile_format_info(var->grayscale);
+ if (format == NULL)
+ return -EINVAL;
+ var->bits_per_pixel = format->bpp;
+
+ /* Default to RGB and JPEG color-spaces for RGB and YUV formats
+ * respectively.
+ */
+ if (!format->yuv)
+ var->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (var->colorspace != V4L2_COLORSPACE_REC709)
+ var->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ if (var->bits_per_pixel <= 16) { /* RGB 565 */
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
+ var->bits_per_pixel = 24;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
+ var->bits_per_pixel = 32;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ } else
+ return -EINVAL;
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+ }
+
+ /* Make sure we don't exceed our allocated memory. */
+ if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
+ info->fix.smem_len)
+ return -EINVAL;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
- * Frame buffer operations
+ * Frame buffer operations - Overlays
+ */
+
+static ssize_t
+overlay_alpha_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->alpha);
+}
+
+static ssize_t
+overlay_alpha_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+ unsigned int alpha;
+ char *endp;
+
+ alpha = simple_strtoul(buf, &endp, 10);
+ if (isspace(*endp))
+ endp++;
+
+ if (endp - buf != count)
+ return -EINVAL;
+
+ if (alpha > 255)
+ return -EINVAL;
+
+ if (ovl->alpha != alpha) {
+ ovl->alpha = alpha;
+
+ if (ovl->mode == LCDC_OVERLAY_BLEND && ovl->enabled)
+ sh_mobile_lcdc_overlay_setup(ovl);
+ }
+
+ return count;
+}
+
+static ssize_t
+overlay_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->mode);
+}
+
+static ssize_t
+overlay_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+ unsigned int mode;
+ char *endp;
+
+ mode = simple_strtoul(buf, &endp, 10);
+ if (isspace(*endp))
+ endp++;
+
+ if (endp - buf != count)
+ return -EINVAL;
+
+ if (mode != LCDC_OVERLAY_BLEND && mode != LCDC_OVERLAY_ROP3)
+ return -EINVAL;
+
+ if (ovl->mode != mode) {
+ ovl->mode = mode;
+
+ if (ovl->enabled)
+ sh_mobile_lcdc_overlay_setup(ovl);
+ }
+
+ return count;
+}
+
+static ssize_t
+overlay_position_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return scnprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->pos_x, ovl->pos_y);
+}
+
+static ssize_t
+overlay_position_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+ char *endp;
+ int pos_x;
+ int pos_y;
+
+ pos_x = simple_strtol(buf, &endp, 10);
+ if (*endp != ',')
+ return -EINVAL;
+
+ pos_y = simple_strtol(endp + 1, &endp, 10);
+ if (isspace(*endp))
+ endp++;
+
+ if (endp - buf != count)
+ return -EINVAL;
+
+ if (ovl->pos_x != pos_x || ovl->pos_y != pos_y) {
+ ovl->pos_x = pos_x;
+ ovl->pos_y = pos_y;
+
+ if (ovl->enabled)
+ sh_mobile_lcdc_overlay_setup(ovl);
+ }
+
+ return count;
+}
+
+static ssize_t
+overlay_rop3_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->rop3);
+}
+
+static ssize_t
+overlay_rop3_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+ unsigned int rop3;
+ char *endp;
+
+ rop3 = simple_strtoul(buf, &endp, 10);
+ if (isspace(*endp))
+ endp++;
+
+ if (endp - buf != count)
+ return -EINVAL;
+
+ if (rop3 > 255)
+ return -EINVAL;
+
+ if (ovl->rop3 != rop3) {
+ ovl->rop3 = rop3;
+
+ if (ovl->mode == LCDC_OVERLAY_ROP3 && ovl->enabled)
+ sh_mobile_lcdc_overlay_setup(ovl);
+ }
+
+ return count;
+}
+
+static const struct device_attribute overlay_sysfs_attrs[] = {
+ __ATTR(ovl_alpha, S_IRUGO|S_IWUSR,
+ overlay_alpha_show, overlay_alpha_store),
+ __ATTR(ovl_mode, S_IRUGO|S_IWUSR,
+ overlay_mode_show, overlay_mode_store),
+ __ATTR(ovl_position, S_IRUGO|S_IWUSR,
+ overlay_position_show, overlay_position_store),
+ __ATTR(ovl_rop3, S_IRUGO|S_IWUSR,
+ overlay_rop3_show, overlay_rop3_store),
+};
+
+static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = {
+ .id = "SH Mobile LCDC",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .accel = FB_ACCEL_NONE,
+ .xpanstep = 1,
+ .ypanstep = 1,
+ .ywrapstep = 0,
+ .capabilities = FB_CAP_FOURCC,
+};
+
+static int sh_mobile_lcdc_overlay_pan(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+ unsigned long base_addr_y;
+ unsigned long base_addr_c;
+ unsigned long y_offset;
+ unsigned long c_offset;
+
+ if (!ovl->format->yuv) {
+ y_offset = (var->yoffset * ovl->xres_virtual + var->xoffset)
+ * ovl->format->bpp / 8;
+ c_offset = 0;
+ } else {
+ unsigned int xsub = ovl->format->bpp < 24 ? 2 : 1;
+ unsigned int ysub = ovl->format->bpp < 16 ? 2 : 1;
+
+ y_offset = var->yoffset * ovl->xres_virtual + var->xoffset;
+ c_offset = var->yoffset / ysub * ovl->xres_virtual * 2 / xsub
+ + var->xoffset * 2 / xsub;
+ }
+
+ /* If the Y offset hasn't changed, the C offset hasn't either. There's
+ * nothing to do in that case.
+ */
+ if (y_offset == ovl->pan_y_offset)
+ return 0;
+
+ /* Set the source address for the next refresh */
+ base_addr_y = ovl->dma_handle + y_offset;
+ base_addr_c = ovl->dma_handle + ovl->xres_virtual * ovl->yres_virtual
+ + c_offset;
+
+ ovl->base_addr_y = base_addr_y;
+ ovl->base_addr_c = base_addr_c;
+ ovl->pan_y_offset = y_offset;
+
+ lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index));
+
+ lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y);
+ lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c);
+
+ lcdc_write(ovl->channel->lcdc, LDBCR,
+ LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index));
+
+ return 0;
+}
+
+static int sh_mobile_lcdc_overlay_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ switch (cmd) {
+ case FBIO_WAITFORVSYNC:
+ return sh_mobile_lcdc_wait_for_vsync(ovl->channel);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static int sh_mobile_lcdc_overlay_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ return __sh_mobile_lcdc_check_var(var, info);
+}
+
+static int sh_mobile_lcdc_overlay_set_par(struct fb_info *info)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ ovl->format =
+ sh_mobile_format_info(sh_mobile_format_fourcc(&info->var));
+
+ ovl->xres = info->var.xres;
+ ovl->xres_virtual = info->var.xres_virtual;
+ ovl->yres = info->var.yres;
+ ovl->yres_virtual = info->var.yres_virtual;
+
+ if (ovl->format->yuv)
+ ovl->pitch = info->var.xres_virtual;
+ else
+ ovl->pitch = info->var.xres_virtual * ovl->format->bpp / 8;
+
+ sh_mobile_lcdc_overlay_setup(ovl);
+
+ info->fix.line_length = ovl->pitch;
+
+ if (sh_mobile_format_is_fourcc(&info->var)) {
+ info->fix.type = FB_TYPE_FOURCC;
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
+
+ return 0;
+}
+
+/* Overlay blanking. Disable the overlay when blanked. */
+static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info)
+{
+ struct sh_mobile_lcdc_overlay *ovl = info->par;
+
+ ovl->enabled = !blank;
+ sh_mobile_lcdc_overlay_setup(ovl);
+
+ /* Prevent the backlight from receiving a blanking event by returning
+ * a non-zero value.
+ */
+ return 1;
+}
+
+static struct fb_ops sh_mobile_lcdc_overlay_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+ .fb_blank = sh_mobile_lcdc_overlay_blank,
+ .fb_pan_display = sh_mobile_lcdc_overlay_pan,
+ .fb_ioctl = sh_mobile_lcdc_overlay_ioctl,
+ .fb_check_var = sh_mobile_lcdc_overlay_check_var,
+ .fb_set_par = sh_mobile_lcdc_overlay_set_par,
+};
+
+static void
+sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl)
+{
+ struct fb_info *info = ovl->info;
+
+ if (info == NULL || info->dev == NULL)
+ return;
+
+ unregister_framebuffer(ovl->info);
+}
+
+static int __devinit
+sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl)
+{
+ struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc;
+ struct fb_info *info = ovl->info;
+ unsigned int i;
+ int ret;
+
+ if (info == NULL)
+ return 0;
+
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ return ret;
+
+ dev_info(lcdc->dev, "registered %s/overlay %u as %dx%d %dbpp.\n",
+ dev_name(lcdc->dev), ovl->index, info->var.xres,
+ info->var.yres, info->var.bits_per_pixel);
+
+ for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) {
+ ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl)
+{
+ struct fb_info *info = ovl->info;
+
+ if (info == NULL || info->device == NULL)
+ return;
+
+ framebuffer_release(info);
+}
+
+static int __devinit
+sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
+{
+ struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc;
+ struct fb_var_screeninfo *var;
+ struct fb_info *info;
+
+ /* Allocate and initialize the frame buffer device. */
+ info = framebuffer_alloc(0, priv->dev);
+ if (info == NULL) {
+ dev_err(priv->dev, "unable to allocate fb_info\n");
+ return -ENOMEM;
+ }
+
+ ovl->info = info;
+
+ info->flags = FBINFO_FLAG_DEFAULT;
+ info->fbops = &sh_mobile_lcdc_overlay_ops;
+ info->device = priv->dev;
+ info->screen_base = ovl->fb_mem;
+ info->par = ovl;
+
+ /* Initialize fixed screen information. Restrict vertical panning to
+ * 2-line steps for NV12 and NV21, and horizontal panning to 2-pixel
+ * steps for all NV formats.
+ */
+ info->fix = sh_mobile_lcdc_overlay_fix;
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "SH Mobile LCDC Overlay %u", ovl->index);
+ info->fix.smem_start = ovl->dma_handle;
+ info->fix.smem_len = ovl->fb_size;
+ info->fix.line_length = ovl->pitch;
+
+ if (ovl->format->yuv)
+ info->fix.visual = FB_VISUAL_FOURCC;
+ else
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+
+ switch (ovl->format->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ info->fix.ypanstep = 2;
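+ /* fall through - NV12 and NV21 also need the 2-pixel horizontal step */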
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ info->fix.xpanstep = 2;
+ }
+
+ /* Initialize variable screen information. */
+ var = &info->var;
+ memset(var, 0, sizeof(*var));
+ var->xres = ovl->xres;
+ var->yres = ovl->yres;
+ var->xres_virtual = ovl->xres_virtual;
+ var->yres_virtual = ovl->yres_virtual;
+ var->activate = FB_ACTIVATE_NOW;
+
+ /* Use the legacy API by default for RGB formats, and the FOURCC API
+ * for YUV formats.
+ */
+ if (!ovl->format->yuv)
+ var->bits_per_pixel = ovl->format->bpp;
+ else
+ var->grayscale = ovl->format->fourcc;
+
+ return sh_mobile_lcdc_overlay_check_var(var, info);
+}
+
+/* -----------------------------------------------------------------------------
+ * Frame buffer operations - main frame buffer
*/
static int sh_mobile_lcdc_setcolreg(u_int regno,
@@ -1003,12 +1772,12 @@ static int sh_mobile_lcdc_setcolreg(u_int regno,
return 0;
}
-static struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
+static const struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
.id = "SH Mobile LCDC",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE,
- .xpanstep = 0,
+ .xpanstep = 1,
.ypanstep = 1,
.ywrapstep = 0,
.capabilities = FB_CAP_FOURCC,
@@ -1035,78 +1804,74 @@ static void sh_mobile_lcdc_imageblit(struct fb_info *info,
sh_mobile_lcdc_deferred_io_touch(info);
}
-static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
+static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct sh_mobile_lcdc_chan *ch = info->par;
struct sh_mobile_lcdc_priv *priv = ch->lcdc;
unsigned long ldrcntr;
- unsigned long new_pan_offset;
unsigned long base_addr_y, base_addr_c;
+ unsigned long y_offset;
unsigned long c_offset;
- if (!ch->format->yuv)
- new_pan_offset = var->yoffset * ch->pitch
- + var->xoffset * (ch->format->bpp / 8);
- else
- new_pan_offset = var->yoffset * ch->pitch + var->xoffset;
+ if (!ch->format->yuv) {
+ y_offset = (var->yoffset * ch->xres_virtual + var->xoffset)
+ * ch->format->bpp / 8;
+ c_offset = 0;
+ } else {
+ unsigned int xsub = ch->format->bpp < 24 ? 2 : 1;
+ unsigned int ysub = ch->format->bpp < 16 ? 2 : 1;
- if (new_pan_offset == ch->pan_offset)
- return 0; /* No change, do nothing */
+ y_offset = var->yoffset * ch->xres_virtual + var->xoffset;
+ c_offset = var->yoffset / ysub * ch->xres_virtual * 2 / xsub
+ + var->xoffset * 2 / xsub;
+ }
- ldrcntr = lcdc_read(priv, _LDRCNTR);
+ /* If the Y offset hasn't changed, the C offset hasn't either. There's
+ * nothing to do in that case.
+ */
+ if (y_offset == ch->pan_y_offset)
+ return 0;
/* Set the source address for the next refresh */
- base_addr_y = ch->dma_handle + new_pan_offset;
- if (ch->format->yuv) {
- /* Set y offset */
- c_offset = var->yoffset * ch->pitch
- * (ch->format->bpp - 8) / 8;
- base_addr_c = ch->dma_handle + ch->xres * ch->yres_virtual
- + c_offset;
- /* Set x offset */
- if (ch->format->fourcc == V4L2_PIX_FMT_NV24)
- base_addr_c += 2 * var->xoffset;
- else
- base_addr_c += var->xoffset;
- }
+ base_addr_y = ch->dma_handle + y_offset;
+ base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual
+ + c_offset;
- if (ch->meram) {
- struct sh_mobile_meram_info *mdev;
-
- mdev = priv->meram_dev;
- mdev->ops->meram_update(mdev, ch->meram,
- base_addr_y, base_addr_c,
- &base_addr_y, &base_addr_c);
- }
+ if (ch->cache)
+ sh_mobile_meram_cache_update(priv->meram_dev, ch->cache,
+ base_addr_y, base_addr_c,
+ &base_addr_y, &base_addr_c);
ch->base_addr_y = base_addr_y;
ch->base_addr_c = base_addr_c;
+ ch->pan_y_offset = y_offset;
lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
if (ch->format->yuv)
lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
+ ldrcntr = lcdc_read(priv, _LDRCNTR);
if (lcdc_chan_is_sublcd(ch))
lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
else
lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS);
- ch->pan_offset = new_pan_offset;
sh_mobile_lcdc_deferred_io_touch(info);
return 0;
}
-static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd,
- unsigned long arg)
+static int sh_mobile_lcdc_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
{
+ struct sh_mobile_lcdc_chan *ch = info->par;
int retval;
switch (cmd) {
case FBIO_WAITFORVSYNC:
- retval = sh_mobile_wait_for_vsync(info->par);
+ retval = sh_mobile_lcdc_wait_for_vsync(ch);
break;
default:
@@ -1158,7 +1923,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
* Locking: both .fb_release() and .fb_open() are called with info->lock held if
* user == 1, or with console sem held, if user == 0.
*/
-static int sh_mobile_release(struct fb_info *info, int user)
+static int sh_mobile_lcdc_release(struct fb_info *info, int user)
{
struct sh_mobile_lcdc_chan *ch = info->par;
@@ -1179,7 +1944,7 @@ static int sh_mobile_release(struct fb_info *info, int user)
return 0;
}
-static int sh_mobile_open(struct fb_info *info, int user)
+static int sh_mobile_lcdc_open(struct fb_info *info, int user)
{
struct sh_mobile_lcdc_chan *ch = info->par;
@@ -1192,7 +1957,8 @@ static int sh_mobile_open(struct fb_info *info, int user)
return 0;
}
-static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
struct sh_mobile_lcdc_chan *ch = info->par;
struct sh_mobile_lcdc_priv *p = ch->lcdc;
@@ -1200,9 +1966,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
unsigned int best_xres = 0;
unsigned int best_yres = 0;
unsigned int i;
-
- if (var->xres > MAX_XRES || var->yres > MAX_YRES)
- return -EINVAL;
+ int ret;
/* If board code provides us with a list of available modes, make sure
* we use one of them. Find the mode closest to the requested one. The
@@ -1237,73 +2001,9 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
var->yres = best_yres;
}
- /* Make sure the virtual resolution is at least as big as the visible
- * resolution.
- */
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
- if (sh_mobile_format_is_fourcc(var)) {
- const struct sh_mobile_lcdc_format_info *format;
-
- format = sh_mobile_format_info(var->grayscale);
- if (format == NULL)
- return -EINVAL;
- var->bits_per_pixel = format->bpp;
-
- /* Default to RGB and JPEG color-spaces for RGB and YUV formats
- * respectively.
- */
- if (!format->yuv)
- var->colorspace = V4L2_COLORSPACE_SRGB;
- else if (var->colorspace != V4L2_COLORSPACE_REC709)
- var->colorspace = V4L2_COLORSPACE_JPEG;
- } else {
- if (var->bits_per_pixel <= 16) { /* RGB 565 */
- var->bits_per_pixel = 16;
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
- var->bits_per_pixel = 24;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
- var->bits_per_pixel = 32;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- } else
- return -EINVAL;
-
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
- }
-
- /* Make sure we don't exceed our allocated memory. */
- if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
- info->fix.smem_len)
- return -EINVAL;
+ ret = __sh_mobile_lcdc_check_var(var, info);
+ if (ret < 0)
+ return ret;
/* only accept the forced_fourcc for dual channel configurations */
if (p->forced_fourcc &&
@@ -1313,7 +2013,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
return 0;
}
-static int sh_mobile_set_par(struct fb_info *info)
+static int sh_mobile_lcdc_set_par(struct fb_info *info)
{
struct sh_mobile_lcdc_chan *ch = info->par;
int ret;
@@ -1329,9 +2029,9 @@ static int sh_mobile_set_par(struct fb_info *info)
ch->yres_virtual = info->var.yres_virtual;
if (ch->format->yuv)
- ch->pitch = info->var.xres;
+ ch->pitch = info->var.xres_virtual;
else
- ch->pitch = info->var.xres * ch->format->bpp / 8;
+ ch->pitch = info->var.xres_virtual * ch->format->bpp / 8;
ret = sh_mobile_lcdc_start(ch->lcdc);
if (ret < 0)
@@ -1383,8 +2083,8 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info)
* mode will reenable the clocks and update the screen in time,
* so it does not need this. */
if (!info->fbdefio) {
- sh_mobile_wait_for_vsync(ch);
- sh_mobile_wait_for_vsync(ch);
+ sh_mobile_lcdc_wait_for_vsync(ch);
+ sh_mobile_lcdc_wait_for_vsync(ch);
}
sh_mobile_lcdc_clk_off(p);
}
@@ -1402,12 +2102,12 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_copyarea = sh_mobile_lcdc_copyarea,
.fb_imageblit = sh_mobile_lcdc_imageblit,
.fb_blank = sh_mobile_lcdc_blank,
- .fb_pan_display = sh_mobile_fb_pan_display,
- .fb_ioctl = sh_mobile_ioctl,
- .fb_open = sh_mobile_open,
- .fb_release = sh_mobile_release,
- .fb_check_var = sh_mobile_check_var,
- .fb_set_par = sh_mobile_set_par,
+ .fb_pan_display = sh_mobile_lcdc_pan,
+ .fb_ioctl = sh_mobile_lcdc_ioctl,
+ .fb_open = sh_mobile_lcdc_open,
+ .fb_release = sh_mobile_lcdc_release,
+ .fb_check_var = sh_mobile_lcdc_check_var,
+ .fb_set_par = sh_mobile_lcdc_set_par,
};
static void
@@ -1514,19 +2214,24 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
else
info->fix.visual = FB_VISUAL_TRUECOLOR;
- if (ch->format->fourcc == V4L2_PIX_FMT_NV12 ||
- ch->format->fourcc == V4L2_PIX_FMT_NV21)
+ switch (ch->format->fourcc) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
info->fix.ypanstep = 2;
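+ /* fall through - NV12 and NV21 also need the 2-pixel horizontal step */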
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ info->fix.xpanstep = 2;
+ }
/* Initialize variable screen information using the first mode as
- * default. The default Y virtual resolution is twice the panel size to
- * allow for double-buffering.
+ * default.
*/
var = &info->var;
fb_videomode_to_var(var, mode);
var->width = ch->cfg->panel_cfg.width;
var->height = ch->cfg->panel_cfg.height;
- var->yres_virtual = var->yres * 2;
+ var->xres_virtual = ch->xres_virtual;
+ var->yres_virtual = ch->yres_virtual;
var->activate = FB_ACTIVATE_NOW;
/* Use the legacy API by default for RGB formats, and the FOURCC API
@@ -1537,7 +2242,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
else
var->grayscale = ch->format->fourcc;
- ret = sh_mobile_check_var(var, info);
+ ret = sh_mobile_lcdc_check_var(var, info);
if (ret)
return ret;
@@ -1712,15 +2417,27 @@ static const struct fb_videomode default_720p __devinitconst = {
static int sh_mobile_lcdc_remove(struct platform_device *pdev)
{
struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev);
- int i;
+ unsigned int i;
fb_unregister_client(&priv->notifier);
+ for (i = 0; i < ARRAY_SIZE(priv->overlays); i++)
+ sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]);
for (i = 0; i < ARRAY_SIZE(priv->ch); i++)
sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]);
sh_mobile_lcdc_stop(priv);
+ for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) {
+ struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i];
+
+ sh_mobile_lcdc_overlay_fb_cleanup(ovl);
+
+ if (ovl->fb_mem)
+ dma_free_coherent(&pdev->dev, ovl->fb_size,
+ ovl->fb_mem, ovl->dma_handle);
+ }
+
for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
struct sh_mobile_lcdc_chan *ch = &priv->ch[i];
@@ -1737,8 +2454,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
- if (priv->ch[i].bl)
- sh_mobile_lcdc_bl_remove(priv->ch[i].bl);
+ struct sh_mobile_lcdc_chan *ch = &priv->ch[i];
+
+ if (ch->bl)
+ sh_mobile_lcdc_bl_remove(ch->bl);
+ mutex_destroy(&ch->open_lock);
}
if (priv->dot_clk) {
@@ -1796,6 +2516,61 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan *
}
static int __devinit
+sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv,
+ struct sh_mobile_lcdc_overlay *ovl)
+{
+ const struct sh_mobile_lcdc_format_info *format;
+ int ret;
+
+ if (ovl->cfg->fourcc == 0)
+ return 0;
+
+ /* Validate the format. */
+ format = sh_mobile_format_info(ovl->cfg->fourcc);
+ if (format == NULL) {
+ dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc);
+ return -EINVAL;
+ }
+
+ ovl->enabled = false;
+ ovl->mode = LCDC_OVERLAY_BLEND;
+ ovl->alpha = 255;
+ ovl->rop3 = 0;
+ ovl->pos_x = 0;
+ ovl->pos_y = 0;
+
+ /* The default Y virtual resolution is twice the overlay height to allow
+ * for double-buffering.
+ */
+ ovl->format = format;
+ ovl->xres = ovl->cfg->max_xres;
+ ovl->xres_virtual = ovl->xres;
+ ovl->yres = ovl->cfg->max_yres;
+ ovl->yres_virtual = ovl->yres * 2;
+
+ if (!format->yuv)
+ ovl->pitch = ovl->xres_virtual * format->bpp / 8;
+ else
+ ovl->pitch = ovl->xres_virtual;
+
+ /* Allocate frame buffer memory. */
+ ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres
+ * format->bpp / 8 * 2;
+ ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size,
+ &ovl->dma_handle, GFP_KERNEL);
+ if (!ovl->fb_mem) {
+ dev_err(priv->dev, "unable to allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = sh_mobile_lcdc_overlay_fb_init(ovl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit
sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
struct sh_mobile_lcdc_chan *ch)
{
@@ -1854,7 +2629,9 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
num_modes = cfg->num_modes;
}
- /* Use the first mode as default. */
+ /* Use the first mode as default. The default Y virtual resolution is
+ * twice the panel size to allow for double-buffering.
+ */
ch->format = format;
ch->xres = mode->xres;
ch->xres_virtual = mode->xres;
@@ -1863,10 +2640,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv,
if (!format->yuv) {
ch->colorspace = V4L2_COLORSPACE_SRGB;
- ch->pitch = ch->xres * format->bpp / 8;
+ ch->pitch = ch->xres_virtual * format->bpp / 8;
} else {
ch->colorspace = V4L2_COLORSPACE_REC709;
- ch->pitch = ch->xres;
+ ch->pitch = ch->xres_virtual;
}
ch->display.width = cfg->panel_cfg.width;
@@ -1952,7 +2729,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
init_waitqueue_head(&ch->frame_end_wait);
init_completion(&ch->vsync_completion);
- ch->pan_offset = 0;
/* probe the backlight if there is one defined */
if (ch->cfg->bl_info.max_brightness)
@@ -2003,6 +2779,17 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
+ for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) {
+ struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i];
+
+ ovl->cfg = &pdata->overlays[i];
+ ovl->channel = &priv->ch[0];
+
+ error = sh_mobile_lcdc_overlay_init(priv, ovl);
+ if (error)
+ goto err1;
+ }
+
error = sh_mobile_lcdc_start(priv);
if (error) {
dev_err(&pdev->dev, "unable to start hardware\n");
@@ -2017,6 +2804,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
+ for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) {
+ struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i];
+
+ error = sh_mobile_lcdc_overlay_fb_register(ovl);
+ if (error)
+ goto err1;
+ }
+
/* Failure ignored */
priv->notifier.notifier_call = sh_mobile_lcdc_notify;
fb_register_client(&priv->notifier);
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index 5c3bddd2cb72..0f92f6544b94 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -47,6 +47,7 @@ struct sh_mobile_lcdc_entity {
/*
* struct sh_mobile_lcdc_chan - LCDC display channel
*
+ * @pan_y_offset: Panning linear offset in bytes (luma component)
* @base_addr_y: Frame buffer viewport base address (luma component)
* @base_addr_c: Frame buffer viewport base address (chroma component)
* @pitch: Frame buffer line pitch
@@ -59,7 +60,7 @@ struct sh_mobile_lcdc_chan {
unsigned long *reg_offs;
unsigned long ldmt1r_value;
unsigned long enabled; /* ME and SE in LDCNT2R */
- void *meram;
+ void *cache;
struct mutex open_lock; /* protects the use counter */
int use_count;
@@ -68,7 +69,7 @@ struct sh_mobile_lcdc_chan {
unsigned long fb_size;
dma_addr_t dma_handle;
- unsigned long pan_offset;
+ unsigned long pan_y_offset;
unsigned long frame_end;
wait_queue_head_t frame_end_wait;
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 82ba830bf95d..7a0ba8bb3fbe 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -194,13 +195,28 @@ static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off)
}
/* -----------------------------------------------------------------------------
- * Allocation
+ * MERAM allocation and free
+ */
+
+static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size)
+{
+ return gen_pool_alloc(priv->pool, size);
+}
+
+static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem,
+ size_t size)
+{
+ gen_pool_free(priv->pool, mem, size);
+}
+
+/* -----------------------------------------------------------------------------
+ * LCDC cache planes allocation, init, cleanup and free
*/
/* Allocate ICBs and MERAM for a plane. */
-static int __meram_alloc(struct sh_mobile_meram_priv *priv,
- struct sh_mobile_meram_fb_plane *plane,
- size_t size)
+static int meram_plane_alloc(struct sh_mobile_meram_priv *priv,
+ struct sh_mobile_meram_fb_plane *plane,
+ size_t size)
{
unsigned long mem;
unsigned long idx;
@@ -215,7 +231,7 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv,
return -ENOMEM;
plane->marker = &priv->icbs[idx];
- mem = gen_pool_alloc(priv->pool, size * 1024);
+ mem = meram_alloc(priv, size * 1024);
if (mem == 0)
return -ENOMEM;
@@ -229,11 +245,11 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv,
}
/* Free ICBs and MERAM for a plane. */
-static void __meram_free(struct sh_mobile_meram_priv *priv,
- struct sh_mobile_meram_fb_plane *plane)
+static void meram_plane_free(struct sh_mobile_meram_priv *priv,
+ struct sh_mobile_meram_fb_plane *plane)
{
- gen_pool_free(priv->pool, priv->meram + plane->marker->offset,
- plane->marker->size * 1024);
+ meram_free(priv, priv->meram + plane->marker->offset,
+ plane->marker->size * 1024);
__clear_bit(plane->marker->index, &priv->used_icb);
__clear_bit(plane->cache->index, &priv->used_icb);
@@ -248,62 +264,6 @@ static int is_nvcolor(int cspace)
return 0;
}
-/* Allocate memory for the ICBs and mark them as used. */
-static struct sh_mobile_meram_fb_cache *
-meram_alloc(struct sh_mobile_meram_priv *priv,
- const struct sh_mobile_meram_cfg *cfg,
- int pixelformat)
-{
- struct sh_mobile_meram_fb_cache *cache;
- unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
- int ret;
-
- if (cfg->icb[0].meram_size == 0)
- return ERR_PTR(-EINVAL);
-
- if (nplanes == 2 && cfg->icb[1].meram_size == 0)
- return ERR_PTR(-EINVAL);
-
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (cache == NULL)
- return ERR_PTR(-ENOMEM);
-
- cache->nplanes = nplanes;
-
- ret = __meram_alloc(priv, &cache->planes[0], cfg->icb[0].meram_size);
- if (ret < 0)
- goto error;
-
- cache->planes[0].marker->current_reg = 1;
- cache->planes[0].marker->pixelformat = pixelformat;
-
- if (cache->nplanes == 1)
- return cache;
-
- ret = __meram_alloc(priv, &cache->planes[1], cfg->icb[1].meram_size);
- if (ret < 0) {
- __meram_free(priv, &cache->planes[0]);
- goto error;
- }
-
- return cache;
-
-error:
- kfree(cache);
- return ERR_PTR(-ENOMEM);
-}
-
-/* Unmark the specified ICB as used. */
-static void meram_free(struct sh_mobile_meram_priv *priv,
- struct sh_mobile_meram_fb_cache *cache)
-{
- __meram_free(priv, &cache->planes[0]);
- if (cache->nplanes == 2)
- __meram_free(priv, &cache->planes[1]);
-
- kfree(cache);
-}
-
/* Set the next address to fetch. */
static void meram_set_next_addr(struct sh_mobile_meram_priv *priv,
struct sh_mobile_meram_fb_cache *cache,
@@ -355,10 +315,10 @@ meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
(((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1))
/* Initialize MERAM. */
-static int meram_init(struct sh_mobile_meram_priv *priv,
- struct sh_mobile_meram_fb_plane *plane,
- unsigned int xres, unsigned int yres,
- unsigned int *out_pitch)
+static int meram_plane_init(struct sh_mobile_meram_priv *priv,
+ struct sh_mobile_meram_fb_plane *plane,
+ unsigned int xres, unsigned int yres,
+ unsigned int *out_pitch)
{
struct sh_mobile_meram_icb *marker = plane->marker;
unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres);
@@ -427,8 +387,8 @@ static int meram_init(struct sh_mobile_meram_priv *priv,
return 0;
}
-static void meram_deinit(struct sh_mobile_meram_priv *priv,
- struct sh_mobile_meram_fb_plane *plane)
+static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv,
+ struct sh_mobile_meram_fb_plane *plane)
{
/* disable ICB */
meram_write_icb(priv->base, plane->cache->index, MExxCTL,
@@ -441,20 +401,82 @@ static void meram_deinit(struct sh_mobile_meram_priv *priv,
}
/* -----------------------------------------------------------------------------
- * Registration/unregistration
+ * MERAM operations
*/
-static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
- const struct sh_mobile_meram_cfg *cfg,
- unsigned int xres, unsigned int yres,
- unsigned int pixelformat,
- unsigned int *pitch)
+unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata,
+ size_t size)
+{
+ struct sh_mobile_meram_priv *priv = pdata->priv;
+
+ return meram_alloc(priv, size);
+}
+EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc);
+
+void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem,
+ size_t size)
+{
+ struct sh_mobile_meram_priv *priv = pdata->priv;
+
+ meram_free(priv, mem, size);
+}
+EXPORT_SYMBOL_GPL(sh_mobile_meram_free);
+
+/* Allocate memory for the ICBs and mark them as used. */
+static struct sh_mobile_meram_fb_cache *
+meram_cache_alloc(struct sh_mobile_meram_priv *priv,
+ const struct sh_mobile_meram_cfg *cfg,
+ int pixelformat)
+{
+ unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
+ struct sh_mobile_meram_fb_cache *cache;
+ int ret;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ cache->nplanes = nplanes;
+
+ ret = meram_plane_alloc(priv, &cache->planes[0],
+ cfg->icb[0].meram_size);
+ if (ret < 0)
+ goto error;
+
+ cache->planes[0].marker->current_reg = 1;
+ cache->planes[0].marker->pixelformat = pixelformat;
+
+ if (cache->nplanes == 1)
+ return cache;
+
+ ret = meram_plane_alloc(priv, &cache->planes[1],
+ cfg->icb[1].meram_size);
+ if (ret < 0) {
+ meram_plane_free(priv, &cache->planes[0]);
+ goto error;
+ }
+
+ return cache;
+
+error:
+ kfree(cache);
+ return ERR_PTR(-ENOMEM);
+}
+
+void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata,
+ const struct sh_mobile_meram_cfg *cfg,
+ unsigned int xres, unsigned int yres,
+ unsigned int pixelformat, unsigned int *pitch)
{
struct sh_mobile_meram_fb_cache *cache;
struct sh_mobile_meram_priv *priv = pdata->priv;
struct platform_device *pdev = pdata->pdev;
+ unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
unsigned int out_pitch;
+ if (priv == NULL)
+ return ERR_PTR(-ENODEV);
+
if (pixelformat != SH_MOBILE_MERAM_PF_NV &&
pixelformat != SH_MOBILE_MERAM_PF_NV24 &&
pixelformat != SH_MOBILE_MERAM_PF_RGB)
@@ -469,10 +491,16 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
return ERR_PTR(-EINVAL);
}
+ if (cfg->icb[0].meram_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ if (nplanes == 2 && cfg->icb[1].meram_size == 0)
+ return ERR_PTR(-EINVAL);
+
mutex_lock(&priv->lock);
/* We now register the ICBs and allocate the MERAM regions. */
- cache = meram_alloc(priv, cfg, pixelformat);
+ cache = meram_cache_alloc(priv, cfg, pixelformat);
if (IS_ERR(cache)) {
dev_err(&pdev->dev, "MERAM allocation failed (%ld).",
PTR_ERR(cache));
@@ -480,42 +508,50 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
}
/* initialize MERAM */
- meram_init(priv, &cache->planes[0], xres, yres, &out_pitch);
+ meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch);
*pitch = out_pitch;
if (pixelformat == SH_MOBILE_MERAM_PF_NV)
- meram_init(priv, &cache->planes[1], xres, (yres + 1) / 2,
- &out_pitch);
+ meram_plane_init(priv, &cache->planes[1],
+ xres, (yres + 1) / 2, &out_pitch);
else if (pixelformat == SH_MOBILE_MERAM_PF_NV24)
- meram_init(priv, &cache->planes[1], 2 * xres, (yres + 1) / 2,
- &out_pitch);
+ meram_plane_init(priv, &cache->planes[1],
+ 2 * xres, (yres + 1) / 2, &out_pitch);
err:
mutex_unlock(&priv->lock);
return cache;
}
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc);
-static void
-sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata, void *data)
+void
+sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data)
{
struct sh_mobile_meram_fb_cache *cache = data;
struct sh_mobile_meram_priv *priv = pdata->priv;
mutex_lock(&priv->lock);
- /* deinit & free */
- meram_deinit(priv, &cache->planes[0]);
- if (cache->nplanes == 2)
- meram_deinit(priv, &cache->planes[1]);
+ /* Cleanup and free. */
+ meram_plane_cleanup(priv, &cache->planes[0]);
+ meram_plane_free(priv, &cache->planes[0]);
+
+ if (cache->nplanes == 2) {
+ meram_plane_cleanup(priv, &cache->planes[1]);
+ meram_plane_free(priv, &cache->planes[1]);
+ }
- meram_free(priv, cache);
+ kfree(cache);
mutex_unlock(&priv->lock);
}
-
-static void
-sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data,
- unsigned long base_addr_y, unsigned long base_addr_c,
- unsigned long *icb_addr_y, unsigned long *icb_addr_c)
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free);
+
+void
+sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data,
+ unsigned long base_addr_y,
+ unsigned long base_addr_c,
+ unsigned long *icb_addr_y,
+ unsigned long *icb_addr_c)
{
struct sh_mobile_meram_fb_cache *cache = data;
struct sh_mobile_meram_priv *priv = pdata->priv;
@@ -527,13 +563,7 @@ sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data,
mutex_unlock(&priv->lock);
}
-
-static struct sh_mobile_meram_ops sh_mobile_meram_ops = {
- .module = THIS_MODULE,
- .meram_register = sh_mobile_meram_register,
- .meram_unregister = sh_mobile_meram_unregister,
- .meram_update = sh_mobile_meram_update,
-};
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
/* -----------------------------------------------------------------------------
* Power management
@@ -624,7 +654,6 @@ static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
for (i = 0; i < MERAM_ICB_NUM; ++i)
priv->icbs[i].index = i;
- pdata->ops = &sh_mobile_meram_ops;
pdata->priv = priv;
pdata->pdev = pdev;
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index 26f864289498..5533a32c6ca1 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
result = fb_sys_write(info, buf, count, ppos);
if (result > 0) {
- int start = max((int)(offset / info->fix.line_length) - 1, 0);
+ int start = max((int)(offset / info->fix.line_length), 0);
int lines = min((u32)((result / info->fix.line_length) + 1),
(u32)info->var.yres);
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 90a2e30272ad..2f6b2b835f88 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -1567,6 +1567,18 @@ static void w100_suspend(u32 mode)
val = readl(remapped_regs + mmPLL_CNTL);
val |= 0x00000004; /* bit2=1 */
writel(val, remapped_regs + mmPLL_CNTL);
+
+ writel(0x00000000, remapped_regs + mmLCDD_CNTL1);
+ writel(0x00000000, remapped_regs + mmLCDD_CNTL2);
+ writel(0x00000000, remapped_regs + mmGENLCD_CNTL1);
+ writel(0x00000000, remapped_regs + mmGENLCD_CNTL2);
+ writel(0x00000000, remapped_regs + mmGENLCD_CNTL3);
+
+ val = readl(remapped_regs + mmMEM_EXT_CNTL);
+ val |= 0xF0000000;
+ val &= ~(0x00000001);
+ writel(val, remapped_regs + mmMEM_EXT_CNTL);
+
writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
}
}
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index d90062b211f8..92d08e7fcba2 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -91,6 +91,11 @@ static struct w1_family w1_therm_family_DS28EA00 = {
.fops = &w1_therm_fops,
};
+static struct w1_family w1_therm_family_DS1825 = {
+ .fid = W1_THERM_DS1825,
+ .fops = &w1_therm_fops,
+};
+
struct w1_therm_family_converter
{
u8 broken;
@@ -120,6 +125,10 @@ static struct w1_therm_family_converter w1_therm_families[] = {
.f = &w1_therm_family_DS28EA00,
.convert = w1_DS18B20_convert_temp
},
+ {
+ .f = &w1_therm_family_DS1825,
+ .convert = w1_DS18B20_convert_temp
+ }
};
static inline int w1_DS18B20_convert_temp(u8 rom[9])
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index b00ada44a89b..a1f0ce151d53 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -39,6 +39,7 @@
#define W1_EEPROM_DS2431 0x2D
#define W1_FAMILY_DS2760 0x30
#define W1_FAMILY_DS2780 0x32
+#define W1_THERM_DS1825 0x3B
#define W1_FAMILY_DS2781 0x3D
#define W1_THERM_DS28EA00 0x42
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 3fe82d0e8caa..5b06d31ab6a9 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file,
switch (cmd) {
case WDIOC_GETSUPPORT:
- if (copy_to_user((void *)arg, &ident, sizeof(ident)))
- return -EFAULT;
+ return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
/* XXX: something is clearing TSR */
tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
/* returns CARDRESET if last reset was caused by the WDT */
- return (tmp ? WDIOF_CARDRESET : 0);
+ return put_user((tmp ? WDIOF_CARDRESET : 0), p);
case WDIOC_SETOPTIONS:
if (get_user(tmp, p))
- return -EINVAL;
+ return -EFAULT;
if (tmp == WDIOS_ENABLECARD) {
booke_wdt_ping();
break;
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
index 3f75129eb0a9..f7abbaeebcaf 100644
--- a/drivers/watchdog/da9052_wdt.c
+++ b/drivers/watchdog/da9052_wdt.c
@@ -21,7 +21,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
-#include <linux/delay.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/da9052.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index a73bea4aa1ba..c20f96b579d9 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,6 +24,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <mach/bridge-regs.h>
/*
@@ -192,6 +193,12 @@ static void orion_wdt_shutdown(struct platform_device *pdev)
orion_wdt_stop(&orion_wdt);
}
+static const struct of_device_id orion_wdt_of_match_table[] __devinitdata = {
+ { .compatible = "marvell,orion-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table);
+
static struct platform_driver orion_wdt_driver = {
.probe = orion_wdt_probe,
.remove = __devexit_p(orion_wdt_remove),
@@ -199,6 +206,7 @@ static struct platform_driver orion_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "orion_wdt",
+ .of_match_table = of_match_ptr(orion_wdt_of_match_table),
},
};
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 54984deb8561..ccd6b29e21bf 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -54,10 +54,10 @@ static int sa1100dog_open(struct inode *inode, struct file *file)
return -EBUSY;
/* Activate SA1100 Watchdog timer */
- OSMR3 = OSCR + pre_margin;
- OSSR = OSSR_M3;
- OWER = OWER_WME;
- OIER |= OIER_E3;
+ writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
+ writel_relaxed(OSSR_M3, OSSR);
+ writel_relaxed(OWER_WME, OWER);
+ writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER);
return nonseekable_open(inode, file);
}
@@ -80,7 +80,7 @@ static ssize_t sa1100dog_write(struct file *file, const char __user *data,
{
if (len)
/* Refresh OSMR3 timer. */
- OSMR3 = OSCR + pre_margin;
+ writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
return len;
}
@@ -114,7 +114,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_KEEPALIVE:
- OSMR3 = OSCR + pre_margin;
+ writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
ret = 0;
break;
@@ -129,7 +129,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd,
}
pre_margin = oscr_freq * time;
- OSMR3 = OSCR + pre_margin;
+ writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3);
/*fall through*/
case WDIOC_GETTIMEOUT:
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c
index 181fa8158a8b..858c9714b2f3 100644
--- a/drivers/zorro/zorro.c
+++ b/drivers/zorro/zorro.c
@@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO];
*/
struct zorro_bus {
- struct list_head devices; /* list of devices on this bus */
struct device dev;
};
@@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev)
if (!bus)
return -ENOMEM;
- INIT_LIST_HEAD(&bus->devices);
bus->dev.parent = &pdev->dev;
dev_set_name(&bus->dev, "zorro");
error = device_register(&bus->dev);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index fc06fd27065e..dd6f7ee1e312 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -610,6 +610,9 @@ v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
page, (unsigned long)filp->private_data);
+ /* Update file times before taking page lock */
+ file_update_time(filp);
+
v9inode = V9FS_I(inode);
/* make sure the cache has finished storing the page */
v9fs_fscache_wait_on_page_write(inode, page);
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index 6e0be43ef6ef..a32246b8359e 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -10,30 +10,6 @@
#include <linux/slab.h>
#include "affs.h"
-/* This is, of course, shamelessly stolen from fs/minix */
-
-static const int nibblemap[] = { 0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4 };
-
-static u32
-affs_count_free_bits(u32 blocksize, const void *data)
-{
- const u32 *map;
- u32 free;
- u32 tmp;
-
- map = data;
- free = 0;
- for (blocksize /= 4; blocksize > 0; blocksize--) {
- tmp = *map++;
- while (tmp) {
- free += nibblemap[tmp & 0xf];
- tmp >>= 4;
- }
- }
-
- return free;
-}
-
u32
affs_count_free_blocks(struct super_block *sb)
{
@@ -317,7 +293,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
goto out;
}
pr_debug("AFFS: read bitmap block %d: %d\n", blk, bm->bm_key);
- bm->bm_free = affs_count_free_bits(sb->s_blocksize - 4, bh->b_data + 4);
+ bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);
/* Don't try read the extension if this is the last block,
* but we also need the right bm pointer below
@@ -367,7 +343,7 @@ int affs_init_bitmap(struct super_block *sb, int *flags)
/* recalculate bitmap count for last block */
bm--;
- bm->bm_free = affs_count_free_bits(sb->s_blocksize - 4, bh->b_data + 4);
+ bm->bm_free = memweight(bh->b_data + 4, sb->s_blocksize - 4);
out:
affs_brelse(bh);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68ecef95..842d00048a65 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -94,25 +94,21 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
{
struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
struct list_head *next;
- struct dentry *p, *q;
+ struct dentry *q;
spin_lock(&sbi->lookup_lock);
+ spin_lock(&root->d_lock);
- if (prev == NULL) {
- spin_lock(&root->d_lock);
+ if (prev)
+ next = prev->d_u.d_child.next;
+ else {
prev = dget_dlock(root);
next = prev->d_subdirs.next;
- p = prev;
- goto start;
}
- p = prev;
- spin_lock(&p->d_lock);
-again:
- next = p->d_u.d_child.next;
-start:
+cont:
if (next == &root->d_subdirs) {
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
@@ -121,16 +117,15 @@ start:
q = list_entry(next, struct dentry, d_u.d_child);
spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
- /* Negative dentry - try next */
- if (!simple_positive(q)) {
- spin_unlock(&p->d_lock);
- lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
- p = q;
- goto again;
+ /* Already gone or negative dentry (under construction) - try next */
+ if (q->d_count == 0 || !simple_positive(q)) {
+ spin_unlock(&q->d_lock);
+ next = q->d_u.d_child.next;
+ goto cont;
}
dget_dlock(q);
spin_unlock(&q->d_lock);
- spin_unlock(&p->d_lock);
+ spin_unlock(&root->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
@@ -404,11 +399,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
DPRINTK("checking mountpoint %p %.*s",
dentry, (int)dentry->d_name.len, dentry->d_name.name);
- /* Path walk currently on this dentry? */
- ino_count = atomic_read(&ino->count) + 2;
- if (dentry->d_count > ino_count)
- goto next;
-
/* Can we umount this guy */
if (autofs4_mount_busy(mnt, dentry))
goto next;
diff --git a/fs/bio.c b/fs/bio.c
index 73922abba832..71072ab99128 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
unsigned int sz = sizeof(struct bio) + extra_size;
struct kmem_cache *slab = NULL;
- struct bio_slab *bslab;
+ struct bio_slab *bslab, *new_bio_slabs;
unsigned int i, entry = -1;
mutex_lock(&bio_slab_lock);
@@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
if (bio_slab_nr == bio_slab_max && entry == -1) {
bio_slab_max <<= 1;
- bio_slabs = krealloc(bio_slabs,
- bio_slab_max * sizeof(struct bio_slab),
- GFP_KERNEL);
- if (!bio_slabs)
+ new_bio_slabs = krealloc(bio_slabs,
+ bio_slab_max * sizeof(struct bio_slab),
+ GFP_KERNEL);
+ if (!new_bio_slabs)
goto out_unlock;
+ bio_slabs = new_bio_slabs;
}
if (entry == -1)
entry = bio_slab_nr++;
@@ -1312,7 +1313,7 @@ EXPORT_SYMBOL(bio_copy_kern);
* Note that this code is very hard to test under normal circumstances because
* direct-io pins the pages with get_user_pages(). This makes
* is_page_cache_freeable return false, and the VM will not clean the pages.
- * But other code (eg, pdflush) could clean the pages if they are mapped
+ * But other code (eg, flusher threads) could clean the pages if they are mapped
* pagecache.
*
* Simply disabling the call to bio_set_pages_dirty() is a good way to test the
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1e519195d45b..38e721b35d45 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
+ blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
@@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
+ blk_finish_plug(&plug);
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 0c4fa2befae7..d7fcdba141a2 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o
+ reada.o backref.o ulist.o qgroup.o send.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 42704149b723..58b7d14b08ee 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
work->ordered_func(work);
- /* now take the lock again and call the freeing code */
+ /* now take the lock again and drop our item from the list */
spin_lock(&workers->order_lock);
list_del(&work->order_list);
+ spin_unlock(&workers->order_lock);
+
+ /*
+ * we don't want to call the ordered free functions
+ * with the lock held though
+ */
work->ordered_free(work);
+ spin_lock(&workers->order_lock);
}
spin_unlock(&workers->order_lock);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index a383c18e74e8..ff6475f409d6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -773,9 +773,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
*/
static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist *refs, struct ulist *roots,
- const u64 *extent_item_pos)
+ u64 time_seq, struct ulist *refs,
+ struct ulist *roots, const u64 *extent_item_pos)
{
struct btrfs_key key;
struct btrfs_path *path;
@@ -837,7 +836,7 @@ again:
btrfs_put_delayed_ref(&head->node);
goto again;
}
- ret = __add_delayed_refs(head, delayed_ref_seq,
+ ret = __add_delayed_refs(head, time_seq,
&prefs_delayed);
mutex_unlock(&head->mutex);
if (ret) {
@@ -981,8 +980,7 @@ static void free_leaf_list(struct ulist *blocks)
*/
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **leafs,
+ u64 time_seq, struct ulist **leafs,
const u64 *extent_item_pos)
{
struct ulist *tmp;
@@ -997,7 +995,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
- ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+ ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, *leafs, tmp, extent_item_pos);
ulist_free(tmp);
@@ -1024,8 +1022,7 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
*/
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **roots)
+ u64 time_seq, struct ulist **roots)
{
struct ulist *tmp;
struct ulist_node *node = NULL;
@@ -1043,7 +1040,7 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
ULIST_ITER_INIT(&uiter);
while (1) {
- ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
+ ret = find_parent_nodes(trans, fs_info, bytenr,
time_seq, tmp, *roots, NULL);
if (ret < 0 && ret != -ENOENT) {
ulist_free(tmp);
@@ -1125,10 +1122,10 @@ static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
* required for the path to fit into the buffer. in that case, the returned
* value will be smaller than dest. callers must check this!
*/
-static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
- struct btrfs_inode_ref *iref,
- struct extent_buffer *eb_in, u64 parent,
- char *dest, u32 size)
+char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_inode_ref *iref,
+ struct extent_buffer *eb_in, u64 parent,
+ char *dest, u32 size)
{
u32 len;
int slot;
@@ -1376,11 +1373,9 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
struct ulist *roots = NULL;
struct ulist_node *ref_node = NULL;
struct ulist_node *root_node = NULL;
- struct seq_list seq_elem = {};
struct seq_list tree_mod_seq_elem = {};
struct ulist_iterator ref_uiter;
struct ulist_iterator root_uiter;
- struct btrfs_delayed_ref_root *delayed_refs = NULL;
pr_debug("resolving all inodes for extent %llu\n",
extent_item_objectid);
@@ -1391,16 +1386,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
trans = btrfs_join_transaction(fs_info->extent_root);
if (IS_ERR(trans))
return PTR_ERR(trans);
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
- btrfs_get_delayed_seq(delayed_refs, &seq_elem);
- spin_unlock(&delayed_refs->lock);
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
}
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
- seq_elem.seq, tree_mod_seq_elem.seq, &refs,
+ tree_mod_seq_elem.seq, &refs,
&extent_item_pos);
if (ret)
goto out;
@@ -1408,8 +1398,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
ULIST_ITER_INIT(&ref_uiter);
while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
- seq_elem.seq,
- tree_mod_seq_elem.seq, &roots);
+ tree_mod_seq_elem.seq, &roots);
if (ret)
break;
ULIST_ITER_INIT(&root_uiter);
@@ -1431,7 +1420,6 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
out:
if (!search_commit_root) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
- btrfs_put_delayed_seq(delayed_refs, &seq_elem);
btrfs_end_transaction(trans, fs_info->extent_root);
}
@@ -1450,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
ret = extent_from_logical(fs_info, logical, path,
&found_key);
btrfs_release_path(path);
- if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
- ret = -EINVAL;
if (ret < 0)
return ret;
+ if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+ return -EINVAL;
extent_item_pos = logical - found_key.objectid;
ret = iterate_extent_inodes(fs_info, found_key.objectid,
@@ -1543,7 +1531,7 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
ipath->fspath->bytes_left - s_ptr : 0;
fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
- fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
+ fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
inum, fspath_min, bytes_left);
if (IS_ERR(fspath))
return PTR_ERR(fspath);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index c18d8ac7b795..032f4dc7eab8 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -21,6 +21,7 @@
#include "ioctl.h"
#include "ulist.h"
+#include "extent_io.h"
#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
@@ -58,8 +59,10 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr,
- u64 delayed_ref_seq, u64 time_seq,
- struct ulist **roots);
+ u64 time_seq, struct ulist **roots);
+char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ struct btrfs_inode_ref *iref, struct extent_buffer *eb,
+ u64 parent, char *dest, u32 size);
struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 12394a90d60f..5b2ad6bc4fe7 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -87,9 +87,6 @@ struct btrfs_inode {
/* node for the red-black tree that links inodes in subvolume root */
struct rb_node rb_node;
- /* the space_info for where this inode's data allocations are done */
- struct btrfs_space_info *space_info;
-
unsigned long runtime_flags;
/* full 64 bit generation number, struct vfs_inode doesn't have a big
@@ -191,11 +188,14 @@ static inline void btrfs_i_size_write(struct inode *inode, u64 size)
BTRFS_I(inode)->disk_i_size = size;
}
-static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
- struct inode *inode)
+static inline bool btrfs_is_free_space_inode(struct inode *inode)
{
- if (root == root->fs_info->tree_root ||
- BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ if (root == root->fs_info->tree_root &&
+ btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
+ return true;
+ if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
return true;
return false;
}
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index da6e9364a5e3..9197e2e33407 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1032,6 +1032,7 @@ continue_with_current_leaf_stack_frame:
struct btrfs_disk_key *disk_key;
u8 type;
u32 item_offset;
+ u32 item_size;
if (disk_item_offset + sizeof(struct btrfs_item) >
sf->block_ctx->len) {
@@ -1047,6 +1048,7 @@ leaf_item_out_of_bounce_error:
disk_item_offset,
sizeof(struct btrfs_item));
item_offset = le32_to_cpu(disk_item.offset);
+ item_size = le32_to_cpu(disk_item.size);
disk_key = &disk_item.key;
type = disk_key->type;
@@ -1057,14 +1059,13 @@ leaf_item_out_of_bounce_error:
root_item_offset = item_offset +
offsetof(struct btrfs_leaf, items);
- if (root_item_offset +
- sizeof(struct btrfs_root_item) >
+ if (root_item_offset + item_size >
sf->block_ctx->len)
goto leaf_item_out_of_bounce_error;
btrfsic_read_from_block_data(
sf->block_ctx, &root_item,
root_item_offset,
- sizeof(struct btrfs_root_item));
+ item_size);
next_bytenr = le64_to_cpu(root_item.bytenr);
sf->error =
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 86eff48dab78..43d1c5a3a030 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace)
btrfs_compress_op[idx]->free_workspace(workspace);
atomic_dec(alloc_workspace);
wake:
+ smp_mb();
if (waitqueue_active(workspace_wait))
wake_up(workspace_wait);
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 8206b3900587..6d183f60d63a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -321,7 +321,7 @@ struct tree_mod_root {
struct tree_mod_elem {
struct rb_node node;
u64 index; /* shifted logical */
- struct seq_list elem;
+ u64 seq;
enum mod_log_op op;
/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
@@ -341,20 +341,50 @@ struct tree_mod_elem {
struct tree_mod_root old_root;
};
-static inline void
-__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
+static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
- elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
- list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+ read_lock(&fs_info->tree_mod_log_lock);
}
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem)
+static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
+{
+ read_unlock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
+{
+ write_lock(&fs_info->tree_mod_log_lock);
+}
+
+static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
+{
+ write_unlock(&fs_info->tree_mod_log_lock);
+}
+
+/*
+ * This adds a new blocker to the tree mod log's blocker list if the @elem
+ * passed does not already have a sequence number set. A caller that expects
+ * to record tree modifications should therefore set elem->seq to zero before
+ * calling btrfs_get_tree_mod_seq.
+ * Returns a fresh, unused tree log modification sequence number, even if no new
+ * blocker was added.
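+ *
+ * Typical caller pattern (sketch, not introduced by this change): embed a
+ * zeroed struct seq_list, call btrfs_get_tree_mod_seq() to block pruning of
+ * the tree mod log, and release the blocker later with
+ * btrfs_put_tree_mod_seq(), as done in iterate_extent_inodes().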
+ */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem)
{
- elem->flags = 1;
+ u64 seq;
+
+ tree_mod_log_write_lock(fs_info);
spin_lock(&fs_info->tree_mod_seq_lock);
- __get_tree_mod_seq(fs_info, elem);
+ if (!elem->seq) {
+ elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+ list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
+ }
+ seq = btrfs_inc_tree_mod_seq(fs_info);
spin_unlock(&fs_info->tree_mod_seq_lock);
+ tree_mod_log_write_unlock(fs_info);
+
+ return seq;
}
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
@@ -371,41 +401,40 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
if (!seq_putting)
return;
- BUG_ON(!(elem->flags & 1));
spin_lock(&fs_info->tree_mod_seq_lock);
list_del(&elem->list);
+ elem->seq = 0;
list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
- if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
+ if (cur_elem->seq < min_seq) {
if (seq_putting > cur_elem->seq) {
/*
* blocker with lower sequence number exists, we
* cannot remove anything from the log
*/
- goto out;
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+ return;
}
min_seq = cur_elem->seq;
}
}
+ spin_unlock(&fs_info->tree_mod_seq_lock);
/*
* anything that's lower than the lowest existing (read: blocked)
* sequence number can be removed from the tree.
*/
- write_lock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_write_lock(fs_info);
tm_root = &fs_info->tree_mod_log;
for (node = rb_first(tm_root); node; node = next) {
next = rb_next(node);
tm = container_of(node, struct tree_mod_elem, node);
- if (tm->elem.seq > min_seq)
+ if (tm->seq > min_seq)
continue;
rb_erase(node, tm_root);
- list_del(&tm->elem.list);
kfree(tm);
}
- write_unlock(&fs_info->tree_mod_log_lock);
-out:
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ tree_mod_log_write_unlock(fs_info);
}
/*
@@ -423,11 +452,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
struct rb_node **new;
struct rb_node *parent = NULL;
struct tree_mod_elem *cur;
- int ret = 0;
- BUG_ON(!tm || !tm->elem.seq);
+ BUG_ON(!tm || !tm->seq);
- write_lock(&fs_info->tree_mod_log_lock);
tm_root = &fs_info->tree_mod_log;
new = &tm_root->rb_node;
while (*new) {
@@ -437,88 +464,81 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
new = &((*new)->rb_left);
else if (cur->index > tm->index)
new = &((*new)->rb_right);
- else if (cur->elem.seq < tm->elem.seq)
+ else if (cur->seq < tm->seq)
new = &((*new)->rb_left);
- else if (cur->elem.seq > tm->elem.seq)
+ else if (cur->seq > tm->seq)
new = &((*new)->rb_right);
else {
kfree(tm);
- ret = -EEXIST;
- goto unlock;
+ return -EEXIST;
}
}
rb_link_node(&tm->node, parent, new);
rb_insert_color(&tm->node, tm_root);
-unlock:
- write_unlock(&fs_info->tree_mod_log_lock);
- return ret;
+ return 0;
}
+/*
+ * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
+ * returns zero with the tree_mod_log_lock acquired. The caller must hold
+ * this until all tree mod log insertions are recorded in the rb tree and then
+ * call tree_mod_log_write_unlock() to release.
+ */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb) {
smp_mb();
if (list_empty(&(fs_info)->tree_mod_seq_list))
return 1;
- if (!eb)
- return 0;
- if (btrfs_header_level(eb) == 0)
+ if (eb && btrfs_header_level(eb) == 0)
return 1;
+
+ tree_mod_log_write_lock(fs_info);
+ if (list_empty(&fs_info->tree_mod_seq_list)) {
+ /*
+ * someone emptied the list while we were waiting for the lock.
+ * we must not add to the list when no blocker exists.
+ */
+ tree_mod_log_write_unlock(fs_info);
+ return 1;
+ }
+
return 0;
}
/*
- * This allocates memory and gets a tree modification sequence number when
- * needed.
+ * This allocates memory and gets a tree modification sequence number.
*
- * Returns 0 when no sequence number is needed, < 0 on error.
- * Returns 1 when a sequence number was added. In this case,
- * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
- * after inserting into the rb tree.
+ * Returns <0 on error.
+ * Returns >0 (the added sequence number) on success.
*/
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
struct tree_mod_elem **tm_ret)
{
struct tree_mod_elem *tm;
- int seq;
- if (tree_mod_dont_log(fs_info, NULL))
- return 0;
-
- tm = *tm_ret = kzalloc(sizeof(*tm), flags);
+ /*
+ * once we switch from spin locks to something different, we should
+ * honor the flags parameter here.
+ */
+ tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
if (!tm)
return -ENOMEM;
- tm->elem.flags = 0;
- spin_lock(&fs_info->tree_mod_seq_lock);
- if (list_empty(&fs_info->tree_mod_seq_list)) {
- /*
- * someone emptied the list while we were waiting for the lock.
- * we must not add to the list, because no blocker exists. items
- * are removed from the list only when the existing blocker is
- * removed from the list.
- */
- kfree(tm);
- seq = 0;
- spin_unlock(&fs_info->tree_mod_seq_lock);
- } else {
- __get_tree_mod_seq(fs_info, &tm->elem);
- seq = tm->elem.seq;
- }
-
- return seq;
+ tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+ return tm->seq;
}
-static noinline int
-tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb, int slot,
- enum mod_log_op op, gfp_t flags)
+static inline int
+__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op, gfp_t flags)
{
- struct tree_mod_elem *tm;
int ret;
+ struct tree_mod_elem *tm;
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
+ if (ret < 0)
return ret;
tm->index = eb->start >> PAGE_CACHE_SHIFT;
@@ -530,8 +550,22 @@ tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
tm->slot = slot;
tm->generation = btrfs_node_ptr_generation(eb, slot);
- ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+ return __tree_mod_log_insert(fs_info, tm);
+}
+
+static noinline int
+tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op, gfp_t flags)
+{
+ int ret;
+
+ if (tree_mod_dont_log(fs_info, eb))
+ return 0;
+
+ ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
+
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
@@ -543,6 +577,14 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
}
static noinline int
+tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *eb, int slot,
+ enum mod_log_op op)
+{
+ return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
+}
+
+static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int dst_slot, int src_slot,
int nr_items, gfp_t flags)
@@ -555,14 +597,14 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
return 0;
for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
- ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
+ ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
MOD_LOG_KEY_REMOVE_WHILE_MOVING);
BUG_ON(ret < 0);
}
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
- return ret;
+ if (ret < 0)
+ goto out;
tm->index = eb->start >> PAGE_CACHE_SHIFT;
tm->slot = src_slot;
@@ -571,10 +613,29 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
tm->op = MOD_LOG_MOVE_KEYS;
ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
+static inline void
+__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+{
+ int i;
+ u32 nritems;
+ int ret;
+
+ if (btrfs_header_level(eb) == 0)
+ return;
+
+ nritems = btrfs_header_nritems(eb);
+ for (i = nritems - 1; i >= 0; i--) {
+ ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
+ MOD_LOG_KEY_REMOVE_WHILE_FREEING);
+ BUG_ON(ret < 0);
+ }
+}
+
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *old_root,
@@ -583,9 +644,14 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
struct tree_mod_elem *tm;
int ret;
+ if (tree_mod_dont_log(fs_info, NULL))
+ return 0;
+
+ __tree_mod_log_free_eb(fs_info, old_root);
+
ret = tree_mod_alloc(fs_info, flags, &tm);
- if (ret <= 0)
- return ret;
+ if (ret < 0)
+ goto out;
tm->index = new_root->start >> PAGE_CACHE_SHIFT;
tm->old_root.logical = old_root->start;
@@ -594,7 +660,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
tm->op = MOD_LOG_ROOT_REPLACE;
ret = __tree_mod_log_insert(fs_info, tm);
- spin_unlock(&fs_info->tree_mod_seq_lock);
+out:
+ tree_mod_log_write_unlock(fs_info);
return ret;
}
@@ -608,7 +675,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
struct tree_mod_elem *found = NULL;
u64 index = start >> PAGE_CACHE_SHIFT;
- read_lock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_read_lock(fs_info);
tm_root = &fs_info->tree_mod_log;
node = tm_root->rb_node;
while (node) {
@@ -617,18 +684,18 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
node = node->rb_left;
} else if (cur->index > index) {
node = node->rb_right;
- } else if (cur->elem.seq < min_seq) {
+ } else if (cur->seq < min_seq) {
node = node->rb_left;
} else if (!smallest) {
/* we want the node with the highest seq */
if (found)
- BUG_ON(found->elem.seq > cur->elem.seq);
+ BUG_ON(found->seq > cur->seq);
found = cur;
node = node->rb_left;
- } else if (cur->elem.seq > min_seq) {
+ } else if (cur->seq > min_seq) {
/* we want the node with the smallest seq */
if (found)
- BUG_ON(found->elem.seq < cur->elem.seq);
+ BUG_ON(found->seq < cur->seq);
found = cur;
node = node->rb_right;
} else {
@@ -636,7 +703,7 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
break;
}
}
- read_unlock(&fs_info->tree_mod_log_lock);
+ tree_mod_log_read_unlock(fs_info);
return found;
}
@@ -664,7 +731,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
-static inline void
+static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
struct extent_buffer *src, unsigned long dst_offset,
unsigned long src_offset, int nr_items)
@@ -675,18 +742,23 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
if (tree_mod_dont_log(fs_info, NULL))
return;
- if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
+ if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
+ tree_mod_log_write_unlock(fs_info);
return;
+ }
- /* speed this up by single seq for all operations? */
for (i = 0; i < nr_items; i++) {
- ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
- MOD_LOG_KEY_REMOVE);
+ ret = tree_mod_log_insert_key_locked(fs_info, src,
+ i + src_offset,
+ MOD_LOG_KEY_REMOVE);
BUG_ON(ret < 0);
- ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
- MOD_LOG_KEY_ADD);
+ ret = tree_mod_log_insert_key_locked(fs_info, dst,
+ i + dst_offset,
+ MOD_LOG_KEY_ADD);
BUG_ON(ret < 0);
}
+
+ tree_mod_log_write_unlock(fs_info);
}
static inline void
@@ -699,7 +771,7 @@ tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
BUG_ON(ret < 0);
}
-static inline void
+static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int slot, int atomic)
@@ -712,30 +784,22 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
BUG_ON(ret < 0);
}
-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb)
+static noinline void
+tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
- int i;
- int ret;
- u32 nritems;
-
if (tree_mod_dont_log(fs_info, eb))
return;
- nritems = btrfs_header_nritems(eb);
- for (i = nritems - 1; i >= 0; i--) {
- ret = tree_mod_log_insert_key(fs_info, eb, i,
- MOD_LOG_KEY_REMOVE_WHILE_FREEING);
- BUG_ON(ret < 0);
- }
+ __tree_mod_log_free_eb(fs_info, eb);
+
+ tree_mod_log_write_unlock(fs_info);
}
-static inline void
+static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
struct extent_buffer *new_root_node)
{
int ret;
- tree_mod_log_free_eb(root->fs_info, root->node);
ret = tree_mod_log_insert_root(root->fs_info, root->node,
new_root_node, GFP_NOFS);
BUG_ON(ret < 0);
@@ -1069,7 +1133,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
unsigned long p_size = sizeof(struct btrfs_key_ptr);
n = btrfs_header_nritems(eb);
- while (tm && tm->elem.seq >= time_seq) {
+ while (tm && tm->seq >= time_seq) {
/*
* all the operations are recorded with the operator used for
* the modification. as we're going backwards, we do the
@@ -2722,6 +2786,80 @@ done:
}
/*
+ * helper to use instead of search slot if no exact match is needed but
+ * instead the next or previous item should be returned.
+ * When find_higher is true, the next higher item is returned; otherwise the
+ * next lower item is returned.
+ * When return_any and find_higher are both true, and no higher item is found,
+ * return the next lower instead.
+ * When return_any is true and find_higher is false, and no lower item is found,
+ * return the next higher instead.
+ * It returns 0 if any item is found, 1 if none is found (tree empty), and
+ * < 0 on error
+ */
+int btrfs_search_slot_for_read(struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_path *p,
+ int find_higher, int return_any)
+{
+ int ret;
+ struct extent_buffer *leaf;
+
+again:
+ ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
+ if (ret <= 0)
+ return ret;
+ /*
+ * a return value of 1 means the path is at the position where the
+ * item should be inserted. Normally this is the next bigger item,
+ * but in case the previous item is the last in a leaf, path points
+ * to the first free slot in the previous leaf, i.e. at an invalid
+ * item.
+ */
+ leaf = p->nodes[0];
+
+ if (find_higher) {
+ if (p->slots[0] >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, p);
+ if (ret <= 0)
+ return ret;
+ if (!return_any)
+ return 1;
+ /*
+ * no higher item found, return the next
+ * lower instead
+ */
+ return_any = 0;
+ find_higher = 0;
+ btrfs_release_path(p);
+ goto again;
+ }
+ } else {
+ if (p->slots[0] == 0) {
+ ret = btrfs_prev_leaf(root, p);
+ if (ret < 0)
+ return ret;
+ if (!ret) {
+ p->slots[0] = btrfs_header_nritems(leaf) - 1;
+ return 0;
+ }
+ if (!return_any)
+ return 1;
+ /*
+ * no lower item found, return the next
+ * higher instead
+ */
+ return_any = 0;
+ find_higher = 1;
+ btrfs_release_path(p);
+ goto again;
+ } else {
+ --p->slots[0];
+ }
+ }
+ return 0;
+}
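A hedged usage sketch of the new helper (example_find_at_or_after and its arguments are assumptions for illustration, not part of this patch):

	static int example_find_at_or_after(struct btrfs_root *root, u64 objectid,
					    u8 type, u64 offset,
					    struct btrfs_key *found)
	{
		struct btrfs_path *path;
		struct btrfs_key key = { .objectid = objectid, .type = type,
					 .offset = offset };
		int ret;

		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;

		/* find_higher=1, return_any=0: 0 on hit, 1 if no higher item exists */
		ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
		if (ret == 0)
			btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

		btrfs_free_path(path);
		return ret;
	}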
+
+/*
* adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
* This is used after shifting pointers to the left, so it stops
@@ -4931,6 +5069,431 @@ out:
return ret;
}
+static void tree_move_down(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level)
+{
+ path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
+ path->slots[*level]);
+ path->slots[*level - 1] = 0;
+ (*level)--;
+}
+
+static int tree_move_next_or_upnext(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level)
+{
+ int ret = 0;
+ int nritems;
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+
+ path->slots[*level]++;
+
+ while (path->slots[*level] == nritems) {
+ if (*level == root_level)
+ return -1;
+
+ /* move upnext */
+ path->slots[*level] = 0;
+ free_extent_buffer(path->nodes[*level]);
+ path->nodes[*level] = NULL;
+ (*level)++;
+ path->slots[*level]++;
+
+ nritems = btrfs_header_nritems(path->nodes[*level]);
+ ret = 1;
+ }
+ return ret;
+}
+
+/*
+ * Returns 1 if it had to move up and next. 0 is returned if it moved only next
+ * or down.
+ */
+static int tree_advance(struct btrfs_root *root,
+ struct btrfs_path *path,
+ int *level, int root_level,
+ int allow_down,
+ struct btrfs_key *key)
+{
+ int ret;
+
+ if (*level == 0 || !allow_down) {
+ ret = tree_move_next_or_upnext(root, path, level, root_level);
+ } else {
+ tree_move_down(root, path, level, root_level);
+ ret = 0;
+ }
+ if (ret >= 0) {
+ if (*level == 0)
+ btrfs_item_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ else
+ btrfs_node_key_to_cpu(path->nodes[*level], key,
+ path->slots[*level]);
+ }
+ return ret;
+}
+
+static int tree_compare_item(struct btrfs_root *left_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ char *tmp_buf)
+{
+ int cmp;
+ int len1, len2;
+ unsigned long off1, off2;
+
+ len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
+ len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
+ if (len1 != len2)
+ return 1;
+
+ off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
+ off2 = btrfs_item_ptr_offset(right_path->nodes[0],
+ right_path->slots[0]);
+
+ read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
+
+ cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
+ if (cmp)
+ return 1;
+ return 0;
+}
+
+#define ADVANCE 1
+#define ADVANCE_ONLY_NEXT -1
+
+/*
+ * This function compares two trees and calls the provided callback for
+ * every changed/new/deleted item it finds.
+ * If shared tree blocks are encountered, whole subtrees are skipped, making
+ * the compare pretty fast on snapshotted subvolumes.
+ *
+ * This currently works on commit roots only. As commit roots are read only,
+ * we don't do any locking. The commit roots are protected with transactions.
+ * Transactions are ended and rejoined when a commit is tried in between.
+ *
+ * This function checks for modifications done to the trees while comparing.
+ * If it detects a change, it aborts immediately.
+ */
+int btrfs_compare_trees(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ btrfs_changed_cb_t changed_cb, void *ctx)
+{
+ int ret;
+ int cmp;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_path *left_path = NULL;
+ struct btrfs_path *right_path = NULL;
+ struct btrfs_key left_key;
+ struct btrfs_key right_key;
+ char *tmp_buf = NULL;
+ int left_root_level;
+ int right_root_level;
+ int left_level;
+ int right_level;
+ int left_end_reached;
+ int right_end_reached;
+ int advance_left;
+ int advance_right;
+ u64 left_blockptr;
+ u64 right_blockptr;
+ u64 left_start_ctransid;
+ u64 right_start_ctransid;
+ u64 ctransid;
+
+ left_path = btrfs_alloc_path();
+ if (!left_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ right_path = btrfs_alloc_path();
+ if (!right_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ left_path->search_commit_root = 1;
+ left_path->skip_locking = 1;
+ right_path->search_commit_root = 1;
+ right_path->skip_locking = 1;
+
+ spin_lock(&left_root->root_times_lock);
+ left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
+ spin_unlock(&left_root->root_times_lock);
+
+ spin_lock(&right_root->root_times_lock);
+ right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
+ spin_unlock(&right_root->root_times_lock);
+
+ trans = btrfs_join_transaction(left_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ /*
+ * Strategy: Go to the first items of both trees. Then do
+ *
+ * If both trees are at level 0
+ * Compare keys of current items
+ * If left < right treat left item as new, advance left tree
+ * and repeat
+ * If left > right treat right item as deleted, advance right tree
+ * and repeat
+ * If left == right do deep compare of items, treat as changed if
+ * needed, advance both trees and repeat
+ * If both trees are at the same level but not at level 0
+ * Compare keys of current nodes/leaves
+ * If left < right advance left tree and repeat
+ * If left > right advance right tree and repeat
+ * If left == right compare blockptrs of the next nodes/leaves
+ * If they match advance both trees but stay at the same level
+ * and repeat
+ * If they don't match advance both trees while allowing to go
+ * deeper and repeat
+ * If tree levels are different
+ * Advance the tree that needs it and repeat
+ *
+ * Advancing a tree means:
+ * If we are at level 0, try to go to the next slot. If that's not
+ * possible, go one level up and repeat. Stop when we find a level
+ * where we can go to the next slot. We may at this point be on a
+ * node or a leaf.
+ *
+ * If we are not at level 0 and not on shared tree blocks, go one
+ * level deeper.
+ *
+ * If we are not at level 0 and on shared tree blocks, go one slot to
+ * the right if possible or go up and right.
+ */
+
+ left_level = btrfs_header_level(left_root->commit_root);
+ left_root_level = left_level;
+ left_path->nodes[left_level] = left_root->commit_root;
+ extent_buffer_get(left_path->nodes[left_level]);
+
+ right_level = btrfs_header_level(right_root->commit_root);
+ right_root_level = right_level;
+ right_path->nodes[right_level] = right_root->commit_root;
+ extent_buffer_get(right_path->nodes[right_level]);
+
+ if (left_level == 0)
+ btrfs_item_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ else
+ btrfs_node_key_to_cpu(left_path->nodes[left_level],
+ &left_key, left_path->slots[left_level]);
+ if (right_level == 0)
+ btrfs_item_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+ else
+ btrfs_node_key_to_cpu(right_path->nodes[right_level],
+ &right_key, right_path->slots[right_level]);
+
+ left_end_reached = right_end_reached = 0;
+ advance_left = advance_right = 0;
+
+ while (1) {
+ /*
+ * We need to make sure the transaction does not get committed
+ * while we do anything on commit roots. This means, we need to
+ * join and leave transactions for every item that we process.
+ */
+ if (trans && btrfs_should_end_transaction(trans, left_root)) {
+ btrfs_release_path(left_path);
+ btrfs_release_path(right_path);
+
+ ret = btrfs_end_transaction(trans, left_root);
+ trans = NULL;
+ if (ret < 0)
+ goto out;
+ }
+ /* now rejoin the transaction */
+ if (!trans) {
+ trans = btrfs_join_transaction(left_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ spin_lock(&left_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&left_root->root_item);
+ spin_unlock(&left_root->root_times_lock);
+ if (ctransid != left_start_ctransid)
+ left_start_ctransid = 0;
+
+ spin_lock(&right_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&right_root->root_item);
+ spin_unlock(&right_root->root_times_lock);
+ if (ctransid != right_start_ctransid)
+ right_start_ctransid = 0;
+
+ if (!left_start_ctransid || !right_start_ctransid) {
+ WARN(1, KERN_WARNING
+ "btrfs: btrfs_compare_tree detected "
+ "a change in one of the trees while "
+ "iterating. This is probably a "
+ "bug.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * the commit root may have changed, so start again
+ * where we stopped
+ */
+ left_path->lowest_level = left_level;
+ right_path->lowest_level = right_level;
+ ret = btrfs_search_slot(NULL, left_root,
+ &left_key, left_path, 0, 0);
+ if (ret < 0)
+ goto out;
+ ret = btrfs_search_slot(NULL, right_root,
+ &right_key, right_path, 0, 0);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (advance_left && !left_end_reached) {
+ ret = tree_advance(left_root, left_path, &left_level,
+ left_root_level,
+ advance_left != ADVANCE_ONLY_NEXT,
+ &left_key);
+ if (ret < 0)
+ left_end_reached = ADVANCE;
+ advance_left = 0;
+ }
+ if (advance_right && !right_end_reached) {
+ ret = tree_advance(right_root, right_path, &right_level,
+ right_root_level,
+ advance_right != ADVANCE_ONLY_NEXT,
+ &right_key);
+ if (ret < 0)
+ right_end_reached = ADVANCE;
+ advance_right = 0;
+ }
+
+ if (left_end_reached && right_end_reached) {
+ ret = 0;
+ goto out;
+ } else if (left_end_reached) {
+ if (right_level == 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_right = ADVANCE;
+ continue;
+ } else if (right_end_reached) {
+ if (left_level == 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_left = ADVANCE;
+ continue;
+ }
+
+ if (left_level == 0 && right_level == 0) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_NEW,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &right_key,
+ BTRFS_COMPARE_TREE_DELETED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ advance_right = ADVANCE;
+ } else {
+ ret = tree_compare_item(left_root, left_path,
+ right_path, tmp_buf);
+ if (ret) {
+ ret = changed_cb(left_root, right_root,
+ left_path, right_path,
+ &left_key,
+ BTRFS_COMPARE_TREE_CHANGED,
+ ctx);
+ if (ret < 0)
+ goto out;
+ }
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ } else if (left_level == right_level) {
+ cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
+ if (cmp < 0) {
+ advance_left = ADVANCE;
+ } else if (cmp > 0) {
+ advance_right = ADVANCE;
+ } else {
+ left_blockptr = btrfs_node_blockptr(
+ left_path->nodes[left_level],
+ left_path->slots[left_level]);
+ right_blockptr = btrfs_node_blockptr(
+ right_path->nodes[right_level],
+ right_path->slots[right_level]);
+ if (left_blockptr == right_blockptr) {
+ /*
+ * As we're on a shared block, don't
+ * descend into it.
+ */
+ advance_left = ADVANCE_ONLY_NEXT;
+ advance_right = ADVANCE_ONLY_NEXT;
+ } else {
+ advance_left = ADVANCE;
+ advance_right = ADVANCE;
+ }
+ }
+ } else if (left_level < right_level) {
+ advance_right = ADVANCE;
+ } else {
+ advance_left = ADVANCE;
+ }
+ }
+
+out:
+ btrfs_free_path(left_path);
+ btrfs_free_path(right_path);
+ kfree(tmp_buf);
+
+ if (trans) {
+ if (!ret)
+ ret = btrfs_end_transaction(trans, left_root);
+ else
+ btrfs_end_transaction(trans, left_root);
+ }
+
+ return ret;
+}
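As an illustration of the callback contract (example_changed_cb, example_count_changes and the counter array are assumptions, not code from this patch), a minimal consumer might look like this:

	/* counts how many items are new, deleted or changed between two commit roots */
	static int example_changed_cb(struct btrfs_root *left_root,
				      struct btrfs_root *right_root,
				      struct btrfs_path *left_path,
				      struct btrfs_path *right_path,
				      struct btrfs_key *key,
				      enum btrfs_compare_tree_result result,
				      void *ctx)
	{
		u64 *counters = ctx;

		counters[result]++;	/* indexed by NEW / DELETED / CHANGED */
		return 0;		/* >= 0 keeps the comparison running */
	}

	static int example_count_changes(struct btrfs_root *old_root,
					 struct btrfs_root *new_root)
	{
		u64 counters[3] = { 0, 0, 0 };

		/* new_root is "left": its extra items are reported as NEW */
		return btrfs_compare_trees(new_root, old_root, example_changed_cb,
					   counters);
	}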
+
/*
* this is similar to btrfs_next_leaf, but does not try to preserve
* and fixup the path. It looks for and returns the next key in the
@@ -5127,6 +5690,7 @@ again:
* locked. To solve this situation, we give up
* on our lock and cycle.
*/
+ free_extent_buffer(next);
btrfs_release_path(path);
cond_resched();
goto again;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fa5c45b39075..0d195b507660 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -91,6 +91,9 @@ struct btrfs_ordered_sum;
/* for storing balance parameters in the root tree */
#define BTRFS_BALANCE_OBJECTID -4ULL
+/* holds quota configuration and tracking */
+#define BTRFS_QUOTA_TREE_OBJECTID 8ULL
+
/* orphan objectid for tracking unlinked/truncated files */
#define BTRFS_ORPHAN_OBJECTID -5ULL
@@ -709,6 +712,36 @@ struct btrfs_root_item {
struct btrfs_disk_key drop_progress;
u8 drop_level;
u8 level;
+
+ /*
+ * The following fields appear after subvol_uuids+subvol_times
+ * were introduced.
+ */
+
+ /*
+ * This generation number is used to test if the new fields are valid
+ * and up to date while reading the root item. Every time the root item
+ * is written out, the "generation" field is copied into this field. If
+ * anyone ever mounted the fs with an older kernel, we will have
+ * mismatching generation values here and thus must invalidate the
+ * new fields. See btrfs_update_root and btrfs_find_last_root for
+ * details.
+ * The offset of generation_v2 is also used as the start for the memset
+ * when invalidating the fields.
+ */
+ __le64 generation_v2;
+ u8 uuid[BTRFS_UUID_SIZE];
+ u8 parent_uuid[BTRFS_UUID_SIZE];
+ u8 received_uuid[BTRFS_UUID_SIZE];
+ __le64 ctransid; /* updated when an inode changes */
+ __le64 otransid; /* trans when created */
+ __le64 stransid; /* trans when sent. non-zero for received subvol */
+ __le64 rtransid; /* trans when received. non-zero for received subvol */
+ struct btrfs_timespec ctime;
+ struct btrfs_timespec otime;
+ struct btrfs_timespec stime;
+ struct btrfs_timespec rtime;
+ __le64 reserved[8]; /* for future */
} __attribute__ ((__packed__));
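To make the invalidation rule in the comment concrete, a hedged sketch of the check it describes (example_invalidate_old_root_item is an assumption; the real logic lives in the root-item code referenced above and is not shown in this hunk):

	static void example_invalidate_old_root_item(struct btrfs_root_item *item)
	{
		/* generation_v2 only matches while new-style kernels write the item */
		if (btrfs_root_generation_v2(item) != btrfs_root_generation(item)) {
			/* an old kernel touched this root item: wipe the new
			 * fields, starting at the offset of generation_v2 */
			memset(&item->generation_v2, 0,
			       sizeof(*item) - offsetof(struct btrfs_root_item,
							generation_v2));
		}
	}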
/*
@@ -883,6 +916,72 @@ struct btrfs_block_group_item {
__le64 flags;
} __attribute__ ((__packed__));
+/*
+ * is subvolume quota turned on?
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0)
+/*
+ * SCANNING is set during the initialization phase
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_SCANNING (1ULL << 1)
+/*
+ * Some qgroup entries are known to be out of date,
+ * either because the configuration has changed in a way that
+ * makes a rescan necessary, or because the fs has been mounted
+ * with a non-qgroup-aware version.
+ * Turning quota off and on again makes it inconsistent, too.
+ */
+#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
+
+#define BTRFS_QGROUP_STATUS_VERSION 1
+
+struct btrfs_qgroup_status_item {
+ __le64 version;
+ /*
+ * the generation is updated during every commit. As older
+ * versions of btrfs are not aware of qgroups, it will be
+ * possible to detect inconsistencies by checking the
+ * generation at mount time
+ */
+ __le64 generation;
+
+ /* flag definitions see above */
+ __le64 flags;
+
+ /*
+ * only used during scanning to record the progress
+ * of the scan. It contains a logical address
+ */
+ __le64 scan;
+} __attribute__ ((__packed__));
+
+struct btrfs_qgroup_info_item {
+ __le64 generation;
+ __le64 rfer;
+ __le64 rfer_cmpr;
+ __le64 excl;
+ __le64 excl_cmpr;
+} __attribute__ ((__packed__));
+
+/* flags definition for qgroup limits */
+#define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0)
+#define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1)
+#define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2)
+#define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3)
+#define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4)
+#define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5)
+
+struct btrfs_qgroup_limit_item {
+ /*
+ * only updated when any of the other values change
+ */
+ __le64 flags;
+ __le64 max_rfer;
+ __le64 max_excl;
+ __le64 rsv_rfer;
+ __le64 rsv_excl;
+} __attribute__ ((__packed__));
+
struct btrfs_space_info {
u64 flags;
@@ -1030,6 +1129,13 @@ struct btrfs_block_group_cache {
struct list_head cluster_list;
};
+/* delayed seq elem */
+struct seq_list {
+ struct list_head list;
+ u64 seq;
+};
+
+/* fs_info */
struct reloc_control;
struct btrfs_device;
struct btrfs_fs_devices;
@@ -1044,6 +1150,7 @@ struct btrfs_fs_info {
struct btrfs_root *dev_root;
struct btrfs_root *fs_root;
struct btrfs_root *csum_root;
+ struct btrfs_root *quota_root;
/* the log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
@@ -1144,6 +1251,7 @@ struct btrfs_fs_info {
spinlock_t tree_mod_seq_lock;
atomic_t tree_mod_seq;
struct list_head tree_mod_seq_list;
+ struct seq_list tree_mod_seq_elem;
/* this protects tree_mod_log */
rwlock_t tree_mod_log_lock;
@@ -1240,6 +1348,8 @@ struct btrfs_fs_info {
*/
struct list_head space_info;
+ struct btrfs_space_info *data_sinfo;
+
struct reloc_control *reloc_ctl;
spinlock_t delalloc_lock;
@@ -1296,6 +1406,29 @@ struct btrfs_fs_info {
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
#endif
+ /*
+ * quota information
+ */
+ unsigned int quota_enabled:1;
+
+ /*
+ * quota_enabled only changes state after a commit. This holds the
+ * next state.
+ */
+ unsigned int pending_quota_state:1;
+
+ /* is qgroup tracking in a consistent state? */
+ u64 qgroup_flags;
+
+ /* holds configuration and tracking. Protected by qgroup_lock */
+ struct rb_root qgroup_tree;
+ spinlock_t qgroup_lock;
+
+ /* list of dirty qgroups to be written at next commit */
+ struct list_head dirty_qgroups;
+
+ /* used by btrfs_qgroup_record_ref for an efficient tree traversal */
+ u64 qgroup_seq;
/* filesystem state */
u64 fs_state;
@@ -1416,6 +1549,8 @@ struct btrfs_root {
dev_t anon_dev;
int force_cow;
+
+ spinlock_t root_times_lock;
};
struct btrfs_ioctl_defrag_range_args {
@@ -1525,6 +1660,30 @@ struct btrfs_ioctl_defrag_range_args {
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
+/*
+ * Records the overall state of the qgroups.
+ * There's only one instance of this key present:
+ * (0, BTRFS_QGROUP_STATUS_KEY, 0)
+ */
+#define BTRFS_QGROUP_STATUS_KEY 240
+/*
+ * Records the currently used space of the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_INFO_KEY 242
+/*
+ * Contains the user configured limits for the qgroup.
+ * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid).
+ */
+#define BTRFS_QGROUP_LIMIT_KEY 244
+/*
+ * Records the child-parent relationship of qgroups. For
+ * each relation, 2 keys are present:
+ * (childid, BTRFS_QGROUP_RELATION_KEY, parentid)
+ * (parentid, BTRFS_QGROUP_RELATION_KEY, childid)
+ */
+#define BTRFS_QGROUP_RELATION_KEY 246
+
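Purely to illustrate the key layout documented above (example_qgroup_info_key is an assumption, not part of the patch), the lookup key for one qgroup's info item would be built like this:

	static void example_qgroup_info_key(struct btrfs_key *key, u64 qgroupid)
	{
		key->objectid = 0;			/* status/info/limit items live at objectid 0 */
		key->type = BTRFS_QGROUP_INFO_KEY;	/* one info item per qgroup */
		key->offset = qgroupid;
	}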
#define BTRFS_BALANCE_ITEM_KEY 248
/*
@@ -1621,13 +1780,54 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
offsetof(type, member), \
sizeof(((type *)0)->member)))
-#ifndef BTRFS_SETGET_FUNCS
+#define DECLARE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, \
+ struct btrfs_map_token *token); \
+void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val, \
+ struct btrfs_map_token *token); \
+static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off) \
+{ \
+ return btrfs_get_token_##bits(eb, ptr, off, NULL); \
+} \
+static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, u##bits val) \
+{ \
+ btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
+}
+
+DECLARE_BTRFS_SETGET_BITS(8)
+DECLARE_BTRFS_SETGET_BITS(16)
+DECLARE_BTRFS_SETGET_BITS(32)
+DECLARE_BTRFS_SETGET_BITS(64)
+
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
-u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, struct btrfs_map_token *token); \
-void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token);\
-void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
-#endif
+static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ return btrfs_get_##bits(eb, s, offsetof(type, member)); \
+} \
+static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
+ u##bits val) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ btrfs_set_##bits(eb, s, offsetof(type, member), val); \
+} \
+static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
+ struct btrfs_map_token *token) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
+} \
+static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
+ type *s, u##bits val, \
+ struct btrfs_map_token *token) \
+{ \
+ BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
+ btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
+}
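A brief sketch of what the reworked macro now generates, using the qgroup_status_flags invocation added later in this patch as the example (argument lists abbreviated for readability):

	/*
	 *   BTRFS_SETGET_FUNCS(qgroup_status_flags,
	 *                      struct btrfs_qgroup_status_item, flags, 64);
	 *
	 * expands into four static inlines instead of extern declarations:
	 *
	 *   u64  btrfs_qgroup_status_flags(eb, s);
	 *   void btrfs_set_qgroup_status_flags(eb, s, val);
	 *   u64  btrfs_token_qgroup_status_flags(eb, s, token);
	 *   void btrfs_set_token_qgroup_status_flags(eb, s, val, token);
	 */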
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
static inline u##bits btrfs_##name(struct extent_buffer *eb) \
@@ -2189,6 +2389,16 @@ BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64);
BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64);
BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item,
last_snapshot, 64);
+BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item,
+ generation_v2, 64);
+BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item,
+ ctransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item,
+ otransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
+ stransid, 64);
+BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
+ rtransid, 64);
static inline bool btrfs_root_readonly(struct btrfs_root *root)
{
@@ -2465,6 +2675,49 @@ static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
sizeof(val));
}
+/* btrfs_qgroup_status_item */
+BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item,
+ version, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
+ flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_status_scan, struct btrfs_qgroup_status_item,
+ scan, 64);
+
+/* btrfs_qgroup_info_item */
+BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
+ generation, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item,
+ rfer_cmpr, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item,
+ excl_cmpr, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation,
+ struct btrfs_qgroup_info_item, generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item,
+ rfer, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr,
+ struct btrfs_qgroup_info_item, rfer_cmpr, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item,
+ excl, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr,
+ struct btrfs_qgroup_info_item, excl_cmpr, 64);
+
+/* btrfs_qgroup_limit_item */
+BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item,
+ flags, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item,
+ max_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item,
+ max_excl, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item,
+ rsv_rfer, 64);
+BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item,
+ rsv_excl, 64);
+
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
return sb->s_fs_info;
@@ -2607,7 +2860,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
-void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
int btrfs_check_data_free_space(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
@@ -2661,6 +2913,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
+int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
int level, int *slot);
@@ -2680,6 +2934,21 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
struct btrfs_path *path, int cache_only,
u64 min_trans);
+enum btrfs_compare_tree_result {
+ BTRFS_COMPARE_TREE_NEW,
+ BTRFS_COMPARE_TREE_DELETED,
+ BTRFS_COMPARE_TREE_CHANGED,
+};
+typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ struct btrfs_key *key,
+ enum btrfs_compare_tree_result result,
+ void *ctx);
+int btrfs_compare_trees(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ btrfs_changed_cb_t cb, void *ctx);
int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
@@ -2711,6 +2980,9 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
struct btrfs_path *p, u64 time_seq);
+int btrfs_search_slot_for_read(struct btrfs_root *root,
+ struct btrfs_key *key, struct btrfs_path *p,
+ int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, int cache_only, u64 *last_ret,
@@ -2793,11 +3065,22 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
kfree(fs_info->chunk_root);
kfree(fs_info->dev_root);
kfree(fs_info->csum_root);
+ kfree(fs_info->quota_root);
kfree(fs_info->super_copy);
kfree(fs_info->super_for_commit);
kfree(fs_info);
}
+/* tree mod log functions from ctree.c */
+u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem);
+void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
+ struct seq_list *elem);
+static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
+{
+ return atomic_inc_return(&fs_info->tree_mod_seq);
+}
+
/* root-item.c */
int btrfs_find_root_ref(struct btrfs_root *tree_root,
struct btrfs_path *path,
@@ -2819,6 +3102,9 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_root_item *item);
+void btrfs_read_root_item(struct btrfs_root *root,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
@@ -2826,6 +3112,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
+void btrfs_update_root_times(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root);
/* dir-item.c */
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
@@ -2903,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 logical_offset, u32 *dst);
+ struct bio *bio, u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@@ -3053,14 +3341,43 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options);
int btrfs_sync_fs(struct super_block *sb, int wait);
+
+#ifdef CONFIG_PRINTK
+__printf(2, 3)
void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...);
+#else
+static inline __printf(2, 3)
+void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
+{
+}
+#endif
+
+__printf(5, 6)
void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
+
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *function,
unsigned int line, int errno);
+#define btrfs_set_fs_incompat(__fs_info, opt) \
+ __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
+ u64 flag)
+{
+ struct btrfs_super_block *disk_super;
+ u64 features;
+
+ disk_super = fs_info->super_copy;
+ features = btrfs_super_incompat_flags(disk_super);
+ if (!(features & flag)) {
+ features |= flag;
+ btrfs_set_super_incompat_flags(disk_super, features);
+ }
+}
+
#define btrfs_abort_transaction(trans, root, errno) \
do { \
__btrfs_abort_transaction(trans, root, __func__, \
@@ -3080,6 +3397,7 @@ do { \
(errno), fmt, ##args); \
} while (0)
+__printf(5, 6)
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
@@ -3156,17 +3474,49 @@ void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
u64 start, int err);
-/* delayed seq elem */
-struct seq_list {
+/* qgroup.c */
+struct qgroup_update {
struct list_head list;
- u64 seq;
- u32 flags;
+ struct btrfs_delayed_ref_node *node;
+ struct btrfs_delayed_extent_op *extent_op;
};
-void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem);
-void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
- struct seq_list *elem);
+int btrfs_quota_enable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_quota_disable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_quota_rescan(struct btrfs_fs_info *fs_info);
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst);
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst);
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ char *name);
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid);
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ struct btrfs_qgroup_limit *limit);
+int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
+void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
+struct btrfs_delayed_extent_op;
+int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op);
+int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op);
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+ struct btrfs_qgroup_inherit *inherit);
+int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes);
+void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes);
+
+void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
static inline int is_fstree(u64 rootid)
{
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 2399f4086915..07d5eeb1e6f1 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -62,6 +62,7 @@ static inline void btrfs_init_delayed_node(
INIT_LIST_HEAD(&delayed_node->n_list);
INIT_LIST_HEAD(&delayed_node->p_list);
delayed_node->bytes_reserved = 0;
+ memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}
static inline int btrfs_is_continuous_delayed_item(
@@ -511,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
rb_erase(&delayed_item->rb_node, root);
delayed_item->delayed_node->count--;
- atomic_dec(&delayed_root->items);
- if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+ if (atomic_dec_return(&delayed_root->items) <
+ BTRFS_DELAYED_BACKGROUND &&
waitqueue_active(&delayed_root->wait))
wake_up(&delayed_root->wait);
}
@@ -1027,9 +1028,10 @@ do_again:
btrfs_release_delayed_item(prev);
ret = 0;
btrfs_release_path(path);
- if (curr)
+ if (curr) {
+ mutex_unlock(&node->mutex);
goto do_again;
- else
+ } else
goto delete_fail;
}
@@ -1054,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
delayed_node->count--;
delayed_root = delayed_node->root->fs_info->delayed_root;
- atomic_dec(&delayed_root->items);
- if (atomic_read(&delayed_root->items) <
+ if (atomic_dec_return(&delayed_root->items) <
BTRFS_DELAYED_BACKGROUND &&
waitqueue_active(&delayed_root->wait))
wake_up(&delayed_root->wait);
@@ -1113,8 +1114,8 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
* Returns < 0 on error and returns with an aborted transaction with any
* outstanding delayed items cleaned up.
*/
-int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
+static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr)
{
struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
@@ -1122,6 +1123,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret = 0;
+ bool count = (nr > 0);
if (trans->aborted)
return -EIO;
@@ -1137,7 +1139,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
delayed_root = btrfs_get_delayed_root(root);
curr_node = btrfs_first_delayed_node(delayed_root);
- while (curr_node) {
+ while (curr_node && (!count || (count && nr--))) {
curr_root = curr_node->root;
ret = btrfs_insert_delayed_items(trans, path, curr_root,
curr_node);
@@ -1149,6 +1151,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
path, curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
+ curr_node = NULL;
btrfs_abort_transaction(trans, root, ret);
break;
}
@@ -1158,12 +1161,26 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
btrfs_release_delayed_node(prev_node);
}
+ if (curr_node)
+ btrfs_release_delayed_node(curr_node);
btrfs_free_path(path);
trans->block_rsv = block_rsv;
return ret;
}
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ return __btrfs_run_delayed_items(trans, root, -1);
+}
+
+int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr)
+{
+ return __btrfs_run_delayed_items(trans, root, nr);
+}
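A hedged usage sketch of the new _nr variant (example_flush_batch and the batch size of 16 are assumptions, not values from this patch):

	static int example_flush_batch(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
	{
		/* process at most 16 delayed nodes instead of draining all of them */
		return btrfs_run_delayed_items_nr(trans, root, 16);
	}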
+
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_delayed_node *node)
{
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f5aa4023d3e1..4f808e1baeed 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -107,6 +107,8 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
+int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int nr);
void btrfs_balance_delayed_items(struct btrfs_root *root);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 13ae7b04790e..ae9411773397 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -38,17 +38,14 @@
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
struct btrfs_delayed_tree_ref *ref1)
{
- if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- } else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
- }
+ if (ref1->root < ref2->root)
+ return -1;
+ if (ref1->root > ref2->root)
+ return 1;
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
return 0;
}
@@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
* type of the delayed backrefs and content of delayed backrefs.
*/
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
- struct btrfs_delayed_ref_node *ref1)
+ struct btrfs_delayed_ref_node *ref1,
+ bool compare_seq)
{
if (ref1->bytenr < ref2->bytenr)
return -1;
@@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
if (ref1->type > ref2->type)
return 1;
/* merging of sequenced refs is not allowed */
- if (ref1->seq < ref2->seq)
- return -1;
- if (ref1->seq > ref2->seq)
- return 1;
+ if (compare_seq) {
+ if (ref1->seq < ref2->seq)
+ return -1;
+ if (ref1->seq > ref2->seq)
+ return 1;
+ }
if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
rb_node);
- cmp = comp_entry(entry, ins);
+ cmp = comp_entry(entry, ins, 1);
if (cmp < 0)
p = &(*p)->rb_left;
else if (cmp > 0)
@@ -233,22 +233,134 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
return 0;
}
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- u64 seq)
+static void inline drop_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_node *ref)
{
- struct seq_list *elem;
+ rb_erase(&ref->rb_node, &delayed_refs->root);
+ ref->in_tree = 0;
+ btrfs_put_delayed_ref(ref);
+ delayed_refs->num_entries--;
+ if (trans->delayed_ref_updates)
+ trans->delayed_ref_updates--;
+}
- assert_spin_locked(&delayed_refs->lock);
- if (list_empty(&delayed_refs->seq_head))
- return 0;
+static int merge_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_node *ref, u64 seq)
+{
+ struct rb_node *node;
+ int merged = 0;
+ int mod = 0;
+ int done = 0;
+
+ node = rb_prev(&ref->rb_node);
+ while (node) {
+ struct btrfs_delayed_ref_node *next;
+
+ next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ node = rb_prev(node);
+ if (next->bytenr != ref->bytenr)
+ break;
+ if (seq && next->seq >= seq)
+ break;
+ if (comp_entry(ref, next, 0))
+ continue;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+ } else {
+ if (ref->ref_mod < next->ref_mod) {
+ struct btrfs_delayed_ref_node *tmp;
- elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
- if (seq >= elem->seq) {
- pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
- seq, elem->seq, delayed_refs);
- return 1;
+ tmp = ref;
+ ref = next;
+ next = tmp;
+ done = 1;
+ }
+ mod = -next->ref_mod;
+ }
+
+ merged++;
+ drop_delayed_ref(trans, delayed_refs, next);
+ ref->ref_mod += mod;
+ if (ref->ref_mod == 0) {
+ drop_delayed_ref(trans, delayed_refs, ref);
+ break;
+ } else {
+ /*
+ * You can't have multiples of the same ref on a tree
+ * block.
+ */
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ }
+
+ if (done)
+ break;
+ node = rb_prev(&ref->rb_node);
}
- return 0;
+
+ return merged;
+}
+
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct rb_node *node;
+ u64 seq = 0;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ struct seq_list *elem;
+
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ seq = elem->seq;
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+ node = rb_prev(&head->node.rb_node);
+ while (node) {
+ struct btrfs_delayed_ref_node *ref;
+
+ ref = rb_entry(node, struct btrfs_delayed_ref_node,
+ rb_node);
+ if (ref->bytenr != head->node.bytenr)
+ break;
+
+ /* We can't merge refs that are outside of our seq count */
+ if (seq && ref->seq >= seq)
+ break;
+ if (merge_ref(trans, delayed_refs, ref, seq))
+ node = rb_prev(&head->node.rb_node);
+ else
+ node = rb_prev(node);
+ }
+}
+
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq)
+{
+ struct seq_list *elem;
+ int ret = 0;
+
+ spin_lock(&fs_info->tree_mod_seq_lock);
+ if (!list_empty(&fs_info->tree_mod_seq_list)) {
+ elem = list_first_entry(&fs_info->tree_mod_seq_list,
+ struct seq_list, list);
+ if (seq >= elem->seq) {
+ pr_debug("holding back delayed_ref %llu, lowest is "
+ "%llu (%p)\n", seq, elem->seq, delayed_refs);
+ ret = 1;
+ }
+ }
+
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+ return ret;
}
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
@@ -332,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans,
* every changing the extent allocation tree.
*/
existing->ref_mod--;
- if (existing->ref_mod == 0) {
- rb_erase(&existing->rb_node,
- &delayed_refs->root);
- existing->in_tree = 0;
- btrfs_put_delayed_ref(existing);
- delayed_refs->num_entries--;
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
- } else {
+ if (existing->ref_mod == 0)
+ drop_delayed_ref(trans, delayed_refs, existing);
+ else
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
- }
} else {
WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
@@ -525,8 +630,8 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
- if (is_fstree(ref_root))
- seq = inc_delayed_seq(delayed_refs);
+ if (need_ref_seq(for_cow, ref_root))
+ seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
ref->seq = seq;
full_ref = btrfs_delayed_node_to_tree_ref(ref);
@@ -584,8 +689,8 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
- if (is_fstree(ref_root))
- seq = inc_delayed_seq(delayed_refs);
+ if (need_ref_seq(for_cow, ref_root))
+ seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
ref->seq = seq;
full_ref = btrfs_delayed_node_to_data_ref(ref);
@@ -658,10 +763,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action,
for_cow);
- if (!is_fstree(ref_root) &&
- waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
spin_unlock(&delayed_refs->lock);
+ if (need_ref_seq(for_cow, ref_root))
+ btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
return 0;
}
@@ -707,10 +811,9 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
action, for_cow);
- if (!is_fstree(ref_root) &&
- waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
spin_unlock(&delayed_refs->lock);
+ if (need_ref_seq(for_cow, ref_root))
+ btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
return 0;
}
@@ -736,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);
- if (waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
spin_unlock(&delayed_refs->lock);
return 0;
}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 413927fb9957..ab5300595847 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -139,26 +139,6 @@ struct btrfs_delayed_ref_root {
int flushing;
u64 run_delayed_start;
-
- /*
- * seq number of delayed refs. We need to know if a backref was being
- * added before the currently processed ref or afterwards.
- */
- u64 seq;
-
- /*
- * seq_list holds a list of all seq numbers that are currently being
- * added to the list. While walking backrefs (btrfs_find_all_roots,
- * qgroups), which might take some time, no newer ref must be processed,
- * as it might influence the outcome of the walk.
- */
- struct list_head seq_head;
-
- /*
- * when the only refs we have in the list must not be processed, we want
- * to wait for more refs to show up or for the end of backref walking.
- */
- wait_queue_head_t seq_wait;
};
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -187,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
struct btrfs_delayed_extent_op *extent_op);
+void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
@@ -195,34 +179,28 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
struct list_head *cluster, u64 search_start);
-static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
-{
- assert_spin_locked(&delayed_refs->lock);
- ++delayed_refs->seq;
- return delayed_refs->seq;
-}
+int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ u64 seq);
-static inline void
-btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- struct seq_list *elem)
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * this only applies to items in one of the fs-trees. for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
{
- assert_spin_locked(&delayed_refs->lock);
- elem->seq = delayed_refs->seq;
- list_add_tail(&elem->list, &delayed_refs->seq_head);
-}
+ if (for_cow)
+ return 0;
-static inline void
-btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- struct seq_list *elem)
-{
- spin_lock(&delayed_refs->lock);
- list_del(&elem->list);
- wake_up(&delayed_refs->seq_wait);
- spin_unlock(&delayed_refs->lock);
-}
+ if (rootid == BTRFS_FS_TREE_OBJECTID)
+ return 1;
-int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
- u64 seq);
+ if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+ return 1;
+
+ return 0;
+}
/*
* a node might live in a head or a regular ref, this lets you
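
/*
 * Standalone sketch of the need_ref_seq() decision above, with the two
 * object-id constants written out (BTRFS_FS_TREE_OBJECTID is 5ULL and
 * BTRFS_FIRST_FREE_OBJECTID is 256ULL in the headers of this era). Only
 * refs rooted in an fs-tree get a sequence number; COW-only bookkeeping and
 * the internal trees (extent, chunk, ...) are exempt.
 */
#include <stdio.h>
#include <stdint.h>

#define FS_TREE_OBJECTID        5ULL
#define FIRST_FREE_OBJECTID     256ULL

static int need_ref_seq(int for_cow, uint64_t rootid)
{
        if (for_cow)
                return 0;
        if (rootid == FS_TREE_OBJECTID)
                return 1;
        /* subvolumes and snapshots start at FIRST_FREE_OBJECTID; the signed
         * compare rejects the very large "pseudo" root ids near -1ULL */
        return (int64_t)rootid >= (int64_t)FIRST_FREE_OBJECTID;
}

int main(void)
{
        printf("extent tree (2):  %d\n", need_ref_seq(0, 2));   /* 0 */
        printf("fs tree (5):      %d\n", need_ref_seq(0, 5));   /* 1 */
        printf("subvolume (257):  %d\n", need_ref_seq(0, 257)); /* 1 */
        printf("same, for_cow=1:  %d\n", need_ref_seq(1, 257)); /* 0 */
        return 0;
}
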
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 2936ca49b3b4..22e98e04c2ea 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
btree_get_extent, mirror_num);
- if (!ret && !verify_parent_transid(io_tree, eb,
+ if (!ret) {
+ if (!verify_parent_transid(io_tree, eb,
parent_transid, 0))
- break;
+ break;
+ else
+ ret = -EIO;
+ }
/*
* This buffer's crc is fine, but its contents are corrupted, so
@@ -407,7 +411,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
break;
}
- if (failed && !ret)
+ if (failed && !ret && failed_mirror)
repair_eb_io_failure(root, eb, failed_mirror);
return ret;
@@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work)
limit = btrfs_async_submit_limit(fs_info);
limit = limit * 2 / 3;
- atomic_dec(&fs_info->nr_async_submits);
-
- if (atomic_read(&fs_info->nr_async_submits) < limit &&
+ if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
@@ -1114,7 +1116,7 @@ void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
spin_unlock(&root->fs_info->delalloc_lock);
btrfs_panic(root->fs_info, -EOVERFLOW,
"Can't clear %lu bytes from "
- " dirty_mdatadata_bytes (%lu)",
+ " dirty_mdatadata_bytes (%llu)",
buf->len,
root->fs_info->dirty_metadata_bytes);
}
@@ -1182,6 +1184,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->defrag_running = 0;
root->root_key.objectid = objectid;
root->anon_dev = 0;
+
+ spin_lock_init(&root->root_times_lock);
}
static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
@@ -1225,6 +1229,82 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
return root;
}
+struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid)
+{
+ struct extent_buffer *leaf;
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ int ret = 0;
+ u64 bytenr;
+
+ root = btrfs_alloc_root(fs_info);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+
+ __setup_root(tree_root->nodesize, tree_root->leafsize,
+ tree_root->sectorsize, tree_root->stripesize,
+ root, fs_info, objectid);
+ root->root_key.objectid = objectid;
+ root->root_key.type = BTRFS_ROOT_ITEM_KEY;
+ root->root_key.offset = 0;
+
+ leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
+ 0, objectid, NULL, 0, 0, 0);
+ if (IS_ERR(leaf)) {
+ ret = PTR_ERR(leaf);
+ goto fail;
+ }
+
+ bytenr = leaf->start;
+ memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
+ btrfs_set_header_bytenr(leaf, leaf->start);
+ btrfs_set_header_generation(leaf, trans->transid);
+ btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
+ btrfs_set_header_owner(leaf, objectid);
+ root->node = leaf;
+
+ write_extent_buffer(leaf, fs_info->fsid,
+ (unsigned long)btrfs_header_fsid(leaf),
+ BTRFS_FSID_SIZE);
+ write_extent_buffer(leaf, fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(leaf),
+ BTRFS_UUID_SIZE);
+ btrfs_mark_buffer_dirty(leaf);
+
+ root->commit_root = btrfs_root_node(root);
+ root->track_dirty = 1;
+
+
+ root->root_item.flags = 0;
+ root->root_item.byte_limit = 0;
+ btrfs_set_root_bytenr(&root->root_item, leaf->start);
+ btrfs_set_root_generation(&root->root_item, trans->transid);
+ btrfs_set_root_level(&root->root_item, 0);
+ btrfs_set_root_refs(&root->root_item, 1);
+ btrfs_set_root_used(&root->root_item, leaf->len);
+ btrfs_set_root_last_snapshot(&root->root_item, 0);
+ btrfs_set_root_dirid(&root->root_item, 0);
+ root->root_item.drop_level = 0;
+
+ key.objectid = objectid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
+ if (ret)
+ goto fail;
+
+ btrfs_tree_unlock(leaf);
+
+fail:
+ if (ret)
+ return ERR_PTR(ret);
+
+ return root;
+}
+
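
/*
 * btrfs_create_tree() reports failure through the kernel's usual
 * ERR_PTR()/IS_ERR()/PTR_ERR() convention instead of a separate status
 * argument. A user-space re-creation of that convention, as a reading aid
 * (MAX_ERRNO mirrors the kernel's 4095 bound; create_tree() is a dummy):
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Small negative errno values map to the top pages of the address space,
 * which never hold valid objects, so they can be carried in a pointer. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_tree(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);
        return malloc(64);              /* stands in for the new root */
}

int main(void)
{
        void *root = create_tree(1);

        if (IS_ERR(root))
                printf("create_tree failed: %ld\n", PTR_ERR(root));

        root = create_tree(0);
        if (!IS_ERR(root)) {
                printf("got a valid root pointer\n");
                free(root);
        }
        return 0;
}
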
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info)
{
@@ -1326,6 +1406,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
u64 generation;
u32 blocksize;
int ret = 0;
+ int slot;
root = btrfs_alloc_root(fs_info);
if (!root)
@@ -1352,9 +1433,8 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
if (ret == 0) {
l = path->nodes[0];
- read_extent_buffer(l, &root->root_item,
- btrfs_item_ptr_offset(l, path->slots[0]),
- sizeof(root->root_item));
+ slot = path->slots[0];
+ btrfs_read_root_item(tree_root, l, slot, &root->root_item);
memcpy(&root->root_key, location, sizeof(*location));
}
btrfs_free_path(path);
@@ -1396,6 +1476,9 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
return fs_info->dev_root;
if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
return fs_info->csum_root;
+ if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
+ return fs_info->quota_root ? fs_info->quota_root :
+ ERR_PTR(-ENOENT);
again:
spin_lock(&fs_info->fs_roots_radix_lock);
root = radix_tree_lookup(&fs_info->fs_roots_radix,
@@ -1533,8 +1616,6 @@ static int cleaner_kthread(void *arg)
struct btrfs_root *root = arg;
do {
- vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
-
if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
mutex_trylock(&root->fs_info->cleaner_mutex)) {
btrfs_run_delayed_iputs(root);
@@ -1566,7 +1647,6 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
delay = HZ * 30;
- vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
mutex_lock(&root->fs_info->transaction_kthread_mutex);
spin_lock(&root->fs_info->trans_lock);
@@ -1823,6 +1903,10 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
free_extent_buffer(info->extent_root->commit_root);
free_extent_buffer(info->csum_root->node);
free_extent_buffer(info->csum_root->commit_root);
+ if (info->quota_root) {
+ free_extent_buffer(info->quota_root->node);
+ free_extent_buffer(info->quota_root->commit_root);
+ }
info->tree_root->node = NULL;
info->tree_root->commit_root = NULL;
@@ -1832,6 +1916,10 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
info->extent_root->commit_root = NULL;
info->csum_root->node = NULL;
info->csum_root->commit_root = NULL;
+ if (info->quota_root) {
+ info->quota_root->node = NULL;
+ info->quota_root->commit_root = NULL;
+ }
if (chunk_root) {
free_extent_buffer(info->chunk_root->node);
@@ -1862,6 +1950,7 @@ int open_ctree(struct super_block *sb,
struct btrfs_root *csum_root;
struct btrfs_root *chunk_root;
struct btrfs_root *dev_root;
+ struct btrfs_root *quota_root;
struct btrfs_root *log_tree_root;
int ret;
int err = -EINVAL;
@@ -1873,9 +1962,10 @@ int open_ctree(struct super_block *sb,
csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
+ quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
if (!tree_root || !extent_root || !csum_root ||
- !chunk_root || !dev_root) {
+ !chunk_root || !dev_root || !quota_root) {
err = -ENOMEM;
goto fail;
}
@@ -2032,6 +2122,13 @@ int open_ctree(struct super_block *sb,
init_rwsem(&fs_info->cleanup_work_sem);
init_rwsem(&fs_info->subvol_sem);
+ spin_lock_init(&fs_info->qgroup_lock);
+ fs_info->qgroup_tree = RB_ROOT;
+ INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+ fs_info->qgroup_seq = 1;
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+
btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
@@ -2244,7 +2341,7 @@ int open_ctree(struct super_block *sb,
ret |= btrfs_start_workers(&fs_info->caching_workers);
ret |= btrfs_start_workers(&fs_info->readahead_workers);
if (ret) {
- ret = -ENOMEM;
+ err = -ENOMEM;
goto fail_sb_buffer;
}
@@ -2356,6 +2453,17 @@ retry_root_backup:
goto recovery_tree_root;
csum_root->track_dirty = 1;
+ ret = find_and_setup_root(tree_root, fs_info,
+ BTRFS_QUOTA_TREE_OBJECTID, quota_root);
+ if (ret) {
+ kfree(quota_root);
+ quota_root = fs_info->quota_root = NULL;
+ } else {
+ quota_root->track_dirty = 1;
+ fs_info->quota_enabled = 1;
+ fs_info->pending_quota_state = 1;
+ }
+
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
@@ -2415,17 +2523,19 @@ retry_root_backup:
" integrity check module %s\n", sb->s_id);
}
#endif
+ ret = btrfs_read_qgroup_config(fs_info);
+ if (ret)
+ goto fail_trans_kthread;
/* do not make disk changes in broken FS */
- if (btrfs_super_log_root(disk_super) != 0 &&
- !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
+ if (btrfs_super_log_root(disk_super) != 0) {
u64 bytenr = btrfs_super_log_root(disk_super);
if (fs_devices->rw_devices == 0) {
printk(KERN_WARNING "Btrfs log replay required "
"on RO media\n");
err = -EIO;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
blocksize =
btrfs_level_size(tree_root,
@@ -2434,7 +2544,7 @@ retry_root_backup:
log_tree_root = btrfs_alloc_root(fs_info);
if (!log_tree_root) {
err = -ENOMEM;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
__setup_root(nodesize, leafsize, sectorsize, stripesize,
@@ -2466,15 +2576,15 @@ retry_root_backup:
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_cleanup_fs_roots(fs_info);
- if (ret) {
- }
+ if (ret)
+ goto fail_trans_kthread;
ret = btrfs_recover_relocation(tree_root);
if (ret < 0) {
printk(KERN_WARNING
"btrfs: failed to recover relocation\n");
err = -EINVAL;
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
}
@@ -2484,10 +2594,10 @@ retry_root_backup:
fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
if (!fs_info->fs_root)
- goto fail_trans_kthread;
+ goto fail_qgroup;
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
- goto fail_trans_kthread;
+ goto fail_qgroup;
}
if (sb->s_flags & MS_RDONLY)
@@ -2511,6 +2621,8 @@ retry_root_backup:
return 0;
+fail_qgroup:
+ btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
@@ -3076,30 +3188,14 @@ int close_ctree(struct btrfs_root *root)
/* clear out the rbtree of defraggable inodes */
btrfs_run_defrag_inodes(fs_info);
- /*
- * Here come 2 situations when btrfs is broken to flip readonly:
- *
- * 1. when btrfs flips readonly somewhere else before
- * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
- * and btrfs will skip to write sb directly to keep
- * ERROR state on disk.
- *
- * 2. when btrfs flips readonly just in btrfs_commit_super,
- * and in such case, btrfs cannot write sb via btrfs_commit_super,
- * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
- * btrfs will cleanup all FS resources first and write sb then.
- */
if (!(fs_info->sb->s_flags & MS_RDONLY)) {
ret = btrfs_commit_super(root);
if (ret)
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
}
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- ret = btrfs_error_commit_super(root);
- if (ret)
- printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
- }
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ btrfs_error_commit_super(root);
btrfs_put_block_group_cache(fs_info);
@@ -3109,6 +3205,8 @@ int close_ctree(struct btrfs_root *root)
fs_info->closing = 2;
smp_mb();
+ btrfs_free_qgroup_config(root->fs_info);
+
if (fs_info->delalloc_bytes) {
printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
(unsigned long long)fs_info->delalloc_bytes);
@@ -3128,6 +3226,10 @@ int close_ctree(struct btrfs_root *root)
free_extent_buffer(fs_info->dev_root->commit_root);
free_extent_buffer(fs_info->csum_root->node);
free_extent_buffer(fs_info->csum_root->commit_root);
+ if (fs_info->quota_root) {
+ free_extent_buffer(fs_info->quota_root->node);
+ free_extent_buffer(fs_info->quota_root->commit_root);
+ }
btrfs_free_block_groups(fs_info);
@@ -3258,7 +3360,7 @@ int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
-static int btree_lock_page_hook(struct page *page, void *data,
+int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *))
{
struct inode *inode = page->mapping->host;
@@ -3315,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
if (read_only)
return 0;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- printk(KERN_WARNING "warning: mount fs with errors, "
- "running btrfsck is recommended\n");
- }
-
return 0;
}
-int btrfs_error_commit_super(struct btrfs_root *root)
+void btrfs_error_commit_super(struct btrfs_root *root)
{
- int ret;
-
mutex_lock(&root->fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
mutex_unlock(&root->fs_info->cleaner_mutex);
@@ -3336,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root)
/* cleanup FS via transaction */
btrfs_cleanup_transaction(root);
-
- ret = write_ctree_super(NULL, root, 0);
-
- return ret;
}
static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
@@ -3663,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
/* FIXME: cleanup wait for commit */
t->in_commit = 1;
t->blocked = 1;
+ smp_mb();
if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
wake_up(&root->fs_info->transaction_blocked_wait);
t->blocked = 0;
+ smp_mb();
if (waitqueue_active(&root->fs_info->transaction_wait))
wake_up(&root->fs_info->transaction_wait);
t->commit_done = 1;
+ smp_mb();
if (waitqueue_active(&t->commit_wait))
wake_up(&t->commit_wait);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 05b3fab39f7e..c5b00a735fef 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_commit_super(struct btrfs_root *root);
-int btrfs_error_commit_super(struct btrfs_root *root);
+void btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
@@ -89,6 +89,12 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
int btrfs_cleanup_transaction(struct btrfs_root *root);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
+void btrfs_abort_devices(struct btrfs_root *root);
+struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ u64 objectid);
+int btree_lock_page_hook(struct page *page, void *data,
+ void (*flush_fn)(void *));
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6e1d36702ff7..ba58024d40d3 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -34,6 +34,8 @@
#include "locking.h"
#include "free-space-cache.h"
+#undef SCRAMBLE_DELAYED_REFS
+
/*
* control flags for do_chunk_alloc's force field
* CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
@@ -2217,6 +2219,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_head *locked_ref = NULL;
struct btrfs_delayed_extent_op *extent_op;
+ struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int count = 0;
int must_insert_reserved = 0;
@@ -2249,13 +2252,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
/*
+ * We need to try and merge add/drops of the same ref since we
+ * can run into issues with relocate dropping the implicit ref
+ * and then it being added back again before the drop can
+ * finish. If we merged anything we need to re-loop so we can
+ * get a good ref.
+ */
+ btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
+ locked_ref);
+
+ /*
* locked_ref is the head node, so we have to go one
* node back for any delayed ref updates
*/
ref = select_delayed_ref(locked_ref);
if (ref && ref->seq &&
- btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+ btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
/*
* there are still refs with lower seq numbers in the
* process of being added. Don't run this ref yet.
@@ -2315,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- /*
- * we modified num_entries, but as we're currently running
- * delayed refs, skip
- * wake_up(&delayed_refs->seq_wait);
- * here.
- */
+ if (locked_ref) {
+ /*
+ * when we play the delayed ref, also correct the
+ * ref_mod on head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->node.ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->node.ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ }
spin_unlock(&delayed_refs->lock);
ret = run_one_delayed_ref(trans, root, ref, extent_op,
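
/*
 * Toy model of the ref_mod bookkeeping added above: playing an add consumes
 * part of the head's pending positive count, playing a drop consumes part of
 * its pending negative count, so the head is adjusted with the opposite sign.
 * The enum values and struct head are invented for the illustration.
 */
#include <stdio.h>
#include <assert.h>

enum ref_action { ADD_DELAYED_REF, ADD_DELAYED_EXTENT, DROP_DELAYED_REF };

struct head { int ref_mod; };

static void play_ref(struct head *h, enum ref_action action, int ref_mod)
{
        switch (action) {
        case ADD_DELAYED_REF:
        case ADD_DELAYED_EXTENT:
                h->ref_mod -= ref_mod;
                break;
        case DROP_DELAYED_REF:
                h->ref_mod += ref_mod;
                break;
        }
}

int main(void)
{
        struct head h = { .ref_mod = 2 };       /* net +2 refs still pending */

        play_ref(&h, ADD_DELAYED_REF, 3);       /* played three adds */
        play_ref(&h, DROP_DELAYED_REF, 1);      /* played one drop */
        assert(h.ref_mod == 0);                 /* 2 - 3 + 1 */
        printf("remaining head ref_mod = %d\n", h.ref_mod);
        return 0;
}
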
@@ -2337,7 +2361,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
}
next:
- do_chunk_alloc(trans, root->fs_info->extent_root,
+ do_chunk_alloc(trans, fs_info->extent_root,
2 * 1024 * 1024,
btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
@@ -2347,19 +2371,81 @@ next:
return count;
}
-static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
- unsigned long num_refs,
- struct list_head *first_seq)
+#ifdef SCRAMBLE_DELAYED_REFS
+/*
+ * Normally delayed refs get processed in ascending bytenr order. This
+ * correlates in most cases to the order added. To expose dependencies on this
+ * order, we start to process the tree in the middle instead of the beginning
+ */
+static u64 find_middle(struct rb_root *root)
{
- spin_unlock(&delayed_refs->lock);
- pr_debug("waiting for more refs (num %ld, first %p)\n",
- num_refs, first_seq);
- wait_event(delayed_refs->seq_wait,
- num_refs != delayed_refs->num_entries ||
- delayed_refs->seq_head.next != first_seq);
- pr_debug("done waiting for more refs (num %ld, first %p)\n",
- delayed_refs->num_entries, delayed_refs->seq_head.next);
- spin_lock(&delayed_refs->lock);
+ struct rb_node *n = root->rb_node;
+ struct btrfs_delayed_ref_node *entry;
+ int alt = 1;
+ u64 middle;
+ u64 first = 0, last = 0;
+
+ n = rb_first(root);
+ if (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ first = entry->bytenr;
+ }
+ n = rb_last(root);
+ if (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ last = entry->bytenr;
+ }
+ n = root->rb_node;
+
+ while (n) {
+ entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
+ WARN_ON(!entry->in_tree);
+
+ middle = entry->bytenr;
+
+ if (alt)
+ n = n->rb_left;
+ else
+ n = n->rb_right;
+
+ alt = 1 - alt;
+ }
+ return middle;
+}
+#endif
+
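
/*
 * The alternating left/right descent in find_middle() lands on a bytenr near
 * the middle of the keyspace. A self-contained sketch of the same walk over a
 * hand-built binary tree (plain nodes instead of struct rb_node):
 */
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned long long bytenr; struct node *left, *right; };

static struct node *mk(unsigned long long b, struct node *l, struct node *r)
{
        struct node *n = malloc(sizeof(*n));

        n->bytenr = b;
        n->left = l;
        n->right = r;
        return n;
}

/* Walk down from the root, alternating left and right; the last node visited
 * is roughly in the middle of the sorted bytenr range. */
static unsigned long long find_middle(struct node *n)
{
        unsigned long long middle = 0;
        int alt = 1;

        while (n) {
                middle = n->bytenr;
                n = alt ? n->left : n->right;
                alt = 1 - alt;
        }
        return middle;
}

int main(void)
{
        /* a small balanced tree over bytenrs 10..70 */
        struct node *root =
                mk(40, mk(20, mk(10, NULL, NULL), mk(30, NULL, NULL)),
                       mk(60, mk(50, NULL, NULL), mk(70, NULL, NULL)));

        printf("start processing at bytenr %llu instead of 10\n",
               find_middle(root));
        return 0;
}
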
+int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct qgroup_update *qgroup_update;
+ int ret = 0;
+
+ if (list_empty(&trans->qgroup_ref_list) !=
+ !trans->delayed_ref_elem.seq) {
+ /* list without seq or seq without list */
+ printk(KERN_ERR "btrfs: qgroup accounting update error, list is%s empty, seq is %llu\n",
+ list_empty(&trans->qgroup_ref_list) ? "" : " not",
+ trans->delayed_ref_elem.seq);
+ BUG();
+ }
+
+ if (!trans->delayed_ref_elem.seq)
+ return 0;
+
+ while (!list_empty(&trans->qgroup_ref_list)) {
+ qgroup_update = list_first_entry(&trans->qgroup_ref_list,
+ struct qgroup_update, list);
+ list_del(&qgroup_update->list);
+ if (!ret)
+ ret = btrfs_qgroup_account_ref(
+ trans, fs_info, qgroup_update->node,
+ qgroup_update->extent_op);
+ kfree(qgroup_update);
+ }
+
+ btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
+
+ return ret;
}
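
/*
 * The loop above drains and frees every queued qgroup update even after an
 * accounting call fails, but only the first error is returned. The same
 * drain-and-keep-first-error pattern on a plain linked list, with a made-up
 * account() callback standing in for btrfs_qgroup_account_ref():
 */
#include <stdio.h>
#include <stdlib.h>

struct update { int id; struct update *next; };

static int account(const struct update *u)
{
        return u->id == 2 ? -1 : 0;     /* pretend update #2 fails */
}

/* Free every node; stop accounting after the first error and return it. */
static int drain(struct update *head)
{
        int ret = 0;

        while (head) {
                struct update *u = head;

                head = u->next;
                if (!ret)
                        ret = account(u);
                free(u);
        }
        return ret;
}

int main(void)
{
        struct update *head = NULL;

        for (int id = 3; id >= 1; id--) {
                struct update *u = malloc(sizeof(*u));

                u->id = id;
                u->next = head;
                head = u;               /* list ends up as 1 -> 2 -> 3 */
        }
        printf("drain returned %d\n", drain(head));     /* -1 */
        return 0;
}
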
/*
@@ -2379,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
- struct list_head *first_seq = NULL;
int ret;
u64 delayed_start;
int run_all = count == (unsigned long)-1;
int run_most = 0;
- unsigned long num_refs = 0;
- int consider_waiting;
+ int loops;
/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2398,11 +2482,18 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
CHUNK_ALLOC_NO_FORCE);
+ btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
- consider_waiting = 0;
+ loops = 0;
spin_lock(&delayed_refs->lock);
+
+#ifdef SCRAMBLE_DELAYED_REFS
+ delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
+#endif
+
if (count == 0) {
count = delayed_refs->num_entries * 2;
run_most = 1;
@@ -2424,31 +2515,6 @@ again:
if (ret)
break;
- if (delayed_start >= delayed_refs->run_delayed_start) {
- if (consider_waiting == 0) {
- /*
- * btrfs_find_ref_cluster looped. let's do one
- * more cycle. if we don't run any delayed ref
- * during that cycle (because we can't because
- * all of them are blocked) and if the number of
- * refs doesn't change, we avoid busy waiting.
- */
- consider_waiting = 1;
- num_refs = delayed_refs->num_entries;
- first_seq = root->fs_info->tree_mod_seq_list.next;
- } else {
- wait_for_more_refs(delayed_refs,
- num_refs, first_seq);
- /*
- * after waiting, things have changed. we
- * dropped the lock and someone else might have
- * run some refs, built new clusters and so on.
- * therefore, we restart staleness detection.
- */
- consider_waiting = 0;
- }
- }
-
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
spin_unlock(&delayed_refs->lock);
@@ -2461,9 +2527,26 @@ again:
if (count == 0)
break;
- if (ret || delayed_refs->run_delayed_start == 0) {
+ if (delayed_start >= delayed_refs->run_delayed_start) {
+ if (loops == 0) {
+ /*
+ * btrfs_find_ref_cluster looped. let's do one
+ * more cycle. if we don't run any delayed ref
+ * during that cycle (because all of them are
+ * blocked), bail out.
+ */
+ loops = 1;
+ } else {
+ /*
+ * no runnable refs left, stop trying
+ */
+ BUG_ON(run_all);
+ break;
+ }
+ }
+ if (ret) {
/* refs were run, let's reset staleness detection */
- consider_waiting = 0;
+ loops = 0;
}
}
@@ -2502,6 +2585,7 @@ again:
}
out:
spin_unlock(&delayed_refs->lock);
+ assert_qgroups_uptodate(trans);
return 0;
}
@@ -2581,8 +2665,10 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
node = rb_prev(node);
if (node) {
+ int seq = ref->seq;
+
ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
- if (ref->bytenr == bytenr)
+ if (ref->bytenr == bytenr && ref->seq == seq)
goto out_unlock;
}
@@ -2903,25 +2989,29 @@ again:
}
spin_lock(&block_group->lock);
- if (block_group->cached != BTRFS_CACHE_FINISHED) {
- /* We're not cached, don't bother trying to write stuff out */
+ if (block_group->cached != BTRFS_CACHE_FINISHED ||
+ !btrfs_test_opt(root, SPACE_CACHE)) {
+ /*
+ * don't bother trying to write stuff out _if_
+ * a) we're not cached,
+ * b) we're mounted with the nospace_cache option.
+ */
dcs = BTRFS_DC_WRITTEN;
spin_unlock(&block_group->lock);
goto out_put;
}
spin_unlock(&block_group->lock);
- num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
+ /*
+ * Try to preallocate enough space based on how big the block group is.
+ * Keep in mind this has to include any pinned space which could end up
+ * taking up quite a bit since it's not folded into the other space
+ * cache.
+ */
+ num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
if (!num_pages)
num_pages = 1;
- /*
- * Just to make absolutely sure we have enough space, we're going to
- * preallocate 12 pages worth of space for each block group. In
- * practice we ought to use at most 8, but we need extra space so we can
- * add our header and have a terminator between the extents and the
- * bitmaps.
- */
num_pages *= 16;
num_pages *= PAGE_CACHE_SIZE;
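
/*
 * Worked example of the sizing above, assuming 4 KiB pages: 16 pages are
 * preallocated per 256 MiB of block group (with a one-unit minimum), so a
 * 1 GiB block group gets a 256 KiB free space cache file reserved up front.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_ASSUMED 4096ULL       /* PAGE_CACHE_SIZE on x86 */

static uint64_t cache_prealloc_bytes(uint64_t block_group_bytes)
{
        uint64_t num_pages = block_group_bytes / (256ULL * 1024 * 1024);

        if (!num_pages)
                num_pages = 1;
        num_pages *= 16;
        return num_pages * PAGE_SIZE_ASSUMED;
}

int main(void)
{
        printf(" 64 MiB group -> %llu KiB\n",
               (unsigned long long)(cache_prealloc_bytes(64ULL << 20) >> 10));
        printf("  1 GiB group -> %llu KiB\n",
               (unsigned long long)(cache_prealloc_bytes(1ULL << 30) >> 10));
        printf(" 10 GiB group -> %llu KiB\n",
               (unsigned long long)(cache_prealloc_bytes(10ULL << 30) >> 10));
        return 0;
}
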
@@ -3134,6 +3224,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
init_waitqueue_head(&found->wait);
*space_info = found;
list_add_rcu(&found->list, &info->space_info);
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ info->data_sinfo = found;
return 0;
}
@@ -3263,12 +3355,6 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
return get_alloc_profile(root, flags);
}
-void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
-{
- BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
- BTRFS_BLOCK_GROUP_DATA);
-}
-
/*
* This will check the space that the inode allocates from to make sure we have
* enough space for bytes.
@@ -3277,6 +3363,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
struct btrfs_space_info *data_sinfo;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
u64 used;
int ret = 0, committed = 0, alloc_chunk = 1;
@@ -3289,7 +3376,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
committed = 1;
}
- data_sinfo = BTRFS_I(inode)->space_info;
+ data_sinfo = fs_info->data_sinfo;
if (!data_sinfo)
goto alloc;
@@ -3330,10 +3417,9 @@ alloc:
goto commit_trans;
}
- if (!data_sinfo) {
- btrfs_set_inode_space_info(root, inode);
- data_sinfo = BTRFS_I(inode)->space_info;
- }
+ if (!data_sinfo)
+ data_sinfo = fs_info->data_sinfo;
+
goto again;
}
@@ -3380,7 +3466,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
/* make sure bytes are sectorsize aligned */
bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
- data_sinfo = BTRFS_I(inode)->space_info;
+ data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
data_sinfo->bytes_may_use -= bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
@@ -3586,89 +3672,58 @@ out:
/*
* shrink metadata reservation for delalloc
*/
-static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
- bool wait_ordered)
+static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
+ bool wait_ordered)
{
struct btrfs_block_rsv *block_rsv;
struct btrfs_space_info *space_info;
struct btrfs_trans_handle *trans;
- u64 reserved;
+ u64 delalloc_bytes;
u64 max_reclaim;
- u64 reclaimed = 0;
long time_left;
unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
int loops = 0;
- unsigned long progress;
trans = (struct btrfs_trans_handle *)current->journal_info;
block_rsv = &root->fs_info->delalloc_block_rsv;
space_info = block_rsv->space_info;
smp_mb();
- reserved = space_info->bytes_may_use;
- progress = space_info->reservation_progress;
-
- if (reserved == 0)
- return 0;
-
- smp_mb();
- if (root->fs_info->delalloc_bytes == 0) {
+ delalloc_bytes = root->fs_info->delalloc_bytes;
+ if (delalloc_bytes == 0) {
if (trans)
- return 0;
+ return;
btrfs_wait_ordered_extents(root, 0, 0);
- return 0;
+ return;
}
- max_reclaim = min(reserved, to_reclaim);
- nr_pages = max_t(unsigned long, nr_pages,
- max_reclaim >> PAGE_CACHE_SHIFT);
- while (loops < 1024) {
- /* have the flusher threads jump in and do some IO */
- smp_mb();
- nr_pages = min_t(unsigned long, nr_pages,
- root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
+ while (delalloc_bytes && loops < 3) {
+ max_reclaim = min(delalloc_bytes, to_reclaim);
+ nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
- WB_REASON_FS_FREE_SPACE);
+ WB_REASON_FS_FREE_SPACE);
spin_lock(&space_info->lock);
- if (reserved > space_info->bytes_may_use)
- reclaimed += reserved - space_info->bytes_may_use;
- reserved = space_info->bytes_may_use;
+ if (space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use + orig <=
+ space_info->total_bytes) {
+ spin_unlock(&space_info->lock);
+ break;
+ }
spin_unlock(&space_info->lock);
loops++;
-
- if (reserved == 0 || reclaimed >= max_reclaim)
- break;
-
- if (trans && trans->transaction->blocked)
- return -EAGAIN;
-
if (wait_ordered && !trans) {
btrfs_wait_ordered_extents(root, 0, 0);
} else {
- time_left = schedule_timeout_interruptible(1);
-
- /* We were interrupted, exit */
+ time_left = schedule_timeout_killable(1);
if (time_left)
break;
}
-
- /* we've kicked the IO a few times, if anything has been freed,
- * exit. There is no sense in looping here for a long time
- * when we really need to commit the transaction, or there are
- * just too many writers without enough free space
- */
-
- if (loops > 3) {
- smp_mb();
- if (progress != space_info->reservation_progress)
- break;
- }
-
+ smp_mb();
+ delalloc_bytes = root->fs_info->delalloc_bytes;
}
-
- return reclaimed >= to_reclaim;
}
/**
@@ -3728,6 +3783,58 @@ commit:
return btrfs_commit_transaction(trans, root);
}
+enum flush_state {
+ FLUSH_DELALLOC = 1,
+ FLUSH_DELALLOC_WAIT = 2,
+ FLUSH_DELAYED_ITEMS_NR = 3,
+ FLUSH_DELAYED_ITEMS = 4,
+ COMMIT_TRANS = 5,
+};
+
+static int flush_space(struct btrfs_root *root,
+ struct btrfs_space_info *space_info, u64 num_bytes,
+ u64 orig_bytes, int state)
+{
+ struct btrfs_trans_handle *trans;
+ int nr;
+ int ret = 0;
+
+ switch (state) {
+ case FLUSH_DELALLOC:
+ case FLUSH_DELALLOC_WAIT:
+ shrink_delalloc(root, num_bytes, orig_bytes,
+ state == FLUSH_DELALLOC_WAIT);
+ break;
+ case FLUSH_DELAYED_ITEMS_NR:
+ case FLUSH_DELAYED_ITEMS:
+ if (state == FLUSH_DELAYED_ITEMS_NR) {
+ u64 bytes = btrfs_calc_trans_metadata_size(root, 1);
+
+ nr = (int)div64_u64(num_bytes, bytes);
+ if (!nr)
+ nr = 1;
+ nr *= 2;
+ } else {
+ nr = -1;
+ }
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ ret = btrfs_run_delayed_items_nr(trans, root, nr);
+ btrfs_end_transaction(trans, root);
+ break;
+ case COMMIT_TRANS:
+ ret = may_commit_transaction(root, space_info, orig_bytes, 0);
+ break;
+ default:
+ ret = -ENOSPC;
+ break;
+ }
+
+ return ret;
+}
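
/*
 * reserve_metadata_bytes() (further down) now steps through these states in
 * order, re-attempting the reservation after each flush and giving up only
 * once COMMIT_TRANS has been tried. A stripped-down model of that escalation
 * loop; try_reserve() and do_flush() are stand-ins, and here only the final
 * transaction commit is assumed to free enough space.
 */
#include <stdio.h>

enum flush_state {
        FLUSH_DELALLOC = 1,
        FLUSH_DELALLOC_WAIT,
        FLUSH_DELAYED_ITEMS_NR,
        FLUSH_DELAYED_ITEMS,
        COMMIT_TRANS,
};

static int try_reserve(int freed)       { return freed ? 0 : -1; }
static int do_flush(enum flush_state s) { return s == COMMIT_TRANS; }

static int reserve(void)
{
        enum flush_state state = FLUSH_DELALLOC;
        int freed = 0;

        for (;;) {
                if (!try_reserve(freed))
                        return 0;               /* reservation succeeded */
                if (state > COMMIT_TRANS)
                        return -1;              /* truly out of space */
                printf("flush step %d\n", state);
                freed = do_flush(state);        /* escalate one step */
                state++;
        }
}

int main(void)
{
        printf("reserve() = %d\n", reserve());
        return 0;
}
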
/**
* reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
* @root - the root we're allocating for
@@ -3749,11 +3856,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 used;
u64 num_bytes = orig_bytes;
- int retries = 0;
+ int flush_state = FLUSH_DELALLOC;
int ret = 0;
- bool committed = false;
bool flushing = false;
- bool wait_ordered = false;
+ bool committed = false;
again:
ret = 0;
@@ -3812,9 +3918,8 @@ again:
* amount plus the amount of bytes that we need for this
* reservation.
*/
- wait_ordered = true;
num_bytes = used - space_info->total_bytes +
- (orig_bytes * (retries + 1));
+ (orig_bytes * 2);
}
if (ret) {
@@ -3867,8 +3972,6 @@ again:
trace_btrfs_space_reservation(root->fs_info,
"space_info", space_info->flags, orig_bytes, 1);
ret = 0;
- } else {
- wait_ordered = true;
}
}
@@ -3887,36 +3990,13 @@ again:
if (!ret || !flush)
goto out;
- /*
- * We do synchronous shrinking since we don't actually unreserve
- * metadata until after the IO is completed.
- */
- ret = shrink_delalloc(root, num_bytes, wait_ordered);
- if (ret < 0)
- goto out;
-
- ret = 0;
-
- /*
- * So if we were overcommitted it's possible that somebody else flushed
- * out enough space and we simply didn't have enough space to reclaim,
- * so go back around and try again.
- */
- if (retries < 2) {
- wait_ordered = true;
- retries++;
+ ret = flush_space(root, space_info, num_bytes, orig_bytes,
+ flush_state);
+ flush_state++;
+ if (!ret)
goto again;
- }
-
- ret = -ENOSPC;
- if (committed)
- goto out;
-
- ret = may_commit_transaction(root, space_info, orig_bytes, 0);
- if (!ret) {
- committed = true;
+ else if (flush_state <= COMMIT_TRANS)
goto again;
- }
out:
if (flushing) {
@@ -3934,7 +4014,10 @@ static struct btrfs_block_rsv *get_block_rsv(
{
struct btrfs_block_rsv *block_rsv = NULL;
- if (root->ref_cows || root == root->fs_info->csum_root)
+ if (root->ref_cows)
+ block_rsv = trans->block_rsv;
+
+ if (root == root->fs_info->csum_root && trans->adding_csums)
block_rsv = trans->block_rsv;
if (!block_rsv)
@@ -4286,6 +4369,9 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
+ if (!trans->block_rsv)
+ return;
+
if (!trans->bytes_reserved)
return;
@@ -4444,7 +4530,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
int ret;
/* Need to be holding the i_mutex here if we aren't free space cache */
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
flush = 0;
if (flush && btrfs_transaction_in_commit(root->fs_info))
@@ -4476,6 +4562,15 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
+ if (root->fs_info->quota_enabled) {
+ ret = btrfs_qgroup_reserve(root, num_bytes +
+ nr_extents * root->leafsize);
+ if (ret) {
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+ return ret;
+ }
+ }
+
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
if (ret) {
u64 to_free = 0;
@@ -4554,6 +4649,11 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_free, 0);
+ if (root->fs_info->quota_enabled) {
+ btrfs_qgroup_free(root, num_bytes +
+ dropped * root->leafsize);
+ }
+
btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
to_free);
}
@@ -5190,8 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
rb_erase(&head->node.rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- if (waitqueue_active(&delayed_refs->seq_wait))
- wake_up(&delayed_refs->seq_wait);
/*
* we don't take a ref on the node because we're removing it from the
@@ -5748,7 +5846,11 @@ loop:
ret = do_chunk_alloc(trans, root, num_bytes +
2 * 1024 * 1024, data,
CHUNK_ALLOC_LIMITED);
- if (ret < 0) {
+ /*
+ * Do not bail out on ENOSPC since we
+ * can do more things.
+ */
+ if (ret < 0 && ret != -ENOSPC) {
btrfs_abort_transaction(trans,
root, ret);
goto out;
@@ -5816,13 +5918,13 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
again:
list_for_each_entry(cache, &info->block_groups[index], list) {
spin_lock(&cache->lock);
- printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
- "%llu pinned %llu reserved\n",
+ printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
(unsigned long long)cache->key.objectid,
(unsigned long long)cache->key.offset,
(unsigned long long)btrfs_block_group_used(&cache->item),
(unsigned long long)cache->pinned,
- (unsigned long long)cache->reserved);
+ (unsigned long long)cache->reserved,
+ cache->ro ? "[readonly]" : "");
btrfs_dump_free_space(cache, bytes);
spin_unlock(&cache->lock);
}
@@ -7610,8 +7712,21 @@ int btrfs_read_block_groups(struct btrfs_root *root)
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
- if (need_clear)
+ if (need_clear) {
+ /*
+ * When we mount with old space cache, we need to
+ * set BTRFS_DC_CLEAR and set dirty flag.
+ *
+ * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
+ * truncate the old free space cache inode and
+ * set up a new one.
+ * b) Setting 'dirty flag' makes sure that we flush
+ * the new space cache info onto disk.
+ */
cache->disk_cache_state = BTRFS_DC_CLEAR;
+ if (btrfs_test_opt(root, SPACE_CACHE))
+ cache->dirty = 1;
+ }
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index deafe19c34b5..4c878476bb91 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1919,7 +1919,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
return -EIO;
}
- printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
+ printk_ratelimited_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
"(dev %s sector %llu)\n", page->mapping->host->i_ino,
start, rcu_str_deref(dev->name), sector);
@@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
ret = tree->ops->readpage_end_io_hook(page, start, end,
state, mirror);
- if (ret) {
- /* no IO indicated but software detected errors
- * in the block, either checksum errors or
- * issues with the contents */
- struct btrfs_root *root =
- BTRFS_I(page->mapping->host)->root;
- struct btrfs_device *device;
-
+ if (ret)
uptodate = 0;
- device = btrfs_find_device_for_logical(
- root, start, mirror);
- if (device)
- btrfs_dev_stat_inc_and_print(device,
- BTRFS_DEV_STAT_CORRUPTION_ERRS);
- } else {
+ else
clean_io_failure(start, page);
- }
}
if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
@@ -3078,8 +3065,15 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
}
}
+ /*
+ * We need to do this to prevent races with callers that check whether the
+ * eb is under IO, since we can end up having no IO bits set for a short
+ * period of time.
+ */
+ spin_lock(&eb->refs_lock);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ spin_unlock(&eb->refs_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
spin_lock(&fs_info->delalloc_lock);
if (fs_info->dirty_metadata_bytes >= eb->len)
@@ -3088,6 +3082,8 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
WARN_ON(1);
spin_unlock(&fs_info->delalloc_lock);
ret = 1;
+ } else {
+ spin_unlock(&eb->refs_lock);
}
btrfs_tree_unlock(eb);
@@ -3558,19 +3554,38 @@ int extent_readpages(struct extent_io_tree *tree,
struct bio *bio = NULL;
unsigned page_idx;
unsigned long bio_flags = 0;
+ struct page *pagepool[16];
+ struct page *page;
+ int i = 0;
+ int nr = 0;
for (page_idx = 0; page_idx < nr_pages; page_idx++) {
- struct page *page = list_entry(pages->prev, struct page, lru);
+ page = list_entry(pages->prev, struct page, lru);
prefetchw(&page->flags);
list_del(&page->lru);
- if (!add_to_page_cache_lru(page, mapping,
+ if (add_to_page_cache_lru(page, mapping,
page->index, GFP_NOFS)) {
- __extent_read_full_page(tree, page, get_extent,
- &bio, 0, &bio_flags);
+ page_cache_release(page);
+ continue;
}
- page_cache_release(page);
+
+ pagepool[nr++] = page;
+ if (nr < ARRAY_SIZE(pagepool))
+ continue;
+ for (i = 0; i < nr; i++) {
+ __extent_read_full_page(tree, pagepool[i], get_extent,
+ &bio, 0, &bio_flags);
+ page_cache_release(pagepool[i]);
+ }
+ nr = 0;
}
+ for (i = 0; i < nr; i++) {
+ __extent_read_full_page(tree, pagepool[i], get_extent,
+ &bio, 0, &bio_flags);
+ page_cache_release(pagepool[i]);
+ }
+
BUG_ON(!list_empty(pages));
if (bio)
return submit_one_bio(READ, bio, 0, bio_flags);
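
/*
 * The readahead loop above now gathers pages in a small on-stack pool and
 * issues them 16 at a time, with a final pass for whatever is left over.
 * The batching skeleton on its own, independent of the page cache (item
 * values and process_batch() are placeholders):
 */
#include <stdio.h>

#define POOL_SIZE 16

static void process_batch(const int *pool, int nr)
{
        (void)pool;
        printf("submitting batch of %d items\n", nr);
}

static void process_all(const int *items, int count)
{
        int pool[POOL_SIZE];
        int nr = 0;

        for (int i = 0; i < count; i++) {
                pool[nr++] = items[i];
                if (nr < POOL_SIZE)
                        continue;
                process_batch(pool, nr);        /* pool is full: flush it */
                nr = 0;
        }
        if (nr)                                 /* leftover partial batch */
                process_batch(pool, nr);
}

int main(void)
{
        int items[37];

        for (int i = 0; i < 37; i++)
                items[i] = i;
        process_all(items, 37);                 /* batches of 16, 16 and 5 */
        return 0;
}
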
@@ -4124,11 +4139,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* So bump the ref count first, then set the bit. If someone
* beat us to it, drop the ref we added.
*/
- if (!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
+ spin_lock(&eb->refs_lock);
+ if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_inc(&eb->refs);
- if (test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
- atomic_dec(&eb->refs);
- }
+ spin_unlock(&eb->refs_lock);
}
static void mark_extent_buffer_accessed(struct extent_buffer *eb)
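
/*
 * Sketch of the "add the tree's reference exactly once" idea above, using
 * C11 atomics in place of the EXTENT_BUFFER_TREE_REF bit and eb->refs; the
 * kernel version additionally holds eb->refs_lock to serialize against the
 * release path.
 */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int refs = 1;             /* the caller's own reference */
static atomic_flag tree_ref_taken = ATOMIC_FLAG_INIT;

static void check_tree_ref(void)
{
        /* only the first caller to set the flag bumps the refcount */
        if (!atomic_flag_test_and_set(&tree_ref_taken))
                atomic_fetch_add(&refs, 1);
}

int main(void)
{
        check_tree_ref();
        check_tree_ref();
        check_tree_ref();
        printf("refs = %d\n", atomic_load(&refs));      /* 2, not 4 */
        return 0;
}
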
@@ -4240,9 +4254,7 @@ again:
goto free_eb;
}
/* add one reference for the tree */
- spin_lock(&eb->refs_lock);
check_buffer_tree_ref(eb);
- spin_unlock(&eb->refs_lock);
spin_unlock(&tree->buffer_lock);
radix_tree_preload_end();
@@ -4301,7 +4313,7 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
}
/* Expects to have eb->eb_lock already held */
-static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
+static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
{
WARN_ON(atomic_read(&eb->refs) == 0);
if (atomic_dec_and_test(&eb->refs)) {
@@ -4322,9 +4334,11 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
btrfs_release_extent_buffer_page(eb, 0);
call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
- return;
+ return 1;
}
spin_unlock(&eb->refs_lock);
+
+ return 0;
}
void free_extent_buffer(struct extent_buffer *eb)
@@ -4963,7 +4977,6 @@ int try_release_extent_buffer(struct page *page, gfp_t mask)
spin_unlock(&eb->refs_lock);
return 0;
}
- release_extent_buffer(eb, mask);
- return 1;
+ return release_extent_buffer(eb, mask);
}
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 5d158d320233..857d93cd01dc 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -183,7 +183,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
* read from the commit root and sidestep a nasty deadlock
* between reading the free space cache and updating the csum tree.
*/
- if (btrfs_is_free_space_inode(root, inode)) {
+ if (btrfs_is_free_space_inode(inode)) {
path->search_commit_root = 1;
path->skip_locking = 1;
}
@@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
}
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
- struct bio *bio, u64 offset, u32 *dst)
+ struct bio *bio, u64 offset)
{
- return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1);
+ return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
@@ -690,6 +690,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
return -ENOMEM;
sector_sum = sums->sums;
+ trans->adding_csums = 1;
again:
next_offset = (u64)-1;
found_next = 0;
@@ -853,6 +854,7 @@ next_sector:
goto again;
}
out:
+ trans->adding_csums = 0;
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9aa01ec2138d..5caf285c6e4d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1379,7 +1379,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
ssize_t err = 0;
size_t count, ocount;
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+ sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
@@ -1469,6 +1469,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
num_written = err;
}
out:
+ sb_end_write(inode->i_sb);
current->backing_dev_info = NULL;
return num_written ? num_written : err;
}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 6c4e2baa9290..6b10acfc2f5c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1968,7 +1968,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
- if (info->bytes >= bytes)
+ if (info->bytes >= bytes && !block_group->ro)
count++;
printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
(unsigned long long)info->offset,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fb8d671d00e6..ec154f954646 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow,
* If this code finds it can't get good compression, it puts an
* entry onto the work queue to write the uncompressed bytes. This
* makes sure that both compressed inodes and uncompressed inodes
- * are written in the same order that pdflush sent them down.
+ * are written in the same order that the flusher thread sent them
+ * down.
*/
static noinline int compress_file_range(struct inode *inode,
struct page *locked_page,
@@ -825,7 +826,7 @@ static noinline int cow_file_range(struct inode *inode,
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
int ret = 0;
- BUG_ON(btrfs_is_free_space_inode(root, inode));
+ BUG_ON(btrfs_is_free_space_inode(inode));
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
extent_clear_unlock_delalloc(inode,
@@ -1007,10 +1008,8 @@ static noinline void async_cow_submit(struct btrfs_work *work)
nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
PAGE_CACHE_SHIFT;
- atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
-
- if (atomic_read(&root->fs_info->async_delalloc_pages) <
- 5 * 1042 * 1024 &&
+ if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
+ 5 * 1024 * 1024 &&
waitqueue_active(&root->fs_info->async_submit_wait))
wake_up(&root->fs_info->async_submit_wait);
@@ -1035,7 +1034,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
- int limit = 10 * 1024 * 1042;
+ int limit = 10 * 1024 * 1024;
clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1, 0, NULL, GFP_NOFS);
@@ -1153,7 +1152,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
return -ENOMEM;
}
- nolock = btrfs_is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(inode);
if (nolock)
trans = btrfs_join_transaction_nolock(root);
@@ -1466,7 +1465,7 @@ static void btrfs_set_bit_hook(struct inode *inode,
if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !btrfs_is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1501,7 +1500,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 len = state->end + 1 - state->start;
- bool do_list = !btrfs_is_free_space_inode(root, inode);
+ bool do_list = !btrfs_is_free_space_inode(inode);
if (*bits & EXTENT_FIRST_DELALLOC) {
*bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1612,7 +1611,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
metadata = 2;
if (!(rw & REQ_WRITE)) {
@@ -1869,7 +1868,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
int ret;
bool nolock;
- nolock = btrfs_is_free_space_inode(root, inode);
+ nolock = btrfs_is_free_space_inode(inode);
if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
ret = -EIO;
@@ -1884,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans = btrfs_join_transaction_nolock(root);
else
trans = btrfs_join_transaction(root);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
@@ -2007,7 +2009,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
ordered_extent->work.func = finish_ordered_fn;
ordered_extent->work.flags = 0;
- if (btrfs_is_free_space_inode(root, inode))
+ if (btrfs_is_free_space_inode(inode))
workers = &root->fs_info->endio_freespace_worker;
else
workers = &root->fs_info->endio_write_workers;
@@ -2732,8 +2734,10 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
* The data relocation inode should also be directly updated
* without delay
*/
- if (!btrfs_is_free_space_inode(root, inode)
+ if (!btrfs_is_free_space_inode(inode)
&& root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ btrfs_update_root_times(trans, root);
+
ret = btrfs_delayed_update_inode(trans, root, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
@@ -2833,7 +2837,7 @@ err:
inode_inc_iversion(inode);
inode_inc_iversion(dir);
inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode(trans, root, dir);
out:
return ret;
}
@@ -3171,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->i_size - name_len * 2);
inode_inc_iversion(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- ret = btrfs_update_inode(trans, root, dir);
+ ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
btrfs_abort_transaction(trans, root, ret);
out:
@@ -3743,7 +3747,7 @@ void btrfs_evict_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
- btrfs_is_free_space_inode(root, inode)))
+ btrfs_is_free_space_inode(inode)))
goto no_delete;
if (is_bad_inode(inode)) {
@@ -4082,7 +4086,6 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
struct btrfs_iget_args *args = p;
inode->i_ino = args->ino;
BTRFS_I(inode)->root = args->root;
- btrfs_set_inode_space_info(args->root, inode);
return 0;
}
@@ -4457,7 +4460,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
return 0;
- if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
+ if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
nolock = true;
if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -4518,6 +4521,11 @@ int btrfs_dirty_inode(struct inode *inode)
static int btrfs_update_time(struct inode *inode, struct timespec *now,
int flags)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ if (btrfs_root_readonly(root))
+ return -EROFS;
+
if (flags & S_VERSION)
inode_inc_iversion(inode);
if (flags & S_CTIME)
@@ -4662,7 +4670,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
BTRFS_I(inode)->root = root;
BTRFS_I(inode)->generation = trans->transid;
inode->i_generation = BTRFS_I(inode)->generation;
- btrfs_set_inode_space_info(root, inode);
if (S_ISDIR(mode))
owner = 0;
@@ -4690,6 +4697,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
+ memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
+ sizeof(*inode_item));
fill_inode_item(trans, path->nodes[0], inode_item, inode);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
@@ -4723,6 +4732,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
trace_btrfs_inode_new(inode);
btrfs_set_inode_last_trans(trans, inode);
+ btrfs_update_root_times(trans, root);
+
return inode;
fail:
if (dir)
@@ -5764,18 +5775,112 @@ out:
return ret;
}
+static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
+ struct extent_state **cached_state, int writing)
+{
+ struct btrfs_ordered_extent *ordered;
+ int ret = 0;
+
+ while (1) {
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ 0, cached_state);
+ /*
+ * We're concerned with the entire range that we're going to be
+ * doing DIO to, so we need to make sure there are no ordered
+ * extents in this range.
+ */
+ ordered = btrfs_lookup_ordered_range(inode, lockstart,
+ lockend - lockstart + 1);
+
+ /*
+ * We need to make sure there are no buffered pages in this
+ * range either, we could have raced between the invalidate in
+ * generic_file_direct_write and locking the extent. The
+ * invalidate needs to happen so that reads after a write do not
+ * get stale data.
+ */
+ if (!ordered && (!writing ||
+ !test_range_bit(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend, EXTENT_UPTODATE, 0,
+ *cached_state)))
+ break;
+
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ cached_state, GFP_NOFS);
+
+ if (ordered) {
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ /* Screw you mmap */
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ lockstart,
+ lockend);
+ if (ret)
+ break;
+
+ /*
+ * If we found a page that couldn't be invalidated just
+ * fall back to buffered.
+ */
+ ret = invalidate_inode_pages2_range(inode->i_mapping,
+ lockstart >> PAGE_CACHE_SHIFT,
+ lockend >> PAGE_CACHE_SHIFT);
+ if (ret)
+ break;
+ }
+
+ cond_resched();
+ }
+
+ return ret;
+}
+
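
/*
 * lock_extent_direct() is a lock/validate/back-off loop: take the range
 * lock, look for conflicting state (ordered extents, stale pagecache), and
 * if any is found drop the lock, resolve the conflict and try again. The
 * shape of that loop in miniature, with the conflict handling stubbed out:
 */
#include <stdio.h>

static int conflicts_remaining = 2;     /* pretend two attempts race */

static void lock_range(void)    { printf("lock range\n"); }
static void unlock_range(void)  { printf("unlock range\n"); }
static int  find_conflict(void) { return conflicts_remaining > 0; }

static void resolve_conflict(void)
{
        printf("waiting for conflicting ordered extent\n");
        conflicts_remaining--;
}

static int lock_direct(void)
{
        for (;;) {
                lock_range();
                if (!find_conflict())
                        return 0;       /* locked with no conflicts */
                unlock_range();
                resolve_conflict();     /* may sleep in the real code */
        }
}

int main(void)
{
        return lock_direct();
}
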
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
struct extent_map *em;
struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct extent_state *cached_state = NULL;
u64 start = iblock << inode->i_blkbits;
+ u64 lockstart, lockend;
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
+ int unlock_bits = EXTENT_LOCKED;
+ int ret;
+
+ if (create) {
+ ret = btrfs_delalloc_reserve_space(inode, len);
+ if (ret)
+ return ret;
+ unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
+ } else {
+ len = min_t(u64, len, root->sectorsize);
+ }
+
+ lockstart = start;
+ lockend = start + len - 1;
+
+ /*
+ * If this errors out it's because we couldn't invalidate pagecache for
+ * this range and we need to fallback to buffered.
+ */
+ if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
+ return -ENOTBLK;
+
+ if (create) {
+ ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, EXTENT_DELALLOC, NULL,
+ &cached_state, GFP_NOFS);
+ if (ret)
+ goto unlock_err;
+ }
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
/*
* Ok for INLINE and COMPRESSED extents we need to fallback on buffered
@@ -5794,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
em->block_start == EXTENT_MAP_INLINE) {
free_extent_map(em);
- return -ENOTBLK;
+ ret = -ENOTBLK;
+ goto unlock_err;
}
/* Just a good old fashioned hole, return */
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
- /* DIO will do one hole at a time, so just unlock a sector */
- unlock_extent(&BTRFS_I(inode)->io_tree, start,
- start + root->sectorsize - 1);
- return 0;
+ ret = 0;
+ goto unlock_err;
}
/*
@@ -5817,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
*
*/
if (!create) {
- len = em->len - (start - em->start);
- goto map;
+ len = min(len, em->len - (start - em->start));
+ lockstart = start + len;
+ goto unlock;
}
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
@@ -5850,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
btrfs_end_transaction(trans, root);
if (ret) {
free_extent_map(em);
- return ret;
+ goto unlock_err;
}
goto unlock;
}
@@ -5863,14 +5968,12 @@ must_cow:
*/
len = bh_result->b_size;
em = btrfs_new_extent_direct(inode, em, start, len);
- if (IS_ERR(em))
- return PTR_ERR(em);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto unlock_err;
+ }
len = min(len, em->len - (start - em->start));
unlock:
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1,
- EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1,
- 0, NULL, GFP_NOFS);
-map:
bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
inode->i_blkbits;
bh_result->b_size = len;
@@ -5888,9 +5991,44 @@ map:
i_size_write(inode, start + len);
}
+ /*
+ * In the case of a write we need to clear and unlock the entire range;
+ * in the case of a read we only need to unlock the end area that we
+ * aren't using, if there is any leftover space.
+ */
+ if (lockstart < lockend) {
+ if (create && len < lockend - lockstart) {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockstart + len - 1, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ /*
+ * Besides unlocking, we also need to clean up the reserved
+ * space for the remaining range by attaching EXTENT_DO_ACCOUNTING.
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree,
+ lockstart + len, lockend,
+ unlock_bits | EXTENT_DO_ACCOUNTING,
+ 1, 0, NULL, GFP_NOFS);
+ } else {
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
+ }
+ } else {
+ free_extent_state(cached_state);
+ }
+
free_extent_map(em);
return 0;
+
+unlock_err:
+ if (create)
+ unlock_bits |= EXTENT_DO_ACCOUNTING;
+
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+ unlock_bits, 1, 0, &cached_state, GFP_NOFS);
+ return ret;
}
struct btrfs_dio_private {
@@ -5898,7 +6036,6 @@ struct btrfs_dio_private {
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
- u32 *csums;
void *private;
/* number of bios pending for this dio */
@@ -5918,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
struct inode *inode = dip->inode;
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start;
- u32 *private = dip->csums;
start = dip->logical_offset;
do {
@@ -5926,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
struct page *page = bvec->bv_page;
char *kaddr;
u32 csum = ~(u32)0;
+ u64 private = ~(u32)0;
unsigned long flags;
+ if (get_state_private(&BTRFS_I(inode)->io_tree,
+ start, &private))
+ goto failed;
local_irq_save(flags);
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
@@ -5937,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
local_irq_restore(flags);
flush_dcache_page(bvec->bv_page);
- if (csum != *private) {
+ if (csum != private) {
+failed:
printk(KERN_ERR "btrfs csum failed ino %llu off"
" %llu csum %u private %u\n",
(unsigned long long)btrfs_ino(inode),
(unsigned long long)start,
- csum, *private);
+ csum, (unsigned)private);
err = -EIO;
}
}
start += bvec->bv_len;
- private++;
bvec++;
} while (bvec <= bvec_end);
@@ -5956,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
dip->logical_offset + dip->bytes - 1);
bio->bi_private = dip->private;
- kfree(dip->csums);
kfree(dip);
/* If we had a csum failure make sure to clear the uptodate flag */
@@ -6062,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
int rw, u64 file_offset, int skip_sum,
- u32 *csums, int async_submit)
+ int async_submit)
{
int write = rw & REQ_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -6095,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
if (ret)
goto err;
} else if (!skip_sum) {
- ret = btrfs_lookup_bio_sums_dio(root, inode, bio,
- file_offset, csums);
+ ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset);
if (ret)
goto err;
}
@@ -6122,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
u64 submit_len = 0;
u64 map_length;
int nr_pages = 0;
- u32 *csums = dip->csums;
int ret = 0;
int async_submit = 0;
- int write = rw & REQ_WRITE;
map_length = orig_bio->bi_size;
ret = btrfs_map_block(map_tree, READ, start_sector << 9,
@@ -6161,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
atomic_inc(&dip->pending_bios);
ret = __btrfs_submit_dio_bio(bio, inode, rw,
file_offset, skip_sum,
- csums, async_submit);
+ async_submit);
if (ret) {
bio_put(bio);
atomic_dec(&dip->pending_bios);
goto out_err;
}
- /* Write's use the ordered csums */
- if (!write && !skip_sum)
- csums = csums + nr_pages;
start_sector += submit_len >> 9;
file_offset += submit_len;
@@ -6200,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
submit:
ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
- csums, async_submit);
+ async_submit);
if (!ret)
return 0;
@@ -6236,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
ret = -ENOMEM;
goto free_ordered;
}
- dip->csums = NULL;
-
- /* Write's use the ordered csum stuff, so we don't need dip->csums */
- if (!write && !skip_sum) {
- dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
- if (!dip->csums) {
- kfree(dip);
- ret = -ENOMEM;
- goto free_ordered;
- }
- }
dip->private = bio->bi_private;
dip->inode = inode;
@@ -6331,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
out:
return retval;
}
+
static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct btrfs_ordered_extent *ordered;
- struct extent_state *cached_state = NULL;
- u64 lockstart, lockend;
- ssize_t ret;
- int writing = rw & WRITE;
- int write_bits = 0;
- size_t count = iov_length(iov, nr_segs);
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
- offset, nr_segs)) {
+ offset, nr_segs))
return 0;
- }
- lockstart = offset;
- lockend = offset + count - 1;
-
- if (writing) {
- ret = btrfs_delalloc_reserve_space(inode, count);
- if (ret)
- goto out;
- }
-
- while (1) {
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- 0, &cached_state);
- /*
- * We're concerned with the entire range that we're going to be
- * doing DIO to, so we need to make sure theres no ordered
- * extents in this range.
- */
- ordered = btrfs_lookup_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
-
- /*
- * We need to make sure there are no buffered pages in this
- * range either, we could have raced between the invalidate in
- * generic_file_direct_write and locking the extent. The
- * invalidate needs to happen so that reads after a write do not
- * get stale data.
- */
- if (!ordered && (!writing ||
- !test_range_bit(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, EXTENT_UPTODATE, 0,
- cached_state)))
- break;
-
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
-
- if (ordered) {
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- } else {
- /* Screw you mmap */
- ret = filemap_write_and_wait_range(file->f_mapping,
- lockstart,
- lockend);
- if (ret)
- goto out;
-
- /*
- * If we found a page that couldn't be invalidated just
- * fall back to buffered.
- */
- ret = invalidate_inode_pages2_range(file->f_mapping,
- lockstart >> PAGE_CACHE_SHIFT,
- lockend >> PAGE_CACHE_SHIFT);
- if (ret) {
- if (ret == -EBUSY)
- ret = 0;
- goto out;
- }
- }
-
- cond_resched();
- }
-
- /*
- * we don't use btrfs_set_extent_delalloc because we don't want
- * the dirty or uptodate bits
- */
- if (writing) {
- write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING;
- ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- EXTENT_DELALLOC, NULL, &cached_state,
- GFP_NOFS);
- if (ret) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_LOCKED | write_bits,
- 1, 0, &cached_state, GFP_NOFS);
- goto out;
- }
- }
-
- free_extent_state(cached_state);
- cached_state = NULL;
-
- ret = __blockdev_direct_IO(rw, iocb, inode,
+ return __blockdev_direct_IO(rw, iocb, inode,
BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, 0);
-
- if (ret < 0 && ret != -EIOCBQUEUED) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
- } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) {
- /*
- * We're falling back to buffered, unlock the section we didn't
- * do IO on.
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret,
- offset + iov_length(iov, nr_segs) - 1,
- EXTENT_LOCKED | write_bits, 1, 0,
- &cached_state, GFP_NOFS);
- }
-out:
- free_extent_state(cached_state);
- return ret;
}
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
@@ -6620,6 +6632,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_start;
u64 page_end;
+ sb_start_pagefault(inode->i_sb);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
if (!ret) {
ret = file_update_time(vma->vm_file);
@@ -6709,12 +6722,15 @@ again:
unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
out_unlock:
- if (!ret)
+ if (!ret) {
+ sb_end_pagefault(inode->i_sb);
return VM_FAULT_LOCKED;
+ }
unlock_page(page);
out:
btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
out_noreserve:
+ sb_end_pagefault(inode->i_sb);
return ret;
}
@@ -6939,7 +6955,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
return NULL;
ei->root = NULL;
- ei->space_info = NULL;
ei->generation = 0;
ei->last_trans = 0;
ei->last_sub_trans = 0;
@@ -7046,7 +7061,7 @@ int btrfs_drop_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
if (btrfs_root_refs(&root->root_item) == 0 &&
- !btrfs_is_free_space_inode(root, inode))
+ !btrfs_is_free_space_inode(inode))
return 1;
else
return generic_drop_inode(inode);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 1e9f6c019ad0..9df50fa8a078 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
+#include <linux/uuid.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
@@ -53,6 +54,7 @@
#include "inode-map.h"
#include "backref.h"
#include "rcu-string.h"
+#include "send.h"
/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -193,6 +195,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
mutex_lock(&inode->i_mutex);
ip_oldflags = ip->flags;
@@ -207,10 +213,6 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
}
}
- ret = mnt_want_write_file(file);
- if (ret)
- goto out_unlock;
-
if (flags & FS_SYNC_FL)
ip->flags |= BTRFS_INODE_SYNC;
else
@@ -273,9 +275,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
inode->i_flags = i_oldflags;
}
- mnt_drop_write_file(file);
out_unlock:
mutex_unlock(&inode->i_mutex);
+ mnt_drop_write_file(file);
return ret;
}
@@ -336,7 +338,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
static noinline int create_subvol(struct btrfs_root *root,
struct dentry *dentry,
char *name, int namelen,
- u64 *async_transid)
+ u64 *async_transid,
+ struct btrfs_qgroup_inherit **inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;
@@ -346,11 +349,13 @@ static noinline int create_subvol(struct btrfs_root *root,
struct btrfs_root *new_root;
struct dentry *parent = dentry->d_parent;
struct inode *dir;
+ struct timespec cur_time = CURRENT_TIME;
int ret;
int err;
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
+ uuid_le new_uuid;
ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret)
@@ -368,6 +373,11 @@ static noinline int create_subvol(struct btrfs_root *root,
if (IS_ERR(trans))
return PTR_ERR(trans);
+ ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
+ inherit ? *inherit : NULL);
+ if (ret)
+ goto fail;
+
leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
0, objectid, NULL, 0, 0, 0);
if (IS_ERR(leaf)) {
@@ -389,8 +399,9 @@ static noinline int create_subvol(struct btrfs_root *root,
BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
+ memset(&root_item, 0, sizeof(root_item));
+
inode_item = &root_item.inode;
- memset(inode_item, 0, sizeof(*inode_item));
inode_item->generation = cpu_to_le64(1);
inode_item->size = cpu_to_le64(3);
inode_item->nlink = cpu_to_le32(1);
@@ -408,8 +419,15 @@ static noinline int create_subvol(struct btrfs_root *root,
btrfs_set_root_used(&root_item, leaf->len);
btrfs_set_root_last_snapshot(&root_item, 0);
- memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
- root_item.drop_level = 0;
+ btrfs_set_root_generation_v2(&root_item,
+ btrfs_root_generation(&root_item));
+ uuid_le_gen(&new_uuid);
+ memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE);
+ root_item.otime.sec = cpu_to_le64(cur_time.tv_sec);
+ root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec);
+ root_item.ctime = root_item.otime;
+ btrfs_set_root_ctransid(&root_item, trans->transid);
+ btrfs_set_root_otransid(&root_item, trans->transid);
btrfs_tree_unlock(leaf);
free_extent_buffer(leaf);
@@ -484,7 +502,7 @@ fail:
static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
char *name, int namelen, u64 *async_transid,
- bool readonly)
+ bool readonly, struct btrfs_qgroup_inherit **inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
@@ -502,6 +520,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
+ if (inherit) {
+ pending_snapshot->inherit = *inherit;
+ *inherit = NULL; /* take responsibility to free it */
+ }
trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
if (IS_ERR(trans)) {
@@ -635,7 +657,8 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
- u64 *async_transid, bool readonly)
+ u64 *async_transid, bool readonly,
+ struct btrfs_qgroup_inherit **inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
@@ -652,13 +675,9 @@ static noinline int btrfs_mksubvol(struct path *parent,
if (dentry->d_inode)
goto out_dput;
- error = mnt_want_write(parent->mnt);
- if (error)
- goto out_dput;
-
error = btrfs_may_create(dir, dentry);
if (error)
- goto out_drop_write;
+ goto out_dput;
down_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
@@ -666,18 +685,16 @@ static noinline int btrfs_mksubvol(struct path *parent,
goto out_up_read;
if (snap_src) {
- error = create_snapshot(snap_src, dentry,
- name, namelen, async_transid, readonly);
+ error = create_snapshot(snap_src, dentry, name, namelen,
+ async_transid, readonly, inherit);
} else {
error = create_subvol(BTRFS_I(dir)->root, dentry,
- name, namelen, async_transid);
+ name, namelen, async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
out_up_read:
up_read(&BTRFS_I(dir)->root->fs_info->subvol_sem);
-out_drop_write:
- mnt_drop_write(parent->mnt);
out_dput:
dput(dentry);
out_unlock:
@@ -832,7 +849,8 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
}
static int should_defrag_range(struct inode *inode, u64 start, int thresh,
- u64 *last_len, u64 *skip, u64 *defrag_end)
+ u64 *last_len, u64 *skip, u64 *defrag_end,
+ int compress)
{
struct extent_map *em;
int ret = 1;
@@ -863,7 +881,7 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh,
* we hit a real extent, if it is big or the next extent is not a
* real extent, don't bother defragging it
*/
- if ((*last_len == 0 || *last_len >= thresh) &&
+ if (!compress && (*last_len == 0 || *last_len >= thresh) &&
(em->len >= thresh || !next_mergeable))
ret = 0;
out:
@@ -1047,11 +1065,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
u64 newer_than, unsigned long max_to_defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_super_block *disk_super;
struct file_ra_state *ra = NULL;
unsigned long last_index;
u64 isize = i_size_read(inode);
- u64 features;
u64 last_len = 0;
u64 skip = 0;
u64 defrag_end = 0;
@@ -1145,7 +1161,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
- &defrag_end)) {
+ &defrag_end, range->flags &
+ BTRFS_DEFRAG_RANGE_COMPRESS)) {
unsigned long next;
/*
* the should_defrag function tells us how much to skip
@@ -1237,11 +1254,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
mutex_unlock(&inode->i_mutex);
}
- disk_super = root->fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
if (range->compress_type == BTRFS_COMPRESS_LZO) {
- features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
- btrfs_set_super_incompat_flags(disk_super, features);
+ btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
}
ret = defrag_count;
@@ -1379,41 +1393,39 @@ out:
}
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
- char *name,
- unsigned long fd,
- int subvol,
- u64 *transid,
- bool readonly)
+ char *name, unsigned long fd, int subvol,
+ u64 *transid, bool readonly,
+ struct btrfs_qgroup_inherit **inherit)
{
- struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
struct file *src_file;
int namelen;
int ret = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
namelen = strlen(name);
if (strchr(name, '/')) {
ret = -EINVAL;
- goto out;
+ goto out_drop_write;
}
if (name[0] == '.' &&
(namelen == 1 || (name[1] == '.' && namelen == 2))) {
ret = -EEXIST;
- goto out;
+ goto out_drop_write;
}
if (subvol) {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
- NULL, transid, readonly);
+ NULL, transid, readonly, inherit);
} else {
struct inode *src_inode;
src_file = fget(fd);
if (!src_file) {
ret = -EINVAL;
- goto out;
+ goto out_drop_write;
}
src_inode = src_file->f_path.dentry->d_inode;
@@ -1422,13 +1434,15 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
"another FS\n");
ret = -EINVAL;
fput(src_file);
- goto out;
+ goto out_drop_write;
}
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
- transid, readonly);
+ transid, readonly, inherit);
fput(src_file);
}
+out_drop_write:
+ mnt_drop_write_file(file);
out:
return ret;
}
@@ -1446,7 +1460,7 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol,
- NULL, false);
+ NULL, false, NULL);
kfree(vol_args);
return ret;
@@ -1460,6 +1474,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
u64 transid = 0;
u64 *ptr = NULL;
bool readonly = false;
+ struct btrfs_qgroup_inherit *inherit = NULL;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
@@ -1467,7 +1482,8 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
if (vol_args->flags &
- ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY)) {
+ ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
+ BTRFS_SUBVOL_QGROUP_INHERIT)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -1476,10 +1492,21 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ptr = &transid;
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
+ if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
+ if (vol_args->size > PAGE_CACHE_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+ inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
+ if (IS_ERR(inherit)) {
+ ret = PTR_ERR(inherit);
+ goto out;
+ }
+ }
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
- vol_args->fd, subvol,
- ptr, readonly);
+ vol_args->fd, subvol, ptr,
+ readonly, &inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
@@ -1488,6 +1515,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = -EFAULT;
out:
kfree(vol_args);
+ kfree(inherit);
return ret;
}
@@ -1523,29 +1551,40 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
u64 flags;
int ret = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
+ ret = mnt_want_write_file(file);
+ if (ret)
+ goto out;
- if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
- return -EINVAL;
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
- if (copy_from_user(&flags, arg, sizeof(flags)))
- return -EFAULT;
+ if (copy_from_user(&flags, arg, sizeof(flags))) {
+ ret = -EFAULT;
+ goto out_drop_write;
+ }
- if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
- return -EINVAL;
+ if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
+ ret = -EINVAL;
+ goto out_drop_write;
+ }
- if (flags & ~BTRFS_SUBVOL_RDONLY)
- return -EOPNOTSUPP;
+ if (flags & ~BTRFS_SUBVOL_RDONLY) {
+ ret = -EOPNOTSUPP;
+ goto out_drop_write;
+ }
- if (!inode_owner_or_capable(inode))
- return -EACCES;
+ if (!inode_owner_or_capable(inode)) {
+ ret = -EACCES;
+ goto out_drop_write;
+ }
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
- goto out;
+ goto out_drop_sem;
root_flags = btrfs_root_flags(&root->root_item);
if (flags & BTRFS_SUBVOL_RDONLY)
@@ -1568,8 +1607,11 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
out_reset:
if (ret)
btrfs_set_root_flags(&root->root_item, root_flags);
-out:
+out_drop_sem:
up_write(&root->fs_info->subvol_sem);
+out_drop_write:
+ mnt_drop_write_file(file);
+out:
return ret;
}
@@ -2340,6 +2382,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
goto out_drop_write;
}
+ ret = -EXDEV;
+ if (src_file->f_path.mnt != file->f_path.mnt)
+ goto out_fput;
+
src = src_file->f_dentry->d_inode;
ret = -EINVAL;
@@ -2360,7 +2406,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
goto out_fput;
ret = -EXDEV;
- if (src->i_sb != inode->i_sb || BTRFS_I(src)->root != root)
+ if (src->i_sb != inode->i_sb)
goto out_fput;
ret = -ENOMEM;
@@ -2434,13 +2480,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
* note the key will change type as we walk through the
* tree.
*/
- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
+ 0, 0);
if (ret < 0)
goto out;
nritems = btrfs_header_nritems(path->nodes[0]);
if (path->slots[0] >= nritems) {
- ret = btrfs_next_leaf(root, path);
+ ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
if (ret < 0)
goto out;
if (ret > 0)
@@ -2749,8 +2796,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
struct btrfs_path *path;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
- struct btrfs_super_block *disk_super;
- u64 features;
u64 objectid = 0;
u64 dir_id;
@@ -2801,12 +2846,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- disk_super = root->fs_info->super_copy;
- features = btrfs_super_incompat_flags(disk_super);
- if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
- features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
- btrfs_set_super_incompat_flags(disk_super, features);
- }
+ btrfs_set_fs_incompat(root->fs_info, DEFAULT_SUBVOL);
btrfs_end_transaction(trans, root);
return 0;
@@ -3063,19 +3103,21 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
}
static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
- void __user *arg, int reset_after_read)
+ void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
int ret;
- if (reset_after_read && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
sa = memdup_user(arg, sizeof(*sa));
if (IS_ERR(sa))
return PTR_ERR(sa);
- ret = btrfs_get_dev_stats(root, sa, reset_after_read);
+ if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) {
+ kfree(sa);
+ return -EPERM;
+ }
+
+ ret = btrfs_get_dev_stats(root, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@@ -3265,9 +3307,6 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
-
ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3390,6 +3429,264 @@ out:
return ret;
}
+static long btrfs_ioctl_quota_ctl(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_quota_ctl_args *sa;
+ struct btrfs_trans_handle *trans = NULL;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ if (sa->cmd != BTRFS_QUOTA_CTL_RESCAN) {
+ trans = btrfs_start_transaction(root, 2);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+ }
+
+ switch (sa->cmd) {
+ case BTRFS_QUOTA_CTL_ENABLE:
+ ret = btrfs_quota_enable(trans, root->fs_info);
+ break;
+ case BTRFS_QUOTA_CTL_DISABLE:
+ ret = btrfs_quota_disable(trans, root->fs_info);
+ break;
+ case BTRFS_QUOTA_CTL_RESCAN:
+ ret = btrfs_quota_rescan(root->fs_info);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user(arg, sa, sizeof(*sa)))
+ ret = -EFAULT;
+
+ if (trans) {
+ err = btrfs_commit_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+ }
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_assign(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_assign_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ if (sa->assign) {
+ ret = btrfs_add_qgroup_relation(trans, root->fs_info,
+ sa->src, sa->dst);
+ } else {
+ ret = btrfs_del_qgroup_relation(trans, root->fs_info,
+ sa->src, sa->dst);
+ }
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_create(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_create_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ if (sa->create) {
+ ret = btrfs_create_qgroup(trans, root->fs_info, sa->qgroupid,
+ NULL);
+ } else {
+ ret = btrfs_remove_qgroup(trans, root->fs_info, sa->qgroupid);
+ }
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_qgroup_limit(struct btrfs_root *root, void __user *arg)
+{
+ struct btrfs_ioctl_qgroup_limit_args *sa;
+ struct btrfs_trans_handle *trans;
+ int ret;
+ int err;
+ u64 qgroupid;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (root->fs_info->sb->s_flags & MS_RDONLY)
+ return -EROFS;
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa))
+ return PTR_ERR(sa);
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ qgroupid = sa->qgroupid;
+ if (!qgroupid) {
+ /* take the current subvol as qgroup */
+ qgroupid = root->root_key.objectid;
+ }
+
+ /* FIXME: check if the IDs really exist */
+ ret = btrfs_limit_qgroup(trans, root->fs_info, qgroupid, &sa->lim);
+
+ err = btrfs_end_transaction(trans, root);
+ if (err && !ret)
+ ret = err;
+
+out:
+ kfree(sa);
+ return ret;
+}
+
+static long btrfs_ioctl_set_received_subvol(struct file *file,
+ void __user *arg)
+{
+ struct btrfs_ioctl_received_subvol_args *sa = NULL;
+ struct inode *inode = fdentry(file)->d_inode;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_root_item *root_item = &root->root_item;
+ struct btrfs_trans_handle *trans;
+ struct timespec ct = CURRENT_TIME;
+ int ret = 0;
+
+ ret = mnt_want_write_file(file);
+ if (ret < 0)
+ return ret;
+
+ down_write(&root->fs_info->subvol_sem);
+
+ if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (btrfs_root_readonly(root)) {
+ ret = -EROFS;
+ goto out;
+ }
+
+ if (!inode_owner_or_capable(inode)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ sa = memdup_user(arg, sizeof(*sa));
+ if (IS_ERR(sa)) {
+ ret = PTR_ERR(sa);
+ sa = NULL;
+ goto out;
+ }
+
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ sa->rtransid = trans->transid;
+ sa->rtime.sec = ct.tv_sec;
+ sa->rtime.nsec = ct.tv_nsec;
+
+ memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE);
+ btrfs_set_root_stransid(root_item, sa->stransid);
+ btrfs_set_root_rtransid(root_item, sa->rtransid);
+ root_item->stime.sec = cpu_to_le64(sa->stime.sec);
+ root_item->stime.nsec = cpu_to_le32(sa->stime.nsec);
+ root_item->rtime.sec = cpu_to_le64(sa->rtime.sec);
+ root_item->rtime.nsec = cpu_to_le32(sa->rtime.nsec);
+
+ ret = btrfs_update_root(trans, root->fs_info->tree_root,
+ &root->root_key, &root->root_item);
+ if (ret < 0) {
+ btrfs_end_transaction(trans, root);
+ trans = NULL;
+ goto out;
+ } else {
+ ret = btrfs_commit_transaction(trans, root);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = copy_to_user(arg, sa, sizeof(*sa));
+ if (ret)
+ ret = -EFAULT;
+
+out:
+ kfree(sa);
+ up_write(&root->fs_info->subvol_sem);
+ mnt_drop_write_file(file);
+ return ret;
+}
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -3411,6 +3708,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_snap_create_v2(file, argp, 0);
case BTRFS_IOC_SUBVOL_CREATE:
return btrfs_ioctl_snap_create(file, argp, 1);
+ case BTRFS_IOC_SUBVOL_CREATE_V2:
+ return btrfs_ioctl_snap_create_v2(file, argp, 1);
case BTRFS_IOC_SNAP_DESTROY:
return btrfs_ioctl_snap_destroy(file, argp);
case BTRFS_IOC_SUBVOL_GETFLAGS:
@@ -3472,10 +3771,20 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_balance_ctl(root, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
return btrfs_ioctl_balance_progress(root, argp);
+ case BTRFS_IOC_SET_RECEIVED_SUBVOL:
+ return btrfs_ioctl_set_received_subvol(file, argp);
+ case BTRFS_IOC_SEND:
+ return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp, 0);
- case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
- return btrfs_ioctl_get_dev_stats(root, argp, 1);
+ return btrfs_ioctl_get_dev_stats(root, argp);
+ case BTRFS_IOC_QUOTA_CTL:
+ return btrfs_ioctl_quota_ctl(root, argp);
+ case BTRFS_IOC_QGROUP_ASSIGN:
+ return btrfs_ioctl_qgroup_assign(root, argp);
+ case BTRFS_IOC_QGROUP_CREATE:
+ return btrfs_ioctl_qgroup_create(root, argp);
+ case BTRFS_IOC_QGROUP_LIMIT:
+ return btrfs_ioctl_qgroup_limit(root, argp);
}
return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index e440aa653c30..731e2875ab93 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -32,15 +32,46 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
+#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_FSID_SIZE 16
#define BTRFS_UUID_SIZE 16
+#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
+
+struct btrfs_qgroup_limit {
+ __u64 flags;
+ __u64 max_rfer;
+ __u64 max_excl;
+ __u64 rsv_rfer;
+ __u64 rsv_excl;
+};
+
+struct btrfs_qgroup_inherit {
+ __u64 flags;
+ __u64 num_qgroups;
+ __u64 num_ref_copies;
+ __u64 num_excl_copies;
+ struct btrfs_qgroup_limit lim;
+ __u64 qgroups[0];
+};
+
+struct btrfs_ioctl_qgroup_limit_args {
+ __u64 qgroupid;
+ struct btrfs_qgroup_limit lim;
+};
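For illustration, a userspace caller could fill this structure roughly as in the sketch below. BTRFS_IOC_QGROUP_LIMIT is defined further down in this header; the BTRFS_QGROUP_LIMIT_MAX_RFER flag bit is assumed to come from ctree.h rather than this file, so its use here is an assumption of the sketch.

/* Minimal sketch: cap the referenced bytes of a qgroup. qgroupid 0 is
 * interpreted by the kernel handler as "the current subvolume".
 */
#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int qgroup_limit_rfer(int fs_fd, __u64 qgroupid, __u64 max_bytes)
{
        struct btrfs_ioctl_qgroup_limit_args args;

        memset(&args, 0, sizeof(args));
        args.qgroupid = qgroupid;                       /* 0 = current subvolume */
        args.lim.flags = BTRFS_QGROUP_LIMIT_MAX_RFER;   /* assumed, see note above */
        args.lim.max_rfer = max_bytes;
        return ioctl(fs_fd, BTRFS_IOC_QGROUP_LIMIT, &args);
}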
+
#define BTRFS_SUBVOL_NAME_MAX 4039
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
__u64 flags;
- __u64 unused[4];
+ union {
+ struct {
+ __u64 size;
+ struct btrfs_qgroup_inherit __user *qgroup_inherit;
+ };
+ __u64 unused[4];
+ };
char name[BTRFS_SUBVOL_NAME_MAX + 1];
};
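A rough sketch of how the new union is meant to be driven from userspace, assuming a userspace copy of this header (with the __user annotation stripped) and linux/types.h are available; the fd names and qgroup id are hypothetical. The size field is checked by the kernel against PAGE_CACHE_SIZE before the inherit data is copied in.

/* Minimal sketch: create a snapshot that inherits qgroup membership.
 * "dest_dir_fd" is an open fd of the destination directory, "src_fd"
 * an open fd of the source subvolume; both are assumptions of this
 * sketch and error handling is reduced to a single check.
 */
#include <linux/types.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int snap_create_with_inherit(int dest_dir_fd, int src_fd,
                                    const char *name, __u64 qgroupid)
{
        struct btrfs_qgroup_inherit *inherit;
        struct btrfs_ioctl_vol_args_v2 args;
        size_t sz = sizeof(*inherit) + sizeof(__u64);   /* one qgroup entry */
        int ret;

        inherit = calloc(1, sz);
        if (!inherit)
                return -1;
        inherit->num_qgroups = 1;
        inherit->qgroups[0] = qgroupid;         /* hypothetical qgroup to join */

        memset(&args, 0, sizeof(args));
        args.fd = src_fd;
        args.flags = BTRFS_SUBVOL_QGROUP_INHERIT;
        args.size = sz;
        args.qgroup_inherit = inherit;
        strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);

        ret = ioctl(dest_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);
        free(inherit);
        return ret;
}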
@@ -285,9 +316,13 @@ enum btrfs_dev_stat_values {
BTRFS_DEV_STAT_VALUES_MAX
};
+/* Reset statistics after reading; needs SYS_ADMIN capability */
+#define BTRFS_DEV_STATS_RESET (1ULL << 0)
+
struct btrfs_ioctl_get_dev_stats {
__u64 devid; /* in */
__u64 nr_items; /* in/out */
+ __u64 flags; /* in/out */
/* out values: */
__u64 values[BTRFS_DEV_STAT_VALUES_MAX];
@@ -295,6 +330,48 @@ struct btrfs_ioctl_get_dev_stats {
__u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
};
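With the reset behaviour now carried in the flags field instead of a separate ioctl number, a userspace reader of the statistics might look roughly like the sketch below; fs_fd and devid are caller-supplied assumptions.

/* Minimal sketch: read (and optionally reset) the per-device error
 * counters. "fs_fd" is any open fd on the mounted btrfs filesystem.
 */
#include <linux/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static int read_dev_stats(int fs_fd, __u64 devid, int reset)
{
        struct btrfs_ioctl_get_dev_stats args;
        __u64 i;

        memset(&args, 0, sizeof(args));
        args.devid = devid;
        args.nr_items = BTRFS_DEV_STAT_VALUES_MAX;
        if (reset)
                args.flags = BTRFS_DEV_STATS_RESET;     /* needs CAP_SYS_ADMIN */

        if (ioctl(fs_fd, BTRFS_IOC_GET_DEV_STATS, &args) < 0)
                return -1;

        for (i = 0; i < args.nr_items && i < BTRFS_DEV_STAT_VALUES_MAX; i++)
                printf("stat[%llu] = %llu\n",
                       (unsigned long long)i,
                       (unsigned long long)args.values[i]);
        return 0;
}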
+#define BTRFS_QUOTA_CTL_ENABLE 1
+#define BTRFS_QUOTA_CTL_DISABLE 2
+#define BTRFS_QUOTA_CTL_RESCAN 3
+struct btrfs_ioctl_quota_ctl_args {
+ __u64 cmd;
+ __u64 status;
+};
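A minimal userspace sketch of the quota control call, assuming fs_fd is any open descriptor on the mounted filesystem; the kernel side additionally requires CAP_SYS_ADMIN and a read-write mount.

#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int quota_ctl(int fs_fd, __u64 cmd)
{
        struct btrfs_ioctl_quota_ctl_args args;

        memset(&args, 0, sizeof(args));
        args.cmd = cmd;         /* e.g. BTRFS_QUOTA_CTL_ENABLE */
        return ioctl(fs_fd, BTRFS_IOC_QUOTA_CTL, &args);
}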
+
+struct btrfs_ioctl_qgroup_assign_args {
+ __u64 assign;
+ __u64 src;
+ __u64 dst;
+};
+
+struct btrfs_ioctl_qgroup_create_args {
+ __u64 create;
+ __u64 qgroupid;
+};
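Creating a qgroup and wiring it into a parent is two small ioctl calls (the ioctl numbers are defined further down); a hedged sketch with hypothetical qgroup ids follows. Per the handlers above, create = 0 removes a qgroup and assign = 0 breaks a relation.

#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int qgroup_create(int fs_fd, __u64 qgroupid)
{
        struct btrfs_ioctl_qgroup_create_args args;

        memset(&args, 0, sizeof(args));
        args.create = 1;
        args.qgroupid = qgroupid;
        return ioctl(fs_fd, BTRFS_IOC_QGROUP_CREATE, &args);
}

static int qgroup_assign(int fs_fd, __u64 src, __u64 dst)
{
        struct btrfs_ioctl_qgroup_assign_args args;

        memset(&args, 0, sizeof(args));
        args.assign = 1;        /* add src as a member of dst */
        args.src = src;
        args.dst = dst;
        return ioctl(fs_fd, BTRFS_IOC_QGROUP_ASSIGN, &args);
}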
+struct btrfs_ioctl_timespec {
+ __u64 sec;
+ __u32 nsec;
+};
+
+struct btrfs_ioctl_received_subvol_args {
+ char uuid[BTRFS_UUID_SIZE]; /* in */
+ __u64 stransid; /* in */
+ __u64 rtransid; /* out */
+ struct btrfs_ioctl_timespec stime; /* in */
+ struct btrfs_ioctl_timespec rtime; /* out */
+ __u64 flags; /* in */
+ __u64 reserved[16]; /* in */
+};
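A receive implementation would stamp the freshly created subvolume roughly as below; subvol_fd must refer to the subvolume root itself, and the uuid/stransid/stime values are assumed to come from the send stream. rtransid and rtime are filled in by the kernel on return.

#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_received(int subvol_fd, const char uuid[BTRFS_UUID_SIZE],
                        __u64 stransid, __u64 stime_sec, __u32 stime_nsec)
{
        struct btrfs_ioctl_received_subvol_args args;

        memset(&args, 0, sizeof(args));
        memcpy(args.uuid, uuid, BTRFS_UUID_SIZE);
        args.stransid = stransid;
        args.stime.sec = stime_sec;
        args.stime.nsec = stime_nsec;
        return ioctl(subvol_fd, BTRFS_IOC_SET_RECEIVED_SUBVOL, &args);
}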
+
+struct btrfs_ioctl_send_args {
+ __s64 send_fd; /* in */
+ __u64 clone_sources_count; /* in */
+ __u64 __user *clone_sources; /* in */
+ __u64 parent_root; /* in */
+ __u64 flags; /* in */
+ __u64 reserved[4]; /* in */
+};
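The struct only documents inputs, so the following is a hedged reading of it: the ioctl is issued on the (read-only) subvolume being sent, the stream is written to send_fd, and parent_root selects the base for an incremental send (0 for a full send).

#include <linux/types.h>
#include <string.h>
#include <sys/ioctl.h>

static int send_subvol(int subvol_fd, int pipe_wr, __u64 parent_root)
{
        struct btrfs_ioctl_send_args args;

        memset(&args, 0, sizeof(args));
        args.send_fd = pipe_wr;                 /* where the stream goes */
        args.parent_root = parent_root;         /* 0 = full send (assumed) */
        args.clone_sources_count = 0;
        args.clone_sources = NULL;
        return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
}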
+
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
struct btrfs_ioctl_vol_args)
#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -339,6 +416,8 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
struct btrfs_ioctl_vol_args_v2)
+#define BTRFS_IOC_SUBVOL_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 24, \
+ struct btrfs_ioctl_vol_args_v2)
#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
@@ -359,9 +438,19 @@ struct btrfs_ioctl_get_dev_stats {
struct btrfs_ioctl_ino_path_args)
#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_SET_RECEIVED_SUBVOL _IOWR(BTRFS_IOCTL_MAGIC, 37, \
+ struct btrfs_ioctl_received_subvol_args)
+#define BTRFS_IOC_SEND _IOW(BTRFS_IOCTL_MAGIC, 38, struct btrfs_ioctl_send_args)
+#define BTRFS_IOC_DEVICES_READY _IOR(BTRFS_IOCTL_MAGIC, 39, \
+ struct btrfs_ioctl_vol_args)
+#define BTRFS_IOC_QUOTA_CTL _IOWR(BTRFS_IOCTL_MAGIC, 40, \
+ struct btrfs_ioctl_quota_ctl_args)
+#define BTRFS_IOC_QGROUP_ASSIGN _IOW(BTRFS_IOCTL_MAGIC, 41, \
+ struct btrfs_ioctl_qgroup_assign_args)
+#define BTRFS_IOC_QGROUP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 42, \
+ struct btrfs_ioctl_qgroup_create_args)
+#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
+ struct btrfs_ioctl_qgroup_limit_args)
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
struct btrfs_ioctl_get_dev_stats)
-#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
- struct btrfs_ioctl_get_dev_stats)
-
#endif
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 272f911203ff..2a1762c66041 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
if (eb->lock_nested) {
read_lock(&eb->lock);
- if (&eb->lock_nested && current->pid == eb->lock_owner) {
+ if (eb->lock_nested && current->pid == eb->lock_owner) {
read_unlock(&eb->lock);
return;
}
@@ -78,13 +78,15 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
write_lock(&eb->lock);
WARN_ON(atomic_read(&eb->spinning_writers));
atomic_inc(&eb->spinning_writers);
- if (atomic_dec_and_test(&eb->blocking_writers))
+ if (atomic_dec_and_test(&eb->blocking_writers) &&
+ waitqueue_active(&eb->write_lock_wq))
wake_up(&eb->write_lock_wq);
} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
BUG_ON(atomic_read(&eb->blocking_readers) == 0);
read_lock(&eb->lock);
atomic_inc(&eb->spinning_readers);
- if (atomic_dec_and_test(&eb->blocking_readers))
+ if (atomic_dec_and_test(&eb->blocking_readers) &&
+ waitqueue_active(&eb->read_lock_wq))
wake_up(&eb->read_lock_wq);
}
return;
@@ -199,7 +201,8 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
}
btrfs_assert_tree_read_locked(eb);
WARN_ON(atomic_read(&eb->blocking_readers) == 0);
- if (atomic_dec_and_test(&eb->blocking_readers))
+ if (atomic_dec_and_test(&eb->blocking_readers) &&
+ waitqueue_active(&eb->read_lock_wq))
wake_up(&eb->read_lock_wq);
atomic_dec(&eb->read_locks);
}
@@ -247,8 +250,9 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
if (blockers) {
WARN_ON(atomic_read(&eb->spinning_writers));
atomic_dec(&eb->blocking_writers);
- smp_wmb();
- wake_up(&eb->write_lock_wq);
+ smp_mb();
+ if (waitqueue_active(&eb->write_lock_wq))
+ wake_up(&eb->write_lock_wq);
} else {
WARN_ON(atomic_read(&eb->spinning_writers) != 1);
atomic_dec(&eb->spinning_writers);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 643335a4fe3c..051c7fe551dd 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
- * for pdflush to find them
+ * for the flusher thread to find them
*/
if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
filemap_fdatawrite_range(inode->i_mapping, start, end);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
new file mode 100644
index 000000000000..38b42e7bc91d
--- /dev/null
+++ b/fs/btrfs/qgroup.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (C) 2011 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctree.h"
+#include "transaction.h"
+#include "disk-io.h"
+#include "locking.h"
+#include "ulist.h"
+#include "ioctl.h"
+#include "backref.h"
+
+/* TODO XXX FIXME
+ * - subvol delete -> delete when ref goes to 0? delete limits also?
+ * - reorganize keys
+ * - compressed
+ * - sync
+ * - rescan
+ * - copy also limits on subvol creation
+ * - limit
+ * - caches for ulists
+ * - performance benchmarks
+ * - check all ioctl parameters
+ */
+
+/*
+ * one struct for each qgroup, organized in fs_info->qgroup_tree.
+ */
+struct btrfs_qgroup {
+ u64 qgroupid;
+
+ /*
+ * state
+ */
+ u64 rfer; /* referenced */
+ u64 rfer_cmpr; /* referenced compressed */
+ u64 excl; /* exclusive */
+ u64 excl_cmpr; /* exclusive compressed */
+
+ /*
+ * limits
+ */
+ u64 lim_flags; /* which limits are set */
+ u64 max_rfer;
+ u64 max_excl;
+ u64 rsv_rfer;
+ u64 rsv_excl;
+
+ /*
+ * reservation tracking
+ */
+ u64 reserved;
+
+ /*
+ * lists
+ */
+ struct list_head groups; /* groups this group is member of */
+ struct list_head members; /* groups that are members of this group */
+ struct list_head dirty; /* dirty groups */
+ struct rb_node node; /* tree of qgroups */
+
+ /*
+ * temp variables for accounting operations
+ */
+ u64 tag;
+ u64 refcnt;
+};
+
+/*
+ * glue structure to represent the relations between qgroups.
+ */
+struct btrfs_qgroup_list {
+ struct list_head next_group;
+ struct list_head next_member;
+ struct btrfs_qgroup *group;
+ struct btrfs_qgroup *member;
+};
+
+/* must be called with qgroup_lock held */
+static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
+ u64 qgroupid)
+{
+ struct rb_node *n = fs_info->qgroup_tree.rb_node;
+ struct btrfs_qgroup *qgroup;
+
+ while (n) {
+ qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ if (qgroup->qgroupid < qgroupid)
+ n = n->rb_left;
+ else if (qgroup->qgroupid > qgroupid)
+ n = n->rb_right;
+ else
+ return qgroup;
+ }
+ return NULL;
+}
+
+/* must be called with qgroup_lock held */
+static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
+ u64 qgroupid)
+{
+ struct rb_node **p = &fs_info->qgroup_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct btrfs_qgroup *qgroup;
+
+ while (*p) {
+ parent = *p;
+ qgroup = rb_entry(parent, struct btrfs_qgroup, node);
+
+ if (qgroup->qgroupid < qgroupid)
+ p = &(*p)->rb_left;
+ else if (qgroup->qgroupid > qgroupid)
+ p = &(*p)->rb_right;
+ else
+ return qgroup;
+ }
+
+ qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
+ if (!qgroup)
+ return ERR_PTR(-ENOMEM);
+
+ qgroup->qgroupid = qgroupid;
+ INIT_LIST_HEAD(&qgroup->groups);
+ INIT_LIST_HEAD(&qgroup->members);
+ INIT_LIST_HEAD(&qgroup->dirty);
+
+ rb_link_node(&qgroup->node, parent, p);
+ rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);
+
+ return qgroup;
+}
+
+/* must be called with qgroup_lock held */
+static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
+{
+ struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);
+ struct btrfs_qgroup_list *list;
+
+ if (!qgroup)
+ return -ENOENT;
+
+ rb_erase(&qgroup->node, &fs_info->qgroup_tree);
+ list_del(&qgroup->dirty);
+
+ while (!list_empty(&qgroup->groups)) {
+ list = list_first_entry(&qgroup->groups,
+ struct btrfs_qgroup_list, next_group);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+
+ while (!list_empty(&qgroup->members)) {
+ list = list_first_entry(&qgroup->members,
+ struct btrfs_qgroup_list, next_member);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+ kfree(qgroup);
+
+ return 0;
+}
+
+/* must be called with qgroup_lock held */
+static int add_relation_rb(struct btrfs_fs_info *fs_info,
+ u64 memberid, u64 parentid)
+{
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup_list *list;
+
+ member = find_qgroup_rb(fs_info, memberid);
+ parent = find_qgroup_rb(fs_info, parentid);
+ if (!member || !parent)
+ return -ENOENT;
+
+ list = kzalloc(sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ list->group = parent;
+ list->member = member;
+ list_add_tail(&list->next_group, &member->groups);
+ list_add_tail(&list->next_member, &parent->members);
+
+ return 0;
+}
+
+/* must be called with qgroup_lock held */
+static int del_relation_rb(struct btrfs_fs_info *fs_info,
+ u64 memberid, u64 parentid)
+{
+ struct btrfs_qgroup *member;
+ struct btrfs_qgroup *parent;
+ struct btrfs_qgroup_list *list;
+
+ member = find_qgroup_rb(fs_info, memberid);
+ parent = find_qgroup_rb(fs_info, parentid);
+ if (!member || !parent)
+ return -ENOENT;
+
+ list_for_each_entry(list, &member->groups, next_group) {
+ if (list->group == parent) {
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/*
+ * The full config is read in one go; this is only called from open_ctree().
+ * It doesn't use any locking, as at this point we're still single-threaded
+ */
+int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *l;
+ int slot;
+ int ret = 0;
+ u64 flags = 0;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* default this to quota off, in case no status key is found */
+ fs_info->qgroup_flags = 0;
+
+ /*
+ * pass 1: read status, all qgroup infos and limits
+ */
+ key.objectid = 0;
+ key.type = 0;
+ key.offset = 0;
+ ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
+ if (ret)
+ goto out;
+
+ while (1) {
+ struct btrfs_qgroup *qgroup;
+
+ slot = path->slots[0];
+ l = path->nodes[0];
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+
+ if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
+ struct btrfs_qgroup_status_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_status_item);
+
+ if (btrfs_qgroup_status_version(l, ptr) !=
+ BTRFS_QGROUP_STATUS_VERSION) {
+ printk(KERN_ERR
+ "btrfs: old qgroup version, quota disabled\n");
+ goto out;
+ }
+ if (btrfs_qgroup_status_generation(l, ptr) !=
+ fs_info->generation) {
+ flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ printk(KERN_ERR
+ "btrfs: qgroup generation mismatch, "
+ "marked as inconsistent\n");
+ }
+ fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
+ ptr);
+ /* FIXME read scan element */
+ goto next1;
+ }
+
+ if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
+ found_key.type != BTRFS_QGROUP_LIMIT_KEY)
+ goto next1;
+
+ qgroup = find_qgroup_rb(fs_info, found_key.offset);
+ if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
+ (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
+ printk(KERN_ERR "btrfs: inconsistent qgroup config\n");
+ flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ }
+ if (!qgroup) {
+ qgroup = add_qgroup_rb(fs_info, found_key.offset);
+ if (IS_ERR(qgroup)) {
+ ret = PTR_ERR(qgroup);
+ goto out;
+ }
+ }
+ switch (found_key.type) {
+ case BTRFS_QGROUP_INFO_KEY: {
+ struct btrfs_qgroup_info_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_info_item);
+ qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
+ qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
+ qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
+ qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
+ /* generation currently unused */
+ break;
+ }
+ case BTRFS_QGROUP_LIMIT_KEY: {
+ struct btrfs_qgroup_limit_item *ptr;
+
+ ptr = btrfs_item_ptr(l, slot,
+ struct btrfs_qgroup_limit_item);
+ qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
+ qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
+ qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
+ qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
+ qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
+ break;
+ }
+ }
+next1:
+ ret = btrfs_next_item(quota_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ break;
+ }
+ btrfs_release_path(path);
+
+ /*
+ * pass 2: read all qgroup relations
+ */
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
+ if (ret)
+ goto out;
+ while (1) {
+ slot = path->slots[0];
+ l = path->nodes[0];
+ btrfs_item_key_to_cpu(l, &found_key, slot);
+
+ if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
+ goto next2;
+
+ if (found_key.objectid > found_key.offset) {
+ /* parent <- member, not needed to build config */
+ /* FIXME should we omit the key completely? */
+ goto next2;
+ }
+
+ ret = add_relation_rb(fs_info, found_key.objectid,
+ found_key.offset);
+ if (ret)
+ goto out;
+next2:
+ ret = btrfs_next_item(quota_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ break;
+ }
+out:
+ fs_info->qgroup_flags |= flags;
+ if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+ }
+ btrfs_free_path(path);
+
+ return ret < 0 ? ret : 0;
+}
+
+/*
+ * This is only called from close_ctree() or open_ctree(), both in single-
+ * threaded paths. Clean up the in-memory structures. No locking needed.
+ */
+void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
+{
+ struct rb_node *n;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_qgroup_list *list;
+
+ while ((n = rb_first(&fs_info->qgroup_tree))) {
+ qgroup = rb_entry(n, struct btrfs_qgroup, node);
+ rb_erase(n, &fs_info->qgroup_tree);
+
+ WARN_ON(!list_empty(&qgroup->dirty));
+
+ while (!list_empty(&qgroup->groups)) {
+ list = list_first_entry(&qgroup->groups,
+ struct btrfs_qgroup_list,
+ next_group);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+
+ while (!list_empty(&qgroup->members)) {
+ list = list_first_entry(&qgroup->members,
+ struct btrfs_qgroup_list,
+ next_member);
+ list_del(&list->next_group);
+ list_del(&list->next_member);
+ kfree(list);
+ }
+ kfree(qgroup);
+ }
+}
+
+static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root,
+ u64 src, u64 dst)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = src;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = dst;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);
+
+ btrfs_mark_buffer_dirty(path->nodes[0]);
+
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root,
+ u64 src, u64 dst)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = src;
+ key.type = BTRFS_QGROUP_RELATION_KEY;
+ key.offset = dst;
+
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int add_qgroup_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root, u64 qgroupid)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_qgroup_info_item *qgroup_info;
+ struct btrfs_qgroup_limit_item *qgroup_limit;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroupid;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*qgroup_info));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_info_item);
+ btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
+ btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
+ btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ btrfs_release_path(path);
+
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*qgroup_limit));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_limit_item);
+ btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
+ btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int del_qgroup_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *quota_root, u64 qgroupid)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroupid;
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+ if (ret)
+ goto out;
+
+ btrfs_release_path(path);
+
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
+ if (ret < 0)
+ goto out;
+
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = btrfs_del_item(trans, quota_root, path);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, u64 qgroupid,
+ u64 flags, u64 max_rfer, u64 max_excl,
+ u64 rsv_rfer, u64 rsv_excl)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_limit_item *qgroup_limit;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_LIMIT_KEY;
+ key.offset = qgroupid;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ qgroup_limit = btrfs_item_ptr(l, path->slots[0],
+ struct btrfs_qgroup_limit_item);
+ btrfs_set_qgroup_limit_flags(l, qgroup_limit, flags);
+ btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, max_rfer);
+ btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, max_excl);
+ btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, rsv_rfer);
+ btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, rsv_excl);
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_qgroup *qgroup)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_info_item *qgroup_info;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_INFO_KEY;
+ key.offset = qgroup->qgroupid;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ qgroup_info = btrfs_item_ptr(l, path->slots[0],
+ struct btrfs_qgroup_info_item);
+ btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
+ btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
+ btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
+ btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
+ btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_root *root)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct extent_buffer *l;
+ struct btrfs_qgroup_status_item *ptr;
+ int ret;
+ int slot;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_STATUS_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ BUG_ON(!path);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+
+ if (ret)
+ goto out;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+ ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
+ btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
+ btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
+ /* XXX scan */
+
+ btrfs_mark_buffer_dirty(l);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * called with qgroup_lock held
+ */
+static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ int ret;
+
+ if (!root)
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ key.objectid = 0;
+ key.offset = 0;
+ key.type = 0;
+
+ path->leave_spinning = 1;
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0) {
+ if (path->slots[0] == 0)
+ break;
+ path->slots[0]--;
+ } else if (ret < 0) {
+ break;
+ }
+
+ ret = btrfs_del_item(trans, root, path);
+ if (ret)
+ goto out;
+ btrfs_release_path(path);
+ }
+ ret = 0;
+out:
+ root->fs_info->pending_quota_state = 0;
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_quota_enable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_path *path = NULL;
+ struct btrfs_qgroup_status_item *ptr;
+ struct extent_buffer *leaf;
+ struct btrfs_key key;
+ int ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ if (fs_info->quota_root) {
+ fs_info->pending_quota_state = 1;
+ spin_unlock(&fs_info->qgroup_lock);
+ goto out;
+ }
+ spin_unlock(&fs_info->qgroup_lock);
+
+ /*
+ * initially create the quota tree
+ */
+ quota_root = btrfs_create_tree(trans, fs_info,
+ BTRFS_QUOTA_TREE_OBJECTID);
+ if (IS_ERR(quota_root)) {
+ ret = PTR_ERR(quota_root);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = 0;
+ key.type = BTRFS_QGROUP_STATUS_KEY;
+ key.offset = 0;
+
+ ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
+ sizeof(*ptr));
+ if (ret)
+ goto out;
+
+ leaf = path->nodes[0];
+ ptr = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_qgroup_status_item);
+ btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
+ btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
+ fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
+ btrfs_set_qgroup_status_scan(leaf, ptr, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->quota_root = quota_root;
+ fs_info->pending_quota_state = 1;
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+int btrfs_quota_disable(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *tree_root = fs_info->tree_root;
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->quota_enabled = 0;
+ fs_info->pending_quota_state = 0;
+ quota_root = fs_info->quota_root;
+ fs_info->quota_root = NULL;
+ btrfs_free_qgroup_config(fs_info);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = btrfs_clean_quota_tree(trans, quota_root);
+ if (ret)
+ goto out;
+
+ ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
+ if (ret)
+ goto out;
+
+ list_del(&quota_root->dirty_list);
+
+ btrfs_tree_lock(quota_root->node);
+ clean_tree_block(trans, tree_root, quota_root->node);
+ btrfs_tree_unlock(quota_root->node);
+ btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+
+ free_extent_buffer(quota_root->node);
+ free_extent_buffer(quota_root->commit_root);
+ kfree(quota_root);
+out:
+ return ret;
+}
+
+int btrfs_quota_rescan(struct btrfs_fs_info *fs_info)
+{
+ /* FIXME */
+ return 0;
+}
+
+int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = add_qgroup_relation_item(trans, quota_root, src, dst);
+ if (ret)
+ return ret;
+
+ ret = add_qgroup_relation_item(trans, quota_root, dst, src);
+ if (ret) {
+ del_qgroup_relation_item(trans, quota_root, src, dst);
+ return ret;
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
+ ret = add_relation_rb(quota_root->fs_info, src, dst);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 src, u64 dst)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+ int err;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = del_qgroup_relation_item(trans, quota_root, src, dst);
+ err = del_qgroup_relation_item(trans, quota_root, dst, src);
+ if (err && !ret)
+ ret = err;
+
+ spin_lock(&fs_info->qgroup_lock);
+ del_relation_rb(fs_info, src, dst);
+
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid, char *name)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = add_qgroup_item(trans, quota_root, qgroupid);
+
+ spin_lock(&fs_info->qgroup_lock);
+ qgroup = add_qgroup_rb(fs_info, qgroupid);
+ spin_unlock(&fs_info->qgroup_lock);
+
+ if (IS_ERR(qgroup))
+ ret = PTR_ERR(qgroup);
+
+ return ret;
+}
+
+int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid)
+{
+ struct btrfs_root *quota_root;
+ int ret = 0;
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = del_qgroup_item(trans, quota_root, qgroupid);
+
+ spin_lock(&fs_info->qgroup_lock);
+ del_qgroup_rb(quota_root->fs_info, qgroupid);
+
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 qgroupid,
+ struct btrfs_qgroup_limit *limit)
+{
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_qgroup *qgroup;
+ int ret = 0;
+
+ if (!quota_root)
+ return -EINVAL;
+
+ ret = update_qgroup_limit_item(trans, quota_root, qgroupid,
+ limit->flags, limit->max_rfer,
+ limit->max_excl, limit->rsv_rfer,
+ limit->rsv_excl);
+ if (ret) {
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ printk(KERN_INFO "unable to update quota limit for %llu\n",
+ (unsigned long long)qgroupid);
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ qgroup = find_qgroup_rb(fs_info, qgroupid);
+ if (!qgroup) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ qgroup->lim_flags = limit->flags;
+ qgroup->max_rfer = limit->max_rfer;
+ qgroup->max_excl = limit->max_excl;
+ qgroup->rsv_rfer = limit->rsv_rfer;
+ qgroup->rsv_excl = limit->rsv_excl;
+
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+
+ return ret;
+}
+
+static void qgroup_dirty(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup *qgroup)
+{
+ if (list_empty(&qgroup->dirty))
+ list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
+}
+
+/*
+ * btrfs_qgroup_record_ref is called when the ref is added or deleted. It puts
+ * the modification into a list that's later used by btrfs_end_transaction to
+ * pass the recorded modifications on to btrfs_qgroup_account_ref.
+ */
+int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ struct qgroup_update *u;
+
+ BUG_ON(!trans->delayed_ref_elem.seq);
+ u = kmalloc(sizeof(*u), GFP_NOFS);
+ if (!u)
+ return -ENOMEM;
+
+ u->node = node;
+ u->extent_op = extent_op;
+ list_add_tail(&u->list, &trans->qgroup_ref_list);
+
+ return 0;
+}
+
+/*
+ * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
+ * from the fs. First, all roots referencing the extent are searched, and
+ * then the space is accounted to the different roots accordingly. The
+ * accounting algorithm works in 3 steps documented inline.
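+ *
+ * As an illustration: assume extent X is referenced only by subvolume A,
+ * whose qgroup 0/A is a member of 1/P, and subvolume B (qgroup 0/B, also a
+ * member of 1/P) gains a reference to X. Then roots = {A} and sgn = +1.
+ * Step 1 bumps the refcnt of 0/A and 1/P. Step 2 walks up from 0/B: 0/B was
+ * not visited in step 1, so its rfer grows by the extent size, while 1/P
+ * already counted the extent via A and is left alone. Step 3 walks up from
+ * A again: 0/A was reached by all old roots and not touched in step 2, so
+ * its excl shrinks - the extent is no longer exclusive to A.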
+ */
+int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ struct btrfs_key ins;
+ struct btrfs_root *quota_root;
+ u64 ref_root;
+ struct btrfs_qgroup *qgroup;
+ struct ulist_node *unode;
+ struct ulist *roots = NULL;
+ struct ulist *tmp = NULL;
+ struct ulist_iterator uiter;
+ u64 seq;
+ int ret = 0;
+ int sgn;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ BUG_ON(!fs_info->quota_root);
+
+ ins.objectid = node->bytenr;
+ ins.offset = node->num_bytes;
+ ins.type = BTRFS_EXTENT_ITEM_KEY;
+
+ if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ struct btrfs_delayed_tree_ref *ref;
+ ref = btrfs_delayed_node_to_tree_ref(node);
+ ref_root = ref->root;
+ } else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY) {
+ struct btrfs_delayed_data_ref *ref;
+ ref = btrfs_delayed_node_to_data_ref(node);
+ ref_root = ref->root;
+ } else {
+ BUG();
+ }
+
+ if (!is_fstree(ref_root)) {
+ /*
+ * non-fs-trees are not being accounted
+ */
+ return 0;
+ }
+
+ switch (node->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ sgn = 1;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ sgn = -1;
+ break;
+ case BTRFS_UPDATE_DELAYED_HEAD:
+ return 0;
+ default:
+ BUG();
+ }
+
+ /*
+ * the delayed ref sequence number we pass depends on the direction of
+ * the operation. for add operations, we pass (node->seq - 1) to skip
+ * the delayed ref's current sequence number, because we need the state
+ * of the tree before the add operation. for delete operations, we pass
+ * (node->seq) to include the delayed ref's current sequence number,
+ * because we need the state of the tree after the delete operation.
+ */
+ ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
+ sgn > 0 ? node->seq - 1 : node->seq, &roots);
+ if (ret < 0)
+ goto out;
+
+ spin_lock(&fs_info->qgroup_lock);
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto unlock;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto unlock;
+
+ /*
+ * step 1: for each old ref, visit all nodes once and inc refcnt
+ */
+ tmp = ulist_alloc(GFP_ATOMIC);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ seq = fs_info->qgroup_seq;
+ fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+ struct btrfs_qgroup *qg;
+
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ /* XXX id not needed */
+ ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ if (qg->refcnt < seq)
+ qg->refcnt = seq + 1;
+ else
+ ++qg->refcnt;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group,
+ GFP_ATOMIC);
+ }
+ }
+ }
+
+ /*
+ * step 2: walk from the new root
+ */
+ ulist_reinit(tmp);
+ ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(tmp, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+ if (qg->refcnt < seq) {
+ /* not visited by step 1 */
+ qg->rfer += sgn * node->num_bytes;
+ qg->rfer_cmpr += sgn * node->num_bytes;
+ if (roots->nnodes == 0) {
+ qg->excl += sgn * node->num_bytes;
+ qg->excl_cmpr += sgn * node->num_bytes;
+ }
+ qgroup_dirty(fs_info, qg);
+ }
+ WARN_ON(qg->tag >= seq);
+ qg->tag = seq;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+
+ /*
+ * step 3: walk again from old refs
+ */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(roots, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct ulist_node *tmp_unode;
+ struct ulist_iterator tmp_uiter;
+
+ qg = find_qgroup_rb(fs_info, unode->val);
+ if (!qg)
+ continue;
+
+ ulist_reinit(tmp);
+ ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+ ULIST_ITER_INIT(&tmp_uiter);
+ while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)tmp_unode->aux;
+ if (qg->tag == seq)
+ continue;
+
+ if (qg->refcnt - seq == roots->nnodes) {
+ qg->excl -= sgn * node->num_bytes;
+ qg->excl_cmpr -= sgn * node->num_bytes;
+ qgroup_dirty(fs_info, qg);
+ }
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(tmp, glist->group->qgroupid,
+ (unsigned long)glist->group,
+ GFP_ATOMIC);
+ }
+ }
+ }
+ ret = 0;
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ ulist_free(roots);
+ ulist_free(tmp);
+
+ return ret;
+}
+
+/*
+ * called from commit_transaction. Writes all changed qgroups to disk.
+ */
+int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ int ret = 0;
+
+ if (!quota_root)
+ goto out;
+
+ fs_info->quota_enabled = fs_info->pending_quota_state;
+
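+ /*
+ * Write out every dirty qgroup's info item. The qgroup lock is
+ * dropped around each tree update and retaken to pick the next
+ * entry off the dirty list.
+ */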
+ spin_lock(&fs_info->qgroup_lock);
+ while (!list_empty(&fs_info->dirty_qgroups)) {
+ struct btrfs_qgroup *qgroup;
+ qgroup = list_first_entry(&fs_info->dirty_qgroups,
+ struct btrfs_qgroup, dirty);
+ list_del_init(&qgroup->dirty);
+ spin_unlock(&fs_info->qgroup_lock);
+ ret = update_qgroup_info_item(trans, quota_root, qgroup);
+ if (ret)
+ fs_info->qgroup_flags |=
+ BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+ spin_lock(&fs_info->qgroup_lock);
+ }
+ if (fs_info->quota_enabled)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
+ else
+ fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
+ spin_unlock(&fs_info->qgroup_lock);
+
+ ret = update_qgroup_status_item(trans, fs_info, quota_root);
+ if (ret)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
+
+out:
+
+ return ret;
+}
+
+/*
+ * copy the accounting information between qgroups. This is necessary when a
+ * snapshot or a subvolume is created
+ */
+int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
+ struct btrfs_qgroup_inherit *inherit)
+{
+ int ret = 0;
+ int i;
+ u64 *i_qgroups;
+ struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_qgroup *srcgroup;
+ struct btrfs_qgroup *dstgroup;
+ u32 level_size = 0;
+
+ if (!fs_info->quota_enabled)
+ return 0;
+
+ if (!quota_root)
+ return -EINVAL;
+
+ /*
+ * create a tracking group for the subvol itself
+ */
+ ret = add_qgroup_item(trans, quota_root, objectid);
+ if (ret)
+ goto out;
+
+ if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
+ ret = update_qgroup_limit_item(trans, quota_root, objectid,
+ inherit->lim.flags,
+ inherit->lim.max_rfer,
+ inherit->lim.max_excl,
+ inherit->lim.rsv_rfer,
+ inherit->lim.rsv_excl);
+ if (ret)
+ goto out;
+ }
+
+ if (srcid) {
+ struct btrfs_root *srcroot;
+ struct btrfs_key srckey;
+ int srcroot_level;
+
+ srckey.objectid = srcid;
+ srckey.type = BTRFS_ROOT_ITEM_KEY;
+ srckey.offset = (u64)-1;
+ srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
+ if (IS_ERR(srcroot)) {
+ ret = PTR_ERR(srcroot);
+ goto out;
+ }
+
+ rcu_read_lock();
+ srcroot_level = btrfs_header_level(srcroot->node);
+ level_size = btrfs_level_size(srcroot, srcroot_level);
+ rcu_read_unlock();
+ }
+
+ /*
+ * add qgroup to all inherited groups
+ */
+ if (inherit) {
+ i_qgroups = (u64 *)(inherit + 1);
+ for (i = 0; i < inherit->num_qgroups; ++i) {
+ ret = add_qgroup_relation_item(trans, quota_root,
+ objectid, *i_qgroups);
+ if (ret)
+ goto out;
+ ret = add_qgroup_relation_item(trans, quota_root,
+ *i_qgroups, objectid);
+ if (ret)
+ goto out;
+ ++i_qgroups;
+ }
+ }
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ dstgroup = add_qgroup_rb(fs_info, objectid);
+ if (IS_ERR(dstgroup)) {
+ ret = PTR_ERR(dstgroup);
+ goto unlock;
+ }
+
+ if (srcid) {
+ srcgroup = find_qgroup_rb(fs_info, srcid);
+ if (!srcgroup) {
+ ret = -EINVAL;
+ goto unlock;
+ }
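+ /*
+ * The new group starts with the source's referenced bytes minus one
+ * tree block (level_size); the source's exclusive bytes are reset to
+ * exactly that one tree block, since everything else is now shared
+ * with the snapshot.
+ */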
+ dstgroup->rfer = srcgroup->rfer - level_size;
+ dstgroup->rfer_cmpr = srcgroup->rfer_cmpr - level_size;
+ srcgroup->excl = level_size;
+ srcgroup->excl_cmpr = level_size;
+ qgroup_dirty(fs_info, dstgroup);
+ qgroup_dirty(fs_info, srcgroup);
+ }
+
+ if (!inherit) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ i_qgroups = (u64 *)(inherit + 1);
+ for (i = 0; i < inherit->num_qgroups; ++i) {
+ ret = add_relation_rb(quota_root->fs_info, objectid,
+ *i_qgroups);
+ if (ret)
+ goto unlock;
+ ++i_qgroups;
+ }
+
+ for (i = 0; i < inherit->num_ref_copies; ++i) {
+ struct btrfs_qgroup *src;
+ struct btrfs_qgroup *dst;
+
+ src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+
+ if (!src || !dst) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dst->rfer = src->rfer - level_size;
+ dst->rfer_cmpr = src->rfer_cmpr - level_size;
+ i_qgroups += 2;
+ }
+ for (i = 0; i < inherit->num_excl_copies; ++i) {
+ struct btrfs_qgroup *src;
+ struct btrfs_qgroup *dst;
+
+ src = find_qgroup_rb(fs_info, i_qgroups[0]);
+ dst = find_qgroup_rb(fs_info, i_qgroups[1]);
+
+ if (!src || !dst) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ dst->excl = src->excl + level_size;
+ dst->excl_cmpr = src->excl_cmpr + level_size;
+ i_qgroups += 2;
+ }
+
+unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+out:
+ return ret;
+}
+
+/*
+ * reserve some space for a qgroup and all its parents. The reservation takes
+ * place with start_transaction or dealloc_reserve, similar to ENOSPC
+ * accounting. If not enough space is available, EDQUOT is returned.
+ * We assume that the requested space is new for all qgroups.
+ */
+int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 ref_root = root->root_key.objectid;
+ int ret = 0;
+ struct ulist *ulist = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+
+ if (!is_fstree(ref_root))
+ return 0;
+
+ if (num_bytes == 0)
+ return 0;
+
+ spin_lock(&fs_info->qgroup_lock);
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto out;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+
+ /*
+ * in a first step, we check whether any limits of the affected qgroups
+ * would be exceeded
+ */
+ ulist = ulist_alloc(GFP_ATOMIC);
+ ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
+ qg->reserved + qg->rfer + num_bytes >
+ qg->max_rfer)
+ ret = -EDQUOT;
+
+ if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
+ qg->reserved + qg->excl + num_bytes >
+ qg->max_excl)
+ ret = -EDQUOT;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(ulist, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+ if (ret)
+ goto out;
+
+ /*
+ * no limits exceeded, now record the reservation into all qgroups
+ */
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ qg->reserved += num_bytes;
+ }
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(ulist);
+
+ return ret;
+}
+
+void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
+{
+ struct btrfs_root *quota_root;
+ struct btrfs_qgroup *qgroup;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct ulist *ulist = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ u64 ref_root = root->root_key.objectid;
+
+ if (!is_fstree(ref_root))
+ return;
+
+ if (num_bytes == 0)
+ return;
+
+ spin_lock(&fs_info->qgroup_lock);
+
+ quota_root = fs_info->quota_root;
+ if (!quota_root)
+ goto out;
+
+ qgroup = find_qgroup_rb(fs_info, ref_root);
+ if (!qgroup)
+ goto out;
+
+ ulist = ulist_alloc(GFP_ATOMIC);
+ ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(ulist, &uiter))) {
+ struct btrfs_qgroup *qg;
+ struct btrfs_qgroup_list *glist;
+
+ qg = (struct btrfs_qgroup *)unode->aux;
+
+ qg->reserved -= num_bytes;
+
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ ulist_add(ulist, glist->group->qgroupid,
+ (unsigned long)glist->group, GFP_ATOMIC);
+ }
+ }
+
+out:
+ spin_unlock(&fs_info->qgroup_lock);
+ ulist_free(ulist);
+}
+
+void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
+{
+ if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
+ return;
+ printk(KERN_ERR "btrfs: qgroups not uptodate in trans handle %p: list is%s empty, seq is %llu\n",
+ trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
+ trans->delayed_ref_elem.seq);
+ BUG();
+}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 646ee21bb035..4da08652004d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1239,10 +1239,11 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
- kfree(node);
btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
"for start=%llu while inserting into relocation "
- "tree\n");
+ "tree\n", node->bytenr);
+ kfree(node);
+ return -EEXIST;
}
list_add_tail(&root->root_list, &rc->reloc_roots);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 24fb8ce4e071..10d8e4d88071 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -16,12 +16,55 @@
* Boston, MA 021110-1307, USA.
*/
+#include <linux/uuid.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"
/*
+ * Read a root item from the tree. In case we detect a root item smaller than
+ * sizeof(root_item), we know it's an old version of the root structure and
+ * initialize all new fields to zero. The same happens if we detect mismatching
+ * generation numbers as then we know the root was once mounted with an older
+ * kernel that was not aware of the root item structure change.
+ */
+void btrfs_read_root_item(struct btrfs_root *root,
+ struct extent_buffer *eb, int slot,
+ struct btrfs_root_item *item)
+{
+ uuid_le uuid;
+ int len;
+ int need_reset = 0;
+
+ len = btrfs_item_size_nr(eb, slot);
+ read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
+ min_t(int, len, (int)sizeof(*item)));
+ if (len < sizeof(*item))
+ need_reset = 1;
+ if (!need_reset && btrfs_root_generation(item)
+ != btrfs_root_generation_v2(item)) {
+ if (btrfs_root_generation_v2(item) != 0) {
+ printk(KERN_WARNING "btrfs: mismatching "
+ "generation and generation_v2 "
+ "found in root item. This root "
+ "was probably mounted with an "
+ "older kernel. Resetting all "
+ "new fields.\n");
+ }
+ need_reset = 1;
+ }
+ if (need_reset) {
+ memset(&item->generation_v2, 0,
+ sizeof(*item) - offsetof(struct btrfs_root_item,
+ generation_v2));
+
+ uuid_le_gen(&uuid);
+ memcpy(item->uuid, uuid.b, BTRFS_UUID_SIZE);
+ }
+}
+
+/*
* lookup the root with the highest offset for a given objectid. The key we do
* find is copied into 'key'. If we find something return 0, otherwise 1, < 0
* on error.
@@ -61,10 +104,10 @@ int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
goto out;
}
if (item)
- read_extent_buffer(l, item, btrfs_item_ptr_offset(l, slot),
- sizeof(*item));
+ btrfs_read_root_item(root, l, slot, item);
if (key)
memcpy(key, &found_key, sizeof(found_key));
+
ret = 0;
out:
btrfs_free_path(path);
@@ -91,16 +134,15 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
int ret;
int slot;
unsigned long ptr;
+ int old_len;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
- if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
- goto out;
- }
+ if (ret < 0)
+ goto out_abort;
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
@@ -113,16 +155,56 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
l = path->nodes[0];
slot = path->slots[0];
ptr = btrfs_item_ptr_offset(l, slot);
+ old_len = btrfs_item_size_nr(l, slot);
+
+ /*
+ * If this is the first time we update the root item which originated
+ * from an older kernel, we need to enlarge the item size to make room
+ * for the added fields.
+ */
+ if (old_len < sizeof(*item)) {
+ btrfs_release_path(path);
+ ret = btrfs_search_slot(trans, root, key, path,
+ -1, 1);
+ if (ret < 0)
+ goto out_abort;
+ ret = btrfs_del_item(trans, root, path);
+ if (ret < 0)
+ goto out_abort;
+ btrfs_release_path(path);
+ ret = btrfs_insert_empty_item(trans, root, path,
+ key, sizeof(*item));
+ if (ret < 0)
+ goto out_abort;
+ l = path->nodes[0];
+ slot = path->slots[0];
+ ptr = btrfs_item_ptr_offset(l, slot);
+ }
+
+ /*
+ * Update generation_v2 so at the next mount we know the new root
+ * fields are valid.
+ */
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
+
write_extent_buffer(l, item, ptr, sizeof(*item));
btrfs_mark_buffer_dirty(path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
+
+out_abort:
+ btrfs_abort_transaction(trans, root, ret);
+ goto out;
}
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_key *key, struct btrfs_root_item *item)
{
+ /*
+ * Make sure generation v1 and v2 match. See update_root for details.
+ */
+ btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
@@ -454,3 +536,16 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
root_item->byte_limit = 0;
}
}
+
+void btrfs_update_root_times(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root)
+{
+ struct btrfs_root_item *item = &root->root_item;
+ struct timespec ct = CURRENT_TIME;
+
+ spin_lock(&root->root_times_lock);
+ item->ctransid = cpu_to_le64(trans->transid);
+ item->ctime.sec = cpu_to_le64(ct.tv_sec);
+ item->ctime.nsec = cpu_to_le32(ct.tv_nsec);
+ spin_unlock(&root->root_times_lock);
+}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
new file mode 100644
index 000000000000..fb5ffe95f869
--- /dev/null
+++ b/fs/btrfs/send.c
@@ -0,0 +1,4572 @@
+/*
+ * Copyright (C) 2012 Alexander Block. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/bsearch.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sort.h>
+#include <linux/mount.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/radix-tree.h>
+#include <linux/crc32c.h>
+#include <linux/vmalloc.h>
+
+#include "send.h"
+#include "backref.h"
+#include "locking.h"
+#include "disk-io.h"
+#include "btrfs_inode.h"
+#include "transaction.h"
+
+static int g_verbose = 0;
+
+#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
+
+/*
+ * An fs_path is a helper to dynamically build path names of unknown size.
+ * It reallocates the internal buffer on demand.
+ * It allows fast adding of path elements on the right side (normal path) and
+ * fast adding to the left side (reversed path). A reversed path can also be
+ * unreversed if needed.
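+ *
+ * The struct is padded to PAGE_SIZE through the anonymous union, so the
+ * trailing inline_buf can use whatever space is left after the bookkeeping
+ * fields (FS_PATH_INLINE_SIZE bytes) before a separate buffer is allocated.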
+ */
+struct fs_path {
+ union {
+ struct {
+ char *start;
+ char *end;
+ char *prepared;
+
+ char *buf;
+ int buf_len;
+ int reversed:1;
+ int virtual_mem:1;
+ char inline_buf[];
+ };
+ char pad[PAGE_SIZE];
+ };
+};
+#define FS_PATH_INLINE_SIZE \
+ (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
+
+/* reused for each extent */
+struct clone_root {
+ struct btrfs_root *root;
+ u64 ino;
+ u64 offset;
+
+ u64 found_refs;
+};
+
+#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
+#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
+
+struct send_ctx {
+ struct file *send_filp;
+ loff_t send_off;
+ char *send_buf;
+ u32 send_size;
+ u32 send_max_size;
+ u64 total_send_size;
+ u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+
+ struct vfsmount *mnt;
+
+ struct btrfs_root *send_root;
+ struct btrfs_root *parent_root;
+ struct clone_root *clone_roots;
+ int clone_roots_cnt;
+
+ /* current state of the compare_tree call */
+ struct btrfs_path *left_path;
+ struct btrfs_path *right_path;
+ struct btrfs_key *cmp_key;
+
+ /*
+ * info about the currently processed inode. In case of deleted inodes,
+ * these are the values from the deleted inode.
+ */
+ u64 cur_ino;
+ u64 cur_inode_gen;
+ int cur_inode_new;
+ int cur_inode_new_gen;
+ int cur_inode_deleted;
+ int cur_inode_first_ref_orphan;
+ u64 cur_inode_size;
+ u64 cur_inode_mode;
+
+ u64 send_progress;
+
+ struct list_head new_refs;
+ struct list_head deleted_refs;
+
+ struct radix_tree_root name_cache;
+ struct list_head name_cache_list;
+ int name_cache_size;
+
+ struct file *cur_inode_filp;
+ char *read_buf;
+};
+
+struct name_cache_entry {
+ struct list_head list;
+ struct list_head use_list;
+ u64 ino;
+ u64 gen;
+ u64 parent_ino;
+ u64 parent_gen;
+ int ret;
+ int need_later_update;
+ int name_len;
+ char name[];
+};
+
+static void fs_path_reset(struct fs_path *p)
+{
+ if (p->reversed) {
+ p->start = p->buf + p->buf_len - 1;
+ p->end = p->start;
+ *p->start = 0;
+ } else {
+ p->start = p->buf;
+ p->end = p->start;
+ *p->start = 0;
+ }
+}
+
+static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
+{
+ struct fs_path *p;
+
+ p = kmalloc(sizeof(*p), GFP_NOFS);
+ if (!p)
+ return NULL;
+ p->reversed = 0;
+ p->virtual_mem = 0;
+ p->buf = p->inline_buf;
+ p->buf_len = FS_PATH_INLINE_SIZE;
+ fs_path_reset(p);
+ return p;
+}
+
+static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
+{
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return NULL;
+ p->reversed = 1;
+ fs_path_reset(p);
+ return p;
+}
+
+static void fs_path_free(struct send_ctx *sctx, struct fs_path *p)
+{
+ if (!p)
+ return;
+ if (p->buf != p->inline_buf) {
+ if (p->virtual_mem)
+ vfree(p->buf);
+ else
+ kfree(p->buf);
+ }
+ kfree(p);
+}
+
+static int fs_path_len(struct fs_path *p)
+{
+ return p->end - p->start;
+}
+
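+/*
+ * Make sure the path buffer can hold at least len + 1 bytes. The buffer is
+ * grown from the inline buffer to kmalloc'ed memory, falling back to vmalloc
+ * if that fails; for reversed paths the existing contents are moved so that
+ * they stay anchored at the end of the new buffer.
+ */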
+static int fs_path_ensure_buf(struct fs_path *p, int len)
+{
+ char *tmp_buf;
+ int path_len;
+ int old_buf_len;
+
+ len++;
+
+ if (p->buf_len >= len)
+ return 0;
+
+ path_len = p->end - p->start;
+ old_buf_len = p->buf_len;
+ len = PAGE_ALIGN(len);
+
+ if (p->buf == p->inline_buf) {
+ tmp_buf = kmalloc(len, GFP_NOFS);
+ if (!tmp_buf) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ p->virtual_mem = 1;
+ }
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ p->buf = tmp_buf;
+ p->buf_len = len;
+ } else {
+ if (p->virtual_mem) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ vfree(p->buf);
+ } else {
+ tmp_buf = krealloc(p->buf, len, GFP_NOFS);
+ if (!tmp_buf) {
+ tmp_buf = vmalloc(len);
+ if (!tmp_buf)
+ return -ENOMEM;
+ memcpy(tmp_buf, p->buf, p->buf_len);
+ kfree(p->buf);
+ p->virtual_mem = 1;
+ }
+ }
+ p->buf = tmp_buf;
+ p->buf_len = len;
+ }
+ if (p->reversed) {
+ tmp_buf = p->buf + old_buf_len - path_len - 1;
+ p->end = p->buf + p->buf_len - 1;
+ p->start = p->end - path_len;
+ memmove(p->start, tmp_buf, path_len + 1);
+ } else {
+ p->start = p->buf;
+ p->end = p->start + path_len;
+ }
+ return 0;
+}
+
+static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
+{
+ int ret;
+ int new_len;
+
+ new_len = p->end - p->start + name_len;
+ if (p->start != p->end)
+ new_len++;
+ ret = fs_path_ensure_buf(p, new_len);
+ if (ret < 0)
+ goto out;
+
+ if (p->reversed) {
+ if (p->start != p->end)
+ *--p->start = '/';
+ p->start -= name_len;
+ p->prepared = p->start;
+ } else {
+ if (p->start != p->end)
+ *p->end++ = '/';
+ p->prepared = p->end;
+ p->end += name_len;
+ *p->end = 0;
+ }
+
+out:
+ return ret;
+}
+
+static int fs_path_add(struct fs_path *p, const char *name, int name_len)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, name_len);
+ if (ret < 0)
+ goto out;
+ memcpy(p->prepared, name, name_len);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, p2->end - p2->start);
+ if (ret < 0)
+ goto out;
+ memcpy(p->prepared, p2->start, p2->end - p2->start);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static int fs_path_add_from_extent_buffer(struct fs_path *p,
+ struct extent_buffer *eb,
+ unsigned long off, int len)
+{
+ int ret;
+
+ ret = fs_path_prepare_for_add(p, len);
+ if (ret < 0)
+ goto out;
+
+ read_extent_buffer(eb, p->prepared, off, len);
+ p->prepared = NULL;
+
+out:
+ return ret;
+}
+
+static void fs_path_remove(struct fs_path *p)
+{
+ BUG_ON(p->reversed);
+ while (p->start != p->end && *p->end != '/')
+ p->end--;
+ *p->end = 0;
+}
+
+static int fs_path_copy(struct fs_path *p, struct fs_path *from)
+{
+ int ret;
+
+ p->reversed = from->reversed;
+ fs_path_reset(p);
+
+ ret = fs_path_add_path(p, from);
+
+ return ret;
+}
+
+static void fs_path_unreverse(struct fs_path *p)
+{
+ char *tmp;
+ int len;
+
+ if (!p->reversed)
+ return;
+
+ tmp = p->start;
+ len = p->end - p->start;
+ p->start = p->buf;
+ p->end = p->start + len;
+ memmove(p->start, tmp, len + 1);
+ p->reversed = 0;
+}
+
+static struct btrfs_path *alloc_path_for_send(void)
+{
+ struct btrfs_path *path;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return NULL;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+ return path;
+}
+
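+/*
+ * Write len bytes to the send file at the current send offset, looping until
+ * everything is written. The address limit is widened with set_fs(KERNEL_DS)
+ * because the buffer is a kernel pointer.
+ */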
+static int write_buf(struct send_ctx *sctx, const void *buf, u32 len)
+{
+ int ret;
+ mm_segment_t old_fs;
+ u32 pos = 0;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ while (pos < len) {
+ ret = vfs_write(sctx->send_filp, (char *)buf + pos, len - pos,
+ &sctx->send_off);
+ /* TODO handle that correctly */
+ /*if (ret == -ERESTARTSYS) {
+ continue;
+ }*/
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
+ ret = -EIO;
+ goto out;
+ }
+ pos += ret;
+ }
+
+ ret = 0;
+
+out:
+ set_fs(old_fs);
+ return ret;
+}
+
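+/*
+ * Append one TLV (type/length/value) element to the command buffer. Fails
+ * with -EOVERFLOW if it does not fit into the remaining send buffer.
+ */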
+static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
+{
+ struct btrfs_tlv_header *hdr;
+ int total_len = sizeof(*hdr) + len;
+ int left = sctx->send_max_size - sctx->send_size;
+
+ if (unlikely(left < total_len))
+ return -EOVERFLOW;
+
+ hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
+ hdr->tlv_type = cpu_to_le16(attr);
+ hdr->tlv_len = cpu_to_le16(len);
+ memcpy(hdr + 1, data, len);
+ sctx->send_size += total_len;
+
+ return 0;
+}
+
+#if 0
+static int tlv_put_u8(struct send_ctx *sctx, u16 attr, u8 value)
+{
+ return tlv_put(sctx, attr, &value, sizeof(value));
+}
+
+static int tlv_put_u16(struct send_ctx *sctx, u16 attr, u16 value)
+{
+ __le16 tmp = cpu_to_le16(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+
+static int tlv_put_u32(struct send_ctx *sctx, u16 attr, u32 value)
+{
+ __le32 tmp = cpu_to_le32(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+#endif
+
+static int tlv_put_u64(struct send_ctx *sctx, u16 attr, u64 value)
+{
+ __le64 tmp = cpu_to_le64(value);
+ return tlv_put(sctx, attr, &tmp, sizeof(tmp));
+}
+
+static int tlv_put_string(struct send_ctx *sctx, u16 attr,
+ const char *str, int len)
+{
+ if (len == -1)
+ len = strlen(str);
+ return tlv_put(sctx, attr, str, len);
+}
+
+static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
+ const u8 *uuid)
+{
+ return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
+}
+
+#if 0
+static int tlv_put_timespec(struct send_ctx *sctx, u16 attr,
+ struct timespec *ts)
+{
+ struct btrfs_timespec bts;
+ bts.sec = cpu_to_le64(ts->tv_sec);
+ bts.nsec = cpu_to_le32(ts->tv_nsec);
+ return tlv_put(sctx, attr, &bts, sizeof(bts));
+}
+#endif
+
+static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
+ struct extent_buffer *eb,
+ struct btrfs_timespec *ts)
+{
+ struct btrfs_timespec bts;
+ read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
+ return tlv_put(sctx, attr, &bts, sizeof(bts));
+}
+
+#define TLV_PUT(sctx, attrtype, attrlen, data) \
+ do { \
+ ret = tlv_put(sctx, attrtype, attrlen, data); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+#define TLV_PUT_INT(sctx, attrtype, bits, value) \
+ do { \
+ ret = tlv_put_u##bits(sctx, attrtype, value); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
+#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
+#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
+#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
+#define TLV_PUT_STRING(sctx, attrtype, str, len) \
+ do { \
+ ret = tlv_put_string(sctx, attrtype, str, len); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_PATH(sctx, attrtype, p) \
+ do { \
+ ret = tlv_put_string(sctx, attrtype, p->start, \
+ p->end - p->start); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while(0)
+#define TLV_PUT_UUID(sctx, attrtype, uuid) \
+ do { \
+ ret = tlv_put_uuid(sctx, attrtype, uuid); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_TIMESPEC(sctx, attrtype, ts) \
+ do { \
+ ret = tlv_put_timespec(sctx, attrtype, ts); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
+ do { \
+ ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
+ if (ret < 0) \
+ goto tlv_put_failure; \
+ } while (0)
+
+static int send_header(struct send_ctx *sctx)
+{
+ struct btrfs_stream_header hdr;
+
+ strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
+ hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
+
+ return write_buf(sctx, &hdr, sizeof(hdr));
+}
+
+/*
+ * For each command/item we want to send to userspace, we call this function.
+ */
+static int begin_cmd(struct send_ctx *sctx, int cmd)
+{
+ struct btrfs_cmd_header *hdr;
+
+ if (!sctx->send_buf) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ BUG_ON(sctx->send_size);
+
+ sctx->send_size += sizeof(*hdr);
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->cmd = cpu_to_le16(cmd);
+
+ return 0;
+}
+
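+/*
+ * Finish the current command: fill in the total length, compute the crc32c
+ * over the whole command buffer (with the crc field zeroed) and write it to
+ * the send file. Per-command size statistics are updated as well.
+ */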
+static int send_cmd(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_cmd_header *hdr;
+ u32 crc;
+
+ hdr = (struct btrfs_cmd_header *)sctx->send_buf;
+ hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
+ hdr->crc = 0;
+
+ crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ hdr->crc = cpu_to_le32(crc);
+
+ ret = write_buf(sctx, sctx->send_buf, sctx->send_size);
+
+ sctx->total_send_size += sctx->send_size;
+ sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
+ sctx->send_size = 0;
+
+ return ret;
+}
+
+/*
+ * Sends a move instruction to user space
+ */
+static int send_rename(struct send_ctx *sctx,
+ struct fs_path *from, struct fs_path *to)
+{
+ int ret;
+
+verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends a link instruction to user space
+ */
+static int send_link(struct send_ctx *sctx,
+ struct fs_path *path, struct fs_path *lnk)
+{
+ int ret;
+
+verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends an unlink instruction to user space
+ */
+static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
+{
+ int ret;
+
+verbose_printk("btrfs: send_unlink %s\n", path->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Sends a rmdir instruction to user space
+ */
+static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
+{
+ int ret;
+
+verbose_printk("btrfs: send_rmdir %s\n", path->start);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+/*
+ * Helper function to retrieve some fields from an inode item.
+ */
+static int get_inode_info(struct btrfs_root *root,
+ u64 ino, u64 *size, u64 *gen,
+ u64 *mode, u64 *uid, u64 *gid)
+{
+ int ret;
+ struct btrfs_inode_item *ii;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ if (size)
+ *size = btrfs_inode_size(path->nodes[0], ii);
+ if (gen)
+ *gen = btrfs_inode_generation(path->nodes[0], ii);
+ if (mode)
+ *mode = btrfs_inode_mode(path->nodes[0], ii);
+ if (uid)
+ *uid = btrfs_inode_uid(path->nodes[0], ii);
+ if (gid)
+ *gid = btrfs_inode_gid(path->nodes[0], ii);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
+ struct fs_path *p,
+ void *ctx);
+
+/*
+ * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * The iterate callback may return a non-zero value to stop iteration. This can
+ * be a negative value for error codes or 1 to simply stop it.
+ *
+ * path must point to the INODE_REF when called.
+ */
+static int iterate_inode_ref(struct send_ctx *sctx,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *found_key, int resolve,
+ iterate_inode_ref_t iterate, void *ctx)
+{
+ struct extent_buffer *eb;
+ struct btrfs_item *item;
+ struct btrfs_inode_ref *iref;
+ struct btrfs_path *tmp_path;
+ struct fs_path *p;
+ u32 cur;
+ u32 len;
+ u32 total;
+ int slot;
+ u32 name_len;
+ char *start;
+ int ret = 0;
+ int num;
+ int index;
+
+ p = fs_path_alloc_reversed(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path) {
+ fs_path_free(sctx, p);
+ return -ENOMEM;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item = btrfs_item_nr(eb, slot);
+ iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+ cur = 0;
+ len = 0;
+ total = btrfs_item_size(eb, item);
+
+ num = 0;
+ while (cur < total) {
+ fs_path_reset(p);
+
+ name_len = btrfs_inode_ref_name_len(eb, iref);
+ index = btrfs_inode_ref_index(eb, iref);
+ if (resolve) {
+ start = btrfs_iref_to_path(root, tmp_path, iref, eb,
+ found_key->offset, p->buf,
+ p->buf_len);
+ if (IS_ERR(start)) {
+ ret = PTR_ERR(start);
+ goto out;
+ }
+ if (start < p->buf) {
+ /* overflow, try again with a larger buffer */
+ ret = fs_path_ensure_buf(p,
+ p->buf_len + p->buf - start);
+ if (ret < 0)
+ goto out;
+ start = btrfs_iref_to_path(root, tmp_path, iref,
+ eb, found_key->offset, p->buf,
+ p->buf_len);
+ if (IS_ERR(start)) {
+ ret = PTR_ERR(start);
+ goto out;
+ }
+ BUG_ON(start < p->buf);
+ }
+ p->start = start;
+ } else {
+ ret = fs_path_add_from_extent_buffer(p, eb,
+ (unsigned long)(iref + 1), name_len);
+ if (ret < 0)
+ goto out;
+ }
+
+ len = sizeof(*iref) + name_len;
+ iref = (struct btrfs_inode_ref *)((char *)iref + len);
+ cur += len;
+
+ ret = iterate(num, found_key->offset, index, p, ctx);
+ if (ret)
+ goto out;
+
+ num++;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx);
+
+/*
+ * Helper function to iterate the entries in ONE btrfs_dir_item.
+ * The iterate callback may return a non-zero value to stop iteration. This can
+ * be a negative value for error codes or 1 to simply stop it.
+ *
+ * path must point to the dir item when called.
+ */
+static int iterate_dir_item(struct send_ctx *sctx,
+ struct btrfs_root *root, struct btrfs_path *path,
+ struct btrfs_key *found_key,
+ iterate_dir_item_t iterate, void *ctx)
+{
+ int ret = 0;
+ struct extent_buffer *eb;
+ struct btrfs_item *item;
+ struct btrfs_dir_item *di;
+ struct btrfs_path *tmp_path = NULL;
+ struct btrfs_key di_key;
+ char *buf = NULL;
+ char *buf2 = NULL;
+ int buf_len;
+ int buf_virtual = 0;
+ u32 name_len;
+ u32 data_len;
+ u32 cur;
+ u32 len;
+ u32 total;
+ int slot;
+ int num;
+ u8 type;
+
+ buf_len = PAGE_SIZE;
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ item = btrfs_item_nr(eb, slot);
+ di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
+ cur = 0;
+ len = 0;
+ total = btrfs_item_size(eb, item);
+
+ num = 0;
+ while (cur < total) {
+ name_len = btrfs_dir_name_len(eb, di);
+ data_len = btrfs_dir_data_len(eb, di);
+ type = btrfs_dir_type(eb, di);
+ btrfs_dir_item_key_to_cpu(eb, di, &di_key);
+
+ if (name_len + data_len > buf_len) {
+ buf_len = PAGE_ALIGN(name_len + data_len);
+ if (buf_virtual) {
+ buf2 = vmalloc(buf_len);
+ if (!buf2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ vfree(buf);
+ } else {
+ buf2 = krealloc(buf, buf_len, GFP_NOFS);
+ if (!buf2) {
+ buf2 = vmalloc(buf_len);
+ if (!buf2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ kfree(buf);
+ buf_virtual = 1;
+ }
+ }
+
+ buf = buf2;
+ buf2 = NULL;
+ }
+
+ read_extent_buffer(eb, buf, (unsigned long)(di + 1),
+ name_len + data_len);
+
+ len = sizeof(*di) + name_len + data_len;
+ di = (struct btrfs_dir_item *)((char *)di + len);
+ cur += len;
+
+ ret = iterate(num, &di_key, buf, name_len, buf + name_len,
+ data_len, type, ctx);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ num++;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ if (buf_virtual)
+ vfree(buf);
+ else
+ kfree(buf);
+ return ret;
+}
+
+static int __copy_first_ref(int num, u64 dir, int index,
+ struct fs_path *p, void *ctx)
+{
+ int ret;
+ struct fs_path *pt = ctx;
+
+ ret = fs_path_copy(pt, p);
+ if (ret < 0)
+ return ret;
+
+ /* we want the first only */
+ return 1;
+}
+
+/*
+ * Retrieve the first path of an inode. If an inode has more than one
+ * ref/hardlink, this is ignored.
+ */
+static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
+ u64 ino, struct fs_path *path)
+{
+ int ret;
+ struct btrfs_key key, found_key;
+ struct btrfs_path *p;
+
+ p = alloc_path_for_send();
+ if (!p)
+ return -ENOMEM;
+
+ fs_path_reset(path);
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 1;
+ goto out;
+ }
+ btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
+ if (found_key.objectid != ino ||
+ found_key.type != BTRFS_INODE_REF_KEY) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = iterate_inode_ref(sctx, root, p, &found_key, 1,
+ __copy_first_ref, path);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ btrfs_free_path(p);
+ return ret;
+}
+
+struct backref_ctx {
+ struct send_ctx *sctx;
+
+ /* number of total found references */
+ u64 found;
+
+ /*
+ * used for clones found in send_root. Clones found behind cur_objectid
+ * and cur_offset are not considered allowed clones.
+ */
+ u64 cur_objectid;
+ u64 cur_offset;
+
+ /* may be truncated in case it's the last extent in a file */
+ u64 extent_len;
+
+ /* Just to check for bugs in backref resolving */
+ int found_in_send_root;
+};
+
+static int __clone_root_cmp_bsearch(const void *key, const void *elt)
+{
+ u64 root = (u64)key;
+ struct clone_root *cr = (struct clone_root *)elt;
+
+ if (root < cr->root->objectid)
+ return -1;
+ if (root > cr->root->objectid)
+ return 1;
+ return 0;
+}
+
+static int __clone_root_cmp_sort(const void *e1, const void *e2)
+{
+ struct clone_root *cr1 = (struct clone_root *)e1;
+ struct clone_root *cr2 = (struct clone_root *)e2;
+
+ if (cr1->root->objectid < cr2->root->objectid)
+ return -1;
+ if (cr1->root->objectid > cr2->root->objectid)
+ return 1;
+ return 0;
+}
+
+/*
+ * Called for every backref that is found for the current extent.
+ */
+static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
+{
+ struct backref_ctx *bctx = ctx_;
+ struct clone_root *found;
+ int ret;
+ u64 i_size;
+
+ /* First check if the root is in the list of accepted clone sources */
+ found = bsearch((void *)root, bctx->sctx->clone_roots,
+ bctx->sctx->clone_roots_cnt,
+ sizeof(struct clone_root),
+ __clone_root_cmp_bsearch);
+ if (!found)
+ return 0;
+
+ if (found->root == bctx->sctx->send_root &&
+ ino == bctx->cur_objectid &&
+ offset == bctx->cur_offset) {
+ bctx->found_in_send_root = 1;
+ }
+
+ /*
+ * There are inodes that have extents that lie beyond their i_size. Don't
+ * accept clones from these extents.
+ */
+ ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (offset + bctx->extent_len > i_size)
+ return 0;
+
+ /*
+ * Make sure we don't consider clones from send_root that are
+ * behind the current inode/offset.
+ */
+ if (found->root == bctx->sctx->send_root) {
+ /*
+ * TODO for the moment we don't accept clones from the inode
+ * that is currently being sent. We may change this when
+ * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
+ * file.
+ */
+ if (ino >= bctx->cur_objectid)
+ return 0;
+ /*if (ino > ctx->cur_objectid)
+ return 0;
+ if (offset + ctx->extent_len > ctx->cur_offset)
+ return 0;*/
+
+ bctx->found++;
+ found->found_refs++;
+ found->ino = ino;
+ found->offset = offset;
+ return 0;
+ }
+
+ bctx->found++;
+ found->found_refs++;
+ if (ino < found->ino) {
+ found->ino = ino;
+ found->offset = offset;
+ } else if (found->ino == ino) {
+ /*
+ * same extent found more than once in the same file.
+ */
+ if (found->offset > offset + bctx->extent_len)
+ found->offset = offset;
+ }
+
+ return 0;
+}
+
+/*
+ * path must point to the extent item when called.
+ */
+static int find_extent_clone(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ u64 ino, u64 data_offset,
+ u64 ino_size,
+ struct clone_root **found)
+{
+ int ret;
+ int extent_type;
+ u64 logical;
+ u64 num_bytes;
+ u64 extent_item_pos;
+ struct btrfs_file_extent_item *fi;
+ struct extent_buffer *eb = path->nodes[0];
+ struct backref_ctx backref_ctx;
+ struct clone_root *cur_clone_root;
+ struct btrfs_key found_key;
+ struct btrfs_path *tmp_path;
+ u32 i;
+
+ tmp_path = alloc_path_for_send();
+ if (!tmp_path)
+ return -ENOMEM;
+
+ if (data_offset >= ino_size) {
+ /*
+ * There may be extents that lie beyond the file's size. This has been
+ * observed at least in combination with snapshotting while writing
+ * large files.
+ */
+ ret = 0;
+ goto out;
+ }
+
+ fi = btrfs_item_ptr(eb, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_type = btrfs_file_extent_type(eb, fi);
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ num_bytes = btrfs_file_extent_num_bytes(eb, fi);
+ logical = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (logical == 0) {
+ ret = -ENOENT;
+ goto out;
+ }
+ logical += btrfs_file_extent_offset(eb, fi);
+
+ ret = extent_from_logical(sctx->send_root->fs_info,
+ logical, tmp_path, &found_key);
+ btrfs_release_path(tmp_path);
+
+ if (ret < 0)
+ goto out;
+ if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * Setup the clone roots.
+ */
+ for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ cur_clone_root = sctx->clone_roots + i;
+ cur_clone_root->ino = (u64)-1;
+ cur_clone_root->offset = 0;
+ cur_clone_root->found_refs = 0;
+ }
+
+ backref_ctx.sctx = sctx;
+ backref_ctx.found = 0;
+ backref_ctx.cur_objectid = ino;
+ backref_ctx.cur_offset = data_offset;
+ backref_ctx.found_in_send_root = 0;
+ backref_ctx.extent_len = num_bytes;
+
+ /*
+ * The last extent of a file may be too large due to page alignment.
+ * We need to adjust extent_len in this case so that the checks in
+ * __iterate_backrefs work.
+ */
+ if (data_offset + num_bytes >= ino_size)
+ backref_ctx.extent_len = ino_size - data_offset;
+
+ /*
+ * Now collect all backrefs.
+ */
+ extent_item_pos = logical - found_key.objectid;
+ ret = iterate_extent_inodes(sctx->send_root->fs_info,
+ found_key.objectid, extent_item_pos, 1,
+ __iterate_backrefs, &backref_ctx);
+ if (ret < 0)
+ goto out;
+
+ if (!backref_ctx.found_in_send_root) {
+ /* found a bug in backref code? */
+ ret = -EIO;
+ printk(KERN_ERR "btrfs: ERROR did not find backref in "
+ "send_root. inode=%llu, offset=%llu, "
+ "logical=%llu\n",
+ ino, data_offset, logical);
+ goto out;
+ }
+
+verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
+ "ino=%llu, "
+ "num_bytes=%llu, logical=%llu\n",
+ data_offset, ino, num_bytes, logical);
+
+ if (!backref_ctx.found)
+ verbose_printk("btrfs: no clones found\n");
+
+ cur_clone_root = NULL;
+ for (i = 0; i < sctx->clone_roots_cnt; i++) {
+ if (sctx->clone_roots[i].found_refs) {
+ if (!cur_clone_root)
+ cur_clone_root = sctx->clone_roots + i;
+ else if (sctx->clone_roots[i].root == sctx->send_root)
+ /* prefer clones from send_root over others */
+ cur_clone_root = sctx->clone_roots + i;
+ break;
+ }
+
+ }
+
+ if (cur_clone_root) {
+ *found = cur_clone_root;
+ ret = 0;
+ } else {
+ ret = -ENOENT;
+ }
+
+out:
+ btrfs_free_path(tmp_path);
+ return ret;
+}
+
+static int read_symlink(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ u64 ino,
+ struct fs_path *dest)
+{
+ int ret;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *ei;
+ u8 type;
+ u8 compression;
+ unsigned long off;
+ int len;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ BUG_ON(ret);
+
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(path->nodes[0], ei);
+ compression = btrfs_file_extent_compression(path->nodes[0], ei);
+ BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
+ BUG_ON(compression);
+
+ off = btrfs_file_extent_inline_start(ei);
+ len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+
+ ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
+ if (ret < 0)
+ goto out;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Helper function to generate a file name that is unique in the root of
+ * send_root and parent_root. This is used to generate names for orphan inodes.
+ */
+static int gen_unique_name(struct send_ctx *sctx,
+ u64 ino, u64 gen,
+ struct fs_path *dest)
+{
+ int ret = 0;
+ struct btrfs_path *path;
+ struct btrfs_dir_item *di;
+ char tmp[64];
+ int len;
+ u64 idx = 0;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ while (1) {
+ len = snprintf(tmp, sizeof(tmp) - 1, "o%llu-%llu-%llu",
+ ino, gen, idx);
+ if (len >= sizeof(tmp)) {
+ /* should really not happen */
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ di = btrfs_lookup_dir_item(NULL, sctx->send_root,
+ path, BTRFS_FIRST_FREE_OBJECTID,
+ tmp, strlen(tmp), 0);
+ btrfs_release_path(path);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ if (di) {
+ /* not unique, try again */
+ idx++;
+ continue;
+ }
+
+ if (!sctx->parent_root) {
+ /* unique */
+ ret = 0;
+ break;
+ }
+
+ di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
+ path, BTRFS_FIRST_FREE_OBJECTID,
+ tmp, strlen(tmp), 0);
+ btrfs_release_path(path);
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ if (di) {
+ /* not unique, try again */
+ idx++;
+ continue;
+ }
+ /* unique */
+ break;
+ }
+
+ ret = fs_path_add(dest, tmp, strlen(tmp));
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+enum inode_state {
+ inode_state_no_change,
+ inode_state_will_create,
+ inode_state_did_create,
+ inode_state_will_delete,
+ inode_state_did_delete,
+};
+
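+/*
+ * Classify an inode against both trees: the lookup in send_root gives the
+ * "left" result, the lookup in parent_root the "right" one. An inode that
+ * only exists (with a matching generation) in send_root is created, one that
+ * only exists in parent_root is deleted; whether the creation/deletion has
+ * already been processed is decided by comparing ino with send_progress.
+ */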
+static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret;
+ int left_ret;
+ int right_ret;
+ u64 left_gen;
+ u64 right_gen;
+
+ ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
+ NULL);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ left_ret = ret;
+
+ if (!sctx->parent_root) {
+ right_ret = -ENOENT;
+ } else {
+ ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
+ NULL, NULL, NULL);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ right_ret = ret;
+ }
+
+ if (!left_ret && !right_ret) {
+ if (left_gen == gen && right_gen == gen)
+ ret = inode_state_no_change;
+ else if (left_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_create;
+ else
+ ret = inode_state_will_create;
+ } else if (right_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_delete;
+ else
+ ret = inode_state_will_delete;
+ } else {
+ ret = -ENOENT;
+ }
+ } else if (!left_ret) {
+ if (left_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_create;
+ else
+ ret = inode_state_will_create;
+ } else {
+ ret = -ENOENT;
+ }
+ } else if (!right_ret) {
+ if (right_gen == gen) {
+ if (ino < sctx->send_progress)
+ ret = inode_state_did_delete;
+ else
+ ret = inode_state_will_delete;
+ } else {
+ ret = -ENOENT;
+ }
+ } else {
+ ret = -ENOENT;
+ }
+
+out:
+ return ret;
+}
+
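+/*
+ * Returns 1 if the inode ino/gen exists at the current point of the receive
+ * stream (i.e. it was already created and not yet deleted at the receiver),
+ * 0 if it does not, or a negative error.
+ */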
+static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret;
+
+ ret = get_cur_inode_state(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+
+ if (ret == inode_state_no_change ||
+ ret == inode_state_did_create ||
+ ret == inode_state_will_delete)
+ ret = 1;
+ else
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ * Helper function to lookup a dir item in a dir.
+ */
+static int lookup_dir_item_inode(struct btrfs_root *root,
+ u64 dir, const char *name, int name_len,
+ u64 *found_inode,
+ u8 *found_type)
+{
+ int ret = 0;
+ struct btrfs_dir_item *di;
+ struct btrfs_key key;
+ struct btrfs_path *path;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ di = btrfs_lookup_dir_item(NULL, root, path,
+ dir, name, name_len, 0);
+ if (!di) {
+ ret = -ENOENT;
+ goto out;
+ }
+ if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
+ *found_inode = key.objectid;
+ *found_type = btrfs_dir_type(path->nodes[0], di);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
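+/*
+ * Find the first inode ref of ino in the given root. The name of the ref is
+ * appended to name and the parent directory's inode number and generation
+ * are returned in dir and dir_gen.
+ */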
+static int get_first_ref(struct send_ctx *sctx,
+ struct btrfs_root *root, u64 ino,
+ u64 *dir, u64 *dir_gen, struct fs_path *name)
+{
+ int ret;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_path *path;
+ struct btrfs_inode_ref *iref;
+ int len;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (!ret)
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ path->slots[0]);
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_ref);
+ len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+ ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+ (unsigned long)(iref + 1), len);
+ if (ret < 0)
+ goto out;
+ btrfs_release_path(path);
+
+ ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ *dir = found_key.offset;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
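+/*
+ * Returns 1 if (dir, name) is the first ref of ino in the given root,
+ * 0 if it is not, or a negative error.
+ */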
+static int is_first_ref(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ u64 ino, u64 dir,
+ const char *name, int name_len)
+{
+ int ret;
+ struct fs_path *tmp_name;
+ u64 tmp_dir;
+ u64 tmp_dir_gen;
+
+ tmp_name = fs_path_alloc(sctx);
+ if (!tmp_name)
+ return -ENOMEM;
+
+ ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
+ if (ret < 0)
+ goto out;
+
+ if (name_len != fs_path_len(tmp_name)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = memcmp(tmp_name->start, name, name_len);
+ if (ret)
+ ret = 0;
+ else
+ ret = 1;
+
+out:
+ fs_path_free(sctx, tmp_name);
+ return ret;
+}
+
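+/*
+ * Check if adding the ref (dir, name) in send_root would overwrite a ref in
+ * parent_root that belongs to an inode which was not processed yet. If so,
+ * return 1 and report the inode that would get overwritten in who_ino and
+ * who_gen, so that the caller can orphanize it first.
+ */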
+static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
+ const char *name, int name_len,
+ u64 *who_ino, u64 *who_gen)
+{
+ int ret = 0;
+ u64 other_inode = 0;
+ u8 other_type = 0;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ ret = is_inode_existent(sctx, dir, dir_gen);
+ if (ret <= 0)
+ goto out;
+
+ ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
+ &other_inode, &other_type);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ if (other_inode > sctx->send_progress) {
+ ret = get_inode_info(sctx->parent_root, other_inode, NULL,
+ who_gen, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = 1;
+ *who_ino = other_inode;
+ } else {
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
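+/*
+ * Check if the ref (dir, name) of inode ino/ino_gen in parent_root was
+ * already overwritten in the receive stream, i.e. send_root contains a
+ * different inode under that name and that inode was already processed.
+ * Returns 1 if so, 0 otherwise.
+ */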
+static int did_overwrite_ref(struct send_ctx *sctx,
+ u64 dir, u64 dir_gen,
+ u64 ino, u64 ino_gen,
+ const char *name, int name_len)
+{
+ int ret = 0;
+ u64 gen;
+ u64 ow_inode;
+ u8 other_type;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ ret = is_inode_existent(sctx, dir, dir_gen);
+ if (ret <= 0)
+ goto out;
+
+ /* check if the ref was overwritten by another ref */
+ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
+ &ow_inode, &other_type);
+ if (ret < 0 && ret != -ENOENT)
+ goto out;
+ if (ret) {
+ /* was never and will never be overwritten */
+ ret = 0;
+ goto out;
+ }
+
+ ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ if (ow_inode == ino && gen == ino_gen) {
+ ret = 0;
+ goto out;
+ }
+
+ /* we know that it is or will be overwritten. check this now */
+ if (ow_inode < sctx->send_progress)
+ ret = 1;
+ else
+ ret = 0;
+
+out:
+ return ret;
+}
+
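+/*
+ * Same as did_overwrite_ref, but for the first ref of ino as found in
+ * parent_root.
+ */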
+static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret = 0;
+ struct fs_path *name = NULL;
+ u64 dir;
+ u64 dir_gen;
+
+ if (!sctx->parent_root)
+ goto out;
+
+ name = fs_path_alloc(sctx);
+ if (!name)
+ return -ENOMEM;
+
+ ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name);
+ if (ret < 0)
+ goto out;
+
+ ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
+ name->start, fs_path_len(name));
+ if (ret < 0)
+ goto out;
+
+out:
+ fs_path_free(sctx, name);
+ return ret;
+}
+
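+/*
+ * The name cache maps an inode number to at most two cache entries (one per
+ * generation) in a radix tree. All entries are additionally linked on
+ * name_cache_list in LRU order so that unused entries can be evicted by
+ * name_cache_clean_unused.
+ */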
+static int name_cache_insert(struct send_ctx *sctx,
+ struct name_cache_entry *nce)
+{
+ int ret = 0;
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
+ if (ncea) {
+ if (!ncea[0])
+ ncea[0] = nce;
+ else if (!ncea[1])
+ ncea[1] = nce;
+ else
+ BUG();
+ } else {
+ ncea = kmalloc(sizeof(void *) * 2, GFP_NOFS);
+ if (!ncea)
+ return -ENOMEM;
+
+ ncea[0] = nce;
+ ncea[1] = NULL;
+ ret = radix_tree_insert(&sctx->name_cache, nce->ino, ncea);
+ if (ret < 0)
+ return ret;
+ }
+ list_add_tail(&nce->list, &sctx->name_cache_list);
+ sctx->name_cache_size++;
+
+ return ret;
+}
+
+static void name_cache_delete(struct send_ctx *sctx,
+ struct name_cache_entry *nce)
+{
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, nce->ino);
+ BUG_ON(!ncea);
+
+ if (ncea[0] == nce)
+ ncea[0] = NULL;
+ else if (ncea[1] == nce)
+ ncea[1] = NULL;
+ else
+ BUG();
+
+ if (!ncea[0] && !ncea[1]) {
+ radix_tree_delete(&sctx->name_cache, nce->ino);
+ kfree(ncea);
+ }
+
+ list_del(&nce->list);
+
+ sctx->name_cache_size--;
+}
+
+static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
+ u64 ino, u64 gen)
+{
+ struct name_cache_entry **ncea;
+
+ ncea = radix_tree_lookup(&sctx->name_cache, ino);
+ if (!ncea)
+ return NULL;
+
+ if (ncea[0] && ncea[0]->gen == gen)
+ return ncea[0];
+ else if (ncea[1] && ncea[1]->gen == gen)
+ return ncea[1];
+ return NULL;
+}
+
+static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
+{
+ list_del(&nce->list);
+ list_add_tail(&nce->list, &sctx->name_cache_list);
+}
+
+static void name_cache_clean_unused(struct send_ctx *sctx)
+{
+ struct name_cache_entry *nce;
+
+ if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
+ return;
+
+ while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
+ nce = list_entry(sctx->name_cache_list.next,
+ struct name_cache_entry, list);
+ name_cache_delete(sctx, nce);
+ kfree(nce);
+ }
+}
+
+static void name_cache_free(struct send_ctx *sctx)
+{
+ struct name_cache_entry *nce;
+ struct name_cache_entry *tmp;
+
+ list_for_each_entry_safe(nce, tmp, &sctx->name_cache_list, list) {
+ name_cache_delete(sctx, nce);
+ }
+}
+
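+/*
+ * Resolve the name and the parent inode/generation of ino/gen as they look
+ * at the current point of the receive stream. The result is cached in the
+ * name cache. Returns 1 if an orphan name was generated (the inode does not
+ * exist yet at the receiver or its ref was already overwritten), 0 otherwise.
+ */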
+static int __get_cur_name_and_parent(struct send_ctx *sctx,
+ u64 ino, u64 gen,
+ u64 *parent_ino,
+ u64 *parent_gen,
+ struct fs_path *dest)
+{
+ int ret;
+ int nce_ret;
+ struct btrfs_path *path = NULL;
+ struct name_cache_entry *nce = NULL;
+
+ nce = name_cache_search(sctx, ino, gen);
+ if (nce) {
+ if (ino < sctx->send_progress && nce->need_later_update) {
+ name_cache_delete(sctx, nce);
+ kfree(nce);
+ nce = NULL;
+ } else {
+ name_cache_used(sctx, nce);
+ *parent_ino = nce->parent_ino;
+ *parent_gen = nce->parent_gen;
+ ret = fs_path_add(dest, nce->name, nce->name_len);
+ if (ret < 0)
+ goto out;
+ ret = nce->ret;
+ goto out;
+ }
+ }
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ ret = is_inode_existent(sctx, ino, gen);
+ if (ret < 0)
+ goto out;
+
+ if (!ret) {
+ ret = gen_unique_name(sctx, ino, gen, dest);
+ if (ret < 0)
+ goto out;
+ ret = 1;
+ goto out_cache;
+ }
+
+ if (ino < sctx->send_progress)
+ ret = get_first_ref(sctx, sctx->send_root, ino,
+ parent_ino, parent_gen, dest);
+ else
+ ret = get_first_ref(sctx, sctx->parent_root, ino,
+ parent_ino, parent_gen, dest);
+ if (ret < 0)
+ goto out;
+
+ ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
+ dest->start, dest->end - dest->start);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ fs_path_reset(dest);
+ ret = gen_unique_name(sctx, ino, gen, dest);
+ if (ret < 0)
+ goto out;
+ ret = 1;
+ }
+
+out_cache:
+ nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
+ if (!nce) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nce->ino = ino;
+ nce->gen = gen;
+ nce->parent_ino = *parent_ino;
+ nce->parent_gen = *parent_gen;
+ nce->name_len = fs_path_len(dest);
+ nce->ret = ret;
+ strcpy(nce->name, dest->start);
+ memset(&nce->use_list, 0, sizeof(nce->use_list));
+
+ if (ino < sctx->send_progress)
+ nce->need_later_update = 0;
+ else
+ nce->need_later_update = 1;
+
+ nce_ret = name_cache_insert(sctx, nce);
+ if (nce_ret < 0)
+ ret = nce_ret;
+ name_cache_clean_unused(sctx);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Magic happens here. This function returns the first ref to an inode as it
+ * would look like while receiving the stream at this point in time.
+ * We walk the path up to the root. For every inode in between, we check if it
+ * was already processed/sent. If yes, we continue with the parent as found
+ * in send_root. If not, we continue with the parent as found in parent_root.
+ * If we encounter an inode that was deleted at this point in time, we use the
+ * inode's "orphan" name instead of the real name and stop. Same with new inodes
+ * that were not created yet and overwritten inodes/refs.
+ *
+ * When do we have orphan inodes:
+ * 1. When an inode is freshly created and thus no valid refs are available yet
+ * 2. When a directory lost all its refs (deleted) but still has dir items
+ * inside which were not processed yet (pending for move/delete). If anyone
+ * tried to get the path to the dir items, it would get a path inside that
+ * orphan directory.
+ * 3. When an inode is moved around or gets new links, it may overwrite the ref
+ * of an unprocessed inode. If in that case the first ref would be
+ * overwritten, the overwritten inode gets "orphanized". Later when we
+ * process this overwritten inode, it is restored at a new place by moving
+ * the orphan inode.
+ *
+ * sctx->send_progress tells this function at which point in time receiving
+ * would be.
+ */
+static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
+ struct fs_path *dest)
+{
+ int ret = 0;
+ struct fs_path *name = NULL;
+ u64 parent_inode = 0;
+ u64 parent_gen = 0;
+ int stop = 0;
+
+ name = fs_path_alloc(sctx);
+ if (!name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dest->reversed = 1;
+ fs_path_reset(dest);
+
+ while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
+ fs_path_reset(name);
+
+ ret = __get_cur_name_and_parent(sctx, ino, gen,
+ &parent_inode, &parent_gen, name);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ stop = 1;
+
+ ret = fs_path_add_path(dest, name);
+ if (ret < 0)
+ goto out;
+
+ ino = parent_inode;
+ gen = parent_gen;
+ }
+
+out:
+ fs_path_free(sctx, name);
+ if (!ret)
+ fs_path_unreverse(dest);
+ return ret;
+}
+
+/*
+ * Called for regular files when sending extents data. Opens a struct file
+ * to read from the file.
+ */
+static int open_cur_inode_file(struct send_ctx *sctx)
+{
+ int ret = 0;
+ struct btrfs_key key;
+ struct path path;
+ struct inode *inode;
+ struct dentry *dentry;
+ struct file *filp;
+ int new = 0;
+
+ if (sctx->cur_inode_filp)
+ goto out;
+
+ key.objectid = sctx->cur_ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ inode = btrfs_iget(sctx->send_root->fs_info->sb, &key, sctx->send_root,
+ &new);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+
+ dentry = d_obtain_alias(inode);
+ inode = NULL;
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ path.mnt = sctx->mnt;
+ path.dentry = dentry;
+ filp = dentry_open(&path, O_RDONLY | O_LARGEFILE, current_cred());
+ dput(dentry);
+ dentry = NULL;
+ if (IS_ERR(filp)) {
+ ret = PTR_ERR(filp);
+ goto out;
+ }
+ sctx->cur_inode_filp = filp;
+
+out:
+ /*
+ * no xxxput required here as every vfs op
+ * does it by itself on failure
+ */
+ return ret;
+}
+
+/*
+ * Closes the struct file that was created in open_cur_inode_file
+ */
+static int close_cur_inode_file(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ if (!sctx->cur_inode_filp)
+ goto out;
+
+ ret = filp_close(sctx->cur_inode_filp, NULL);
+ sctx->cur_inode_filp = NULL;
+
+out:
+ return ret;
+}
+
+/*
+ * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
+ */
+static int send_subvol_begin(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *send_root = sctx->send_root;
+ struct btrfs_root *parent_root = sctx->parent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_root_ref *ref;
+ struct extent_buffer *leaf;
+ char *name = NULL;
+ int namelen;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
+ if (!name) {
+ btrfs_free_path(path);
+ return -ENOMEM;
+ }
+
+ key.objectid = send_root->objectid;
+ key.type = BTRFS_ROOT_BACKREF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
+ &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+ if (key.type != BTRFS_ROOT_BACKREF_KEY ||
+ key.objectid != send_root->objectid) {
+ ret = -ENOENT;
+ goto out;
+ }
+ ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
+ namelen = btrfs_root_ref_name_len(leaf, ref);
+ read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
+ btrfs_release_path(path);
+
+ if (ret < 0)
+ goto out;
+
+ if (parent_root) {
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
+ if (ret < 0)
+ goto out;
+ }
+
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
+ sctx->send_root->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
+ sctx->send_root->root_item.ctransid);
+ if (parent_root) {
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+ sctx->parent_root->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
+ sctx->parent_root->root_item.ctransid);
+ }
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ btrfs_free_path(path);
+ kfree(name);
+ return ret;
+}
+
+static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
+{
+ int ret = 0;
+ struct fs_path *p = NULL;
+ struct btrfs_inode_item *ii;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *eb;
+ struct btrfs_key key;
+ int slot;
+
+verbose_printk("btrfs: send_utimes %llu\n", ino);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+ ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, ino, gen, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
+ btrfs_inode_atime(ii));
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
+ btrfs_inode_mtime(ii));
+ TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
+ btrfs_inode_ctime(ii));
+ /* TODO otime? */
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
+ * a valid path yet because we did not process the refs yet. So, the inode
+ * is created as an orphan.
+ */
+static int send_create_inode(struct send_ctx *sctx, struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret = 0;
+ struct extent_buffer *eb = path->nodes[0];
+ struct btrfs_inode_item *ii;
+ struct fs_path *p;
+ int slot = path->slots[0];
+ int cmd;
+ u64 mode;
+
+verbose_printk("btrfs: send_create_inode %llu\n", sctx->cur_ino);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
+ mode = btrfs_inode_mode(eb, ii);
+
+ if (S_ISREG(mode))
+ cmd = BTRFS_SEND_C_MKFILE;
+ else if (S_ISDIR(mode))
+ cmd = BTRFS_SEND_C_MKDIR;
+ else if (S_ISLNK(mode))
+ cmd = BTRFS_SEND_C_SYMLINK;
+ else if (S_ISCHR(mode) || S_ISBLK(mode))
+ cmd = BTRFS_SEND_C_MKNOD;
+ else if (S_ISFIFO(mode))
+ cmd = BTRFS_SEND_C_MKFIFO;
+ else if (S_ISSOCK(mode))
+ cmd = BTRFS_SEND_C_MKSOCK;
+ else {
+ printk(KERN_WARNING "btrfs: unexpected inode type %o\n",
+ (int)(mode & S_IFMT));
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = begin_cmd(sctx, cmd);
+ if (ret < 0)
+ goto out;
+
+ ret = gen_unique_name(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, sctx->cur_ino);
+
+ if (S_ISLNK(mode)) {
+ fs_path_reset(p);
+ ret = read_symlink(sctx, sctx->send_root, sctx->cur_ino, p);
+ if (ret < 0)
+ goto out;
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
+ } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
+ S_ISFIFO(mode) || S_ISSOCK(mode)) {
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, btrfs_inode_rdev(eb, ii));
+ }
+
+ ret = send_cmd(sctx);
+ if (ret < 0)
+ goto out;
+
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+struct recorded_ref {
+ struct list_head list;
+ char *dir_path;
+ char *name;
+ struct fs_path *full_path;
+ u64 dir;
+ u64 dir_gen;
+ int dir_path_len;
+ int name_len;
+};
+
+/*
+ * We need to process new refs before deleted refs, but compare_tree gives us
+ * everything mixed. So we first record all refs and later process them.
+ * This function is a helper to record one ref.
+ */
+static int record_ref(struct list_head *head, u64 dir,
+ u64 dir_gen, struct fs_path *path)
+{
+ struct recorded_ref *ref;
+ char *tmp;
+
+ ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->dir = dir;
+ ref->dir_gen = dir_gen;
+ ref->full_path = path;
+
+ tmp = strrchr(ref->full_path->start, '/');
+ if (!tmp) {
+ ref->name_len = ref->full_path->end - ref->full_path->start;
+ ref->name = ref->full_path->start;
+ ref->dir_path_len = 0;
+ ref->dir_path = ref->full_path->start;
+ } else {
+ tmp++;
+ ref->name_len = ref->full_path->end - tmp;
+ ref->name = tmp;
+ ref->dir_path = ref->full_path->start;
+ ref->dir_path_len = ref->full_path->end -
+ ref->full_path->start - 1 - ref->name_len;
+ }
+
+ list_add_tail(&ref->list, head);
+ return 0;
+}
+
+static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)
+{
+ struct recorded_ref *cur;
+ struct recorded_ref *tmp;
+
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ fs_path_free(sctx, cur->full_path);
+ kfree(cur);
+ }
+ INIT_LIST_HEAD(head);
+}
+
+static void free_recorded_refs(struct send_ctx *sctx)
+{
+ __free_recorded_refs(sctx, &sctx->new_refs);
+ __free_recorded_refs(sctx, &sctx->deleted_refs);
+}
+
+/*
+ * Renames/moves a file/dir to its orphan name. Used when the first
+ * ref of an unprocessed inode gets overwritten and for all non-empty
+ * directories.
+ */
+static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
+ struct fs_path *path)
+{
+ int ret;
+ struct fs_path *orphan;
+
+ orphan = fs_path_alloc(sctx);
+ if (!orphan)
+ return -ENOMEM;
+
+ ret = gen_unique_name(sctx, ino, gen, orphan);
+ if (ret < 0)
+ goto out;
+
+ ret = send_rename(sctx, path, orphan);
+
+out:
+ fs_path_free(sctx, orphan);
+ return ret;
+}
+
+/*
+ * Returns 1 if a directory can be removed at this point in time.
+ * We check this by iterating all dir items and checking if the inode behind
+ * the dir item was already processed.
+ */
+static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress)
+{
+ int ret = 0;
+ struct btrfs_root *root = sctx->parent_root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_key loc;
+ struct btrfs_dir_item *di;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = dir;
+ key.type = BTRFS_DIR_INDEX_KEY;
+ key.offset = 0;
+
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ btrfs_item_key_to_cpu(path->nodes[0], &found_key,
+ path->slots[0]);
+ }
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ break;
+ }
+
+ di = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_dir_item);
+ btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
+
+ if (loc.objectid > send_progress) {
+ ret = 0;
+ goto out;
+ }
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+ ret = 1;
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+struct finish_unordered_dir_ctx {
+ struct send_ctx *sctx;
+ struct fs_path *cur_path;
+ struct fs_path *dir_path;
+ u64 dir_ino;
+ int need_delete;
+ int delete_pass;
+};
+
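+/*
+ * iterate_dir_item callback for finish_outoforder_dir. Only dir items that
+ * point to an inode with a lower inode number than the directory itself are
+ * handled here, as those are the refs that could not be created earlier.
+ * Directories are renamed into place, other inodes are linked in; see the
+ * two-pass comment in finish_outoforder_dir for the orphan cleanup.
+ */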
+int __finish_unordered_dir(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret = 0;
+ struct finish_unordered_dir_ctx *fctx = ctx;
+ struct send_ctx *sctx = fctx->sctx;
+ u64 di_gen;
+ u64 di_mode;
+ int is_orphan = 0;
+
+ if (di_key->objectid >= fctx->dir_ino)
+ goto out;
+
+ fs_path_reset(fctx->cur_path);
+
+ ret = get_inode_info(sctx->send_root, di_key->objectid,
+ NULL, &di_gen, &di_mode, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = is_first_ref(sctx, sctx->send_root, di_key->objectid,
+ fctx->dir_ino, name, name_len);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ is_orphan = 1;
+ ret = gen_unique_name(sctx, di_key->objectid, di_gen,
+ fctx->cur_path);
+ } else {
+ ret = get_cur_path(sctx, di_key->objectid, di_gen,
+ fctx->cur_path);
+ }
+ if (ret < 0)
+ goto out;
+
+ ret = fs_path_add(fctx->dir_path, name, name_len);
+ if (ret < 0)
+ goto out;
+
+ if (!fctx->delete_pass) {
+ if (S_ISDIR(di_mode)) {
+ ret = send_rename(sctx, fctx->cur_path,
+ fctx->dir_path);
+ } else {
+ ret = send_link(sctx, fctx->dir_path,
+ fctx->cur_path);
+ if (is_orphan)
+ fctx->need_delete = 1;
+ }
+ } else if (!S_ISDIR(di_mode)) {
+ ret = send_unlink(sctx, fctx->cur_path);
+ } else {
+ ret = 0;
+ }
+
+ fs_path_remove(fctx->dir_path);
+
+out:
+ return ret;
+}
+
+/*
+ * Go through all dir items and see if we find refs which could not be created
+ * in the past because the dir did not exist at that time.
+ */
+static int finish_outoforder_dir(struct send_ctx *sctx, u64 dir, u64 dir_gen)
+{
+ int ret = 0;
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ struct finish_unordered_dir_ctx fctx;
+ int slot;
+
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(&fctx, 0, sizeof(fctx));
+ fctx.sctx = sctx;
+ fctx.cur_path = fs_path_alloc(sctx);
+ fctx.dir_path = fs_path_alloc(sctx);
+ if (!fctx.cur_path || !fctx.dir_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fctx.dir_ino = dir;
+
+ ret = get_cur_path(sctx, dir, dir_gen, fctx.dir_path);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * We do two passes. The first links in the new refs and the second
+ * deletes orphans if required. Deletion of orphans is not required for
+ * directory inodes, as we always have only one ref and use rename
+ * instead of link for those.
+ */
+
+again:
+ key.objectid = dir;
+ key.type = BTRFS_DIR_ITEM_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(sctx->send_root, &key, path,
+ 1, 0);
+ if (ret < 0)
+ goto out;
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ ret = iterate_dir_item(sctx, sctx->send_root, path,
+ &found_key, __finish_unordered_dir,
+ &fctx);
+ if (ret < 0)
+ goto out;
+
+ key.offset = found_key.offset + 1;
+ btrfs_release_path(path);
+ }
+
+ if (!fctx.delete_pass && fctx.need_delete) {
+ fctx.delete_pass = 1;
+ goto again;
+ }
+
+out:
+ btrfs_free_path(path);
+ fs_path_free(sctx, fctx.cur_path);
+ fs_path_free(sctx, fctx.dir_path);
+ return ret;
+}
+
+/*
+ * This does all the move/link/unlink/rmdir magic.
+ */
+static int process_recorded_refs(struct send_ctx *sctx)
+{
+ int ret = 0;
+ struct recorded_ref *cur;
+ struct ulist *check_dirs = NULL;
+ struct ulist_iterator uit;
+ struct ulist_node *un;
+ struct fs_path *valid_path = NULL;
+ u64 ow_inode = 0;
+ u64 ow_gen;
+ int did_overwrite = 0;
+ int is_orphan = 0;
+
+verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
+
+ valid_path = fs_path_alloc(sctx);
+ if (!valid_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ check_dirs = ulist_alloc(GFP_NOFS);
+ if (!check_dirs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * First, check if the first ref of the current inode was overwritten
+ * before. If yes, we know that the current inode was already orphanized
+ * and thus use the orphan name. If not, we can use get_cur_path to
+ * get the path of the first ref as it would look like while receiving at
+ * this point in time.
+ * New inodes are always orphans at the beginning, so we force the use of the
+ * orphan name in this case.
+ * The first ref is stored in valid_path and will be updated if it
+ * gets moved around.
+ */
+ if (!sctx->cur_inode_new) {
+ ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ did_overwrite = 1;
+ }
+ if (sctx->cur_inode_new || did_overwrite) {
+ ret = gen_unique_name(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+ } else {
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ }
+
+ list_for_each_entry(cur, &sctx->new_refs, list) {
+ /*
+ * Check if this new ref would overwrite the first ref of
+ * another unprocessed inode. If yes, orphanize the
+ * overwritten inode. If we find an overwritten ref that is
+ * not the first ref, simply unlink it.
+ */
+ ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
+ cur->name, cur->name_len,
+ &ow_inode, &ow_gen);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = is_first_ref(sctx, sctx->parent_root,
+ ow_inode, cur->dir, cur->name,
+ cur->name_len);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = orphanize_inode(sctx, ow_inode, ow_gen,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = send_unlink(sctx, cur->full_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ /*
+ * link/move the ref to the new place. If we have an orphan
+ * inode, move it and update valid_path. If not, link or move
+ * it depending on the inode mode.
+ */
+ if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ ret = send_rename(sctx, valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 0;
+ ret = fs_path_copy(valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ if (S_ISDIR(sctx->cur_inode_mode)) {
+ /*
+ * Dirs can't be linked, so move it. For moved
+ * dirs, we always have one new and one deleted
+ * ref. The deleted ref is ignored later.
+ */
+ ret = send_rename(sctx, valid_path,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_copy(valid_path, cur->full_path);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = send_link(sctx, cur->full_path,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
+ /*
+ * Check if we can already rmdir the directory. If not,
+ * orphanize it. For every dir item inside that gets deleted
+ * later, we do this check again and rmdir it then if possible.
+ * See the use of check_dirs for more details.
+ */
+ ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = send_rmdir(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ } else if (!is_orphan) {
+ ret = orphanize_inode(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+ }
+
+ list_for_each_entry(cur, &sctx->deleted_refs, list) {
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+ } else if (!S_ISDIR(sctx->cur_inode_mode)) {
+ /*
+ * We have a non dir inode. Go through all deleted refs and
+ * unlink them if they were not already overwritten by other
+ * inodes.
+ */
+ list_for_each_entry(cur, &sctx->deleted_refs, list) {
+ ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
+ sctx->cur_ino, sctx->cur_inode_gen,
+ cur->name, cur->name_len);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ /*
+ * In case the inode was moved to a directory
+ * that was not created yet (see
+ * __record_new_ref), we can not unlink the ref
+ * as it will be needed later when the parent
+ * directory is created, so that we can move
+ * the inode into the new dir.
+ */
+ if (!is_orphan &&
+ sctx->cur_inode_first_ref_orphan) {
+ ret = orphanize_inode(sctx,
+ sctx->cur_ino,
+ sctx->cur_inode_gen,
+ cur->full_path);
+ if (ret < 0)
+ goto out;
+ ret = gen_unique_name(sctx,
+ sctx->cur_ino,
+ sctx->cur_inode_gen,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ is_orphan = 1;
+
+ } else {
+ ret = send_unlink(sctx, cur->full_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ ret = ulist_add(check_dirs, cur->dir, cur->dir_gen,
+ GFP_NOFS);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * If the inode is still orphan, unlink the orphan. This may
+ * happen when a previous inode did overwrite the first ref
+ * of this inode and no new refs were added for the current
+ * inode.
+ * We can however not delete the orphan in case the inode resides
+ * in a directory that was not created yet (see
+ * __record_new_ref)
+ */
+ if (is_orphan && !sctx->cur_inode_first_ref_orphan) {
+ ret = send_unlink(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ /*
+ * We did collect all parent dirs where cur_inode was once located. We
+ * now go through all these dirs and check if they are pending for
+ * deletion and if it's finally possible to perform the rmdir now.
+ * We also update the inode stats of the parent dirs here.
+ */
+ ULIST_ITER_INIT(&uit);
+ while ((un = ulist_next(check_dirs, &uit))) {
+ if (un->val > sctx->cur_ino)
+ continue;
+
+ ret = get_cur_inode_state(sctx, un->val, un->aux);
+ if (ret < 0)
+ goto out;
+
+ if (ret == inode_state_did_create ||
+ ret == inode_state_no_change) {
+ /* TODO delayed utimes */
+ ret = send_utimes(sctx, un->val, un->aux);
+ if (ret < 0)
+ goto out;
+ } else if (ret == inode_state_did_delete) {
+ ret = can_rmdir(sctx, un->val, sctx->cur_ino);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = get_cur_path(sctx, un->val, un->aux,
+ valid_path);
+ if (ret < 0)
+ goto out;
+ ret = send_rmdir(sctx, valid_path);
+ if (ret < 0)
+ goto out;
+ }
+ }
+ }
+
+ /*
+ * Current inode is now at its new position, so we must increase
+ * send_progress
+ */
+ sctx->send_progress = sctx->cur_ino + 1;
+
+ /*
+ * We may have a directory here that has pending refs which could not
+ * be created before (because the dir did not exist before, see
+ * __record_new_ref). finish_outoforder_dir will link/move the pending
+ * refs.
+ */
+ if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_new) {
+ ret = finish_outoforder_dir(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ free_recorded_refs(sctx);
+ ulist_free(check_dirs);
+ fs_path_free(sctx, valid_path);
+ return ret;
+}
+
+static int __record_new_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ u64 gen;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * The parent may be non-existent at this point in time. This happens
+ * if the ino of the parent dir is higher than the current ino. In this
+ * case, we can not process this ref until the parent dir is finally
+ * created. If we reach the parent dir later, process_recorded_refs
+ * will go through all dir items and process the refs that could not be
+ * processed before. In case this is the first ref, we set
+ * cur_inode_first_ref_orphan to 1 to inform process_recorded_refs to
+ * keep an orphan of the inode so that it can later be used for
+ * link/move.
+ */
+ ret = is_inode_existent(sctx, dir, gen);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ ret = is_first_ref(sctx, sctx->send_root, sctx->cur_ino, dir,
+ name->start, fs_path_len(name));
+ if (ret < 0)
+ goto out;
+ if (ret)
+ sctx->cur_inode_first_ref_orphan = 1;
+ ret = 0;
+ goto out;
+ }
+
+ ret = get_cur_path(sctx, dir, gen, p);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_add_path(p, name);
+ if (ret < 0)
+ goto out;
+
+ ret = record_ref(&sctx->new_refs, dir, gen, p);
+
+out:
+ if (ret)
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __record_deleted_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ u64 gen;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL,
+ NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, dir, gen, p);
+ if (ret < 0)
+ goto out;
+ ret = fs_path_add_path(p, name);
+ if (ret < 0)
+ goto out;
+
+ ret = record_ref(&sctx->deleted_refs, dir, gen, p);
+
+out:
+ if (ret)
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int record_new_ref(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, 0, __record_new_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int record_deleted_ref(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, 0, __record_deleted_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+struct find_ref_ctx {
+ u64 dir;
+ struct fs_path *name;
+ int found_idx;
+};
+
+static int __find_iref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx_)
+{
+ struct find_ref_ctx *ctx = ctx_;
+
+ if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
+ strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
+ ctx->found_idx = num;
+ return 1;
+ }
+ return 0;
+}
+
+static int find_iref(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ u64 dir, struct fs_path *name)
+{
+ int ret;
+ struct find_ref_ctx ctx;
+
+ ctx.dir = dir;
+ ctx.name = name;
+ ctx.found_idx = -1;
+
+ ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.found_idx == -1)
+ return -ENOENT;
+
+ return ctx.found_idx;
+}
+
+static int __record_changed_new_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_iref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, dir, name);
+ if (ret == -ENOENT)
+ ret = __record_new_ref(num, dir, index, name, sctx);
+ else if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int __record_changed_deleted_ref(int num, u64 dir, int index,
+ struct fs_path *name,
+ void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
+ dir, name);
+ if (ret == -ENOENT)
+ ret = __record_deleted_ref(num, dir, index, name, sctx);
+ else if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
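+/*
+ * For a changed inode ref item, record refs that only exist in send_root as
+ * new refs and refs that only exist in parent_root as deleted refs.
+ */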
+static int record_changed_ref(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, 0, __record_changed_new_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
+ if (ret < 0)
+ goto out;
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/*
+ * Record and process all refs at once. Needed when an inode changes the
+ * generation number, which means that it was deleted and recreated.
+ */
+static int process_all_refs(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result cmd)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+ iterate_inode_ref_t cb;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ if (cmd == BTRFS_COMPARE_TREE_NEW) {
+ root = sctx->send_root;
+ cb = __record_new_ref;
+ } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
+ root = sctx->parent_root;
+ cb = __record_deleted_ref;
+ } else {
+ BUG();
+ }
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0) {
+ btrfs_release_path(path);
+ goto out;
+ }
+ if (ret) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ btrfs_release_path(path);
+ break;
+ }
+
+ ret = iterate_inode_ref(sctx, sctx->parent_root, path,
+ &found_key, 0, cb, sctx);
+ btrfs_release_path(path);
+ if (ret < 0)
+ goto out;
+
+ key.offset = found_key.offset + 1;
+ }
+
+ ret = process_recorded_refs(sctx);
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int send_set_xattr(struct send_ctx *sctx,
+ struct fs_path *path,
+ const char *name, int name_len,
+ const char *data, int data_len)
+{
+ int ret = 0;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
+ TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+static int send_remove_xattr(struct send_ctx *sctx,
+ struct fs_path *path,
+ const char *name, int name_len)
+{
+ int ret = 0;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
+ TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ return ret;
+}
+
+static int __process_new_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+ posix_acl_xattr_header dummy_acl;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+ * This hack is needed because empty ACLs are stored as zero-byte
+ * data in xattrs. The problem is that receiving these zero-byte
+ * ACLs will fail later. To fix this, we send a dummy ACL list that
+ * only contains the version number and no entries.
+ */
+ if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
+ !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
+ if (data_len == 0) {
+ dummy_acl.a_version =
+ cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+ data = (char *)&dummy_acl;
+ data_len = sizeof(dummy_acl);
+ }
+ }
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
+
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ ret = send_remove_xattr(sctx, p, name, name_len);
+
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int process_new_xattr(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, __process_new_xattr, sctx);
+
+ return ret;
+}
+
+static int process_deleted_xattr(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, __process_deleted_xattr, sctx);
+
+ return ret;
+}
+
+struct find_xattr_ctx {
+ const char *name;
+ int name_len;
+ int found_idx;
+ char *found_data;
+ int found_data_len;
+};
+
+static int __find_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *vctx)
+{
+ struct find_xattr_ctx *ctx = vctx;
+
+ if (name_len == ctx->name_len &&
+ strncmp(name, ctx->name, name_len) == 0) {
+ ctx->found_idx = num;
+ ctx->found_data_len = data_len;
+ ctx->found_data = kmalloc(data_len, GFP_NOFS);
+ if (!ctx->found_data)
+ return -ENOMEM;
+ memcpy(ctx->found_data, data, data_len);
+ return 1;
+ }
+ return 0;
+}
+
+static int find_xattr(struct send_ctx *sctx,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ const char *name, int name_len,
+ char **data, int *data_len)
+{
+ int ret;
+ struct find_xattr_ctx ctx;
+
+ ctx.name = name;
+ ctx.name_len = name_len;
+ ctx.found_idx = -1;
+ ctx.found_data = NULL;
+ ctx.found_data_len = 0;
+
+ ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ctx.found_idx == -1)
+ return -ENOENT;
+ if (data) {
+ *data = ctx.found_data;
+ *data_len = ctx.found_data_len;
+ } else {
+ kfree(ctx.found_data);
+ }
+ return ctx.found_idx;
+}
+
+
+static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+ char *found_data = NULL;
+ int found_data_len = 0;
+ struct fs_path *p = NULL;
+
+ ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, name, name_len, &found_data,
+ &found_data_len);
+ if (ret == -ENOENT) {
+ ret = __process_new_xattr(num, di_key, name, name_len, data,
+ data_len, type, ctx);
+ } else if (ret >= 0) {
+ if (data_len != found_data_len ||
+ memcmp(data, found_data, data_len)) {
+ ret = __process_new_xattr(num, di_key, name, name_len,
+ data, data_len, type, ctx);
+ } else {
+ ret = 0;
+ }
+ }
+
+ kfree(found_data);
+ fs_path_free(sctx, p);
+ return ret;
+}
+
+static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
+ const char *name, int name_len,
+ const char *data, int data_len,
+ u8 type, void *ctx)
+{
+ int ret;
+ struct send_ctx *sctx = ctx;
+
+ ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
+ name, name_len, NULL, NULL);
+ if (ret == -ENOENT)
+ ret = __process_deleted_xattr(num, di_key, name, name_len, data,
+ data_len, type, ctx);
+ else if (ret >= 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int process_changed_xattr(struct send_ctx *sctx)
+{
+ int ret = 0;
+
+ ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
+ sctx->cmp_key, __process_changed_new_xattr, sctx);
+ if (ret < 0)
+ goto out;
+ ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
+ sctx->cmp_key, __process_changed_deleted_xattr, sctx);
+
+out:
+ return ret;
+}
+
+static int process_all_new_xattrs(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ root = sctx->send_root;
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_XATTR_ITEM_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = iterate_dir_item(sctx, root, path, &found_key,
+ __process_new_xattr, sctx);
+ if (ret < 0)
+ goto out;
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+/*
+ * Read some bytes from the current inode/file and send a write command to
+ * user space.
+ */
+static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
+{
+ int ret = 0;
+ struct fs_path *p;
+ loff_t pos = offset;
+ int readed = 0;
+ mm_segment_t old_fs;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+ * The VFS normally only accepts user space buffers for security reasons.
+ * We only read from the file and only provide the read_buf buffer
+ * to the VFS. As this buffer does not come from a user space call, it's
+ * ok to temporarily allow kernel space buffers.
+ */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);
+
+ ret = open_cur_inode_file(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = vfs_read(sctx->cur_inode_filp, sctx->read_buf, len, &pos);
+ if (ret < 0)
+ goto out;
+ readed = ret;
+ if (!readed)
+ goto out;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, readed);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ set_fs(old_fs);
+ if (ret < 0)
+ return ret;
+ return readed;
+}
+
+/*
+ * Send a clone command to user space.
+ */
+static int send_clone(struct send_ctx *sctx,
+ u64 offset, u32 len,
+ struct clone_root *clone_root)
+{
+ int ret = 0;
+ struct btrfs_root *clone_root2 = clone_root->root;
+ struct fs_path *p;
+ u64 gen;
+
+verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
+ "clone_inode=%llu, clone_offset=%llu\n", offset, len,
+ clone_root->root->objectid, clone_root->ino,
+ clone_root->offset);
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+
+ if (clone_root2 == sctx->send_root) {
+ ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
+ &gen, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ ret = get_cur_path(sctx, clone_root->ino, gen, p);
+ } else {
+ ret = get_inode_path(sctx, clone_root2, clone_root->ino, p);
+ }
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+ clone_root2->root_item.uuid);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
+ clone_root2->root_item.ctransid);
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
+ clone_root->offset);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
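+/*
+ * Send the data of one file extent item, either as a series of write commands
+ * (in chunks of at most BTRFS_SEND_READ_SIZE) or, if a clone source was found,
+ * as a single clone command. The length is clamped to the current inode size.
+ */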
+static int send_write_or_clone(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ struct btrfs_key *key,
+ struct clone_root *clone_root)
+{
+ int ret = 0;
+ struct btrfs_file_extent_item *ei;
+ u64 offset = key->offset;
+ u64 pos = 0;
+ u64 len;
+ u32 l;
+ u8 type;
+
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ type = btrfs_file_extent_type(path->nodes[0], ei);
+ if (type == BTRFS_FILE_EXTENT_INLINE)
+ len = btrfs_file_extent_inline_len(path->nodes[0], ei);
+ else
+ len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
+
+ if (offset + len > sctx->cur_inode_size)
+ len = sctx->cur_inode_size - offset;
+ if (len == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (!clone_root) {
+ while (pos < len) {
+ l = len - pos;
+ if (l > BTRFS_SEND_READ_SIZE)
+ l = BTRFS_SEND_READ_SIZE;
+ ret = send_write(sctx, pos + offset, l);
+ if (ret < 0)
+ goto out;
+ if (!ret)
+ break;
+ pos += ret;
+ }
+ ret = 0;
+ } else {
+ ret = send_clone(sctx, offset, len, clone_root);
+ }
+
+out:
+ return ret;
+}
+
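+/*
+ * Returns 1 if the file extent referenced by left_path/ekey points to the
+ * same disk data as the overlapping extents in parent_root, which means no
+ * write commands need to be sent for it. Only regular (non-inline) extents
+ * are considered; everything else is reported as changed.
+ */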
+static int is_extent_unchanged(struct send_ctx *sctx,
+ struct btrfs_path *left_path,
+ struct btrfs_key *ekey)
+{
+ int ret = 0;
+ struct btrfs_key key;
+ struct btrfs_path *path = NULL;
+ struct extent_buffer *eb;
+ int slot;
+ struct btrfs_key found_key;
+ struct btrfs_file_extent_item *ei;
+ u64 left_disknr;
+ u64 right_disknr;
+ u64 left_offset;
+ u64 right_offset;
+ u64 left_offset_fixed;
+ u64 left_len;
+ u64 right_len;
+ u8 left_type;
+ u8 right_type;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ eb = left_path->nodes[0];
+ slot = left_path->slots[0];
+
+ ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ left_type = btrfs_file_extent_type(eb, ei);
+ left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ left_len = btrfs_file_extent_num_bytes(eb, ei);
+ left_offset = btrfs_file_extent_offset(eb, ei);
+
+ if (left_type != BTRFS_FILE_EXTENT_REG) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * The following comments will refer to these graphics. L is the left
+ * extent which we are checking at the moment. 1-8 are the right
+ * extents that we iterate.
+ *
+ * |-----L-----|
+ * |-1-|-2a-|-3-|-4-|-5-|-6-|
+ *
+ * |-----L-----|
+ * |--1--|-2b-|...(same as above)
+ *
+ * Alternative situation. Happens on files where extents got split.
+ * |-----L-----|
+ * |-----------7-----------|-6-|
+ *
+ * Alternative situation. Happens on files which got larger.
+ * |-----L-----|
+ * |-8-|
+ * Nothing follows after 8.
+ */
+
+ key.objectid = ekey->objectid;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = ekey->offset;
+ ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Handle special case where the right side has no extents at all.
+ */
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * We're now on 2a, 2b or 7.
+ */
+ key = found_key;
+ while (key.offset < ekey->offset + left_len) {
+ ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ right_type = btrfs_file_extent_type(eb, ei);
+ right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+ right_len = btrfs_file_extent_num_bytes(eb, ei);
+ right_offset = btrfs_file_extent_offset(eb, ei);
+
+ if (right_type != BTRFS_FILE_EXTENT_REG) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Are we at extent 8? If yes, we know the extent is changed.
+ * This may only happen on the first iteration.
+ */
+ if (found_key.offset + right_len < ekey->offset) {
+ ret = 0;
+ goto out;
+ }
+
+ left_offset_fixed = left_offset;
+ if (key.offset < ekey->offset) {
+ /* Fix the right offset for 2a and 7. */
+ right_offset += ekey->offset - key.offset;
+ } else {
+ /* Fix the left offset for all behind 2a and 2b */
+ left_offset_fixed += key.offset - ekey->offset;
+ }
+
+ /*
+ * Check if we have the same extent.
+ */
+ if (left_disknr + left_offset_fixed !=
+ right_disknr + right_offset) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * Go to the next extent.
+ */
+ ret = btrfs_next_item(sctx->parent_root, path);
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+ }
+ if (ret || found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ key.offset += right_len;
+ break;
+ } else {
+ if (found_key.offset != key.offset + right_len) {
+ /* Should really not happen */
+ ret = -EIO;
+ goto out;
+ }
+ }
+ key = found_key;
+ }
+
+ /*
+ * We're now behind the left extent (treat as unchanged) or at the end
+ * of the right side (treat as changed).
+ */
+ if (key.offset >= ekey->offset + left_len)
+ ret = 1;
+ else
+ ret = 0;
+
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int process_extent(struct send_ctx *sctx,
+ struct btrfs_path *path,
+ struct btrfs_key *key)
+{
+ int ret = 0;
+ struct clone_root *found_clone = NULL;
+
+ if (S_ISLNK(sctx->cur_inode_mode))
+ return 0;
+
+ if (sctx->parent_root && !sctx->cur_inode_new) {
+ ret = is_extent_unchanged(sctx, path, key);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = find_extent_clone(sctx, path, key->objectid, key->offset,
+ sctx->cur_inode_size, &found_clone);
+ if (ret != -ENOENT && ret < 0)
+ goto out;
+
+ ret = send_write_or_clone(sctx, path, key, found_clone);
+
+out:
+ return ret;
+}
+
+static int process_all_extents(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_root *root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct extent_buffer *eb;
+ int slot;
+
+ root = sctx->send_root;
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ key.objectid = sctx->cmp_key->objectid;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = 0;
+ while (1) {
+ ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ if (found_key.objectid != key.objectid ||
+ found_key.type != key.type) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = process_extent(sctx, path, &found_key);
+ if (ret < 0)
+ goto out;
+
+ btrfs_release_path(path);
+ key.offset = found_key.offset + 1;
+ }
+
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
+static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
+{
+ int ret = 0;
+
+ if (sctx->cur_ino == 0)
+ goto out;
+ if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
+ sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+ goto out;
+ if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
+ goto out;
+
+ ret = process_recorded_refs(sctx);
+
+out:
+ return ret;
+}
+
+static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
+{
+ int ret = 0;
+ u64 left_mode;
+ u64 left_uid;
+ u64 left_gid;
+ u64 right_mode;
+ u64 right_uid;
+ u64 right_gid;
+ int need_chmod = 0;
+ int need_chown = 0;
+
+ ret = process_recorded_refs_if_needed(sctx, at_end);
+ if (ret < 0)
+ goto out;
+
+ if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
+ goto out;
+ if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
+ goto out;
+
+ ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
+ &left_mode, &left_uid, &left_gid);
+ if (ret < 0)
+ goto out;
+
+ if (!S_ISLNK(sctx->cur_inode_mode)) {
+ if (!sctx->parent_root || sctx->cur_inode_new) {
+ need_chmod = 1;
+ need_chown = 1;
+ } else {
+ ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+ NULL, NULL, &right_mode, &right_uid,
+ &right_gid);
+ if (ret < 0)
+ goto out;
+
+ if (left_uid != right_uid || left_gid != right_gid)
+ need_chown = 1;
+ if (left_mode != right_mode)
+ need_chmod = 1;
+ }
+ }
+
+ if (S_ISREG(sctx->cur_inode_mode)) {
+ ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ sctx->cur_inode_size);
+ if (ret < 0)
+ goto out;
+ }
+
+ if (need_chown) {
+ ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ left_uid, left_gid);
+ if (ret < 0)
+ goto out;
+ }
+ if (need_chmod) {
+ ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
+ left_mode);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * Send the utimes every time, no matter whether anything actually changed
+ * between the two trees, as the operations above may have touched the inode.
+ */
+ ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
+static int changed_inode(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+ struct btrfs_key *key = sctx->cmp_key;
+ struct btrfs_inode_item *left_ii = NULL;
+ struct btrfs_inode_item *right_ii = NULL;
+ u64 left_gen = 0;
+ u64 right_gen = 0;
+
+ ret = close_cur_inode_file(sctx);
+ if (ret < 0)
+ goto out;
+
+ sctx->cur_ino = key->objectid;
+ sctx->cur_inode_new_gen = 0;
+ sctx->cur_inode_first_ref_orphan = 0;
+ sctx->send_progress = sctx->cur_ino;
+
+ if (result == BTRFS_COMPARE_TREE_NEW ||
+ result == BTRFS_COMPARE_TREE_CHANGED) {
+ left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
+ sctx->left_path->slots[0],
+ struct btrfs_inode_item);
+ left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
+ left_ii);
+ } else {
+ right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
+ sctx->right_path->slots[0],
+ struct btrfs_inode_item);
+ right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
+ right_ii);
+ }
+ if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
+ sctx->right_path->slots[0],
+ struct btrfs_inode_item);
+
+ right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
+ right_ii);
+ if (left_gen != right_gen)
+ sctx->cur_inode_new_gen = 1;
+ }
+
+ if (result == BTRFS_COMPARE_TREE_NEW) {
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 1;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
+ ret = send_create_inode(sctx, sctx->left_path,
+ sctx->cmp_key);
+ } else if (result == BTRFS_COMPARE_TREE_DELETED) {
+ sctx->cur_inode_gen = right_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_deleted = 1;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->right_path->nodes[0], right_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->right_path->nodes[0], right_ii);
+ } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ if (sctx->cur_inode_new_gen) {
+ sctx->cur_inode_gen = right_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_deleted = 1;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->right_path->nodes[0], right_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->right_path->nodes[0], right_ii);
+ ret = process_all_refs(sctx,
+ BTRFS_COMPARE_TREE_DELETED);
+ if (ret < 0)
+ goto out;
+
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 1;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ ret = send_create_inode(sctx, sctx->left_path,
+ sctx->cmp_key);
+ if (ret < 0)
+ goto out;
+
+ ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
+ if (ret < 0)
+ goto out;
+ ret = process_all_extents(sctx);
+ if (ret < 0)
+ goto out;
+ ret = process_all_new_xattrs(sctx);
+ if (ret < 0)
+ goto out;
+ } else {
+ sctx->cur_inode_gen = left_gen;
+ sctx->cur_inode_new = 0;
+ sctx->cur_inode_new_gen = 0;
+ sctx->cur_inode_deleted = 0;
+ sctx->cur_inode_size = btrfs_inode_size(
+ sctx->left_path->nodes[0], left_ii);
+ sctx->cur_inode_mode = btrfs_inode_mode(
+ sctx->left_path->nodes[0], left_ii);
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int changed_ref(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen &&
+ sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
+ if (result == BTRFS_COMPARE_TREE_NEW)
+ ret = record_new_ref(sctx);
+ else if (result == BTRFS_COMPARE_TREE_DELETED)
+ ret = record_deleted_ref(sctx);
+ else if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = record_changed_ref(sctx);
+ }
+
+ return ret;
+}
+
+static int changed_xattr(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result == BTRFS_COMPARE_TREE_NEW)
+ ret = process_new_xattr(sctx);
+ else if (result == BTRFS_COMPARE_TREE_DELETED)
+ ret = process_deleted_xattr(sctx);
+ else if (result == BTRFS_COMPARE_TREE_CHANGED)
+ ret = process_changed_xattr(sctx);
+ }
+
+ return ret;
+}
+
+static int changed_extent(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result)
+{
+ int ret = 0;
+
+ BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+
+ if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
+ if (result != BTRFS_COMPARE_TREE_DELETED)
+ ret = process_extent(sctx, sctx->left_path,
+ sctx->cmp_key);
+ }
+
+ return ret;
+}
+
+
+static int changed_cb(struct btrfs_root *left_root,
+ struct btrfs_root *right_root,
+ struct btrfs_path *left_path,
+ struct btrfs_path *right_path,
+ struct btrfs_key *key,
+ enum btrfs_compare_tree_result result,
+ void *ctx)
+{
+ int ret = 0;
+ struct send_ctx *sctx = ctx;
+
+ sctx->left_path = left_path;
+ sctx->right_path = right_path;
+ sctx->cmp_key = key;
+
+ ret = finish_inode_if_needed(sctx, 0);
+ if (ret < 0)
+ goto out;
+
+ if (key->type == BTRFS_INODE_ITEM_KEY)
+ ret = changed_inode(sctx, result);
+ else if (key->type == BTRFS_INODE_REF_KEY)
+ ret = changed_ref(sctx, result);
+ else if (key->type == BTRFS_XATTR_ITEM_KEY)
+ ret = changed_xattr(sctx, result);
+ else if (key->type == BTRFS_EXTENT_DATA_KEY)
+ ret = changed_extent(sctx, result);
+
+out:
+ return ret;
+}
+
+static int full_send_tree(struct send_ctx *sctx)
+{
+ int ret;
+ struct btrfs_trans_handle *trans = NULL;
+ struct btrfs_root *send_root = sctx->send_root;
+ struct btrfs_key key;
+ struct btrfs_key found_key;
+ struct btrfs_path *path;
+ struct extent_buffer *eb;
+ int slot;
+ u64 start_ctransid;
+ u64 ctransid;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ spin_lock(&send_root->root_times_lock);
+ start_ctransid = btrfs_root_ctransid(&send_root->root_item);
+ spin_unlock(&send_root->root_times_lock);
+
+ key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+join_trans:
+ /*
+ * We need to make sure the transaction does not get committed
+ * while we do anything on commit roots. Join a transaction to prevent
+ * this.
+ */
+ trans = btrfs_join_transaction(send_root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ trans = NULL;
+ goto out;
+ }
+
+ /*
+ * Make sure the tree has not changed
+ */
+ spin_lock(&send_root->root_times_lock);
+ ctransid = btrfs_root_ctransid(&send_root->root_item);
+ spin_unlock(&send_root->root_times_lock);
+
+ if (ctransid != start_ctransid) {
+ WARN(1, KERN_WARNING "btrfs: the root that you're trying to "
+ "send was modified in between. This is "
+ "probably a bug.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
+ if (ret < 0)
+ goto out;
+ if (ret)
+ goto out_finish;
+
+ while (1) {
+ /*
+ * If someone wants to commit while we iterate, end the
+ * joined transaction and rejoin.
+ */
+ if (btrfs_should_end_transaction(trans, send_root)) {
+ ret = btrfs_end_transaction(trans, send_root);
+ trans = NULL;
+ if (ret < 0)
+ goto out;
+ btrfs_release_path(path);
+ goto join_trans;
+ }
+
+ eb = path->nodes[0];
+ slot = path->slots[0];
+ btrfs_item_key_to_cpu(eb, &found_key, slot);
+
+ ret = changed_cb(send_root, NULL, path, NULL,
+ &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
+ if (ret < 0)
+ goto out;
+
+ key.objectid = found_key.objectid;
+ key.type = found_key.type;
+ key.offset = found_key.offset + 1;
+
+ ret = btrfs_next_item(send_root, path);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ ret = 0;
+ break;
+ }
+ }
+
+out_finish:
+ ret = finish_inode_if_needed(sctx, 1);
+
+out:
+ btrfs_free_path(path);
+ if (trans) {
+ if (!ret)
+ ret = btrfs_end_transaction(trans, send_root);
+ else
+ btrfs_end_transaction(trans, send_root);
+ }
+ return ret;
+}
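
As a hedged sketch of the consistency check performed above (root_was_modified() is invented here; full_send_tree() open-codes the same logic before and after joining the transaction):

static int root_was_modified(struct btrfs_root *root, u64 start_ctransid)
{
	u64 ctransid;

	/* ctransid advances whenever a commit changes this root. */
	spin_lock(&root->root_times_lock);
	ctransid = btrfs_root_ctransid(&root->root_item);
	spin_unlock(&root->root_times_lock);

	return ctransid != start_ctransid;
}
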
+
+static int send_subvol(struct send_ctx *sctx)
+{
+ int ret;
+
+ ret = send_header(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = send_subvol_begin(sctx);
+ if (ret < 0)
+ goto out;
+
+ if (sctx->parent_root) {
+ ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
+ changed_cb, sctx);
+ if (ret < 0)
+ goto out;
+ ret = finish_inode_if_needed(sctx, 1);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = full_send_tree(sctx);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ if (!ret)
+ ret = close_cur_inode_file(sctx);
+ else
+ close_cur_inode_file(sctx);
+
+ free_recorded_refs(sctx);
+ return ret;
+}
+
+long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
+{
+ int ret = 0;
+ struct btrfs_root *send_root;
+ struct btrfs_root *clone_root;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_ioctl_send_args *arg = NULL;
+ struct btrfs_key key;
+ struct file *filp = NULL;
+ struct send_ctx *sctx = NULL;
+ u32 i;
+ u64 *clone_sources_tmp = NULL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ send_root = BTRFS_I(fdentry(mnt_file)->d_inode)->root;
+ fs_info = send_root->fs_info;
+
+ arg = memdup_user(arg_, sizeof(*arg));
+ if (IS_ERR(arg)) {
+ ret = PTR_ERR(arg);
+ arg = NULL;
+ goto out;
+ }
+
+ if (!access_ok(VERIFY_READ, arg->clone_sources,
+ sizeof(*arg->clone_sources) *
+ arg->clone_sources_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
+ if (!sctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&sctx->new_refs);
+ INIT_LIST_HEAD(&sctx->deleted_refs);
+ INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
+ INIT_LIST_HEAD(&sctx->name_cache_list);
+
+ sctx->send_filp = fget(arg->send_fd);
+ if (!sctx->send_filp) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ sctx->mnt = mnt_file->f_path.mnt;
+
+ sctx->send_root = send_root;
+ sctx->clone_roots_cnt = arg->clone_sources_count;
+
+ sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
+ sctx->send_buf = vmalloc(sctx->send_max_size);
+ if (!sctx->send_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
+ if (!sctx->read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
+ (arg->clone_sources_count + 1));
+ if (!sctx->clone_roots) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (arg->clone_sources_count) {
+ clone_sources_tmp = vmalloc(arg->clone_sources_count *
+ sizeof(*arg->clone_sources));
+ if (!clone_sources_tmp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
+ arg->clone_sources_count *
+ sizeof(*arg->clone_sources));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ for (i = 0; i < arg->clone_sources_count; i++) {
+ key.objectid = clone_sources_tmp[i];
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (!clone_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (IS_ERR(clone_root)) {
+ ret = PTR_ERR(clone_root);
+ goto out;
+ }
+ sctx->clone_roots[i].root = clone_root;
+ }
+ vfree(clone_sources_tmp);
+ clone_sources_tmp = NULL;
+ }
+
+ if (arg->parent_root) {
+ key.objectid = arg->parent_root;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (!sctx->parent_root) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /*
+ * Clones from send_root are allowed, but only if the clone source
+ * is behind the current send position. This is checked while searching
+ * for possible clone sources.
+ */
+ sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
+
+ /* We do a bsearch later */
+ sort(sctx->clone_roots, sctx->clone_roots_cnt,
+ sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
+ NULL);
+
+ ret = send_subvol(sctx);
+ if (ret < 0)
+ goto out;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_END);
+ if (ret < 0)
+ goto out;
+ ret = send_cmd(sctx);
+ if (ret < 0)
+ goto out;
+
+out:
+ if (filp)
+ fput(filp);
+ kfree(arg);
+ vfree(clone_sources_tmp);
+
+ if (sctx) {
+ if (sctx->send_filp)
+ fput(sctx->send_filp);
+
+ vfree(sctx->clone_roots);
+ vfree(sctx->send_buf);
+ vfree(sctx->read_buf);
+
+ name_cache_free(sctx);
+
+ kfree(sctx);
+ }
+
+ return ret;
+}
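
A user-space sketch of driving this ioctl; it assumes the btrfs_ioctl_send_args layout from fs/btrfs/ioctl.h (send_fd, parent_root, clone_sources/clone_sources_count) and is illustration only, not part of the patch:

/* Needs <string.h>, <sys/ioctl.h> and the btrfs ioctl definitions. */
static int do_send(int subvol_fd, int out_fd, __u64 parent_root_id)
{
	struct btrfs_ioctl_send_args args;

	memset(&args, 0, sizeof(args));
	args.send_fd = out_fd;			/* stream is written here */
	args.parent_root = parent_root_id;	/* 0 requests a full send */
	args.clone_sources = NULL;
	args.clone_sources_count = 0;

	return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
}
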
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
new file mode 100644
index 000000000000..9934e948e57f
--- /dev/null
+++ b/fs/btrfs/send.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2012 Alexander Block. All rights reserved.
+ * Copyright (C) 2012 STRATO. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+
+#define BTRFS_SEND_STREAM_MAGIC "btrfs-stream"
+#define BTRFS_SEND_STREAM_VERSION 1
+
+#define BTRFS_SEND_BUF_SIZE (1024 * 64)
+#define BTRFS_SEND_READ_SIZE (1024 * 48)
+
+enum btrfs_tlv_type {
+ BTRFS_TLV_U8,
+ BTRFS_TLV_U16,
+ BTRFS_TLV_U32,
+ BTRFS_TLV_U64,
+ BTRFS_TLV_BINARY,
+ BTRFS_TLV_STRING,
+ BTRFS_TLV_UUID,
+ BTRFS_TLV_TIMESPEC,
+};
+
+struct btrfs_stream_header {
+ char magic[sizeof(BTRFS_SEND_STREAM_MAGIC)];
+ __le32 version;
+} __attribute__ ((__packed__));
+
+struct btrfs_cmd_header {
+ /* len excluding the header */
+ __le32 len;
+ __le16 cmd;
+ /* crc of the whole command, computed with this crc field set to zero */
+ __le32 crc;
+} __attribute__ ((__packed__));
+
+struct btrfs_tlv_header {
+ __le16 tlv_type;
+ /* len excluding the header */
+ __le16 tlv_len;
+} __attribute__ ((__packed__));
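
For illustration, one attribute on the wire is just a btrfs_tlv_header immediately followed by tlv_len bytes of payload; a sketch of an encoder (put_tlv() is an invented name, the in-kernel writer lives in send.c):

static size_t put_tlv(void *buf, u16 attr, const void *data, u16 len)
{
	struct btrfs_tlv_header *hdr = buf;

	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);

	return sizeof(*hdr) + len;	/* bytes consumed in the send buffer */
}
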
+
+/* commands */
+enum btrfs_send_cmd {
+ BTRFS_SEND_C_UNSPEC,
+
+ BTRFS_SEND_C_SUBVOL,
+ BTRFS_SEND_C_SNAPSHOT,
+
+ BTRFS_SEND_C_MKFILE,
+ BTRFS_SEND_C_MKDIR,
+ BTRFS_SEND_C_MKNOD,
+ BTRFS_SEND_C_MKFIFO,
+ BTRFS_SEND_C_MKSOCK,
+ BTRFS_SEND_C_SYMLINK,
+
+ BTRFS_SEND_C_RENAME,
+ BTRFS_SEND_C_LINK,
+ BTRFS_SEND_C_UNLINK,
+ BTRFS_SEND_C_RMDIR,
+
+ BTRFS_SEND_C_SET_XATTR,
+ BTRFS_SEND_C_REMOVE_XATTR,
+
+ BTRFS_SEND_C_WRITE,
+ BTRFS_SEND_C_CLONE,
+
+ BTRFS_SEND_C_TRUNCATE,
+ BTRFS_SEND_C_CHMOD,
+ BTRFS_SEND_C_CHOWN,
+ BTRFS_SEND_C_UTIMES,
+
+ BTRFS_SEND_C_END,
+ __BTRFS_SEND_C_MAX,
+};
+#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
+
+/* attributes in send stream */
+enum {
+ BTRFS_SEND_A_UNSPEC,
+
+ BTRFS_SEND_A_UUID,
+ BTRFS_SEND_A_CTRANSID,
+
+ BTRFS_SEND_A_INO,
+ BTRFS_SEND_A_SIZE,
+ BTRFS_SEND_A_MODE,
+ BTRFS_SEND_A_UID,
+ BTRFS_SEND_A_GID,
+ BTRFS_SEND_A_RDEV,
+ BTRFS_SEND_A_CTIME,
+ BTRFS_SEND_A_MTIME,
+ BTRFS_SEND_A_ATIME,
+ BTRFS_SEND_A_OTIME,
+
+ BTRFS_SEND_A_XATTR_NAME,
+ BTRFS_SEND_A_XATTR_DATA,
+
+ BTRFS_SEND_A_PATH,
+ BTRFS_SEND_A_PATH_TO,
+ BTRFS_SEND_A_PATH_LINK,
+
+ BTRFS_SEND_A_FILE_OFFSET,
+ BTRFS_SEND_A_DATA,
+
+ BTRFS_SEND_A_CLONE_UUID,
+ BTRFS_SEND_A_CLONE_CTRANSID,
+ BTRFS_SEND_A_CLONE_PATH,
+ BTRFS_SEND_A_CLONE_OFFSET,
+ BTRFS_SEND_A_CLONE_LEN,
+
+ __BTRFS_SEND_A_MAX,
+};
+#define BTRFS_SEND_A_MAX (__BTRFS_SEND_A_MAX - 1)
+
+#ifdef __KERNEL__
+long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
+#endif
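
A minimal user-space sketch (not part of this header) that validates the stream header defined above; it assumes the documented little-endian wire format:

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct stream_header {
	char magic[sizeof("btrfs-stream")];	/* includes the trailing NUL */
	uint32_t version;			/* little-endian on the wire */
} __attribute__((__packed__));

static int stream_header_ok(const struct stream_header *h)
{
	if (memcmp(h->magic, "btrfs-stream", sizeof("btrfs-stream")) != 0)
		return 0;
	return le32toh(h->version) == 1;	/* BTRFS_SEND_STREAM_VERSION */
}
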
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index c6ffa5812419..b976597b0721 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -17,15 +17,27 @@
*/
#include <linux/highmem.h>
+#include <asm/unaligned.h>
-/* this is some deeply nasty code. ctree.h has a different
- * definition for this BTRFS_SETGET_FUNCS macro, behind a #ifndef
+#include "ctree.h"
+
+static inline u8 get_unaligned_le8(const void *p)
+{
+ return *(u8 *)p;
+}
+
+static inline void put_unaligned_le8(u8 val, void *p)
+{
+ *(u8 *)p = val;
+}
+
+/*
+ * this is some deeply nasty code.
*
* The end result is that anyone who #includes ctree.h gets a
- * declaration for the btrfs_set_foo functions and btrfs_foo functions
- *
- * This file declares the macros and then #includes ctree.h, which results
- * in cpp creating the function here based on the template below.
+ * declaration for the btrfs_set_foo functions and btrfs_foo functions,
+ * which are wrappers of btrfs_set_token_#bits functions and
+ * btrfs_get_token_#bits functions, which are defined in this file.
*
* These setget functions do all the extent_buffer related mapping
* required to efficiently read and write specific fields in the extent
@@ -33,103 +45,93 @@
* an unsigned long offset into the extent buffer which has been
* cast to a specific type. This gives us all the gcc type checking.
*
- * The extent buffer api is used to do all the kmapping and page
- * spanning work required to get extent buffers in highmem and have
- * a metadata blocksize different from the page size.
- *
- * The macro starts with a simple function prototype declaration so that
- * sparse won't complain about it being static.
+ * The extent buffer api is used to do the page spanning work required to
+ * have a metadata blocksize different from the page size.
*/
-#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-u##bits btrfs_##name(struct extent_buffer *eb, type *s); \
-void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); \
-void btrfs_set_token_##name(struct extent_buffer *eb, type *s, u##bits val, struct btrfs_map_token *token); \
-u##bits btrfs_token_##name(struct extent_buffer *eb, \
- type *s, struct btrfs_map_token *token) \
+#define DEFINE_BTRFS_SETGET_BITS(bits) \
+u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
+ unsigned long off, \
+ struct btrfs_map_token *token) \
{ \
- unsigned long part_offset = (unsigned long)s; \
- unsigned long offset = part_offset + offsetof(type, member); \
- type *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- unsigned long mem_len = sizeof(((type *)0)->member); \
- u##bits res; \
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
- kaddr = token->kaddr; \
- p = (type *)(kaddr + part_offset - token->offset); \
- res = le##bits##_to_cpu(p->member); \
- return res; \
- } \
- err = map_private_extent_buffer(eb, offset, \
- mem_len, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits leres; \
- read_eb_member(eb, s, type, member, &leres); \
- return le##bits##_to_cpu(leres); \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- res = le##bits##_to_cpu(p->member); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
- } \
- return res; \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ u##bits res; \
+ \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ kaddr = token->kaddr; \
+ p = kaddr + part_offset - token->offset; \
+ res = get_unaligned_le##bits(p + off); \
+ return res; \
+ } \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits leres; \
+ \
+ read_extent_buffer(eb, &leres, offset, size); \
+ return le##bits##_to_cpu(leres); \
+ } \
+ p = kaddr + part_offset - map_start; \
+ res = get_unaligned_le##bits(p + off); \
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
+ return res; \
} \
-void btrfs_set_token_##name(struct extent_buffer *eb, \
- type *s, u##bits val, struct btrfs_map_token *token) \
+void btrfs_set_token_##bits(struct extent_buffer *eb, \
+ void *ptr, unsigned long off, u##bits val, \
+ struct btrfs_map_token *token) \
{ \
- unsigned long part_offset = (unsigned long)s; \
- unsigned long offset = part_offset + offsetof(type, member); \
- type *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- unsigned long mem_len = sizeof(((type *)0)->member); \
- if (token && token->kaddr && token->offset <= offset && \
- token->eb == eb && \
- (token->offset + PAGE_CACHE_SIZE >= offset + mem_len)) { \
- kaddr = token->kaddr; \
- p = (type *)(kaddr + part_offset - token->offset); \
- p->member = cpu_to_le##bits(val); \
- return; \
- } \
- err = map_private_extent_buffer(eb, offset, \
- mem_len, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits val2; \
- val2 = cpu_to_le##bits(val); \
- write_eb_member(eb, s, type, member, &val2); \
- return; \
- } \
- p = (type *)(kaddr + part_offset - map_start); \
- p->member = cpu_to_le##bits(val); \
- if (token) { \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- token->eb = eb; \
- } \
-} \
-void btrfs_set_##name(struct extent_buffer *eb, \
- type *s, u##bits val) \
-{ \
- btrfs_set_token_##name(eb, s, val, NULL); \
-} \
-u##bits btrfs_##name(struct extent_buffer *eb, \
- type *s) \
-{ \
- return btrfs_token_##name(eb, s, NULL); \
-} \
+ unsigned long part_offset = (unsigned long)ptr; \
+ unsigned long offset = part_offset + off; \
+ void *p; \
+ int err; \
+ char *kaddr; \
+ unsigned long map_start; \
+ unsigned long map_len; \
+ int size = sizeof(u##bits); \
+ \
+ if (token && token->kaddr && token->offset <= offset && \
+ token->eb == eb && \
+ (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \
+ kaddr = token->kaddr; \
+ p = kaddr + part_offset - token->offset; \
+ put_unaligned_le##bits(val, p + off); \
+ return; \
+ } \
+ err = map_private_extent_buffer(eb, offset, size, \
+ &kaddr, &map_start, &map_len); \
+ if (err) { \
+ __le##bits val2; \
+ \
+ val2 = cpu_to_le##bits(val); \
+ write_extent_buffer(eb, &val2, offset, size); \
+ return; \
+ } \
+ p = kaddr + part_offset - map_start; \
+ put_unaligned_le##bits(val, p + off); \
+ if (token) { \
+ token->kaddr = kaddr; \
+ token->offset = map_start; \
+ token->eb = eb; \
+ } \
+}
-#include "ctree.h"
+DEFINE_BTRFS_SETGET_BITS(8)
+DEFINE_BTRFS_SETGET_BITS(16)
+DEFINE_BTRFS_SETGET_BITS(32)
+DEFINE_BTRFS_SETGET_BITS(64)
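
To show how the generated helpers are meant to be consumed, here is a sketch of a setget accessor built on top of them; the real BTRFS_SETGET_FUNCS macros live in ctree.h and may differ in detail:

#define BTRFS_SETGET_FUNCS_SKETCH(name, type, member, bits)		\
static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s)	\
{									\
	return btrfs_get_token_##bits(eb, s, offsetof(type, member),	\
				      NULL);				\
}									\
static inline void btrfs_set_##name(struct extent_buffer *eb,		\
				    type *s, u##bits val)		\
{									\
	btrfs_set_token_##bits(eb, s, offsetof(type, member), val,	\
			       NULL);					\
}
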
void btrfs_node_key(struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b19d75567728..83d6f9f9c220 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
}
-/* NOTE:
- * We move write_super stuff at umount in order to avoid deadlock
- * for umount hold all lock.
- */
static void save_error_info(struct btrfs_fs_info *fs_info)
{
__save_error_info(fs_info);
@@ -125,6 +121,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
}
}
+#ifdef CONFIG_PRINTK
/*
* __btrfs_std_error decodes expected errors from the caller and
 * invokes the appropriate error response.
@@ -167,7 +164,7 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
va_end(args);
}
-const char *logtypes[] = {
+static const char * const logtypes[] = {
"emergency",
"alert",
"critical",
@@ -185,22 +182,50 @@ void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
struct va_format vaf;
va_list args;
const char *type = logtypes[4];
+ int kern_level;
va_start(args, fmt);
- if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
- memcpy(lvl, fmt, 3);
- lvl[3] = '\0';
- fmt += 3;
- type = logtypes[fmt[1] - '0'];
+ kern_level = printk_get_level(fmt);
+ if (kern_level) {
+ size_t size = printk_skip_level(fmt) - fmt;
+ memcpy(lvl, fmt, size);
+ lvl[size] = '\0';
+ fmt += size;
+ type = logtypes[kern_level - '0'];
} else
*lvl = '\0';
vaf.fmt = fmt;
vaf.va = &args;
+
printk("%sBTRFS %s (device %s): %pV", lvl, type, sb->s_id, &vaf);
+
+ va_end(args);
}
+#else
+
+void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
+ unsigned int line, int errno, const char *fmt, ...)
+{
+ struct super_block *sb = fs_info->sb;
+
+ /*
+ * Special case: if the error is EROFS, and we're already
+ * under MS_RDONLY, then it is safe here.
+ */
+ if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
+ return;
+
+ /* Don't go through full error handling during mount */
+ if (sb->s_flags & MS_BORN) {
+ save_error_info(fs_info);
+ btrfs_handle_error(fs_info);
+ }
+}
+#endif
+
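
For reference, a worked example of the two printk helpers used above; both come from <linux/printk.h> and the values assume the standard KERN_SOH level prefix:

/*
 *   const char *fmt = KERN_INFO "device %s ready\n";
 *
 *   printk_get_level(fmt)        -> '6'      (the level character)
 *   printk_skip_level(fmt)       -> fmt + 2  (points at "device %s ready\n")
 *   printk_skip_level(fmt) - fmt -> 2        (length copied into lvl[] above)
 */
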
/*
* We only mark the transaction aborted and then set the file system read-only.
* This will prevent new transactions from starting or trying to join this
@@ -396,15 +421,23 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
strcmp(args[0].from, "zlib") == 0) {
compress_type = "zlib";
info->compress_type = BTRFS_COMPRESS_ZLIB;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
} else if (strcmp(args[0].from, "lzo") == 0) {
compress_type = "lzo";
info->compress_type = BTRFS_COMPRESS_LZO;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_set_fs_incompat(info, COMPRESS_LZO);
+ } else if (strncmp(args[0].from, "no", 2) == 0) {
+ compress_type = "no";
+ info->compress_type = BTRFS_COMPRESS_NONE;
+ btrfs_clear_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
+ compress_force = false;
} else {
ret = -EINVAL;
goto out;
}
- btrfs_set_opt(info->mount_opt, COMPRESS);
if (compress_force) {
btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
pr_info("btrfs: force %s compression\n",
@@ -805,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
- int ret;
trace_btrfs_sync_fs(wait);
@@ -816,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_wait_ordered_extents(root, 0, 0);
- trans = btrfs_start_transaction(root, 0);
+ spin_lock(&fs_info->trans_lock);
+ if (!fs_info->running_transaction) {
+ spin_unlock(&fs_info->trans_lock);
+ return 0;
+ }
+ spin_unlock(&fs_info->trans_lock);
+
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_commit_transaction(trans, root);
- return ret;
+ return btrfs_commit_transaction(trans, root);
}
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1455,6 +1493,13 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
ret = btrfs_scan_one_device(vol->name, FMODE_READ,
&btrfs_fs_type, &fs_devices);
break;
+ case BTRFS_IOC_DEVICES_READY:
+ ret = btrfs_scan_one_device(vol->name, FMODE_READ,
+ &btrfs_fs_type, &fs_devices);
+ if (ret)
+ break;
+ ret = !(fs_devices->num_devices == fs_devices->total_devices);
+ break;
}
kfree(vol);
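
A user-space sketch of the new BTRFS_IOC_DEVICES_READY control ioctl added above; it assumes the btrfs_ioctl_vol_args definition (device path in .name) and the usual /dev/btrfs-control node. A return of 0 means every member device of the filesystem has been scanned, 1 means some are still missing:

/* Needs <fcntl.h>, <string.h>, <sys/ioctl.h>, <unistd.h> and btrfs ioctl.h. */
static int devices_ready(const char *dev_path)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	fd = open("/dev/btrfs-control", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	strncpy(args.name, dev_path, sizeof(args.name) - 1);

	ret = ioctl(fd, BTRFS_IOC_DEVICES_READY, &args);
	close(fd);
	return ret;
}
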
@@ -1477,16 +1522,6 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}
-static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
-{
- int ret;
-
- ret = btrfs_dirty_inode(inode);
- if (ret)
- printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
- "error %d\n", btrfs_ino(inode), ret);
-}
-
static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
{
struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -1500,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
while (cur_devices) {
head = &cur_devices->devices;
list_for_each_entry(dev, head, dev_list) {
+ if (dev->missing)
+ continue;
if (!first_dev || dev->devid < first_dev->devid)
first_dev = dev;
}
@@ -1526,7 +1563,6 @@ static const struct super_operations btrfs_super_ops = {
.show_options = btrfs_show_options,
.show_devname = btrfs_show_devname,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index b72b068183ec..27c26004e050 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -22,6 +22,7 @@
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
+#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@@ -38,7 +39,6 @@ void put_transaction(struct btrfs_transaction *transaction)
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
- WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
@@ -100,8 +100,8 @@ loop:
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
cur_trans = fs_info->running_transaction;
goto loop;
- } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- spin_unlock(&root->fs_info->trans_lock);
+ } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
}
@@ -126,7 +126,6 @@ loop:
cur_trans->delayed_refs.num_heads = 0;
cur_trans->delayed_refs.flushing = 0;
cur_trans->delayed_refs.run_delayed_start = 0;
- cur_trans->delayed_refs.seq = 1;
/*
* although the tree mod log is per file system and not per transaction,
@@ -145,10 +144,8 @@ loop:
}
atomic_set(&fs_info->tree_mod_seq, 0);
- init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
- INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
@@ -299,6 +296,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
int ret;
+ u64 qgroup_reserved = 0;
if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
return ERR_PTR(-EROFS);
@@ -317,6 +315,14 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
* the appropriate flushing if need be.
*/
if (num_items > 0 && root != root->fs_info->chunk_root) {
+ if (root->fs_info->quota_enabled &&
+ is_fstree(root->root_key.objectid)) {
+ qgroup_reserved = num_items * root->leafsize;
+ ret = btrfs_qgroup_reserve(root, qgroup_reserved);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
ret = btrfs_block_rsv_add(root,
&root->fs_info->trans_block_rsv,
@@ -329,6 +335,8 @@ again:
if (!h)
return ERR_PTR(-ENOMEM);
+ sb_start_intwrite(root->fs_info->sb);
+
if (may_wait_transaction(root, type))
wait_current_trans(root);
@@ -339,6 +347,7 @@ again:
} while (ret == -EBUSY);
if (ret < 0) {
+ sb_end_intwrite(root->fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h);
return ERR_PTR(ret);
}
@@ -349,11 +358,16 @@ again:
h->transaction = cur_trans;
h->blocks_used = 0;
h->bytes_reserved = 0;
+ h->root = root;
h->delayed_ref_updates = 0;
h->use_count = 1;
+ h->adding_csums = 0;
h->block_rsv = NULL;
h->orig_rsv = NULL;
h->aborted = 0;
+ h->qgroup_reserved = qgroup_reserved;
+ h->delayed_ref_elem.seq = 0;
+ INIT_LIST_HEAD(&h->qgroup_ref_list);
smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -473,7 +487,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_transaction *cur_trans = trans->transaction;
- struct btrfs_block_rsv *rsv = trans->block_rsv;
int updates;
int err;
@@ -481,12 +494,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
return 1;
- /*
- * We need to do this in case we're deleting csums so the global block
- * rsv get's used instead of the csum block rsv.
- */
- trans->block_rsv = NULL;
-
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
@@ -495,8 +502,6 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
return err;
}
- trans->block_rsv = rsv;
-
return should_end_transaction(trans, root);
}
@@ -513,8 +518,24 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
return 0;
}
+ /*
+ * do the qgroup accounting as early as possible
+ */
+ err = btrfs_delayed_refs_qgroup_accounting(trans, info);
+
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ /*
+ * the same root has to be passed to start_transaction and
+ * end_transaction. Subvolume quota depends on this.
+ */
+ WARN_ON(trans->root != root);
+
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
+
while (count < 2) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
@@ -527,6 +548,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
count++;
}
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
+
+ sb_end_intwrite(root->fs_info->sb);
if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
should_end_transaction(trans, root)) {
@@ -567,6 +592,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
err = -EIO;
}
+ assert_qgroups_uptodate(trans);
memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
@@ -785,6 +811,13 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
ret = btrfs_run_dev_stats(trans, root->fs_info);
BUG_ON(ret);
+ ret = btrfs_run_qgroups(trans, root->fs_info);
+ BUG_ON(ret);
+
+ /* run_qgroups might have added some more refs */
+ ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+ BUG_ON(ret);
+
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
@@ -926,11 +959,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
+ struct timespec cur_time = CURRENT_TIME;
int ret;
u64 to_reserve = 0;
u64 index = 0;
u64 objectid;
u64 root_flags;
+ uuid_le new_uuid;
rsv = trans->block_rsv;
@@ -957,6 +992,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
}
+ ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
+ objectid, pending->inherit);
+ kfree(pending->inherit);
+ if (ret) {
+ pending->error = ret;
+ goto fail;
+ }
+
key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_ROOT_ITEM_KEY;
@@ -988,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(parent_inode, parent_inode->i_size +
dentry->d_name.len * 2);
+ parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, parent_root, parent_inode);
if (ret)
goto abort_trans_dput;
@@ -1016,6 +1060,20 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
btrfs_set_root_flags(new_root_item, root_flags);
+ btrfs_set_root_generation_v2(new_root_item,
+ trans->transid);
+ uuid_le_gen(&new_uuid);
+ memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
+ memcpy(new_root_item->parent_uuid, root->root_item.uuid,
+ BTRFS_UUID_SIZE);
+ new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
+ new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
+ btrfs_set_root_otransid(new_root_item, trans->transid);
+ memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
+ memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
+ btrfs_set_root_stransid(new_root_item, 0);
+ btrfs_set_root_rtransid(new_root_item, 0);
+
old = btrfs_lock_root_node(root);
ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
if (ret) {
@@ -1269,9 +1327,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_run_ordered_operations(root, 0);
- btrfs_trans_release_metadata(trans, root);
- trans->block_rsv = NULL;
-
if (cur_trans->aborted)
goto cleanup_transaction;
@@ -1282,6 +1337,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret)
goto cleanup_transaction;
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
+
cur_trans = trans->transaction;
/*
@@ -1330,7 +1388,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
spin_unlock(&root->fs_info->trans_lock);
}
- if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
+ if (!btrfs_test_opt(root, SSD) &&
+ (now < cur_trans->start_time || now - cur_trans->start_time < 1))
should_grow = 1;
do {
@@ -1352,6 +1411,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
goto cleanup_transaction;
/*
+ * running the delayed items may have added new refs. account
+ * them now so that they hinder processing of more delayed refs
+ * as little as possible.
+ */
+ btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
+
+ /*
* rename don't use btrfs_join_transaction, so, once we
* set the transaction to blocked above, we aren't going
* to get any new ordered operations. We can safely run
@@ -1463,6 +1529,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
root->fs_info->chunk_root->node);
switch_commit_root(root->fs_info->chunk_root);
+ assert_qgroups_uptodate(trans);
update_super_roots(root);
if (!root->fs_info->log_root_recovering) {
@@ -1517,6 +1584,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
put_transaction(cur_trans);
put_transaction(cur_trans);
+ sb_end_intwrite(root->fs_info->sb);
+
trace_btrfs_transaction_commit(root);
btrfs_scrub_continue(root);
@@ -1532,6 +1601,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
cleanup_transaction:
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
// WARN_ON(1);
if (current->journal_info == trans)
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index fe27379e368b..e8b8416c688b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -20,6 +20,7 @@
#define __BTRFS_TRANSACTION__
#include "btrfs_inode.h"
#include "delayed-ref.h"
+#include "ctree.h"
struct btrfs_transaction {
u64 transid;
@@ -49,6 +50,7 @@ struct btrfs_transaction {
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
+ u64 qgroup_reserved;
unsigned long use_count;
unsigned long blocks_reserved;
unsigned long blocks_used;
@@ -57,12 +59,22 @@ struct btrfs_trans_handle {
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
int aborted;
+ int adding_csums;
+ /*
+ * this root is only needed to validate that the root passed to
+ * start_transaction is the same as the one passed to end_transaction.
+ * Subvolume quota depends on this
+ */
+ struct btrfs_root *root;
+ struct seq_list delayed_ref_elem;
+ struct list_head qgroup_ref_list;
};
struct btrfs_pending_snapshot {
struct dentry *dentry;
struct btrfs_root *root;
struct btrfs_root *snap;
+ struct btrfs_qgroup_inherit *inherit;
/* block reservation for the operation */
struct btrfs_block_rsv block_rsv;
/* extra metadata reseration for relocation */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8abeae4224f9..c86670f4f285 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -637,7 +637,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
}
inode_set_bytes(inode, saved_nbytes);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
out:
if (inode)
iput(inode);
@@ -1133,7 +1133,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
if (ret == 0) {
btrfs_inc_nlink(inode);
- btrfs_update_inode(trans, root, inode);
+ ret = btrfs_update_inode(trans, root, inode);
} else if (ret == -EEXIST) {
ret = 0;
} else {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ecaad40e7ef4..88b969aeeb71 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -227,9 +227,8 @@ loop_lock:
cur = pending;
pending = pending->bi_next;
cur->bi_next = NULL;
- atomic_dec(&fs_info->nr_async_bios);
- if (atomic_read(&fs_info->nr_async_bios) < limit &&
+ if (atomic_dec_return(&fs_info->nr_async_bios) < limit &&
waitqueue_active(&fs_info->async_submit_wait))
wake_up(&fs_info->async_submit_wait);
@@ -429,6 +428,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
mutex_init(&fs_devices->device_list_mutex);
fs_devices->latest_devid = orig->latest_devid;
fs_devices->latest_trans = orig->latest_trans;
+ fs_devices->total_devices = orig->total_devices;
memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
/* We have held the volume lock, it is safe to get the devices. */
@@ -568,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
memcpy(new_device, device, sizeof(*new_device));
/* Safe because we are under uuid_mutex */
- name = rcu_string_strdup(device->name->str, GFP_NOFS);
- BUG_ON(device->name && !name); /* -ENOMEM */
- rcu_assign_pointer(new_device->name, name);
+ if (device->name) {
+ name = rcu_string_strdup(device->name->str, GFP_NOFS);
+ BUG_ON(device->name && !name); /* -ENOMEM */
+ rcu_assign_pointer(new_device->name, name);
+ }
new_device->bdev = NULL;
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
@@ -739,6 +741,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
int ret;
u64 devid;
u64 transid;
+ u64 total_devices;
flags |= FMODE_EXCL;
bdev = blkdev_get_by_path(path, flags, holder);
@@ -760,6 +763,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
+ total_devices = btrfs_super_num_devices(disk_super);
if (disk_super->label[0])
printk(KERN_INFO "device label %s ", disk_super->label);
else
@@ -767,7 +771,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
-
+ if (!ret && fs_devices_ret)
+ (*fs_devices_ret)->total_devices = total_devices;
brelse(bh);
error_close:
mutex_unlock(&uuid_mutex);
@@ -1433,6 +1438,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
list_del_rcu(&device->dev_list);
device->fs_devices->num_devices--;
+ device->fs_devices->total_devices--;
if (device->missing)
root->fs_info->fs_devices->missing_devices--;
@@ -1550,6 +1556,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
fs_devices->seeding = 0;
fs_devices->num_devices = 0;
fs_devices->open_devices = 0;
+ fs_devices->total_devices = 0;
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
@@ -1738,10 +1745,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->fs_devices = root->fs_info->fs_devices;
- /*
- * we don't want write_supers to jump in here with our device
- * half setup
- */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
list_add(&device->dev_alloc_list,
@@ -1749,6 +1752,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
root->fs_info->fs_devices->num_devices++;
root->fs_info->fs_devices->open_devices++;
root->fs_info->fs_devices->rw_devices++;
+ root->fs_info->fs_devices->total_devices++;
if (device->can_discard)
root->fs_info->fs_devices->num_can_discard++;
root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
@@ -4602,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root)
return ret;
}
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
- u64 logical, int mirror_num)
-{
- struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
- int ret;
- u64 map_length = 0;
- struct btrfs_bio *bbio = NULL;
- struct btrfs_device *device;
-
- BUG_ON(mirror_num == 0);
- ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
- mirror_num);
- if (ret) {
- BUG_ON(bbio != NULL);
- return NULL;
- }
- BUG_ON(mirror_num != bbio->mirror_num);
- device = bbio->stripes[mirror_num - 1].dev;
- kfree(bbio);
- return device;
-}
-
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
struct btrfs_path *path;
@@ -4736,9 +4718,6 @@ int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
key.offset = device->devid;
ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
if (ret) {
- printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
- rcu_str_deref(device->name),
- (unsigned long long)device->devid);
__btrfs_reset_dev_stats(device);
device->dev_stats_valid = 1;
btrfs_release_path(path);
@@ -4880,6 +4859,14 @@ void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
+ int i;
+
+ for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
+ if (btrfs_dev_stat_read(dev, i) != 0)
+ break;
+ if (i == BTRFS_DEV_STAT_VALUES_MAX)
+ return; /* all values == 0, suppress message */
+
printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
rcu_str_deref(dev->name),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
@@ -4890,8 +4877,7 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
}
int btrfs_get_dev_stats(struct btrfs_root *root,
- struct btrfs_ioctl_get_dev_stats *stats,
- int reset_after_read)
+ struct btrfs_ioctl_get_dev_stats *stats)
{
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
@@ -4909,7 +4895,7 @@ int btrfs_get_dev_stats(struct btrfs_root *root,
printk(KERN_WARNING
"btrfs: get dev_stats failed, not yet valid\n");
return -ENODEV;
- } else if (reset_after_read) {
+ } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
if (stats->nr_items > i)
stats->values[i] =
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 95f6637614db..53c06af92e8d 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -126,6 +126,7 @@ struct btrfs_fs_devices {
u64 missing_devices;
u64 total_rw_bytes;
u64 num_can_discard;
+ u64 total_devices;
struct block_device *latest_bdev;
/* all of the devices in the FS, protected by a mutex
@@ -288,13 +289,10 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
-struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
- u64 logical, int mirror_num);
void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
- struct btrfs_ioctl_get_dev_stats *stats,
- int reset_after_read);
+ struct btrfs_ioctl_get_dev_stats *stats);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
diff --git a/fs/buffer.c b/fs/buffer.c
index c7062c896d7c..58e2e7b77372 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
/*
* Initialise the state of a blockdev page's buffers.
*/
-static void
+static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
sector_t block, int size)
{
@@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev,
block++;
bh = bh->b_this_page;
} while (bh != head);
+
+ /*
+ * Caller needs to validate requested block against end of device.
+ */
+ return end_block;
}
/*
* Create the page-cache page that contains the requested block.
*
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
*/
-static struct page *
+static int
grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size)
+ pgoff_t index, int size, int sizebits)
{
struct inode *inode = bdev->bd_inode;
struct page *page;
struct buffer_head *bh;
+ sector_t end_block;
+ int ret = 0; /* Will call free_more_memory() */
page = find_or_create_page(inode->i_mapping, index,
(mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
if (!page)
- return NULL;
+ return ret;
BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
if (bh->b_size == size) {
- init_page_buffers(page, bdev, block, size);
- return page;
+ end_block = init_page_buffers(page, bdev,
+ index << sizebits, size);
+ goto done;
}
if (!try_to_free_buffers(page))
goto failed;
@@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block,
*/
spin_lock(&inode->i_mapping->private_lock);
link_dev_buffers(page, bh);
- init_page_buffers(page, bdev, block, size);
+ end_block = init_page_buffers(page, bdev, index << sizebits, size);
spin_unlock(&inode->i_mapping->private_lock);
- return page;
-
+done:
+ ret = (block < end_block) ? 1 : -ENXIO;
failed:
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ret;
}
/*
@@ -999,7 +1007,6 @@ failed:
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
- struct page *page;
pgoff_t index;
int sizebits;
@@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
bdevname(bdev, b));
return -EIO;
}
- block = index << sizebits;
+
/* Create a page with the proper size buffers.. */
- page = grow_dev_page(bdev, block, index, size);
- if (!page)
- return 0;
- unlock_page(page);
- page_cache_release(page);
- return 1;
+ return grow_dev_page(bdev, block, index, size, sizebits);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
- int ret;
- struct buffer_head *bh;
-
/* Size must be multiple of hard sectorsize */
if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
@@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
return NULL;
}
-retry:
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
+ for (;;) {
+ struct buffer_head *bh;
+ int ret;
- ret = grow_buffers(bdev, block, size);
- if (ret == 0) {
- free_more_memory();
- goto retry;
- } else if (ret > 0) {
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
+
+ ret = grow_buffers(bdev, block, size);
+ if (ret < 0)
+ return NULL;
+ if (ret == 0)
+ free_more_memory();
}
- return NULL;
}
/*
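
A short summary of the return convention introduced above, for reference (illustrative comment only):

/*
 *   grow_dev_page() / grow_buffers():
 *     1       buffers were created and the requested block is within the
 *             device; __getblk_slow() retries the lookup
 *     0       page or buffer allocation failed; __getblk_slow() calls
 *             free_more_memory() and retries
 *    -ENXIO   the requested block lies beyond the end of the device;
 *             __getblk_slow() gives up and returns NULL
 */
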
@@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block);
* which corresponds to the passed block_device, block and size. The
* returned buffer has its reference count incremented.
*
- * __getblk() cannot fail - it just keeps trying. If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block. Very weird.
- *
* __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
* attempt is failing. FIXME, perhaps?
*/
@@ -2306,8 +2300,8 @@ EXPORT_SYMBOL(block_commit_write);
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
*
- * Direct callers of this function should call vfs_check_frozen() so that page
- * fault does not busyloop until the fs is thawed.
+ * Direct callers of this function should protect against filesystem freezing
+ * using sb_start_write() - sb_end_write() functions.
*/
int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
@@ -2318,6 +2312,12 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
loff_t size;
int ret;
+ /*
+ * Update file times before taking page lock. We may end up failing the
+ * fault so this update may be superfluous but who really cares...
+ */
+ file_update_time(vma->vm_file);
+
lock_page(page);
size = i_size_read(inode);
if ((page->mapping != inode->i_mapping) ||
@@ -2339,18 +2339,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
if (unlikely(ret < 0))
goto out_unlock;
- /*
- * Freezing in progress? We check after the page is marked dirty and
- * with page lock held so if the test here fails, we are sure freezing
- * code will wait during syncing until the page fault is done - at that
- * point page will be dirty and unlocked so freezing code will write it
- * and writeprotect it again.
- */
set_page_dirty(page);
- if (inode->i_sb->s_frozen != SB_UNFROZEN) {
- ret = -EAGAIN;
- goto out_unlock;
- }
wait_on_page_writeback(page);
return 0;
out_unlock:
@@ -2365,12 +2354,9 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
int ret;
struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
- /*
- * This check is racy but catches the common case. The check in
- * __block_page_mkwrite() is reliable.
- */
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ sb_start_pagefault(sb);
ret = __block_page_mkwrite(vma, vmf, get_block);
+ sb_end_pagefault(sb);
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL(block_page_mkwrite);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0353dfac51f..c994691d9445 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -919,7 +919,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
* own time */
path.mnt = cache->mnt;
path.dentry = object->backer;
- file = dentry_open(&path, O_RDWR, cache->cache_cred);
+ file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8b67304e4b80..452e71a1b753 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1184,6 +1184,9 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
loff_t size, len;
int ret;
+ /* Update time before taking page lock */
+ file_update_time(vma->vm_file);
+
size = i_size_read(inode);
if (off + PAGE_CACHE_SIZE <= size)
len = PAGE_CACHE_SIZE;
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index fb962efdacee..6d59006bfa27 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
int err = -ENOMEM;
dout("ceph_fs_debugfs_init\n");
+ BUG_ON(!fsc->client->debugfs_dir);
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 00894ff9246c..e5b77319c97b 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -51,8 +51,7 @@ int ceph_init_dentry(struct dentry *dentry)
goto out_unlock;
}
- if (dentry->d_parent == NULL || /* nfs fh_to_dentry */
- ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
+ if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
d_set_d_op(dentry, &ceph_dentry_ops);
else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
@@ -79,7 +78,7 @@ struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
return NULL;
spin_lock(&dentry->d_lock);
- if (dentry->d_parent) {
+ if (!IS_ROOT(dentry)) {
inode = dentry->d_parent->d_inode;
ihold(inode);
}
@@ -634,44 +633,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
return dentry;
}
-int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned flags, umode_t mode,
- int *opened)
-{
- int err;
- struct dentry *res = NULL;
-
- if (!(flags & O_CREAT)) {
- if (dentry->d_name.len > NAME_MAX)
- return -ENAMETOOLONG;
-
- err = ceph_init_dentry(dentry);
- if (err < 0)
- return err;
-
- return ceph_lookup_open(dir, dentry, file, flags, mode, opened);
- }
-
- if (d_unhashed(dentry)) {
- res = ceph_lookup(dir, dentry, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- if (res)
- dentry = res;
- }
-
- /* We don't deal with positive dentries here */
- if (dentry->d_inode)
- return finish_no_open(file, res);
-
- *opened |= FILE_CREATED;
- err = ceph_lookup_open(dir, dentry, file, flags, mode, opened);
- dput(res);
-
- return err;
-}
-
/*
* If we do a create but get no trace back from the MDS, follow up with
* a lookup (the VFS expects us to link up the provided dentry).
@@ -1154,7 +1115,7 @@ static void ceph_d_prune(struct dentry *dentry)
dout("ceph_d_prune %p\n", dentry);
/* do we have a valid parent? */
- if (!dentry->d_parent || IS_ROOT(dentry))
+ if (IS_ROOT(dentry))
return;
/* if we are not hashed, we don't affect D_COMPLETE */
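All three hunks lean on the VFS invariant that d_parent is never NULL (a root dentry points at itself), which makes IS_ROOT() the right test and the extra NULL checks dead code. A hedged sketch of the parent-inode lookup pattern used here:

/* Sketch: take a reference on the parent inode, or return NULL for a root dentry. */
static struct inode *get_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {		/* d_parent always points at a valid dentry */
		inode = dentry->d_parent->d_inode;
		ihold(inode);		/* pin it while the caller uses it */
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}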
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 1b81d6c31878..ecebbc09bfc7 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
@@ -106,9 +107,6 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
}
/*
- * If the filp already has private_data, that means the file was
- * already opened by intent during lookup, and we do nothing.
- *
* If we already have the requisite capabilities, we can satisfy
* the open request locally (no need to request new caps from the
* MDS). We do, however, need to inform the MDS (asynchronously)
@@ -207,24 +205,29 @@ out:
/*
- * Do a lookup + open with a single request.
- *
- * If this succeeds, but some subsequent check in the vfs
- * may_open() fails, the struct *file gets cleaned up (i.e.
- * ceph_release gets called). So fear not!
+ * Do a lookup + open with a single request. If we get a non-existent
+ * file or symlink, return 1 so the VFS can retry.
*/
-int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
+int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,
int *opened)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
- struct dentry *ret;
+ struct dentry *dn;
int err;
- dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
- dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);
+ dout("atomic_open %p dentry %p '%.*s' %s flags %d mode 0%o\n",
+ dir, dentry, dentry->d_name.len, dentry->d_name.name,
+ d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
+
+ if (dentry->d_name.len > NAME_MAX)
+ return -ENAMETOOLONG;
+
+ err = ceph_init_dentry(dentry);
+ if (err < 0)
+ return err;
/* do the open */
req = prepare_open_request(dir->i_sb, flags, mode);
@@ -241,22 +244,31 @@ int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
(flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
req);
err = ceph_handle_snapdir(req, dentry, err);
- if (err)
- goto out;
- if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+ if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
- if (err)
- goto out;
- err = finish_open(file, req->r_dentry, ceph_open, opened);
-out:
- ret = ceph_finish_lookup(req, dentry, err);
- ceph_mdsc_put_request(req);
- dout("ceph_lookup_open result=%p\n", ret);
- if (IS_ERR(ret))
- return PTR_ERR(ret);
+ if (d_unhashed(dentry)) {
+ dn = ceph_finish_lookup(req, dentry, err);
+ if (IS_ERR(dn))
+ err = PTR_ERR(dn);
+ } else {
+ /* we were given a hashed negative dentry */
+ dn = NULL;
+ }
+ if (err)
+ goto out_err;
+ if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+ /* make vfs retry on splice, ENOENT, or symlink */
+ dout("atomic_open finish_no_open on dn %p\n", dn);
+ err = finish_no_open(file, dn);
+ } else {
+ dout("atomic_open finish_open on dn %p\n", dn);
+ err = finish_open(file, dentry, ceph_open, opened);
+ }
- dput(ret);
+out_err:
+ ceph_mdsc_put_request(req);
+ dout("atomic_open result=%d\n", err);
return err;
}
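The rewritten handler follows the generic ->atomic_open contract: finish_open() completes the open on the dentry in hand, while finish_no_open() hands control back so the VFS can retry the normal lookup/open path (splice, negative dentry, or symlink). A minimal sketch of that decision; myfs_lookup_dentry and myfs_do_open are hypothetical helpers playing the roles of ceph_finish_lookup and ceph_open above:

/* Sketch of the ->atomic_open decision made above (helper names are hypothetical). */
static int myfs_atomic_open(struct inode *dir, struct dentry *dentry,
			    struct file *file, unsigned flags, umode_t mode,
			    int *opened)
{
	struct dentry *dn = NULL;

	if (d_unhashed(dentry)) {
		dn = myfs_lookup_dentry(dir, dentry);	/* may splice in a new dentry */
		if (IS_ERR(dn))
			return PTR_ERR(dn);
	}

	/* splice, ENOENT or symlink: make the VFS retry via the normal path */
	if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode))
		return finish_no_open(file, dn);

	/* positive dentry we can open here and now */
	return finish_open(file, dentry, myfs_do_open, opened);
}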
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9fff9f3b17e4..4b5762ef7c2b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
if (rinfo->head->is_dentry) {
struct inode *dir = req->r_locked_dir;
- err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
- session, req->r_request_started, -1,
- &req->r_caps_reservation);
- if (err < 0)
- return err;
+ if (dir) {
+ err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+ session, req->r_request_started, -1,
+ &req->r_caps_reservation);
+ if (err < 0)
+ return err;
+ } else {
+ WARN_ON_ONCE(1);
+ }
}
/*
@@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
* will have trouble splicing in the virtual snapdir later
*/
if (rinfo->head->is_dentry && !req->r_aborted &&
+ req->r_locked_dir &&
(rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
fsc->mount_options->snapdir_name,
req->r_dentry->d_name.len))) {
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8e3fb69fbe62..1396ceb46797 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
/* validate striping parameters */
if ((l->object_size & ~PAGE_MASK) ||
(l->stripe_unit & ~PAGE_MASK) ||
- ((unsigned)l->object_size % (unsigned)l->stripe_unit))
+ (l->stripe_unit != 0 &&
+ ((unsigned)l->object_size % (unsigned)l->stripe_unit)))
return -EINVAL;
/* make sure it's a valid data pool */
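The widened condition guards the modulo against stripe_unit == 0, which user space can pass in; without it the check divides by zero. The same validation, isolated as a hedged helper:

/* Sketch: validate layout sizes without risking a divide-by-zero. */
static bool layout_sizes_valid(unsigned int object_size, unsigned int stripe_unit)
{
	if (object_size & ~PAGE_MASK)		/* must be page aligned */
		return false;
	if (stripe_unit & ~PAGE_MASK)
		return false;
	if (stripe_unit != 0 && (object_size % stripe_unit))
		return false;			/* not a whole number of stripe units */
	return true;
}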
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 200bc87eceb1..a5a735422aa7 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -10,6 +10,7 @@
#include "super.h"
#include "mds_client.h"
+#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
@@ -394,11 +395,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_seq = 0;
mutex_init(&s->s_mutex);
- ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
- s->s_con.private = s;
- s->s_con.ops = &mds_con_ops;
- s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
- s->s_con.peer_name.num = cpu_to_le64(mds);
+ ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
spin_lock_init(&s->s_gen_ttl_lock);
s->s_cap_gen = 0;
@@ -440,7 +437,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
mdsc->sessions[mds] = s;
atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
- ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+ ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
+ ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
return s;
@@ -1472,11 +1470,6 @@ retry:
else
len += 1 + temp->d_name.len;
temp = temp->d_parent;
- if (temp == NULL) {
- rcu_read_unlock();
- pr_err("build_path corrupt dentry %p\n", dentry);
- return ERR_PTR(-EINVAL);
- }
}
rcu_read_unlock();
if (len)
@@ -1513,12 +1506,6 @@ retry:
if (pos)
path[--pos] = '/';
temp = temp->d_parent;
- if (temp == NULL) {
- rcu_read_unlock();
- pr_err("build_path corrupt dentry\n");
- kfree(path);
- return ERR_PTR(-EINVAL);
- }
}
rcu_read_unlock();
if (pos != 0 || read_seqretry(&rename_lock, seq)) {
@@ -2531,7 +2518,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
session->s_state = CEPH_MDS_SESSION_RECONNECTING;
session->s_seq = 0;
+ ceph_con_close(&session->s_con);
ceph_con_open(&session->s_con,
+ CEPH_ENTITY_TYPE_MDS, mds,
ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
/* replay unsafe requests */
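The messenger API change visible here folds the private pointer, ops table and messenger into ceph_con_init(), moves the peer type and number into ceph_con_open(), and closes the old connection before reconnecting. A hedged sketch of the new calling pattern, wrapped in a hypothetical helper:

/* Sketch: MDS session connection setup with the reworked libceph calls. */
static void mds_con_setup(struct ceph_mds_session *s,
			  struct ceph_mds_client *mdsc, int mds)
{
	/* private data, ops and messenger now go to ceph_con_init() */
	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	/* peer type and number now go to ceph_con_open() */
	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
}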
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index e5206fc76562..cbb2f54a3019 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -296,8 +296,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
struct ceph_snap_realm *parent = realm->parent;
struct ceph_snap_context *snapc;
int err = 0;
- int i;
- int num = realm->num_prior_parent_snaps + realm->num_snaps;
+ u32 num = realm->num_prior_parent_snaps + realm->num_snaps;
/*
* build parent context, if it hasn't been built.
@@ -321,11 +320,11 @@ static int build_snap_context(struct ceph_snap_realm *realm)
realm->cached_context->seq == realm->seq &&
(!parent ||
realm->cached_context->seq >= parent->cached_context->seq)) {
- dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
+ dout("build_snap_context %llx %p: %p seq %lld (%u snaps)"
" (unchanged)\n",
realm->ino, realm, realm->cached_context,
realm->cached_context->seq,
- realm->cached_context->num_snaps);
+ (unsigned int) realm->cached_context->num_snaps);
return 0;
}
@@ -342,6 +341,8 @@ static int build_snap_context(struct ceph_snap_realm *realm)
num = 0;
snapc->seq = realm->seq;
if (parent) {
+ u32 i;
+
/* include any of parent's snaps occurring _after_ my
parent became my parent */
for (i = 0; i < parent->cached_context->num_snaps; i++)
@@ -361,8 +362,9 @@ static int build_snap_context(struct ceph_snap_realm *realm)
sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
snapc->num_snaps = num;
- dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
- realm->ino, realm, snapc, snapc->seq, snapc->num_snaps);
+ dout("build_snap_context %llx %p: %p seq %lld (%u snaps)\n",
+ realm->ino, realm, snapc, snapc->seq,
+ (unsigned int) snapc->num_snaps);
if (realm->cached_context)
ceph_put_snap_context(realm->cached_context);
@@ -402,9 +404,9 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm)
* helper to allocate and decode an array of snapids. free prior
* instance, if any.
*/
-static int dup_array(u64 **dst, __le64 *src, int num)
+static int dup_array(u64 **dst, __le64 *src, u32 num)
{
- int i;
+ u32 i;
kfree(*dst);
if (num) {
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 7076109f014d..b982239f38f9 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -18,6 +18,7 @@
#include "super.h"
#include "mds_client.h"
+#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index f4d5522cb619..66ebe720e40d 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -612,9 +612,9 @@ struct ceph_snap_realm {
u64 parent_since; /* snapid when our current parent became so */
u64 *prior_parent_snaps; /* snaps inherited from any parents we */
- int num_prior_parent_snaps; /* had prior to parent_since */
+ u32 num_prior_parent_snaps; /* had prior to parent_since */
u64 *snaps; /* snaps specific to this realm */
- int num_snaps;
+ u32 num_snaps;
struct ceph_snap_realm *parent;
struct list_head children; /* list of child realms */
@@ -806,9 +806,9 @@ extern int ceph_copy_from_page_vector(struct page **pages,
loff_t off, size_t len);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_open(struct inode *inode, struct file *file);
-extern int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
- struct file *od, unsigned flags,
- umode_t mode, int *opened);
+extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode,
+ int *opened);
extern int ceph_release(struct inode *inode, struct file *filp);
/* dir.c */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 785cb3057c95..2c2ae5be9902 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -457,6 +457,7 @@ start:
for (i = 0; i < numattr; i++)
kfree(xattrs[i]);
kfree(xattrs);
+ xattrs = NULL;
goto start;
}
err = -EIO;
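Resetting xattrs to NULL after freeing it matters because this path jumps back to the start label and can hit the same error again; without the reset the second pass would free a stale pointer. The general free-and-retry shape, as a hedged standalone sketch (fill_items is hypothetical):

/* Sketch: retry loop that must clear pointers after freeing what it built. */
static int build_with_retry(char ***items_out, int *count_out)
{
	char **items = NULL;
	int i, n = 0, err;

start:
	err = fill_items(&items, &n);		/* may allocate, then ask for a retry */
	if (err == -ERESTART) {
		for (i = 0; i < n; i++)
			kfree(items[i]);
		kfree(items);
		items = NULL;			/* avoid a double free on the next pass */
		n = 0;
		goto start;
	}
	*items_out = items;
	*count_out = n;
	return err;
}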
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 4b4127544349..feee94309271 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -16,4 +16,5 @@ cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
-cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
+cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o smb2maperror.o smb2transport.o \
+ smb2misc.o smb2pdu.o smb2inode.o
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 545509c3313b..282d6de7e410 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
- cFYI(1, "%s: couldn't extract sharename\n", __func__);
+ cFYI(1, "%s: couldn't extract sharename", __func__);
sharename = NULL;
return 0;
}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index e8140528ca5c..d9ea6ede6a7a 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -65,7 +65,7 @@ void cifs_dump_detail(void *buf)
cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
- cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
+ cERROR(1, "smb buf %p len %u", smb, smbCalcSize(smb));
#endif /* CONFIG_CIFS_DEBUG2 */
}
@@ -282,24 +282,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
- atomic_set(&tcon->num_writes, 0);
- atomic_set(&tcon->num_reads, 0);
- atomic_set(&tcon->num_oplock_brks, 0);
- atomic_set(&tcon->num_opens, 0);
- atomic_set(&tcon->num_posixopens, 0);
- atomic_set(&tcon->num_posixmkdirs, 0);
- atomic_set(&tcon->num_closes, 0);
- atomic_set(&tcon->num_deletes, 0);
- atomic_set(&tcon->num_mkdirs, 0);
- atomic_set(&tcon->num_rmdirs, 0);
- atomic_set(&tcon->num_renames, 0);
- atomic_set(&tcon->num_t2renames, 0);
- atomic_set(&tcon->num_ffirst, 0);
- atomic_set(&tcon->num_fnext, 0);
- atomic_set(&tcon->num_fclose, 0);
- atomic_set(&tcon->num_hardlinks, 0);
- atomic_set(&tcon->num_symlinks, 0);
- atomic_set(&tcon->num_locks, 0);
+ if (server->ops->clear_stats)
+ server->ops->clear_stats(tcon);
}
}
}
@@ -358,42 +342,10 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) %s", i, tcon->treeName);
if (tcon->need_reconnect)
seq_puts(m, "\tDISCONNECTED ");
- seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
- atomic_read(&tcon->num_smbs_sent),
- atomic_read(&tcon->num_oplock_brks));
- seq_printf(m, "\nReads: %d Bytes: %lld",
- atomic_read(&tcon->num_reads),
- (long long)(tcon->bytes_read));
- seq_printf(m, "\nWrites: %d Bytes: %lld",
- atomic_read(&tcon->num_writes),
- (long long)(tcon->bytes_written));
- seq_printf(m, "\nFlushes: %d",
- atomic_read(&tcon->num_flushes));
- seq_printf(m, "\nLocks: %d HardLinks: %d "
- "Symlinks: %d",
- atomic_read(&tcon->num_locks),
- atomic_read(&tcon->num_hardlinks),
- atomic_read(&tcon->num_symlinks));
- seq_printf(m, "\nOpens: %d Closes: %d "
- "Deletes: %d",
- atomic_read(&tcon->num_opens),
- atomic_read(&tcon->num_closes),
- atomic_read(&tcon->num_deletes));
- seq_printf(m, "\nPosix Opens: %d "
- "Posix Mkdirs: %d",
- atomic_read(&tcon->num_posixopens),
- atomic_read(&tcon->num_posixmkdirs));
- seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
- atomic_read(&tcon->num_mkdirs),
- atomic_read(&tcon->num_rmdirs));
- seq_printf(m, "\nRenames: %d T2 Renames %d",
- atomic_read(&tcon->num_renames),
- atomic_read(&tcon->num_t2renames));
- seq_printf(m, "\nFindFirst: %d FNext %d "
- "FClose %d",
- atomic_read(&tcon->num_ffirst),
- atomic_read(&tcon->num_fnext),
- atomic_read(&tcon->num_fclose));
+ seq_printf(m, "\nSMBs: %d",
+ atomic_read(&tcon->num_smbs_sent));
+ if (server->ops->print_stats)
+ server->ops->print_stats(m, tcon);
}
}
}
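With per-dialect operations, the /proc/fs/cifs/Stats handlers above no longer hard-code the SMB1 counters and instead dispatch through server->ops. A hedged sketch of how a dialect could wire the two hooks, using the stats union and field names added to cifsglob.h later in this patch (function names here are illustrative, not the real smb1ops.c ones):

/* Sketch: per-dialect stat hooks called from the /proc handlers above. */
static void sketch_clear_stats(struct cifs_tcon *tcon)
{
	atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
	atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
	/* ... reset the remaining cifs_stats counters the same way ... */
}

static void sketch_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
{
	seq_printf(m, "\nReads: %d Writes: %d",
		   atomic_read(&tcon->stats.cifs_stats.num_reads),
		   atomic_read(&tcon->stats.cifs_stats.num_writes));
}

static struct smb_version_operations sketch_ops = {
	.clear_stats	= sketch_clear_stats,
	.print_stats	= sketch_print_stats,
	/* ... the rest of the dialect's operations ... */
};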
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 6873bb634a97..ce5cbd717bfc 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -275,7 +275,8 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
struct cifs_sb_info *cifs_sb;
struct cifs_ses *ses;
char *full_path;
- int xid, i;
+ unsigned int xid;
+ int i;
int rc;
struct vfsmount *mnt;
struct tcon_link *tlink;
@@ -302,11 +303,11 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
}
ses = tlink_tcon(tlink)->ses;
- xid = GetXid();
+ xid = get_xid();
rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
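This is the mechanical part of the xid rework: GetXid()/FreeXid() become get_xid()/free_xid() and the id is now unsigned int (the macro definitions appear in the cifsproto.h hunk further down; free_xid() still logs a local rc, which therefore must be in scope). A hedged usage sketch with a hypothetical request helper:

/* Sketch: bracketing a CIFS operation with the renamed xid helpers. */
static int do_some_cifs_op(struct cifs_tcon *tcon)
{
	unsigned int xid;
	int rc;

	xid = get_xid();			/* logs entry, returns an unsigned id */
	rc = some_cifs_request(xid, tcon);	/* hypothetical request */
	free_xid(xid);				/* logs exit together with rc */
	return rc;
}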
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index fbb9da951843..7dab9c04ad52 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -331,3 +331,63 @@ ctoUTF16_out:
return i;
}
+#ifdef CONFIG_CIFS_SMB2
+/*
+ * cifs_local_to_utf16_bytes - how long will a string be after conversion?
+ * @from - pointer to input string
+ * @len - don't go past this many bytes of input string
+ * @codepage - source codepage
+ *
+ * Walk a string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past len bytes in the source buffer.
+ */
+
+static int
+cifs_local_to_utf16_bytes(const char *from, int len,
+ const struct nls_table *codepage)
+{
+ int charlen;
+ int i;
+ wchar_t wchar_to;
+
+ for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
+ charlen = codepage->char2uni(from, len, &wchar_to);
+ /* Failed conversion defaults to a question mark */
+ if (charlen < 1)
+ charlen = 1;
+ }
+ return 2 * i; /* UTF16 characters are two bytes */
+}
+
+/*
+ * cifs_strndup_to_utf16 - copy a string to wire format from the local codepage
+ * @src - source string
+ * @maxlen - don't walk past this many bytes in the source string
+ * @utf16_len - the length of the allocated string in bytes (including null)
+ * @cp - source codepage
+ * @remap - map special chars
+ *
+ * Take a string, convert it from the local codepage to UTF16, and
+ * put it in a new buffer. Returns a pointer to the new string or NULL on
+ * error.
+ */
+__le16 *
+cifs_strndup_to_utf16(const char *src, const int maxlen, int *utf16_len,
+ const struct nls_table *cp, int remap)
+{
+ int len;
+ __le16 *dst;
+
+ len = cifs_local_to_utf16_bytes(src, maxlen, cp);
+ len += 2; /* NULL */
+ dst = kmalloc(len, GFP_KERNEL);
+ if (!dst) {
+ *utf16_len = 0;
+ return NULL;
+ }
+ cifsConvertToUTF16(dst, src, strlen(src), cp, remap);
+ *utf16_len = len;
+ return dst;
+}
+#endif /* CONFIG_CIFS_SMB2 */
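cifs_strndup_to_utf16() returns a freshly allocated __le16 wire-format copy of a local-codepage string and reports the allocated size (including the terminating null) through utf16_len; the caller owns and must kfree() the buffer. A hedged usage sketch:

/* Sketch: convert a path to UTF-16 for an SMB2 request; caller frees the result. */
static __le16 *path_to_wire(const char *path, int *wire_len,
			    const struct nls_table *nls, int remap)
{
	__le16 *utf16_path;

	utf16_path = cifs_strndup_to_utf16(path, PATH_MAX, wire_len, nls, remap);
	if (!utf16_path)
		return NULL;	/* allocation failure; *wire_len is set to 0 */
	return utf16_path;	/* kfree() this once the request is built */
}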
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index a513a546700b..4fb097468e21 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -84,7 +84,11 @@ char *cifs_strndup_from_utf16(const char *src, const int maxlen,
const struct nls_table *codepage);
extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
const struct nls_table *cp, int mapChars);
-
+#ifdef CONFIG_CIFS_SMB2
+extern __le16 *cifs_strndup_to_utf16(const char *src, const int maxlen,
+ int *utf16_len, const struct nls_table *cp,
+ int remap);
+#endif /* CONFIG_CIFS_SMB2 */
#endif
/*
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 3cc1b251ca08..05f4dc263a23 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -525,7 +525,7 @@ init_cifs_idmap(void)
struct key *keyring;
int ret;
- cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);
+ cFYI(1, "Registering the %s key type", cifs_idmap_key_type.name);
/* create an override credential set with a special thread keyring in
* which requests are cached
@@ -572,7 +572,7 @@ init_cifs_idmap(void)
sidgidtree = RB_ROOT;
register_shrinker(&cifs_shrinker);
- cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
+ cFYI(1, "cifs idmap keyring: %d", key_serial(keyring));
return 0;
failed_put_key:
@@ -589,7 +589,7 @@ exit_cifs_idmap(void)
unregister_key_type(&cifs_idmap_key_type);
put_cred(root_cred);
unregister_shrinker(&cifs_shrinker);
- cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
+ cFYI(1, "Unregistered %s key type", cifs_idmap_key_type.name);
}
void
@@ -1153,15 +1153,16 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
__u16 fid, u32 *pacllen)
{
struct cifs_ntsd *pntsd = NULL;
- int xid, rc;
+ unsigned int xid;
+ int rc;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return ERR_CAST(tlink);
- xid = GetXid();
+ xid = get_xid();
rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
@@ -1176,7 +1177,8 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
{
struct cifs_ntsd *pntsd = NULL;
int oplock = 0;
- int xid, rc, create_options = 0;
+ unsigned int xid;
+ int rc, create_options = 0;
__u16 fid;
struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -1185,7 +1187,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
return ERR_CAST(tlink);
tcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
@@ -1199,7 +1201,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
}
cifs_put_tlink(tlink);
- FreeXid(xid);
+ free_xid(xid);
cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
if (rc)
@@ -1230,7 +1232,8 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct inode *inode, const char *path, int aclflag)
{
int oplock = 0;
- int xid, rc, access_flags, create_options = 0;
+ unsigned int xid;
+ int rc, access_flags, create_options = 0;
__u16 fid;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1240,7 +1243,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
@@ -1263,7 +1266,7 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
CIFSSMBClose(xid, tcon, fid);
out:
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 63c460e503b6..6a0d741159f0 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -47,20 +47,20 @@ static int cifs_calc_signature(const struct kvec *iov, int n_vec,
return -EINVAL;
if (!server->secmech.sdescmd5) {
- cERROR(1, "%s: Can't generate signature\n", __func__);
+ cERROR(1, "%s: Can't generate signature", __func__);
return -1;
}
rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
if (rc) {
- cERROR(1, "%s: Could not init md5\n", __func__);
+ cERROR(1, "%s: Could not init md5", __func__);
return rc;
}
rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
server->session_key.response, server->session_key.len);
if (rc) {
- cERROR(1, "%s: Could not update with response\n", __func__);
+ cERROR(1, "%s: Could not update with response", __func__);
return rc;
}
@@ -85,7 +85,7 @@ static int cifs_calc_signature(const struct kvec *iov, int n_vec,
iov[i].iov_base, iov[i].iov_len);
}
if (rc) {
- cERROR(1, "%s: Could not update with payload\n",
+ cERROR(1, "%s: Could not update with payload",
__func__);
return rc;
}
@@ -93,13 +93,13 @@ static int cifs_calc_signature(const struct kvec *iov, int n_vec,
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
if (rc)
- cERROR(1, "%s: Could not generate md5 hash\n", __func__);
+ cERROR(1, "%s: Could not generate md5 hash", __func__);
return rc;
}
/* must be called with server->srv_mutex held */
-int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
+int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
@@ -143,7 +143,7 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
iov.iov_base = cifs_pdu;
iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4;
- return cifs_sign_smb2(&iov, 1, server,
+ return cifs_sign_smbv(&iov, 1, server,
pexpected_response_sequence_number);
}
@@ -399,7 +399,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
wchar_t *server;
if (!ses->server->secmech.sdeschmacmd5) {
- cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+ cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash");
return -1;
}
@@ -415,7 +415,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5\n");
+ cERROR(1, "calc_ntlmv2_hash: could not init hmacmd5");
return rc;
}
@@ -423,7 +423,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
len = ses->user_name ? strlen(ses->user_name) : 0;
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL) {
- cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
+ cERROR(1, "calc_ntlmv2_hash: user mem alloc failure");
rc = -ENOMEM;
return rc;
}
@@ -439,7 +439,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
(char *)user, 2 * len);
kfree(user);
if (rc) {
- cERROR(1, "%s: Could not update with user\n", __func__);
+ cERROR(1, "%s: Could not update with user", __func__);
return rc;
}
@@ -460,7 +460,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
(char *)domain, 2 * len);
kfree(domain);
if (rc) {
- cERROR(1, "%s: Could not update with domain\n",
+ cERROR(1, "%s: Could not update with domain",
__func__);
return rc;
}
@@ -480,7 +480,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
(char *)server, 2 * len);
kfree(server);
if (rc) {
- cERROR(1, "%s: Could not update with server\n",
+ cERROR(1, "%s: Could not update with server",
__func__);
return rc;
}
@@ -489,7 +489,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ntlmv2_hash);
if (rc)
- cERROR(1, "%s: Could not generate md5 hash\n", __func__);
+ cERROR(1, "%s: Could not generate md5 hash", __func__);
return rc;
}
@@ -501,7 +501,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
if (!ses->server->secmech.sdeschmacmd5) {
- cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash\n");
+ cERROR(1, "calc_ntlmv2_hash: can't generate ntlmv2 hash");
return -1;
}
@@ -527,14 +527,14 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response + offset, ses->auth_key.len - offset);
if (rc) {
- cERROR(1, "%s: Could not update with response\n", __func__);
+ cERROR(1, "%s: Could not update with response", __func__);
return rc;
}
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response + CIFS_SESS_KEY_SIZE);
if (rc)
- cERROR(1, "%s: Could not generate md5 hash\n", __func__);
+ cERROR(1, "%s: Could not generate md5 hash", __func__);
return rc;
}
@@ -613,7 +613,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cERROR(1, "%s: Could not init hmacmd5\n", __func__);
+ cERROR(1, "%s: Could not init hmacmd5", __func__);
goto setup_ntlmv2_rsp_ret;
}
@@ -621,14 +621,14 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
ses->auth_key.response + CIFS_SESS_KEY_SIZE,
CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
- cERROR(1, "%s: Could not update with response\n", __func__);
+ cERROR(1, "%s: Could not update with response", __func__);
goto setup_ntlmv2_rsp_ret;
}
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
ses->auth_key.response);
if (rc)
- cERROR(1, "%s: Could not generate md5 hash\n", __func__);
+ cERROR(1, "%s: Could not generate md5 hash", __func__);
setup_ntlmv2_rsp_ret:
kfree(tiblob);
@@ -650,7 +650,7 @@ calc_seckey(struct cifs_ses *ses)
tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_arc4)) {
rc = PTR_ERR(tfm_arc4);
- cERROR(1, "could not allocate crypto API arc4\n");
+ cERROR(1, "could not allocate crypto API arc4");
return rc;
}
@@ -668,7 +668,7 @@ calc_seckey(struct cifs_ses *ses)
rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
if (rc) {
- cERROR(1, "could not encrypt session key rc: %d\n", rc);
+ cERROR(1, "could not encrypt session key rc: %d", rc);
crypto_free_blkcipher(tfm_arc4);
return rc;
}
@@ -705,13 +705,13 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
if (IS_ERR(server->secmech.hmacmd5)) {
- cERROR(1, "could not allocate crypto hmacmd5\n");
+ cERROR(1, "could not allocate crypto hmacmd5");
return PTR_ERR(server->secmech.hmacmd5);
}
server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(server->secmech.md5)) {
- cERROR(1, "could not allocate crypto md5\n");
+ cERROR(1, "could not allocate crypto md5");
rc = PTR_ERR(server->secmech.md5);
goto crypto_allocate_md5_fail;
}
@@ -720,7 +720,7 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
crypto_shash_descsize(server->secmech.hmacmd5);
server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdeschmacmd5) {
- cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5\n");
+ cERROR(1, "cifs_crypto_shash_allocate: can't alloc hmacmd5");
rc = -ENOMEM;
goto crypto_allocate_hmacmd5_sdesc_fail;
}
@@ -732,7 +732,7 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server)
crypto_shash_descsize(server->secmech.md5);
server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdescmd5) {
- cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5\n");
+ cERROR(1, "cifs_crypto_shash_allocate: can't alloc md5");
rc = -ENOMEM;
goto crypto_allocate_md5_sdesc_fail;
}
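These hunks only drop trailing newlines from cERROR() messages (the logging macro already supplies one), but the surrounding code is a reminder of the shash sequence the signing path depends on: init, one or more updates, then final. A minimal hedged sketch of that sequence over a single buffer, using the existing cifs sdesc wrapper from secmech:

/* Sketch: the crypto_shash init/update/final sequence used by the signing code. */
static int hash_one_buffer(struct sdesc *sdesc, const u8 *buf, unsigned int len,
			   u8 *digest)
{
	int rc;

	rc = crypto_shash_init(&sdesc->shash);
	if (rc)
		return rc;
	rc = crypto_shash_update(&sdesc->shash, buf, len);
	if (rc)
		return rc;
	return crypto_shash_final(&sdesc->shash, digest);	/* writes the digest out */
}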
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index a7610cfedf0a..db8a404a51dd 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -48,6 +48,9 @@
#include <linux/key-type.h>
#include "cifs_spnego.h"
#include "fscache.h"
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2pdu.h"
+#endif
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
int cifsFYI = 0;
@@ -158,9 +161,9 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
int rc = -EOPNOTSUPP;
- int xid;
+ unsigned int xid;
- xid = GetXid();
+ xid = get_xid();
buf->f_type = CIFS_MAGIC_NUMBER;
@@ -197,7 +200,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
if (rc)
rc = SMBOldQFSInfo(xid, tcon, buf);
- FreeXid(xid);
+ free_xid(xid);
return 0;
}
@@ -546,8 +549,8 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
char *s, *p;
char sep;
- full_path = cifs_build_path_to_root(vol, cifs_sb,
- cifs_sb_master_tcon(cifs_sb));
+ full_path = build_path_to_root(vol, cifs_sb,
+ cifs_sb_master_tcon(cifs_sb));
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
@@ -980,6 +983,14 @@ cifs_destroy_inodecache(void)
static int
cifs_init_request_bufs(void)
{
+ size_t max_hdr_size = MAX_CIFS_HDR_SIZE;
+#ifdef CONFIG_CIFS_SMB2
+ /*
+ * The SMB2 maximum header size is bigger than the CIFS one, so it is
+ * safe to allocate a few extra bytes for CIFS requests too.
+ */
+ max_hdr_size = MAX_SMB2_HDR_SIZE;
+#endif
if (CIFSMaxBufSize < 8192) {
/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
Unicode path name has to fit in any SMB/CIFS path based frames */
@@ -991,8 +1002,7 @@ cifs_init_request_bufs(void)
}
/* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
cifs_req_cachep = kmem_cache_create("cifs_request",
- CIFSMaxBufSize +
- MAX_CIFS_HDR_SIZE, 0,
+ CIFSMaxBufSize + max_hdr_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
if (cifs_req_cachep == NULL)
return -ENOMEM;
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6df0cbe1cbc9..977dc0e85ccb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -22,11 +22,15 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/slab.h>
+#include <linux/mempool.h>
#include <linux/workqueue.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
#include <linux/scatterlist.h>
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2pdu.h"
+#endif
/*
* The sizes of various internal tables and strings
@@ -72,6 +76,9 @@
/* (max path length + 1 for null) * 2 for unicode */
#define MAX_NAME 514
+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
+
#include "cifspdu.h"
#ifndef XATTR_DOS_ATTRIB
@@ -160,6 +167,10 @@ struct mid_q_entry;
struct TCP_Server_Info;
struct cifsFileInfo;
struct cifs_ses;
+struct cifs_tcon;
+struct dfs_info3_param;
+struct cifs_fattr;
+struct smb_vol;
struct smb_version_operations {
int (*send_cancel)(struct TCP_Server_Info *, void *,
@@ -168,12 +179,17 @@ struct smb_version_operations {
/* setup request: allocate mid, sign message */
int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
struct mid_q_entry **);
+ /* setup async request: allocate mid, sign message */
+ int (*setup_async_request)(struct TCP_Server_Info *, struct kvec *,
+ unsigned int, struct mid_q_entry **);
/* check response: verify signature, map error */
int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
bool);
- void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
+ void (*add_credits)(struct TCP_Server_Info *, const unsigned int,
+ const int);
void (*set_credits)(struct TCP_Server_Info *, const int);
- int * (*get_credits_field)(struct TCP_Server_Info *);
+ int * (*get_credits_field)(struct TCP_Server_Info *, const int);
+ unsigned int (*get_credits)(struct mid_q_entry *);
__u64 (*get_next_mid)(struct TCP_Server_Info *);
/* data offset from read response message */
unsigned int (*read_data_offset)(char *);
@@ -184,9 +200,62 @@ struct smb_version_operations {
/* find mid corresponding to the response message */
struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
void (*dump_detail)(void *);
+ void (*clear_stats)(struct cifs_tcon *);
+ void (*print_stats)(struct seq_file *m, struct cifs_tcon *);
/* verify the message */
int (*check_message)(char *, unsigned int);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ /* process transaction2 response */
+ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ char *, int);
+ /* check if we need to negotiate */
+ bool (*need_neg)(struct TCP_Server_Info *);
+ /* negotiate to the server */
+ int (*negotiate)(const unsigned int, struct cifs_ses *);
+ /* setup smb session */
+ int (*sess_setup)(const unsigned int, struct cifs_ses *,
+ const struct nls_table *);
+ /* close smb session */
+ int (*logoff)(const unsigned int, struct cifs_ses *);
+ /* connect to a server share */
+ int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
+ struct cifs_tcon *, const struct nls_table *);
+ /* close tree connection */
+ int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
+ /* get DFS referrals */
+ int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
+ const char *, struct dfs_info3_param **,
+ unsigned int *, const struct nls_table *, int);
+ /* informational QFS call */
+ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *);
+ /* check if a path is accessible or not */
+ int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *, const char *);
+ /* query path data from the server */
+ int (*query_path_info)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *, const char *,
+ FILE_ALL_INFO *, bool *);
+ /* get server index number */
+ int (*get_srv_inum)(const unsigned int, struct cifs_tcon *,
+ struct cifs_sb_info *, const char *,
+ u64 *uniqueid, FILE_ALL_INFO *);
+ /* build a full path to the root of the mount */
+ char * (*build_path_to_root)(struct smb_vol *, struct cifs_sb_info *,
+ struct cifs_tcon *);
+ /* check if we can send an echo or not */
+ bool (*can_echo)(struct TCP_Server_Info *);
+ /* send echo request */
+ int (*echo)(struct TCP_Server_Info *);
+ /* create directory */
+ int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
+ struct cifs_sb_info *);
+ /* set info on created directory */
+ void (*mkdir_setinfo)(struct inode *, const char *,
+ struct cifs_sb_info *, struct cifs_tcon *,
+ const unsigned int);
+ /* remove directory */
+ int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *,
+ struct cifs_sb_info *);
};
struct smb_version_values {
@@ -198,6 +267,10 @@ struct smb_version_values {
size_t header_size;
size_t max_header_size;
size_t read_rsp_size;
+ __le16 lock_cmd;
+ unsigned int cap_unix;
+ unsigned int cap_nt_find;
+ unsigned int cap_large_files;
};
#define HEADER_SIZE(server) (server->vals->header_size)
@@ -291,6 +364,12 @@ get_rfc1002_length(void *buf)
return be32_to_cpu(*((__be32 *)buf));
}
+static inline void
+inc_rfc1001_len(void *buf, int count)
+{
+ be32_add_cpu((__be32 *)buf, count);
+}
+
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
@@ -319,8 +398,13 @@ struct TCP_Server_Info {
struct mutex srv_mutex;
struct task_struct *tsk;
char server_GUID[16];
- char sec_mode;
+ __u16 sec_mode;
bool session_estab; /* mark when very first sess is established */
+#ifdef CONFIG_CIFS_SMB2
+ int echo_credits; /* echo reserved slots */
+ int oplock_credits; /* oplock break reserved slots */
+ bool echoes:1; /* enable echoes */
+#endif
u16 dialect; /* dialect index that server chose */
enum securityEnum secType;
bool oplocks:1; /* enable oplocks */
@@ -337,7 +421,7 @@ struct TCP_Server_Info {
unsigned int max_vcs; /* maximum number of smb sessions, at least
those that can be specified uniquely with
vcnumbers */
- int capabilities; /* allow selective disabling of caps by smb sess */
+ unsigned int capabilities; /* selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
__u64 CurrentMid; /* multiplex id - rotating counter */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
@@ -366,6 +450,10 @@ struct TCP_Server_Info {
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
#endif
+#ifdef CONFIG_CIFS_SMB2
+ unsigned int max_read;
+ unsigned int max_write;
+#endif /* CONFIG_CIFS_SMB2 */
};
static inline unsigned int
@@ -389,9 +477,10 @@ has_credits(struct TCP_Server_Info *server, int *credits)
}
static inline void
-add_credits(struct TCP_Server_Info *server, const unsigned int add)
+add_credits(struct TCP_Server_Info *server, const unsigned int add,
+ const int optype)
{
- server->ops->add_credits(server, add);
+ server->ops->add_credits(server, add, optype);
}
static inline void
@@ -453,10 +542,10 @@ struct cifs_ses {
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
char *serverDomain; /* security realm of server */
- int Suid; /* remote smb uid */
+ __u64 Suid; /* remote smb uid */
uid_t linux_uid; /* overriding owner of files on the mount */
uid_t cred_uid; /* owner of credentials */
- int capabilities;
+ unsigned int capabilities;
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
char *user_name; /* must not be null except during init of sess
@@ -466,6 +555,9 @@ struct cifs_ses {
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
bool need_reconnect:1; /* connection reset, uid now invalid */
+#ifdef CONFIG_CIFS_SMB2
+ __u16 session_flags;
+#endif /* CONFIG_CIFS_SMB2 */
};
/* no more than one of the following three session flags may be set */
#define CIFS_SES_NT4 1
@@ -475,6 +567,13 @@ struct cifs_ses {
which do not negotiate NTLM or POSIX dialects, but instead
negotiate one of the older LANMAN dialects */
#define CIFS_SES_LANMAN 8
+
+static inline bool
+cap_unix(struct cifs_ses *ses)
+{
+ return ses->server->vals->cap_unix & ses->capabilities;
+}
+
/*
* there is one of these for each connection to a resource on a particular
* session
@@ -487,32 +586,42 @@ struct cifs_tcon {
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
char *password; /* for share-level security */
- __u16 tid; /* The 2 byte tree id */
+ __u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
- atomic_t num_writes;
- atomic_t num_reads;
- atomic_t num_flushes;
- atomic_t num_oplock_brks;
- atomic_t num_opens;
- atomic_t num_closes;
- atomic_t num_deletes;
- atomic_t num_mkdirs;
- atomic_t num_posixopens;
- atomic_t num_posixmkdirs;
- atomic_t num_rmdirs;
- atomic_t num_renames;
- atomic_t num_t2renames;
- atomic_t num_ffirst;
- atomic_t num_fnext;
- atomic_t num_fclose;
- atomic_t num_hardlinks;
- atomic_t num_symlinks;
- atomic_t num_locks;
- atomic_t num_acl_get;
- atomic_t num_acl_set;
+ union {
+ struct {
+ atomic_t num_writes;
+ atomic_t num_reads;
+ atomic_t num_flushes;
+ atomic_t num_oplock_brks;
+ atomic_t num_opens;
+ atomic_t num_closes;
+ atomic_t num_deletes;
+ atomic_t num_mkdirs;
+ atomic_t num_posixopens;
+ atomic_t num_posixmkdirs;
+ atomic_t num_rmdirs;
+ atomic_t num_renames;
+ atomic_t num_t2renames;
+ atomic_t num_ffirst;
+ atomic_t num_fnext;
+ atomic_t num_fclose;
+ atomic_t num_hardlinks;
+ atomic_t num_symlinks;
+ atomic_t num_locks;
+ atomic_t num_acl_get;
+ atomic_t num_acl_set;
+ } cifs_stats;
+#ifdef CONFIG_CIFS_SMB2
+ struct {
+ atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
+ atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
+ } smb2_stats;
+#endif /* CONFIG_CIFS_SMB2 */
+ } stats;
#ifdef CONFIG_CIFS_STATS2
unsigned long long time_writes;
unsigned long long time_reads;
@@ -543,6 +652,15 @@ struct cifs_tcon {
bool local_lease:1; /* check leases (only) on local system not remote */
bool broken_posix_open; /* e.g. Samba server versions < 3.3.2, 3.2.9 */
bool need_reconnect:1; /* connection reset, tid now invalid */
+#ifdef CONFIG_CIFS_SMB2
+ bool print:1; /* set if connection to printer share */
+ bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
+ __u32 capabilities;
+ __u32 share_flags;
+ __u32 maximal_access;
+ __u32 vol_serial_number;
+ __le64 vol_create_time;
+#endif /* CONFIG_CIFS_SMB2 */
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
struct fscache_cookie *fscache; /* cookie for share */
@@ -657,13 +775,13 @@ struct cifs_io_parms {
* Take a reference on the file private data. Must be called with
* cifs_file_list_lock held.
*/
-static inline
-struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+static inline void
+cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
{
++cifs_file->count;
- return cifs_file;
}
+struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
/*
@@ -734,6 +852,15 @@ convert_delimiter(char *path, char delim)
}
}
+static inline char *
+build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon)
+{
+ if (!vol->ops->build_path_to_root)
+ return NULL;
+ return vol->ops->build_path_to_root(vol, cifs_sb, tcon);
+}
+
#ifdef CONFIG_CIFS_STATS
#define cifs_stats_inc atomic_inc
@@ -791,6 +918,7 @@ typedef void (mid_callback_t)(struct mid_q_entry *mid);
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
+ struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
__u32 pid; /* process id */
__u32 sequence_number; /* for CIFS signing */
@@ -954,6 +1082,12 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
#define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */
#define CIFS_NO_RESP 0x040 /* no response buffer required */
+/* Type of request operation */
+#define CIFS_ECHO_OP 0x080 /* echo request */
+#define CIFS_OBREAK_OP 0x0100 /* oplock break request */
+#define CIFS_NEG_OP 0x0200 /* negotiate request */
+#define CIFS_OP_MASK 0x0380 /* mask request type */
+
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
#define CIFSSEC_MAY_NTLM 0x00002
@@ -1127,6 +1261,8 @@ void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+extern mempool_t *cifs_mid_poolp;
+
/* Operations for different SMB versions */
#define SMB1_VERSION_STRING "1.0"
extern struct smb_version_operations smb1_operations;
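Two of the new helpers in this header are worth spelling out. add_credits() now carries an operation type so dialects that reserve slots (echo, oplock break) can account them separately, and cap_unix() makes the Unix-extensions test dialect-aware through vals->cap_unix. A hedged sketch of two call sites; CIFS_ECHO_OP is the flag defined earlier in this hunk:

/* Sketch: callers of the reworked credit and capability helpers. */
static void on_echo_response(struct TCP_Server_Info *server)
{
	/* the op type lets SMB2 return this credit to its echo-reserved slot */
	add_credits(server, 1, CIFS_ECHO_OP);
}

static bool wants_unix_extensions(struct cifs_ses *ses)
{
	/* true only when the negotiated dialect defines a Unix capability bit */
	return cap_unix(ses);
}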
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 0a6cbfe2761e..f1bbf8305d3a 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -37,29 +37,26 @@ extern struct smb_hdr *cifs_small_buf_get(void);
extern void cifs_small_buf_release(void *);
extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
-extern unsigned int _GetXid(void);
-extern void _FreeXid(unsigned int);
-#define GetXid() \
+extern unsigned int _get_xid(void);
+extern void _free_xid(unsigned int);
+#define get_xid() \
({ \
- int __xid = (int)_GetXid(); \
- cFYI(1, "CIFS VFS: in %s as Xid: %d with uid: %d", \
+ unsigned int __xid = _get_xid(); \
+ cFYI(1, "CIFS VFS: in %s as Xid: %u with uid: %d", \
__func__, __xid, current_fsuid()); \
__xid; \
})
-#define FreeXid(curr_xid) \
+#define free_xid(curr_xid) \
do { \
- _FreeXid(curr_xid); \
- cFYI(1, "CIFS VFS: leaving %s (xid = %d) rc = %d", \
+ _free_xid(curr_xid); \
+ cFYI(1, "CIFS VFS: leaving %s (xid = %u) rc = %d", \
__func__, curr_xid, (int)rc); \
} while (0)
extern int init_cifs_idmap(void);
extern void exit_cifs_idmap(void);
extern void cifs_destroy_idmaptrees(void);
extern char *build_path_from_dentry(struct dentry *);
-extern char *cifs_build_path_to_root(struct smb_vol *vol,
- struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon);
extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref,
@@ -68,18 +65,21 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server);
extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, mid_receive_t *receive,
mid_callback_t *callback, void *cbdata,
- bool ignore_pend);
+ const int flags);
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ ,
- int * /* bytes returned */ , const int long_op);
+ int * /* bytes returned */ , const int);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
struct mid_q_entry **);
+extern int cifs_setup_async_request(struct TCP_Server_Info *, struct kvec *,
+ unsigned int, struct mid_q_entry **);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -90,6 +90,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
+extern int cifs_reconnect(struct TCP_Server_Info *server);
extern int checkSMB(char *buf, unsigned int length);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
@@ -112,8 +113,8 @@ extern void header_assemble(struct smb_hdr *, char /* command */ ,
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifs_ses *ses,
void **request_buf);
-extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_cp);
+extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_cp);
extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
@@ -123,10 +124,10 @@ extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
struct file *file, struct tcon_link *tlink,
__u32 oplock);
-extern int cifs_posix_open(char *full_path, struct inode **pinode,
- struct super_block *sb,
- int mode, unsigned int f_flags,
- __u32 *poplock, __u16 *pnetfid, int xid);
+extern int cifs_posix_open(char *full_path, struct inode **inode,
+ struct super_block *sb, int mode,
+ unsigned int f_flags, __u32 *oplock, __u16 *netfid,
+ unsigned int xid);
void cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr);
extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
FILE_UNIX_BASIC_INFO *info,
@@ -136,14 +137,13 @@ extern struct inode *cifs_iget(struct super_block *sb,
struct cifs_fattr *fattr);
extern int cifs_get_file_info(struct file *filp);
-extern int cifs_get_inode_info(struct inode **pinode,
- const unsigned char *search_path,
- FILE_ALL_INFO *pfile_info,
- struct super_block *sb, int xid, const __u16 *pfid);
+extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
+ FILE_ALL_INFO *data, struct super_block *sb,
+ int xid, const __u16 *fid);
extern int cifs_get_file_info_unix(struct file *filp);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
- struct super_block *sb, int xid);
+ struct super_block *sb, unsigned int xid);
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
const char *path, const __u16 *pfid);
@@ -168,6 +168,7 @@ extern struct smb_vol *cifs_get_volume_info(char *mount_data,
const char *devname);
extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
extern void cifs_umount(struct cifs_sb_info *);
+extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
#if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
extern void cifs_dfs_release_automount_timer(void);
@@ -178,98 +179,97 @@ extern void cifs_dfs_release_automount_timer(void);
void cifs_proc_init(void);
void cifs_proc_clean(void);
-extern int cifs_negotiate_protocol(unsigned int xid,
- struct cifs_ses *ses);
-extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
- struct nls_table *nls_info);
-extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses);
+extern int cifs_negotiate_protocol(const unsigned int xid,
+ struct cifs_ses *ses);
+extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ struct nls_table *nls_info);
+extern int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses);
-extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
- const char *tree, struct cifs_tcon *tcon,
- const struct nls_table *);
+extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
+ const struct nls_table *);
-extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
+extern int CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
const char *searchName, const struct nls_table *nls_codepage,
__u16 *searchHandle, __u16 search_flags,
struct cifs_search_info *psrch_inf,
int map, const char dirsep);
-extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
+extern int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
__u16 searchHandle, __u16 search_flags,
struct cifs_search_info *psrch_inf);
-extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
+extern int CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
const __u16 search_handle);
-extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData);
-extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO *findData,
- int legacy /* whether to use old info level */,
- const struct nls_table *nls_codepage, int remap);
-extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO *findData,
- const struct nls_table *nls_codepage, int remap);
-
-extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_Name, FILE_ALL_INFO *data,
+ int legacy /* whether to use old info level */,
+ const struct nls_table *nls_codepage, int remap);
+extern int SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_name, FILE_ALL_INFO *data,
+ const struct nls_table *nls_codepage, int remap);
+
+extern int CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
-extern int CIFSSMBUnixQPathInfo(const int xid,
+extern int CIFSSMBUnixQPathInfo(const unsigned int xid,
struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
- const unsigned char *searchName,
- struct dfs_info3_param **target_nodes,
- unsigned int *number_of_nodes_in_array,
- const struct nls_table *nls_codepage, int remap);
+extern int CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+ const char *search_name,
+ struct dfs_info3_param **target_nodes,
+ unsigned int *num_of_nodes,
+ const struct nls_table *nls_codepage, int remap);
-extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
+extern int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
const char *old_path,
const struct nls_table *nls_codepage,
- unsigned int *pnum_referrals,
- struct dfs_info3_param **preferrals,
- int remap);
-extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
+ unsigned int *num_referrals,
+ struct dfs_info3_param **referrals, int remap);
+extern void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
struct smb_vol *vol);
-extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
+extern int SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon,
__u64 cap);
-extern int CIFSSMBQFSAttributeInfo(const int xid,
+extern int CIFSSMBQFSAttributeInfo(const unsigned int xid,
struct cifs_tcon *tcon);
-extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon);
-extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon);
-extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
-extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid,
__u32 pid_of_opener);
-extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
- bool delete_file, __u16 fid, __u32 pid_of_opener);
+extern int CIFSSMBSetFileDisposition(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ bool delete_file, __u16 fid,
+ __u32 pid_of_opener);
#if 0
-extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetAttrLegacy(unsigned int xid, struct cifs_tcon *tcon,
char *fileName, __u16 dos_attributes,
const struct nls_table *nls_codepage);
#endif /* possibly unneeded function */
-extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, __u64 size,
bool setAllocationSizeFlag,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
__u64 size, __u16 fileHandle, __u32 opener_pid,
bool AllocSizeFlag);
@@ -283,114 +283,114 @@ struct cifs_unix_set_info_args {
dev_t device;
};
-extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBUnixSetFileInfo(const unsigned int xid,
+ struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener);
-extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon,
- char *fileName,
- const struct cifs_unix_set_info_args *args,
- const struct nls_table *nls_codepage,
- int remap_special_chars);
-
-extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
- const char *newName,
- const struct nls_table *nls_codepage,
- int remap_special_chars);
-extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon,
- const char *name, const struct nls_table *nls_codepage,
- int remap_special_chars);
-extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBUnixSetPathInfo(const unsigned int xid,
+ struct cifs_tcon *tcon, const char *file_name,
+ const struct cifs_unix_set_info_args *args,
+ const struct nls_table *nls_codepage,
+ int remap);
+
+extern int CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *name, struct cifs_sb_info *cifs_sb);
+extern int CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *name, struct cifs_sb_info *cifs_sb);
+extern int CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
const char *name, __u16 type,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
const char *name,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
- int netfid, const char *target_name,
- const struct nls_table *nls_codepage,
- int remap_special_chars);
-extern int CIFSCreateHardLink(const int xid,
+extern int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *tcon,
+ int netfid, const char *target_name,
+ const struct nls_table *nls_codepage,
+ int remap_special_chars);
+extern int CIFSCreateHardLink(const unsigned int xid,
struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSUnixCreateHardLink(const int xid,
+extern int CIFSUnixCreateHardLink(const unsigned int xid,
struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSUnixCreateSymLink(const int xid,
+extern int CIFSUnixCreateSymLink(const unsigned int xid,
struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage);
-extern int CIFSSMBUnixQuerySymLink(const int xid,
+extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
struct cifs_tcon *tcon,
const unsigned char *searchName, char **syminfo,
const struct nls_table *nls_codepage);
#ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
-extern int CIFSSMBQueryReparseLinkInfo(const int xid,
+extern int CIFSSMBQueryReparseLinkInfo(const unsigned int xid,
struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage);
#endif /* temporarily unused until cifs_symlink fixed */
-extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
+extern int SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const int disposition,
const int access_flags, const int omode,
__u16 *netfid, int *pOplock, FILE_ALL_INFO *,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon,
+extern int CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
u32 posix_flags, __u64 mode, __u16 *netfid,
FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon,
const int smb_file_id);
-extern int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms,
+extern int CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf,
int *return_buf_type);
-extern int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+extern int CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, const char *buf,
const char __user *ubuf, const int long_op);
-extern int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+extern int CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, const int nvec,
const int long_op);
-extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName, __u64 *inode_number,
- const struct nls_table *nls_codepage,
- int remap_special_chars);
-
-extern int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
- const __u8 lock_type, const __u32 num_unlock,
- const __u32 num_lock, LOCKING_ANDX_RANGE *buf);
-extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
+extern int CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_name, __u64 *inode_number,
+ const struct nls_table *nls_codepage,
+ int remap);
+
+extern int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ const __u16 netfid, const __u8 lock_type,
+ const __u32 num_unlock, const __u32 num_lock,
+ LOCKING_ANDX_RANGE *buf);
+extern int CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
const __u16 netfid, const __u32 netpid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level);
-extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u32 netpid,
- const int get_flag, const __u64 len, struct file_lock *,
- const __u16 lock_type, const bool waitFlag);
-extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
+ const loff_t start_offset, const __u64 len,
+ struct file_lock *, const __u16 lock_type,
+ const bool waitFlag);
+extern int CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon);
extern int CIFSSMBEcho(struct TCP_Server_Info *server);
-extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses);
+extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
extern struct cifs_ses *sesInfoAlloc(void);
extern void sesInfoFree(struct cifs_ses *);
@@ -398,7 +398,7 @@ extern struct cifs_tcon *tconInfoAlloc(void);
extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
-extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
+extern int cifs_sign_smbv(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
__u32 *);
extern int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
struct TCP_Server_Info *server,
@@ -416,46 +416,46 @@ extern int calc_lanman_hash(const char *password, const char *cryptkey,
bool encrypt, char *lnm_session_key);
#endif /* CIFS_WEAK_PW_HASH */
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
-extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBNotify(const unsigned int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *file, int multishot,
const struct nls_table *nls_codepage);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
-extern int CIFSSMBCopy(int xid,
+extern int CIFSSMBCopy(unsigned int xid,
struct cifs_tcon *source_tcon,
const char *fromName,
const __u16 target_tid,
const char *toName, const int flags,
const struct nls_table *nls_codepage,
int remap_special_chars);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
+extern ssize_t CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
const unsigned char *ea_name, char *EAData,
size_t bufsize, const struct nls_table *nls_codepage,
int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const char *ea_name,
const void *ea_value, const __u16 ea_value_len,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
-extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
+extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
struct cifs_ntsd *, __u32, int);
-extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
+extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
+extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid);
+ struct cifs_sb_info *cifs_sb, unsigned int xid);
extern int mdfour(unsigned char *, unsigned char *, int);
extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
const struct nls_table *codepage);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4ee522b3f66f..f0cf934ba877 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -112,24 +112,29 @@ cifs_kmap_unlock(void)
#define cifs_kmap_unlock() do { ; } while(0)
#endif /* CONFIG_HIGHMEM */
-/* Mark as invalid, all open files on tree connections since they
- were closed when session to server was lost */
-static void mark_open_files_invalid(struct cifs_tcon *pTcon)
+/*
+ * Mark as invalid, all open files on tree connections since they
+ * were closed when session to server was lost.
+ */
+void
+cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
struct cifsFileInfo *open_file = NULL;
struct list_head *tmp;
struct list_head *tmp1;
-/* list all files open on tree connection and mark them invalid */
+ /* list all files open on tree connection and mark them invalid */
spin_lock(&cifs_file_list_lock);
- list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
+ list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
open_file->oplock_break_cancelled = true;
}
spin_unlock(&cifs_file_list_lock);
- /* BB Add call to invalidate_inodes(sb) for all superblocks mounted
- to this tcon */
+ /*
+ * BB Add call to invalidate_inodes(sb) for all superblocks mounted
+ * to this tcon.
+ */
}
/* reconnect the socket, tcon, and smb session if needed */
@@ -209,7 +214,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
goto out;
}
- mark_open_files_invalid(tcon);
+ cifs_mark_open_files_invalid(tcon);
rc = CIFSTCon(0, ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex);
cFYI(1, "reconnect tcon rc = %d", rc);
@@ -388,15 +393,8 @@ vt2_err:
return -EINVAL;
}
-static inline void inc_rfc1001_len(void *pSMB, int count)
-{
- struct smb_hdr *hdr = (struct smb_hdr *)pSMB;
-
- be32_add_cpu(&hdr->smb_buf_length, count);
-}
-
int
-CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
+CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
{
NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr;
@@ -480,7 +478,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
rc = -EOPNOTSUPP;
goto neg_err_exit;
}
- server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
+ server->sec_mode = le16_to_cpu(rsp->SecurityMode);
server->maxReq = min_t(unsigned int,
le16_to_cpu(rsp->MaxMpxCount),
cifs_max_pending);
@@ -694,7 +692,7 @@ neg_err_exit:
}
int
-CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
+CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
{
struct smb_hdr *smb_buffer;
int rc = 0;
@@ -744,7 +742,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = mid->callback_data;
DeleteMidQEntry(mid);
- add_credits(server, 1);
+ add_credits(server, 1, CIFS_ECHO_OP);
}
int
@@ -771,7 +769,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
- server, true);
+ server, CIFS_ASYNC_OP | CIFS_ECHO_OP);
if (rc)
cFYI(1, "Echo request failed: %d", rc);
@@ -781,7 +779,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
}
int
-CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
+CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
{
LOGOFF_ANDX_REQ *pSMB;
int rc = 0;
@@ -828,8 +826,9 @@ session_already_dead:
}
int
-CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
- __u16 type, const struct nls_table *nls_codepage, int remap)
+CIFSPOSIXDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __u16 type,
+ const struct nls_table *nls_codepage, int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -894,7 +893,7 @@ PsxDelete:
cFYI(1, "Posix delete returned %d", rc);
cifs_buf_release(pSMB);
- cifs_stats_inc(&tcon->num_deletes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
if (rc == -EAGAIN)
goto PsxDelete;
@@ -903,8 +902,9 @@ PsxDelete:
}
int
-CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
- const struct nls_table *nls_codepage, int remap)
+CIFSSMBDelFile(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, const struct nls_table *nls_codepage,
+ int remap)
{
DELETE_FILE_REQ *pSMB = NULL;
DELETE_FILE_RSP *pSMBr = NULL;
@@ -936,7 +936,7 @@ DelFileRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_deletes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_deletes);
if (rc)
cFYI(1, "Error in RMFile = %d", rc);
@@ -948,14 +948,15 @@ DelFileRetry:
}
int
-CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
- const struct nls_table *nls_codepage, int remap)
+CIFSSMBRmDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
{
DELETE_DIRECTORY_REQ *pSMB = NULL;
DELETE_DIRECTORY_RSP *pSMBr = NULL;
int rc = 0;
int bytes_returned;
int name_len;
+ int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
cFYI(1, "In CIFSSMBRmDir");
RmDirRetry:
@@ -965,14 +966,15 @@ RmDirRetry:
return rc;
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName,
- PATH_MAX, nls_codepage, remap);
+ name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
+ PATH_MAX, cifs_sb->local_nls,
+ remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(dirName, PATH_MAX);
+ name_len = strnlen(name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->DirName, dirName, name_len);
+ strncpy(pSMB->DirName, name, name_len);
}
pSMB->BufferFormat = 0x04;
@@ -980,7 +982,7 @@ RmDirRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_rmdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_rmdirs);
if (rc)
cFYI(1, "Error in RMDir = %d", rc);
@@ -991,14 +993,15 @@ RmDirRetry:
}
int
-CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
- const char *name, const struct nls_table *nls_codepage, int remap)
+CIFSSMBMkDir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
{
int rc = 0;
CREATE_DIRECTORY_REQ *pSMB = NULL;
CREATE_DIRECTORY_RSP *pSMBr = NULL;
int bytes_returned;
int name_len;
+ int remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
cFYI(1, "In CIFSSMBMkDir");
MkDirRetry:
@@ -1009,7 +1012,8 @@ MkDirRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
- PATH_MAX, nls_codepage, remap);
+ PATH_MAX, cifs_sb->local_nls,
+ remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve check for buffer overruns BB */
@@ -1023,7 +1027,7 @@ MkDirRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_mkdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_mkdirs);
if (rc)
cFYI(1, "Error in Mkdir = %d", rc);
@@ -1034,10 +1038,11 @@ MkDirRetry:
}
int
-CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
- __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
- __u32 *pOplock, const char *name,
- const struct nls_table *nls_codepage, int remap)
+CIFSPOSIXCreate(const unsigned int xid, struct cifs_tcon *tcon,
+ __u32 posix_flags, __u64 mode, __u16 *netfid,
+ FILE_UNIX_BASIC_INFO *pRetData, __u32 *pOplock,
+ const char *name, const struct nls_table *nls_codepage,
+ int remap)
{
TRANSACTION2_SPI_REQ *pSMB = NULL;
TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -1145,9 +1150,9 @@ psx_create_err:
cifs_buf_release(pSMB);
if (posix_flags & SMB_O_DIRECTORY)
- cifs_stats_inc(&tcon->num_posixmkdirs);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_posixmkdirs);
else
- cifs_stats_inc(&tcon->num_posixopens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_posixopens);
if (rc == -EAGAIN)
goto PsxCreat;
@@ -1200,7 +1205,7 @@ access_flags_to_smbopen_mode(const int access_flags)
}
int
-SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
+SMBLegacyOpen(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1268,7 +1273,7 @@ OldOpenRetry:
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_opens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
@@ -1307,7 +1312,7 @@ OldOpenRetry:
}
int
-CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
+CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const int openDisposition,
const int access_flags, const int create_options, __u16 *netfid,
int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1381,7 +1386,7 @@ openRetry:
/* long_op set to 1 to allow for oplock break timeouts */
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *)pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_opens);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_opens);
if (rc) {
cFYI(1, "Error in Open = %d", rc);
} else {
@@ -1571,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
/* result already set, check signature */
if (server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
- server, mid->sequence_number + 1))
- cERROR(1, "Unexpected SMB signature");
+ int rc = 0;
+
+ rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
+ server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned "
+ "error = %d", rc);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->bytes);
@@ -1589,7 +1599,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
- add_credits(server, 1);
+ add_credits(server, 1, 0);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
@@ -1645,10 +1655,10 @@ cifs_async_readv(struct cifs_readdata *rdata)
kref_get(&rdata->refcount);
rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
cifs_readv_receive, cifs_readv_callback,
- rdata, false);
+ rdata, 0);
if (rc == 0)
- cifs_stats_inc(&tcon->num_reads);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
else
kref_put(&rdata->refcount, cifs_readdata_release);
@@ -1657,8 +1667,8 @@ cifs_async_readv(struct cifs_readdata *rdata)
}
int
-CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
- char **buf, int *pbuf_type)
+CIFSSMBRead(const unsigned int xid, struct cifs_io_parms *io_parms,
+ unsigned int *nbytes, char **buf, int *pbuf_type)
{
int rc = -EACCES;
READ_REQ *pSMB = NULL;
@@ -1718,7 +1728,7 @@ CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
&resp_buf_type, CIFS_LOG_ERROR);
- cifs_stats_inc(&tcon->num_reads);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
pSMBr = (READ_RSP *)iov[0].iov_base;
if (rc) {
cERROR(1, "Send error in read = %d", rc);
@@ -1769,7 +1779,7 @@ CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
int
-CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, const char *buf,
const char __user *ubuf, const int long_op)
{
@@ -1870,7 +1880,7 @@ CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, long_op);
- cifs_stats_inc(&tcon->num_writes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
if (rc) {
cFYI(1, "Send error in write = %d", rc);
} else {
@@ -2036,7 +2046,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
- add_credits(tcon->ses->server, 1);
+ add_credits(tcon->ses->server, 1, 0);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -2118,10 +2128,10 @@ cifs_async_writev(struct cifs_writedata *wdata)
kref_get(&wdata->refcount);
rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
- NULL, cifs_writev_callback, wdata, false);
+ NULL, cifs_writev_callback, wdata, 0);
if (rc == 0)
- cifs_stats_inc(&tcon->num_writes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
else
kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2136,7 +2146,7 @@ async_writev_out:
}
int
-CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec,
const int long_op)
{
@@ -2211,7 +2221,7 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
rc = SendReceive2(xid, tcon->ses, iov, n_vec + 1, &resp_buf_type,
long_op);
- cifs_stats_inc(&tcon->num_writes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
if (rc) {
cFYI(1, "Send error Write2 = %d", rc);
} else if (resp_buf_type == 0) {
@@ -2244,8 +2254,8 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
return rc;
}
-int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
- const __u8 lock_type, const __u32 num_unlock,
+int cifs_lockv(const unsigned int xid, struct cifs_tcon *tcon,
+ const __u16 netfid, const __u8 lock_type, const __u32 num_unlock,
const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
{
int rc = 0;
@@ -2277,7 +2287,7 @@ int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
iov[1].iov_base = (char *)buf;
iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
- cifs_stats_inc(&tcon->num_locks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
if (rc)
cFYI(1, "Send error in cifs_lockv = %d", rc);
@@ -2286,7 +2296,7 @@ int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
}
int
-CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
+CIFSSMBLock(const unsigned int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u32 netpid, const __u64 len,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
@@ -2296,7 +2306,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
LOCK_REQ *pSMB = NULL;
/* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */
int bytes_returned;
- int timeout = 0;
+ int flags = 0;
__u16 count;
cFYI(1, "CIFSSMBLock timeout %d numLock %d", (int)waitFlag, numLock);
@@ -2306,10 +2316,11 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
return rc;
if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
- timeout = CIFS_ASYNC_OP; /* no response expected */
+ /* no response expected */
+ flags = CIFS_ASYNC_OP | CIFS_OBREAK_OP;
pSMB->Timeout = 0;
} else if (waitFlag) {
- timeout = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
+ flags = CIFS_BLOCKING_OP; /* blocking operation, no timeout */
pSMB->Timeout = cpu_to_le32(-1);/* blocking - do not time out */
} else {
pSMB->Timeout = 0;
@@ -2342,10 +2353,10 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
(struct smb_hdr *) pSMB, &bytes_returned);
cifs_small_buf_release(pSMB);
} else {
- rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, timeout);
+ rc = SendReceiveNoRsp(xid, tcon->ses, (char *)pSMB, flags);
/* SMB buffer freed by function above */
}
- cifs_stats_inc(&tcon->num_locks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
if (rc)
cFYI(1, "Send error in Lock = %d", rc);
@@ -2355,10 +2366,11 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
}
int
-CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
- const __u16 smb_file_id, const __u32 netpid, const int get_flag,
- const __u64 len, struct file_lock *pLockData,
- const __u16 lock_type, const bool waitFlag)
+CIFSSMBPosixLock(const unsigned int xid, struct cifs_tcon *tcon,
+ const __u16 smb_file_id, const __u32 netpid,
+ const loff_t start_offset, const __u64 len,
+ struct file_lock *pLockData, const __u16 lock_type,
+ const bool waitFlag)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
@@ -2372,9 +2384,6 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
cFYI(1, "Posix Lock");
- if (pLockData == NULL)
- return -EINVAL;
-
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
@@ -2395,7 +2404,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
- if (get_flag)
+ if (pLockData)
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
else
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -2417,7 +2426,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
pSMB->Timeout = 0;
parm_data->pid = cpu_to_le32(netpid);
- parm_data->start = cpu_to_le64(pLockData->fl_start);
+ parm_data->start = cpu_to_le64(start_offset);
parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
pSMB->DataOffset = cpu_to_le16(offset);
@@ -2441,7 +2450,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
if (rc) {
cFYI(1, "Send error in Posix Lock = %d", rc);
- } else if (get_flag) {
+ } else if (pLockData) {
/* lock structure can be returned on get */
__u16 data_offset;
__u16 data_count;
@@ -2493,7 +2502,7 @@ plk_err_exit:
int
-CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
+CIFSSMBClose(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
CLOSE_REQ *pSMB = NULL;
@@ -2510,7 +2519,7 @@ CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->LastWriteTime = 0xFFFFFFFF;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
- cifs_stats_inc(&tcon->num_closes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_closes);
if (rc) {
if (rc != -EINTR) {
/* EINTR is expected when user ctl-c to kill app */
@@ -2526,7 +2535,7 @@ CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
}
int
-CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
+CIFSSMBFlush(const unsigned int xid, struct cifs_tcon *tcon, int smb_file_id)
{
int rc = 0;
FLUSH_REQ *pSMB = NULL;
@@ -2539,7 +2548,7 @@ CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
pSMB->FileID = (__u16) smb_file_id;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) pSMB, 0);
- cifs_stats_inc(&tcon->num_flushes);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_flushes);
if (rc)
cERROR(1, "Send error in Flush = %d", rc);
@@ -2547,7 +2556,7 @@ CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
}
int
-CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
+CIFSSMBRename(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2602,7 +2611,7 @@ renameRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_renames);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_renames);
if (rc)
cFYI(1, "Send error in rename = %d", rc);
@@ -2614,7 +2623,7 @@ renameRetry:
return rc;
}
-int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
+int CIFSSMBRenameOpenFile(const unsigned int xid, struct cifs_tcon *pTcon,
int netfid, const char *target_name,
const struct nls_table *nls_codepage, int remap)
{
@@ -2683,7 +2692,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&pTcon->num_t2renames);
+ cifs_stats_inc(&pTcon->stats.cifs_stats.num_t2renames);
if (rc)
cFYI(1, "Send error in Rename (by file handle) = %d", rc);
@@ -2696,9 +2705,9 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
}
int
-CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
- const __u16 target_tid, const char *toName, const int flags,
- const struct nls_table *nls_codepage, int remap)
+CIFSSMBCopy(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fromName, const __u16 target_tid, const char *toName,
+ const int flags, const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
COPY_REQ *pSMB = NULL;
@@ -2764,7 +2773,7 @@ copyRetry:
}
int
-CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
+CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage)
{
@@ -2840,7 +2849,7 @@ createSymLinkRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_symlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_symlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo create symlink = %d", rc);
@@ -2853,7 +2862,7 @@ createSymLinkRetry:
}
int
-CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
+CIFSUnixCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2926,7 +2935,7 @@ createHardLinkRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_hardlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
if (rc)
cFYI(1, "Send error in SetPathInfo (hard link) = %d", rc);
@@ -2938,7 +2947,7 @@ createHardLinkRetry:
}
int
-CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
+CIFSCreateHardLink(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap)
{
@@ -2998,7 +3007,7 @@ winCreateHardLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_hardlinks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_hardlinks);
if (rc)
cFYI(1, "Send error in hard link (NT rename) = %d", rc);
@@ -3010,7 +3019,7 @@ winCreateHardLinkRetry:
}
int
-CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
+CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage)
{
@@ -3115,7 +3124,7 @@ querySymLinkRetry:
* it is not compiled in by default until callers fixed up and more tested.
*/
int
-CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBQueryReparseLinkInfo(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *symlinkinfo, const int buflen, __u16 fid,
const struct nls_table *nls_codepage)
@@ -3352,7 +3361,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
}
int
-CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
+CIFSSMBGetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type,
const struct nls_table *nls_codepage, int remap)
@@ -3416,7 +3425,7 @@ queryAclRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_acl_get);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
if (rc) {
cFYI(1, "Send error in Query POSIX ACL = %d", rc);
} else {
@@ -3441,7 +3450,7 @@ queryAclRetry:
}
int
-CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
+CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *fileName,
const char *local_acl, const int buflen,
const int acl_type,
@@ -3521,7 +3530,7 @@ setACLerrorExit:
/* BB fix tabs in this function FIXME BB */
int
-CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
+CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
int rc = 0;
@@ -3696,7 +3705,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
/* Get Security Descriptor (by handle) from remote server for a file or dir */
int
-CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
+CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd **acl_inf, __u32 *pbuflen)
{
int rc = 0;
@@ -3727,7 +3736,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
0);
- cifs_stats_inc(&tcon->num_acl_get);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_acl_get);
if (rc) {
cFYI(1, "Send error in QuerySecDesc = %d", rc);
} else { /* decode response */
@@ -3788,7 +3797,7 @@ qsec_out:
}
int
-CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
+CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
{
__u16 byte_count, param_count, data_count, param_offset, data_offset;
@@ -3852,10 +3861,10 @@ setCifsAclRetry:
/* Legacy Query Path Information call for lookup to old servers such
as Win9x/WinME */
-int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO *pFinfo,
- const struct nls_table *nls_codepage, int remap)
+int
+SMBQueryInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_name, FILE_ALL_INFO *data,
+ const struct nls_table *nls_codepage, int remap)
{
QUERY_INFORMATION_REQ *pSMB;
QUERY_INFORMATION_RSP *pSMBr;
@@ -3863,7 +3872,7 @@ int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
int bytes_returned;
int name_len;
- cFYI(1, "In SMBQPath path %s", searchName);
+ cFYI(1, "In SMBQPath path %s", search_name);
QInfRetry:
rc = smb_init(SMB_COM_QUERY_INFORMATION, 0, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -3873,14 +3882,14 @@ QInfRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUTF16((__le16 *) pSMB->FileName,
- searchName, PATH_MAX, nls_codepage,
+ search_name, PATH_MAX, nls_codepage,
remap);
name_len++; /* trailing null */
name_len *= 2;
} else {
- name_len = strnlen(searchName, PATH_MAX);
+ name_len = strnlen(search_name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ strncpy(pSMB->FileName, search_name, name_len);
}
pSMB->BufferFormat = 0x04;
name_len++; /* account for buffer type byte */
@@ -3891,23 +3900,23 @@ QInfRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, "Send error in QueryInfo = %d", rc);
- } else if (pFinfo) {
+ } else if (data) {
struct timespec ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
/* decode response */
/* BB FIXME - add time zone adjustment BB */
- memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
+ memset(data, 0, sizeof(FILE_ALL_INFO));
ts.tv_nsec = 0;
ts.tv_sec = time;
/* decode time fields */
- pFinfo->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
- pFinfo->LastWriteTime = pFinfo->ChangeTime;
- pFinfo->LastAccessTime = 0;
- pFinfo->AllocationSize =
+ data->ChangeTime = cpu_to_le64(cifs_UnixTimeToNT(ts));
+ data->LastWriteTime = data->ChangeTime;
+ data->LastAccessTime = 0;
+ data->AllocationSize =
cpu_to_le64(le32_to_cpu(pSMBr->size));
- pFinfo->EndOfFile = pFinfo->AllocationSize;
- pFinfo->Attributes =
+ data->EndOfFile = data->AllocationSize;
+ data->Attributes =
cpu_to_le32(le16_to_cpu(pSMBr->attr));
} else
rc = -EIO; /* bad buffer passed in */
@@ -3921,7 +3930,7 @@ QInfRetry:
}
int
-CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_ALL_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3988,13 +3997,12 @@ QFileInfoRetry:
}
int
-CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName,
- FILE_ALL_INFO *pFindData,
+CIFSSMBQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_name, FILE_ALL_INFO *data,
int legacy /* old style infolevel */,
const struct nls_table *nls_codepage, int remap)
{
-/* level 263 SMB_QUERY_FILE_ALL_INFO */
+ /* level 263 SMB_QUERY_FILE_ALL_INFO */
TRANSACTION2_QPI_REQ *pSMB = NULL;
TRANSACTION2_QPI_RSP *pSMBr = NULL;
int rc = 0;
@@ -4002,7 +4010,7 @@ CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
int name_len;
__u16 params, byte_count;
-/* cFYI(1, "In QPathInfo path %s", searchName); */
+ /* cFYI(1, "In QPathInfo path %s", search_name); */
QPathInfoRetry:
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
@@ -4011,14 +4019,14 @@ QPathInfoRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+ cifsConvertToUTF16((__le16 *) pSMB->FileName, search_name,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
+ name_len = strnlen(search_name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ strncpy(pSMB->FileName, search_name, name_len);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4063,20 +4071,21 @@ QPathInfoRetry:
else if (legacy && get_bcc(&pSMBr->hdr) < 24)
rc = -EIO; /* 24 or 26 expected but we do not read
last field */
- else if (pFindData) {
+ else if (data) {
int size;
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- /* On legacy responses we do not read the last field,
- EAsize, fortunately since it varies by subdialect and
- also note it differs on Set vs. Get, ie two bytes or 4
- bytes depending but we don't care here */
+ /*
+ * On legacy responses we do not read the last field,
+ * EAsize, fortunately since it varies by subdialect and
+ * also note it differs on Set vs Get, ie two bytes or 4
+ * bytes depending but we don't care here.
+ */
if (legacy)
size = sizeof(FILE_INFO_STANDARD);
else
size = sizeof(FILE_ALL_INFO);
- memcpy((char *) pFindData,
- (char *) &pSMBr->hdr.Protocol +
+ memcpy((char *) data, (char *) &pSMBr->hdr.Protocol +
data_offset, size);
} else
rc = -ENOMEM;
@@ -4089,7 +4098,7 @@ QPathInfoRetry:
}
int
-CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBUnixQFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
{
struct smb_t2_qfi_req *pSMB = NULL;
@@ -4137,7 +4146,7 @@ UnixQFileInfoRetry:
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
- cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response. "
"Unix Extensions can be disabled on mount "
"by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
@@ -4158,7 +4167,7 @@ UnixQFileInfoRetry:
}
int
-CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBUnixQPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName,
FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap)
@@ -4223,7 +4232,7 @@ UnixQPathInfoRetry:
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
if (rc || get_bcc(&pSMBr->hdr) < sizeof(FILE_UNIX_BASIC_INFO)) {
- cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response.\n"
+ cERROR(1, "Malformed FILE_UNIX_BASIC_INFO response. "
"Unix Extensions can be disabled on mount "
"by specifying the nosfu mount option.");
rc = -EIO; /* bad smb */
@@ -4244,7 +4253,7 @@ UnixQPathInfoRetry:
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
-CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
+CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
__u16 *pnetfid, __u16 search_flags,
@@ -4329,7 +4338,7 @@ findFirstRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_ffirst);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_ffirst);
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
@@ -4389,8 +4398,9 @@ findFirstRetry:
return rc;
}
-int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
- __u16 search_flags, struct cifs_search_info *psrch_inf)
+int CIFSFindNext(const unsigned int xid, struct cifs_tcon *tcon,
+ __u16 searchHandle, __u16 search_flags,
+ struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4455,7 +4465,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- cifs_stats_inc(&tcon->num_fnext);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_fnext);
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
@@ -4524,7 +4534,7 @@ FNext2_err_exit:
}
int
-CIFSFindClose(const int xid, struct cifs_tcon *tcon,
+CIFSFindClose(const unsigned int xid, struct cifs_tcon *tcon,
const __u16 searchHandle)
{
int rc = 0;
@@ -4546,7 +4556,7 @@ CIFSFindClose(const int xid, struct cifs_tcon *tcon,
if (rc)
cERROR(1, "Send error in FindClose = %d", rc);
- cifs_stats_inc(&tcon->num_fclose);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_fclose);
/* Since session is dead, search handle closed on server already */
if (rc == -EAGAIN)
@@ -4556,9 +4566,8 @@ CIFSFindClose(const int xid, struct cifs_tcon *tcon,
}
int
-CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
- const unsigned char *searchName,
- __u64 *inode_number,
+CIFSGetSrvInodeNumber(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *search_name, __u64 *inode_number,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
@@ -4567,7 +4576,7 @@ CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
int name_len, bytes_returned;
__u16 params, byte_count;
- cFYI(1, "In GetSrvInodeNum for %s", searchName);
+ cFYI(1, "In GetSrvInodeNum for %s", search_name);
if (tcon == NULL)
return -ENODEV;
@@ -4580,14 +4589,14 @@ GetInodeNumberRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
cifsConvertToUTF16((__le16 *) pSMB->FileName,
- searchName, PATH_MAX, nls_codepage,
+ search_name, PATH_MAX, nls_codepage,
remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
+ name_len = strnlen(search_name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ strncpy(pSMB->FileName, search_name, name_len);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -4675,7 +4684,7 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
if (*num_of_nodes < 1) {
cERROR(1, "num_referrals: must be at least > 0,"
- "but we get num_referrals = %d\n", *num_of_nodes);
+ "but we get num_referrals = %d", *num_of_nodes);
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}
@@ -4692,14 +4701,14 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
data_end = (char *)(&(pSMBr->PathConsumed)) +
le16_to_cpu(pSMBr->t2.DataCount);
- cFYI(1, "num_referrals: %d dfs flags: 0x%x ...\n",
+ cFYI(1, "num_referrals: %d dfs flags: 0x%x ...",
*num_of_nodes,
le32_to_cpu(pSMBr->DFSFlags));
*target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
*num_of_nodes, GFP_KERNEL);
if (*target_nodes == NULL) {
- cERROR(1, "Failed to allocate buffer for target_nodes\n");
+ cERROR(1, "Failed to allocate buffer for target_nodes");
rc = -ENOMEM;
goto parse_DFS_referrals_exit;
}
@@ -4763,9 +4772,8 @@ parse_DFS_referrals_exit:
}
int
-CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
- const unsigned char *searchName,
- struct dfs_info3_param **target_nodes,
+CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
+ const char *search_name, struct dfs_info3_param **target_nodes,
unsigned int *num_of_nodes,
const struct nls_table *nls_codepage, int remap)
{
@@ -4779,7 +4787,7 @@ CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
*num_of_nodes = 0;
*target_nodes = NULL;
- cFYI(1, "In GetDFSRefer the path %s", searchName);
+ cFYI(1, "In GetDFSRefer the path %s", search_name);
if (ses == NULL)
return -ENODEV;
getDFSRetry:
@@ -4802,14 +4810,14 @@ getDFSRetry:
pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
name_len =
cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
- searchName, PATH_MAX, nls_codepage,
+ search_name, PATH_MAX, nls_codepage,
remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
+ name_len = strnlen(search_name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->RequestFileName, searchName, name_len);
+ strncpy(pSMB->RequestFileName, search_name, name_len);
}
if (ses->server) {
@@ -4865,7 +4873,7 @@ getDFSRetry:
/* parse returned result into more usable form */
rc = parse_DFS_referrals(pSMBr, num_of_nodes,
target_nodes, nls_codepage, remap,
- searchName);
+ search_name);
GetDFSRefExit:
cifs_buf_release(pSMB);
@@ -4878,7 +4886,8 @@ GetDFSRefExit:
/* Query File System Info such as free space to old servers such as Win 9x */
int
-SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
+SMBOldQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ struct kstatfs *FSData)
{
/* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4957,7 +4966,8 @@ oldQFSInfoRetry:
}
int
-CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
+CIFSSMBQFSInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ struct kstatfs *FSData)
{
/* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -5036,7 +5046,7 @@ QFSInfoRetry:
}
int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
+CIFSSMBQFSAttributeInfo(const unsigned int xid, struct cifs_tcon *tcon)
{
/* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -5106,7 +5116,7 @@ QFSAttributeRetry:
}
int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
+CIFSSMBQFSDeviceInfo(const unsigned int xid, struct cifs_tcon *tcon)
{
/* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -5177,7 +5187,7 @@ QFSDeviceRetry:
}
int
-CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
+CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon)
{
/* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */
TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -5247,7 +5257,7 @@ QFSUnixRetry:
}
int
-CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
+CIFSSMBSetFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon, __u64 cap)
{
/* level 0x200 SMB_SET_CIFS_UNIX_INFO */
TRANSACTION2_SETFSI_REQ *pSMB = NULL;
@@ -5321,7 +5331,7 @@ SETFSUnixRetry:
int
-CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData)
{
/* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */
@@ -5414,8 +5424,8 @@ QFSPosixRetry:
in Samba which this routine can run into */
int
-CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
- __u64 size, bool SetAllocation,
+CIFSSMBSetEOF(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __u64 size, bool SetAllocation,
const struct nls_table *nls_codepage, int remap)
{
struct smb_com_transaction2_spi_req *pSMB = NULL;
@@ -5503,7 +5513,7 @@ SetEOFRetry:
}
int
-CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
+CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon, __u64 size,
__u16 fid, __u32 pid_of_opener, bool SetAllocation)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5585,7 +5595,7 @@ CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
time and resort to the original setpathinfo level which takes the ancient
DOS time format with 2 second granularity */
int
-CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5648,7 +5658,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
}
int
-CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
+CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon,
bool delete_file, __u16 fid, __u32 pid_of_opener)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
@@ -5704,7 +5714,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
}
int
-CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage, int remap)
{
@@ -5788,7 +5798,7 @@ SetTimesRetry:
handling it anyway and NT4 was what we thought it would be needed for
Do not delete it until we prove whether needed for Win9x though */
int
-CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
+CIFSSMBSetAttrLegacy(unsigned int xid, struct cifs_tcon *tcon, char *fileName,
__u16 dos_attrs, const struct nls_table *nls_codepage)
{
SETATTR_REQ *pSMB = NULL;
@@ -5876,7 +5886,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
}
int
-CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
+CIFSSMBUnixSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon,
const struct cifs_unix_set_info_args *args,
u16 fid, u32 pid_of_opener)
{
@@ -5940,7 +5950,8 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
}
int
-CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
+CIFSSMBUnixSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *file_name,
const struct cifs_unix_set_info_args *args,
const struct nls_table *nls_codepage, int remap)
{
@@ -5961,14 +5972,14 @@ setPermsRetry:
if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
name_len =
- cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+ cifsConvertToUTF16((__le16 *) pSMB->FileName, file_name,
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
+ name_len = strnlen(file_name, PATH_MAX);
name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ strncpy(pSMB->FileName, file_name, name_len);
}
params = 6 + name_len;
@@ -6027,7 +6038,7 @@ setPermsRetry:
* the data isn't copied to it, but the length is returned.
*/
ssize_t
-CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
+CIFSSMBQAllEAs(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *ea_name,
char *EAData, size_t buf_size,
const struct nls_table *nls_codepage, int remap)
@@ -6210,8 +6221,8 @@ QAllEAsOut:
}
int
-CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
- const char *ea_name, const void *ea_value,
+CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, const char *ea_name, const void *ea_value,
const __u16 ea_value_len, const struct nls_table *nls_codepage,
int remap)
{
@@ -6337,7 +6348,7 @@ SetEARetry:
* incompatible for network fs clients, we could instead simply
* expose this config flag by adding a future cifs (and smb2) notify ioctl.
*/
-int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
+int CIFSSMBNotify(const unsigned int xid, struct cifs_tcon *tcon,
const int notify_subdirs, const __u16 netfid,
__u32 filter, struct file *pfile, int multishot,
const struct nls_table *nls_codepage)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 94b7788c3189..6df6fa14cba8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -56,9 +56,6 @@
#define CIFS_PORT 445
#define RFC1001_PORT 139
-/* SMB echo "timeout" -- FIXME: tunable? */
-#define SMB_ECHO_INTERVAL (60 * HZ)
-
extern mempool_t *cifs_req_poolp;
/* FIXME: should these be tunable? */
@@ -238,8 +235,8 @@ static const match_table_t cifs_mount_option_tokens = {
enum {
Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
- Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
- Opt_sec_nontlm, Opt_sec_lanman,
+ Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2,
+ Opt_sec_ntlmv2i, Opt_sec_lanman,
Opt_sec_none,
Opt_sec_err
@@ -253,8 +250,9 @@ static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_ntlmssp, "ntlmssp" },
{ Opt_ntlm, "ntlm" },
{ Opt_sec_ntlmi, "ntlmi" },
+ { Opt_sec_ntlmv2, "nontlm" },
+ { Opt_sec_ntlmv2, "ntlmv2" },
{ Opt_sec_ntlmv2i, "ntlmv2i" },
- { Opt_sec_nontlm, "nontlm" },
{ Opt_sec_lanman, "lanman" },
{ Opt_sec_none, "none" },
@@ -296,7 +294,7 @@ static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
* reconnect tcp session
* wake up waiters on reconnection? - (not needed currently)
*/
-static int
+int
cifs_reconnect(struct TCP_Server_Info *server)
{
int rc = 0;
@@ -316,6 +314,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
+#ifdef CONFIG_CIFS_SMB2
+ server->max_read = 0;
+#endif
cFYI(1, "Reconnecting tcp session");
@@ -394,143 +395,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
return rc;
}
-/*
- return codes:
- 0 not a transact2, or all data present
- >0 transact2 with that much data missing
- -EINVAL = invalid transact2
-
- */
-static int check2ndT2(char *buf)
-{
- struct smb_hdr *pSMB = (struct smb_hdr *)buf;
- struct smb_t2_rsp *pSMBt;
- int remaining;
- __u16 total_data_size, data_in_this_rsp;
-
- if (pSMB->Command != SMB_COM_TRANSACTION2)
- return 0;
-
- /* check for plausible wct, bcc and t2 data and parm sizes */
- /* check for parm and data offset going beyond end of smb */
- if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cFYI(1, "invalid transact2 word count");
- return -EINVAL;
- }
-
- pSMBt = (struct smb_t2_rsp *)pSMB;
-
- total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
- data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
-
- if (total_data_size == data_in_this_rsp)
- return 0;
- else if (total_data_size < data_in_this_rsp) {
- cFYI(1, "total data %d smaller than data in frame %d",
- total_data_size, data_in_this_rsp);
- return -EINVAL;
- }
-
- remaining = total_data_size - data_in_this_rsp;
-
- cFYI(1, "missing %d bytes from transact2, check next response",
- remaining);
- if (total_data_size > CIFSMaxBufSize) {
- cERROR(1, "TotalDataSize %d is over maximum buffer %d",
- total_data_size, CIFSMaxBufSize);
- return -EINVAL;
- }
- return remaining;
-}
-
-static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
-{
- struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
- struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
- char *data_area_of_tgt;
- char *data_area_of_src;
- int remaining;
- unsigned int byte_count, total_in_tgt;
- __u16 tgt_total_cnt, src_total_cnt, total_in_src;
-
- src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
- tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
-
- if (tgt_total_cnt != src_total_cnt)
- cFYI(1, "total data count of primary and secondary t2 differ "
- "source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
-
- total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
-
- remaining = tgt_total_cnt - total_in_tgt;
-
- if (remaining < 0) {
- cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
- "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
- return -EPROTO;
- }
-
- if (remaining == 0) {
- /* nothing to do, ignore */
- cFYI(1, "no more data remains");
- return 0;
- }
-
- total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
- if (remaining < total_in_src)
- cFYI(1, "transact2 2nd response contains too much data");
-
- /* find end of first SMB data area */
- data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
- get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
-
- /* validate target area */
- data_area_of_src = (char *)&pSMBs->hdr.Protocol +
- get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
-
- data_area_of_tgt += total_in_tgt;
-
- total_in_tgt += total_in_src;
- /* is the result too big for the field? */
- if (total_in_tgt > USHRT_MAX) {
- cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
- return -EPROTO;
- }
- put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
-
- /* fix up the BCC */
- byte_count = get_bcc(target_hdr);
- byte_count += total_in_src;
- /* is the result too big for the field? */
- if (byte_count > USHRT_MAX) {
- cFYI(1, "coalesced BCC too large (%u)", byte_count);
- return -EPROTO;
- }
- put_bcc(byte_count, target_hdr);
-
- byte_count = be32_to_cpu(target_hdr->smb_buf_length);
- byte_count += total_in_src;
- /* don't allow buffer to overflow */
- if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
- return -ENOBUFS;
- }
- target_hdr->smb_buf_length = cpu_to_be32(byte_count);
-
- /* copy second buffer into end of first buffer */
- memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
-
- if (remaining != total_in_src) {
- /* more responses to go */
- cFYI(1, "waiting for more secondary responses");
- return 1;
- }
-
- /* we are done */
- cFYI(1, "found the last secondary response");
- return 0;
-}
-
static void
cifs_echo_request(struct work_struct *work)
{
@@ -539,15 +403,17 @@ cifs_echo_request(struct work_struct *work)
struct TCP_Server_Info, echo.work);
/*
- * We cannot send an echo until the NEGOTIATE_PROTOCOL request is
- * done, which is indicated by maxBuf != 0. Also, no need to ping if
- * we got a response recently
+ * We cannot send an echo if it is disabled or until the
+ * NEGOTIATE_PROTOCOL request is done, which is indicated by
+ * server->ops->need_neg() == true. Also, no need to ping if
+ * we got a response recently.
*/
- if (server->maxBuf == 0 ||
+ if (!server->ops->need_neg || server->ops->need_neg(server) ||
+ (server->ops->can_echo && !server->ops->can_echo(server)) ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
- rc = CIFSSMBEcho(server);
+ rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
if (rc)
cFYI(1, "Unable to send echo request to server: %s",
server->hostname);
@@ -803,29 +669,9 @@ static void
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
char *buf, int malformed)
{
- if (malformed == 0 && check2ndT2(buf) > 0) {
- mid->multiRsp = true;
- if (mid->resp_buf) {
- /* merge response - fix up 1st*/
- malformed = coalesce_t2(buf, mid->resp_buf);
- if (malformed > 0)
- return;
-
- /* All parts received or packet is malformed. */
- mid->multiEnd = true;
- return dequeue_mid(mid, malformed);
- }
- if (!server->large_buf) {
- /*FIXME: switch to already allocated largebuf?*/
- cERROR(1, "1st trans2 resp needs bigbuf");
- } else {
- /* Have first buffer */
- mid->resp_buf = buf;
- mid->large_buf = true;
- server->bigbuf = NULL;
- }
+ if (server->ops->check_trans2 &&
+ server->ops->check_trans2(mid, server, buf, malformed))
return;
- }
mid->resp_buf = buf;
mid->large_buf = server->large_buf;
/* Was previous buf put in mpx struct for multi-rsp? */
@@ -1167,7 +1013,7 @@ static int cifs_parse_security_flavors(char *value,
case Opt_sec_ntlmi:
vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
break;
- case Opt_sec_nontlm:
+ case Opt_sec_ntlmv2:
vol->secFlg |= CIFSSEC_MAY_NTLMV2;
break;
case Opt_sec_ntlmv2i:
@@ -2409,10 +2255,10 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
static void
cifs_put_smb_ses(struct cifs_ses *ses)
{
- int xid;
+ unsigned int xid;
struct TCP_Server_Info *server = ses->server;
- cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count);
+ cFYI(1, "%s: ses_count=%d", __func__, ses->ses_count);
spin_lock(&cifs_tcp_ses_lock);
if (--ses->ses_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -2422,10 +2268,10 @@ cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- if (ses->status == CifsGood) {
- xid = GetXid();
- CIFSSMBLogoff(xid, ses);
- _FreeXid(xid);
+ if (ses->status == CifsGood && server->ops->logoff) {
+ xid = get_xid();
+ server->ops->logoff(xid, ses);
+ _free_xid(xid);
}
sesInfoFree(ses);
cifs_put_tcp_session(server);
@@ -2562,12 +2408,13 @@ static bool warned_on_ntlm; /* globals init to false automatically */
static struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
{
- int rc = -ENOMEM, xid;
+ int rc = -ENOMEM;
+ unsigned int xid;
struct cifs_ses *ses;
struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
- xid = GetXid();
+ xid = get_xid();
ses = cifs_find_smb_ses(server, volume_info);
if (ses) {
@@ -2579,7 +2426,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
mutex_unlock(&ses->session_mutex);
/* problem -- put our ses reference */
cifs_put_smb_ses(ses);
- FreeXid(xid);
+ free_xid(xid);
return ERR_PTR(rc);
}
if (ses->need_reconnect) {
@@ -2590,7 +2437,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
mutex_unlock(&ses->session_mutex);
/* problem -- put our reference */
cifs_put_smb_ses(ses);
- FreeXid(xid);
+ free_xid(xid);
return ERR_PTR(rc);
}
}
@@ -2598,7 +2445,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
/* existing SMB ses has a server reference already */
cifs_put_tcp_session(server);
- FreeXid(xid);
+ free_xid(xid);
return ses;
}
@@ -2657,12 +2504,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- FreeXid(xid);
+ free_xid(xid);
return ses;
get_ses_fail:
sesInfoFree(ses);
- FreeXid(xid);
+ free_xid(xid);
return ERR_PTR(rc);
}
@@ -2697,10 +2544,10 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
static void
cifs_put_tcon(struct cifs_tcon *tcon)
{
- int xid;
+ unsigned int xid;
struct cifs_ses *ses = tcon->ses;
- cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
+ cFYI(1, "%s: tc_count=%d", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
if (--tcon->tc_count > 0) {
spin_unlock(&cifs_tcp_ses_lock);
@@ -2710,9 +2557,10 @@ cifs_put_tcon(struct cifs_tcon *tcon)
list_del_init(&tcon->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
- xid = GetXid();
- CIFSSMBTDis(xid, tcon);
- _FreeXid(xid);
+ xid = get_xid();
+ if (ses->server->ops->tree_disconnect)
+ ses->server->ops->tree_disconnect(xid, tcon);
+ _free_xid(xid);
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon);
@@ -2736,6 +2584,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
return tcon;
}
+ if (!ses->server->ops->tree_connect) {
+ rc = -ENOSYS;
+ goto out_fail;
+ }
+
tcon = tconInfoAlloc();
if (tcon == NULL) {
rc = -ENOMEM;
@@ -2758,13 +2611,15 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
goto out_fail;
}
- /* BB Do we need to wrap session_mutex around
- * this TCon call and Unix SetFS as
- * we do on SessSetup and reconnect? */
- xid = GetXid();
- rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls);
- FreeXid(xid);
- cFYI(1, "CIFS Tcon rc = %d", rc);
+ /*
+ * BB Do we need to wrap session_mutex around this TCon call and Unix
+ * SetFS as we do on SessSetup and reconnect?
+ */
+ xid = get_xid();
+ rc = ses->server->ops->tree_connect(xid, ses, volume_info->UNC, tcon,
+ volume_info->local_nls);
+ free_xid(xid);
+ cFYI(1, "Tcon rc = %d", rc);
if (rc)
goto out_fail;
@@ -2773,10 +2628,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
cFYI(1, "DFS disabled (%d)", tcon->Flags);
}
tcon->seal = volume_info->seal;
- /* we can have only one retry value for a connection
- to a share so for resources mounted more than once
- to the same server share the last value passed in
- for the retry flag is used */
+ /*
+ * We can have only one retry value for a connection to a share, so for
+ * resources mounted more than once to the same server share, the last
+ * value passed in for the retry flag is used.
+ */
tcon->retry = volume_info->retry;
tcon->nocase = volume_info->nocase;
tcon->local_lease = volume_info->local_lease;
@@ -2910,37 +2766,42 @@ out:
}
int
-get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
- const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
- struct dfs_info3_param **preferrals, int remap)
+get_dfs_path(const unsigned int xid, struct cifs_ses *ses, const char *old_path,
+ const struct nls_table *nls_codepage, unsigned int *num_referrals,
+ struct dfs_info3_param **referrals, int remap)
{
char *temp_unc;
int rc = 0;
- *pnum_referrals = 0;
- *preferrals = NULL;
+ if (!ses->server->ops->tree_connect || !ses->server->ops->get_dfs_refer)
+ return -ENOSYS;
+
+ *num_referrals = 0;
+ *referrals = NULL;
- if (pSesInfo->ipc_tid == 0) {
+ if (ses->ipc_tid == 0) {
temp_unc = kmalloc(2 /* for slashes */ +
- strnlen(pSesInfo->serverName,
- SERVER_NAME_LEN_WITH_NULL * 2)
- + 1 + 4 /* slash IPC$ */ + 2,
- GFP_KERNEL);
+ strnlen(ses->serverName, SERVER_NAME_LEN_WITH_NULL * 2)
+ + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL);
if (temp_unc == NULL)
return -ENOMEM;
temp_unc[0] = '\\';
temp_unc[1] = '\\';
- strcpy(temp_unc + 2, pSesInfo->serverName);
- strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$");
- rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage);
- cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid);
+ strcpy(temp_unc + 2, ses->serverName);
+ strcpy(temp_unc + 2 + strlen(ses->serverName), "\\IPC$");
+ rc = ses->server->ops->tree_connect(xid, ses, temp_unc, NULL,
+ nls_codepage);
+ cFYI(1, "Tcon rc = %d ipc_tid = %d", rc, ses->ipc_tid);
kfree(temp_unc);
}
if (rc == 0)
- rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals,
- pnum_referrals, nls_codepage, remap);
- /* BB map targetUNCs to dfs_info3 structures, here or
- in CIFSGetDFSRefer BB */
+ rc = ses->server->ops->get_dfs_refer(xid, ses, old_path,
+ referrals, num_referrals,
+ nls_codepage, remap);
+ /*
+ * BB - map targetUNCs to dfs_info3 structures, here or in
+ * ses->server->ops->get_dfs_refer.
+ */
return rc;
}
@@ -3009,11 +2870,11 @@ bind_socket(struct TCP_Server_Info *server)
saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
if (saddr6->sin6_family == AF_INET6)
cERROR(1, "cifs: "
- "Failed to bind to: %pI6c, error: %d\n",
+ "Failed to bind to: %pI6c, error: %d",
&saddr6->sin6_addr, rc);
else
cERROR(1, "cifs: "
- "Failed to bind to: %pI4, error: %d\n",
+ "Failed to bind to: %pI4, error: %d",
&saddr4->sin_addr.s_addr, rc);
}
}
@@ -3209,7 +3070,7 @@ ip_connect(struct TCP_Server_Info *server)
return generic_ip_connect(server);
}
-void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
+void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info)
{
/* if we are reconnecting then should we check to see if
@@ -3304,9 +3165,9 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
cFYI(1, "resetting capabilities failed");
} else
cERROR(1, "Negotiating Unix capabilities "
- "with the server failed. Consider "
- "mounting with the Unix Extensions\n"
- "disabled, if problems are found, "
+ "with the server failed. Consider "
+ "mounting with the Unix Extensions "
+ "disabled if problems are found "
"by specifying the nounix mount "
"option.");
@@ -3540,30 +3401,6 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
return rsize;
}
-static int
-is_path_accessible(int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path)
-{
- int rc;
- FILE_ALL_INFO *pfile_info;
-
- pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
- if (pfile_info == NULL)
- return -ENOMEM;
-
- rc = CIFSSMBQPathInfo(xid, tcon, full_path, pfile_info,
- 0 /* not legacy */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
-
- if (rc == -EOPNOTSUPP || rc == -EINVAL)
- rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
- cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- kfree(pfile_info);
- return rc;
-}
-
static void
cleanup_volume_info_contents(struct smb_vol *volume_info)
{
@@ -3627,7 +3464,7 @@ build_unc_path_to_root(const struct smb_vol *vol,
* determine whether there were referrals.
*/
static int
-expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
+expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
int check_prefix)
{
@@ -3643,7 +3480,7 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
/* For DFS paths, skip the first '\' of the UNC */
ref_path = check_prefix ? full_path + 1 : volume_info->UNC + 1;
- rc = get_dfs_path(xid, pSesInfo , ref_path, cifs_sb->local_nls,
+ rc = get_dfs_path(xid, ses, ref_path, cifs_sb->local_nls,
&num_referrals, &referrals,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -3737,10 +3574,10 @@ int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
int rc;
- int xid;
- struct cifs_ses *pSesInfo;
+ unsigned int xid;
+ struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct TCP_Server_Info *srvTcp;
+ struct TCP_Server_Info *server;
char *full_path;
struct tcon_link *tlink;
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -3757,39 +3594,39 @@ try_mount_again:
if (referral_walks_count) {
if (tcon)
cifs_put_tcon(tcon);
- else if (pSesInfo)
- cifs_put_smb_ses(pSesInfo);
+ else if (ses)
+ cifs_put_smb_ses(ses);
- FreeXid(xid);
+ free_xid(xid);
}
#endif
rc = 0;
tcon = NULL;
- pSesInfo = NULL;
- srvTcp = NULL;
+ ses = NULL;
+ server = NULL;
full_path = NULL;
tlink = NULL;
- xid = GetXid();
+ xid = get_xid();
/* get a reference to a tcp session */
- srvTcp = cifs_get_tcp_session(volume_info);
- if (IS_ERR(srvTcp)) {
- rc = PTR_ERR(srvTcp);
+ server = cifs_get_tcp_session(volume_info);
+ if (IS_ERR(server)) {
+ rc = PTR_ERR(server);
bdi_destroy(&cifs_sb->bdi);
goto out;
}
/* get a reference to a SMB session */
- pSesInfo = cifs_get_smb_ses(srvTcp, volume_info);
- if (IS_ERR(pSesInfo)) {
- rc = PTR_ERR(pSesInfo);
- pSesInfo = NULL;
+ ses = cifs_get_smb_ses(server, volume_info);
+ if (IS_ERR(ses)) {
+ rc = PTR_ERR(ses);
+ ses = NULL;
goto mount_fail_check;
}
/* search for existing tcon to this server share */
- tcon = cifs_get_tcon(pSesInfo, volume_info);
+ tcon = cifs_get_tcon(ses, volume_info);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
@@ -3797,7 +3634,7 @@ try_mount_again:
}
/* tell server which Unix caps we support */
- if (tcon->ses->capabilities & CAP_UNIX) {
+ if (cap_unix(tcon->ses)) {
/* reset of caps checks mount to see if unix extensions
disabled for just this mount */
reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info);
@@ -3810,11 +3647,9 @@ try_mount_again:
} else
tcon->unix_ext = 0; /* server does not support them */
- /* do not care if following two calls succeed - informational */
- if (!tcon->ipc) {
- CIFSSMBQFSDeviceInfo(xid, tcon);
- CIFSSMBQFSAttributeInfo(xid, tcon);
- }
+ /* do not care if a following call succeeds - informational */
+ if (!tcon->ipc && server->ops->qfs_tcon)
+ server->ops->qfs_tcon(xid, tcon);
cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
@@ -3832,8 +3667,8 @@ remote_path_check:
* Chase the referral if found, otherwise continue normally.
*/
if (referral_walks_count == 0) {
- int refrc = expand_dfs_referral(xid, pSesInfo, volume_info,
- cifs_sb, false);
+ int refrc = expand_dfs_referral(xid, ses, volume_info, cifs_sb,
+ false);
if (!refrc) {
referral_walks_count++;
goto try_mount_again;
@@ -3843,13 +3678,18 @@ remote_path_check:
/* check if a whole path is not remote */
if (!rc && tcon) {
+ if (!server->ops->is_path_accessible) {
+ rc = -ENOSYS;
+ goto mount_fail_check;
+ }
/* build_path_to_root works only when we have a valid tcon */
- full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
+ full_path = build_path_to_root(volume_info, cifs_sb, tcon);
if (full_path == NULL) {
rc = -ENOMEM;
goto mount_fail_check;
}
- rc = is_path_accessible(xid, tcon, cifs_sb, full_path);
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+ full_path);
if (rc != 0 && rc != -EREMOTE) {
kfree(full_path);
goto mount_fail_check;
@@ -3871,8 +3711,7 @@ remote_path_check:
goto mount_fail_check;
}
- rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb,
- true);
+ rc = expand_dfs_referral(xid, ses, volume_info, cifs_sb, true);
if (!rc) {
referral_walks_count++;
@@ -3894,7 +3733,7 @@ remote_path_check:
goto mount_fail_check;
}
- tlink->tl_uid = pSesInfo->linux_uid;
+ tlink->tl_uid = ses->linux_uid;
tlink->tl_tcon = tcon;
tlink->tl_time = jiffies;
set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
@@ -3915,15 +3754,15 @@ mount_fail_check:
/* up accidentally freeing someone elses tcon struct */
if (tcon)
cifs_put_tcon(tcon);
- else if (pSesInfo)
- cifs_put_smb_ses(pSesInfo);
+ else if (ses)
+ cifs_put_smb_ses(ses);
else
- cifs_put_tcp_session(srvTcp);
+ cifs_put_tcp_session(server);
bdi_destroy(&cifs_sb->bdi);
}
out:
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -3932,7 +3771,7 @@ out:
* pointer may be NULL.
*/
int
-CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage)
{
@@ -4116,24 +3955,22 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
kfree(cifs_sb);
}
-int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
+int
+cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
{
int rc = 0;
struct TCP_Server_Info *server = ses->server;
+ if (!server->ops->need_neg || !server->ops->negotiate)
+ return -ENOSYS;
+
/* only send once per connect */
- if (server->maxBuf != 0)
+ if (!server->ops->need_neg(server))
return 0;
set_credits(server, 1);
- rc = CIFSSMBNegotiate(xid, ses);
- if (rc == -EAGAIN) {
- /* retry only once on 1st time connection */
- set_credits(server, 1);
- rc = CIFSSMBNegotiate(xid, ses);
- if (rc == -EAGAIN)
- rc = -EHOSTDOWN;
- }
+
+ rc = server->ops->negotiate(xid, ses);
if (rc == 0) {
spin_lock(&GlobalMid_Lock);
if (server->tcpStatus == CifsNeedNegotiate)
@@ -4141,28 +3978,29 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
else
rc = -EHOSTDOWN;
spin_unlock(&GlobalMid_Lock);
-
}
return rc;
}
-
-int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
- struct nls_table *nls_info)
+int
+cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ struct nls_table *nls_info)
{
- int rc = 0;
+ int rc = -ENOSYS;
struct TCP_Server_Info *server = ses->server;
ses->flags = 0;
ses->capabilities = server->capabilities;
if (linuxExtEnabled == 0)
- ses->capabilities &= (~CAP_UNIX);
+ ses->capabilities &= (~server->vals->cap_unix);
cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
server->sec_mode, server->capabilities, server->timeAdj);
- rc = CIFS_SessSetup(xid, ses, nls_info);
+ if (server->ops->sess_setup)
+ rc = server->ops->sess_setup(xid, ses, nls_info);
+
if (rc) {
cERROR(1, "Send error in SessSetup = %d", rc);
} else {
@@ -4262,7 +4100,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
goto out;
}
- if (ses->capabilities & CAP_UNIX)
+ if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, vol_info);
out:
kfree(vol_info->username);
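Across connect.c the patch routes protocol work through the per-dialect server->ops table (need_neg, negotiate, echo, logoff, tree_connect, tree_disconnect, get_dfs_refer, is_path_accessible, sess_setup, qfs_tcon) and returns -ENOSYS when a dialect leaves a hook unset. A condensed sketch of that guard-then-dispatch shape; ->frobnicate is an invented hook standing in for the real ones:

/* Sketch only: ->frobnicate does not exist; it stands in for the hooks named above. */
static int cifs_frobnicate(const unsigned int xid, struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;

	/* dialect did not provide the operation: fail the way the patch does */
	if (!server->ops->frobnicate)
		return -ENOSYS;

	return server->ops->frobnicate(xid, ses);
}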
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a180265a10b5..781025be48bc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -157,10 +157,10 @@ check_name(struct dentry *direntry)
/* Inode operations in similar order to how they appear in Linux file fs.h */
-static int cifs_do_create(struct inode *inode, struct dentry *direntry,
- int xid, struct tcon_link *tlink, unsigned oflags,
- umode_t mode, __u32 *oplock, __u16 *fileHandle,
- int *created)
+static int
+cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ struct tcon_link *tlink, unsigned oflags, umode_t mode,
+ __u32 *oplock, __u16 *fileHandle, int *created)
{
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
@@ -182,8 +182,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry,
goto out;
}
- if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
- !tcon->broken_posix_open &&
+ if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_open(full_path, &newinode,
@@ -357,19 +356,12 @@ cifs_create_get_file_info:
cifs_create_set_dentry:
if (rc != 0) {
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+ CIFSSMBClose(xid, tcon, *fileHandle);
goto out;
}
d_drop(direntry);
d_add(direntry, newinode);
- /* ENOENT for create? How weird... */
- rc = -ENOENT;
- if (!newinode) {
- CIFSSMBClose(xid, tcon, *fileHandle);
- goto out;
- }
- rc = 0;
-
out:
kfree(buf);
kfree(full_path);
@@ -382,12 +374,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
int *opened)
{
int rc;
- int xid;
+ unsigned int xid;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
__u16 fileHandle;
__u32 oplock;
- struct file *filp;
struct cifsFileInfo *pfile_info;
/* Posix open is only called (at lookup time) for file create now. For
@@ -412,15 +403,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
if (rc)
return rc;
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
inode, direntry->d_name.name, direntry);
tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
- filp = ERR_CAST(tlink);
if (IS_ERR(tlink))
- goto free_xid;
+ goto out_free_xid;
tcon = tlink_tcon(tlink);
@@ -436,17 +426,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
goto out;
}
- pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock);
+ pfile_info = cifs_new_fileinfo(fileHandle, file, tlink, oplock);
if (pfile_info == NULL) {
CIFSSMBClose(xid, tcon, fileHandle);
- fput(filp);
rc = -ENOMEM;
}
out:
cifs_put_tlink(tlink);
-free_xid:
- FreeXid(xid);
+out_free_xid:
+ free_xid(xid);
return rc;
}
@@ -454,7 +443,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
bool excl)
{
int rc;
- int xid = GetXid();
+ unsigned int xid = get_xid();
/*
* BB below access is probably too much for mknod to request
* but we have to do query and setpathinfo so requesting
@@ -474,7 +463,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
rc = PTR_ERR(tlink);
if (IS_ERR(tlink))
- goto free_xid;
+ goto out_free_xid;
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
&oplock, &fileHandle, &created);
@@ -482,9 +471,8 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
CIFSSMBClose(xid, tlink_tcon(tlink), fileHandle);
cifs_put_tlink(tlink);
-free_xid:
- FreeXid(xid);
-
+out_free_xid:
+ free_xid(xid);
return rc;
}
@@ -492,7 +480,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
dev_t device_number)
{
int rc = -EPERM;
- int xid;
+ unsigned int xid;
int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
@@ -516,7 +504,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -564,7 +552,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
if (buf == NULL) {
kfree(full_path);
rc = -ENOMEM;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -614,7 +602,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
mknod_out:
kfree(full_path);
kfree(buf);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
@@ -623,7 +611,7 @@ struct dentry *
cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
unsigned int flags)
{
- int xid;
+ unsigned int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
@@ -631,7 +619,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
struct inode *newInode = NULL;
char *full_path = NULL;
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
parent_dir_inode, direntry->d_name.name, direntry);
@@ -641,7 +629,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
- FreeXid(xid);
+ free_xid(xid);
return (struct dentry *)tlink;
}
pTcon = tlink_tcon(tlink);
@@ -695,7 +683,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
lookup_out:
kfree(full_path);
cifs_put_tlink(tlink);
- FreeXid(xid);
+ free_xid(xid);
return ERR_PTR(rc);
}
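dir.c renames the cleanup label to out_free_xid so every exit path, including the early tlink error, releases the xid taken by get_xid() exactly once. A small sketch of that unwind shape; do_lookup_work() is invented and only the error handling mirrors the patch:

/* Sketch only: do_lookup_work() is hypothetical. */
static int cifs_example_op(struct inode *inode, struct dentry *dentry)
{
	unsigned int xid = get_xid();
	struct tcon_link *tlink;
	int rc;

	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto out_free_xid;	/* no tlink reference to drop */
	}

	rc = do_lookup_work(xid, tlink_tcon(tlink), dentry);
	cifs_put_tlink(tlink);
out_free_xid:
	free_xid(xid);
	return rc;
}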
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 513adbc211d7..9154192b0683 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -107,7 +107,7 @@ static inline int cifs_get_disposition(unsigned int flags)
int cifs_posix_open(char *full_path, struct inode **pinode,
struct super_block *sb, int mode, unsigned int f_flags,
- __u32 *poplock, __u16 *pnetfid, int xid)
+ __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
int rc;
FILE_UNIX_BASIC_INFO *presp_data;
@@ -170,7 +170,7 @@ posix_open_ret:
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
- __u16 *pnetfid, int xid)
+ __u16 *pnetfid, unsigned int xid)
{
int rc;
int desiredAccess;
@@ -284,6 +284,15 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
+struct cifsFileInfo *
+cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+{
+ spin_lock(&cifs_file_list_lock);
+ cifsFileInfo_get_locked(cifs_file);
+ spin_unlock(&cifs_file_list_lock);
+ return cifs_file;
+}
+
/*
* Release a reference on the file private data. This may involve closing
* the filehandle out on the server. Must be called without holding
@@ -324,11 +333,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
cancel_work_sync(&cifs_file->oplock_break);
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
- int xid, rc;
-
- xid = GetXid();
+ unsigned int xid;
+ int rc;
+ xid = get_xid();
rc = CIFSSMBClose(xid, tcon, cifs_file->netfid);
- FreeXid(xid);
+ free_xid(xid);
}
/* Delete any outstanding lock records. We'll lose them when the file
@@ -350,7 +359,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
int cifs_open(struct inode *inode, struct file *file)
{
int rc = -EACCES;
- int xid;
+ unsigned int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
@@ -360,12 +369,12 @@ int cifs_open(struct inode *inode, struct file *file)
bool posix_open_ok = false;
__u16 netfid;
- xid = GetXid();
+ xid = get_xid();
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
- FreeXid(xid);
+ free_xid(xid);
return PTR_ERR(tlink);
}
tcon = tlink_tcon(tlink);
@@ -385,9 +394,8 @@ int cifs_open(struct inode *inode, struct file *file)
oplock = 0;
if (!tcon->broken_posix_open && tcon->unix_ext &&
- (tcon->ses->capabilities & CAP_UNIX) &&
- (CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
/* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->mnt_file_mode /* ignored */,
@@ -445,7 +453,7 @@ int cifs_open(struct inode *inode, struct file *file)
out:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
@@ -464,7 +472,7 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
{
int rc = -EACCES;
- int xid;
+ unsigned int xid;
__u32 oplock;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *tcon;
@@ -476,12 +484,12 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
int create_options = CREATE_NOT_DIR;
__u16 netfid;
- xid = GetXid();
+ xid = get_xid();
mutex_lock(&pCifsFile->fh_mutex);
if (!pCifsFile->invalidHandle) {
mutex_unlock(&pCifsFile->fh_mutex);
rc = 0;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -497,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
if (full_path == NULL) {
rc = -ENOMEM;
mutex_unlock(&pCifsFile->fh_mutex);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -509,10 +517,9 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
else
oplock = 0;
- if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
+ if (tcon->unix_ext && cap_unix(tcon->ses) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
/*
* O_CREAT, O_EXCL and O_TRUNC already had their effect on the
* original open. Must mask them off for a reopen.
@@ -583,7 +590,7 @@ reopen_success:
reopen_error_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -601,13 +608,13 @@ int cifs_close(struct inode *inode, struct file *file)
int cifs_closedir(struct inode *inode, struct file *file)
{
int rc = 0;
- int xid;
+ unsigned int xid;
struct cifsFileInfo *pCFileStruct = file->private_data;
char *ptmp;
cFYI(1, "Closedir inode = 0x%p", inode);
- xid = GetXid();
+ xid = get_xid();
if (pCFileStruct) {
struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
@@ -639,7 +646,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
file->private_data = NULL;
}
/* BB can we lock the filestruct while this is going on? */
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -872,7 +879,8 @@ try_again:
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
- int xid, rc = 0, stored_rc;
+ unsigned int xid;
+ int rc = 0, stored_rc;
struct cifsLockInfo *li, *tmp;
struct cifs_tcon *tcon;
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -882,13 +890,13 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
int i;
- xid = GetXid();
+ xid = get_xid();
tcon = tlink_tcon(cfile->tlink);
mutex_lock(&cinode->lock_mutex);
if (!cinode->can_cache_brlcks) {
mutex_unlock(&cinode->lock_mutex);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -899,7 +907,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
max_buf = tcon->ses->server->maxBuf;
if (!max_buf) {
mutex_unlock(&cinode->lock_mutex);
- FreeXid(xid);
+ free_xid(xid);
return -EINVAL;
}
@@ -908,7 +916,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
if (!buf) {
mutex_unlock(&cinode->lock_mutex);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -947,7 +955,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
mutex_unlock(&cinode->lock_mutex);
kfree(buf);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -977,12 +985,12 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
struct lock_to_push *lck, *tmp;
__u64 length;
- xid = GetXid();
+ xid = get_xid();
mutex_lock(&cinode->lock_mutex);
if (!cinode->can_cache_brlcks) {
mutex_unlock(&cinode->lock_mutex);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -1039,12 +1047,10 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
unlock_flocks();
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
- struct file_lock tmp_lock;
int stored_rc;
- tmp_lock.fl_start = lck->offset;
stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
- 0, lck->length, &tmp_lock,
+ lck->offset, lck->length, NULL,
lck->type, 0);
if (stored_rc)
rc = stored_rc;
@@ -1056,7 +1062,7 @@ out:
cinode->can_cache_brlcks = false;
mutex_unlock(&cinode->lock_mutex);
- FreeXid(xid);
+ free_xid(xid);
return rc;
err_out:
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -1072,7 +1078,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- if ((tcon->ses->capabilities & CAP_UNIX) &&
+ if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
return cifs_push_posix_locks(cfile);
@@ -1128,7 +1134,7 @@ cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
}
static int
-cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
+cifs_mandatory_lock(unsigned int xid, struct cifsFileInfo *cfile, __u64 offset,
__u64 length, __u32 type, int lock, int unlock, bool wait)
{
return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
@@ -1138,7 +1144,7 @@ cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
- bool wait_flag, bool posix_lck, int xid)
+ bool wait_flag, bool posix_lck, unsigned int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
@@ -1159,7 +1165,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
else
posix_lock_type = CIFS_WRLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
- 1 /* get */, length, flock,
+ flock->fl_start, length, flock,
posix_lock_type, wait_flag);
return rc;
}
@@ -1223,7 +1229,8 @@ cifs_free_llist(struct list_head *llist)
}
static int
-cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
+cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
+ unsigned int xid)
{
int rc = 0, stored_rc;
int types[] = {LOCKING_ANDX_LARGE_FILES,
@@ -1328,7 +1335,8 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
- bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
+ bool wait_flag, bool posix_lck, int lock, int unlock,
+ unsigned int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
@@ -1353,7 +1361,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
- 0 /* set */, length, flock,
+ flock->fl_start, length, NULL,
posix_lock_type, wait_flag);
goto out;
}
@@ -1402,7 +1410,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
__u32 type;
rc = -EACCES;
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
"end: %lld", cmd, flock->fl_flags, flock->fl_type,
@@ -1418,7 +1426,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
netfid = cfile->netfid;
cinode = CIFS_I(file->f_path.dentry->d_inode);
- if ((tcon->ses->capabilities & CAP_UNIX) &&
+ if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
@@ -1428,7 +1436,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
*/
if (IS_GETLK(cmd)) {
rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -1437,13 +1445,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
* if no lock or unlock then nothing to do since we do not
* know what it is
*/
- FreeXid(xid);
+ free_xid(xid);
return -EOPNOTSUPP;
}
rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
xid);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -1470,7 +1478,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
unsigned int total_written;
struct cifs_sb_info *cifs_sb;
struct cifs_tcon *pTcon;
- int xid;
+ unsigned int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
struct cifs_io_parms io_parms;
@@ -1482,7 +1490,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
pTcon = tlink_tcon(open_file->tlink);
- xid = GetXid();
+ xid = get_xid();
for (total_written = 0; write_size > total_written;
total_written += bytes_written) {
@@ -1518,7 +1526,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
if (total_written)
break;
else {
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
} else {
@@ -1538,7 +1546,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
spin_unlock(&dentry->d_inode->i_lock);
}
mark_inode_dirty_sync(dentry->d_inode);
- FreeXid(xid);
+ free_xid(xid);
return total_written;
}
@@ -1563,7 +1571,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
- cifsFileInfo_get(open_file);
+ cifsFileInfo_get_locked(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
} /* else might as well continue, and look for
@@ -1615,7 +1623,7 @@ refind_writable:
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
- cifsFileInfo_get(open_file);
+ cifsFileInfo_get_locked(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
} else {
@@ -1632,7 +1640,7 @@ refind_writable:
if (inv_file) {
any_available = false;
- cifsFileInfo_get(inv_file);
+ cifsFileInfo_get_locked(inv_file);
}
spin_unlock(&cifs_file_list_lock);
@@ -1937,9 +1945,9 @@ static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
int rc;
- int xid;
+ unsigned int xid;
- xid = GetXid();
+ xid = get_xid();
/* BB add check for wbc flags */
page_cache_get(page);
if (!PageUptodate(page))
@@ -1968,7 +1976,7 @@ retry_write:
SetPageUptodate(page);
end_page_writeback(page);
page_cache_release(page);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -2007,9 +2015,9 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
if (!PageUptodate(page)) {
char *page_data;
unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
- int xid;
+ unsigned int xid;
- xid = GetXid();
+ xid = get_xid();
/* this is probably better than directly calling
partialpage_write since in this function the file handle is
known which we might as well leverage */
@@ -2020,7 +2028,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
/* if (rc < 0) should we set writebehind rc? */
kunmap(page);
- FreeXid(xid);
+ free_xid(xid);
} else {
rc = copied;
pos += copied;
@@ -2043,7 +2051,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
- int xid;
+ unsigned int xid;
int rc = 0;
struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
@@ -2055,7 +2063,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
return rc;
mutex_lock(&inode->i_mutex);
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "Sync file - name: %s datasync: 0x%x",
file->f_path.dentry->d_name.name, datasync);
@@ -2072,14 +2080,14 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
- FreeXid(xid);
+ free_xid(xid);
mutex_unlock(&inode->i_mutex);
return rc;
}
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- int xid;
+ unsigned int xid;
int rc = 0;
struct cifs_tcon *tcon;
struct cifsFileInfo *smbfile = file->private_data;
@@ -2091,7 +2099,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return rc;
mutex_lock(&inode->i_mutex);
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "Sync file - name: %s datasync: 0x%x",
file->f_path.dentry->d_name.name, datasync);
@@ -2100,7 +2108,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
- FreeXid(xid);
+ free_xid(xid);
mutex_unlock(&inode->i_mutex);
return rc;
}
@@ -2744,15 +2752,15 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
unsigned int current_read_size;
unsigned int rsize;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *pTcon;
- int xid;
+ struct cifs_tcon *tcon;
+ unsigned int xid;
char *current_offset;
struct cifsFileInfo *open_file;
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__u32 pid;
- xid = GetXid();
+ xid = get_xid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
/* FIXME: set up handlers for larger reads and/or convert to async */
@@ -2760,11 +2768,11 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
if (file->private_data == NULL) {
rc = -EBADF;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
open_file = file->private_data;
- pTcon = tlink_tcon(open_file->tlink);
+ tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -2778,11 +2786,12 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
read_size > total_read;
total_read += bytes_read, current_offset += bytes_read) {
current_read_size = min_t(uint, read_size - total_read, rsize);
-
- /* For windows me and 9x we do not want to request more
- than it negotiated since it will refuse the read then */
- if ((pTcon->ses) &&
- !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
+ /*
+ * For windows me and 9x we do not want to request more than it
+ * negotiated since it will refuse the read then.
+ */
+ if ((tcon->ses) && !(tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_large_files)) {
current_read_size = min_t(uint, current_read_size,
CIFSMaxBufSize);
}
@@ -2795,7 +2804,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
}
io_parms.netfid = open_file->netfid;
io_parms.pid = pid;
- io_parms.tcon = pTcon;
+ io_parms.tcon = tcon;
io_parms.offset = *poffset;
io_parms.length = current_read_size;
rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
@@ -2805,15 +2814,15 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
if (total_read) {
break;
} else {
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
} else {
- cifs_stats_bytes_read(pTcon, total_read);
+ cifs_stats_bytes_read(tcon, total_read);
*poffset += bytes_read;
}
}
- FreeXid(xid);
+ free_xid(xid);
return total_read;
}
@@ -2840,7 +2849,7 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
int rc, xid;
struct inode *inode = file->f_path.dentry->d_inode;
- xid = GetXid();
+ xid = get_xid();
if (!CIFS_I(inode)->clientCanCacheRead) {
rc = cifs_invalidate_mapping(inode);
@@ -2851,7 +2860,7 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
rc = generic_file_mmap(file, vma);
if (rc == 0)
vma->vm_ops = &cifs_file_vm_ops;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -2859,17 +2868,17 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
int rc, xid;
- xid = GetXid();
+ xid = get_xid();
rc = cifs_revalidate_file(file);
if (rc) {
cFYI(1, "Validation prior to mmap failed, error=%d", rc);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
rc = generic_file_mmap(file, vma);
if (rc == 0)
vma->vm_ops = &cifs_file_vm_ops;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -3082,8 +3091,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
break;
}
- spin_lock(&cifs_file_list_lock);
- spin_unlock(&cifs_file_list_lock);
rdata->cfile = cifsFileInfo_get(open_file);
rdata->mapping = mapping;
rdata->offset = offset;
@@ -3159,24 +3166,24 @@ static int cifs_readpage(struct file *file, struct page *page)
{
loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
int rc = -EACCES;
- int xid;
+ unsigned int xid;
- xid = GetXid();
+ xid = get_xid();
if (file->private_data == NULL) {
rc = -EBADF;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
- cFYI(1, "readpage %p at offset %d 0x%x\n",
+ cFYI(1, "readpage %p at offset %d 0x%x",
page, (int)offset, (int)offset);
rc = cifs_readpage_worker(file, page, &offset);
unlock_page(page);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
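file.c splits the open-file reference helpers: the new cifsFileInfo_get() takes cifs_file_list_lock itself, while the find_readable_file()/find_writable_file() scans, which already hold that lock, move to cifsFileInfo_get_locked(). A sketch of how the two variants divide the work; scan_open_files() and the exact list-walk members are schematic:

/* Sketch only: scan_open_files() is hypothetical and the openFileList/flist
 * members are assumed from cifsglob.h of this era. */
static struct cifsFileInfo *scan_open_files(struct cifsInodeInfo *cinode)
{
	struct cifsFileInfo *cfile;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(cfile, &cinode->openFileList, flist) {
		if (!cfile->invalidHandle) {
			/* lock already held, so use the _locked variant */
			cifsFileInfo_get_locked(cfile);
			spin_unlock(&cifs_file_list_lock);
			return cfile;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

/* Callers without the lock use the plain getter, e.g. in cifs_readpages():
 *	rdata->cfile = cifsFileInfo_get(open_file);
 */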
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 8e8bb49112ff..cb79c7edecb0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- unsigned long oldtime = cifs_i->time;
cifs_revalidate_cache(inode, fattr);
+ spin_lock(&inode->i_lock);
inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
cifs_i->time = jiffies;
- cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
- oldtime, cifs_i->time);
-
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
* Can't safely change the file size here if the client is writing to
* it due to potential races.
*/
- spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
i_size_write(inode, fattr->cf_eof);
@@ -289,7 +285,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
int cifs_get_file_info_unix(struct file *filp)
{
int rc;
- int xid;
+ unsigned int xid;
FILE_UNIX_BASIC_INFO find_data;
struct cifs_fattr fattr;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -297,7 +293,7 @@ int cifs_get_file_info_unix(struct file *filp)
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- xid = GetXid();
+ xid = get_xid();
rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
if (!rc) {
cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
@@ -307,13 +303,13 @@ int cifs_get_file_info_unix(struct file *filp)
}
cifs_fattr_to_inode(inode, &fattr);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *full_path,
- struct super_block *sb, int xid)
+ struct super_block *sb, unsigned int xid)
{
int rc;
FILE_UNIX_BASIC_INFO find_data;
@@ -367,7 +363,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
static int
cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid)
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
{
int rc;
int oplock = 0;
@@ -466,7 +462,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
* FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ?
*/
static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid)
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
{
#ifdef CONFIG_CIFS_XATTR
ssize_t rc;
@@ -557,7 +553,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
int cifs_get_file_info(struct file *filp)
{
int rc;
- int xid;
+ unsigned int xid;
FILE_ALL_INFO find_data;
struct cifs_fattr fattr;
struct inode *inode = filp->f_path.dentry->d_inode;
@@ -565,7 +561,7 @@ int cifs_get_file_info(struct file *filp)
struct cifsFileInfo *cfile = filp->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- xid = GetXid();
+ xid = get_xid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
switch (rc) {
case 0:
@@ -596,65 +592,58 @@ int cifs_get_file_info(struct file *filp)
fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
cifs_fattr_to_inode(inode, &fattr);
cgfi_exit:
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
-int cifs_get_inode_info(struct inode **pinode,
- const unsigned char *full_path, FILE_ALL_INFO *pfindData,
- struct super_block *sb, int xid, const __u16 *pfid)
+int
+cifs_get_inode_info(struct inode **inode, const char *full_path,
+ FILE_ALL_INFO *data, struct super_block *sb, int xid,
+ const __u16 *fid)
{
int rc = 0, tmprc;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
struct tcon_link *tlink;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
char *buf = NULL;
- bool adjustTZ = false;
+ bool adjust_tz = false;
struct cifs_fattr fattr;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
+ tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
cFYI(1, "Getting info on %s", full_path);
- if ((pfindData == NULL) && (*pinode != NULL)) {
- if (CIFS_I(*pinode)->clientCanCacheRead) {
+ if ((data == NULL) && (*inode != NULL)) {
+ if (CIFS_I(*inode)->clientCanCacheRead) {
cFYI(1, "No need to revalidate cached inode sizes");
goto cgii_exit;
}
}
- /* if file info not passed in then get it from server */
- if (pfindData == NULL) {
+ /* if inode info is not passed, get it from server */
+ if (data == NULL) {
+ if (!server->ops->query_path_info) {
+ rc = -ENOSYS;
+ goto cgii_exit;
+ }
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
rc = -ENOMEM;
goto cgii_exit;
}
- pfindData = (FILE_ALL_INFO *)buf;
-
- /* could do find first instead but this returns more info */
- rc = CIFSSMBQPathInfo(xid, pTcon, full_path, pfindData,
- 0 /* not legacy */,
- cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- /* BB optimize code so we do not make the above call
- when server claims no NT SMB support and the above call
- failed at least once - set flag in tcon or mount */
- if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
- rc = SMBQueryInformation(xid, pTcon, full_path,
- pfindData, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- adjustTZ = true;
- }
+ data = (FILE_ALL_INFO *)buf;
+ rc = server->ops->query_path_info(xid, tcon, cifs_sb, full_path,
+ data, &adjust_tz);
}
if (!rc) {
- cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData,
- cifs_sb, adjustTZ);
+ cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *)data, cifs_sb,
+ adjust_tz);
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
@@ -668,28 +657,17 @@ int cifs_get_inode_info(struct inode **pinode,
* Is an i_ino of zero legal? Can we use that to check if the server
* supports returning inode numbers? Are there other sanity checks we
* can use to ensure that the server is really filling in that field?
- *
- * We can not use the IndexNumber field by default from Windows or
- * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
- * CIFS spec claims that this value is unique within the scope of a
- * share, and the windows docs hint that it's actually unique
- * per-machine.
- *
- * There may be higher info levels that work but are there Windows
- * server or network appliances for which IndexNumber field is not
- * guaranteed unique?
*/
- if (*pinode == NULL) {
+ if (*inode == NULL) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- int rc1 = 0;
-
- rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
- full_path, &fattr.cf_uniqueid,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc1 || !fattr.cf_uniqueid) {
- cFYI(1, "GetSrvInodeNum rc %d", rc1);
+ if (server->ops->get_srv_inum)
+ tmprc = server->ops->get_srv_inum(xid, tcon,
+ cifs_sb, full_path, &fattr.cf_uniqueid,
+ data);
+ else
+ tmprc = -ENOSYS;
+ if (tmprc || !fattr.cf_uniqueid) {
+ cFYI(1, "GetSrvInodeNum rc %d", tmprc);
fattr.cf_uniqueid = iunique(sb, ROOT_I);
cifs_autodisable_serverino(cifs_sb);
}
@@ -697,7 +675,7 @@ int cifs_get_inode_info(struct inode **pinode,
fattr.cf_uniqueid = iunique(sb, ROOT_I);
}
} else {
- fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid;
+ fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
}
/* query for SFU type info if supported and needed */
@@ -711,8 +689,7 @@ int cifs_get_inode_info(struct inode **pinode,
#ifdef CONFIG_CIFS_ACL
/* fill in 0777 bits from ACL */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
- pfid);
+ rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, full_path, fid);
if (rc) {
cFYI(1, "%s: Getting ACL failed with error: %d",
__func__, rc);
@@ -732,12 +709,12 @@ int cifs_get_inode_info(struct inode **pinode,
cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
}
- if (!*pinode) {
- *pinode = cifs_iget(sb, &fattr);
- if (!*pinode)
+ if (!*inode) {
+ *inode = cifs_iget(sb, &fattr);
+ if (!*inode)
rc = -ENOMEM;
} else {
- cifs_fattr_to_inode(*pinode, &fattr);
+ cifs_fattr_to_inode(*inode, &fattr);
}
cgii_exit:
@@ -750,38 +727,6 @@ static const struct inode_operations cifs_ipc_inode_ops = {
.lookup = cifs_lookup,
};
-char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon)
-{
- int pplen = vol->prepath ? strlen(vol->prepath) : 0;
- int dfsplen;
- char *full_path = NULL;
-
- /* if no prefix path, simply set path to the root of share to "" */
- if (pplen == 0) {
- full_path = kmalloc(1, GFP_KERNEL);
- if (full_path)
- full_path[0] = 0;
- return full_path;
- }
-
- if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
- dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
- else
- dfsplen = 0;
-
- full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
- if (full_path == NULL)
- return full_path;
-
- if (dfsplen)
- strncpy(full_path, tcon->treeName, dfsplen);
- strncpy(full_path + dfsplen, vol->prepath, pplen);
- convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
- full_path[dfsplen + pplen] = 0; /* add trailing null */
- return full_path;
-}
-
static int
cifs_find_inode(struct inode *inode, void *opaque)
{
@@ -886,13 +831,13 @@ retry_iget5_locked:
/* gets root inode */
struct inode *cifs_root_iget(struct super_block *sb)
{
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct inode *inode = NULL;
long rc;
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- xid = GetXid();
+ xid = get_xid();
if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
else
@@ -910,27 +855,29 @@ struct inode *cifs_root_iget(struct super_block *sb)
if (rc && tcon->ipc) {
cFYI(1, "ipc connection - fake read inode");
+ spin_lock(&inode->i_lock);
inode->i_mode |= S_IFDIR;
set_nlink(inode, 2);
inode->i_op = &cifs_ipc_inode_ops;
inode->i_fop = &simple_dir_operations;
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
+ spin_unlock(&inode->i_lock);
} else if (rc) {
iget_failed(inode);
inode = ERR_PTR(rc);
}
out:
- /* can not call macro FreeXid here since in a void func
+ /* can not call macro free_xid here since in a void func
* TODO: This is no longer true
*/
- _FreeXid(xid);
+ _free_xid(xid);
return inode;
}
static int
-cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
+cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
char *full_path, __u32 dosattr)
{
int rc;
@@ -1051,7 +998,8 @@ out:
* anything else.
*/
static int
-cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
+cifs_rename_pending_delete(char *full_path, struct dentry *dentry,
+ unsigned int xid)
{
int oplock = 0;
int rc;
@@ -1160,6 +1108,15 @@ undo_setattr:
goto out_close;
}
+/* copied from fs/nfs/dir.c with small changes */
+static void
+cifs_drop_nlink(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&inode->i_lock);
+}
/*
* If dentry->d_inode is null (usually meaning the cached dentry
@@ -1171,7 +1128,7 @@ undo_setattr:
int cifs_unlink(struct inode *dir, struct dentry *dentry)
{
int rc = 0;
- int xid;
+ unsigned int xid;
char *full_path = NULL;
struct inode *inode = dentry->d_inode;
struct cifsInodeInfo *cifs_inode;
@@ -1189,7 +1146,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
/* Unlink can be called from rename so we can not take the
* sb->s_vfs_rename_mutex here */
@@ -1199,9 +1156,8 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
goto unlink_out;
}
- if ((tcon->ses->capabilities & CAP_UNIX) &&
- (CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -1217,13 +1173,13 @@ retry_std_delete:
psx_del_no_retry:
if (!rc) {
if (inode)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if (rc == -ENOENT) {
d_drop(dentry);
} else if (rc == -ETXTBSY) {
rc = cifs_rename_pending_delete(full_path, dentry, xid);
if (rc == 0)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (attrs == NULL) {
@@ -1265,21 +1221,159 @@ out_reval:
unlink_out:
kfree(full_path);
kfree(attrs);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
+static int
+cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
+ const char *full_path, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon, const unsigned int xid)
+{
+ int rc = 0;
+ struct inode *newinode = NULL;
+
+ if (tcon->unix_ext)
+ rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb,
+ xid);
+ else
+ rc = cifs_get_inode_info(&newinode, full_path, NULL,
+ inode->i_sb, xid, NULL);
+ if (rc)
+ return rc;
+
+ d_instantiate(dentry, newinode);
+ /*
+	 * Setting nlink is not necessary except in cases where we failed to get it
+	 * from the server or it was set to a bogus value.
+ */
+ spin_lock(&dentry->d_inode->i_lock);
+ if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
+ set_nlink(dentry->d_inode, 2);
+ spin_unlock(&dentry->d_inode->i_lock);
+ mode &= ~current_umask();
+ /* must turn on setgid bit if parent dir has it */
+ if (inode->i_mode & S_ISGID)
+ mode |= S_ISGID;
+
+ if (tcon->unix_ext) {
+ struct cifs_unix_set_info_args args = {
+ .mode = mode,
+ .ctime = NO_CHANGE_64,
+ .atime = NO_CHANGE_64,
+ .mtime = NO_CHANGE_64,
+ .device = 0,
+ };
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ args.uid = (__u64)current_fsuid();
+ if (inode->i_mode & S_ISGID)
+ args.gid = (__u64)inode->i_gid;
+ else
+ args.gid = (__u64)current_fsgid();
+ } else {
+ args.uid = NO_CHANGE_64;
+ args.gid = NO_CHANGE_64;
+ }
+ CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ } else {
+ struct TCP_Server_Info *server = tcon->ses->server;
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
+ (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo)
+ server->ops->mkdir_setinfo(newinode, full_path, cifs_sb,
+ tcon, xid);
+ if (dentry->d_inode) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ dentry->d_inode->i_mode = (mode | S_IFDIR);
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
+ dentry->d_inode->i_uid = current_fsuid();
+ if (inode->i_mode & S_ISGID)
+ dentry->d_inode->i_gid = inode->i_gid;
+ else
+ dentry->d_inode->i_gid =
+ current_fsgid();
+ }
+ }
+ }
+ return rc;
+}
+
+static int
+cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode,
+ const char *full_path, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon, const unsigned int xid)
+{
+ int rc = 0;
+ u32 oplock = 0;
+ FILE_UNIX_BASIC_INFO *info = NULL;
+ struct inode *newinode = NULL;
+ struct cifs_fattr fattr;
+
+ info = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
+ if (info == NULL) {
+ rc = -ENOMEM;
+ goto posix_mkdir_out;
+ }
+
+ mode &= ~current_umask();
+ rc = CIFSPOSIXCreate(xid, tcon, SMB_O_DIRECTORY | SMB_O_CREAT, mode,
+ NULL /* netfid */, info, &oplock, full_path,
+ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (rc == -EOPNOTSUPP)
+ goto posix_mkdir_out;
+ else if (rc) {
+ cFYI(1, "posix mkdir returned 0x%x", rc);
+ d_drop(dentry);
+ goto posix_mkdir_out;
+ }
+
+ if (info->Type == cpu_to_le32(-1))
+ /* no return info, go query for it */
+ goto posix_mkdir_get_info;
+ /*
+ * BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if
+ * need to set uid/gid.
+ */
+
+ cifs_unix_basic_to_fattr(&fattr, info, cifs_sb);
+ cifs_fill_uniqueid(inode->i_sb, &fattr);
+ newinode = cifs_iget(inode->i_sb, &fattr);
+ if (!newinode)
+ goto posix_mkdir_get_info;
+
+ d_instantiate(dentry, newinode);
+
+#ifdef CONFIG_CIFS_DEBUG2
+ cFYI(1, "instantiated dentry %p %s to inode %p", dentry,
+ dentry->d_name.name, newinode);
+
+ if (newinode->i_nlink != 2)
+ cFYI(1, "unexpected number of links %d", newinode->i_nlink);
+#endif
+
+posix_mkdir_out:
+ kfree(info);
+ return rc;
+posix_mkdir_get_info:
+ rc = cifs_mkdir_qinfo(inode, dentry, mode, full_path, cifs_sb, tcon,
+ xid);
+ goto posix_mkdir_out;
+}
+
int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
{
- int rc = 0, tmprc;
- int xid;
+ int rc = 0;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifs_tcon *pTcon;
- char *full_path = NULL;
- struct inode *newinode = NULL;
- struct cifs_fattr fattr;
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
+ char *full_path;
cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode);
@@ -1287,9 +1381,9 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
+ tcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -1297,148 +1391,31 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
goto mkdir_out;
}
- if ((pTcon->ses->capabilities & CAP_UNIX) &&
- (CIFS_UNIX_POSIX_PATH_OPS_CAP &
- le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
- u32 oplock = 0;
- FILE_UNIX_BASIC_INFO *pInfo =
- kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
- if (pInfo == NULL) {
- rc = -ENOMEM;
+ if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
+ le64_to_cpu(tcon->fsUnixInfo.Capability))) {
+ rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
+ tcon, xid);
+ if (rc != -EOPNOTSUPP)
goto mkdir_out;
- }
-
- mode &= ~current_umask();
- rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
- mode, NULL /* netfid */, pInfo, &oplock,
- full_path, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc == -EOPNOTSUPP) {
- kfree(pInfo);
- goto mkdir_retry_old;
- } else if (rc) {
- cFYI(1, "posix mkdir returned 0x%x", rc);
- d_drop(direntry);
- } else {
- if (pInfo->Type == cpu_to_le32(-1)) {
- /* no return info, go query for it */
- kfree(pInfo);
- goto mkdir_get_info;
- }
-/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
- to set uid/gid */
-
- cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
- cifs_fill_uniqueid(inode->i_sb, &fattr);
- newinode = cifs_iget(inode->i_sb, &fattr);
- if (!newinode) {
- kfree(pInfo);
- goto mkdir_get_info;
- }
-
- d_instantiate(direntry, newinode);
+ }
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, "instantiated dentry %p %s to inode %p",
- direntry, direntry->d_name.name, newinode);
+ server = tcon->ses->server;
- if (newinode->i_nlink != 2)
- cFYI(1, "unexpected number of links %d",
- newinode->i_nlink);
-#endif
- }
- kfree(pInfo);
+ if (!server->ops->mkdir) {
+ rc = -ENOSYS;
goto mkdir_out;
}
-mkdir_retry_old:
+
/* BB add setting the equivalent of mode via CreateX w/ACLs */
- rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ rc = server->ops->mkdir(xid, tcon, full_path, cifs_sb);
if (rc) {
cFYI(1, "cifs_mkdir returned 0x%x", rc);
d_drop(direntry);
- } else {
-mkdir_get_info:
- if (pTcon->unix_ext)
- rc = cifs_get_inode_info_unix(&newinode, full_path,
- inode->i_sb, xid);
- else
- rc = cifs_get_inode_info(&newinode, full_path, NULL,
- inode->i_sb, xid, NULL);
-
- d_instantiate(direntry, newinode);
- /* setting nlink not necessary except in cases where we
- * failed to get it from the server or was set bogus */
- if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2))
- set_nlink(direntry->d_inode, 2);
-
- mode &= ~current_umask();
- /* must turn on setgid bit if parent dir has it */
- if (inode->i_mode & S_ISGID)
- mode |= S_ISGID;
-
- if (pTcon->unix_ext) {
- struct cifs_unix_set_info_args args = {
- .mode = mode,
- .ctime = NO_CHANGE_64,
- .atime = NO_CHANGE_64,
- .mtime = NO_CHANGE_64,
- .device = 0,
- };
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
- args.uid = (__u64)current_fsuid();
- if (inode->i_mode & S_ISGID)
- args.gid = (__u64)inode->i_gid;
- else
- args.gid = (__u64)current_fsgid();
- } else {
- args.uid = NO_CHANGE_64;
- args.gid = NO_CHANGE_64;
- }
- CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- } else {
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
- (mode & S_IWUGO) == 0) {
- FILE_BASIC_INFO pInfo;
- struct cifsInodeInfo *cifsInode;
- u32 dosattrs;
-
- memset(&pInfo, 0, sizeof(pInfo));
- cifsInode = CIFS_I(newinode);
- dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
- pInfo.Attributes = cpu_to_le32(dosattrs);
- tmprc = CIFSSMBSetPathInfo(xid, pTcon,
- full_path, &pInfo,
- cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (tmprc == 0)
- cifsInode->cifsAttrs = dosattrs;
- }
- if (direntry->d_inode) {
- if (cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_DYNPERM)
- direntry->d_inode->i_mode =
- (mode | S_IFDIR);
-
- if (cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_SET_UID) {
- direntry->d_inode->i_uid =
- current_fsuid();
- if (inode->i_mode & S_ISGID)
- direntry->d_inode->i_gid =
- inode->i_gid;
- else
- direntry->d_inode->i_gid =
- current_fsgid();
- }
- }
- }
+ goto mkdir_out;
}
+
+ rc = cifs_mkdir_qinfo(inode, direntry, mode, full_path, cifs_sb, tcon,
+ xid);
mkdir_out:
/*
* Force revalidate to get parent dir info when needed since cached
@@ -1446,7 +1423,7 @@ mkdir_out:
*/
CIFS_I(inode)->time = 0;
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
@@ -1454,16 +1431,17 @@ mkdir_out:
int cifs_rmdir(struct inode *inode, struct dentry *direntry)
{
int rc = 0;
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
cFYI(1, "cifs_rmdir, inode = 0x%p", inode);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -1477,10 +1455,16 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
rc = PTR_ERR(tlink);
goto rmdir_exit;
}
- pTcon = tlink_tcon(tlink);
+ tcon = tlink_tcon(tlink);
+ server = tcon->ses->server;
+
+ if (!server->ops->rmdir) {
+ rc = -ENOSYS;
+ cifs_put_tlink(tlink);
+ goto rmdir_exit;
+ }
- rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
cifs_put_tlink(tlink);
if (!rc) {
@@ -1506,13 +1490,14 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
rmdir_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
static int
-cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
- struct dentry *to_dentry, const char *toPath)
+cifs_do_rename(unsigned int xid, struct dentry *from_dentry,
+ const char *fromPath, struct dentry *to_dentry,
+ const char *toPath)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
struct tcon_link *tlink;
@@ -1571,7 +1556,8 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
struct cifs_tcon *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
- int xid, rc, tmprc;
+ unsigned int xid;
+ int rc, tmprc;
cifs_sb = CIFS_SB(source_dir->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
@@ -1579,7 +1565,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
/*
* we already have the rename sem so we do not need to
@@ -1652,7 +1638,7 @@ cifs_rename_exit:
kfree(info_buf_source);
kfree(fromName);
kfree(toName);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
@@ -1727,7 +1713,7 @@ int cifs_revalidate_file_attr(struct file *filp)
int cifs_revalidate_dentry_attr(struct dentry *dentry)
{
- int xid;
+ unsigned int xid;
int rc = 0;
struct inode *inode = dentry->d_inode;
struct super_block *sb = dentry->d_sb;
@@ -1739,7 +1725,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
if (!cifs_inode_needs_reval(inode))
return rc;
- xid = GetXid();
+ xid = get_xid();
/* can not safely grab the rename sem here if rename calls revalidate
since that would deadlock */
@@ -1761,7 +1747,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
out:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -1869,7 +1855,7 @@ static void cifs_setsize(struct inode *inode, loff_t offset)
static int
cifs_set_file_size(struct inode *inode, struct iattr *attrs,
- int xid, char *full_path)
+ unsigned int xid, char *full_path)
{
int rc;
struct cifsFileInfo *open_file;
@@ -1971,7 +1957,7 @@ static int
cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
{
int rc;
- int xid;
+ unsigned int xid;
char *full_path = NULL;
struct inode *inode = direntry->d_inode;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
@@ -1984,7 +1970,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x",
direntry->d_name.name, attrs->ia_valid);
- xid = GetXid();
+ xid = get_xid();
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
@@ -2104,14 +2090,14 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
out:
kfree(args);
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{
- int xid;
+ unsigned int xid;
uid_t uid = NO_CHANGE_32;
gid_t gid = NO_CHANGE_32;
struct inode *inode = direntry->d_inode;
@@ -2122,7 +2108,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
__u32 dosattr = 0;
__u64 mode = NO_CHANGE_64;
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
direntry->d_name.name, attrs->ia_valid);
@@ -2132,14 +2118,14 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
rc = inode_change_ok(inode, attrs);
if (rc < 0) {
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
@@ -2265,7 +2251,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
cifs_setattr_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
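
Note: the mkdir/rmdir paths above no longer call the SMB1 helpers directly; they dispatch through the per-dialect operations table hanging off the server (server->ops->mkdir, server->ops->rmdir) and fall back to -ENOSYS when a dialect leaves a hook unset. A minimal userspace sketch of that dispatch pattern follows; the names (proto_ops, do_mkdir, smb1_mkdir) are illustrative stand-ins, not kernel symbols.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for the per-dialect operations table. */
struct proto_ops {
	int (*mkdir)(const char *path);	/* may be NULL if the dialect lacks it */
	int (*rmdir)(const char *path);
};

static int smb1_mkdir(const char *path)
{
	printf("mkdir via path-based call: %s\n", path);
	return 0;
}

/* One dialect's table; hooks it does not implement stay NULL. */
static const struct proto_ops smb1_like_ops = {
	.mkdir = smb1_mkdir,
	.rmdir = NULL,
};

/* Callers test for the hook and fall back to -ENOSYS, as the VFS paths above do. */
static int do_mkdir(const struct proto_ops *ops, const char *path)
{
	if (!ops->mkdir)
		return -ENOSYS;
	return ops->mkdir(path);
}

int main(void)
{
	printf("rc = %d\n", do_mkdir(&smb1_like_ops, "/share/newdir"));
	return 0;
}
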
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 6d2667f0c98c..ae082a66de2f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -34,7 +34,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
struct inode *inode = filep->f_dentry->d_inode;
int rc = -ENOTTY; /* strange error - but the precedent */
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
#ifdef CONFIG_CIFS_POSIX
struct cifsFileInfo *pSMBFile = filep->private_data;
@@ -44,7 +44,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
__u64 caps;
#endif /* CONFIG_CIFS_POSIX */
- xid = GetXid();
+ xid = get_xid();
cFYI(1, "ioctl file %p cmd %u arg %lu", filep, command, arg);
@@ -105,6 +105,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
break;
}
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 6b0e06434391..e6ce3b112875 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -56,14 +56,14 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(md5)) {
rc = PTR_ERR(md5);
- cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
+ cERROR(1, "%s: Crypto md5 allocation error %d", __func__, rc);
return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!sdescmd5) {
rc = -ENOMEM;
- cERROR(1, "%s: Memory allocation failure\n", __func__);
+ cERROR(1, "%s: Memory allocation failure", __func__);
goto symlink_hash_err;
}
sdescmd5->shash.tfm = md5;
@@ -71,17 +71,17 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
rc = crypto_shash_init(&sdescmd5->shash);
if (rc) {
- cERROR(1, "%s: Could not init md5 shash\n", __func__);
+ cERROR(1, "%s: Could not init md5 shash", __func__);
goto symlink_hash_err;
}
rc = crypto_shash_update(&sdescmd5->shash, link_str, link_len);
if (rc) {
- cERROR(1, "%s: Could not update iwth link_str\n", __func__);
+		cERROR(1, "%s: Could not update with link_str", __func__);
goto symlink_hash_err;
}
rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
if (rc)
- cERROR(1, "%s: Could not generate md5 hash\n", __func__);
+ cERROR(1, "%s: Could not generate md5 hash", __func__);
symlink_hash_err:
crypto_free_shash(md5);
@@ -115,7 +115,7 @@ CIFSParseMFSymlink(const u8 *buf,
rc = symlink_hash(link_len, link_str, md5_hash);
if (rc) {
- cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+ cFYI(1, "%s: MD5 hash failure: %d", __func__, rc);
return rc;
}
@@ -154,7 +154,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
rc = symlink_hash(link_len, link_str, md5_hash);
if (rc) {
- cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+ cFYI(1, "%s: MD5 hash failure: %d", __func__, rc);
return rc;
}
@@ -181,7 +181,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
}
static int
-CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
+CIFSCreateMFSymLink(const unsigned int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName,
struct cifs_sb_info *cifs_sb)
{
@@ -238,7 +238,7 @@ CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
}
static int
-CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon,
+CIFSQueryMFSymLink(const unsigned int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, char **symlinkinfo,
const struct nls_table *nls_codepage, int remap)
{
@@ -307,7 +307,7 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
int
CIFSCheckMFSymlink(struct cifs_fattr *fattr,
const unsigned char *path,
- struct cifs_sb_info *cifs_sb, int xid)
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
{
int rc;
int oplock = 0;
@@ -390,7 +390,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
struct dentry *direntry)
{
int rc = -EACCES;
- int xid;
+ unsigned int xid;
char *fromName = NULL;
char *toName = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -403,7 +403,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
fromName = build_path_from_dentry(old_file);
toName = build_path_from_dentry(direntry);
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
if (old_file->d_inode) {
cifsInode = CIFS_I(old_file->d_inode);
if (rc == 0) {
+ spin_lock(&old_file->d_inode->i_lock);
inc_nlink(old_file->d_inode);
+ spin_unlock(&old_file->d_inode->i_lock);
/* BB should we make this contingent on superblock flag NOATIME? */
/* old_file->d_inode->i_ctime = CURRENT_TIME;*/
/* parent dir timestamps will update from srv
@@ -455,7 +457,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
cifs_hl_exit:
kfree(fromName);
kfree(toName);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
return rc;
}
@@ -465,14 +467,14 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
{
struct inode *inode = direntry->d_inode;
int rc = -ENOMEM;
- int xid;
+ unsigned int xid;
char *full_path = NULL;
char *target_path = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
struct cifs_tcon *tcon;
- xid = GetXid();
+ xid = get_xid();
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
@@ -495,8 +497,8 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
* but there doesn't seem to be any harm in allowing the client to
* read them.
*/
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
- && !(tcon->ses->capabilities & CAP_UNIX)) {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) &&
+ !cap_unix(tcon->ses)) {
rc = -EACCES;
goto out;
}
@@ -518,7 +520,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if ((rc != 0) && (tcon->ses->capabilities & CAP_UNIX))
+ if ((rc != 0) && cap_unix(tcon->ses))
rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, &target_path,
cifs_sb->local_nls);
@@ -529,7 +531,7 @@ out:
target_path = ERR_PTR(rc);
}
- FreeXid(xid);
+ free_xid(xid);
if (tlink)
cifs_put_tlink(tlink);
nd_set_link(nd, target_path);
@@ -540,14 +542,14 @@ int
cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
{
int rc = -EOPNOTSUPP;
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
- xid = GetXid();
+ xid = get_xid();
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
@@ -594,7 +596,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
symlink_exit:
kfree(full_path);
cifs_put_tlink(tlink);
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
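
Note: both here and in cifs_unlink()/cifs_drop_nlink() earlier in this series, link-count updates are now wrapped in inode->i_lock so the counter is never read or modified half-updated, and a decrement is skipped once it reaches zero. A toy userspace model of that guarded-counter idea (a pthread mutex standing in for the spinlock; all names are hypothetical):

#include <pthread.h>
#include <stdio.h>

/* Toy inode with a lock-protected link count. */
struct toy_inode {
	pthread_mutex_t lock;
	unsigned int nlink;
};

/* Like cifs_drop_nlink(): decrement only while holding the lock and only if > 0. */
static void toy_drop_nlink(struct toy_inode *ino)
{
	pthread_mutex_lock(&ino->lock);
	if (ino->nlink > 0)
		ino->nlink--;
	pthread_mutex_unlock(&ino->lock);
}

static void toy_inc_nlink(struct toy_inode *ino)
{
	pthread_mutex_lock(&ino->lock);
	ino->nlink++;
	pthread_mutex_unlock(&ino->lock);
}

int main(void)
{
	struct toy_inode ino = { PTHREAD_MUTEX_INITIALIZER, 1 };

	toy_inc_nlink(&ino);	/* hard link created */
	toy_drop_nlink(&ino);	/* one name unlinked */
	toy_drop_nlink(&ino);
	toy_drop_nlink(&ino);	/* extra drops are ignored, never underflow */
	printf("nlink = %u\n", ino.nlink);
	return 0;
}
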
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 557506ae1e2a..ce41fee07e5b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -29,6 +29,9 @@
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2pdu.h"
+#endif
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
@@ -40,7 +43,7 @@ extern mempool_t *cifs_req_poolp;
since the cifs fs was mounted */
unsigned int
-_GetXid(void)
+_get_xid(void)
{
unsigned int xid;
@@ -58,7 +61,7 @@ _GetXid(void)
}
void
-_FreeXid(unsigned int xid)
+_free_xid(unsigned int xid)
{
spin_lock(&GlobalMid_Lock);
/* if (GlobalTotalActiveXid == 0)
@@ -143,17 +146,27 @@ struct smb_hdr *
cifs_buf_get(void)
{
struct smb_hdr *ret_buf = NULL;
-
-/* We could use negotiated size instead of max_msgsize -
- but it may be more efficient to always alloc same size
- albeit slightly larger than necessary and maxbuffersize
- defaults to this and can not be bigger */
+ size_t buf_size = sizeof(struct smb_hdr);
+
+#ifdef CONFIG_CIFS_SMB2
+ /*
+	 * The SMB2 header is bigger than the CIFS one, so clearing a few extra
+	 * bytes here does no harm for CIFS.
+ */
+ buf_size = sizeof(struct smb2_hdr);
+#endif
+ /*
+	 * We could use the negotiated size instead of max_msgsize, but it may
+	 * be more efficient to always allocate the same size even though it is
+	 * slightly larger than necessary; maxbuffersize defaults to this and
+	 * can not be bigger.
+ */
ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
/* clear the first few header bytes */
/* for most paths, more is cleared in header_assemble */
if (ret_buf) {
- memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
+ memset(ret_buf, 0, buf_size + 3);
atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&totBufAllocCount);
@@ -448,7 +461,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
if (tcon->tid != buf->Tid)
continue;
- cifs_stats_inc(&tcon->num_oplock_brks);
+ cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
spin_lock(&cifs_file_list_lock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
diff --git a/fs/cifs/nterr.c b/fs/cifs/nterr.c
index 819fd994b121..b6023c646123 100644
--- a/fs/cifs/nterr.c
+++ b/fs/cifs/nterr.c
@@ -31,7 +31,7 @@ const struct nt_err_code_struct nt_errs[] = {
{"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS},
{"NT_STATUS_INFO_LENGTH_MISMATCH", NT_STATUS_INFO_LENGTH_MISMATCH},
{"NT_STATUS_ACCESS_VIOLATION", NT_STATUS_ACCESS_VIOLATION},
- {"STATUS_BUFFER_OVERFLOW", STATUS_BUFFER_OVERFLOW},
+ {"NT_STATUS_BUFFER_OVERFLOW", NT_STATUS_BUFFER_OVERFLOW},
{"NT_STATUS_IN_PAGE_ERROR", NT_STATUS_IN_PAGE_ERROR},
{"NT_STATUS_PAGEFILE_QUOTA", NT_STATUS_PAGEFILE_QUOTA},
{"NT_STATUS_INVALID_HANDLE", NT_STATUS_INVALID_HANDLE},
@@ -681,7 +681,7 @@ const struct nt_err_code_struct nt_errs[] = {
NT_STATUS_QUOTA_LIST_INCONSISTENT},
{"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE},
{"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES},
- {"STATUS_MORE_ENTRIES", STATUS_MORE_ENTRIES},
- {"STATUS_SOME_UNMAPPED", STATUS_SOME_UNMAPPED},
+ {"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES},
+ {"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED},
{NULL, 0}
};
diff --git a/fs/cifs/nterr.h b/fs/cifs/nterr.h
index 257267367d41..7a0eae5ae7c9 100644
--- a/fs/cifs/nterr.h
+++ b/fs/cifs/nterr.h
@@ -35,18 +35,20 @@ struct nt_err_code_struct {
extern const struct nt_err_code_struct nt_errs[];
/* Win32 Status codes. */
-#define STATUS_MORE_ENTRIES 0x0105
-#define ERROR_INVALID_PARAMETER 0x0057
-#define ERROR_INSUFFICIENT_BUFFER 0x007a
-#define STATUS_1804 0x070c
-#define STATUS_NOTIFY_ENUM_DIR 0x010c
+#define NT_STATUS_MORE_ENTRIES 0x0105
+#define NT_ERROR_INVALID_PARAMETER 0x0057
+#define NT_ERROR_INSUFFICIENT_BUFFER 0x007a
+#define NT_STATUS_1804 0x070c
+#define NT_STATUS_NOTIFY_ENUM_DIR 0x010c
-/* Win32 Error codes extracted using a loop in smbclient then printing a
- netmon sniff to a file. */
+/*
+ * Win32 Error codes extracted using a loop in smbclient then printing a netmon
+ * sniff to a file.
+ */
-#define NT_STATUS_OK 0x0000
-#define STATUS_SOME_UNMAPPED 0x0107
-#define STATUS_BUFFER_OVERFLOW 0x80000005
+#define NT_STATUS_OK 0x0000
+#define NT_STATUS_SOME_UNMAPPED 0x0107
+#define NT_STATUS_BUFFER_OVERFLOW 0x80000005
#define NT_STATUS_NO_MORE_ENTRIES 0x8000001a
#define NT_STATUS_MEDIA_CHANGED 0x8000001c
#define NT_STATUS_END_OF_MEDIA 0x8000001e
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 5d52e4a3b1ed..848249fa120f 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -126,3 +126,13 @@ typedef struct _AUTHENTICATE_MESSAGE {
do not set the version is present flag */
char UserString[0];
} __attribute__((packed)) AUTHENTICATE_MESSAGE, *PAUTHENTICATE_MESSAGE;
+
+/*
+ * Size of the session key (crypto key encrypted with the password).
+ */
+
+int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
+void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
+int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
+ struct cifs_ses *ses,
+ const struct nls_table *nls_cp);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index a4217f02fab2..d87f82678bc7 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -193,7 +193,7 @@ cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
we try to do FindFirst on (NTFS) directory symlinks */
/*
int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
- int xid)
+ unsigned int xid)
{
__u16 fid;
int len;
@@ -220,7 +220,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
}
*/
-static int initiate_cifs_search(const int xid, struct file *file)
+static int initiate_cifs_search(const unsigned int xid, struct file *file)
{
__u16 search_flags;
int rc = 0;
@@ -228,7 +228,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
struct cifsFileInfo *cifsFile;
struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
struct tcon_link *tlink = NULL;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
if (file->private_data == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
@@ -242,10 +242,10 @@ static int initiate_cifs_search(const int xid, struct file *file)
}
file->private_data = cifsFile;
cifsFile->tlink = cifs_get_tlink(tlink);
- pTcon = tlink_tcon(tlink);
+ tcon = tlink_tcon(tlink);
} else {
cifsFile = file->private_data;
- pTcon = tlink_tcon(cifsFile->tlink);
+ tcon = tlink_tcon(cifsFile->tlink);
}
cifsFile->invalidHandle = true;
@@ -262,11 +262,11 @@ static int initiate_cifs_search(const int xid, struct file *file)
ffirst_retry:
/* test for Unix extensions */
/* but now check for them on the share/mount not on the SMB session */
-/* if (pTcon->ses->capabilities & CAP_UNIX) { */
- if (pTcon->unix_ext)
+ /* if (cap_unix(tcon->ses) { */
+ if (tcon->unix_ext)
cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
- else if ((pTcon->ses->capabilities &
- (CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
@@ -278,7 +278,7 @@ ffirst_retry:
if (backup_cred(cifs_sb))
search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
- rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
+ rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb->local_nls,
&cifsFile->netfid, search_flags, &cifsFile->srch_inf,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
@@ -507,7 +507,7 @@ static int cifs_save_resume_key(const char *current_entry,
assume that they are located in the findfirst return buffer.*/
/* We start counting in the buffer with entry 2 and increment for every
entry (do not increment for . or .. entry) */
-static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
+static int find_cifs_entry(const unsigned int xid, struct cifs_tcon *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
__u16 search_flags;
@@ -721,7 +721,8 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
{
int rc = 0;
- int xid, i;
+ unsigned int xid;
+ int i;
struct cifs_tcon *pTcon;
struct cifsFileInfo *cifsFile = NULL;
char *current_entry;
@@ -730,7 +731,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
char *end_of_smb;
unsigned int max_len;
- xid = GetXid();
+ xid = get_xid();
/*
* Ensure FindFirst doesn't fail before doing filldir() for '.' and
@@ -768,7 +769,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
if (file->private_data == NULL) {
rc = -EINVAL;
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
cifsFile = file->private_data;
@@ -840,6 +841,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
} /* end switch */
rddir2_exit:
- FreeXid(xid);
+ free_xid(xid);
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 551d0c2b9736..382c06d01b38 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -364,7 +364,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
return rc;
}
-static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
+int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
struct cifs_ses *ses)
{
unsigned int tioffset; /* challenge message target info area */
@@ -415,7 +415,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
/* We do not malloc the blob, it is passed in pbuffer, because
it is fixed size, and small, making this approach cleaner */
-static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
+void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
struct cifs_ses *ses)
{
NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
@@ -451,7 +451,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
/* We do not malloc the blob, it is passed in pbuffer, because its
maximum possible size is fixed and small, making this approach cleaner.
This function returns the length of the data in the blob */
-static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+int build_ntlmssp_auth_blob(unsigned char *pbuffer,
u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp)
@@ -556,7 +556,7 @@ setup_ntlmv2_ret:
}
int
-CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
+CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
@@ -898,7 +898,7 @@ ssetup_ntlmssp_authenticate:
if (action & GUEST_LOGIN)
cFYI(1, "Guest login"); /* BB mark SesInfo struct? */
ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
- cFYI(1, "UID = %d ", ses->Suid);
+ cFYI(1, "UID = %llu ", ses->Suid);
/* response can have either 3 or 4 word count - Samba sends 3 */
/* and lanman response is 3 */
bytes_remaining = get_bcc(smb_buf);
@@ -938,7 +938,7 @@ ssetup_ntlmssp_authenticate:
ssetup_exit:
if (spnego_key) {
- key_revoke(spnego_key);
+ key_invalidate(spnego_key);
key_put(spnego_key);
}
kfree(str_area);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6dec38f5522d..3129ac74b819 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -101,7 +101,8 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
}
static void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
+cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add,
+ const int optype)
{
spin_lock(&server->req_lock);
server->credits += add;
@@ -120,11 +121,17 @@ cifs_set_credits(struct TCP_Server_Info *server, const int val)
}
static int *
-cifs_get_credits_field(struct TCP_Server_Info *server)
+cifs_get_credits_field(struct TCP_Server_Info *server, const int optype)
{
return &server->credits;
}
+static unsigned int
+cifs_get_credits(struct mid_q_entry *mid)
+{
+ return 1;
+}
+
/*
* Find a free multiplex id (SMB mid). Otherwise there could be
* mid collisions which might cause problems, demultiplexing the
@@ -213,14 +220,403 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
return mid;
}
+/*
+ * Return codes:
+ *   0       not a transact2, or all data present
+ *   >0      transact2 with that much data missing
+ *   -EINVAL invalid transact2
+ */
+static int
+check2ndT2(char *buf)
+{
+ struct smb_hdr *pSMB = (struct smb_hdr *)buf;
+ struct smb_t2_rsp *pSMBt;
+ int remaining;
+ __u16 total_data_size, data_in_this_rsp;
+
+ if (pSMB->Command != SMB_COM_TRANSACTION2)
+ return 0;
+
+ /* check for plausible wct, bcc and t2 data and parm sizes */
+ /* check for parm and data offset going beyond end of smb */
+ if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
+ cFYI(1, "invalid transact2 word count");
+ return -EINVAL;
+ }
+
+ pSMBt = (struct smb_t2_rsp *)pSMB;
+
+ total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+ data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+
+ if (total_data_size == data_in_this_rsp)
+ return 0;
+ else if (total_data_size < data_in_this_rsp) {
+ cFYI(1, "total data %d smaller than data in frame %d",
+ total_data_size, data_in_this_rsp);
+ return -EINVAL;
+ }
+
+ remaining = total_data_size - data_in_this_rsp;
+
+ cFYI(1, "missing %d bytes from transact2, check next response",
+ remaining);
+ if (total_data_size > CIFSMaxBufSize) {
+ cERROR(1, "TotalDataSize %d is over maximum buffer %d",
+ total_data_size, CIFSMaxBufSize);
+ return -EINVAL;
+ }
+ return remaining;
+}
+
+static int
+coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+{
+ struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf;
+ struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr;
+ char *data_area_of_tgt;
+ char *data_area_of_src;
+ int remaining;
+ unsigned int byte_count, total_in_tgt;
+ __u16 tgt_total_cnt, src_total_cnt, total_in_src;
+
+ src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
+ tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+
+ if (tgt_total_cnt != src_total_cnt)
+ cFYI(1, "total data count of primary and secondary t2 differ "
+ "source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
+
+ total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+
+ remaining = tgt_total_cnt - total_in_tgt;
+
+ if (remaining < 0) {
+ cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
+ "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
+ return -EPROTO;
+ }
+
+ if (remaining == 0) {
+ /* nothing to do, ignore */
+ cFYI(1, "no more data remains");
+ return 0;
+ }
+
+ total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
+ if (remaining < total_in_src)
+ cFYI(1, "transact2 2nd response contains too much data");
+
+ /* find end of first SMB data area */
+ data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
+ get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
+
+ /* validate target area */
+ data_area_of_src = (char *)&pSMBs->hdr.Protocol +
+ get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
+
+ data_area_of_tgt += total_in_tgt;
+
+ total_in_tgt += total_in_src;
+ /* is the result too big for the field? */
+ if (total_in_tgt > USHRT_MAX) {
+ cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
+ return -EPROTO;
+ }
+ put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
+
+ /* fix up the BCC */
+ byte_count = get_bcc(target_hdr);
+ byte_count += total_in_src;
+ /* is the result too big for the field? */
+ if (byte_count > USHRT_MAX) {
+ cFYI(1, "coalesced BCC too large (%u)", byte_count);
+ return -EPROTO;
+ }
+ put_bcc(byte_count, target_hdr);
+
+ byte_count = be32_to_cpu(target_hdr->smb_buf_length);
+ byte_count += total_in_src;
+ /* don't allow buffer to overflow */
+ if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+ cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
+ return -ENOBUFS;
+ }
+ target_hdr->smb_buf_length = cpu_to_be32(byte_count);
+
+ /* copy second buffer into end of first buffer */
+ memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
+
+ if (remaining != total_in_src) {
+ /* more responses to go */
+ cFYI(1, "waiting for more secondary responses");
+ return 1;
+ }
+
+ /* we are done */
+ cFYI(1, "found the last secondary response");
+ return 0;
+}
+
+static bool
+cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+ char *buf, int malformed)
+{
+ if (malformed)
+ return false;
+ if (check2ndT2(buf) <= 0)
+ return false;
+ mid->multiRsp = true;
+ if (mid->resp_buf) {
+ /* merge response - fix up 1st*/
+ malformed = coalesce_t2(buf, mid->resp_buf);
+ if (malformed > 0)
+ return true;
+ /* All parts received or packet is malformed. */
+ mid->multiEnd = true;
+ dequeue_mid(mid, malformed);
+ return true;
+ }
+ if (!server->large_buf) {
+ /*FIXME: switch to already allocated largebuf?*/
+ cERROR(1, "1st trans2 resp needs bigbuf");
+ } else {
+ /* Have first buffer */
+ mid->resp_buf = buf;
+ mid->large_buf = true;
+ server->bigbuf = NULL;
+ }
+ return true;
+}
+
+static bool
+cifs_need_neg(struct TCP_Server_Info *server)
+{
+ return server->maxBuf == 0;
+}
+
+static int
+cifs_negotiate(const unsigned int xid, struct cifs_ses *ses)
+{
+ int rc;
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN) {
+ /* retry only once on 1st time connection */
+ set_credits(ses->server, 1);
+ rc = CIFSSMBNegotiate(xid, ses);
+ if (rc == -EAGAIN)
+ rc = -EHOSTDOWN;
+ }
+ return rc;
+}
+
+static void
+cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ CIFSSMBQFSDeviceInfo(xid, tcon);
+ CIFSSMBQFSAttributeInfo(xid, tcon);
+}
+
+static int
+cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path)
+{
+ int rc;
+ FILE_ALL_INFO *file_info;
+
+ file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ if (file_info == NULL)
+ return -ENOMEM;
+
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info,
+ 0 /* not legacy */, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+
+ if (rc == -EOPNOTSUPP || rc == -EINVAL)
+ rc = SMBQueryInformation(xid, tcon, full_path, file_info,
+ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ kfree(file_info);
+ return rc;
+}
+
+static int
+cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ FILE_ALL_INFO *data, bool *adjustTZ)
+{
+ int rc;
+
+ /* could do find first instead but this returns more info */
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */,
+ cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ /*
+ * BB optimize code so we do not make the above call when server claims
+ * no NT SMB support and the above call failed at least once - set flag
+ * in tcon or mount.
+ */
+ if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
+ rc = SMBQueryInformation(xid, tcon, full_path, data,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ *adjustTZ = true;
+ }
+ return rc;
+}
+
+static int
+cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ u64 *uniqueid, FILE_ALL_INFO *data)
+{
+ /*
+ * We can not use the IndexNumber field by default from Windows or
+ * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
+ * CIFS spec claims that this value is unique within the scope of a
+ * share, and the windows docs hint that it's actually unique
+ * per-machine.
+ *
+ * There may be higher info levels that work but are there Windows
+ * server or network appliances for which IndexNumber field is not
+ * guaranteed unique?
+ */
+ return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+}
+
+static char *
+cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon)
+{
+ int pplen = vol->prepath ? strlen(vol->prepath) : 0;
+ int dfsplen;
+ char *full_path = NULL;
+
+ /* if no prefix path, simply set path to the root of share to "" */
+ if (pplen == 0) {
+ full_path = kzalloc(1, GFP_KERNEL);
+ return full_path;
+ }
+
+ if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
+ dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
+ else
+ dfsplen = 0;
+
+ full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
+ if (full_path == NULL)
+ return full_path;
+
+ if (dfsplen)
+ strncpy(full_path, tcon->treeName, dfsplen);
+ strncpy(full_path + dfsplen, vol->prepath, pplen);
+ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
+ full_path[dfsplen + pplen] = 0; /* add trailing null */
+ return full_path;
+}
+
+static void
+cifs_clear_stats(struct cifs_tcon *tcon)
+{
+#ifdef CONFIG_CIFS_STATS
+ atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
+ atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
+#endif
+}
+
+static void
+cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+{
+#ifdef CONFIG_CIFS_STATS
+	seq_printf(m, " Oplock Breaks: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
+ seq_printf(m, "\nReads: %d Bytes: %llu",
+ atomic_read(&tcon->stats.cifs_stats.num_reads),
+ (long long)(tcon->bytes_read));
+ seq_printf(m, "\nWrites: %d Bytes: %llu",
+ atomic_read(&tcon->stats.cifs_stats.num_writes),
+ (long long)(tcon->bytes_written));
+ seq_printf(m, "\nFlushes: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_flushes));
+ seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_locks),
+ atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
+ atomic_read(&tcon->stats.cifs_stats.num_symlinks));
+ seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_opens),
+ atomic_read(&tcon->stats.cifs_stats.num_closes),
+ atomic_read(&tcon->stats.cifs_stats.num_deletes));
+ seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_posixopens),
+ atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
+ seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
+ atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
+ atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
+ seq_printf(m, "\nRenames: %d T2 Renames %d",
+ atomic_read(&tcon->stats.cifs_stats.num_renames),
+ atomic_read(&tcon->stats.cifs_stats.num_t2renames));
+ seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
+ atomic_read(&tcon->stats.cifs_stats.num_ffirst),
+ atomic_read(&tcon->stats.cifs_stats.num_fnext),
+ atomic_read(&tcon->stats.cifs_stats.num_fclose));
+#endif
+}
+
+static void
+cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
+ struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
+ const unsigned int xid)
+{
+ FILE_BASIC_INFO info;
+ struct cifsInodeInfo *cifsInode;
+ u32 dosattrs;
+ int rc;
+
+ memset(&info, 0, sizeof(info));
+ cifsInode = CIFS_I(inode);
+ dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
+ info.Attributes = cpu_to_le32(dosattrs);
+ rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (rc == 0)
+ cifsInode->cifsAttrs = dosattrs;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
.setup_request = cifs_setup_request,
+ .setup_async_request = cifs_setup_async_request,
.check_receive = cifs_check_receive,
.add_credits = cifs_add_credits,
.set_credits = cifs_set_credits,
.get_credits_field = cifs_get_credits_field,
+ .get_credits = cifs_get_credits,
.get_next_mid = cifs_get_next_mid,
.read_data_offset = cifs_read_data_offset,
.read_data_length = cifs_read_data_length,
@@ -228,7 +624,26 @@ struct smb_version_operations smb1_operations = {
.find_mid = cifs_find_mid,
.check_message = checkSMB,
.dump_detail = cifs_dump_detail,
+ .clear_stats = cifs_clear_stats,
+ .print_stats = cifs_print_stats,
.is_oplock_break = is_valid_oplock_break,
+ .check_trans2 = cifs_check_trans2,
+ .need_neg = cifs_need_neg,
+ .negotiate = cifs_negotiate,
+ .sess_setup = CIFS_SessSetup,
+ .logoff = CIFSSMBLogoff,
+ .tree_connect = CIFSTCon,
+ .tree_disconnect = CIFSSMBTDis,
+ .get_dfs_refer = CIFSGetDFSRefer,
+ .qfs_tcon = cifs_qfs_tcon,
+ .is_path_accessible = cifs_is_path_accessible,
+ .query_path_info = cifs_query_path_info,
+ .get_srv_inum = cifs_get_srv_inum,
+ .build_path_to_root = cifs_build_path_to_root,
+ .echo = CIFSSMBEcho,
+ .mkdir = CIFSSMBMkDir,
+ .mkdir_setinfo = cifs_mkdir_setinfo,
+ .rmdir = CIFSSMBRmDir,
};
struct smb_version_values smb1_values = {
@@ -240,4 +655,8 @@ struct smb_version_values smb1_values = {
.header_size = sizeof(struct smb_hdr),
.max_header_size = MAX_CIFS_HDR_SIZE,
.read_rsp_size = sizeof(READ_RSP),
+ .lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
+ .cap_unix = CAP_UNIX,
+ .cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
+ .cap_large_files = CAP_LARGE_FILES,
};
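
Note: check2ndT2() and coalesce_t2() above reassemble multi-part SMB_COM_TRANSACTION2 responses; the first reports how many payload bytes are still outstanding, the second appends each secondary data area to the primary buffer and patches the running counts. The self-contained sketch below models only that arithmetic and merge; the struct and field names (t2_rsp, total, count) are simplified stand-ins, not the on-the-wire layout.

#include <stdio.h>
#include <string.h>

/* Simplified model of a transact2 response: expected total vs. bytes carried. */
struct t2_rsp {
	unsigned short total;		/* like TotalDataCount */
	unsigned short count;		/* like DataCount in this response */
	char data[64];
};

/* Mirrors the check2ndT2() contract: 0 = complete, >0 = bytes missing, <0 = bogus. */
static int bytes_missing(const struct t2_rsp *r)
{
	if (r->total < r->count)
		return -1;
	return r->total - r->count;
}

int main(void)
{
	struct t2_rsp first  = { .total = 10, .count = 6, .data = "abcdef" };
	struct t2_rsp second = { .total = 10, .count = 4, .data = "ghij" };

	printf("missing after first response: %d\n", bytes_missing(&first));

	/* Append the secondary data area at the end of the primary one. */
	memcpy(first.data + first.count, second.data, second.count);
	first.count += second.count;
	printf("coalesced payload: %.*s (count=%u)\n",
	       (int)first.count, first.data, (unsigned)first.count);
	return 0;
}
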
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
new file mode 100644
index 000000000000..33c1d89090c0
--- /dev/null
+++ b/fs/cifs/smb2glob.h
@@ -0,0 +1,44 @@
+/*
+ * fs/cifs/smb2glob.h
+ *
+ * Definitions for various global variables and structures
+ *
+ * Copyright (C) International Business Machines Corp., 2002, 2011
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Jeremy Allison (jra@samba.org)
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ */
+#ifndef _SMB2_GLOB_H
+#define _SMB2_GLOB_H
+
+/*
+ *****************************************************************
+ * Constants go here
+ *****************************************************************
+ */
+
+/*
+ * Identifiers for functions that use the open, operation, close pattern
+ * in smb2inode.c:smb2_open_op_close()
+ */
+#define SMB2_OP_SET_DELETE 1
+#define SMB2_OP_SET_INFO 2
+#define SMB2_OP_QUERY_INFO 3
+#define SMB2_OP_QUERY_DIR 4
+#define SMB2_OP_MKDIR 5
+#define SMB2_OP_RENAME 6
+#define SMB2_OP_DELETE 7
+
+#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
new file mode 100644
index 000000000000..2aa5cb08c526
--- /dev/null
+++ b/fs/cifs/smb2inode.c
@@ -0,0 +1,163 @@
+/*
+ * fs/cifs/smb2inode.c
+ *
+ * Copyright (C) International Business Machines Corp., 2002, 2011
+ * Etersoft, 2012
+ * Author(s): Pavel Shilovsky (pshilovsky@samba.org),
+ * Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <asm/div64.h>
+#include "cifsfs.h"
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifs_fs_sb.h"
+#include "cifs_unicode.h"
+#include "fscache.h"
+#include "smb2glob.h"
+#include "smb2pdu.h"
+#include "smb2proto.h"
+
+static int
+smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ __u32 desired_access, __u32 create_disposition,
+ __u32 file_attributes, __u32 create_options,
+ void *data, int command)
+{
+ int rc, tmprc = 0;
+ u64 persistent_fid, volatile_fid;
+ __le16 *utf16_path;
+
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
+ rc = SMB2_open(xid, tcon, utf16_path, &persistent_fid, &volatile_fid,
+ desired_access, create_disposition, file_attributes,
+ create_options);
+ if (rc) {
+ kfree(utf16_path);
+ return rc;
+ }
+
+ switch (command) {
+ case SMB2_OP_DELETE:
+ break;
+ case SMB2_OP_QUERY_INFO:
+ tmprc = SMB2_query_info(xid, tcon, persistent_fid,
+ volatile_fid,
+ (struct smb2_file_all_info *)data);
+ break;
+ case SMB2_OP_MKDIR:
+ /*
+ * Directories are created through parameters in the
+ * SMB2_open() call.
+ */
+ break;
+ default:
+ cERROR(1, "Invalid command");
+ break;
+ }
+
+ rc = SMB2_close(xid, tcon, persistent_fid, volatile_fid);
+ if (tmprc)
+ rc = tmprc;
+ kfree(utf16_path);
+ return rc;
+}
+
+static void
+move_smb2_info_to_cifs(FILE_ALL_INFO *dst, struct smb2_file_all_info *src)
+{
+ memcpy(dst, src, (size_t)(&src->CurrentByteOffset) - (size_t)src);
+ dst->CurrentByteOffset = src->CurrentByteOffset;
+ dst->Mode = src->Mode;
+ dst->AlignmentRequirement = src->AlignmentRequirement;
+ dst->IndexNumber1 = 0; /* we don't use it */
+}
+
+int
+smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ FILE_ALL_INFO *data, bool *adjust_tz)
+{
+ int rc;
+ struct smb2_file_all_info *smb2_data;
+
+ *adjust_tz = false;
+
+ smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
+ GFP_KERNEL);
+ if (smb2_data == NULL)
+ return -ENOMEM;
+
+ rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
+ FILE_READ_ATTRIBUTES, FILE_OPEN, 0, 0,
+ smb2_data, SMB2_OP_QUERY_INFO);
+ if (rc)
+ goto out;
+
+ move_smb2_info_to_cifs(data, smb2_data);
+out:
+ kfree(smb2_data);
+ return rc;
+}
+
+int
+smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
+{
+ return smb2_open_op_close(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0,
+ CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
+}
+
+void
+smb2_mkdir_setinfo(struct inode *inode, const char *name,
+ struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon,
+ const unsigned int xid)
+{
+ FILE_BASIC_INFO data;
+ struct cifsInodeInfo *cifs_i;
+ u32 dosattrs;
+ int tmprc;
+
+ memset(&data, 0, sizeof(data));
+ cifs_i = CIFS_I(inode);
+ dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
+ data.Attributes = cpu_to_le32(dosattrs);
+ tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE, 0,
+ CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
+ if (tmprc == 0)
+ cifs_i->cifsAttrs = dosattrs;
+}
+
+int
+smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ struct cifs_sb_info *cifs_sb)
+{
+ return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+ 0, CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
+ NULL, SMB2_OP_DELETE);
+}
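
Note: every path-based operation in smb2inode.c funnels through smb2_open_op_close(): open the path, run the requested call against the returned handle, then close, with the operation's error taking precedence over the close status. A small sketch of that compound pattern, with hypothetical helpers (open_handle, close_handle, query_op) standing in for SMB2_open()/SMB2_close() and the per-command switch:

#include <stdio.h>

/* Hypothetical stand-ins for SMB2_open()/SMB2_close(). */
static int open_handle(const char *path, int *fid)
{
	(void)path;
	*fid = 42;
	return 0;
}

static int close_handle(int fid)
{
	(void)fid;
	return 0;
}

/* The open -> operation -> close compound used by the SMB2 path-based calls. */
static int open_op_close(const char *path, int (*op)(int fid))
{
	int fid, rc, tmprc = 0;

	rc = open_handle(path, &fid);
	if (rc)
		return rc;
	if (op)
		tmprc = op(fid);	/* e.g. query info, set info */
	rc = close_handle(fid);
	if (tmprc)
		rc = tmprc;		/* the operation's failure wins over the close */
	return rc;
}

static int query_op(int fid)
{
	printf("query info on handle %d\n", fid);
	return 0;
}

int main(void)
{
	return open_op_close("dir\\file", query_op);
}
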
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
new file mode 100644
index 000000000000..be41478acc05
--- /dev/null
+++ b/fs/cifs/smb2maperror.c
@@ -0,0 +1,2477 @@
+/*
+ *   fs/cifs/smb2maperror.c
+ *
+ * Functions which do error mapping of SMB2 status codes to POSIX errors
+ *
+ * Copyright (C) International Business Machines Corp., 2009
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/errno.h>
+#include "cifsglob.h"
+#include "cifs_debug.h"
+#include "smb2pdu.h"
+#include "smb2proto.h"
+#include "smb2status.h"
+
+struct status_to_posix_error {
+ __le32 smb2_status;
+ int posix_error;
+ char *status_string;
+};
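
The struct above pairs each SMB2 status code with a POSIX errno and a printable name; the table that follows (only partially shown in this hunk) is simply scanned for a matching status, with -EIO the natural fallback for anything unmapped. A minimal, hedged sketch of such a lookup, written to take the table and its length as parameters so it is independent of where the table is defined; the mapping function actually added later in this file may use a different signature:

/*
 * Illustrative lookup sketch, not the helper added by this patch:
 * linear scan over a status-to-errno table, defaulting to -EIO.
 */
static int
status_to_posix(const struct status_to_posix_error *map, unsigned int nr,
		__le32 smb2_status)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		if (map[i].smb2_status == smb2_status)
			return map[i].posix_error;

	return -EIO;	/* unmapped status codes default to -EIO */
}

Once the full table is in place it would be called as status_to_posix(smb2_error_map_table, ARRAY_SIZE(smb2_error_map_table), status).
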
+
+static const struct status_to_posix_error smb2_error_map_table[] = {
+ {STATUS_SUCCESS, 0, "STATUS_SUCCESS"},
+ {STATUS_WAIT_0, 0, "STATUS_WAIT_0"},
+ {STATUS_WAIT_1, -EIO, "STATUS_WAIT_1"},
+ {STATUS_WAIT_2, -EIO, "STATUS_WAIT_2"},
+ {STATUS_WAIT_3, -EIO, "STATUS_WAIT_3"},
+ {STATUS_WAIT_63, -EIO, "STATUS_WAIT_63"},
+ {STATUS_ABANDONED, -EIO, "STATUS_ABANDONED"},
+ {STATUS_ABANDONED_WAIT_0, -EIO, "STATUS_ABANDONED_WAIT_0"},
+ {STATUS_ABANDONED_WAIT_63, -EIO, "STATUS_ABANDONED_WAIT_63"},
+ {STATUS_USER_APC, -EIO, "STATUS_USER_APC"},
+ {STATUS_KERNEL_APC, -EIO, "STATUS_KERNEL_APC"},
+ {STATUS_ALERTED, -EIO, "STATUS_ALERTED"},
+ {STATUS_TIMEOUT, -ETIMEDOUT, "STATUS_TIMEOUT"},
+ {STATUS_PENDING, -EIO, "STATUS_PENDING"},
+ {STATUS_REPARSE, -EIO, "STATUS_REPARSE"},
+ {STATUS_MORE_ENTRIES, -EIO, "STATUS_MORE_ENTRIES"},
+ {STATUS_NOT_ALL_ASSIGNED, -EIO, "STATUS_NOT_ALL_ASSIGNED"},
+ {STATUS_SOME_NOT_MAPPED, -EIO, "STATUS_SOME_NOT_MAPPED"},
+ {STATUS_OPLOCK_BREAK_IN_PROGRESS, -EIO,
+ "STATUS_OPLOCK_BREAK_IN_PROGRESS"},
+ {STATUS_VOLUME_MOUNTED, -EIO, "STATUS_VOLUME_MOUNTED"},
+ {STATUS_RXACT_COMMITTED, -EIO, "STATUS_RXACT_COMMITTED"},
+ {STATUS_NOTIFY_CLEANUP, -EIO, "STATUS_NOTIFY_CLEANUP"},
+ {STATUS_NOTIFY_ENUM_DIR, -EIO, "STATUS_NOTIFY_ENUM_DIR"},
+ {STATUS_NO_QUOTAS_FOR_ACCOUNT, -EIO, "STATUS_NO_QUOTAS_FOR_ACCOUNT"},
+ {STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED, -EIO,
+ "STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED"},
+ {STATUS_PAGE_FAULT_TRANSITION, -EIO, "STATUS_PAGE_FAULT_TRANSITION"},
+ {STATUS_PAGE_FAULT_DEMAND_ZERO, -EIO, "STATUS_PAGE_FAULT_DEMAND_ZERO"},
+ {STATUS_PAGE_FAULT_COPY_ON_WRITE, -EIO,
+ "STATUS_PAGE_FAULT_COPY_ON_WRITE"},
+ {STATUS_PAGE_FAULT_GUARD_PAGE, -EIO, "STATUS_PAGE_FAULT_GUARD_PAGE"},
+ {STATUS_PAGE_FAULT_PAGING_FILE, -EIO, "STATUS_PAGE_FAULT_PAGING_FILE"},
+ {STATUS_CACHE_PAGE_LOCKED, -EIO, "STATUS_CACHE_PAGE_LOCKED"},
+ {STATUS_CRASH_DUMP, -EIO, "STATUS_CRASH_DUMP"},
+ {STATUS_BUFFER_ALL_ZEROS, -EIO, "STATUS_BUFFER_ALL_ZEROS"},
+ {STATUS_REPARSE_OBJECT, -EIO, "STATUS_REPARSE_OBJECT"},
+ {STATUS_RESOURCE_REQUIREMENTS_CHANGED, -EIO,
+ "STATUS_RESOURCE_REQUIREMENTS_CHANGED"},
+ {STATUS_TRANSLATION_COMPLETE, -EIO, "STATUS_TRANSLATION_COMPLETE"},
+ {STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY, -EIO,
+ "STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY"},
+ {STATUS_NOTHING_TO_TERMINATE, -EIO, "STATUS_NOTHING_TO_TERMINATE"},
+ {STATUS_PROCESS_NOT_IN_JOB, -EIO, "STATUS_PROCESS_NOT_IN_JOB"},
+ {STATUS_PROCESS_IN_JOB, -EIO, "STATUS_PROCESS_IN_JOB"},
+ {STATUS_VOLSNAP_HIBERNATE_READY, -EIO,
+ "STATUS_VOLSNAP_HIBERNATE_READY"},
+ {STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY, -EIO,
+ "STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY"},
+ {STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED, -EIO,
+ "STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED"},
+ {STATUS_INTERRUPT_STILL_CONNECTED, -EIO,
+ "STATUS_INTERRUPT_STILL_CONNECTED"},
+ {STATUS_PROCESS_CLONED, -EIO, "STATUS_PROCESS_CLONED"},
+ {STATUS_FILE_LOCKED_WITH_ONLY_READERS, -EIO,
+ "STATUS_FILE_LOCKED_WITH_ONLY_READERS"},
+ {STATUS_FILE_LOCKED_WITH_WRITERS, -EIO,
+ "STATUS_FILE_LOCKED_WITH_WRITERS"},
+ {STATUS_RESOURCEMANAGER_READ_ONLY, -EROFS,
+ "STATUS_RESOURCEMANAGER_READ_ONLY"},
+ {STATUS_WAIT_FOR_OPLOCK, -EIO, "STATUS_WAIT_FOR_OPLOCK"},
+ {DBG_EXCEPTION_HANDLED, -EIO, "DBG_EXCEPTION_HANDLED"},
+ {DBG_CONTINUE, -EIO, "DBG_CONTINUE"},
+ {STATUS_FLT_IO_COMPLETE, -EIO, "STATUS_FLT_IO_COMPLETE"},
+ {STATUS_OBJECT_NAME_EXISTS, -EIO, "STATUS_OBJECT_NAME_EXISTS"},
+ {STATUS_THREAD_WAS_SUSPENDED, -EIO, "STATUS_THREAD_WAS_SUSPENDED"},
+ {STATUS_WORKING_SET_LIMIT_RANGE, -EIO,
+ "STATUS_WORKING_SET_LIMIT_RANGE"},
+ {STATUS_IMAGE_NOT_AT_BASE, -EIO, "STATUS_IMAGE_NOT_AT_BASE"},
+ {STATUS_RXACT_STATE_CREATED, -EIO, "STATUS_RXACT_STATE_CREATED"},
+ {STATUS_SEGMENT_NOTIFICATION, -EIO, "STATUS_SEGMENT_NOTIFICATION"},
+ {STATUS_LOCAL_USER_SESSION_KEY, -EIO, "STATUS_LOCAL_USER_SESSION_KEY"},
+ {STATUS_BAD_CURRENT_DIRECTORY, -EIO, "STATUS_BAD_CURRENT_DIRECTORY"},
+ {STATUS_SERIAL_MORE_WRITES, -EIO, "STATUS_SERIAL_MORE_WRITES"},
+ {STATUS_REGISTRY_RECOVERED, -EIO, "STATUS_REGISTRY_RECOVERED"},
+ {STATUS_FT_READ_RECOVERY_FROM_BACKUP, -EIO,
+ "STATUS_FT_READ_RECOVERY_FROM_BACKUP"},
+ {STATUS_FT_WRITE_RECOVERY, -EIO, "STATUS_FT_WRITE_RECOVERY"},
+ {STATUS_SERIAL_COUNTER_TIMEOUT, -ETIMEDOUT,
+ "STATUS_SERIAL_COUNTER_TIMEOUT"},
+ {STATUS_NULL_LM_PASSWORD, -EIO, "STATUS_NULL_LM_PASSWORD"},
+ {STATUS_IMAGE_MACHINE_TYPE_MISMATCH, -EIO,
+ "STATUS_IMAGE_MACHINE_TYPE_MISMATCH"},
+ {STATUS_RECEIVE_PARTIAL, -EIO, "STATUS_RECEIVE_PARTIAL"},
+ {STATUS_RECEIVE_EXPEDITED, -EIO, "STATUS_RECEIVE_EXPEDITED"},
+ {STATUS_RECEIVE_PARTIAL_EXPEDITED, -EIO,
+ "STATUS_RECEIVE_PARTIAL_EXPEDITED"},
+ {STATUS_EVENT_DONE, -EIO, "STATUS_EVENT_DONE"},
+ {STATUS_EVENT_PENDING, -EIO, "STATUS_EVENT_PENDING"},
+ {STATUS_CHECKING_FILE_SYSTEM, -EIO, "STATUS_CHECKING_FILE_SYSTEM"},
+ {STATUS_FATAL_APP_EXIT, -EIO, "STATUS_FATAL_APP_EXIT"},
+ {STATUS_PREDEFINED_HANDLE, -EIO, "STATUS_PREDEFINED_HANDLE"},
+ {STATUS_WAS_UNLOCKED, -EIO, "STATUS_WAS_UNLOCKED"},
+ {STATUS_SERVICE_NOTIFICATION, -EIO, "STATUS_SERVICE_NOTIFICATION"},
+ {STATUS_WAS_LOCKED, -EIO, "STATUS_WAS_LOCKED"},
+ {STATUS_LOG_HARD_ERROR, -EIO, "STATUS_LOG_HARD_ERROR"},
+ {STATUS_ALREADY_WIN32, -EIO, "STATUS_ALREADY_WIN32"},
+ {STATUS_WX86_UNSIMULATE, -EIO, "STATUS_WX86_UNSIMULATE"},
+ {STATUS_WX86_CONTINUE, -EIO, "STATUS_WX86_CONTINUE"},
+ {STATUS_WX86_SINGLE_STEP, -EIO, "STATUS_WX86_SINGLE_STEP"},
+ {STATUS_WX86_BREAKPOINT, -EIO, "STATUS_WX86_BREAKPOINT"},
+ {STATUS_WX86_EXCEPTION_CONTINUE, -EIO,
+ "STATUS_WX86_EXCEPTION_CONTINUE"},
+ {STATUS_WX86_EXCEPTION_LASTCHANCE, -EIO,
+ "STATUS_WX86_EXCEPTION_LASTCHANCE"},
+ {STATUS_WX86_EXCEPTION_CHAIN, -EIO, "STATUS_WX86_EXCEPTION_CHAIN"},
+ {STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE, -EIO,
+ "STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE"},
+ {STATUS_NO_YIELD_PERFORMED, -EIO, "STATUS_NO_YIELD_PERFORMED"},
+ {STATUS_TIMER_RESUME_IGNORED, -EIO, "STATUS_TIMER_RESUME_IGNORED"},
+ {STATUS_ARBITRATION_UNHANDLED, -EIO, "STATUS_ARBITRATION_UNHANDLED"},
+ {STATUS_CARDBUS_NOT_SUPPORTED, -ENOSYS, "STATUS_CARDBUS_NOT_SUPPORTED"},
+ {STATUS_WX86_CREATEWX86TIB, -EIO, "STATUS_WX86_CREATEWX86TIB"},
+ {STATUS_MP_PROCESSOR_MISMATCH, -EIO, "STATUS_MP_PROCESSOR_MISMATCH"},
+ {STATUS_HIBERNATED, -EIO, "STATUS_HIBERNATED"},
+ {STATUS_RESUME_HIBERNATION, -EIO, "STATUS_RESUME_HIBERNATION"},
+ {STATUS_FIRMWARE_UPDATED, -EIO, "STATUS_FIRMWARE_UPDATED"},
+ {STATUS_DRIVERS_LEAKING_LOCKED_PAGES, -EIO,
+ "STATUS_DRIVERS_LEAKING_LOCKED_PAGES"},
+ {STATUS_MESSAGE_RETRIEVED, -EIO, "STATUS_MESSAGE_RETRIEVED"},
+ {STATUS_SYSTEM_POWERSTATE_TRANSITION, -EIO,
+ "STATUS_SYSTEM_POWERSTATE_TRANSITION"},
+ {STATUS_ALPC_CHECK_COMPLETION_LIST, -EIO,
+ "STATUS_ALPC_CHECK_COMPLETION_LIST"},
+ {STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION, -EIO,
+ "STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION"},
+ {STATUS_ACCESS_AUDIT_BY_POLICY, -EIO, "STATUS_ACCESS_AUDIT_BY_POLICY"},
+ {STATUS_ABANDON_HIBERFILE, -EIO, "STATUS_ABANDON_HIBERFILE"},
+ {STATUS_BIZRULES_NOT_ENABLED, -EIO, "STATUS_BIZRULES_NOT_ENABLED"},
+ {STATUS_WAKE_SYSTEM, -EIO, "STATUS_WAKE_SYSTEM"},
+ {STATUS_DS_SHUTTING_DOWN, -EIO, "STATUS_DS_SHUTTING_DOWN"},
+ {DBG_REPLY_LATER, -EIO, "DBG_REPLY_LATER"},
+ {DBG_UNABLE_TO_PROVIDE_HANDLE, -EIO, "DBG_UNABLE_TO_PROVIDE_HANDLE"},
+ {DBG_TERMINATE_THREAD, -EIO, "DBG_TERMINATE_THREAD"},
+ {DBG_TERMINATE_PROCESS, -EIO, "DBG_TERMINATE_PROCESS"},
+ {DBG_CONTROL_C, -EIO, "DBG_CONTROL_C"},
+ {DBG_PRINTEXCEPTION_C, -EIO, "DBG_PRINTEXCEPTION_C"},
+ {DBG_RIPEXCEPTION, -EIO, "DBG_RIPEXCEPTION"},
+ {DBG_CONTROL_BREAK, -EIO, "DBG_CONTROL_BREAK"},
+ {DBG_COMMAND_EXCEPTION, -EIO, "DBG_COMMAND_EXCEPTION"},
+ {RPC_NT_UUID_LOCAL_ONLY, -EIO, "RPC_NT_UUID_LOCAL_ONLY"},
+ {RPC_NT_SEND_INCOMPLETE, -EIO, "RPC_NT_SEND_INCOMPLETE"},
+ {STATUS_CTX_CDM_CONNECT, -EIO, "STATUS_CTX_CDM_CONNECT"},
+ {STATUS_CTX_CDM_DISCONNECT, -EIO, "STATUS_CTX_CDM_DISCONNECT"},
+ {STATUS_SXS_RELEASE_ACTIVATION_CONTEXT, -EIO,
+ "STATUS_SXS_RELEASE_ACTIVATION_CONTEXT"},
+ {STATUS_RECOVERY_NOT_NEEDED, -EIO, "STATUS_RECOVERY_NOT_NEEDED"},
+ {STATUS_RM_ALREADY_STARTED, -EIO, "STATUS_RM_ALREADY_STARTED"},
+ {STATUS_LOG_NO_RESTART, -EIO, "STATUS_LOG_NO_RESTART"},
+ {STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST, -EIO,
+ "STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST"},
+ {STATUS_GRAPHICS_PARTIAL_DATA_POPULATED, -EIO,
+ "STATUS_GRAPHICS_PARTIAL_DATA_POPULATED"},
+ {STATUS_GRAPHICS_DRIVER_MISMATCH, -EIO,
+ "STATUS_GRAPHICS_DRIVER_MISMATCH"},
+ {STATUS_GRAPHICS_MODE_NOT_PINNED, -EIO,
+ "STATUS_GRAPHICS_MODE_NOT_PINNED"},
+ {STATUS_GRAPHICS_NO_PREFERRED_MODE, -EIO,
+ "STATUS_GRAPHICS_NO_PREFERRED_MODE"},
+ {STATUS_GRAPHICS_DATASET_IS_EMPTY, -EIO,
+ "STATUS_GRAPHICS_DATASET_IS_EMPTY"},
+ {STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET, -EIO,
+ "STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET"},
+ {STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED, -EIO,
+ "STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED"},
+ {STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS, -EIO,
+ "STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS"},
+ {STATUS_GRAPHICS_LEADLINK_START_DEFERRED, -EIO,
+ "STATUS_GRAPHICS_LEADLINK_START_DEFERRED"},
+ {STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY, -EIO,
+ "STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY"},
+ {STATUS_GRAPHICS_START_DEFERRED, -EIO,
+ "STATUS_GRAPHICS_START_DEFERRED"},
+ {STATUS_NDIS_INDICATION_REQUIRED, -EIO,
+ "STATUS_NDIS_INDICATION_REQUIRED"},
+ {STATUS_GUARD_PAGE_VIOLATION, -EIO, "STATUS_GUARD_PAGE_VIOLATION"},
+ {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
+ {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
+ {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
+ {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+ {STATUS_NO_MORE_FILES, -EIO, "STATUS_NO_MORE_FILES"},
+ {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
+ {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
+ {STATUS_NO_INHERITANCE, -EIO, "STATUS_NO_INHERITANCE"},
+ {STATUS_GUID_SUBSTITUTION_MADE, -EIO, "STATUS_GUID_SUBSTITUTION_MADE"},
+ {STATUS_PARTIAL_COPY, -EIO, "STATUS_PARTIAL_COPY"},
+ {STATUS_DEVICE_PAPER_EMPTY, -EIO, "STATUS_DEVICE_PAPER_EMPTY"},
+ {STATUS_DEVICE_POWERED_OFF, -EIO, "STATUS_DEVICE_POWERED_OFF"},
+ {STATUS_DEVICE_OFF_LINE, -EIO, "STATUS_DEVICE_OFF_LINE"},
+ {STATUS_DEVICE_BUSY, -EBUSY, "STATUS_DEVICE_BUSY"},
+ {STATUS_NO_MORE_EAS, -EIO, "STATUS_NO_MORE_EAS"},
+ {STATUS_INVALID_EA_NAME, -EINVAL, "STATUS_INVALID_EA_NAME"},
+ {STATUS_EA_LIST_INCONSISTENT, -EIO, "STATUS_EA_LIST_INCONSISTENT"},
+ {STATUS_INVALID_EA_FLAG, -EINVAL, "STATUS_INVALID_EA_FLAG"},
+ {STATUS_VERIFY_REQUIRED, -EIO, "STATUS_VERIFY_REQUIRED"},
+ {STATUS_EXTRANEOUS_INFORMATION, -EIO, "STATUS_EXTRANEOUS_INFORMATION"},
+ {STATUS_RXACT_COMMIT_NECESSARY, -EIO, "STATUS_RXACT_COMMIT_NECESSARY"},
+ {STATUS_NO_MORE_ENTRIES, -EIO, "STATUS_NO_MORE_ENTRIES"},
+ {STATUS_FILEMARK_DETECTED, -EIO, "STATUS_FILEMARK_DETECTED"},
+ {STATUS_MEDIA_CHANGED, -EIO, "STATUS_MEDIA_CHANGED"},
+ {STATUS_BUS_RESET, -EIO, "STATUS_BUS_RESET"},
+ {STATUS_END_OF_MEDIA, -EIO, "STATUS_END_OF_MEDIA"},
+ {STATUS_BEGINNING_OF_MEDIA, -EIO, "STATUS_BEGINNING_OF_MEDIA"},
+ {STATUS_MEDIA_CHECK, -EIO, "STATUS_MEDIA_CHECK"},
+ {STATUS_SETMARK_DETECTED, -EIO, "STATUS_SETMARK_DETECTED"},
+ {STATUS_NO_DATA_DETECTED, -EIO, "STATUS_NO_DATA_DETECTED"},
+ {STATUS_REDIRECTOR_HAS_OPEN_HANDLES, -EIO,
+ "STATUS_REDIRECTOR_HAS_OPEN_HANDLES"},
+ {STATUS_SERVER_HAS_OPEN_HANDLES, -EIO,
+ "STATUS_SERVER_HAS_OPEN_HANDLES"},
+ {STATUS_ALREADY_DISCONNECTED, -EIO, "STATUS_ALREADY_DISCONNECTED"},
+ {STATUS_LONGJUMP, -EIO, "STATUS_LONGJUMP"},
+ {STATUS_CLEANER_CARTRIDGE_INSTALLED, -EIO,
+ "STATUS_CLEANER_CARTRIDGE_INSTALLED"},
+ {STATUS_PLUGPLAY_QUERY_VETOED, -EIO, "STATUS_PLUGPLAY_QUERY_VETOED"},
+ {STATUS_UNWIND_CONSOLIDATE, -EIO, "STATUS_UNWIND_CONSOLIDATE"},
+ {STATUS_REGISTRY_HIVE_RECOVERED, -EIO,
+ "STATUS_REGISTRY_HIVE_RECOVERED"},
+ {STATUS_DLL_MIGHT_BE_INSECURE, -EIO, "STATUS_DLL_MIGHT_BE_INSECURE"},
+ {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
+ "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
+ {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
+ {STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
+ "STATUS_DEVICE_REQUIRES_CLEANING"},
+ {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
+ {STATUS_DATA_LOST_REPAIR, -EIO, "STATUS_DATA_LOST_REPAIR"},
+ {DBG_EXCEPTION_NOT_HANDLED, -EIO, "DBG_EXCEPTION_NOT_HANDLED"},
+ {STATUS_CLUSTER_NODE_ALREADY_UP, -EIO,
+ "STATUS_CLUSTER_NODE_ALREADY_UP"},
+ {STATUS_CLUSTER_NODE_ALREADY_DOWN, -EIO,
+ "STATUS_CLUSTER_NODE_ALREADY_DOWN"},
+ {STATUS_CLUSTER_NETWORK_ALREADY_ONLINE, -EIO,
+ "STATUS_CLUSTER_NETWORK_ALREADY_ONLINE"},
+ {STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE, -EIO,
+ "STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE"},
+ {STATUS_CLUSTER_NODE_ALREADY_MEMBER, -EIO,
+ "STATUS_CLUSTER_NODE_ALREADY_MEMBER"},
+ {STATUS_COULD_NOT_RESIZE_LOG, -EIO, "STATUS_COULD_NOT_RESIZE_LOG"},
+ {STATUS_NO_TXF_METADATA, -EIO, "STATUS_NO_TXF_METADATA"},
+ {STATUS_CANT_RECOVER_WITH_HANDLE_OPEN, -EIO,
+ "STATUS_CANT_RECOVER_WITH_HANDLE_OPEN"},
+ {STATUS_TXF_METADATA_ALREADY_PRESENT, -EIO,
+ "STATUS_TXF_METADATA_ALREADY_PRESENT"},
+ {STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET, -EIO,
+ "STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET"},
+ {STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED, -EIO,
+ "STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED"},
+ {STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"},
+ {STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"},
+ {STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"},
+ {STATUS_NOT_IMPLEMENTED, -ENOSYS, "STATUS_NOT_IMPLEMENTED"},
+ {STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"},
+ {STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"},
+ {STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"},
+ {STATUS_IN_PAGE_ERROR, -EFAULT, "STATUS_IN_PAGE_ERROR"},
+ {STATUS_PAGEFILE_QUOTA, -EDQUOT, "STATUS_PAGEFILE_QUOTA"},
+ {STATUS_INVALID_HANDLE, -EBADF, "STATUS_INVALID_HANDLE"},
+ {STATUS_BAD_INITIAL_STACK, -EIO, "STATUS_BAD_INITIAL_STACK"},
+ {STATUS_BAD_INITIAL_PC, -EIO, "STATUS_BAD_INITIAL_PC"},
+ {STATUS_INVALID_CID, -EIO, "STATUS_INVALID_CID"},
+ {STATUS_TIMER_NOT_CANCELED, -EIO, "STATUS_TIMER_NOT_CANCELED"},
+ {STATUS_INVALID_PARAMETER, -EINVAL, "STATUS_INVALID_PARAMETER"},
+ {STATUS_NO_SUCH_DEVICE, -ENODEV, "STATUS_NO_SUCH_DEVICE"},
+ {STATUS_NO_SUCH_FILE, -ENOENT, "STATUS_NO_SUCH_FILE"},
+ {STATUS_INVALID_DEVICE_REQUEST, -EIO, "STATUS_INVALID_DEVICE_REQUEST"},
+ {STATUS_END_OF_FILE, -ENODATA, "STATUS_END_OF_FILE"},
+ {STATUS_WRONG_VOLUME, -EIO, "STATUS_WRONG_VOLUME"},
+ {STATUS_NO_MEDIA_IN_DEVICE, -EIO, "STATUS_NO_MEDIA_IN_DEVICE"},
+ {STATUS_UNRECOGNIZED_MEDIA, -EIO, "STATUS_UNRECOGNIZED_MEDIA"},
+ {STATUS_NONEXISTENT_SECTOR, -EIO, "STATUS_NONEXISTENT_SECTOR"},
+ {STATUS_MORE_PROCESSING_REQUIRED, -EIO,
+ "STATUS_MORE_PROCESSING_REQUIRED"},
+ {STATUS_NO_MEMORY, -ENOMEM, "STATUS_NO_MEMORY"},
+ {STATUS_CONFLICTING_ADDRESSES, -EADDRINUSE,
+ "STATUS_CONFLICTING_ADDRESSES"},
+ {STATUS_NOT_MAPPED_VIEW, -EIO, "STATUS_NOT_MAPPED_VIEW"},
+ {STATUS_UNABLE_TO_FREE_VM, -EIO, "STATUS_UNABLE_TO_FREE_VM"},
+ {STATUS_UNABLE_TO_DELETE_SECTION, -EIO,
+ "STATUS_UNABLE_TO_DELETE_SECTION"},
+ {STATUS_INVALID_SYSTEM_SERVICE, -EIO, "STATUS_INVALID_SYSTEM_SERVICE"},
+ {STATUS_ILLEGAL_INSTRUCTION, -EIO, "STATUS_ILLEGAL_INSTRUCTION"},
+ {STATUS_INVALID_LOCK_SEQUENCE, -EIO, "STATUS_INVALID_LOCK_SEQUENCE"},
+ {STATUS_INVALID_VIEW_SIZE, -EIO, "STATUS_INVALID_VIEW_SIZE"},
+ {STATUS_INVALID_FILE_FOR_SECTION, -EIO,
+ "STATUS_INVALID_FILE_FOR_SECTION"},
+ {STATUS_ALREADY_COMMITTED, -EIO, "STATUS_ALREADY_COMMITTED"},
+ {STATUS_ACCESS_DENIED, -EACCES, "STATUS_ACCESS_DENIED"},
+ {STATUS_BUFFER_TOO_SMALL, -EIO, "STATUS_BUFFER_TOO_SMALL"},
+ {STATUS_OBJECT_TYPE_MISMATCH, -EIO, "STATUS_OBJECT_TYPE_MISMATCH"},
+ {STATUS_NONCONTINUABLE_EXCEPTION, -EIO,
+ "STATUS_NONCONTINUABLE_EXCEPTION"},
+ {STATUS_INVALID_DISPOSITION, -EIO, "STATUS_INVALID_DISPOSITION"},
+ {STATUS_UNWIND, -EIO, "STATUS_UNWIND"},
+ {STATUS_BAD_STACK, -EIO, "STATUS_BAD_STACK"},
+ {STATUS_INVALID_UNWIND_TARGET, -EIO, "STATUS_INVALID_UNWIND_TARGET"},
+ {STATUS_NOT_LOCKED, -EIO, "STATUS_NOT_LOCKED"},
+ {STATUS_PARITY_ERROR, -EIO, "STATUS_PARITY_ERROR"},
+ {STATUS_UNABLE_TO_DECOMMIT_VM, -EIO, "STATUS_UNABLE_TO_DECOMMIT_VM"},
+ {STATUS_NOT_COMMITTED, -EIO, "STATUS_NOT_COMMITTED"},
+ {STATUS_INVALID_PORT_ATTRIBUTES, -EIO,
+ "STATUS_INVALID_PORT_ATTRIBUTES"},
+ {STATUS_PORT_MESSAGE_TOO_LONG, -EIO, "STATUS_PORT_MESSAGE_TOO_LONG"},
+ {STATUS_INVALID_PARAMETER_MIX, -EINVAL, "STATUS_INVALID_PARAMETER_MIX"},
+ {STATUS_INVALID_QUOTA_LOWER, -EIO, "STATUS_INVALID_QUOTA_LOWER"},
+ {STATUS_DISK_CORRUPT_ERROR, -EIO, "STATUS_DISK_CORRUPT_ERROR"},
+ {STATUS_OBJECT_NAME_INVALID, -ENOENT, "STATUS_OBJECT_NAME_INVALID"},
+ {STATUS_OBJECT_NAME_NOT_FOUND, -ENOENT, "STATUS_OBJECT_NAME_NOT_FOUND"},
+ {STATUS_OBJECT_NAME_COLLISION, -EEXIST, "STATUS_OBJECT_NAME_COLLISION"},
+ {STATUS_PORT_DISCONNECTED, -EIO, "STATUS_PORT_DISCONNECTED"},
+ {STATUS_DEVICE_ALREADY_ATTACHED, -EIO,
+ "STATUS_DEVICE_ALREADY_ATTACHED"},
+ {STATUS_OBJECT_PATH_INVALID, -ENOTDIR, "STATUS_OBJECT_PATH_INVALID"},
+ {STATUS_OBJECT_PATH_NOT_FOUND, -ENOENT, "STATUS_OBJECT_PATH_NOT_FOUND"},
+ {STATUS_OBJECT_PATH_SYNTAX_BAD, -EIO, "STATUS_OBJECT_PATH_SYNTAX_BAD"},
+ {STATUS_DATA_OVERRUN, -EIO, "STATUS_DATA_OVERRUN"},
+ {STATUS_DATA_LATE_ERROR, -EIO, "STATUS_DATA_LATE_ERROR"},
+ {STATUS_DATA_ERROR, -EIO, "STATUS_DATA_ERROR"},
+ {STATUS_CRC_ERROR, -EIO, "STATUS_CRC_ERROR"},
+ {STATUS_SECTION_TOO_BIG, -EIO, "STATUS_SECTION_TOO_BIG"},
+ {STATUS_PORT_CONNECTION_REFUSED, -ECONNREFUSED,
+ "STATUS_PORT_CONNECTION_REFUSED"},
+ {STATUS_INVALID_PORT_HANDLE, -EIO, "STATUS_INVALID_PORT_HANDLE"},
+ {STATUS_SHARING_VIOLATION, -EBUSY, "STATUS_SHARING_VIOLATION"},
+ {STATUS_QUOTA_EXCEEDED, -EDQUOT, "STATUS_QUOTA_EXCEEDED"},
+ {STATUS_INVALID_PAGE_PROTECTION, -EIO,
+ "STATUS_INVALID_PAGE_PROTECTION"},
+ {STATUS_MUTANT_NOT_OWNED, -EIO, "STATUS_MUTANT_NOT_OWNED"},
+ {STATUS_SEMAPHORE_LIMIT_EXCEEDED, -EIO,
+ "STATUS_SEMAPHORE_LIMIT_EXCEEDED"},
+ {STATUS_PORT_ALREADY_SET, -EIO, "STATUS_PORT_ALREADY_SET"},
+ {STATUS_SECTION_NOT_IMAGE, -EIO, "STATUS_SECTION_NOT_IMAGE"},
+ {STATUS_SUSPEND_COUNT_EXCEEDED, -EIO, "STATUS_SUSPEND_COUNT_EXCEEDED"},
+ {STATUS_THREAD_IS_TERMINATING, -EIO, "STATUS_THREAD_IS_TERMINATING"},
+ {STATUS_BAD_WORKING_SET_LIMIT, -EIO, "STATUS_BAD_WORKING_SET_LIMIT"},
+ {STATUS_INCOMPATIBLE_FILE_MAP, -EIO, "STATUS_INCOMPATIBLE_FILE_MAP"},
+ {STATUS_SECTION_PROTECTION, -EIO, "STATUS_SECTION_PROTECTION"},
+ {STATUS_EAS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_EAS_NOT_SUPPORTED"},
+ {STATUS_EA_TOO_LARGE, -EIO, "STATUS_EA_TOO_LARGE"},
+ {STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
+ {STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
+ {STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
+ {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
+ {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+ {STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
+ {STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
+ "STATUS_CTL_FILE_NOT_SUPPORTED"},
+ {STATUS_UNKNOWN_REVISION, -EIO, "STATUS_UNKNOWN_REVISION"},
+ {STATUS_REVISION_MISMATCH, -EIO, "STATUS_REVISION_MISMATCH"},
+ {STATUS_INVALID_OWNER, -EIO, "STATUS_INVALID_OWNER"},
+ {STATUS_INVALID_PRIMARY_GROUP, -EIO, "STATUS_INVALID_PRIMARY_GROUP"},
+ {STATUS_NO_IMPERSONATION_TOKEN, -EIO, "STATUS_NO_IMPERSONATION_TOKEN"},
+ {STATUS_CANT_DISABLE_MANDATORY, -EIO, "STATUS_CANT_DISABLE_MANDATORY"},
+ {STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"},
+ {STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"},
+ {STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"},
+ {STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"},
+ {STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"},
+ {STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"},
+ {STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"},
+ {STATUS_GROUP_EXISTS, -EIO, "STATUS_GROUP_EXISTS"},
+ {STATUS_NO_SUCH_GROUP, -EIO, "STATUS_NO_SUCH_GROUP"},
+ {STATUS_MEMBER_IN_GROUP, -EIO, "STATUS_MEMBER_IN_GROUP"},
+ {STATUS_MEMBER_NOT_IN_GROUP, -EIO, "STATUS_MEMBER_NOT_IN_GROUP"},
+ {STATUS_LAST_ADMIN, -EIO, "STATUS_LAST_ADMIN"},
+ {STATUS_WRONG_PASSWORD, -EACCES, "STATUS_WRONG_PASSWORD"},
+ {STATUS_ILL_FORMED_PASSWORD, -EINVAL, "STATUS_ILL_FORMED_PASSWORD"},
+ {STATUS_PASSWORD_RESTRICTION, -EACCES, "STATUS_PASSWORD_RESTRICTION"},
+ {STATUS_LOGON_FAILURE, -EACCES, "STATUS_LOGON_FAILURE"},
+ {STATUS_ACCOUNT_RESTRICTION, -EACCES, "STATUS_ACCOUNT_RESTRICTION"},
+ {STATUS_INVALID_LOGON_HOURS, -EACCES, "STATUS_INVALID_LOGON_HOURS"},
+ {STATUS_INVALID_WORKSTATION, -EACCES, "STATUS_INVALID_WORKSTATION"},
+ {STATUS_PASSWORD_EXPIRED, -EKEYEXPIRED, "STATUS_PASSWORD_EXPIRED"},
+ {STATUS_ACCOUNT_DISABLED, -EKEYREVOKED, "STATUS_ACCOUNT_DISABLED"},
+ {STATUS_NONE_MAPPED, -EIO, "STATUS_NONE_MAPPED"},
+ {STATUS_TOO_MANY_LUIDS_REQUESTED, -EIO,
+ "STATUS_TOO_MANY_LUIDS_REQUESTED"},
+ {STATUS_LUIDS_EXHAUSTED, -EIO, "STATUS_LUIDS_EXHAUSTED"},
+ {STATUS_INVALID_SUB_AUTHORITY, -EIO, "STATUS_INVALID_SUB_AUTHORITY"},
+ {STATUS_INVALID_ACL, -EIO, "STATUS_INVALID_ACL"},
+ {STATUS_INVALID_SID, -EIO, "STATUS_INVALID_SID"},
+ {STATUS_INVALID_SECURITY_DESCR, -EIO, "STATUS_INVALID_SECURITY_DESCR"},
+ {STATUS_PROCEDURE_NOT_FOUND, -EIO, "STATUS_PROCEDURE_NOT_FOUND"},
+ {STATUS_INVALID_IMAGE_FORMAT, -EIO, "STATUS_INVALID_IMAGE_FORMAT"},
+ {STATUS_NO_TOKEN, -EIO, "STATUS_NO_TOKEN"},
+ {STATUS_BAD_INHERITANCE_ACL, -EIO, "STATUS_BAD_INHERITANCE_ACL"},
+ {STATUS_RANGE_NOT_LOCKED, -EIO, "STATUS_RANGE_NOT_LOCKED"},
+ {STATUS_DISK_FULL, -ENOSPC, "STATUS_DISK_FULL"},
+ {STATUS_SERVER_DISABLED, -EIO, "STATUS_SERVER_DISABLED"},
+ {STATUS_SERVER_NOT_DISABLED, -EIO, "STATUS_SERVER_NOT_DISABLED"},
+ {STATUS_TOO_MANY_GUIDS_REQUESTED, -EIO,
+ "STATUS_TOO_MANY_GUIDS_REQUESTED"},
+ {STATUS_GUIDS_EXHAUSTED, -EIO, "STATUS_GUIDS_EXHAUSTED"},
+ {STATUS_INVALID_ID_AUTHORITY, -EIO, "STATUS_INVALID_ID_AUTHORITY"},
+ {STATUS_AGENTS_EXHAUSTED, -EIO, "STATUS_AGENTS_EXHAUSTED"},
+ {STATUS_INVALID_VOLUME_LABEL, -EIO, "STATUS_INVALID_VOLUME_LABEL"},
+ {STATUS_SECTION_NOT_EXTENDED, -EIO, "STATUS_SECTION_NOT_EXTENDED"},
+ {STATUS_NOT_MAPPED_DATA, -EIO, "STATUS_NOT_MAPPED_DATA"},
+ {STATUS_RESOURCE_DATA_NOT_FOUND, -EIO,
+ "STATUS_RESOURCE_DATA_NOT_FOUND"},
+ {STATUS_RESOURCE_TYPE_NOT_FOUND, -EIO,
+ "STATUS_RESOURCE_TYPE_NOT_FOUND"},
+ {STATUS_RESOURCE_NAME_NOT_FOUND, -EIO,
+ "STATUS_RESOURCE_NAME_NOT_FOUND"},
+ {STATUS_ARRAY_BOUNDS_EXCEEDED, -EIO, "STATUS_ARRAY_BOUNDS_EXCEEDED"},
+ {STATUS_FLOAT_DENORMAL_OPERAND, -EIO, "STATUS_FLOAT_DENORMAL_OPERAND"},
+ {STATUS_FLOAT_DIVIDE_BY_ZERO, -EIO, "STATUS_FLOAT_DIVIDE_BY_ZERO"},
+ {STATUS_FLOAT_INEXACT_RESULT, -EIO, "STATUS_FLOAT_INEXACT_RESULT"},
+ {STATUS_FLOAT_INVALID_OPERATION, -EIO,
+ "STATUS_FLOAT_INVALID_OPERATION"},
+ {STATUS_FLOAT_OVERFLOW, -EIO, "STATUS_FLOAT_OVERFLOW"},
+ {STATUS_FLOAT_STACK_CHECK, -EIO, "STATUS_FLOAT_STACK_CHECK"},
+ {STATUS_FLOAT_UNDERFLOW, -EIO, "STATUS_FLOAT_UNDERFLOW"},
+ {STATUS_INTEGER_DIVIDE_BY_ZERO, -EIO, "STATUS_INTEGER_DIVIDE_BY_ZERO"},
+ {STATUS_INTEGER_OVERFLOW, -EIO, "STATUS_INTEGER_OVERFLOW"},
+ {STATUS_PRIVILEGED_INSTRUCTION, -EIO, "STATUS_PRIVILEGED_INSTRUCTION"},
+ {STATUS_TOO_MANY_PAGING_FILES, -EIO, "STATUS_TOO_MANY_PAGING_FILES"},
+ {STATUS_FILE_INVALID, -EIO, "STATUS_FILE_INVALID"},
+ {STATUS_ALLOTTED_SPACE_EXCEEDED, -EIO,
+ "STATUS_ALLOTTED_SPACE_EXCEEDED"},
+ {STATUS_INSUFFICIENT_RESOURCES, -EIO, "STATUS_INSUFFICIENT_RESOURCES"},
+ {STATUS_DFS_EXIT_PATH_FOUND, -EIO, "STATUS_DFS_EXIT_PATH_FOUND"},
+ {STATUS_DEVICE_DATA_ERROR, -EIO, "STATUS_DEVICE_DATA_ERROR"},
+ {STATUS_DEVICE_NOT_CONNECTED, -EIO, "STATUS_DEVICE_NOT_CONNECTED"},
+ {STATUS_DEVICE_POWER_FAILURE, -EIO, "STATUS_DEVICE_POWER_FAILURE"},
+ {STATUS_FREE_VM_NOT_AT_BASE, -EIO, "STATUS_FREE_VM_NOT_AT_BASE"},
+ {STATUS_MEMORY_NOT_ALLOCATED, -EFAULT, "STATUS_MEMORY_NOT_ALLOCATED"},
+ {STATUS_WORKING_SET_QUOTA, -EIO, "STATUS_WORKING_SET_QUOTA"},
+ {STATUS_MEDIA_WRITE_PROTECTED, -EROFS, "STATUS_MEDIA_WRITE_PROTECTED"},
+ {STATUS_DEVICE_NOT_READY, -EIO, "STATUS_DEVICE_NOT_READY"},
+ {STATUS_INVALID_GROUP_ATTRIBUTES, -EIO,
+ "STATUS_INVALID_GROUP_ATTRIBUTES"},
+ {STATUS_BAD_IMPERSONATION_LEVEL, -EIO,
+ "STATUS_BAD_IMPERSONATION_LEVEL"},
+ {STATUS_CANT_OPEN_ANONYMOUS, -EIO, "STATUS_CANT_OPEN_ANONYMOUS"},
+ {STATUS_BAD_VALIDATION_CLASS, -EIO, "STATUS_BAD_VALIDATION_CLASS"},
+ {STATUS_BAD_TOKEN_TYPE, -EIO, "STATUS_BAD_TOKEN_TYPE"},
+ {STATUS_BAD_MASTER_BOOT_RECORD, -EIO, "STATUS_BAD_MASTER_BOOT_RECORD"},
+ {STATUS_INSTRUCTION_MISALIGNMENT, -EIO,
+ "STATUS_INSTRUCTION_MISALIGNMENT"},
+ {STATUS_INSTANCE_NOT_AVAILABLE, -EIO, "STATUS_INSTANCE_NOT_AVAILABLE"},
+ {STATUS_PIPE_NOT_AVAILABLE, -EIO, "STATUS_PIPE_NOT_AVAILABLE"},
+ {STATUS_INVALID_PIPE_STATE, -EIO, "STATUS_INVALID_PIPE_STATE"},
+ {STATUS_PIPE_BUSY, -EBUSY, "STATUS_PIPE_BUSY"},
+ {STATUS_ILLEGAL_FUNCTION, -EIO, "STATUS_ILLEGAL_FUNCTION"},
+ {STATUS_PIPE_DISCONNECTED, -EPIPE, "STATUS_PIPE_DISCONNECTED"},
+ {STATUS_PIPE_CLOSING, -EIO, "STATUS_PIPE_CLOSING"},
+ {STATUS_PIPE_CONNECTED, -EIO, "STATUS_PIPE_CONNECTED"},
+ {STATUS_PIPE_LISTENING, -EIO, "STATUS_PIPE_LISTENING"},
+ {STATUS_INVALID_READ_MODE, -EIO, "STATUS_INVALID_READ_MODE"},
+ {STATUS_IO_TIMEOUT, -ETIMEDOUT, "STATUS_IO_TIMEOUT"},
+ {STATUS_FILE_FORCED_CLOSED, -EIO, "STATUS_FILE_FORCED_CLOSED"},
+ {STATUS_PROFILING_NOT_STARTED, -EIO, "STATUS_PROFILING_NOT_STARTED"},
+ {STATUS_PROFILING_NOT_STOPPED, -EIO, "STATUS_PROFILING_NOT_STOPPED"},
+ {STATUS_COULD_NOT_INTERPRET, -EIO, "STATUS_COULD_NOT_INTERPRET"},
+ {STATUS_FILE_IS_A_DIRECTORY, -EISDIR, "STATUS_FILE_IS_A_DIRECTORY"},
+ {STATUS_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_NOT_SUPPORTED"},
+ {STATUS_REMOTE_NOT_LISTENING, -EHOSTDOWN,
+ "STATUS_REMOTE_NOT_LISTENING"},
+ {STATUS_DUPLICATE_NAME, -ENOTUNIQ, "STATUS_DUPLICATE_NAME"},
+ {STATUS_BAD_NETWORK_PATH, -EINVAL, "STATUS_BAD_NETWORK_PATH"},
+ {STATUS_NETWORK_BUSY, -EBUSY, "STATUS_NETWORK_BUSY"},
+ {STATUS_DEVICE_DOES_NOT_EXIST, -ENODEV, "STATUS_DEVICE_DOES_NOT_EXIST"},
+ {STATUS_TOO_MANY_COMMANDS, -EIO, "STATUS_TOO_MANY_COMMANDS"},
+ {STATUS_ADAPTER_HARDWARE_ERROR, -EIO, "STATUS_ADAPTER_HARDWARE_ERROR"},
+ {STATUS_INVALID_NETWORK_RESPONSE, -EIO,
+ "STATUS_INVALID_NETWORK_RESPONSE"},
+ {STATUS_UNEXPECTED_NETWORK_ERROR, -EIO,
+ "STATUS_UNEXPECTED_NETWORK_ERROR"},
+ {STATUS_BAD_REMOTE_ADAPTER, -EIO, "STATUS_BAD_REMOTE_ADAPTER"},
+ {STATUS_PRINT_QUEUE_FULL, -EIO, "STATUS_PRINT_QUEUE_FULL"},
+ {STATUS_NO_SPOOL_SPACE, -EIO, "STATUS_NO_SPOOL_SPACE"},
+ {STATUS_PRINT_CANCELLED, -EIO, "STATUS_PRINT_CANCELLED"},
+ {STATUS_NETWORK_NAME_DELETED, -EIO, "STATUS_NETWORK_NAME_DELETED"},
+ {STATUS_NETWORK_ACCESS_DENIED, -EACCES, "STATUS_NETWORK_ACCESS_DENIED"},
+ {STATUS_BAD_DEVICE_TYPE, -EIO, "STATUS_BAD_DEVICE_TYPE"},
+ {STATUS_BAD_NETWORK_NAME, -ENOENT, "STATUS_BAD_NETWORK_NAME"},
+ {STATUS_TOO_MANY_NAMES, -EIO, "STATUS_TOO_MANY_NAMES"},
+ {STATUS_TOO_MANY_SESSIONS, -EIO, "STATUS_TOO_MANY_SESSIONS"},
+ {STATUS_SHARING_PAUSED, -EIO, "STATUS_SHARING_PAUSED"},
+ {STATUS_REQUEST_NOT_ACCEPTED, -EIO, "STATUS_REQUEST_NOT_ACCEPTED"},
+ {STATUS_REDIRECTOR_PAUSED, -EIO, "STATUS_REDIRECTOR_PAUSED"},
+ {STATUS_NET_WRITE_FAULT, -EIO, "STATUS_NET_WRITE_FAULT"},
+ {STATUS_PROFILING_AT_LIMIT, -EIO, "STATUS_PROFILING_AT_LIMIT"},
+ {STATUS_NOT_SAME_DEVICE, -EXDEV, "STATUS_NOT_SAME_DEVICE"},
+ {STATUS_FILE_RENAMED, -EIO, "STATUS_FILE_RENAMED"},
+ {STATUS_VIRTUAL_CIRCUIT_CLOSED, -EIO, "STATUS_VIRTUAL_CIRCUIT_CLOSED"},
+ {STATUS_NO_SECURITY_ON_OBJECT, -EIO, "STATUS_NO_SECURITY_ON_OBJECT"},
+ {STATUS_CANT_WAIT, -EIO, "STATUS_CANT_WAIT"},
+ {STATUS_PIPE_EMPTY, -EIO, "STATUS_PIPE_EMPTY"},
+ {STATUS_CANT_ACCESS_DOMAIN_INFO, -EIO,
+ "STATUS_CANT_ACCESS_DOMAIN_INFO"},
+ {STATUS_CANT_TERMINATE_SELF, -EIO, "STATUS_CANT_TERMINATE_SELF"},
+ {STATUS_INVALID_SERVER_STATE, -EIO, "STATUS_INVALID_SERVER_STATE"},
+ {STATUS_INVALID_DOMAIN_STATE, -EIO, "STATUS_INVALID_DOMAIN_STATE"},
+ {STATUS_INVALID_DOMAIN_ROLE, -EIO, "STATUS_INVALID_DOMAIN_ROLE"},
+ {STATUS_NO_SUCH_DOMAIN, -EIO, "STATUS_NO_SUCH_DOMAIN"},
+ {STATUS_DOMAIN_EXISTS, -EIO, "STATUS_DOMAIN_EXISTS"},
+ {STATUS_DOMAIN_LIMIT_EXCEEDED, -EIO, "STATUS_DOMAIN_LIMIT_EXCEEDED"},
+ {STATUS_OPLOCK_NOT_GRANTED, -EIO, "STATUS_OPLOCK_NOT_GRANTED"},
+ {STATUS_INVALID_OPLOCK_PROTOCOL, -EIO,
+ "STATUS_INVALID_OPLOCK_PROTOCOL"},
+ {STATUS_INTERNAL_DB_CORRUPTION, -EIO, "STATUS_INTERNAL_DB_CORRUPTION"},
+ {STATUS_INTERNAL_ERROR, -EIO, "STATUS_INTERNAL_ERROR"},
+ {STATUS_GENERIC_NOT_MAPPED, -EIO, "STATUS_GENERIC_NOT_MAPPED"},
+ {STATUS_BAD_DESCRIPTOR_FORMAT, -EIO, "STATUS_BAD_DESCRIPTOR_FORMAT"},
+ {STATUS_INVALID_USER_BUFFER, -EIO, "STATUS_INVALID_USER_BUFFER"},
+ {STATUS_UNEXPECTED_IO_ERROR, -EIO, "STATUS_UNEXPECTED_IO_ERROR"},
+ {STATUS_UNEXPECTED_MM_CREATE_ERR, -EIO,
+ "STATUS_UNEXPECTED_MM_CREATE_ERR"},
+ {STATUS_UNEXPECTED_MM_MAP_ERROR, -EIO,
+ "STATUS_UNEXPECTED_MM_MAP_ERROR"},
+ {STATUS_UNEXPECTED_MM_EXTEND_ERR, -EIO,
+ "STATUS_UNEXPECTED_MM_EXTEND_ERR"},
+ {STATUS_NOT_LOGON_PROCESS, -EIO, "STATUS_NOT_LOGON_PROCESS"},
+ {STATUS_LOGON_SESSION_EXISTS, -EIO, "STATUS_LOGON_SESSION_EXISTS"},
+ {STATUS_INVALID_PARAMETER_1, -EINVAL, "STATUS_INVALID_PARAMETER_1"},
+ {STATUS_INVALID_PARAMETER_2, -EINVAL, "STATUS_INVALID_PARAMETER_2"},
+ {STATUS_INVALID_PARAMETER_3, -EINVAL, "STATUS_INVALID_PARAMETER_3"},
+ {STATUS_INVALID_PARAMETER_4, -EINVAL, "STATUS_INVALID_PARAMETER_4"},
+ {STATUS_INVALID_PARAMETER_5, -EINVAL, "STATUS_INVALID_PARAMETER_5"},
+ {STATUS_INVALID_PARAMETER_6, -EINVAL, "STATUS_INVALID_PARAMETER_6"},
+ {STATUS_INVALID_PARAMETER_7, -EINVAL, "STATUS_INVALID_PARAMETER_7"},
+ {STATUS_INVALID_PARAMETER_8, -EINVAL, "STATUS_INVALID_PARAMETER_8"},
+ {STATUS_INVALID_PARAMETER_9, -EINVAL, "STATUS_INVALID_PARAMETER_9"},
+ {STATUS_INVALID_PARAMETER_10, -EINVAL, "STATUS_INVALID_PARAMETER_10"},
+ {STATUS_INVALID_PARAMETER_11, -EINVAL, "STATUS_INVALID_PARAMETER_11"},
+ {STATUS_INVALID_PARAMETER_12, -EINVAL, "STATUS_INVALID_PARAMETER_12"},
+ {STATUS_REDIRECTOR_NOT_STARTED, -EIO, "STATUS_REDIRECTOR_NOT_STARTED"},
+ {STATUS_REDIRECTOR_STARTED, -EIO, "STATUS_REDIRECTOR_STARTED"},
+ {STATUS_STACK_OVERFLOW, -EIO, "STATUS_STACK_OVERFLOW"},
+ {STATUS_NO_SUCH_PACKAGE, -EIO, "STATUS_NO_SUCH_PACKAGE"},
+ {STATUS_BAD_FUNCTION_TABLE, -EIO, "STATUS_BAD_FUNCTION_TABLE"},
+ {STATUS_VARIABLE_NOT_FOUND, -EIO, "STATUS_VARIABLE_NOT_FOUND"},
+ {STATUS_DIRECTORY_NOT_EMPTY, -ENOTEMPTY, "STATUS_DIRECTORY_NOT_EMPTY"},
+ {STATUS_FILE_CORRUPT_ERROR, -EIO, "STATUS_FILE_CORRUPT_ERROR"},
+ {STATUS_NOT_A_DIRECTORY, -ENOTDIR, "STATUS_NOT_A_DIRECTORY"},
+ {STATUS_BAD_LOGON_SESSION_STATE, -EIO,
+ "STATUS_BAD_LOGON_SESSION_STATE"},
+ {STATUS_LOGON_SESSION_COLLISION, -EIO,
+ "STATUS_LOGON_SESSION_COLLISION"},
+ {STATUS_NAME_TOO_LONG, -ENAMETOOLONG, "STATUS_NAME_TOO_LONG"},
+ {STATUS_FILES_OPEN, -EIO, "STATUS_FILES_OPEN"},
+ {STATUS_CONNECTION_IN_USE, -EIO, "STATUS_CONNECTION_IN_USE"},
+ {STATUS_MESSAGE_NOT_FOUND, -EIO, "STATUS_MESSAGE_NOT_FOUND"},
+ {STATUS_PROCESS_IS_TERMINATING, -EIO, "STATUS_PROCESS_IS_TERMINATING"},
+ {STATUS_INVALID_LOGON_TYPE, -EIO, "STATUS_INVALID_LOGON_TYPE"},
+ {STATUS_NO_GUID_TRANSLATION, -EIO, "STATUS_NO_GUID_TRANSLATION"},
+ {STATUS_CANNOT_IMPERSONATE, -EIO, "STATUS_CANNOT_IMPERSONATE"},
+ {STATUS_IMAGE_ALREADY_LOADED, -EIO, "STATUS_IMAGE_ALREADY_LOADED"},
+ {STATUS_ABIOS_NOT_PRESENT, -EIO, "STATUS_ABIOS_NOT_PRESENT"},
+ {STATUS_ABIOS_LID_NOT_EXIST, -EIO, "STATUS_ABIOS_LID_NOT_EXIST"},
+ {STATUS_ABIOS_LID_ALREADY_OWNED, -EIO,
+ "STATUS_ABIOS_LID_ALREADY_OWNED"},
+ {STATUS_ABIOS_NOT_LID_OWNER, -EIO, "STATUS_ABIOS_NOT_LID_OWNER"},
+ {STATUS_ABIOS_INVALID_COMMAND, -EIO, "STATUS_ABIOS_INVALID_COMMAND"},
+ {STATUS_ABIOS_INVALID_LID, -EIO, "STATUS_ABIOS_INVALID_LID"},
+ {STATUS_ABIOS_SELECTOR_NOT_AVAILABLE, -EIO,
+ "STATUS_ABIOS_SELECTOR_NOT_AVAILABLE"},
+ {STATUS_ABIOS_INVALID_SELECTOR, -EIO, "STATUS_ABIOS_INVALID_SELECTOR"},
+ {STATUS_NO_LDT, -EIO, "STATUS_NO_LDT"},
+ {STATUS_INVALID_LDT_SIZE, -EIO, "STATUS_INVALID_LDT_SIZE"},
+ {STATUS_INVALID_LDT_OFFSET, -EIO, "STATUS_INVALID_LDT_OFFSET"},
+ {STATUS_INVALID_LDT_DESCRIPTOR, -EIO, "STATUS_INVALID_LDT_DESCRIPTOR"},
+ {STATUS_INVALID_IMAGE_NE_FORMAT, -EIO,
+ "STATUS_INVALID_IMAGE_NE_FORMAT"},
+ {STATUS_RXACT_INVALID_STATE, -EIO, "STATUS_RXACT_INVALID_STATE"},
+ {STATUS_RXACT_COMMIT_FAILURE, -EIO, "STATUS_RXACT_COMMIT_FAILURE"},
+ {STATUS_MAPPED_FILE_SIZE_ZERO, -EIO, "STATUS_MAPPED_FILE_SIZE_ZERO"},
+ {STATUS_TOO_MANY_OPENED_FILES, -EMFILE, "STATUS_TOO_MANY_OPENED_FILES"},
+ {STATUS_CANCELLED, -EIO, "STATUS_CANCELLED"},
+ {STATUS_CANNOT_DELETE, -EIO, "STATUS_CANNOT_DELETE"},
+ {STATUS_INVALID_COMPUTER_NAME, -EIO, "STATUS_INVALID_COMPUTER_NAME"},
+ {STATUS_FILE_DELETED, -EIO, "STATUS_FILE_DELETED"},
+ {STATUS_SPECIAL_ACCOUNT, -EIO, "STATUS_SPECIAL_ACCOUNT"},
+ {STATUS_SPECIAL_GROUP, -EIO, "STATUS_SPECIAL_GROUP"},
+ {STATUS_SPECIAL_USER, -EIO, "STATUS_SPECIAL_USER"},
+ {STATUS_MEMBERS_PRIMARY_GROUP, -EIO, "STATUS_MEMBERS_PRIMARY_GROUP"},
+ {STATUS_FILE_CLOSED, -EBADF, "STATUS_FILE_CLOSED"},
+ {STATUS_TOO_MANY_THREADS, -EIO, "STATUS_TOO_MANY_THREADS"},
+ {STATUS_THREAD_NOT_IN_PROCESS, -EIO, "STATUS_THREAD_NOT_IN_PROCESS"},
+ {STATUS_TOKEN_ALREADY_IN_USE, -EIO, "STATUS_TOKEN_ALREADY_IN_USE"},
+ {STATUS_PAGEFILE_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_PAGEFILE_QUOTA_EXCEEDED"},
+ {STATUS_COMMITMENT_LIMIT, -EIO, "STATUS_COMMITMENT_LIMIT"},
+ {STATUS_INVALID_IMAGE_LE_FORMAT, -EIO,
+ "STATUS_INVALID_IMAGE_LE_FORMAT"},
+ {STATUS_INVALID_IMAGE_NOT_MZ, -EIO, "STATUS_INVALID_IMAGE_NOT_MZ"},
+ {STATUS_INVALID_IMAGE_PROTECT, -EIO, "STATUS_INVALID_IMAGE_PROTECT"},
+ {STATUS_INVALID_IMAGE_WIN_16, -EIO, "STATUS_INVALID_IMAGE_WIN_16"},
+ {STATUS_LOGON_SERVER_CONFLICT, -EIO, "STATUS_LOGON_SERVER_CONFLICT"},
+ {STATUS_TIME_DIFFERENCE_AT_DC, -EIO, "STATUS_TIME_DIFFERENCE_AT_DC"},
+ {STATUS_SYNCHRONIZATION_REQUIRED, -EIO,
+ "STATUS_SYNCHRONIZATION_REQUIRED"},
+ {STATUS_DLL_NOT_FOUND, -ENOENT, "STATUS_DLL_NOT_FOUND"},
+ {STATUS_OPEN_FAILED, -EIO, "STATUS_OPEN_FAILED"},
+ {STATUS_IO_PRIVILEGE_FAILED, -EIO, "STATUS_IO_PRIVILEGE_FAILED"},
+ {STATUS_ORDINAL_NOT_FOUND, -EIO, "STATUS_ORDINAL_NOT_FOUND"},
+ {STATUS_ENTRYPOINT_NOT_FOUND, -EIO, "STATUS_ENTRYPOINT_NOT_FOUND"},
+ {STATUS_CONTROL_C_EXIT, -EIO, "STATUS_CONTROL_C_EXIT"},
+ {STATUS_LOCAL_DISCONNECT, -EIO, "STATUS_LOCAL_DISCONNECT"},
+ {STATUS_REMOTE_DISCONNECT, -ESHUTDOWN, "STATUS_REMOTE_DISCONNECT"},
+ {STATUS_REMOTE_RESOURCES, -EIO, "STATUS_REMOTE_RESOURCES"},
+ {STATUS_LINK_FAILED, -EXDEV, "STATUS_LINK_FAILED"},
+ {STATUS_LINK_TIMEOUT, -ETIMEDOUT, "STATUS_LINK_TIMEOUT"},
+ {STATUS_INVALID_CONNECTION, -EIO, "STATUS_INVALID_CONNECTION"},
+ {STATUS_INVALID_ADDRESS, -EIO, "STATUS_INVALID_ADDRESS"},
+ {STATUS_DLL_INIT_FAILED, -EIO, "STATUS_DLL_INIT_FAILED"},
+ {STATUS_MISSING_SYSTEMFILE, -EIO, "STATUS_MISSING_SYSTEMFILE"},
+ {STATUS_UNHANDLED_EXCEPTION, -EIO, "STATUS_UNHANDLED_EXCEPTION"},
+ {STATUS_APP_INIT_FAILURE, -EIO, "STATUS_APP_INIT_FAILURE"},
+ {STATUS_PAGEFILE_CREATE_FAILED, -EIO, "STATUS_PAGEFILE_CREATE_FAILED"},
+ {STATUS_NO_PAGEFILE, -EIO, "STATUS_NO_PAGEFILE"},
+ {STATUS_INVALID_LEVEL, -EIO, "STATUS_INVALID_LEVEL"},
+ {STATUS_WRONG_PASSWORD_CORE, -EIO, "STATUS_WRONG_PASSWORD_CORE"},
+ {STATUS_ILLEGAL_FLOAT_CONTEXT, -EIO, "STATUS_ILLEGAL_FLOAT_CONTEXT"},
+ {STATUS_PIPE_BROKEN, -EPIPE, "STATUS_PIPE_BROKEN"},
+ {STATUS_REGISTRY_CORRUPT, -EIO, "STATUS_REGISTRY_CORRUPT"},
+ {STATUS_REGISTRY_IO_FAILED, -EIO, "STATUS_REGISTRY_IO_FAILED"},
+ {STATUS_NO_EVENT_PAIR, -EIO, "STATUS_NO_EVENT_PAIR"},
+ {STATUS_UNRECOGNIZED_VOLUME, -EIO, "STATUS_UNRECOGNIZED_VOLUME"},
+ {STATUS_SERIAL_NO_DEVICE_INITED, -EIO,
+ "STATUS_SERIAL_NO_DEVICE_INITED"},
+ {STATUS_NO_SUCH_ALIAS, -EIO, "STATUS_NO_SUCH_ALIAS"},
+ {STATUS_MEMBER_NOT_IN_ALIAS, -EIO, "STATUS_MEMBER_NOT_IN_ALIAS"},
+ {STATUS_MEMBER_IN_ALIAS, -EIO, "STATUS_MEMBER_IN_ALIAS"},
+ {STATUS_ALIAS_EXISTS, -EIO, "STATUS_ALIAS_EXISTS"},
+ {STATUS_LOGON_NOT_GRANTED, -EIO, "STATUS_LOGON_NOT_GRANTED"},
+ {STATUS_TOO_MANY_SECRETS, -EIO, "STATUS_TOO_MANY_SECRETS"},
+ {STATUS_SECRET_TOO_LONG, -EIO, "STATUS_SECRET_TOO_LONG"},
+ {STATUS_INTERNAL_DB_ERROR, -EIO, "STATUS_INTERNAL_DB_ERROR"},
+ {STATUS_FULLSCREEN_MODE, -EIO, "STATUS_FULLSCREEN_MODE"},
+ {STATUS_TOO_MANY_CONTEXT_IDS, -EIO, "STATUS_TOO_MANY_CONTEXT_IDS"},
+ {STATUS_LOGON_TYPE_NOT_GRANTED, -EIO, "STATUS_LOGON_TYPE_NOT_GRANTED"},
+ {STATUS_NOT_REGISTRY_FILE, -EIO, "STATUS_NOT_REGISTRY_FILE"},
+ {STATUS_NT_CROSS_ENCRYPTION_REQUIRED, -EIO,
+ "STATUS_NT_CROSS_ENCRYPTION_REQUIRED"},
+ {STATUS_DOMAIN_CTRLR_CONFIG_ERROR, -EIO,
+ "STATUS_DOMAIN_CTRLR_CONFIG_ERROR"},
+ {STATUS_FT_MISSING_MEMBER, -EIO, "STATUS_FT_MISSING_MEMBER"},
+ {STATUS_ILL_FORMED_SERVICE_ENTRY, -EIO,
+ "STATUS_ILL_FORMED_SERVICE_ENTRY"},
+ {STATUS_ILLEGAL_CHARACTER, -EIO, "STATUS_ILLEGAL_CHARACTER"},
+ {STATUS_UNMAPPABLE_CHARACTER, -EIO, "STATUS_UNMAPPABLE_CHARACTER"},
+ {STATUS_UNDEFINED_CHARACTER, -EIO, "STATUS_UNDEFINED_CHARACTER"},
+ {STATUS_FLOPPY_VOLUME, -EIO, "STATUS_FLOPPY_VOLUME"},
+ {STATUS_FLOPPY_ID_MARK_NOT_FOUND, -EIO,
+ "STATUS_FLOPPY_ID_MARK_NOT_FOUND"},
+ {STATUS_FLOPPY_WRONG_CYLINDER, -EIO, "STATUS_FLOPPY_WRONG_CYLINDER"},
+ {STATUS_FLOPPY_UNKNOWN_ERROR, -EIO, "STATUS_FLOPPY_UNKNOWN_ERROR"},
+ {STATUS_FLOPPY_BAD_REGISTERS, -EIO, "STATUS_FLOPPY_BAD_REGISTERS"},
+ {STATUS_DISK_RECALIBRATE_FAILED, -EIO,
+ "STATUS_DISK_RECALIBRATE_FAILED"},
+ {STATUS_DISK_OPERATION_FAILED, -EIO, "STATUS_DISK_OPERATION_FAILED"},
+ {STATUS_DISK_RESET_FAILED, -EIO, "STATUS_DISK_RESET_FAILED"},
+ {STATUS_SHARED_IRQ_BUSY, -EBUSY, "STATUS_SHARED_IRQ_BUSY"},
+ {STATUS_FT_ORPHANING, -EIO, "STATUS_FT_ORPHANING"},
+ {STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT, -EIO,
+ "STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT"},
+ {STATUS_PARTITION_FAILURE, -EIO, "STATUS_PARTITION_FAILURE"},
+ {STATUS_INVALID_BLOCK_LENGTH, -EIO, "STATUS_INVALID_BLOCK_LENGTH"},
+ {STATUS_DEVICE_NOT_PARTITIONED, -EIO, "STATUS_DEVICE_NOT_PARTITIONED"},
+ {STATUS_UNABLE_TO_LOCK_MEDIA, -EIO, "STATUS_UNABLE_TO_LOCK_MEDIA"},
+ {STATUS_UNABLE_TO_UNLOAD_MEDIA, -EIO, "STATUS_UNABLE_TO_UNLOAD_MEDIA"},
+ {STATUS_EOM_OVERFLOW, -EIO, "STATUS_EOM_OVERFLOW"},
+ {STATUS_NO_MEDIA, -EIO, "STATUS_NO_MEDIA"},
+ {STATUS_NO_SUCH_MEMBER, -EIO, "STATUS_NO_SUCH_MEMBER"},
+ {STATUS_INVALID_MEMBER, -EIO, "STATUS_INVALID_MEMBER"},
+ {STATUS_KEY_DELETED, -EIO, "STATUS_KEY_DELETED"},
+ {STATUS_NO_LOG_SPACE, -EIO, "STATUS_NO_LOG_SPACE"},
+ {STATUS_TOO_MANY_SIDS, -EIO, "STATUS_TOO_MANY_SIDS"},
+ {STATUS_LM_CROSS_ENCRYPTION_REQUIRED, -EIO,
+ "STATUS_LM_CROSS_ENCRYPTION_REQUIRED"},
+ {STATUS_KEY_HAS_CHILDREN, -EIO, "STATUS_KEY_HAS_CHILDREN"},
+ {STATUS_CHILD_MUST_BE_VOLATILE, -EIO, "STATUS_CHILD_MUST_BE_VOLATILE"},
+ {STATUS_DEVICE_CONFIGURATION_ERROR, -EIO,
+ "STATUS_DEVICE_CONFIGURATION_ERROR"},
+ {STATUS_DRIVER_INTERNAL_ERROR, -EIO, "STATUS_DRIVER_INTERNAL_ERROR"},
+ {STATUS_INVALID_DEVICE_STATE, -EIO, "STATUS_INVALID_DEVICE_STATE"},
+ {STATUS_IO_DEVICE_ERROR, -EIO, "STATUS_IO_DEVICE_ERROR"},
+ {STATUS_DEVICE_PROTOCOL_ERROR, -EIO, "STATUS_DEVICE_PROTOCOL_ERROR"},
+ {STATUS_BACKUP_CONTROLLER, -EIO, "STATUS_BACKUP_CONTROLLER"},
+ {STATUS_LOG_FILE_FULL, -EIO, "STATUS_LOG_FILE_FULL"},
+ {STATUS_TOO_LATE, -EIO, "STATUS_TOO_LATE"},
+ {STATUS_NO_TRUST_LSA_SECRET, -EIO, "STATUS_NO_TRUST_LSA_SECRET"},
+ {STATUS_NO_TRUST_SAM_ACCOUNT, -EIO, "STATUS_NO_TRUST_SAM_ACCOUNT"},
+ {STATUS_TRUSTED_DOMAIN_FAILURE, -EIO, "STATUS_TRUSTED_DOMAIN_FAILURE"},
+ {STATUS_TRUSTED_RELATIONSHIP_FAILURE, -EIO,
+ "STATUS_TRUSTED_RELATIONSHIP_FAILURE"},
+ {STATUS_EVENTLOG_FILE_CORRUPT, -EIO, "STATUS_EVENTLOG_FILE_CORRUPT"},
+ {STATUS_EVENTLOG_CANT_START, -EIO, "STATUS_EVENTLOG_CANT_START"},
+ {STATUS_TRUST_FAILURE, -EIO, "STATUS_TRUST_FAILURE"},
+ {STATUS_MUTANT_LIMIT_EXCEEDED, -EIO, "STATUS_MUTANT_LIMIT_EXCEEDED"},
+ {STATUS_NETLOGON_NOT_STARTED, -EIO, "STATUS_NETLOGON_NOT_STARTED"},
+ {STATUS_ACCOUNT_EXPIRED, -EKEYEXPIRED, "STATUS_ACCOUNT_EXPIRED"},
+ {STATUS_POSSIBLE_DEADLOCK, -EIO, "STATUS_POSSIBLE_DEADLOCK"},
+ {STATUS_NETWORK_CREDENTIAL_CONFLICT, -EIO,
+ "STATUS_NETWORK_CREDENTIAL_CONFLICT"},
+ {STATUS_REMOTE_SESSION_LIMIT, -EIO, "STATUS_REMOTE_SESSION_LIMIT"},
+ {STATUS_EVENTLOG_FILE_CHANGED, -EIO, "STATUS_EVENTLOG_FILE_CHANGED"},
+ {STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT, -EIO,
+ "STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT"},
+ {STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT, -EIO,
+ "STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT"},
+ {STATUS_NOLOGON_SERVER_TRUST_ACCOUNT, -EIO,
+ "STATUS_NOLOGON_SERVER_TRUST_ACCOUNT"},
+ {STATUS_DOMAIN_TRUST_INCONSISTENT, -EIO,
+ "STATUS_DOMAIN_TRUST_INCONSISTENT"},
+ {STATUS_FS_DRIVER_REQUIRED, -EIO, "STATUS_FS_DRIVER_REQUIRED"},
+ {STATUS_IMAGE_ALREADY_LOADED_AS_DLL, -EIO,
+ "STATUS_IMAGE_ALREADY_LOADED_AS_DLL"},
+ {STATUS_NETWORK_OPEN_RESTRICTION, -EIO,
+ "STATUS_NETWORK_OPEN_RESTRICTION"},
+ {STATUS_NO_USER_SESSION_KEY, -EIO, "STATUS_NO_USER_SESSION_KEY"},
+ {STATUS_USER_SESSION_DELETED, -EIO, "STATUS_USER_SESSION_DELETED"},
+ {STATUS_RESOURCE_LANG_NOT_FOUND, -EIO,
+ "STATUS_RESOURCE_LANG_NOT_FOUND"},
+ {STATUS_INSUFF_SERVER_RESOURCES, -EIO,
+ "STATUS_INSUFF_SERVER_RESOURCES"},
+ {STATUS_INVALID_BUFFER_SIZE, -EIO, "STATUS_INVALID_BUFFER_SIZE"},
+ {STATUS_INVALID_ADDRESS_COMPONENT, -EIO,
+ "STATUS_INVALID_ADDRESS_COMPONENT"},
+ {STATUS_INVALID_ADDRESS_WILDCARD, -EIO,
+ "STATUS_INVALID_ADDRESS_WILDCARD"},
+ {STATUS_TOO_MANY_ADDRESSES, -EIO, "STATUS_TOO_MANY_ADDRESSES"},
+ {STATUS_ADDRESS_ALREADY_EXISTS, -EADDRINUSE,
+ "STATUS_ADDRESS_ALREADY_EXISTS"},
+ {STATUS_ADDRESS_CLOSED, -EIO, "STATUS_ADDRESS_CLOSED"},
+ {STATUS_CONNECTION_DISCONNECTED, -ECONNABORTED,
+ "STATUS_CONNECTION_DISCONNECTED"},
+ {STATUS_CONNECTION_RESET, -ENETRESET, "STATUS_CONNECTION_RESET"},
+ {STATUS_TOO_MANY_NODES, -EIO, "STATUS_TOO_MANY_NODES"},
+ {STATUS_TRANSACTION_ABORTED, -EIO, "STATUS_TRANSACTION_ABORTED"},
+ {STATUS_TRANSACTION_TIMED_OUT, -EIO, "STATUS_TRANSACTION_TIMED_OUT"},
+ {STATUS_TRANSACTION_NO_RELEASE, -EIO, "STATUS_TRANSACTION_NO_RELEASE"},
+ {STATUS_TRANSACTION_NO_MATCH, -EIO, "STATUS_TRANSACTION_NO_MATCH"},
+ {STATUS_TRANSACTION_RESPONDED, -EIO, "STATUS_TRANSACTION_RESPONDED"},
+ {STATUS_TRANSACTION_INVALID_ID, -EIO, "STATUS_TRANSACTION_INVALID_ID"},
+ {STATUS_TRANSACTION_INVALID_TYPE, -EIO,
+ "STATUS_TRANSACTION_INVALID_TYPE"},
+ {STATUS_NOT_SERVER_SESSION, -EIO, "STATUS_NOT_SERVER_SESSION"},
+ {STATUS_NOT_CLIENT_SESSION, -EIO, "STATUS_NOT_CLIENT_SESSION"},
+ {STATUS_CANNOT_LOAD_REGISTRY_FILE, -EIO,
+ "STATUS_CANNOT_LOAD_REGISTRY_FILE"},
+ {STATUS_DEBUG_ATTACH_FAILED, -EIO, "STATUS_DEBUG_ATTACH_FAILED"},
+ {STATUS_SYSTEM_PROCESS_TERMINATED, -EIO,
+ "STATUS_SYSTEM_PROCESS_TERMINATED"},
+ {STATUS_DATA_NOT_ACCEPTED, -EIO, "STATUS_DATA_NOT_ACCEPTED"},
+ {STATUS_NO_BROWSER_SERVERS_FOUND, -EIO,
+ "STATUS_NO_BROWSER_SERVERS_FOUND"},
+ {STATUS_VDM_HARD_ERROR, -EIO, "STATUS_VDM_HARD_ERROR"},
+ {STATUS_DRIVER_CANCEL_TIMEOUT, -EIO, "STATUS_DRIVER_CANCEL_TIMEOUT"},
+ {STATUS_REPLY_MESSAGE_MISMATCH, -EIO, "STATUS_REPLY_MESSAGE_MISMATCH"},
+ {STATUS_MAPPED_ALIGNMENT, -EIO, "STATUS_MAPPED_ALIGNMENT"},
+ {STATUS_IMAGE_CHECKSUM_MISMATCH, -EIO,
+ "STATUS_IMAGE_CHECKSUM_MISMATCH"},
+ {STATUS_LOST_WRITEBEHIND_DATA, -EIO, "STATUS_LOST_WRITEBEHIND_DATA"},
+ {STATUS_CLIENT_SERVER_PARAMETERS_INVALID, -EIO,
+ "STATUS_CLIENT_SERVER_PARAMETERS_INVALID"},
+ {STATUS_PASSWORD_MUST_CHANGE, -EIO, "STATUS_PASSWORD_MUST_CHANGE"},
+ {STATUS_NOT_FOUND, -ENOENT, "STATUS_NOT_FOUND"},
+ {STATUS_NOT_TINY_STREAM, -EIO, "STATUS_NOT_TINY_STREAM"},
+ {STATUS_RECOVERY_FAILURE, -EIO, "STATUS_RECOVERY_FAILURE"},
+ {STATUS_STACK_OVERFLOW_READ, -EIO, "STATUS_STACK_OVERFLOW_READ"},
+ {STATUS_FAIL_CHECK, -EIO, "STATUS_FAIL_CHECK"},
+ {STATUS_DUPLICATE_OBJECTID, -EIO, "STATUS_DUPLICATE_OBJECTID"},
+ {STATUS_OBJECTID_EXISTS, -EIO, "STATUS_OBJECTID_EXISTS"},
+ {STATUS_CONVERT_TO_LARGE, -EIO, "STATUS_CONVERT_TO_LARGE"},
+ {STATUS_RETRY, -EAGAIN, "STATUS_RETRY"},
+ {STATUS_FOUND_OUT_OF_SCOPE, -EIO, "STATUS_FOUND_OUT_OF_SCOPE"},
+ {STATUS_ALLOCATE_BUCKET, -EIO, "STATUS_ALLOCATE_BUCKET"},
+ {STATUS_PROPSET_NOT_FOUND, -EIO, "STATUS_PROPSET_NOT_FOUND"},
+ {STATUS_MARSHALL_OVERFLOW, -EIO, "STATUS_MARSHALL_OVERFLOW"},
+ {STATUS_INVALID_VARIANT, -EIO, "STATUS_INVALID_VARIANT"},
+ {STATUS_DOMAIN_CONTROLLER_NOT_FOUND, -EIO,
+ "STATUS_DOMAIN_CONTROLLER_NOT_FOUND"},
+ {STATUS_ACCOUNT_LOCKED_OUT, -EIO, "STATUS_ACCOUNT_LOCKED_OUT"},
+ {STATUS_HANDLE_NOT_CLOSABLE, -EIO, "STATUS_HANDLE_NOT_CLOSABLE"},
+ {STATUS_CONNECTION_REFUSED, -EIO, "STATUS_CONNECTION_REFUSED"},
+ {STATUS_GRACEFUL_DISCONNECT, -EIO, "STATUS_GRACEFUL_DISCONNECT"},
+ {STATUS_ADDRESS_ALREADY_ASSOCIATED, -EIO,
+ "STATUS_ADDRESS_ALREADY_ASSOCIATED"},
+ {STATUS_ADDRESS_NOT_ASSOCIATED, -EIO, "STATUS_ADDRESS_NOT_ASSOCIATED"},
+ {STATUS_CONNECTION_INVALID, -EIO, "STATUS_CONNECTION_INVALID"},
+ {STATUS_CONNECTION_ACTIVE, -EIO, "STATUS_CONNECTION_ACTIVE"},
+ {STATUS_NETWORK_UNREACHABLE, -ENETUNREACH,
+ "STATUS_NETWORK_UNREACHABLE"},
+ {STATUS_HOST_UNREACHABLE, -EHOSTDOWN, "STATUS_HOST_UNREACHABLE"},
+ {STATUS_PROTOCOL_UNREACHABLE, -ENETUNREACH,
+ "STATUS_PROTOCOL_UNREACHABLE"},
+ {STATUS_PORT_UNREACHABLE, -ENETUNREACH, "STATUS_PORT_UNREACHABLE"},
+ {STATUS_REQUEST_ABORTED, -EIO, "STATUS_REQUEST_ABORTED"},
+ {STATUS_CONNECTION_ABORTED, -ECONNABORTED, "STATUS_CONNECTION_ABORTED"},
+ {STATUS_BAD_COMPRESSION_BUFFER, -EIO, "STATUS_BAD_COMPRESSION_BUFFER"},
+ {STATUS_USER_MAPPED_FILE, -EIO, "STATUS_USER_MAPPED_FILE"},
+ {STATUS_AUDIT_FAILED, -EIO, "STATUS_AUDIT_FAILED"},
+ {STATUS_TIMER_RESOLUTION_NOT_SET, -EIO,
+ "STATUS_TIMER_RESOLUTION_NOT_SET"},
+ {STATUS_CONNECTION_COUNT_LIMIT, -EIO, "STATUS_CONNECTION_COUNT_LIMIT"},
+ {STATUS_LOGIN_TIME_RESTRICTION, -EACCES,
+ "STATUS_LOGIN_TIME_RESTRICTION"},
+ {STATUS_LOGIN_WKSTA_RESTRICTION, -EACCES,
+ "STATUS_LOGIN_WKSTA_RESTRICTION"},
+ {STATUS_IMAGE_MP_UP_MISMATCH, -EIO, "STATUS_IMAGE_MP_UP_MISMATCH"},
+ {STATUS_INSUFFICIENT_LOGON_INFO, -EIO,
+ "STATUS_INSUFFICIENT_LOGON_INFO"},
+ {STATUS_BAD_DLL_ENTRYPOINT, -EIO, "STATUS_BAD_DLL_ENTRYPOINT"},
+ {STATUS_BAD_SERVICE_ENTRYPOINT, -EIO, "STATUS_BAD_SERVICE_ENTRYPOINT"},
+ {STATUS_LPC_REPLY_LOST, -EIO, "STATUS_LPC_REPLY_LOST"},
+ {STATUS_IP_ADDRESS_CONFLICT1, -EIO, "STATUS_IP_ADDRESS_CONFLICT1"},
+ {STATUS_IP_ADDRESS_CONFLICT2, -EIO, "STATUS_IP_ADDRESS_CONFLICT2"},
+ {STATUS_REGISTRY_QUOTA_LIMIT, -EDQUOT, "STATUS_REGISTRY_QUOTA_LIMIT"},
+ {STATUS_PATH_NOT_COVERED, -EREMOTE, "STATUS_PATH_NOT_COVERED"},
+ {STATUS_NO_CALLBACK_ACTIVE, -EIO, "STATUS_NO_CALLBACK_ACTIVE"},
+ {STATUS_LICENSE_QUOTA_EXCEEDED, -EACCES,
+ "STATUS_LICENSE_QUOTA_EXCEEDED"},
+ {STATUS_PWD_TOO_SHORT, -EIO, "STATUS_PWD_TOO_SHORT"},
+ {STATUS_PWD_TOO_RECENT, -EIO, "STATUS_PWD_TOO_RECENT"},
+ {STATUS_PWD_HISTORY_CONFLICT, -EIO, "STATUS_PWD_HISTORY_CONFLICT"},
+ {STATUS_PLUGPLAY_NO_DEVICE, -EIO, "STATUS_PLUGPLAY_NO_DEVICE"},
+ {STATUS_UNSUPPORTED_COMPRESSION, -EIO,
+ "STATUS_UNSUPPORTED_COMPRESSION"},
+ {STATUS_INVALID_HW_PROFILE, -EIO, "STATUS_INVALID_HW_PROFILE"},
+ {STATUS_INVALID_PLUGPLAY_DEVICE_PATH, -EIO,
+ "STATUS_INVALID_PLUGPLAY_DEVICE_PATH"},
+ {STATUS_DRIVER_ORDINAL_NOT_FOUND, -EIO,
+ "STATUS_DRIVER_ORDINAL_NOT_FOUND"},
+ {STATUS_DRIVER_ENTRYPOINT_NOT_FOUND, -EIO,
+ "STATUS_DRIVER_ENTRYPOINT_NOT_FOUND"},
+ {STATUS_RESOURCE_NOT_OWNED, -EIO, "STATUS_RESOURCE_NOT_OWNED"},
+ {STATUS_TOO_MANY_LINKS, -EMLINK, "STATUS_TOO_MANY_LINKS"},
+ {STATUS_QUOTA_LIST_INCONSISTENT, -EIO,
+ "STATUS_QUOTA_LIST_INCONSISTENT"},
+ {STATUS_FILE_IS_OFFLINE, -EIO, "STATUS_FILE_IS_OFFLINE"},
+ {STATUS_EVALUATION_EXPIRATION, -EIO, "STATUS_EVALUATION_EXPIRATION"},
+ {STATUS_ILLEGAL_DLL_RELOCATION, -EIO, "STATUS_ILLEGAL_DLL_RELOCATION"},
+ {STATUS_LICENSE_VIOLATION, -EIO, "STATUS_LICENSE_VIOLATION"},
+ {STATUS_DLL_INIT_FAILED_LOGOFF, -EIO, "STATUS_DLL_INIT_FAILED_LOGOFF"},
+ {STATUS_DRIVER_UNABLE_TO_LOAD, -EIO, "STATUS_DRIVER_UNABLE_TO_LOAD"},
+ {STATUS_DFS_UNAVAILABLE, -EIO, "STATUS_DFS_UNAVAILABLE"},
+ {STATUS_VOLUME_DISMOUNTED, -EIO, "STATUS_VOLUME_DISMOUNTED"},
+ {STATUS_WX86_INTERNAL_ERROR, -EIO, "STATUS_WX86_INTERNAL_ERROR"},
+ {STATUS_WX86_FLOAT_STACK_CHECK, -EIO, "STATUS_WX86_FLOAT_STACK_CHECK"},
+ {STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"},
+ {STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"},
+ {STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"},
+ {STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"},
+ {STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"},
+ {STATUS_IO_REPARSE_TAG_MISMATCH, -EIO,
+ "STATUS_IO_REPARSE_TAG_MISMATCH"},
+ {STATUS_IO_REPARSE_DATA_INVALID, -EIO,
+ "STATUS_IO_REPARSE_DATA_INVALID"},
+ {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EIO,
+ "STATUS_IO_REPARSE_TAG_NOT_HANDLED"},
+ {STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO,
+ "STATUS_REPARSE_POINT_NOT_RESOLVED"},
+ {STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO,
+ "STATUS_DIRECTORY_IS_A_REPARSE_POINT"},
+ {STATUS_RANGE_LIST_CONFLICT, -EIO, "STATUS_RANGE_LIST_CONFLICT"},
+ {STATUS_SOURCE_ELEMENT_EMPTY, -EIO, "STATUS_SOURCE_ELEMENT_EMPTY"},
+ {STATUS_DESTINATION_ELEMENT_FULL, -EIO,
+ "STATUS_DESTINATION_ELEMENT_FULL"},
+ {STATUS_ILLEGAL_ELEMENT_ADDRESS, -EIO,
+ "STATUS_ILLEGAL_ELEMENT_ADDRESS"},
+ {STATUS_MAGAZINE_NOT_PRESENT, -EIO, "STATUS_MAGAZINE_NOT_PRESENT"},
+ {STATUS_REINITIALIZATION_NEEDED, -EIO,
+ "STATUS_REINITIALIZATION_NEEDED"},
+ {STATUS_ENCRYPTION_FAILED, -EIO, "STATUS_ENCRYPTION_FAILED"},
+ {STATUS_DECRYPTION_FAILED, -EIO, "STATUS_DECRYPTION_FAILED"},
+ {STATUS_RANGE_NOT_FOUND, -EIO, "STATUS_RANGE_NOT_FOUND"},
+ {STATUS_NO_RECOVERY_POLICY, -EIO, "STATUS_NO_RECOVERY_POLICY"},
+ {STATUS_NO_EFS, -EIO, "STATUS_NO_EFS"},
+ {STATUS_WRONG_EFS, -EIO, "STATUS_WRONG_EFS"},
+ {STATUS_NO_USER_KEYS, -EIO, "STATUS_NO_USER_KEYS"},
+ {STATUS_FILE_NOT_ENCRYPTED, -EIO, "STATUS_FILE_NOT_ENCRYPTED"},
+ {STATUS_NOT_EXPORT_FORMAT, -EIO, "STATUS_NOT_EXPORT_FORMAT"},
+ {STATUS_FILE_ENCRYPTED, -EIO, "STATUS_FILE_ENCRYPTED"},
+ {STATUS_WMI_GUID_NOT_FOUND, -EIO, "STATUS_WMI_GUID_NOT_FOUND"},
+ {STATUS_WMI_INSTANCE_NOT_FOUND, -EIO, "STATUS_WMI_INSTANCE_NOT_FOUND"},
+ {STATUS_WMI_ITEMID_NOT_FOUND, -EIO, "STATUS_WMI_ITEMID_NOT_FOUND"},
+ {STATUS_WMI_TRY_AGAIN, -EIO, "STATUS_WMI_TRY_AGAIN"},
+ {STATUS_SHARED_POLICY, -EIO, "STATUS_SHARED_POLICY"},
+ {STATUS_POLICY_OBJECT_NOT_FOUND, -EIO,
+ "STATUS_POLICY_OBJECT_NOT_FOUND"},
+ {STATUS_POLICY_ONLY_IN_DS, -EIO, "STATUS_POLICY_ONLY_IN_DS"},
+ {STATUS_VOLUME_NOT_UPGRADED, -EIO, "STATUS_VOLUME_NOT_UPGRADED"},
+ {STATUS_REMOTE_STORAGE_NOT_ACTIVE, -EIO,
+ "STATUS_REMOTE_STORAGE_NOT_ACTIVE"},
+ {STATUS_REMOTE_STORAGE_MEDIA_ERROR, -EIO,
+ "STATUS_REMOTE_STORAGE_MEDIA_ERROR"},
+ {STATUS_NO_TRACKING_SERVICE, -EIO, "STATUS_NO_TRACKING_SERVICE"},
+ {STATUS_SERVER_SID_MISMATCH, -EIO, "STATUS_SERVER_SID_MISMATCH"},
+ {STATUS_DS_NO_ATTRIBUTE_OR_VALUE, -EIO,
+ "STATUS_DS_NO_ATTRIBUTE_OR_VALUE"},
+ {STATUS_DS_INVALID_ATTRIBUTE_SYNTAX, -EIO,
+ "STATUS_DS_INVALID_ATTRIBUTE_SYNTAX"},
+ {STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED, -EIO,
+ "STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED"},
+ {STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS, -EIO,
+ "STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS"},
+ {STATUS_DS_BUSY, -EBUSY, "STATUS_DS_BUSY"},
+ {STATUS_DS_UNAVAILABLE, -EIO, "STATUS_DS_UNAVAILABLE"},
+ {STATUS_DS_NO_RIDS_ALLOCATED, -EIO, "STATUS_DS_NO_RIDS_ALLOCATED"},
+ {STATUS_DS_NO_MORE_RIDS, -EIO, "STATUS_DS_NO_MORE_RIDS"},
+ {STATUS_DS_INCORRECT_ROLE_OWNER, -EIO,
+ "STATUS_DS_INCORRECT_ROLE_OWNER"},
+ {STATUS_DS_RIDMGR_INIT_ERROR, -EIO, "STATUS_DS_RIDMGR_INIT_ERROR"},
+ {STATUS_DS_OBJ_CLASS_VIOLATION, -EIO, "STATUS_DS_OBJ_CLASS_VIOLATION"},
+ {STATUS_DS_CANT_ON_NON_LEAF, -EIO, "STATUS_DS_CANT_ON_NON_LEAF"},
+ {STATUS_DS_CANT_ON_RDN, -EIO, "STATUS_DS_CANT_ON_RDN"},
+ {STATUS_DS_CANT_MOD_OBJ_CLASS, -EIO, "STATUS_DS_CANT_MOD_OBJ_CLASS"},
+ {STATUS_DS_CROSS_DOM_MOVE_FAILED, -EIO,
+ "STATUS_DS_CROSS_DOM_MOVE_FAILED"},
+ {STATUS_DS_GC_NOT_AVAILABLE, -EIO, "STATUS_DS_GC_NOT_AVAILABLE"},
+ {STATUS_DIRECTORY_SERVICE_REQUIRED, -EIO,
+ "STATUS_DIRECTORY_SERVICE_REQUIRED"},
+ {STATUS_REPARSE_ATTRIBUTE_CONFLICT, -EIO,
+ "STATUS_REPARSE_ATTRIBUTE_CONFLICT"},
+ {STATUS_CANT_ENABLE_DENY_ONLY, -EIO, "STATUS_CANT_ENABLE_DENY_ONLY"},
+ {STATUS_FLOAT_MULTIPLE_FAULTS, -EIO, "STATUS_FLOAT_MULTIPLE_FAULTS"},
+ {STATUS_FLOAT_MULTIPLE_TRAPS, -EIO, "STATUS_FLOAT_MULTIPLE_TRAPS"},
+ {STATUS_DEVICE_REMOVED, -EIO, "STATUS_DEVICE_REMOVED"},
+ {STATUS_JOURNAL_DELETE_IN_PROGRESS, -EIO,
+ "STATUS_JOURNAL_DELETE_IN_PROGRESS"},
+ {STATUS_JOURNAL_NOT_ACTIVE, -EIO, "STATUS_JOURNAL_NOT_ACTIVE"},
+ {STATUS_NOINTERFACE, -EIO, "STATUS_NOINTERFACE"},
+ {STATUS_DS_ADMIN_LIMIT_EXCEEDED, -EIO,
+ "STATUS_DS_ADMIN_LIMIT_EXCEEDED"},
+ {STATUS_DRIVER_FAILED_SLEEP, -EIO, "STATUS_DRIVER_FAILED_SLEEP"},
+ {STATUS_MUTUAL_AUTHENTICATION_FAILED, -EIO,
+ "STATUS_MUTUAL_AUTHENTICATION_FAILED"},
+ {STATUS_CORRUPT_SYSTEM_FILE, -EIO, "STATUS_CORRUPT_SYSTEM_FILE"},
+ {STATUS_DATATYPE_MISALIGNMENT_ERROR, -EIO,
+ "STATUS_DATATYPE_MISALIGNMENT_ERROR"},
+ {STATUS_WMI_READ_ONLY, -EROFS, "STATUS_WMI_READ_ONLY"},
+ {STATUS_WMI_SET_FAILURE, -EIO, "STATUS_WMI_SET_FAILURE"},
+ {STATUS_COMMITMENT_MINIMUM, -EIO, "STATUS_COMMITMENT_MINIMUM"},
+ {STATUS_REG_NAT_CONSUMPTION, -EIO, "STATUS_REG_NAT_CONSUMPTION"},
+ {STATUS_TRANSPORT_FULL, -EIO, "STATUS_TRANSPORT_FULL"},
+ {STATUS_DS_SAM_INIT_FAILURE, -EIO, "STATUS_DS_SAM_INIT_FAILURE"},
+ {STATUS_ONLY_IF_CONNECTED, -EIO, "STATUS_ONLY_IF_CONNECTED"},
+ {STATUS_DS_SENSITIVE_GROUP_VIOLATION, -EIO,
+ "STATUS_DS_SENSITIVE_GROUP_VIOLATION"},
+ {STATUS_PNP_RESTART_ENUMERATION, -EIO,
+ "STATUS_PNP_RESTART_ENUMERATION"},
+ {STATUS_JOURNAL_ENTRY_DELETED, -EIO, "STATUS_JOURNAL_ENTRY_DELETED"},
+ {STATUS_DS_CANT_MOD_PRIMARYGROUPID, -EIO,
+ "STATUS_DS_CANT_MOD_PRIMARYGROUPID"},
+ {STATUS_SYSTEM_IMAGE_BAD_SIGNATURE, -EIO,
+ "STATUS_SYSTEM_IMAGE_BAD_SIGNATURE"},
+ {STATUS_PNP_REBOOT_REQUIRED, -EIO, "STATUS_PNP_REBOOT_REQUIRED"},
+ {STATUS_POWER_STATE_INVALID, -EIO, "STATUS_POWER_STATE_INVALID"},
+ {STATUS_DS_INVALID_GROUP_TYPE, -EIO, "STATUS_DS_INVALID_GROUP_TYPE"},
+ {STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN, -EIO,
+ "STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN"},
+ {STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN, -EIO,
+ "STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN"},
+ {STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
+ "STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER"},
+ {STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
+ "STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER"},
+ {STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER, -EIO,
+ "STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER"},
+ {STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER, -EIO,
+ "STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER"},
+ {STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER, -EIO,
+ "STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER"},
+ {STATUS_DS_HAVE_PRIMARY_MEMBERS, -EIO,
+ "STATUS_DS_HAVE_PRIMARY_MEMBERS"},
+ {STATUS_WMI_NOT_SUPPORTED, -EOPNOTSUPP, "STATUS_WMI_NOT_SUPPORTED"},
+ {STATUS_INSUFFICIENT_POWER, -EIO, "STATUS_INSUFFICIENT_POWER"},
+ {STATUS_SAM_NEED_BOOTKEY_PASSWORD, -EIO,
+ "STATUS_SAM_NEED_BOOTKEY_PASSWORD"},
+ {STATUS_SAM_NEED_BOOTKEY_FLOPPY, -EIO,
+ "STATUS_SAM_NEED_BOOTKEY_FLOPPY"},
+ {STATUS_DS_CANT_START, -EIO, "STATUS_DS_CANT_START"},
+ {STATUS_DS_INIT_FAILURE, -EIO, "STATUS_DS_INIT_FAILURE"},
+ {STATUS_SAM_INIT_FAILURE, -EIO, "STATUS_SAM_INIT_FAILURE"},
+ {STATUS_DS_GC_REQUIRED, -EIO, "STATUS_DS_GC_REQUIRED"},
+ {STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY, -EIO,
+ "STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY"},
+ {STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS, -EIO,
+ "STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS"},
+ {STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED"},
+ {STATUS_MULTIPLE_FAULT_VIOLATION, -EIO,
+ "STATUS_MULTIPLE_FAULT_VIOLATION"},
+ {STATUS_CURRENT_DOMAIN_NOT_ALLOWED, -EIO,
+ "STATUS_CURRENT_DOMAIN_NOT_ALLOWED"},
+ {STATUS_CANNOT_MAKE, -EIO, "STATUS_CANNOT_MAKE"},
+ {STATUS_SYSTEM_SHUTDOWN, -EIO, "STATUS_SYSTEM_SHUTDOWN"},
+ {STATUS_DS_INIT_FAILURE_CONSOLE, -EIO,
+ "STATUS_DS_INIT_FAILURE_CONSOLE"},
+ {STATUS_DS_SAM_INIT_FAILURE_CONSOLE, -EIO,
+ "STATUS_DS_SAM_INIT_FAILURE_CONSOLE"},
+ {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
+ "STATUS_UNFINISHED_CONTEXT_DELETED"},
+ {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
+ {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"},
+ {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
+ {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
+ "STATUS_WRONG_CREDENTIAL_HANDLE"},
+ {STATUS_CRYPTO_SYSTEM_INVALID, -EIO, "STATUS_CRYPTO_SYSTEM_INVALID"},
+ {STATUS_MAX_REFERRALS_EXCEEDED, -EIO, "STATUS_MAX_REFERRALS_EXCEEDED"},
+ {STATUS_MUST_BE_KDC, -EIO, "STATUS_MUST_BE_KDC"},
+ {STATUS_STRONG_CRYPTO_NOT_SUPPORTED, -EIO,
+ "STATUS_STRONG_CRYPTO_NOT_SUPPORTED"},
+ {STATUS_TOO_MANY_PRINCIPALS, -EIO, "STATUS_TOO_MANY_PRINCIPALS"},
+ {STATUS_NO_PA_DATA, -EIO, "STATUS_NO_PA_DATA"},
+ {STATUS_PKINIT_NAME_MISMATCH, -EIO, "STATUS_PKINIT_NAME_MISMATCH"},
+ {STATUS_SMARTCARD_LOGON_REQUIRED, -EIO,
+ "STATUS_SMARTCARD_LOGON_REQUIRED"},
+ {STATUS_KDC_INVALID_REQUEST, -EIO, "STATUS_KDC_INVALID_REQUEST"},
+ {STATUS_KDC_UNABLE_TO_REFER, -EIO, "STATUS_KDC_UNABLE_TO_REFER"},
+ {STATUS_KDC_UNKNOWN_ETYPE, -EIO, "STATUS_KDC_UNKNOWN_ETYPE"},
+ {STATUS_SHUTDOWN_IN_PROGRESS, -EIO, "STATUS_SHUTDOWN_IN_PROGRESS"},
+ {STATUS_SERVER_SHUTDOWN_IN_PROGRESS, -EIO,
+ "STATUS_SERVER_SHUTDOWN_IN_PROGRESS"},
+ {STATUS_NOT_SUPPORTED_ON_SBS, -EOPNOTSUPP,
+ "STATUS_NOT_SUPPORTED_ON_SBS"},
+ {STATUS_WMI_GUID_DISCONNECTED, -EIO, "STATUS_WMI_GUID_DISCONNECTED"},
+ {STATUS_WMI_ALREADY_DISABLED, -EIO, "STATUS_WMI_ALREADY_DISABLED"},
+ {STATUS_WMI_ALREADY_ENABLED, -EIO, "STATUS_WMI_ALREADY_ENABLED"},
+ {STATUS_MFT_TOO_FRAGMENTED, -EIO, "STATUS_MFT_TOO_FRAGMENTED"},
+ {STATUS_COPY_PROTECTION_FAILURE, -EIO,
+ "STATUS_COPY_PROTECTION_FAILURE"},
+ {STATUS_CSS_AUTHENTICATION_FAILURE, -EIO,
+ "STATUS_CSS_AUTHENTICATION_FAILURE"},
+ {STATUS_CSS_KEY_NOT_PRESENT, -EIO, "STATUS_CSS_KEY_NOT_PRESENT"},
+ {STATUS_CSS_KEY_NOT_ESTABLISHED, -EIO,
+ "STATUS_CSS_KEY_NOT_ESTABLISHED"},
+ {STATUS_CSS_SCRAMBLED_SECTOR, -EIO, "STATUS_CSS_SCRAMBLED_SECTOR"},
+ {STATUS_CSS_REGION_MISMATCH, -EIO, "STATUS_CSS_REGION_MISMATCH"},
+ {STATUS_CSS_RESETS_EXHAUSTED, -EIO, "STATUS_CSS_RESETS_EXHAUSTED"},
+ {STATUS_PKINIT_FAILURE, -EIO, "STATUS_PKINIT_FAILURE"},
+ {STATUS_SMARTCARD_SUBSYSTEM_FAILURE, -EIO,
+ "STATUS_SMARTCARD_SUBSYSTEM_FAILURE"},
+ {STATUS_NO_KERB_KEY, -EIO, "STATUS_NO_KERB_KEY"},
+ {STATUS_HOST_DOWN, -EIO, "STATUS_HOST_DOWN"},
+ {STATUS_UNSUPPORTED_PREAUTH, -EIO, "STATUS_UNSUPPORTED_PREAUTH"},
+ {STATUS_EFS_ALG_BLOB_TOO_BIG, -EIO, "STATUS_EFS_ALG_BLOB_TOO_BIG"},
+ {STATUS_PORT_NOT_SET, -EIO, "STATUS_PORT_NOT_SET"},
+ {STATUS_DEBUGGER_INACTIVE, -EIO, "STATUS_DEBUGGER_INACTIVE"},
+ {STATUS_DS_VERSION_CHECK_FAILURE, -EIO,
+ "STATUS_DS_VERSION_CHECK_FAILURE"},
+ {STATUS_AUDITING_DISABLED, -EIO, "STATUS_AUDITING_DISABLED"},
+ {STATUS_PRENT4_MACHINE_ACCOUNT, -EIO, "STATUS_PRENT4_MACHINE_ACCOUNT"},
+ {STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER, -EIO,
+ "STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER"},
+ {STATUS_INVALID_IMAGE_WIN_32, -EIO, "STATUS_INVALID_IMAGE_WIN_32"},
+ {STATUS_INVALID_IMAGE_WIN_64, -EIO, "STATUS_INVALID_IMAGE_WIN_64"},
+ {STATUS_BAD_BINDINGS, -EIO, "STATUS_BAD_BINDINGS"},
+ {STATUS_NETWORK_SESSION_EXPIRED, -EIO,
+ "STATUS_NETWORK_SESSION_EXPIRED"},
+ {STATUS_APPHELP_BLOCK, -EIO, "STATUS_APPHELP_BLOCK"},
+ {STATUS_ALL_SIDS_FILTERED, -EIO, "STATUS_ALL_SIDS_FILTERED"},
+ {STATUS_NOT_SAFE_MODE_DRIVER, -EIO, "STATUS_NOT_SAFE_MODE_DRIVER"},
+ {STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT, -EACCES,
+ "STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT"},
+ {STATUS_ACCESS_DISABLED_BY_POLICY_PATH, -EACCES,
+ "STATUS_ACCESS_DISABLED_BY_POLICY_PATH"},
+ {STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER, -EACCES,
+ "STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER"},
+ {STATUS_ACCESS_DISABLED_BY_POLICY_OTHER, -EACCES,
+ "STATUS_ACCESS_DISABLED_BY_POLICY_OTHER"},
+ {STATUS_FAILED_DRIVER_ENTRY, -EIO, "STATUS_FAILED_DRIVER_ENTRY"},
+ {STATUS_DEVICE_ENUMERATION_ERROR, -EIO,
+ "STATUS_DEVICE_ENUMERATION_ERROR"},
+ {STATUS_MOUNT_POINT_NOT_RESOLVED, -EIO,
+ "STATUS_MOUNT_POINT_NOT_RESOLVED"},
+ {STATUS_INVALID_DEVICE_OBJECT_PARAMETER, -EIO,
+ "STATUS_INVALID_DEVICE_OBJECT_PARAMETER"},
+ {STATUS_MCA_OCCURED, -EIO, "STATUS_MCA_OCCURED"},
+ {STATUS_DRIVER_BLOCKED_CRITICAL, -EIO,
+ "STATUS_DRIVER_BLOCKED_CRITICAL"},
+ {STATUS_DRIVER_BLOCKED, -EIO, "STATUS_DRIVER_BLOCKED"},
+ {STATUS_DRIVER_DATABASE_ERROR, -EIO, "STATUS_DRIVER_DATABASE_ERROR"},
+ {STATUS_SYSTEM_HIVE_TOO_LARGE, -EIO, "STATUS_SYSTEM_HIVE_TOO_LARGE"},
+ {STATUS_INVALID_IMPORT_OF_NON_DLL, -EIO,
+ "STATUS_INVALID_IMPORT_OF_NON_DLL"},
+ {STATUS_NO_SECRETS, -EIO, "STATUS_NO_SECRETS"},
+ {STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY, -EACCES,
+ "STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY"},
+ {STATUS_FAILED_STACK_SWITCH, -EIO, "STATUS_FAILED_STACK_SWITCH"},
+ {STATUS_HEAP_CORRUPTION, -EIO, "STATUS_HEAP_CORRUPTION"},
+ {STATUS_SMARTCARD_WRONG_PIN, -EIO, "STATUS_SMARTCARD_WRONG_PIN"},
+ {STATUS_SMARTCARD_CARD_BLOCKED, -EIO, "STATUS_SMARTCARD_CARD_BLOCKED"},
+ {STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED, -EIO,
+ "STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED"},
+ {STATUS_SMARTCARD_NO_CARD, -EIO, "STATUS_SMARTCARD_NO_CARD"},
+ {STATUS_SMARTCARD_NO_KEY_CONTAINER, -EIO,
+ "STATUS_SMARTCARD_NO_KEY_CONTAINER"},
+ {STATUS_SMARTCARD_NO_CERTIFICATE, -EIO,
+ "STATUS_SMARTCARD_NO_CERTIFICATE"},
+ {STATUS_SMARTCARD_NO_KEYSET, -EIO, "STATUS_SMARTCARD_NO_KEYSET"},
+ {STATUS_SMARTCARD_IO_ERROR, -EIO, "STATUS_SMARTCARD_IO_ERROR"},
+ {STATUS_DOWNGRADE_DETECTED, -EIO, "STATUS_DOWNGRADE_DETECTED"},
+ {STATUS_SMARTCARD_CERT_REVOKED, -EIO, "STATUS_SMARTCARD_CERT_REVOKED"},
+ {STATUS_ISSUING_CA_UNTRUSTED, -EIO, "STATUS_ISSUING_CA_UNTRUSTED"},
+ {STATUS_REVOCATION_OFFLINE_C, -EIO, "STATUS_REVOCATION_OFFLINE_C"},
+ {STATUS_PKINIT_CLIENT_FAILURE, -EIO, "STATUS_PKINIT_CLIENT_FAILURE"},
+ {STATUS_SMARTCARD_CERT_EXPIRED, -EIO, "STATUS_SMARTCARD_CERT_EXPIRED"},
+ {STATUS_DRIVER_FAILED_PRIOR_UNLOAD, -EIO,
+ "STATUS_DRIVER_FAILED_PRIOR_UNLOAD"},
+ {STATUS_SMARTCARD_SILENT_CONTEXT, -EIO,
+ "STATUS_SMARTCARD_SILENT_CONTEXT"},
+ {STATUS_PER_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_PER_USER_TRUST_QUOTA_EXCEEDED"},
+ {STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED"},
+ {STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED"},
+ {STATUS_DS_NAME_NOT_UNIQUE, -EIO, "STATUS_DS_NAME_NOT_UNIQUE"},
+ {STATUS_DS_DUPLICATE_ID_FOUND, -EIO, "STATUS_DS_DUPLICATE_ID_FOUND"},
+ {STATUS_DS_GROUP_CONVERSION_ERROR, -EIO,
+ "STATUS_DS_GROUP_CONVERSION_ERROR"},
+ {STATUS_VOLSNAP_PREPARE_HIBERNATE, -EIO,
+ "STATUS_VOLSNAP_PREPARE_HIBERNATE"},
+ {STATUS_USER2USER_REQUIRED, -EIO, "STATUS_USER2USER_REQUIRED"},
+ {STATUS_STACK_BUFFER_OVERRUN, -EIO, "STATUS_STACK_BUFFER_OVERRUN"},
+ {STATUS_NO_S4U_PROT_SUPPORT, -EIO, "STATUS_NO_S4U_PROT_SUPPORT"},
+ {STATUS_CROSSREALM_DELEGATION_FAILURE, -EIO,
+ "STATUS_CROSSREALM_DELEGATION_FAILURE"},
+ {STATUS_REVOCATION_OFFLINE_KDC, -EIO, "STATUS_REVOCATION_OFFLINE_KDC"},
+ {STATUS_ISSUING_CA_UNTRUSTED_KDC, -EIO,
+ "STATUS_ISSUING_CA_UNTRUSTED_KDC"},
+ {STATUS_KDC_CERT_EXPIRED, -EIO, "STATUS_KDC_CERT_EXPIRED"},
+ {STATUS_KDC_CERT_REVOKED, -EIO, "STATUS_KDC_CERT_REVOKED"},
+ {STATUS_PARAMETER_QUOTA_EXCEEDED, -EDQUOT,
+ "STATUS_PARAMETER_QUOTA_EXCEEDED"},
+ {STATUS_HIBERNATION_FAILURE, -EIO, "STATUS_HIBERNATION_FAILURE"},
+ {STATUS_DELAY_LOAD_FAILED, -EIO, "STATUS_DELAY_LOAD_FAILED"},
+ {STATUS_AUTHENTICATION_FIREWALL_FAILED, -EIO,
+ "STATUS_AUTHENTICATION_FIREWALL_FAILED"},
+ {STATUS_VDM_DISALLOWED, -EIO, "STATUS_VDM_DISALLOWED"},
+ {STATUS_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
+ "STATUS_HUNG_DISPLAY_DRIVER_THREAD"},
+ {STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE, -EIO,
+ "STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE"},
+ {STATUS_INVALID_CRUNTIME_PARAMETER, -EIO,
+ "STATUS_INVALID_CRUNTIME_PARAMETER"},
+ {STATUS_NTLM_BLOCKED, -EIO, "STATUS_NTLM_BLOCKED"},
+ {STATUS_ASSERTION_FAILURE, -EIO, "STATUS_ASSERTION_FAILURE"},
+ {STATUS_VERIFIER_STOP, -EIO, "STATUS_VERIFIER_STOP"},
+ {STATUS_CALLBACK_POP_STACK, -EIO, "STATUS_CALLBACK_POP_STACK"},
+ {STATUS_INCOMPATIBLE_DRIVER_BLOCKED, -EIO,
+ "STATUS_INCOMPATIBLE_DRIVER_BLOCKED"},
+ {STATUS_HIVE_UNLOADED, -EIO, "STATUS_HIVE_UNLOADED"},
+ {STATUS_COMPRESSION_DISABLED, -EIO, "STATUS_COMPRESSION_DISABLED"},
+ {STATUS_FILE_SYSTEM_LIMITATION, -EIO, "STATUS_FILE_SYSTEM_LIMITATION"},
+ {STATUS_INVALID_IMAGE_HASH, -EIO, "STATUS_INVALID_IMAGE_HASH"},
+ {STATUS_NOT_CAPABLE, -EIO, "STATUS_NOT_CAPABLE"},
+ {STATUS_REQUEST_OUT_OF_SEQUENCE, -EIO,
+ "STATUS_REQUEST_OUT_OF_SEQUENCE"},
+ {STATUS_IMPLEMENTATION_LIMIT, -EIO, "STATUS_IMPLEMENTATION_LIMIT"},
+ {STATUS_ELEVATION_REQUIRED, -EIO, "STATUS_ELEVATION_REQUIRED"},
+ {STATUS_BEYOND_VDL, -EIO, "STATUS_BEYOND_VDL"},
+ {STATUS_ENCOUNTERED_WRITE_IN_PROGRESS, -EIO,
+ "STATUS_ENCOUNTERED_WRITE_IN_PROGRESS"},
+ {STATUS_PTE_CHANGED, -EIO, "STATUS_PTE_CHANGED"},
+ {STATUS_PURGE_FAILED, -EIO, "STATUS_PURGE_FAILED"},
+ {STATUS_CRED_REQUIRES_CONFIRMATION, -EIO,
+ "STATUS_CRED_REQUIRES_CONFIRMATION"},
+ {STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE, -EIO,
+ "STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE"},
+ {STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER, -EIO,
+ "STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER"},
+ {STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE, -EIO,
+ "STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE"},
+ {STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE, -EIO,
+ "STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE"},
+ {STATUS_CS_ENCRYPTION_FILE_NOT_CSE, -EIO,
+ "STATUS_CS_ENCRYPTION_FILE_NOT_CSE"},
+ {STATUS_INVALID_LABEL, -EIO, "STATUS_INVALID_LABEL"},
+ {STATUS_DRIVER_PROCESS_TERMINATED, -EIO,
+ "STATUS_DRIVER_PROCESS_TERMINATED"},
+ {STATUS_AMBIGUOUS_SYSTEM_DEVICE, -EIO,
+ "STATUS_AMBIGUOUS_SYSTEM_DEVICE"},
+ {STATUS_SYSTEM_DEVICE_NOT_FOUND, -EIO,
+ "STATUS_SYSTEM_DEVICE_NOT_FOUND"},
+ {STATUS_RESTART_BOOT_APPLICATION, -EIO,
+ "STATUS_RESTART_BOOT_APPLICATION"},
+ {STATUS_INVALID_TASK_NAME, -EIO, "STATUS_INVALID_TASK_NAME"},
+ {STATUS_INVALID_TASK_INDEX, -EIO, "STATUS_INVALID_TASK_INDEX"},
+ {STATUS_THREAD_ALREADY_IN_TASK, -EIO, "STATUS_THREAD_ALREADY_IN_TASK"},
+ {STATUS_CALLBACK_BYPASS, -EIO, "STATUS_CALLBACK_BYPASS"},
+ {STATUS_PORT_CLOSED, -EIO, "STATUS_PORT_CLOSED"},
+ {STATUS_MESSAGE_LOST, -EIO, "STATUS_MESSAGE_LOST"},
+ {STATUS_INVALID_MESSAGE, -EIO, "STATUS_INVALID_MESSAGE"},
+ {STATUS_REQUEST_CANCELED, -EIO, "STATUS_REQUEST_CANCELED"},
+ {STATUS_RECURSIVE_DISPATCH, -EIO, "STATUS_RECURSIVE_DISPATCH"},
+ {STATUS_LPC_RECEIVE_BUFFER_EXPECTED, -EIO,
+ "STATUS_LPC_RECEIVE_BUFFER_EXPECTED"},
+ {STATUS_LPC_INVALID_CONNECTION_USAGE, -EIO,
+ "STATUS_LPC_INVALID_CONNECTION_USAGE"},
+ {STATUS_LPC_REQUESTS_NOT_ALLOWED, -EIO,
+ "STATUS_LPC_REQUESTS_NOT_ALLOWED"},
+ {STATUS_RESOURCE_IN_USE, -EIO, "STATUS_RESOURCE_IN_USE"},
+ {STATUS_HARDWARE_MEMORY_ERROR, -EIO, "STATUS_HARDWARE_MEMORY_ERROR"},
+ {STATUS_THREADPOOL_HANDLE_EXCEPTION, -EIO,
+ "STATUS_THREADPOOL_HANDLE_EXCEPTION"},
+ {STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED, -EIO,
+ "STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED"},
+ {STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED, -EIO,
+ "STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED"},
+ {STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED, -EIO,
+ "STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED"},
+ {STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED, -EIO,
+ "STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED"},
+ {STATUS_THREADPOOL_RELEASED_DURING_OPERATION, -EIO,
+ "STATUS_THREADPOOL_RELEASED_DURING_OPERATION"},
+ {STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING, -EIO,
+ "STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING"},
+ {STATUS_APC_RETURNED_WHILE_IMPERSONATING, -EIO,
+ "STATUS_APC_RETURNED_WHILE_IMPERSONATING"},
+ {STATUS_PROCESS_IS_PROTECTED, -EIO, "STATUS_PROCESS_IS_PROTECTED"},
+ {STATUS_MCA_EXCEPTION, -EIO, "STATUS_MCA_EXCEPTION"},
+ {STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE, -EIO,
+ "STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE"},
+ {STATUS_SYMLINK_CLASS_DISABLED, -EIO, "STATUS_SYMLINK_CLASS_DISABLED"},
+ {STATUS_INVALID_IDN_NORMALIZATION, -EIO,
+ "STATUS_INVALID_IDN_NORMALIZATION"},
+ {STATUS_NO_UNICODE_TRANSLATION, -EIO, "STATUS_NO_UNICODE_TRANSLATION"},
+ {STATUS_ALREADY_REGISTERED, -EIO, "STATUS_ALREADY_REGISTERED"},
+ {STATUS_CONTEXT_MISMATCH, -EIO, "STATUS_CONTEXT_MISMATCH"},
+ {STATUS_PORT_ALREADY_HAS_COMPLETION_LIST, -EIO,
+ "STATUS_PORT_ALREADY_HAS_COMPLETION_LIST"},
+ {STATUS_CALLBACK_RETURNED_THREAD_PRIORITY, -EIO,
+ "STATUS_CALLBACK_RETURNED_THREAD_PRIORITY"},
+ {STATUS_INVALID_THREAD, -EIO, "STATUS_INVALID_THREAD"},
+ {STATUS_CALLBACK_RETURNED_TRANSACTION, -EIO,
+ "STATUS_CALLBACK_RETURNED_TRANSACTION"},
+ {STATUS_CALLBACK_RETURNED_LDR_LOCK, -EIO,
+ "STATUS_CALLBACK_RETURNED_LDR_LOCK"},
+ {STATUS_CALLBACK_RETURNED_LANG, -EIO, "STATUS_CALLBACK_RETURNED_LANG"},
+ {STATUS_CALLBACK_RETURNED_PRI_BACK, -EIO,
+ "STATUS_CALLBACK_RETURNED_PRI_BACK"},
+ {STATUS_CALLBACK_RETURNED_THREAD_AFFINITY, -EIO,
+ "STATUS_CALLBACK_RETURNED_THREAD_AFFINITY"},
+ {STATUS_DISK_REPAIR_DISABLED, -EIO, "STATUS_DISK_REPAIR_DISABLED"},
+ {STATUS_DS_DOMAIN_RENAME_IN_PROGRESS, -EIO,
+ "STATUS_DS_DOMAIN_RENAME_IN_PROGRESS"},
+ {STATUS_DISK_QUOTA_EXCEEDED, -EDQUOT, "STATUS_DISK_QUOTA_EXCEEDED"},
+ {STATUS_CONTENT_BLOCKED, -EIO, "STATUS_CONTENT_BLOCKED"},
+ {STATUS_BAD_CLUSTERS, -EIO, "STATUS_BAD_CLUSTERS"},
+ {STATUS_VOLUME_DIRTY, -EIO, "STATUS_VOLUME_DIRTY"},
+ {STATUS_FILE_CHECKED_OUT, -EIO, "STATUS_FILE_CHECKED_OUT"},
+ {STATUS_CHECKOUT_REQUIRED, -EIO, "STATUS_CHECKOUT_REQUIRED"},
+ {STATUS_BAD_FILE_TYPE, -EIO, "STATUS_BAD_FILE_TYPE"},
+ {STATUS_FILE_TOO_LARGE, -EIO, "STATUS_FILE_TOO_LARGE"},
+ {STATUS_FORMS_AUTH_REQUIRED, -EIO, "STATUS_FORMS_AUTH_REQUIRED"},
+ {STATUS_VIRUS_INFECTED, -EIO, "STATUS_VIRUS_INFECTED"},
+ {STATUS_VIRUS_DELETED, -EIO, "STATUS_VIRUS_DELETED"},
+ {STATUS_BAD_MCFG_TABLE, -EIO, "STATUS_BAD_MCFG_TABLE"},
+ {STATUS_WOW_ASSERTION, -EIO, "STATUS_WOW_ASSERTION"},
+ {STATUS_INVALID_SIGNATURE, -EIO, "STATUS_INVALID_SIGNATURE"},
+ {STATUS_HMAC_NOT_SUPPORTED, -EIO, "STATUS_HMAC_NOT_SUPPORTED"},
+ {STATUS_IPSEC_QUEUE_OVERFLOW, -EIO, "STATUS_IPSEC_QUEUE_OVERFLOW"},
+ {STATUS_ND_QUEUE_OVERFLOW, -EIO, "STATUS_ND_QUEUE_OVERFLOW"},
+ {STATUS_HOPLIMIT_EXCEEDED, -EIO, "STATUS_HOPLIMIT_EXCEEDED"},
+ {STATUS_PROTOCOL_NOT_SUPPORTED, -EOPNOTSUPP,
+ "STATUS_PROTOCOL_NOT_SUPPORTED"},
+ {STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED, -EIO,
+ "STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED"},
+ {STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR, -EIO,
+ "STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR"},
+ {STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR, -EIO,
+ "STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR"},
+ {STATUS_XML_PARSE_ERROR, -EIO, "STATUS_XML_PARSE_ERROR"},
+ {STATUS_XMLDSIG_ERROR, -EIO, "STATUS_XMLDSIG_ERROR"},
+ {STATUS_WRONG_COMPARTMENT, -EIO, "STATUS_WRONG_COMPARTMENT"},
+ {STATUS_AUTHIP_FAILURE, -EIO, "STATUS_AUTHIP_FAILURE"},
+ {DBG_NO_STATE_CHANGE, -EIO, "DBG_NO_STATE_CHANGE"},
+ {DBG_APP_NOT_IDLE, -EIO, "DBG_APP_NOT_IDLE"},
+ {RPC_NT_INVALID_STRING_BINDING, -EIO, "RPC_NT_INVALID_STRING_BINDING"},
+ {RPC_NT_WRONG_KIND_OF_BINDING, -EIO, "RPC_NT_WRONG_KIND_OF_BINDING"},
+ {RPC_NT_INVALID_BINDING, -EIO, "RPC_NT_INVALID_BINDING"},
+ {RPC_NT_PROTSEQ_NOT_SUPPORTED, -EOPNOTSUPP,
+ "RPC_NT_PROTSEQ_NOT_SUPPORTED"},
+ {RPC_NT_INVALID_RPC_PROTSEQ, -EIO, "RPC_NT_INVALID_RPC_PROTSEQ"},
+ {RPC_NT_INVALID_STRING_UUID, -EIO, "RPC_NT_INVALID_STRING_UUID"},
+ {RPC_NT_INVALID_ENDPOINT_FORMAT, -EIO,
+ "RPC_NT_INVALID_ENDPOINT_FORMAT"},
+ {RPC_NT_INVALID_NET_ADDR, -EIO, "RPC_NT_INVALID_NET_ADDR"},
+ {RPC_NT_NO_ENDPOINT_FOUND, -EIO, "RPC_NT_NO_ENDPOINT_FOUND"},
+ {RPC_NT_INVALID_TIMEOUT, -EINVAL, "RPC_NT_INVALID_TIMEOUT"},
+ {RPC_NT_OBJECT_NOT_FOUND, -ENOENT, "RPC_NT_OBJECT_NOT_FOUND"},
+ {RPC_NT_ALREADY_REGISTERED, -EIO, "RPC_NT_ALREADY_REGISTERED"},
+ {RPC_NT_TYPE_ALREADY_REGISTERED, -EIO,
+ "RPC_NT_TYPE_ALREADY_REGISTERED"},
+ {RPC_NT_ALREADY_LISTENING, -EIO, "RPC_NT_ALREADY_LISTENING"},
+ {RPC_NT_NO_PROTSEQS_REGISTERED, -EIO, "RPC_NT_NO_PROTSEQS_REGISTERED"},
+ {RPC_NT_NOT_LISTENING, -EIO, "RPC_NT_NOT_LISTENING"},
+ {RPC_NT_UNKNOWN_MGR_TYPE, -EIO, "RPC_NT_UNKNOWN_MGR_TYPE"},
+ {RPC_NT_UNKNOWN_IF, -EIO, "RPC_NT_UNKNOWN_IF"},
+ {RPC_NT_NO_BINDINGS, -EIO, "RPC_NT_NO_BINDINGS"},
+ {RPC_NT_NO_PROTSEQS, -EIO, "RPC_NT_NO_PROTSEQS"},
+ {RPC_NT_CANT_CREATE_ENDPOINT, -EIO, "RPC_NT_CANT_CREATE_ENDPOINT"},
+ {RPC_NT_OUT_OF_RESOURCES, -EIO, "RPC_NT_OUT_OF_RESOURCES"},
+ {RPC_NT_SERVER_UNAVAILABLE, -EIO, "RPC_NT_SERVER_UNAVAILABLE"},
+ {RPC_NT_SERVER_TOO_BUSY, -EBUSY, "RPC_NT_SERVER_TOO_BUSY"},
+ {RPC_NT_INVALID_NETWORK_OPTIONS, -EIO,
+ "RPC_NT_INVALID_NETWORK_OPTIONS"},
+ {RPC_NT_NO_CALL_ACTIVE, -EIO, "RPC_NT_NO_CALL_ACTIVE"},
+ {RPC_NT_CALL_FAILED, -EIO, "RPC_NT_CALL_FAILED"},
+ {RPC_NT_CALL_FAILED_DNE, -EIO, "RPC_NT_CALL_FAILED_DNE"},
+ {RPC_NT_PROTOCOL_ERROR, -EIO, "RPC_NT_PROTOCOL_ERROR"},
+ {RPC_NT_UNSUPPORTED_TRANS_SYN, -EIO, "RPC_NT_UNSUPPORTED_TRANS_SYN"},
+ {RPC_NT_UNSUPPORTED_TYPE, -EIO, "RPC_NT_UNSUPPORTED_TYPE"},
+ {RPC_NT_INVALID_TAG, -EIO, "RPC_NT_INVALID_TAG"},
+ {RPC_NT_INVALID_BOUND, -EIO, "RPC_NT_INVALID_BOUND"},
+ {RPC_NT_NO_ENTRY_NAME, -EIO, "RPC_NT_NO_ENTRY_NAME"},
+ {RPC_NT_INVALID_NAME_SYNTAX, -EIO, "RPC_NT_INVALID_NAME_SYNTAX"},
+ {RPC_NT_UNSUPPORTED_NAME_SYNTAX, -EIO,
+ "RPC_NT_UNSUPPORTED_NAME_SYNTAX"},
+ {RPC_NT_UUID_NO_ADDRESS, -EIO, "RPC_NT_UUID_NO_ADDRESS"},
+ {RPC_NT_DUPLICATE_ENDPOINT, -ENOTUNIQ, "RPC_NT_DUPLICATE_ENDPOINT"},
+ {RPC_NT_UNKNOWN_AUTHN_TYPE, -EIO, "RPC_NT_UNKNOWN_AUTHN_TYPE"},
+ {RPC_NT_MAX_CALLS_TOO_SMALL, -EIO, "RPC_NT_MAX_CALLS_TOO_SMALL"},
+ {RPC_NT_STRING_TOO_LONG, -EIO, "RPC_NT_STRING_TOO_LONG"},
+ {RPC_NT_PROTSEQ_NOT_FOUND, -EIO, "RPC_NT_PROTSEQ_NOT_FOUND"},
+ {RPC_NT_PROCNUM_OUT_OF_RANGE, -EIO, "RPC_NT_PROCNUM_OUT_OF_RANGE"},
+ {RPC_NT_BINDING_HAS_NO_AUTH, -EIO, "RPC_NT_BINDING_HAS_NO_AUTH"},
+ {RPC_NT_UNKNOWN_AUTHN_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHN_SERVICE"},
+ {RPC_NT_UNKNOWN_AUTHN_LEVEL, -EIO, "RPC_NT_UNKNOWN_AUTHN_LEVEL"},
+ {RPC_NT_INVALID_AUTH_IDENTITY, -EIO, "RPC_NT_INVALID_AUTH_IDENTITY"},
+ {RPC_NT_UNKNOWN_AUTHZ_SERVICE, -EIO, "RPC_NT_UNKNOWN_AUTHZ_SERVICE"},
+ {EPT_NT_INVALID_ENTRY, -EIO, "EPT_NT_INVALID_ENTRY"},
+ {EPT_NT_CANT_PERFORM_OP, -EIO, "EPT_NT_CANT_PERFORM_OP"},
+ {EPT_NT_NOT_REGISTERED, -EIO, "EPT_NT_NOT_REGISTERED"},
+ {RPC_NT_NOTHING_TO_EXPORT, -EIO, "RPC_NT_NOTHING_TO_EXPORT"},
+ {RPC_NT_INCOMPLETE_NAME, -EIO, "RPC_NT_INCOMPLETE_NAME"},
+ {RPC_NT_INVALID_VERS_OPTION, -EIO, "RPC_NT_INVALID_VERS_OPTION"},
+ {RPC_NT_NO_MORE_MEMBERS, -EIO, "RPC_NT_NO_MORE_MEMBERS"},
+ {RPC_NT_NOT_ALL_OBJS_UNEXPORTED, -EIO,
+ "RPC_NT_NOT_ALL_OBJS_UNEXPORTED"},
+ {RPC_NT_INTERFACE_NOT_FOUND, -EIO, "RPC_NT_INTERFACE_NOT_FOUND"},
+ {RPC_NT_ENTRY_ALREADY_EXISTS, -EIO, "RPC_NT_ENTRY_ALREADY_EXISTS"},
+ {RPC_NT_ENTRY_NOT_FOUND, -EIO, "RPC_NT_ENTRY_NOT_FOUND"},
+ {RPC_NT_NAME_SERVICE_UNAVAILABLE, -EIO,
+ "RPC_NT_NAME_SERVICE_UNAVAILABLE"},
+ {RPC_NT_INVALID_NAF_ID, -EIO, "RPC_NT_INVALID_NAF_ID"},
+ {RPC_NT_CANNOT_SUPPORT, -EOPNOTSUPP, "RPC_NT_CANNOT_SUPPORT"},
+ {RPC_NT_NO_CONTEXT_AVAILABLE, -EIO, "RPC_NT_NO_CONTEXT_AVAILABLE"},
+ {RPC_NT_INTERNAL_ERROR, -EIO, "RPC_NT_INTERNAL_ERROR"},
+ {RPC_NT_ZERO_DIVIDE, -EIO, "RPC_NT_ZERO_DIVIDE"},
+ {RPC_NT_ADDRESS_ERROR, -EIO, "RPC_NT_ADDRESS_ERROR"},
+ {RPC_NT_FP_DIV_ZERO, -EIO, "RPC_NT_FP_DIV_ZERO"},
+ {RPC_NT_FP_UNDERFLOW, -EIO, "RPC_NT_FP_UNDERFLOW"},
+ {RPC_NT_FP_OVERFLOW, -EIO, "RPC_NT_FP_OVERFLOW"},
+ {RPC_NT_CALL_IN_PROGRESS, -EIO, "RPC_NT_CALL_IN_PROGRESS"},
+ {RPC_NT_NO_MORE_BINDINGS, -EIO, "RPC_NT_NO_MORE_BINDINGS"},
+ {RPC_NT_GROUP_MEMBER_NOT_FOUND, -EIO, "RPC_NT_GROUP_MEMBER_NOT_FOUND"},
+ {EPT_NT_CANT_CREATE, -EIO, "EPT_NT_CANT_CREATE"},
+ {RPC_NT_INVALID_OBJECT, -EIO, "RPC_NT_INVALID_OBJECT"},
+ {RPC_NT_NO_INTERFACES, -EIO, "RPC_NT_NO_INTERFACES"},
+ {RPC_NT_CALL_CANCELLED, -EIO, "RPC_NT_CALL_CANCELLED"},
+ {RPC_NT_BINDING_INCOMPLETE, -EIO, "RPC_NT_BINDING_INCOMPLETE"},
+ {RPC_NT_COMM_FAILURE, -EIO, "RPC_NT_COMM_FAILURE"},
+ {RPC_NT_UNSUPPORTED_AUTHN_LEVEL, -EIO,
+ "RPC_NT_UNSUPPORTED_AUTHN_LEVEL"},
+ {RPC_NT_NO_PRINC_NAME, -EIO, "RPC_NT_NO_PRINC_NAME"},
+ {RPC_NT_NOT_RPC_ERROR, -EIO, "RPC_NT_NOT_RPC_ERROR"},
+ {RPC_NT_SEC_PKG_ERROR, -EIO, "RPC_NT_SEC_PKG_ERROR"},
+ {RPC_NT_NOT_CANCELLED, -EIO, "RPC_NT_NOT_CANCELLED"},
+ {RPC_NT_INVALID_ASYNC_HANDLE, -EIO, "RPC_NT_INVALID_ASYNC_HANDLE"},
+ {RPC_NT_INVALID_ASYNC_CALL, -EIO, "RPC_NT_INVALID_ASYNC_CALL"},
+ {RPC_NT_PROXY_ACCESS_DENIED, -EACCES, "RPC_NT_PROXY_ACCESS_DENIED"},
+ {RPC_NT_NO_MORE_ENTRIES, -EIO, "RPC_NT_NO_MORE_ENTRIES"},
+ {RPC_NT_SS_CHAR_TRANS_OPEN_FAIL, -EIO,
+ "RPC_NT_SS_CHAR_TRANS_OPEN_FAIL"},
+ {RPC_NT_SS_CHAR_TRANS_SHORT_FILE, -EIO,
+ "RPC_NT_SS_CHAR_TRANS_SHORT_FILE"},
+ {RPC_NT_SS_IN_NULL_CONTEXT, -EIO, "RPC_NT_SS_IN_NULL_CONTEXT"},
+ {RPC_NT_SS_CONTEXT_MISMATCH, -EIO, "RPC_NT_SS_CONTEXT_MISMATCH"},
+ {RPC_NT_SS_CONTEXT_DAMAGED, -EIO, "RPC_NT_SS_CONTEXT_DAMAGED"},
+ {RPC_NT_SS_HANDLES_MISMATCH, -EIO, "RPC_NT_SS_HANDLES_MISMATCH"},
+ {RPC_NT_SS_CANNOT_GET_CALL_HANDLE, -EIO,
+ "RPC_NT_SS_CANNOT_GET_CALL_HANDLE"},
+ {RPC_NT_NULL_REF_POINTER, -EIO, "RPC_NT_NULL_REF_POINTER"},
+ {RPC_NT_ENUM_VALUE_OUT_OF_RANGE, -EIO,
+ "RPC_NT_ENUM_VALUE_OUT_OF_RANGE"},
+ {RPC_NT_BYTE_COUNT_TOO_SMALL, -EIO, "RPC_NT_BYTE_COUNT_TOO_SMALL"},
+ {RPC_NT_BAD_STUB_DATA, -EIO, "RPC_NT_BAD_STUB_DATA"},
+ {RPC_NT_INVALID_ES_ACTION, -EIO, "RPC_NT_INVALID_ES_ACTION"},
+ {RPC_NT_WRONG_ES_VERSION, -EIO, "RPC_NT_WRONG_ES_VERSION"},
+ {RPC_NT_WRONG_STUB_VERSION, -EIO, "RPC_NT_WRONG_STUB_VERSION"},
+ {RPC_NT_INVALID_PIPE_OBJECT, -EIO, "RPC_NT_INVALID_PIPE_OBJECT"},
+ {RPC_NT_INVALID_PIPE_OPERATION, -EIO, "RPC_NT_INVALID_PIPE_OPERATION"},
+ {RPC_NT_WRONG_PIPE_VERSION, -EIO, "RPC_NT_WRONG_PIPE_VERSION"},
+ {RPC_NT_PIPE_CLOSED, -EIO, "RPC_NT_PIPE_CLOSED"},
+ {RPC_NT_PIPE_DISCIPLINE_ERROR, -EIO, "RPC_NT_PIPE_DISCIPLINE_ERROR"},
+ {RPC_NT_PIPE_EMPTY, -EIO, "RPC_NT_PIPE_EMPTY"},
+ {STATUS_PNP_BAD_MPS_TABLE, -EIO, "STATUS_PNP_BAD_MPS_TABLE"},
+ {STATUS_PNP_TRANSLATION_FAILED, -EIO, "STATUS_PNP_TRANSLATION_FAILED"},
+ {STATUS_PNP_IRQ_TRANSLATION_FAILED, -EIO,
+ "STATUS_PNP_IRQ_TRANSLATION_FAILED"},
+ {STATUS_PNP_INVALID_ID, -EIO, "STATUS_PNP_INVALID_ID"},
+ {STATUS_IO_REISSUE_AS_CACHED, -EIO, "STATUS_IO_REISSUE_AS_CACHED"},
+ {STATUS_CTX_WINSTATION_NAME_INVALID, -EIO,
+ "STATUS_CTX_WINSTATION_NAME_INVALID"},
+ {STATUS_CTX_INVALID_PD, -EIO, "STATUS_CTX_INVALID_PD"},
+ {STATUS_CTX_PD_NOT_FOUND, -EIO, "STATUS_CTX_PD_NOT_FOUND"},
+ {STATUS_CTX_CLOSE_PENDING, -EIO, "STATUS_CTX_CLOSE_PENDING"},
+ {STATUS_CTX_NO_OUTBUF, -EIO, "STATUS_CTX_NO_OUTBUF"},
+ {STATUS_CTX_MODEM_INF_NOT_FOUND, -EIO,
+ "STATUS_CTX_MODEM_INF_NOT_FOUND"},
+ {STATUS_CTX_INVALID_MODEMNAME, -EIO, "STATUS_CTX_INVALID_MODEMNAME"},
+ {STATUS_CTX_RESPONSE_ERROR, -EIO, "STATUS_CTX_RESPONSE_ERROR"},
+ {STATUS_CTX_MODEM_RESPONSE_TIMEOUT, -ETIMEDOUT,
+ "STATUS_CTX_MODEM_RESPONSE_TIMEOUT"},
+ {STATUS_CTX_MODEM_RESPONSE_NO_CARRIER, -EIO,
+ "STATUS_CTX_MODEM_RESPONSE_NO_CARRIER"},
+ {STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE, -EIO,
+ "STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE"},
+ {STATUS_CTX_MODEM_RESPONSE_BUSY, -EBUSY,
+ "STATUS_CTX_MODEM_RESPONSE_BUSY"},
+ {STATUS_CTX_MODEM_RESPONSE_VOICE, -EIO,
+ "STATUS_CTX_MODEM_RESPONSE_VOICE"},
+ {STATUS_CTX_TD_ERROR, -EIO, "STATUS_CTX_TD_ERROR"},
+ {STATUS_CTX_LICENSE_CLIENT_INVALID, -EIO,
+ "STATUS_CTX_LICENSE_CLIENT_INVALID"},
+ {STATUS_CTX_LICENSE_NOT_AVAILABLE, -EIO,
+ "STATUS_CTX_LICENSE_NOT_AVAILABLE"},
+ {STATUS_CTX_LICENSE_EXPIRED, -EIO, "STATUS_CTX_LICENSE_EXPIRED"},
+ {STATUS_CTX_WINSTATION_NOT_FOUND, -EIO,
+ "STATUS_CTX_WINSTATION_NOT_FOUND"},
+ {STATUS_CTX_WINSTATION_NAME_COLLISION, -EIO,
+ "STATUS_CTX_WINSTATION_NAME_COLLISION"},
+ {STATUS_CTX_WINSTATION_BUSY, -EBUSY, "STATUS_CTX_WINSTATION_BUSY"},
+ {STATUS_CTX_BAD_VIDEO_MODE, -EIO, "STATUS_CTX_BAD_VIDEO_MODE"},
+ {STATUS_CTX_GRAPHICS_INVALID, -EIO, "STATUS_CTX_GRAPHICS_INVALID"},
+ {STATUS_CTX_NOT_CONSOLE, -EIO, "STATUS_CTX_NOT_CONSOLE"},
+ {STATUS_CTX_CLIENT_QUERY_TIMEOUT, -EIO,
+ "STATUS_CTX_CLIENT_QUERY_TIMEOUT"},
+ {STATUS_CTX_CONSOLE_DISCONNECT, -EIO, "STATUS_CTX_CONSOLE_DISCONNECT"},
+ {STATUS_CTX_CONSOLE_CONNECT, -EIO, "STATUS_CTX_CONSOLE_CONNECT"},
+ {STATUS_CTX_SHADOW_DENIED, -EIO, "STATUS_CTX_SHADOW_DENIED"},
+ {STATUS_CTX_WINSTATION_ACCESS_DENIED, -EACCES,
+ "STATUS_CTX_WINSTATION_ACCESS_DENIED"},
+ {STATUS_CTX_INVALID_WD, -EIO, "STATUS_CTX_INVALID_WD"},
+ {STATUS_CTX_WD_NOT_FOUND, -EIO, "STATUS_CTX_WD_NOT_FOUND"},
+ {STATUS_CTX_SHADOW_INVALID, -EIO, "STATUS_CTX_SHADOW_INVALID"},
+ {STATUS_CTX_SHADOW_DISABLED, -EIO, "STATUS_CTX_SHADOW_DISABLED"},
+ {STATUS_RDP_PROTOCOL_ERROR, -EIO, "STATUS_RDP_PROTOCOL_ERROR"},
+ {STATUS_CTX_CLIENT_LICENSE_NOT_SET, -EIO,
+ "STATUS_CTX_CLIENT_LICENSE_NOT_SET"},
+ {STATUS_CTX_CLIENT_LICENSE_IN_USE, -EIO,
+ "STATUS_CTX_CLIENT_LICENSE_IN_USE"},
+ {STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE, -EIO,
+ "STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE"},
+ {STATUS_CTX_SHADOW_NOT_RUNNING, -EIO, "STATUS_CTX_SHADOW_NOT_RUNNING"},
+ {STATUS_CTX_LOGON_DISABLED, -EIO, "STATUS_CTX_LOGON_DISABLED"},
+ {STATUS_CTX_SECURITY_LAYER_ERROR, -EIO,
+ "STATUS_CTX_SECURITY_LAYER_ERROR"},
+ {STATUS_TS_INCOMPATIBLE_SESSIONS, -EIO,
+ "STATUS_TS_INCOMPATIBLE_SESSIONS"},
+ {STATUS_MUI_FILE_NOT_FOUND, -EIO, "STATUS_MUI_FILE_NOT_FOUND"},
+ {STATUS_MUI_INVALID_FILE, -EIO, "STATUS_MUI_INVALID_FILE"},
+ {STATUS_MUI_INVALID_RC_CONFIG, -EIO, "STATUS_MUI_INVALID_RC_CONFIG"},
+ {STATUS_MUI_INVALID_LOCALE_NAME, -EIO,
+ "STATUS_MUI_INVALID_LOCALE_NAME"},
+ {STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME, -EIO,
+ "STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME"},
+ {STATUS_MUI_FILE_NOT_LOADED, -EIO, "STATUS_MUI_FILE_NOT_LOADED"},
+ {STATUS_RESOURCE_ENUM_USER_STOP, -EIO,
+ "STATUS_RESOURCE_ENUM_USER_STOP"},
+ {STATUS_CLUSTER_INVALID_NODE, -EIO, "STATUS_CLUSTER_INVALID_NODE"},
+ {STATUS_CLUSTER_NODE_EXISTS, -EIO, "STATUS_CLUSTER_NODE_EXISTS"},
+ {STATUS_CLUSTER_JOIN_IN_PROGRESS, -EIO,
+ "STATUS_CLUSTER_JOIN_IN_PROGRESS"},
+ {STATUS_CLUSTER_NODE_NOT_FOUND, -EIO, "STATUS_CLUSTER_NODE_NOT_FOUND"},
+ {STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND, -EIO,
+ "STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND"},
+ {STATUS_CLUSTER_NETWORK_EXISTS, -EIO, "STATUS_CLUSTER_NETWORK_EXISTS"},
+ {STATUS_CLUSTER_NETWORK_NOT_FOUND, -EIO,
+ "STATUS_CLUSTER_NETWORK_NOT_FOUND"},
+ {STATUS_CLUSTER_NETINTERFACE_EXISTS, -EIO,
+ "STATUS_CLUSTER_NETINTERFACE_EXISTS"},
+ {STATUS_CLUSTER_NETINTERFACE_NOT_FOUND, -EIO,
+ "STATUS_CLUSTER_NETINTERFACE_NOT_FOUND"},
+ {STATUS_CLUSTER_INVALID_REQUEST, -EIO,
+ "STATUS_CLUSTER_INVALID_REQUEST"},
+ {STATUS_CLUSTER_INVALID_NETWORK_PROVIDER, -EIO,
+ "STATUS_CLUSTER_INVALID_NETWORK_PROVIDER"},
+ {STATUS_CLUSTER_NODE_DOWN, -EIO, "STATUS_CLUSTER_NODE_DOWN"},
+ {STATUS_CLUSTER_NODE_UNREACHABLE, -EIO,
+ "STATUS_CLUSTER_NODE_UNREACHABLE"},
+ {STATUS_CLUSTER_NODE_NOT_MEMBER, -EIO,
+ "STATUS_CLUSTER_NODE_NOT_MEMBER"},
+ {STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS, -EIO,
+ "STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS"},
+ {STATUS_CLUSTER_INVALID_NETWORK, -EIO,
+ "STATUS_CLUSTER_INVALID_NETWORK"},
+ {STATUS_CLUSTER_NO_NET_ADAPTERS, -EIO,
+ "STATUS_CLUSTER_NO_NET_ADAPTERS"},
+ {STATUS_CLUSTER_NODE_UP, -EIO, "STATUS_CLUSTER_NODE_UP"},
+ {STATUS_CLUSTER_NODE_PAUSED, -EIO, "STATUS_CLUSTER_NODE_PAUSED"},
+ {STATUS_CLUSTER_NODE_NOT_PAUSED, -EIO,
+ "STATUS_CLUSTER_NODE_NOT_PAUSED"},
+ {STATUS_CLUSTER_NO_SECURITY_CONTEXT, -EIO,
+ "STATUS_CLUSTER_NO_SECURITY_CONTEXT"},
+ {STATUS_CLUSTER_NETWORK_NOT_INTERNAL, -EIO,
+ "STATUS_CLUSTER_NETWORK_NOT_INTERNAL"},
+ {STATUS_CLUSTER_POISONED, -EIO, "STATUS_CLUSTER_POISONED"},
+ {STATUS_ACPI_INVALID_OPCODE, -EIO, "STATUS_ACPI_INVALID_OPCODE"},
+ {STATUS_ACPI_STACK_OVERFLOW, -EIO, "STATUS_ACPI_STACK_OVERFLOW"},
+ {STATUS_ACPI_ASSERT_FAILED, -EIO, "STATUS_ACPI_ASSERT_FAILED"},
+ {STATUS_ACPI_INVALID_INDEX, -EIO, "STATUS_ACPI_INVALID_INDEX"},
+ {STATUS_ACPI_INVALID_ARGUMENT, -EIO, "STATUS_ACPI_INVALID_ARGUMENT"},
+ {STATUS_ACPI_FATAL, -EIO, "STATUS_ACPI_FATAL"},
+ {STATUS_ACPI_INVALID_SUPERNAME, -EIO, "STATUS_ACPI_INVALID_SUPERNAME"},
+ {STATUS_ACPI_INVALID_ARGTYPE, -EIO, "STATUS_ACPI_INVALID_ARGTYPE"},
+ {STATUS_ACPI_INVALID_OBJTYPE, -EIO, "STATUS_ACPI_INVALID_OBJTYPE"},
+ {STATUS_ACPI_INVALID_TARGETTYPE, -EIO,
+ "STATUS_ACPI_INVALID_TARGETTYPE"},
+ {STATUS_ACPI_INCORRECT_ARGUMENT_COUNT, -EIO,
+ "STATUS_ACPI_INCORRECT_ARGUMENT_COUNT"},
+ {STATUS_ACPI_ADDRESS_NOT_MAPPED, -EIO,
+ "STATUS_ACPI_ADDRESS_NOT_MAPPED"},
+ {STATUS_ACPI_INVALID_EVENTTYPE, -EIO, "STATUS_ACPI_INVALID_EVENTTYPE"},
+ {STATUS_ACPI_HANDLER_COLLISION, -EIO, "STATUS_ACPI_HANDLER_COLLISION"},
+ {STATUS_ACPI_INVALID_DATA, -EIO, "STATUS_ACPI_INVALID_DATA"},
+ {STATUS_ACPI_INVALID_REGION, -EIO, "STATUS_ACPI_INVALID_REGION"},
+ {STATUS_ACPI_INVALID_ACCESS_SIZE, -EIO,
+ "STATUS_ACPI_INVALID_ACCESS_SIZE"},
+ {STATUS_ACPI_ACQUIRE_GLOBAL_LOCK, -EIO,
+ "STATUS_ACPI_ACQUIRE_GLOBAL_LOCK"},
+ {STATUS_ACPI_ALREADY_INITIALIZED, -EIO,
+ "STATUS_ACPI_ALREADY_INITIALIZED"},
+ {STATUS_ACPI_NOT_INITIALIZED, -EIO, "STATUS_ACPI_NOT_INITIALIZED"},
+ {STATUS_ACPI_INVALID_MUTEX_LEVEL, -EIO,
+ "STATUS_ACPI_INVALID_MUTEX_LEVEL"},
+ {STATUS_ACPI_MUTEX_NOT_OWNED, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNED"},
+ {STATUS_ACPI_MUTEX_NOT_OWNER, -EIO, "STATUS_ACPI_MUTEX_NOT_OWNER"},
+ {STATUS_ACPI_RS_ACCESS, -EIO, "STATUS_ACPI_RS_ACCESS"},
+ {STATUS_ACPI_INVALID_TABLE, -EIO, "STATUS_ACPI_INVALID_TABLE"},
+ {STATUS_ACPI_REG_HANDLER_FAILED, -EIO,
+ "STATUS_ACPI_REG_HANDLER_FAILED"},
+ {STATUS_ACPI_POWER_REQUEST_FAILED, -EIO,
+ "STATUS_ACPI_POWER_REQUEST_FAILED"},
+ {STATUS_SXS_SECTION_NOT_FOUND, -EIO, "STATUS_SXS_SECTION_NOT_FOUND"},
+ {STATUS_SXS_CANT_GEN_ACTCTX, -EIO, "STATUS_SXS_CANT_GEN_ACTCTX"},
+ {STATUS_SXS_INVALID_ACTCTXDATA_FORMAT, -EIO,
+ "STATUS_SXS_INVALID_ACTCTXDATA_FORMAT"},
+ {STATUS_SXS_ASSEMBLY_NOT_FOUND, -EIO, "STATUS_SXS_ASSEMBLY_NOT_FOUND"},
+ {STATUS_SXS_MANIFEST_FORMAT_ERROR, -EIO,
+ "STATUS_SXS_MANIFEST_FORMAT_ERROR"},
+ {STATUS_SXS_MANIFEST_PARSE_ERROR, -EIO,
+ "STATUS_SXS_MANIFEST_PARSE_ERROR"},
+ {STATUS_SXS_ACTIVATION_CONTEXT_DISABLED, -EIO,
+ "STATUS_SXS_ACTIVATION_CONTEXT_DISABLED"},
+ {STATUS_SXS_KEY_NOT_FOUND, -EIO, "STATUS_SXS_KEY_NOT_FOUND"},
+ {STATUS_SXS_VERSION_CONFLICT, -EIO, "STATUS_SXS_VERSION_CONFLICT"},
+ {STATUS_SXS_WRONG_SECTION_TYPE, -EIO, "STATUS_SXS_WRONG_SECTION_TYPE"},
+ {STATUS_SXS_THREAD_QUERIES_DISABLED, -EIO,
+ "STATUS_SXS_THREAD_QUERIES_DISABLED"},
+ {STATUS_SXS_ASSEMBLY_MISSING, -EIO, "STATUS_SXS_ASSEMBLY_MISSING"},
+ {STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET, -EIO,
+ "STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET"},
+ {STATUS_SXS_EARLY_DEACTIVATION, -EIO, "STATUS_SXS_EARLY_DEACTIVATION"},
+ {STATUS_SXS_INVALID_DEACTIVATION, -EIO,
+ "STATUS_SXS_INVALID_DEACTIVATION"},
+ {STATUS_SXS_MULTIPLE_DEACTIVATION, -EIO,
+ "STATUS_SXS_MULTIPLE_DEACTIVATION"},
+ {STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY, -EIO,
+ "STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY"},
+ {STATUS_SXS_PROCESS_TERMINATION_REQUESTED, -EIO,
+ "STATUS_SXS_PROCESS_TERMINATION_REQUESTED"},
+ {STATUS_SXS_CORRUPT_ACTIVATION_STACK, -EIO,
+ "STATUS_SXS_CORRUPT_ACTIVATION_STACK"},
+ {STATUS_SXS_CORRUPTION, -EIO, "STATUS_SXS_CORRUPTION"},
+ {STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE, -EIO,
+ "STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE"},
+ {STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME, -EIO,
+ "STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME"},
+ {STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE, -EIO,
+ "STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE"},
+ {STATUS_SXS_IDENTITY_PARSE_ERROR, -EIO,
+ "STATUS_SXS_IDENTITY_PARSE_ERROR"},
+ {STATUS_SXS_COMPONENT_STORE_CORRUPT, -EIO,
+ "STATUS_SXS_COMPONENT_STORE_CORRUPT"},
+ {STATUS_SXS_FILE_HASH_MISMATCH, -EIO, "STATUS_SXS_FILE_HASH_MISMATCH"},
+ {STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT, -EIO,
+ "STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT"},
+ {STATUS_SXS_IDENTITIES_DIFFERENT, -EIO,
+ "STATUS_SXS_IDENTITIES_DIFFERENT"},
+ {STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT, -EIO,
+ "STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT"},
+ {STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY, -EIO,
+ "STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY"},
+ {STATUS_ADVANCED_INSTALLER_FAILED, -EIO,
+ "STATUS_ADVANCED_INSTALLER_FAILED"},
+ {STATUS_XML_ENCODING_MISMATCH, -EIO, "STATUS_XML_ENCODING_MISMATCH"},
+ {STATUS_SXS_MANIFEST_TOO_BIG, -EIO, "STATUS_SXS_MANIFEST_TOO_BIG"},
+ {STATUS_SXS_SETTING_NOT_REGISTERED, -EIO,
+ "STATUS_SXS_SETTING_NOT_REGISTERED"},
+ {STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE, -EIO,
+ "STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE"},
+ {STATUS_SMI_PRIMITIVE_INSTALLER_FAILED, -EIO,
+ "STATUS_SMI_PRIMITIVE_INSTALLER_FAILED"},
+ {STATUS_GENERIC_COMMAND_FAILED, -EIO, "STATUS_GENERIC_COMMAND_FAILED"},
+ {STATUS_SXS_FILE_HASH_MISSING, -EIO, "STATUS_SXS_FILE_HASH_MISSING"},
+ {STATUS_TRANSACTIONAL_CONFLICT, -EIO, "STATUS_TRANSACTIONAL_CONFLICT"},
+ {STATUS_INVALID_TRANSACTION, -EIO, "STATUS_INVALID_TRANSACTION"},
+ {STATUS_TRANSACTION_NOT_ACTIVE, -EIO, "STATUS_TRANSACTION_NOT_ACTIVE"},
+ {STATUS_TM_INITIALIZATION_FAILED, -EIO,
+ "STATUS_TM_INITIALIZATION_FAILED"},
+ {STATUS_RM_NOT_ACTIVE, -EIO, "STATUS_RM_NOT_ACTIVE"},
+ {STATUS_RM_METADATA_CORRUPT, -EIO, "STATUS_RM_METADATA_CORRUPT"},
+ {STATUS_TRANSACTION_NOT_JOINED, -EIO, "STATUS_TRANSACTION_NOT_JOINED"},
+ {STATUS_DIRECTORY_NOT_RM, -EIO, "STATUS_DIRECTORY_NOT_RM"},
+ {STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE, -EIO,
+ "STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE"},
+ {STATUS_LOG_RESIZE_INVALID_SIZE, -EIO,
+ "STATUS_LOG_RESIZE_INVALID_SIZE"},
+ {STATUS_REMOTE_FILE_VERSION_MISMATCH, -EIO,
+ "STATUS_REMOTE_FILE_VERSION_MISMATCH"},
+ {STATUS_CRM_PROTOCOL_ALREADY_EXISTS, -EIO,
+ "STATUS_CRM_PROTOCOL_ALREADY_EXISTS"},
+ {STATUS_TRANSACTION_PROPAGATION_FAILED, -EIO,
+ "STATUS_TRANSACTION_PROPAGATION_FAILED"},
+ {STATUS_CRM_PROTOCOL_NOT_FOUND, -EIO, "STATUS_CRM_PROTOCOL_NOT_FOUND"},
+ {STATUS_TRANSACTION_SUPERIOR_EXISTS, -EIO,
+ "STATUS_TRANSACTION_SUPERIOR_EXISTS"},
+ {STATUS_TRANSACTION_REQUEST_NOT_VALID, -EIO,
+ "STATUS_TRANSACTION_REQUEST_NOT_VALID"},
+ {STATUS_TRANSACTION_NOT_REQUESTED, -EIO,
+ "STATUS_TRANSACTION_NOT_REQUESTED"},
+ {STATUS_TRANSACTION_ALREADY_ABORTED, -EIO,
+ "STATUS_TRANSACTION_ALREADY_ABORTED"},
+ {STATUS_TRANSACTION_ALREADY_COMMITTED, -EIO,
+ "STATUS_TRANSACTION_ALREADY_COMMITTED"},
+ {STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER, -EIO,
+ "STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER"},
+ {STATUS_CURRENT_TRANSACTION_NOT_VALID, -EIO,
+ "STATUS_CURRENT_TRANSACTION_NOT_VALID"},
+ {STATUS_LOG_GROWTH_FAILED, -EIO, "STATUS_LOG_GROWTH_FAILED"},
+ {STATUS_OBJECT_NO_LONGER_EXISTS, -EIO,
+ "STATUS_OBJECT_NO_LONGER_EXISTS"},
+ {STATUS_STREAM_MINIVERSION_NOT_FOUND, -EIO,
+ "STATUS_STREAM_MINIVERSION_NOT_FOUND"},
+ {STATUS_STREAM_MINIVERSION_NOT_VALID, -EIO,
+ "STATUS_STREAM_MINIVERSION_NOT_VALID"},
+ {STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION, -EIO,
+ "STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION"},
+ {STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT, -EIO,
+ "STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT"},
+ {STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS, -EIO,
+ "STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS"},
+ {STATUS_HANDLE_NO_LONGER_VALID, -EIO, "STATUS_HANDLE_NO_LONGER_VALID"},
+ {STATUS_LOG_CORRUPTION_DETECTED, -EIO,
+ "STATUS_LOG_CORRUPTION_DETECTED"},
+ {STATUS_RM_DISCONNECTED, -EIO, "STATUS_RM_DISCONNECTED"},
+ {STATUS_ENLISTMENT_NOT_SUPERIOR, -EIO,
+ "STATUS_ENLISTMENT_NOT_SUPERIOR"},
+ {STATUS_FILE_IDENTITY_NOT_PERSISTENT, -EIO,
+ "STATUS_FILE_IDENTITY_NOT_PERSISTENT"},
+ {STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY, -EIO,
+ "STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY"},
+ {STATUS_CANT_CROSS_RM_BOUNDARY, -EIO, "STATUS_CANT_CROSS_RM_BOUNDARY"},
+ {STATUS_TXF_DIR_NOT_EMPTY, -EIO, "STATUS_TXF_DIR_NOT_EMPTY"},
+ {STATUS_INDOUBT_TRANSACTIONS_EXIST, -EIO,
+ "STATUS_INDOUBT_TRANSACTIONS_EXIST"},
+ {STATUS_TM_VOLATILE, -EIO, "STATUS_TM_VOLATILE"},
+ {STATUS_ROLLBACK_TIMER_EXPIRED, -EIO, "STATUS_ROLLBACK_TIMER_EXPIRED"},
+ {STATUS_TXF_ATTRIBUTE_CORRUPT, -EIO, "STATUS_TXF_ATTRIBUTE_CORRUPT"},
+ {STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+ "STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION"},
+ {STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED, -EIO,
+ "STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED"},
+ {STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE, -EIO,
+ "STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE"},
+ {STATUS_TRANSACTION_REQUIRED_PROMOTION, -EIO,
+ "STATUS_TRANSACTION_REQUIRED_PROMOTION"},
+ {STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION, -EIO,
+ "STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION"},
+ {STATUS_TRANSACTIONS_NOT_FROZEN, -EIO,
+ "STATUS_TRANSACTIONS_NOT_FROZEN"},
+ {STATUS_TRANSACTION_FREEZE_IN_PROGRESS, -EIO,
+ "STATUS_TRANSACTION_FREEZE_IN_PROGRESS"},
+ {STATUS_NOT_SNAPSHOT_VOLUME, -EIO, "STATUS_NOT_SNAPSHOT_VOLUME"},
+ {STATUS_NO_SAVEPOINT_WITH_OPEN_FILES, -EIO,
+ "STATUS_NO_SAVEPOINT_WITH_OPEN_FILES"},
+ {STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+ "STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION"},
+ {STATUS_TM_IDENTITY_MISMATCH, -EIO, "STATUS_TM_IDENTITY_MISMATCH"},
+ {STATUS_FLOATED_SECTION, -EIO, "STATUS_FLOATED_SECTION"},
+ {STATUS_CANNOT_ACCEPT_TRANSACTED_WORK, -EIO,
+ "STATUS_CANNOT_ACCEPT_TRANSACTED_WORK"},
+ {STATUS_CANNOT_ABORT_TRANSACTIONS, -EIO,
+ "STATUS_CANNOT_ABORT_TRANSACTIONS"},
+ {STATUS_TRANSACTION_NOT_FOUND, -EIO, "STATUS_TRANSACTION_NOT_FOUND"},
+ {STATUS_RESOURCEMANAGER_NOT_FOUND, -EIO,
+ "STATUS_RESOURCEMANAGER_NOT_FOUND"},
+ {STATUS_ENLISTMENT_NOT_FOUND, -EIO, "STATUS_ENLISTMENT_NOT_FOUND"},
+ {STATUS_TRANSACTIONMANAGER_NOT_FOUND, -EIO,
+ "STATUS_TRANSACTIONMANAGER_NOT_FOUND"},
+ {STATUS_TRANSACTIONMANAGER_NOT_ONLINE, -EIO,
+ "STATUS_TRANSACTIONMANAGER_NOT_ONLINE"},
+ {STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION, -EIO,
+ "STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION"},
+ {STATUS_TRANSACTION_NOT_ROOT, -EIO, "STATUS_TRANSACTION_NOT_ROOT"},
+ {STATUS_TRANSACTION_OBJECT_EXPIRED, -EIO,
+ "STATUS_TRANSACTION_OBJECT_EXPIRED"},
+ {STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION, -EIO,
+ "STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION"},
+ {STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED, -EIO,
+ "STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED"},
+ {STATUS_TRANSACTION_RECORD_TOO_LONG, -EIO,
+ "STATUS_TRANSACTION_RECORD_TOO_LONG"},
+ {STATUS_NO_LINK_TRACKING_IN_TRANSACTION, -EIO,
+ "STATUS_NO_LINK_TRACKING_IN_TRANSACTION"},
+ {STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION, -EOPNOTSUPP,
+ "STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION"},
+ {STATUS_TRANSACTION_INTEGRITY_VIOLATED, -EIO,
+ "STATUS_TRANSACTION_INTEGRITY_VIOLATED"},
+ {STATUS_LOG_SECTOR_INVALID, -EIO, "STATUS_LOG_SECTOR_INVALID"},
+ {STATUS_LOG_SECTOR_PARITY_INVALID, -EIO,
+ "STATUS_LOG_SECTOR_PARITY_INVALID"},
+ {STATUS_LOG_SECTOR_REMAPPED, -EIO, "STATUS_LOG_SECTOR_REMAPPED"},
+ {STATUS_LOG_BLOCK_INCOMPLETE, -EIO, "STATUS_LOG_BLOCK_INCOMPLETE"},
+ {STATUS_LOG_INVALID_RANGE, -EIO, "STATUS_LOG_INVALID_RANGE"},
+ {STATUS_LOG_BLOCKS_EXHAUSTED, -EIO, "STATUS_LOG_BLOCKS_EXHAUSTED"},
+ {STATUS_LOG_READ_CONTEXT_INVALID, -EIO,
+ "STATUS_LOG_READ_CONTEXT_INVALID"},
+ {STATUS_LOG_RESTART_INVALID, -EIO, "STATUS_LOG_RESTART_INVALID"},
+ {STATUS_LOG_BLOCK_VERSION, -EIO, "STATUS_LOG_BLOCK_VERSION"},
+ {STATUS_LOG_BLOCK_INVALID, -EIO, "STATUS_LOG_BLOCK_INVALID"},
+ {STATUS_LOG_READ_MODE_INVALID, -EIO, "STATUS_LOG_READ_MODE_INVALID"},
+ {STATUS_LOG_METADATA_CORRUPT, -EIO, "STATUS_LOG_METADATA_CORRUPT"},
+ {STATUS_LOG_METADATA_INVALID, -EIO, "STATUS_LOG_METADATA_INVALID"},
+ {STATUS_LOG_METADATA_INCONSISTENT, -EIO,
+ "STATUS_LOG_METADATA_INCONSISTENT"},
+ {STATUS_LOG_RESERVATION_INVALID, -EIO,
+ "STATUS_LOG_RESERVATION_INVALID"},
+ {STATUS_LOG_CANT_DELETE, -EIO, "STATUS_LOG_CANT_DELETE"},
+ {STATUS_LOG_CONTAINER_LIMIT_EXCEEDED, -EIO,
+ "STATUS_LOG_CONTAINER_LIMIT_EXCEEDED"},
+ {STATUS_LOG_START_OF_LOG, -EIO, "STATUS_LOG_START_OF_LOG"},
+ {STATUS_LOG_POLICY_ALREADY_INSTALLED, -EIO,
+ "STATUS_LOG_POLICY_ALREADY_INSTALLED"},
+ {STATUS_LOG_POLICY_NOT_INSTALLED, -EIO,
+ "STATUS_LOG_POLICY_NOT_INSTALLED"},
+ {STATUS_LOG_POLICY_INVALID, -EIO, "STATUS_LOG_POLICY_INVALID"},
+ {STATUS_LOG_POLICY_CONFLICT, -EIO, "STATUS_LOG_POLICY_CONFLICT"},
+ {STATUS_LOG_PINNED_ARCHIVE_TAIL, -EIO,
+ "STATUS_LOG_PINNED_ARCHIVE_TAIL"},
+ {STATUS_LOG_RECORD_NONEXISTENT, -EIO, "STATUS_LOG_RECORD_NONEXISTENT"},
+ {STATUS_LOG_RECORDS_RESERVED_INVALID, -EIO,
+ "STATUS_LOG_RECORDS_RESERVED_INVALID"},
+ {STATUS_LOG_SPACE_RESERVED_INVALID, -EIO,
+ "STATUS_LOG_SPACE_RESERVED_INVALID"},
+ {STATUS_LOG_TAIL_INVALID, -EIO, "STATUS_LOG_TAIL_INVALID"},
+ {STATUS_LOG_FULL, -EIO, "STATUS_LOG_FULL"},
+ {STATUS_LOG_MULTIPLEXED, -EIO, "STATUS_LOG_MULTIPLEXED"},
+ {STATUS_LOG_DEDICATED, -EIO, "STATUS_LOG_DEDICATED"},
+ {STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS, -EIO,
+ "STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS"},
+ {STATUS_LOG_ARCHIVE_IN_PROGRESS, -EIO,
+ "STATUS_LOG_ARCHIVE_IN_PROGRESS"},
+ {STATUS_LOG_EPHEMERAL, -EIO, "STATUS_LOG_EPHEMERAL"},
+ {STATUS_LOG_NOT_ENOUGH_CONTAINERS, -EIO,
+ "STATUS_LOG_NOT_ENOUGH_CONTAINERS"},
+ {STATUS_LOG_CLIENT_ALREADY_REGISTERED, -EIO,
+ "STATUS_LOG_CLIENT_ALREADY_REGISTERED"},
+ {STATUS_LOG_CLIENT_NOT_REGISTERED, -EIO,
+ "STATUS_LOG_CLIENT_NOT_REGISTERED"},
+ {STATUS_LOG_FULL_HANDLER_IN_PROGRESS, -EIO,
+ "STATUS_LOG_FULL_HANDLER_IN_PROGRESS"},
+ {STATUS_LOG_CONTAINER_READ_FAILED, -EIO,
+ "STATUS_LOG_CONTAINER_READ_FAILED"},
+ {STATUS_LOG_CONTAINER_WRITE_FAILED, -EIO,
+ "STATUS_LOG_CONTAINER_WRITE_FAILED"},
+ {STATUS_LOG_CONTAINER_OPEN_FAILED, -EIO,
+ "STATUS_LOG_CONTAINER_OPEN_FAILED"},
+ {STATUS_LOG_CONTAINER_STATE_INVALID, -EIO,
+ "STATUS_LOG_CONTAINER_STATE_INVALID"},
+ {STATUS_LOG_STATE_INVALID, -EIO, "STATUS_LOG_STATE_INVALID"},
+ {STATUS_LOG_PINNED, -EIO, "STATUS_LOG_PINNED"},
+ {STATUS_LOG_METADATA_FLUSH_FAILED, -EIO,
+ "STATUS_LOG_METADATA_FLUSH_FAILED"},
+ {STATUS_LOG_INCONSISTENT_SECURITY, -EIO,
+ "STATUS_LOG_INCONSISTENT_SECURITY"},
+ {STATUS_LOG_APPENDED_FLUSH_FAILED, -EIO,
+ "STATUS_LOG_APPENDED_FLUSH_FAILED"},
+ {STATUS_LOG_PINNED_RESERVATION, -EIO, "STATUS_LOG_PINNED_RESERVATION"},
+ {STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD, -EIO,
+ "STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD"},
+ {STATUS_FLT_NO_HANDLER_DEFINED, -EIO, "STATUS_FLT_NO_HANDLER_DEFINED"},
+ {STATUS_FLT_CONTEXT_ALREADY_DEFINED, -EIO,
+ "STATUS_FLT_CONTEXT_ALREADY_DEFINED"},
+ {STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST, -EIO,
+ "STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST"},
+ {STATUS_FLT_DISALLOW_FAST_IO, -EIO, "STATUS_FLT_DISALLOW_FAST_IO"},
+ {STATUS_FLT_INVALID_NAME_REQUEST, -EIO,
+ "STATUS_FLT_INVALID_NAME_REQUEST"},
+ {STATUS_FLT_NOT_SAFE_TO_POST_OPERATION, -EIO,
+ "STATUS_FLT_NOT_SAFE_TO_POST_OPERATION"},
+ {STATUS_FLT_NOT_INITIALIZED, -EIO, "STATUS_FLT_NOT_INITIALIZED"},
+ {STATUS_FLT_FILTER_NOT_READY, -EIO, "STATUS_FLT_FILTER_NOT_READY"},
+ {STATUS_FLT_POST_OPERATION_CLEANUP, -EIO,
+ "STATUS_FLT_POST_OPERATION_CLEANUP"},
+ {STATUS_FLT_INTERNAL_ERROR, -EIO, "STATUS_FLT_INTERNAL_ERROR"},
+ {STATUS_FLT_DELETING_OBJECT, -EIO, "STATUS_FLT_DELETING_OBJECT"},
+ {STATUS_FLT_MUST_BE_NONPAGED_POOL, -EIO,
+ "STATUS_FLT_MUST_BE_NONPAGED_POOL"},
+ {STATUS_FLT_DUPLICATE_ENTRY, -EIO, "STATUS_FLT_DUPLICATE_ENTRY"},
+ {STATUS_FLT_CBDQ_DISABLED, -EIO, "STATUS_FLT_CBDQ_DISABLED"},
+ {STATUS_FLT_DO_NOT_ATTACH, -EIO, "STATUS_FLT_DO_NOT_ATTACH"},
+ {STATUS_FLT_DO_NOT_DETACH, -EIO, "STATUS_FLT_DO_NOT_DETACH"},
+ {STATUS_FLT_INSTANCE_ALTITUDE_COLLISION, -EIO,
+ "STATUS_FLT_INSTANCE_ALTITUDE_COLLISION"},
+ {STATUS_FLT_INSTANCE_NAME_COLLISION, -EIO,
+ "STATUS_FLT_INSTANCE_NAME_COLLISION"},
+ {STATUS_FLT_FILTER_NOT_FOUND, -EIO, "STATUS_FLT_FILTER_NOT_FOUND"},
+ {STATUS_FLT_VOLUME_NOT_FOUND, -EIO, "STATUS_FLT_VOLUME_NOT_FOUND"},
+ {STATUS_FLT_INSTANCE_NOT_FOUND, -EIO, "STATUS_FLT_INSTANCE_NOT_FOUND"},
+ {STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND, -EIO,
+ "STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND"},
+ {STATUS_FLT_INVALID_CONTEXT_REGISTRATION, -EIO,
+ "STATUS_FLT_INVALID_CONTEXT_REGISTRATION"},
+ {STATUS_FLT_NAME_CACHE_MISS, -EIO, "STATUS_FLT_NAME_CACHE_MISS"},
+ {STATUS_FLT_NO_DEVICE_OBJECT, -EIO, "STATUS_FLT_NO_DEVICE_OBJECT"},
+ {STATUS_FLT_VOLUME_ALREADY_MOUNTED, -EIO,
+ "STATUS_FLT_VOLUME_ALREADY_MOUNTED"},
+ {STATUS_FLT_ALREADY_ENLISTED, -EIO, "STATUS_FLT_ALREADY_ENLISTED"},
+ {STATUS_FLT_CONTEXT_ALREADY_LINKED, -EIO,
+ "STATUS_FLT_CONTEXT_ALREADY_LINKED"},
+ {STATUS_FLT_NO_WAITER_FOR_REPLY, -EIO,
+ "STATUS_FLT_NO_WAITER_FOR_REPLY"},
+ {STATUS_MONITOR_NO_DESCRIPTOR, -EIO, "STATUS_MONITOR_NO_DESCRIPTOR"},
+ {STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT, -EIO,
+ "STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT"},
+ {STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM, -EIO,
+ "STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM"},
+ {STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK, -EIO,
+ "STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK"},
+ {STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED, -EIO,
+ "STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED"},
+ {STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK, -EIO,
+ "STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK"},
+ {STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK, -EIO,
+ "STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK"},
+ {STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA, -EIO,
+ "STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA"},
+ {STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK, -EIO,
+ "STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK"},
+ {STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER, -EIO,
+ "STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER"},
+ {STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER, -EIO,
+ "STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER"},
+ {STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER, -EIO,
+ "STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER"},
+ {STATUS_GRAPHICS_ADAPTER_WAS_RESET, -EIO,
+ "STATUS_GRAPHICS_ADAPTER_WAS_RESET"},
+ {STATUS_GRAPHICS_INVALID_DRIVER_MODEL, -EIO,
+ "STATUS_GRAPHICS_INVALID_DRIVER_MODEL"},
+ {STATUS_GRAPHICS_PRESENT_MODE_CHANGED, -EIO,
+ "STATUS_GRAPHICS_PRESENT_MODE_CHANGED"},
+ {STATUS_GRAPHICS_PRESENT_OCCLUDED, -EIO,
+ "STATUS_GRAPHICS_PRESENT_OCCLUDED"},
+ {STATUS_GRAPHICS_PRESENT_DENIED, -EIO,
+ "STATUS_GRAPHICS_PRESENT_DENIED"},
+ {STATUS_GRAPHICS_CANNOTCOLORCONVERT, -EIO,
+ "STATUS_GRAPHICS_CANNOTCOLORCONVERT"},
+ {STATUS_GRAPHICS_NO_VIDEO_MEMORY, -EIO,
+ "STATUS_GRAPHICS_NO_VIDEO_MEMORY"},
+ {STATUS_GRAPHICS_CANT_LOCK_MEMORY, -EIO,
+ "STATUS_GRAPHICS_CANT_LOCK_MEMORY"},
+ {STATUS_GRAPHICS_ALLOCATION_BUSY, -EBUSY,
+ "STATUS_GRAPHICS_ALLOCATION_BUSY"},
+ {STATUS_GRAPHICS_TOO_MANY_REFERENCES, -EIO,
+ "STATUS_GRAPHICS_TOO_MANY_REFERENCES"},
+ {STATUS_GRAPHICS_TRY_AGAIN_LATER, -EIO,
+ "STATUS_GRAPHICS_TRY_AGAIN_LATER"},
+ {STATUS_GRAPHICS_TRY_AGAIN_NOW, -EIO, "STATUS_GRAPHICS_TRY_AGAIN_NOW"},
+ {STATUS_GRAPHICS_ALLOCATION_INVALID, -EIO,
+ "STATUS_GRAPHICS_ALLOCATION_INVALID"},
+ {STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE, -EIO,
+ "STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE"},
+ {STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED, -EIO,
+ "STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED"},
+ {STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION, -EIO,
+ "STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION"},
+ {STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE, -EIO,
+ "STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE"},
+ {STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION, -EIO,
+ "STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION"},
+ {STATUS_GRAPHICS_ALLOCATION_CLOSED, -EIO,
+ "STATUS_GRAPHICS_ALLOCATION_CLOSED"},
+ {STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE, -EIO,
+ "STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE"},
+ {STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE, -EIO,
+ "STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE"},
+ {STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE, -EIO,
+ "STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE"},
+ {STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST, -EIO,
+ "STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST"},
+ {STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE, -EIO,
+ "STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY"},
+ {STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_INVALID_VIDPN, -EIO, "STATUS_GRAPHICS_INVALID_VIDPN"},
+ {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE"},
+ {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET"},
+ {STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET"},
+ {STATUS_GRAPHICS_INVALID_FREQUENCY, -EIO,
+ "STATUS_GRAPHICS_INVALID_FREQUENCY"},
+ {STATUS_GRAPHICS_INVALID_ACTIVE_REGION, -EIO,
+ "STATUS_GRAPHICS_INVALID_ACTIVE_REGION"},
+ {STATUS_GRAPHICS_INVALID_TOTAL_REGION, -EIO,
+ "STATUS_GRAPHICS_INVALID_TOTAL_REGION"},
+ {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE"},
+ {STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE"},
+ {STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET, -EIO,
+ "STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET"},
+ {STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY"},
+ {STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET, -EIO,
+ "STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET"},
+ {STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET"},
+ {STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET"},
+ {STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET, -EIO,
+ "STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET"},
+ {STATUS_GRAPHICS_TARGET_ALREADY_IN_SET, -EIO,
+ "STATUS_GRAPHICS_TARGET_ALREADY_IN_SET"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH"},
+ {STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE"},
+ {STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET, -EIO,
+ "STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET"},
+ {STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET, -EIO,
+ "STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET"},
+ {STATUS_GRAPHICS_STALE_MODESET, -EIO, "STATUS_GRAPHICS_STALE_MODESET"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE"},
+ {STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN, -EIO,
+ "STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN"},
+ {STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE, -EIO,
+ "STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE"},
+ {STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION, -EIO,
+ "STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION"},
+ {STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES, -EIO,
+ "STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES"},
+ {STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY"},
+ {STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE, -EIO,
+ "STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE"},
+ {STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET, -EIO,
+ "STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET"},
+ {STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET"},
+ {STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR"},
+ {STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET, -EIO,
+ "STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET"},
+ {STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET, -EIO,
+ "STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET"},
+ {STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE, -EIO,
+ "STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE"},
+ {STATUS_GRAPHICS_RESOURCES_NOT_RELATED, -EIO,
+ "STATUS_GRAPHICS_RESOURCES_NOT_RELATED"},
+ {STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE, -EIO,
+ "STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE"},
+ {STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE, -EIO,
+ "STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE"},
+ {STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET, -EIO,
+ "STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET"},
+ {STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER, -EIO,
+ "STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER"},
+ {STATUS_GRAPHICS_NO_VIDPNMGR, -EIO, "STATUS_GRAPHICS_NO_VIDPNMGR"},
+ {STATUS_GRAPHICS_NO_ACTIVE_VIDPN, -EIO,
+ "STATUS_GRAPHICS_NO_ACTIVE_VIDPN"},
+ {STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY"},
+ {STATUS_GRAPHICS_MONITOR_NOT_CONNECTED, -EIO,
+ "STATUS_GRAPHICS_MONITOR_NOT_CONNECTED"},
+ {STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY"},
+ {STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE, -EIO,
+ "STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE"},
+ {STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE, -EIO,
+ "STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE"},
+ {STATUS_GRAPHICS_INVALID_STRIDE, -EIO,
+ "STATUS_GRAPHICS_INVALID_STRIDE"},
+ {STATUS_GRAPHICS_INVALID_PIXELFORMAT, -EIO,
+ "STATUS_GRAPHICS_INVALID_PIXELFORMAT"},
+ {STATUS_GRAPHICS_INVALID_COLORBASIS, -EIO,
+ "STATUS_GRAPHICS_INVALID_COLORBASIS"},
+ {STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE, -EIO,
+ "STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE"},
+ {STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY, -EIO,
+ "STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY"},
+ {STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT, -EIO,
+ "STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT"},
+ {STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE, -EIO,
+ "STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE"},
+ {STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN, -EIO,
+ "STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN"},
+ {STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL, -EIO,
+ "STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL"},
+ {STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION, -EIO,
+ "STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION"},
+ {STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED,
+ -EIO,
+ "STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_INVALID_GAMMA_RAMP, -EIO,
+ "STATUS_GRAPHICS_INVALID_GAMMA_RAMP"},
+ {STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_MODE_NOT_IN_MODESET, -EIO,
+ "STATUS_GRAPHICS_MODE_NOT_IN_MODESET"},
+ {STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON, -EIO,
+ "STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON"},
+ {STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE, -EIO,
+ "STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE"},
+ {STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE, -EIO,
+ "STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE"},
+ {STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS, -EIO,
+ "STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS"},
+ {STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING, -EIO,
+ "STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING"},
+ {STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED, -EIO,
+ "STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED"},
+ {STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS, -EIO,
+ "STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS"},
+ {STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT, -EIO,
+ "STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT"},
+ {STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM, -EIO,
+ "STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN"},
+ {STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT, -EIO,
+ "STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT"},
+ {STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED, -EIO,
+ "STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED"},
+ {STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION, -EIO,
+ "STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION"},
+ {STATUS_GRAPHICS_INVALID_CLIENT_TYPE, -EIO,
+ "STATUS_GRAPHICS_INVALID_CLIENT_TYPE"},
+ {STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET, -EIO,
+ "STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET"},
+ {STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED, -EIO,
+ "STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED"},
+ {STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER, -EIO,
+ "STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER"},
+ {STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED, -EIO,
+ "STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED"},
+ {STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED, -EIO,
+ "STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED"},
+ {STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY, -EIO,
+ "STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY"},
+ {STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED, -EIO,
+ "STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED"},
+ {STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON, -EIO,
+ "STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON"},
+ {STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE, -EIO,
+ "STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE"},
+ {STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER, -EIO,
+ "STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER"},
+ {STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED, -EIO,
+ "STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED"},
+ {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS,
+ -EIO,
+ "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS"},
+ {STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST"},
+ {STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR, -EIO,
+ "STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR"},
+ {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS, -EIO,
+ "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS"},
+ {STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST"},
+ {STATUS_GRAPHICS_OPM_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_OPM_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_COPP_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_COPP_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_UAB_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_UAB_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS"},
+ {STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL, -EIO,
+ "STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL"},
+ {STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST, -EIO,
+ "STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST"},
+ {STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
+ "STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
+ {STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
+ "STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
+ {STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_OPM_INVALID_POINTER, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_POINTER"},
+ {STATUS_GRAPHICS_OPM_INTERNAL_ERROR, -EIO,
+ "STATUS_GRAPHICS_OPM_INTERNAL_ERROR"},
+ {STATUS_GRAPHICS_OPM_INVALID_HANDLE, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_HANDLE"},
+ {STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
+ "STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
+ {STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH, -EIO,
+ "STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH"},
+ {STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED, -EIO,
+ "STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED"},
+ {STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED, -EIO,
+ "STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED"},
+ {STATUS_GRAPHICS_PVP_HFS_FAILED, -EIO,
+ "STATUS_GRAPHICS_PVP_HFS_FAILED"},
+ {STATUS_GRAPHICS_OPM_INVALID_SRM, -EIO,
+ "STATUS_GRAPHICS_OPM_INVALID_SRM"},
+ {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP, -EIO,
+ "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP"},
+ {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP, -EIO,
+ "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP"},
+ {STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA, -EIO,
+ "STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA"},
+ {STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET, -EIO,
+ "STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET"},
+ {STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH, -EIO,
+ "STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH"},
+ {STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE, -EIO,
+ "STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE"},
+ {STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS, -EIO,
+ "STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS"},
+ {STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
+ "STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS"},
+ {STATUS_GRAPHICS_I2C_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_I2C_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST, -EIO,
+ "STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST"},
+ {STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA, -EIO,
+ "STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA"},
+ {STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA, -EIO,
+ "STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA"},
+ {STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_DDCCI_INVALID_DATA, -EIO,
+ "STATUS_GRAPHICS_DDCCI_INVALID_DATA"},
+ {STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE,
+ -EIO,
+ "STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE"},
+ {STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING, -EIO,
+ "STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING"},
+ {STATUS_GRAPHICS_MCA_INTERNAL_ERROR, -EIO,
+ "STATUS_GRAPHICS_MCA_INTERNAL_ERROR"},
+ {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND, -EIO,
+ "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND"},
+ {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH, -EIO,
+ "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH"},
+ {STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM, -EIO,
+ "STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM"},
+ {STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE, -EIO,
+ "STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE"},
+ {STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS, -EIO,
+ "STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS"},
+ {STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED"},
+ {STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME, -EIO,
+ "STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME"},
+ {STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP, -EIO,
+ "STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP"},
+ {STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED, -EIO,
+ "STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED"},
+ {STATUS_GRAPHICS_INVALID_POINTER, -EIO,
+ "STATUS_GRAPHICS_INVALID_POINTER"},
+ {STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE, -EIO,
+ "STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE"},
+ {STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL, -EIO,
+ "STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL"},
+ {STATUS_GRAPHICS_INTERNAL_ERROR, -EIO,
+ "STATUS_GRAPHICS_INTERNAL_ERROR"},
+ {STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS, -EIO,
+ "STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS"},
+ {STATUS_FVE_LOCKED_VOLUME, -EIO, "STATUS_FVE_LOCKED_VOLUME"},
+ {STATUS_FVE_NOT_ENCRYPTED, -EIO, "STATUS_FVE_NOT_ENCRYPTED"},
+ {STATUS_FVE_BAD_INFORMATION, -EIO, "STATUS_FVE_BAD_INFORMATION"},
+ {STATUS_FVE_TOO_SMALL, -EIO, "STATUS_FVE_TOO_SMALL"},
+ {STATUS_FVE_FAILED_WRONG_FS, -EIO, "STATUS_FVE_FAILED_WRONG_FS"},
+ {STATUS_FVE_FAILED_BAD_FS, -EIO, "STATUS_FVE_FAILED_BAD_FS"},
+ {STATUS_FVE_FS_NOT_EXTENDED, -EIO, "STATUS_FVE_FS_NOT_EXTENDED"},
+ {STATUS_FVE_FS_MOUNTED, -EIO, "STATUS_FVE_FS_MOUNTED"},
+ {STATUS_FVE_NO_LICENSE, -EIO, "STATUS_FVE_NO_LICENSE"},
+ {STATUS_FVE_ACTION_NOT_ALLOWED, -EIO, "STATUS_FVE_ACTION_NOT_ALLOWED"},
+ {STATUS_FVE_BAD_DATA, -EIO, "STATUS_FVE_BAD_DATA"},
+ {STATUS_FVE_VOLUME_NOT_BOUND, -EIO, "STATUS_FVE_VOLUME_NOT_BOUND"},
+ {STATUS_FVE_NOT_DATA_VOLUME, -EIO, "STATUS_FVE_NOT_DATA_VOLUME"},
+ {STATUS_FVE_CONV_READ_ERROR, -EIO, "STATUS_FVE_CONV_READ_ERROR"},
+ {STATUS_FVE_CONV_WRITE_ERROR, -EIO, "STATUS_FVE_CONV_WRITE_ERROR"},
+ {STATUS_FVE_OVERLAPPED_UPDATE, -EIO, "STATUS_FVE_OVERLAPPED_UPDATE"},
+ {STATUS_FVE_FAILED_SECTOR_SIZE, -EIO, "STATUS_FVE_FAILED_SECTOR_SIZE"},
+ {STATUS_FVE_FAILED_AUTHENTICATION, -EIO,
+ "STATUS_FVE_FAILED_AUTHENTICATION"},
+ {STATUS_FVE_NOT_OS_VOLUME, -EIO, "STATUS_FVE_NOT_OS_VOLUME"},
+ {STATUS_FVE_KEYFILE_NOT_FOUND, -EIO, "STATUS_FVE_KEYFILE_NOT_FOUND"},
+ {STATUS_FVE_KEYFILE_INVALID, -EIO, "STATUS_FVE_KEYFILE_INVALID"},
+ {STATUS_FVE_KEYFILE_NO_VMK, -EIO, "STATUS_FVE_KEYFILE_NO_VMK"},
+ {STATUS_FVE_TPM_DISABLED, -EIO, "STATUS_FVE_TPM_DISABLED"},
+ {STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO, -EIO,
+ "STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO"},
+ {STATUS_FVE_TPM_INVALID_PCR, -EIO, "STATUS_FVE_TPM_INVALID_PCR"},
+ {STATUS_FVE_TPM_NO_VMK, -EIO, "STATUS_FVE_TPM_NO_VMK"},
+ {STATUS_FVE_PIN_INVALID, -EIO, "STATUS_FVE_PIN_INVALID"},
+ {STATUS_FVE_AUTH_INVALID_APPLICATION, -EIO,
+ "STATUS_FVE_AUTH_INVALID_APPLICATION"},
+ {STATUS_FVE_AUTH_INVALID_CONFIG, -EIO,
+ "STATUS_FVE_AUTH_INVALID_CONFIG"},
+ {STATUS_FVE_DEBUGGER_ENABLED, -EIO, "STATUS_FVE_DEBUGGER_ENABLED"},
+ {STATUS_FVE_DRY_RUN_FAILED, -EIO, "STATUS_FVE_DRY_RUN_FAILED"},
+ {STATUS_FVE_BAD_METADATA_POINTER, -EIO,
+ "STATUS_FVE_BAD_METADATA_POINTER"},
+ {STATUS_FVE_OLD_METADATA_COPY, -EIO, "STATUS_FVE_OLD_METADATA_COPY"},
+ {STATUS_FVE_REBOOT_REQUIRED, -EIO, "STATUS_FVE_REBOOT_REQUIRED"},
+ {STATUS_FVE_RAW_ACCESS, -EIO, "STATUS_FVE_RAW_ACCESS"},
+ {STATUS_FVE_RAW_BLOCKED, -EIO, "STATUS_FVE_RAW_BLOCKED"},
+ {STATUS_FWP_CALLOUT_NOT_FOUND, -EIO, "STATUS_FWP_CALLOUT_NOT_FOUND"},
+ {STATUS_FWP_CONDITION_NOT_FOUND, -EIO,
+ "STATUS_FWP_CONDITION_NOT_FOUND"},
+ {STATUS_FWP_FILTER_NOT_FOUND, -EIO, "STATUS_FWP_FILTER_NOT_FOUND"},
+ {STATUS_FWP_LAYER_NOT_FOUND, -EIO, "STATUS_FWP_LAYER_NOT_FOUND"},
+ {STATUS_FWP_PROVIDER_NOT_FOUND, -EIO, "STATUS_FWP_PROVIDER_NOT_FOUND"},
+ {STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND, -EIO,
+ "STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND"},
+ {STATUS_FWP_SUBLAYER_NOT_FOUND, -EIO, "STATUS_FWP_SUBLAYER_NOT_FOUND"},
+ {STATUS_FWP_NOT_FOUND, -EIO, "STATUS_FWP_NOT_FOUND"},
+ {STATUS_FWP_ALREADY_EXISTS, -EIO, "STATUS_FWP_ALREADY_EXISTS"},
+ {STATUS_FWP_IN_USE, -EIO, "STATUS_FWP_IN_USE"},
+ {STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS, -EIO,
+ "STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS"},
+ {STATUS_FWP_WRONG_SESSION, -EIO, "STATUS_FWP_WRONG_SESSION"},
+ {STATUS_FWP_NO_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_NO_TXN_IN_PROGRESS"},
+ {STATUS_FWP_TXN_IN_PROGRESS, -EIO, "STATUS_FWP_TXN_IN_PROGRESS"},
+ {STATUS_FWP_TXN_ABORTED, -EIO, "STATUS_FWP_TXN_ABORTED"},
+ {STATUS_FWP_SESSION_ABORTED, -EIO, "STATUS_FWP_SESSION_ABORTED"},
+ {STATUS_FWP_INCOMPATIBLE_TXN, -EIO, "STATUS_FWP_INCOMPATIBLE_TXN"},
+ {STATUS_FWP_TIMEOUT, -ETIMEDOUT, "STATUS_FWP_TIMEOUT"},
+ {STATUS_FWP_NET_EVENTS_DISABLED, -EIO,
+ "STATUS_FWP_NET_EVENTS_DISABLED"},
+ {STATUS_FWP_INCOMPATIBLE_LAYER, -EIO, "STATUS_FWP_INCOMPATIBLE_LAYER"},
+ {STATUS_FWP_KM_CLIENTS_ONLY, -EIO, "STATUS_FWP_KM_CLIENTS_ONLY"},
+ {STATUS_FWP_LIFETIME_MISMATCH, -EIO, "STATUS_FWP_LIFETIME_MISMATCH"},
+ {STATUS_FWP_BUILTIN_OBJECT, -EIO, "STATUS_FWP_BUILTIN_OBJECT"},
+ {STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS, -EIO,
+ "STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS"},
+ {STATUS_FWP_TOO_MANY_CALLOUTS, -EIO, "STATUS_FWP_TOO_MANY_CALLOUTS"},
+ {STATUS_FWP_NOTIFICATION_DROPPED, -EIO,
+ "STATUS_FWP_NOTIFICATION_DROPPED"},
+ {STATUS_FWP_TRAFFIC_MISMATCH, -EIO, "STATUS_FWP_TRAFFIC_MISMATCH"},
+ {STATUS_FWP_INCOMPATIBLE_SA_STATE, -EIO,
+ "STATUS_FWP_INCOMPATIBLE_SA_STATE"},
+ {STATUS_FWP_NULL_POINTER, -EIO, "STATUS_FWP_NULL_POINTER"},
+ {STATUS_FWP_INVALID_ENUMERATOR, -EIO, "STATUS_FWP_INVALID_ENUMERATOR"},
+ {STATUS_FWP_INVALID_FLAGS, -EIO, "STATUS_FWP_INVALID_FLAGS"},
+ {STATUS_FWP_INVALID_NET_MASK, -EIO, "STATUS_FWP_INVALID_NET_MASK"},
+ {STATUS_FWP_INVALID_RANGE, -EIO, "STATUS_FWP_INVALID_RANGE"},
+ {STATUS_FWP_INVALID_INTERVAL, -EIO, "STATUS_FWP_INVALID_INTERVAL"},
+ {STATUS_FWP_ZERO_LENGTH_ARRAY, -EIO, "STATUS_FWP_ZERO_LENGTH_ARRAY"},
+ {STATUS_FWP_NULL_DISPLAY_NAME, -EIO, "STATUS_FWP_NULL_DISPLAY_NAME"},
+ {STATUS_FWP_INVALID_ACTION_TYPE, -EIO,
+ "STATUS_FWP_INVALID_ACTION_TYPE"},
+ {STATUS_FWP_INVALID_WEIGHT, -EIO, "STATUS_FWP_INVALID_WEIGHT"},
+ {STATUS_FWP_MATCH_TYPE_MISMATCH, -EIO,
+ "STATUS_FWP_MATCH_TYPE_MISMATCH"},
+ {STATUS_FWP_TYPE_MISMATCH, -EIO, "STATUS_FWP_TYPE_MISMATCH"},
+ {STATUS_FWP_OUT_OF_BOUNDS, -EIO, "STATUS_FWP_OUT_OF_BOUNDS"},
+ {STATUS_FWP_RESERVED, -EIO, "STATUS_FWP_RESERVED"},
+ {STATUS_FWP_DUPLICATE_CONDITION, -EIO,
+ "STATUS_FWP_DUPLICATE_CONDITION"},
+ {STATUS_FWP_DUPLICATE_KEYMOD, -EIO, "STATUS_FWP_DUPLICATE_KEYMOD"},
+ {STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER, -EIO,
+ "STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER"},
+ {STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER, -EIO,
+ "STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER"},
+ {STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER, -EIO,
+ "STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER"},
+ {STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT, -EIO,
+ "STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT"},
+ {STATUS_FWP_INCOMPATIBLE_AUTH_METHOD, -EIO,
+ "STATUS_FWP_INCOMPATIBLE_AUTH_METHOD"},
+ {STATUS_FWP_INCOMPATIBLE_DH_GROUP, -EIO,
+ "STATUS_FWP_INCOMPATIBLE_DH_GROUP"},
+ {STATUS_FWP_EM_NOT_SUPPORTED, -EOPNOTSUPP,
+ "STATUS_FWP_EM_NOT_SUPPORTED"},
+ {STATUS_FWP_NEVER_MATCH, -EIO, "STATUS_FWP_NEVER_MATCH"},
+ {STATUS_FWP_PROVIDER_CONTEXT_MISMATCH, -EIO,
+ "STATUS_FWP_PROVIDER_CONTEXT_MISMATCH"},
+ {STATUS_FWP_INVALID_PARAMETER, -EIO, "STATUS_FWP_INVALID_PARAMETER"},
+ {STATUS_FWP_TOO_MANY_SUBLAYERS, -EIO, "STATUS_FWP_TOO_MANY_SUBLAYERS"},
+ {STATUS_FWP_CALLOUT_NOTIFICATION_FAILED, -EIO,
+ "STATUS_FWP_CALLOUT_NOTIFICATION_FAILED"},
+ {STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG, -EIO,
+ "STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG"},
+ {STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG, -EIO,
+ "STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG"},
+ {STATUS_FWP_TCPIP_NOT_READY, -EIO, "STATUS_FWP_TCPIP_NOT_READY"},
+ {STATUS_FWP_INJECT_HANDLE_CLOSING, -EIO,
+ "STATUS_FWP_INJECT_HANDLE_CLOSING"},
+ {STATUS_FWP_INJECT_HANDLE_STALE, -EIO,
+ "STATUS_FWP_INJECT_HANDLE_STALE"},
+ {STATUS_FWP_CANNOT_PEND, -EIO, "STATUS_FWP_CANNOT_PEND"},
+ {STATUS_NDIS_CLOSING, -EIO, "STATUS_NDIS_CLOSING"},
+ {STATUS_NDIS_BAD_VERSION, -EIO, "STATUS_NDIS_BAD_VERSION"},
+ {STATUS_NDIS_BAD_CHARACTERISTICS, -EIO,
+ "STATUS_NDIS_BAD_CHARACTERISTICS"},
+ {STATUS_NDIS_ADAPTER_NOT_FOUND, -EIO, "STATUS_NDIS_ADAPTER_NOT_FOUND"},
+ {STATUS_NDIS_OPEN_FAILED, -EIO, "STATUS_NDIS_OPEN_FAILED"},
+ {STATUS_NDIS_DEVICE_FAILED, -EIO, "STATUS_NDIS_DEVICE_FAILED"},
+ {STATUS_NDIS_MULTICAST_FULL, -EIO, "STATUS_NDIS_MULTICAST_FULL"},
+ {STATUS_NDIS_MULTICAST_EXISTS, -EIO, "STATUS_NDIS_MULTICAST_EXISTS"},
+ {STATUS_NDIS_MULTICAST_NOT_FOUND, -EIO,
+ "STATUS_NDIS_MULTICAST_NOT_FOUND"},
+ {STATUS_NDIS_REQUEST_ABORTED, -EIO, "STATUS_NDIS_REQUEST_ABORTED"},
+ {STATUS_NDIS_RESET_IN_PROGRESS, -EIO, "STATUS_NDIS_RESET_IN_PROGRESS"},
+ {STATUS_NDIS_INVALID_PACKET, -EIO, "STATUS_NDIS_INVALID_PACKET"},
+ {STATUS_NDIS_INVALID_DEVICE_REQUEST, -EIO,
+ "STATUS_NDIS_INVALID_DEVICE_REQUEST"},
+ {STATUS_NDIS_ADAPTER_NOT_READY, -EIO, "STATUS_NDIS_ADAPTER_NOT_READY"},
+ {STATUS_NDIS_INVALID_LENGTH, -EIO, "STATUS_NDIS_INVALID_LENGTH"},
+ {STATUS_NDIS_INVALID_DATA, -EIO, "STATUS_NDIS_INVALID_DATA"},
+ {STATUS_NDIS_BUFFER_TOO_SHORT, -ENOBUFS,
+ "STATUS_NDIS_BUFFER_TOO_SHORT"},
+ {STATUS_NDIS_INVALID_OID, -EIO, "STATUS_NDIS_INVALID_OID"},
+ {STATUS_NDIS_ADAPTER_REMOVED, -EIO, "STATUS_NDIS_ADAPTER_REMOVED"},
+ {STATUS_NDIS_UNSUPPORTED_MEDIA, -EIO, "STATUS_NDIS_UNSUPPORTED_MEDIA"},
+ {STATUS_NDIS_GROUP_ADDRESS_IN_USE, -EIO,
+ "STATUS_NDIS_GROUP_ADDRESS_IN_USE"},
+ {STATUS_NDIS_FILE_NOT_FOUND, -EIO, "STATUS_NDIS_FILE_NOT_FOUND"},
+ {STATUS_NDIS_ERROR_READING_FILE, -EIO,
+ "STATUS_NDIS_ERROR_READING_FILE"},
+ {STATUS_NDIS_ALREADY_MAPPED, -EIO, "STATUS_NDIS_ALREADY_MAPPED"},
+ {STATUS_NDIS_RESOURCE_CONFLICT, -EIO, "STATUS_NDIS_RESOURCE_CONFLICT"},
+ {STATUS_NDIS_MEDIA_DISCONNECTED, -EIO,
+ "STATUS_NDIS_MEDIA_DISCONNECTED"},
+ {STATUS_NDIS_INVALID_ADDRESS, -EIO, "STATUS_NDIS_INVALID_ADDRESS"},
+ {STATUS_NDIS_PAUSED, -EIO, "STATUS_NDIS_PAUSED"},
+ {STATUS_NDIS_INTERFACE_NOT_FOUND, -EIO,
+ "STATUS_NDIS_INTERFACE_NOT_FOUND"},
+ {STATUS_NDIS_UNSUPPORTED_REVISION, -EIO,
+ "STATUS_NDIS_UNSUPPORTED_REVISION"},
+ {STATUS_NDIS_INVALID_PORT, -EIO, "STATUS_NDIS_INVALID_PORT"},
+ {STATUS_NDIS_INVALID_PORT_STATE, -EIO,
+ "STATUS_NDIS_INVALID_PORT_STATE"},
+ {STATUS_NDIS_LOW_POWER_STATE, -EIO, "STATUS_NDIS_LOW_POWER_STATE"},
+ {STATUS_NDIS_NOT_SUPPORTED, -ENOSYS, "STATUS_NDIS_NOT_SUPPORTED"},
+ {STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED, -EIO,
+ "STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED"},
+ {STATUS_NDIS_DOT11_MEDIA_IN_USE, -EIO,
+ "STATUS_NDIS_DOT11_MEDIA_IN_USE"},
+ {STATUS_NDIS_DOT11_POWER_STATE_INVALID, -EIO,
+ "STATUS_NDIS_DOT11_POWER_STATE_INVALID"},
+ {STATUS_IPSEC_BAD_SPI, -EIO, "STATUS_IPSEC_BAD_SPI"},
+ {STATUS_IPSEC_SA_LIFETIME_EXPIRED, -EIO,
+ "STATUS_IPSEC_SA_LIFETIME_EXPIRED"},
+ {STATUS_IPSEC_WRONG_SA, -EIO, "STATUS_IPSEC_WRONG_SA"},
+ {STATUS_IPSEC_REPLAY_CHECK_FAILED, -EIO,
+ "STATUS_IPSEC_REPLAY_CHECK_FAILED"},
+ {STATUS_IPSEC_INVALID_PACKET, -EIO, "STATUS_IPSEC_INVALID_PACKET"},
+ {STATUS_IPSEC_INTEGRITY_CHECK_FAILED, -EIO,
+ "STATUS_IPSEC_INTEGRITY_CHECK_FAILED"},
+ {STATUS_IPSEC_CLEAR_TEXT_DROP, -EIO, "STATUS_IPSEC_CLEAR_TEXT_DROP"},
+ {0, 0, NULL}
+};
+
+/*****************************************************************************
+ Print an error message from the status code
+ *****************************************************************************/
+static void
+smb2_print_status(__le32 status)
+{
+ int idx = 0;
+
+ while (smb2_error_map_table[idx].status_string != NULL) {
+ if ((smb2_error_map_table[idx].smb2_status) == status) {
+ pr_notice("Status code returned 0x%08x %s\n", status,
+ smb2_error_map_table[idx].status_string);
+ }
+ idx++;
+ }
+ return;
+}
+
+int
+map_smb2_to_linux_error(char *buf, bool log_err)
+{
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ unsigned int i;
+ int rc = -EIO;
+ __le32 smb2err = hdr->Status;
+
+ if (smb2err == 0)
+ return 0;
+
+ /* mask facility */
+ if (log_err && (smb2err != (STATUS_MORE_PROCESSING_REQUIRED)))
+ smb2_print_status(smb2err);
+ else if (cifsFYI & CIFS_RC)
+ smb2_print_status(smb2err);
+
+ for (i = 0; i < sizeof(smb2_error_map_table) /
+ sizeof(struct status_to_posix_error); i++) {
+ if (smb2_error_map_table[i].smb2_status == smb2err) {
+ rc = smb2_error_map_table[i].posix_error;
+ break;
+ }
+ }
+
+ /* if no mapping was found above, the default -EIO is returned */
+
+ cFYI(1, "Mapping SMB2 status code %d to POSIX err %d",
+ smb2err, rc);
+
+ return rc;
+}
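
The mapping above is a flat table walked linearly, so a caller only needs the raw response buffer. A minimal sketch of such a caller follows; it is not taken from the patch itself, and the helper and buffer names are illustrative assumptions:

	/* Hypothetical helper: translate a received SMB2 response to an errno */
	static int example_map_status(char *rsp_buf)
	{
		/* log_err=true also prints unusual status codes via smb2_print_status() */
		int rc = map_smb2_to_linux_error(rsp_buf, true);

		if (rc == -EIO)
			cFYI(1, "status fell through to the default -EIO mapping");
		return rc;
	}
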
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
new file mode 100644
index 000000000000..e4d3b9964167
--- /dev/null
+++ b/fs/cifs/smb2misc.c
@@ -0,0 +1,349 @@
+/*
+ * fs/cifs/smb2misc.c
+ *
+ * Copyright (C) International Business Machines Corp., 2002,2011
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/ctype.h>
+#include "smb2pdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "smb2proto.h"
+#include "cifs_debug.h"
+#include "cifs_unicode.h"
+#include "smb2status.h"
+
+static int
+check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
+{
+ /*
+ * Make sure that this really is an SMB2 packet, that it is a response,
+ * and that the message ids match.
+ */
+ if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
+ (mid == hdr->MessageId)) {
+ if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
+ return 0;
+ else {
+ /* only one valid case where the server sends us a request */
+ if (hdr->Command == SMB2_OPLOCK_BREAK)
+ return 0;
+ else
+ cERROR(1, "Received Request not response");
+ }
+ } else { /* bad signature or mid */
+ if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
+ cERROR(1, "Bad protocol string signature header %x",
+ *(unsigned int *) hdr->ProtocolId);
+ if (mid != hdr->MessageId)
+ cERROR(1, "Mids do not match: %llu and %llu", mid,
+ hdr->MessageId);
+ }
+ cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
+ return 1;
+}
+
+/*
+ * The following table defines the expected "StructureSize" of SMB2 responses
+ * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses.
+ *
+ * Note that commands are defined in smb2pdu.h in le16 but the array below is
+ * indexed by command in host byte order
+ */
+static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+ /* SMB2_NEGOTIATE */ __constant_cpu_to_le16(65),
+ /* SMB2_SESSION_SETUP */ __constant_cpu_to_le16(9),
+ /* SMB2_LOGOFF */ __constant_cpu_to_le16(4),
+ /* SMB2_TREE_CONNECT */ __constant_cpu_to_le16(16),
+ /* SMB2_TREE_DISCONNECT */ __constant_cpu_to_le16(4),
+ /* SMB2_CREATE */ __constant_cpu_to_le16(89),
+ /* SMB2_CLOSE */ __constant_cpu_to_le16(60),
+ /* SMB2_FLUSH */ __constant_cpu_to_le16(4),
+ /* SMB2_READ */ __constant_cpu_to_le16(17),
+ /* SMB2_WRITE */ __constant_cpu_to_le16(17),
+ /* SMB2_LOCK */ __constant_cpu_to_le16(4),
+ /* SMB2_IOCTL */ __constant_cpu_to_le16(49),
+ /* BB CHECK this ... not listed in documentation */
+ /* SMB2_CANCEL */ __constant_cpu_to_le16(0),
+ /* SMB2_ECHO */ __constant_cpu_to_le16(4),
+ /* SMB2_QUERY_DIRECTORY */ __constant_cpu_to_le16(9),
+ /* SMB2_CHANGE_NOTIFY */ __constant_cpu_to_le16(9),
+ /* SMB2_QUERY_INFO */ __constant_cpu_to_le16(9),
+ /* SMB2_SET_INFO */ __constant_cpu_to_le16(2),
+ /* BB FIXME can also be 44 for lease break */
+ /* SMB2_OPLOCK_BREAK */ __constant_cpu_to_le16(24)
+};
+
+int
+smb2_check_message(char *buf, unsigned int length)
+{
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
+ __u64 mid = hdr->MessageId;
+ __u32 len = get_rfc1002_length(buf);
+ __u32 clc_len; /* calculated length */
+ int command;
+
+ /* BB disable following printk later */
+ cFYI(1, "%s length: 0x%x, smb_buf_length: 0x%x", __func__, length, len);
+
+ /*
+ * Add a function to do a table lookup of StructureSize by command,
+ * i.e. validate the wct via the smb2_rsp_struct_sizes table above.
+ */
+
+ if (length < sizeof(struct smb2_pdu)) {
+ if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
+ pdu->StructureSize2 = 0;
+ /*
+ * As with SMB/CIFS, on some error cases servers may
+ * not return wct properly
+ */
+ return 0;
+ } else {
+ cERROR(1, "Length less than SMB header size");
+ }
+ return 1;
+ }
+ if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
+ cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
+ return 1;
+ }
+
+ if (check_smb2_hdr(hdr, mid))
+ return 1;
+
+ if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+ cERROR(1, "Illegal structure size %u",
+ le16_to_cpu(hdr->StructureSize));
+ return 1;
+ }
+
+ command = le16_to_cpu(hdr->Command);
+ if (command >= NUMBER_OF_SMB2_COMMANDS) {
+ cERROR(1, "Illegal SMB2 command %d", command);
+ return 1;
+ }
+
+ if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
+ if (hdr->Status == 0 ||
+ pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2) {
+ /* error packets have 9 byte structure size */
+ cERROR(1, "Illegal response size %u for command %d",
+ le16_to_cpu(pdu->StructureSize2), command);
+ return 1;
+ }
+ }
+
+ if (4 + len != length) {
+ cERROR(1, "Total length %u RFC1002 length %u mismatch mid %llu",
+ length, 4 + len, mid);
+ return 1;
+ }
+
+ clc_len = smb2_calc_size(hdr);
+
+ if (4 + len != clc_len) {
+ cFYI(1, "Calculated size %u length %u mismatch mid %llu",
+ clc_len, 4 + len, mid);
+ /* server can return one byte more */
+ if (clc_len == 4 + len + 1)
+ return 0;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * The size of the variable area depends on the offset and length fields,
+ * which are located at different positions in the various SMB2 responses.
+ * SMB2 responses with no variable-length data show an offset of zero in the
+ * offset field.
+ */
+static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
+ /* SMB2_NEGOTIATE */ true,
+ /* SMB2_SESSION_SETUP */ true,
+ /* SMB2_LOGOFF */ false,
+ /* SMB2_TREE_CONNECT */ false,
+ /* SMB2_TREE_DISCONNECT */ false,
+ /* SMB2_CREATE */ true,
+ /* SMB2_CLOSE */ false,
+ /* SMB2_FLUSH */ false,
+ /* SMB2_READ */ true,
+ /* SMB2_WRITE */ false,
+ /* SMB2_LOCK */ false,
+ /* SMB2_IOCTL */ true,
+ /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
+ /* SMB2_ECHO */ false,
+ /* SMB2_QUERY_DIRECTORY */ true,
+ /* SMB2_CHANGE_NOTIFY */ true,
+ /* SMB2_QUERY_INFO */ true,
+ /* SMB2_SET_INFO */ false,
+ /* SMB2_OPLOCK_BREAK */ false
+};
+
+/*
+ * Returns a pointer to the beginning of the data area. The length of the data
+ * area and its offset (from the beginning of the SMB) are also returned.
+ */
+char *
+smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
+{
+ *off = 0;
+ *len = 0;
+
+ /* error responses do not have a data area */
+ if (hdr->Status && hdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
+ (((struct smb2_err_rsp *)hdr)->StructureSize) ==
+ SMB2_ERROR_STRUCTURE_SIZE2)
+ return NULL;
+
+ /*
+ * The following commands have data areas, so we have to get the location
+ * of the data buffer offset and the data buffer length for the particular
+ * command.
+ */
+ switch (hdr->Command) {
+ case SMB2_NEGOTIATE:
+ *off = le16_to_cpu(
+ ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferOffset);
+ *len = le16_to_cpu(
+ ((struct smb2_negotiate_rsp *)hdr)->SecurityBufferLength);
+ break;
+ case SMB2_SESSION_SETUP:
+ *off = le16_to_cpu(
+ ((struct smb2_sess_setup_rsp *)hdr)->SecurityBufferOffset);
+ *len = le16_to_cpu(
+ ((struct smb2_sess_setup_rsp *)hdr)->SecurityBufferLength);
+ break;
+ case SMB2_CREATE:
+ *off = le32_to_cpu(
+ ((struct smb2_create_rsp *)hdr)->CreateContextsOffset);
+ *len = le32_to_cpu(
+ ((struct smb2_create_rsp *)hdr)->CreateContextsLength);
+ break;
+ case SMB2_QUERY_INFO:
+ *off = le16_to_cpu(
+ ((struct smb2_query_info_rsp *)hdr)->OutputBufferOffset);
+ *len = le32_to_cpu(
+ ((struct smb2_query_info_rsp *)hdr)->OutputBufferLength);
+ break;
+ case SMB2_READ:
+ case SMB2_QUERY_DIRECTORY:
+ case SMB2_IOCTL:
+ case SMB2_CHANGE_NOTIFY:
+ default:
+ /* BB FIXME for unimplemented cases above */
+ cERROR(1, "no length check for command");
+ break;
+ }
+
+ /*
+ * Invalid length or offset probably means data area is invalid, but
+ * we have little choice but to ignore the data area in this case.
+ */
+ if (*off > 4096) {
+ cERROR(1, "offset %d too large, data area ignored", *off);
+ *len = 0;
+ *off = 0;
+ } else if (*off < 0) {
+ cERROR(1, "negative offset %d to data invalid ignore data area",
+ *off);
+ *off = 0;
+ *len = 0;
+ } else if (*len < 0) {
+ cERROR(1, "negative data length %d invalid, data area ignored",
+ *len);
+ *len = 0;
+ } else if (*len > 128 * 1024) {
+ cERROR(1, "data area larger than 128K: %d", *len);
+ *len = 0;
+ }
+
+ /* return pointer to beginning of data area, ie offset from SMB start */
+ if ((*off != 0) && (*len != 0))
+ return hdr->ProtocolId + *off;
+ else
+ return NULL;
+}
+
+/*
+ * Calculate the size of the SMB message based on the fixed header
+ * portion, the number of word parameters and the data portion of the message.
+ */
+unsigned int
+smb2_calc_size(struct smb2_hdr *hdr)
+{
+ struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
+ int offset; /* the offset from the beginning of SMB to data area */
+ int data_length; /* the length of the variable length data area */
+ /* Structure Size has already been checked to make sure it is 64 */
+ int len = 4 + le16_to_cpu(pdu->hdr.StructureSize);
+
+ /*
+ * StructureSize2, ie length of fixed parameter area has already
+ * been checked to make sure it is the correct length.
+ */
+ len += le16_to_cpu(pdu->StructureSize2);
+
+ if (has_smb2_data_area[le16_to_cpu(hdr->Command)] == false)
+ goto calc_size_exit;
+
+ smb2_get_data_area_len(&offset, &data_length, hdr);
+ cFYI(1, "SMB2 data length %d offset %d", data_length, offset);
+
+ if (data_length > 0) {
+ /*
+ * Check to make sure that the data area begins after the fixed area.
+ * Note that the last byte of the fixed area is part of the data area
+ * for some commands, typically those with an odd StructureSize,
+ * so we must add one to the calculation (and 4 to account for
+ * the size of the RFC1001 hdr).
+ */
+ if (offset + 4 + 1 < len) {
+ cERROR(1, "data area offset %d overlaps SMB2 header %d",
+ offset + 4 + 1, len);
+ data_length = 0;
+ } else {
+ len = 4 + offset + data_length;
+ }
+ }
+calc_size_exit:
+ cFYI(1, "SMB2 len %d", len);
+ return len;
+}
+
+/* Note: the caller must free the returned buffer */
+__le16 *
+cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
+{
+ int len;
+ const char *start_of_path;
+ __le16 *to;
+
+ /* Windows doesn't allow paths beginning with \ */
+ if (from[0] == '\\')
+ start_of_path = from + 1;
+ else
+ start_of_path = from;
+ to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ return to;
+}
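
Since cifs_convert_path_to_utf16() allocates the returned UTF-16 string, every caller must free it once the request has been built. A minimal sketch of that pattern, mirroring how smb2ops.c below uses it (the surrounding function and its arguments are assumed):

	__le16 *utf16_path;

	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;
	/* ... build and send the SMB2 request using utf16_path ... */
	kfree(utf16_path);
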
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f065e89756a1..826209bf3684 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -18,10 +18,317 @@
*/
#include "cifsglob.h"
+#include "smb2pdu.h"
+#include "smb2proto.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+
+static int
+change_conf(struct TCP_Server_Info *server)
+{
+ server->credits += server->echo_credits + server->oplock_credits;
+ server->oplock_credits = server->echo_credits = 0;
+ switch (server->credits) {
+ case 0:
+ return -1;
+ case 1:
+ server->echoes = false;
+ server->oplocks = false;
+ cERROR(1, "disabling echoes and oplocks");
+ break;
+ case 2:
+ server->echoes = true;
+ server->oplocks = false;
+ server->echo_credits = 1;
+ cFYI(1, "disabling oplocks");
+ break;
+ default:
+ server->echoes = true;
+ server->oplocks = true;
+ server->echo_credits = 1;
+ server->oplock_credits = 1;
+ }
+ server->credits -= server->echo_credits + server->oplock_credits;
+ return 0;
+}
+
+static void
+smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
+ const int optype)
+{
+ int *val, rc = 0;
+ spin_lock(&server->req_lock);
+ val = server->ops->get_credits_field(server, optype);
+ *val += add;
+ server->in_flight--;
+ if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
+ rc = change_conf(server);
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+ if (rc)
+ cifs_reconnect(server);
+}
+
+static void
+smb2_set_credits(struct TCP_Server_Info *server, const int val)
+{
+ spin_lock(&server->req_lock);
+ server->credits = val;
+ spin_unlock(&server->req_lock);
+}
+
+static int *
+smb2_get_credits_field(struct TCP_Server_Info *server, const int optype)
+{
+ switch (optype) {
+ case CIFS_ECHO_OP:
+ return &server->echo_credits;
+ case CIFS_OBREAK_OP:
+ return &server->oplock_credits;
+ default:
+ return &server->credits;
+ }
+}
+
+static unsigned int
+smb2_get_credits(struct mid_q_entry *mid)
+{
+ return le16_to_cpu(((struct smb2_hdr *)mid->resp_buf)->CreditRequest);
+}
+
+static __u64
+smb2_get_next_mid(struct TCP_Server_Info *server)
+{
+ __u64 mid;
+ /* for SMB2 we need the current value */
+ spin_lock(&GlobalMid_Lock);
+ mid = server->CurrentMid++;
+ spin_unlock(&GlobalMid_Lock);
+ return mid;
+}
+
+static struct mid_q_entry *
+smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+{
+ struct mid_q_entry *mid;
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+ if ((mid->mid == hdr->MessageId) &&
+ (mid->mid_state == MID_REQUEST_SUBMITTED) &&
+ (mid->command == hdr->Command)) {
+ spin_unlock(&GlobalMid_Lock);
+ return mid;
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+ return NULL;
+}
+
+static void
+smb2_dump_detail(void *buf)
+{
+#ifdef CONFIG_CIFS_DEBUG2
+ struct smb2_hdr *smb = (struct smb2_hdr *)buf;
+
+ cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Mid: %llu Pid: %d",
+ smb->Command, smb->Status, smb->Flags, smb->MessageId,
+ smb->ProcessId);
+ cERROR(1, "smb buf %p len %u", smb, smb2_calc_size(smb));
+#endif
+}
+
+static bool
+smb2_need_neg(struct TCP_Server_Info *server)
+{
+ return server->max_read == 0;
+}
+
+static int
+smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+{
+ int rc;
+ ses->server->CurrentMid = 0;
+ rc = SMB2_negotiate(xid, ses);
+ /* BB we probably don't need to retry with modern servers */
+ if (rc == -EAGAIN)
+ rc = -EHOSTDOWN;
+ return rc;
+}
+
+static int
+smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path)
+{
+ int rc;
+ __u64 persistent_fid, volatile_fid;
+ __le16 *utf16_path;
+
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
+ rc = SMB2_open(xid, tcon, utf16_path, &persistent_fid, &volatile_fid,
+ FILE_READ_ATTRIBUTES, FILE_OPEN, 0, 0);
+ if (rc) {
+ kfree(utf16_path);
+ return rc;
+ }
+
+ rc = SMB2_close(xid, tcon, persistent_fid, volatile_fid);
+ kfree(utf16_path);
+ return rc;
+}
+
+static int
+smb2_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ u64 *uniqueid, FILE_ALL_INFO *data)
+{
+ *uniqueid = le64_to_cpu(data->IndexNumber);
+ return 0;
+}
+
+static char *
+smb2_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon)
+{
+ int pplen = vol->prepath ? strlen(vol->prepath) : 0;
+ char *full_path = NULL;
+
+ /* if there is no prefix path, simply set the path to the root of the share: "" */
+ if (pplen == 0) {
+ full_path = kzalloc(2, GFP_KERNEL);
+ return full_path;
+ }
+
+ cERROR(1, "prefixpath is not supported for SMB2 now");
+ return NULL;
+}
+
+static bool
+smb2_can_echo(struct TCP_Server_Info *server)
+{
+ return server->echoes;
+}
+
+static void
+smb2_clear_stats(struct cifs_tcon *tcon)
+{
+#ifdef CONFIG_CIFS_STATS
+ int i;
+ for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
+ atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
+ atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
+ }
+#endif
+}
+
+static void
+smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
+{
+#ifdef CONFIG_CIFS_STATS
+ atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
+ atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
+ seq_printf(m, "\nNegotiates: %d sent %d failed",
+ atomic_read(&sent[SMB2_NEGOTIATE_HE]),
+ atomic_read(&failed[SMB2_NEGOTIATE_HE]));
+ seq_printf(m, "\nSessionSetups: %d sent %d failed",
+ atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
+ atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
+#define SMB2LOGOFF 0x0002 /* trivial request/resp */
+ seq_printf(m, "\nLogoffs: %d sent %d failed",
+ atomic_read(&sent[SMB2_LOGOFF_HE]),
+ atomic_read(&failed[SMB2_LOGOFF_HE]));
+ seq_printf(m, "\nTreeConnects: %d sent %d failed",
+ atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
+ atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
+ seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
+ atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
+ atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
+ seq_printf(m, "\nCreates: %d sent %d failed",
+ atomic_read(&sent[SMB2_CREATE_HE]),
+ atomic_read(&failed[SMB2_CREATE_HE]));
+ seq_printf(m, "\nCloses: %d sent %d failed",
+ atomic_read(&sent[SMB2_CLOSE_HE]),
+ atomic_read(&failed[SMB2_CLOSE_HE]));
+ seq_printf(m, "\nFlushes: %d sent %d failed",
+ atomic_read(&sent[SMB2_FLUSH_HE]),
+ atomic_read(&failed[SMB2_FLUSH_HE]));
+ seq_printf(m, "\nReads: %d sent %d failed",
+ atomic_read(&sent[SMB2_READ_HE]),
+ atomic_read(&failed[SMB2_READ_HE]));
+ seq_printf(m, "\nWrites: %d sent %d failed",
+ atomic_read(&sent[SMB2_WRITE_HE]),
+ atomic_read(&failed[SMB2_WRITE_HE]));
+ seq_printf(m, "\nLocks: %d sent %d failed",
+ atomic_read(&sent[SMB2_LOCK_HE]),
+ atomic_read(&failed[SMB2_LOCK_HE]));
+ seq_printf(m, "\nIOCTLs: %d sent %d failed",
+ atomic_read(&sent[SMB2_IOCTL_HE]),
+ atomic_read(&failed[SMB2_IOCTL_HE]));
+ seq_printf(m, "\nCancels: %d sent %d failed",
+ atomic_read(&sent[SMB2_CANCEL_HE]),
+ atomic_read(&failed[SMB2_CANCEL_HE]));
+ seq_printf(m, "\nEchos: %d sent %d failed",
+ atomic_read(&sent[SMB2_ECHO_HE]),
+ atomic_read(&failed[SMB2_ECHO_HE]));
+ seq_printf(m, "\nQueryDirectories: %d sent %d failed",
+ atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
+ atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
+ seq_printf(m, "\nChangeNotifies: %d sent %d failed",
+ atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
+ atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
+ seq_printf(m, "\nQueryInfos: %d sent %d failed",
+ atomic_read(&sent[SMB2_QUERY_INFO_HE]),
+ atomic_read(&failed[SMB2_QUERY_INFO_HE]));
+ seq_printf(m, "\nSetInfos: %d sent %d failed",
+ atomic_read(&sent[SMB2_SET_INFO_HE]),
+ atomic_read(&failed[SMB2_SET_INFO_HE]));
+ seq_printf(m, "\nOplockBreaks: %d sent %d failed",
+ atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
+ atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
+#endif
+}
struct smb_version_operations smb21_operations = {
+ .setup_request = smb2_setup_request,
+ .setup_async_request = smb2_setup_async_request,
+ .check_receive = smb2_check_receive,
+ .add_credits = smb2_add_credits,
+ .set_credits = smb2_set_credits,
+ .get_credits_field = smb2_get_credits_field,
+ .get_credits = smb2_get_credits,
+ .get_next_mid = smb2_get_next_mid,
+ .find_mid = smb2_find_mid,
+ .check_message = smb2_check_message,
+ .dump_detail = smb2_dump_detail,
+ .clear_stats = smb2_clear_stats,
+ .print_stats = smb2_print_stats,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .sess_setup = SMB2_sess_setup,
+ .logoff = SMB2_logoff,
+ .tree_connect = SMB2_tcon,
+ .tree_disconnect = SMB2_tdis,
+ .is_path_accessible = smb2_is_path_accessible,
+ .can_echo = smb2_can_echo,
+ .echo = SMB2_echo,
+ .query_path_info = smb2_query_path_info,
+ .get_srv_inum = smb2_get_srv_inum,
+ .build_path_to_root = smb2_build_path_to_root,
+ .mkdir = smb2_mkdir,
+ .mkdir_setinfo = smb2_mkdir_setinfo,
+ .rmdir = smb2_rmdir,
};
struct smb_version_values smb21_values = {
.version_string = SMB21_VERSION_STRING,
+ .header_size = sizeof(struct smb2_hdr),
+ .max_header_size = MAX_SMB2_HDR_SIZE,
+ .lock_cmd = SMB2_LOCK,
+ .cap_unix = 0,
+ .cap_nt_find = SMB2_NT_FIND,
+ .cap_large_files = SMB2_LARGE_FILES,
};
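
The smb21_operations and smb21_values tables are reached through the server's ops/vals pointers, which keeps the protocol-neutral code dialect-agnostic. A rough sketch of that dispatch, assuming a connected server whose ops already point at the SMB2.1 table (the wrapper name is illustrative):

	/* Hypothetical wrapper: negotiate only when the dialect code asks for it */
	static int example_negotiate_if_needed(const unsigned int xid,
					       struct cifs_ses *ses)
	{
		struct TCP_Server_Info *server = ses->server;

		if (server->ops->need_neg(server))		/* smb2_need_neg() */
			return server->ops->negotiate(xid, ses); /* smb2_negotiate() */
		return 0;
	}
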
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
new file mode 100644
index 000000000000..62b3f17d0613
--- /dev/null
+++ b/fs/cifs/smb2pdu.c
@@ -0,0 +1,1125 @@
+/*
+ * fs/cifs/smb2pdu.c
+ *
+ * Copyright (C) International Business Machines Corp., 2009, 2011
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * Contains the routines for constructing the SMB2 PDUs themselves
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+ /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
+ /* Note that there are handle based routines which must be */
+ /* treated slightly differently for reconnection purposes since we never */
+ /* want to reuse a stale file handle and only the caller knows the file info */
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/vfs.h>
+#include <linux/uaccess.h>
+#include <linux/xattr.h>
+#include "smb2pdu.h"
+#include "cifsglob.h"
+#include "cifsacl.h"
+#include "cifsproto.h"
+#include "smb2proto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "ntlmssp.h"
+#include "smb2status.h"
+
+/*
+ * The following table defines the expected "StructureSize" of SMB2 requests
+ * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
+ *
+ * Note that commands are defined in smb2pdu.h in le16 but the array below is
+ * indexed by command in host byte order.
+ */
+static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
+ /* SMB2_NEGOTIATE */ 36,
+ /* SMB2_SESSION_SETUP */ 25,
+ /* SMB2_LOGOFF */ 4,
+ /* SMB2_TREE_CONNECT */ 9,
+ /* SMB2_TREE_DISCONNECT */ 4,
+ /* SMB2_CREATE */ 57,
+ /* SMB2_CLOSE */ 24,
+ /* SMB2_FLUSH */ 24,
+ /* SMB2_READ */ 49,
+ /* SMB2_WRITE */ 49,
+ /* SMB2_LOCK */ 48,
+ /* SMB2_IOCTL */ 57,
+ /* SMB2_CANCEL */ 4,
+ /* SMB2_ECHO */ 4,
+ /* SMB2_QUERY_DIRECTORY */ 33,
+ /* SMB2_CHANGE_NOTIFY */ 32,
+ /* SMB2_QUERY_INFO */ 41,
+ /* SMB2_SET_INFO */ 33,
+ /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
+};
+
+
+static void
+smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
+ const struct cifs_tcon *tcon)
+{
+ struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
+ char *temp = (char *)hdr;
+ /* lookup word count ie StructureSize from table */
+ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
+
+ /*
+ * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
+ * largest operations (Create)
+ */
+ memset(temp, 0, 256);
+
+ /* Note: this is the only field converted to big endian (network order) */
+ hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
+ - 4 /* RFC 1001 length field itself not counted */);
+
+ hdr->ProtocolId[0] = 0xFE;
+ hdr->ProtocolId[1] = 'S';
+ hdr->ProtocolId[2] = 'M';
+ hdr->ProtocolId[3] = 'B';
+ hdr->StructureSize = cpu_to_le16(64);
+ hdr->Command = smb2_cmd;
+ hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
+ hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
+
+ if (!tcon)
+ goto out;
+
+ hdr->TreeId = tcon->tid;
+ /* Uid is not converted */
+ if (tcon->ses)
+ hdr->SessionId = tcon->ses->Suid;
+ /* BB check following DFS flags BB */
+ /* BB do we have to add check for SHI1005_FLAGS_DFS_ROOT too? */
+ if (tcon->share_flags & SHI1005_FLAGS_DFS)
+ hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ /* BB how does SMB2 do case sensitive? */
+ /* if (tcon->nocase)
+ hdr->Flags |= SMBFLG_CASELESS; */
+ /* if (tcon->ses && tcon->ses->server &&
+ (tcon->ses->server->sec_mode & SECMODE_SIGN_REQUIRED))
+ hdr->Flags |= SMB2_FLAGS_SIGNED; */
+out:
+ pdu->StructureSize2 = cpu_to_le16(parmsize);
+ return;
+}
+
+static int
+smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+{
+ int rc = 0;
+ struct nls_table *nls_codepage;
+ struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
+
+ /*
+ * SMB2's NegProt, SessSetup and Logoff do not have a tcon yet, so the
+ * checks for tcp and smb session status are done differently for those
+ * three - in the calling routine.
+ */
+ if (tcon == NULL)
+ return rc;
+
+ if (smb2_command == SMB2_TREE_CONNECT)
+ return rc;
+
+ if (tcon->tidStatus == CifsExiting) {
+ /*
+ * Only tree disconnect, open, and write
+ * (and ulogoff, which does not have a tcon)
+ * are allowed as we start a forced umount.
+ */
+ if ((smb2_command != SMB2_WRITE) &&
+ (smb2_command != SMB2_CREATE) &&
+ (smb2_command != SMB2_TREE_DISCONNECT)) {
+ cFYI(1, "can not send cmd %d while umounting",
+ smb2_command);
+ return -ENODEV;
+ }
+ }
+ if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
+ (!tcon->ses->server))
+ return -EIO;
+
+ ses = tcon->ses;
+ server = ses->server;
+
+ /*
+ * Give the demultiplex thread up to 10 seconds to reconnect; this should
+ * be greater than the cifs socket timeout, which is 7 seconds.
+ */
+ while (server->tcpStatus == CifsNeedReconnect) {
+ /*
+ * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
+ * here since they are implicitly done when session drops.
+ */
+ switch (smb2_command) {
+ /*
+ * BB Should we keep oplock break and add flush to exceptions?
+ */
+ case SMB2_TREE_DISCONNECT:
+ case SMB2_CANCEL:
+ case SMB2_CLOSE:
+ case SMB2_OPLOCK_BREAK:
+ return -EAGAIN;
+ }
+
+ wait_event_interruptible_timeout(server->response_q,
+ (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+
+ /* are we still trying to reconnect? */
+ if (server->tcpStatus != CifsNeedReconnect)
+ break;
+
+ /*
+ * On "soft" mounts we wait once. Hard mounts keep
+ * retrying until the process is killed or the server
+ * comes back on-line.
+ */
+ if (!tcon->retry) {
+ cFYI(1, "gave up waiting on reconnect in smb_init");
+ return -EHOSTDOWN;
+ }
+ }
+
+ if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
+ return rc;
+
+ nls_codepage = load_nls_default();
+
+ /*
+ * need to prevent multiple threads trying to simultaneously reconnect
+ * the same SMB session
+ */
+ mutex_lock(&tcon->ses->session_mutex);
+ rc = cifs_negotiate_protocol(0, tcon->ses);
+ if (!rc && tcon->ses->need_reconnect)
+ rc = cifs_setup_session(0, tcon->ses, nls_codepage);
+
+ if (rc || !tcon->need_reconnect) {
+ mutex_unlock(&tcon->ses->session_mutex);
+ goto out;
+ }
+
+ cifs_mark_open_files_invalid(tcon);
+ rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
+ mutex_unlock(&tcon->ses->session_mutex);
+ cFYI(1, "reconnect tcon rc = %d", rc);
+ if (rc)
+ goto out;
+ atomic_inc(&tconInfoReconnectCount);
+ /*
+ * BB FIXME add code to check if wsize needs update due to negotiated
+ * smb buffer size shrinking.
+ */
+out:
+ /*
+ * Check if this is a handle-based operation, so we know whether we can
+ * continue or must return to the caller to reset the file handle.
+ */
+ /*
+ * BB Is flush done by server on drop of tcp session? Should we special
+ * case it and skip above?
+ */
+ switch (smb2_command) {
+ case SMB2_FLUSH:
+ case SMB2_READ:
+ case SMB2_WRITE:
+ case SMB2_LOCK:
+ case SMB2_IOCTL:
+ case SMB2_QUERY_DIRECTORY:
+ case SMB2_CHANGE_NOTIFY:
+ case SMB2_QUERY_INFO:
+ case SMB2_SET_INFO:
+ return -EAGAIN;
+ }
+ unload_nls(nls_codepage);
+ return rc;
+}
+
+/*
+ * Allocate and return a pointer to an SMB2 request hdr, and set basic
+ * SMB2 information in the SMB2 header. If the return code is zero, this
+ * function has filled in the request_buf pointer.
+ */
+static int
+small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ void **request_buf)
+{
+ int rc = 0;
+
+ rc = smb2_reconnect(smb2_command, tcon);
+ if (rc)
+ return rc;
+
+ /* BB eventually switch this to SMB2 specific small buf size */
+ *request_buf = cifs_small_buf_get();
+ if (*request_buf == NULL) {
+ /* BB should we add a retry in here if not a writepage? */
+ return -ENOMEM;
+ }
+
+ smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
+
+ if (tcon != NULL) {
+#ifdef CONFIG_CIFS_STATS2
+ uint16_t com_code = le16_to_cpu(smb2_command);
+ cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
+#endif
+ cifs_stats_inc(&tcon->num_smbs_sent);
+ }
+
+ return rc;
+}
+
+static void
+free_rsp_buf(int resp_buftype, void *rsp)
+{
+ if (resp_buftype == CIFS_SMALL_BUFFER)
+ cifs_small_buf_release(rsp);
+ else if (resp_buftype == CIFS_LARGE_BUFFER)
+ cifs_buf_release(rsp);
+}
+
+#define SMB2_NUM_PROT 1
+
+#define SMB2_PROT 0
+#define SMB21_PROT 1
+#define BAD_PROT 0xFFFF
+
+#define SMB2_PROT_ID 0x0202
+#define SMB21_PROT_ID 0x0210
+#define BAD_PROT_ID 0xFFFF
+
+static struct {
+ int index;
+ __le16 name;
+} smb2protocols[] = {
+ {SMB2_PROT, cpu_to_le16(SMB2_PROT_ID)},
+ {SMB21_PROT, cpu_to_le16(SMB21_PROT_ID)},
+ {BAD_PROT, cpu_to_le16(BAD_PROT_ID)}
+};
+
+/*
+ *
+ * SMB2 Worker functions follow:
+ *
+ * The general structure of the worker functions is:
+ * 1) Call smb2_init (assembles SMB2 header)
+ * 2) Initialize SMB2 command specific fields in fixed length area of SMB
+ * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
+ * 4) Decode SMB2 command specific fields in the fixed length area
+ * 5) Decode variable length data area (if any for this SMB2 command type)
+ * 6) Call free smb buffer
+ * 7) return
+ *
+ */
+
+int
+SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+{
+ struct smb2_negotiate_req *req;
+ struct smb2_negotiate_rsp *rsp;
+ struct kvec iov[1];
+ int rc = 0;
+ int resp_buftype;
+ struct TCP_Server_Info *server;
+ unsigned int sec_flags;
+ u16 i;
+ u16 temp = 0;
+ int blob_offset, blob_length;
+ char *security_blob;
+ int flags = CIFS_NEG_OP;
+
+ cFYI(1, "Negotiate protocol");
+
+ if (ses->server)
+ server = ses->server;
+ else {
+ rc = -EIO;
+ return rc;
+ }
+
+ rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
+ if (rc)
+ return rc;
+
+ /* if any of the auth flags (i.e. not sign or seal) are overridden, use them */
+ if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/
+ else /* if override flags set only sign/seal OR them with global auth */
+ sec_flags = global_secflags | ses->overrideSecFlg;
+
+ cFYI(1, "sec_flags 0x%x", sec_flags);
+
+ req->hdr.SessionId = 0;
+
+ for (i = 0; i < SMB2_NUM_PROT; i++)
+ req->Dialects[i] = smb2protocols[i].name;
+
+ req->DialectCount = cpu_to_le16(i);
+ inc_rfc1001_len(req, i * 2);
+
+ /* only one of SMB2 signing flags may be set in SMB2 request */
+ if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
+ temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
+ else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
+ temp = SMB2_NEGOTIATE_SIGNING_ENABLED;
+
+ req->SecurityMode = cpu_to_le16(temp);
+
+ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field */
+ iov[0].iov_len = get_rfc1002_length(req) + 4;
+
+ rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
+
+ rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
+ /*
+ * No tcon so can't do
+ * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+ */
+ if (rc != 0)
+ goto neg_exit;
+
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto neg_exit;
+ }
+
+ cFYI(1, "mode 0x%x", rsp->SecurityMode);
+
+ if (rsp->DialectRevision == smb2protocols[SMB21_PROT].name)
+ cFYI(1, "negotiated smb2.1 dialect");
+ else if (rsp->DialectRevision == smb2protocols[SMB2_PROT].name)
+ cFYI(1, "negotiated smb2 dialect");
+ else {
+ cERROR(1, "Illegal dialect returned by server %d",
+ le16_to_cpu(rsp->DialectRevision));
+ rc = -EIO;
+ goto neg_exit;
+ }
+ server->dialect = le16_to_cpu(rsp->DialectRevision);
+
+ server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
+ server->max_read = le32_to_cpu(rsp->MaxReadSize);
+ server->max_write = le32_to_cpu(rsp->MaxWriteSize);
+ /* BB Do we need to validate the SecurityMode? */
+ server->sec_mode = le16_to_cpu(rsp->SecurityMode);
+ server->capabilities = le32_to_cpu(rsp->Capabilities);
+ /* Internal types */
+ server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
+
+ security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
+ &rsp->hdr);
+ if (blob_length == 0) {
+ cERROR(1, "missing security blob on negprot");
+ rc = -EIO;
+ goto neg_exit;
+ }
+#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
+ rc = decode_neg_token_init(security_blob, blob_length,
+ &server->sec_type);
+ if (rc == 1)
+ rc = 0;
+ else if (rc == 0) {
+ rc = -EIO;
+ goto neg_exit;
+ }
+#endif
+
+neg_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+}
+
+int
+SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ struct smb2_sess_setup_req *req;
+ struct smb2_sess_setup_rsp *rsp = NULL;
+ struct kvec iov[2];
+ int rc = 0;
+ int resp_buftype;
+ __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
+ struct TCP_Server_Info *server;
+ unsigned int sec_flags;
+ u8 temp = 0;
+ u16 blob_length = 0;
+ char *security_blob;
+ char *ntlmssp_blob = NULL;
+ bool use_spnego = false; /* else use raw ntlmssp */
+
+ cFYI(1, "Session Setup");
+
+ if (ses->server)
+ server = ses->server;
+ else {
+ rc = -EIO;
+ return rc;
+ }
+
+ /*
+ * If the memory allocation is successful, the caller of this function
+ * frees it.
+ */
+ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+ if (!ses->ntlmssp)
+ return -ENOMEM;
+
+ ses->server->secType = RawNTLMSSP;
+
+ssetup_ntlmssp_authenticate:
+ if (phase == NtLmChallenge)
+ phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
+
+ rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
+ if (rc)
+ return rc;
+
+ /* if any of the auth flags (i.e. not sign or seal) are overridden, use them */
+ if (ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ sec_flags = ses->overrideSecFlg; /* BB FIXME fix sign flags?*/
+ else /* if override flags set only sign/seal OR them with global auth */
+ sec_flags = global_secflags | ses->overrideSecFlg;
+
+ cFYI(1, "sec_flags 0x%x", sec_flags);
+
+ req->hdr.SessionId = 0; /* First session, not a reauthenticate */
+ req->VcNumber = 0; /* MBZ */
+ /* to enable echoes and oplocks */
+ req->hdr.CreditRequest = cpu_to_le16(3);
+
+ /* only one of SMB2 signing flags may be set in SMB2 request */
+ if ((sec_flags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN)
+ temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
+ else if (ses->server->sec_mode & SMB2_NEGOTIATE_SIGNING_REQUIRED)
+ temp = SMB2_NEGOTIATE_SIGNING_REQUIRED;
+ else if (sec_flags & CIFSSEC_MAY_SIGN) /* MAY_SIGN is a single flag */
+ temp = SMB2_NEGOTIATE_SIGNING_ENABLED;
+
+ req->SecurityMode = temp;
+ req->Capabilities = 0;
+ req->Channel = 0; /* MBZ */
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field and 1 for pad */
+ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ if (phase == NtLmNegotiate) {
+ ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
+ GFP_KERNEL);
+ if (ntlmssp_blob == NULL) {
+ rc = -ENOMEM;
+ goto ssetup_exit;
+ }
+ build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
+ if (use_spnego) {
+ /* blob_length = build_spnego_ntlmssp_blob(
+ &security_blob,
+ sizeof(struct _NEGOTIATE_MESSAGE),
+ ntlmssp_blob); */
+ /* BB eventually need to add this */
+ cERROR(1, "spnego not supported for SMB2 yet");
+ rc = -EOPNOTSUPP;
+ kfree(ntlmssp_blob);
+ goto ssetup_exit;
+ } else {
+ blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
+ /* with raw NTLMSSP we don't encapsulate in SPNEGO */
+ security_blob = ntlmssp_blob;
+ }
+ } else if (phase == NtLmAuthenticate) {
+ req->hdr.SessionId = ses->Suid;
+ ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
+ GFP_KERNEL);
+ if (ntlmssp_blob == NULL) {
+ cERROR(1, "failed to malloc ntlmssp blob");
+ rc = -ENOMEM;
+ goto ssetup_exit;
+ }
+ rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
+ nls_cp);
+ if (rc) {
+ cFYI(1, "build_ntlmssp_auth_blob failed %d", rc);
+ goto ssetup_exit; /* BB double check error handling */
+ }
+ if (use_spnego) {
+ /* blob_length = build_spnego_ntlmssp_blob(
+ &security_blob,
+ blob_length,
+ ntlmssp_blob); */
+ cERROR(1, "spnego not supported for SMB2 yet");
+ rc = -EOPNOTSUPP;
+ kfree(ntlmssp_blob);
+ goto ssetup_exit;
+ } else {
+ security_blob = ntlmssp_blob;
+ }
+ } else {
+ cERROR(1, "illegal ntlmssp phase");
+ rc = -EIO;
+ goto ssetup_exit;
+ }
+
+ /* Testing shows that buffer offset must be at location of Buffer[0] */
+ req->SecurityBufferOffset =
+ cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
+ 1 /* pad */ - 4 /* rfc1001 len */);
+ req->SecurityBufferLength = cpu_to_le16(blob_length);
+ iov[1].iov_base = security_blob;
+ iov[1].iov_len = blob_length;
+
+ inc_rfc1001_len(req, blob_length - 1 /* pad */);
+
+ /* BB add code to build os and lm fields */
+
+ rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, CIFS_LOG_ERROR);
+
+ kfree(security_blob);
+ rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
+ if (rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
+ if (phase != NtLmNegotiate) {
+ cERROR(1, "Unexpected more processing error");
+ goto ssetup_exit;
+ }
+ if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
+ le16_to_cpu(rsp->SecurityBufferOffset)) {
+ cERROR(1, "Invalid security buffer offset %d",
+ le16_to_cpu(rsp->SecurityBufferOffset));
+ rc = -EIO;
+ goto ssetup_exit;
+ }
+
+ /* NTLMSSP Negotiate sent, now processing the challenge (response) */
+ phase = NtLmChallenge; /* process ntlmssp challenge */
+ rc = 0; /* MORE_PROCESSING is not an error here but expected */
+ ses->Suid = rsp->hdr.SessionId;
+ rc = decode_ntlmssp_challenge(rsp->Buffer,
+ le16_to_cpu(rsp->SecurityBufferLength), ses);
+ }
+
+ /*
+ * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
+ * but at least the raw NTLMSSP case works.
+ */
+ /*
+ * No tcon so can't do
+ * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+ */
+ if (rc != 0)
+ goto ssetup_exit;
+
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto ssetup_exit;
+ }
+
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ssetup_exit:
+ free_rsp_buf(resp_buftype, rsp);
+
+ /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
+ if ((phase == NtLmChallenge) && (rc == 0))
+ goto ssetup_ntlmssp_authenticate;
+ return rc;
+}
+
+int
+SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
+{
+ struct smb2_logoff_req *req; /* response is also trivial struct */
+ int rc = 0;
+ struct TCP_Server_Info *server;
+
+ cFYI(1, "disconnect session %p", ses);
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
+ if (rc)
+ return rc;
+
+ /* since no tcon, smb2_init can not do this, so do here */
+ req->hdr.SessionId = ses->Suid;
+
+ rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
+ /*
+ * No tcon so can't do
+ * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+ */
+ return rc;
+}
+
+static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
+{
+ cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
+}
+
+#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
+
+int
+SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ struct cifs_tcon *tcon, const struct nls_table *cp)
+{
+ struct smb2_tree_connect_req *req;
+ struct smb2_tree_connect_rsp *rsp = NULL;
+ struct kvec iov[2];
+ int rc = 0;
+ int resp_buftype;
+ int unc_path_len;
+ struct TCP_Server_Info *server;
+ __le16 *unc_path = NULL;
+
+ cFYI(1, "TCON");
+
+ if ((ses->server) && tree)
+ server = ses->server;
+ else
+ return -EIO;
+
+ if (tcon && tcon->bad_network_name)
+ return -ENOENT;
+
+ unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
+ if (unc_path == NULL)
+ return -ENOMEM;
+
+ unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
+ unc_path_len *= 2;
+ if (unc_path_len < 2) {
+ kfree(unc_path);
+ return -EINVAL;
+ }
+
+ rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
+ if (rc) {
+ kfree(unc_path);
+ return rc;
+ }
+
+ if (tcon == NULL) {
+ /* since no tcon, smb2_init can not do this, so do here */
+ req->hdr.SessionId = ses->Suid;
+ /* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
+ req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
+ }
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field and 1 for pad */
+ iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+
+ /* Testing shows that buffer offset must be at location of Buffer[0] */
+ req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
+ - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
+ req->PathLength = cpu_to_le16(unc_path_len - 2);
+ iov[1].iov_base = unc_path;
+ iov[1].iov_len = unc_path_len;
+
+ inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
+
+ rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
+ rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
+
+ if (rc != 0) {
+ if (tcon) {
+ cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
+ tcon->need_reconnect = true;
+ }
+ goto tcon_error_exit;
+ }
+
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto tcon_exit;
+ }
+
+ if (tcon == NULL) {
+ ses->ipc_tid = rsp->hdr.TreeId;
+ goto tcon_exit;
+ }
+
+ if (rsp->ShareType == SMB2_SHARE_TYPE_DISK)
+ cFYI(1, "connection to disk share");
+ else if (rsp->ShareType == SMB2_SHARE_TYPE_PIPE) {
+ tcon->ipc = true;
+ cFYI(1, "connection to pipe share");
+ } else if (rsp->ShareType == SMB2_SHARE_TYPE_PRINT) {
+ tcon->print = true;
+ cFYI(1, "connection to printer");
+ } else {
+ cERROR(1, "unknown share type %d", rsp->ShareType);
+ rc = -EOPNOTSUPP;
+ goto tcon_error_exit;
+ }
+
+ tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
+ tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
+ tcon->tidStatus = CifsGood;
+ tcon->need_reconnect = false;
+ tcon->tid = rsp->hdr.TreeId;
+ strncpy(tcon->treeName, tree, MAX_TREE_SIZE);
+
+ if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
+ ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
+ cERROR(1, "DFS capability contradicts DFS flag");
+
+tcon_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ kfree(unc_path);
+ return rc;
+
+tcon_error_exit:
+ if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ cERROR(1, "BAD_NETWORK_NAME: %s", tree);
+ tcon->bad_network_name = true;
+ }
+ goto tcon_exit;
+}
+
+int
+SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
+{
+ struct smb2_tree_disconnect_req *req; /* response is trivial */
+ int rc = 0;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+
+ cFYI(1, "Tree Disconnect");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
+ return 0;
+
+ rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
+ if (rc)
+ return rc;
+
+ rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
+ if (rc)
+ cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
+
+ return rc;
+}
+
+int
+SMB2_open(const unsigned int xid, struct cifs_tcon *tcon, __le16 *path,
+ u64 *persistent_fid, u64 *volatile_fid, __u32 desired_access,
+ __u32 create_disposition, __u32 file_attributes, __u32 create_options)
+{
+ struct smb2_create_req *req;
+ struct smb2_create_rsp *rsp;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[2];
+ int resp_buftype;
+ int uni_path_len;
+ int rc = 0;
+ int num_iovecs = 2;
+
+ cFYI(1, "create/open");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
+ if (rc)
+ return rc;
+
+ if (enable_oplocks)
+ req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_BATCH;
+ else
+ req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+ req->ImpersonationLevel = IL_IMPERSONATION;
+ req->DesiredAccess = cpu_to_le32(desired_access);
+ /* File attributes ignored on open (used in create though) */
+ req->FileAttributes = cpu_to_le32(file_attributes);
+ req->ShareAccess = FILE_SHARE_ALL_LE;
+ req->CreateDisposition = cpu_to_le32(create_disposition);
+ req->CreateOptions = cpu_to_le32(create_options);
+ uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req)
+ - 1 /* pad */ - 4 /* do not count rfc1001 len field */);
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field */
+ iov[0].iov_len = get_rfc1002_length(req) + 4;
+
+ /* MUST set path len (NameLength) to 0 opening root of share */
+ if (uni_path_len >= 4) {
+ req->NameLength = cpu_to_le16(uni_path_len - 2);
+ /* -1 since last byte is buf[0] which is sent below (path) */
+ iov[0].iov_len--;
+ iov[1].iov_len = uni_path_len;
+ iov[1].iov_base = path;
+ /*
+ * -1 since last byte is buf[0] which was counted in
+ * smb2_buf_len.
+ */
+ inc_rfc1001_len(req, uni_path_len - 1);
+ } else {
+ num_iovecs = 1;
+ req->NameLength = 0;
+ }
+
+ rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+ rsp = (struct smb2_create_rsp *)iov[0].iov_base;
+
+ if (rc != 0) {
+ cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+ goto creat_exit;
+ }
+
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto creat_exit;
+ }
+ *persistent_fid = rsp->PersistentFileId;
+ *volatile_fid = rsp->VolatileFileId;
+creat_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+}
+
+int
+SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid)
+{
+ struct smb2_close_req *req;
+ struct smb2_close_rsp *rsp;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[1];
+ int resp_buftype;
+ int rc = 0;
+
+ cFYI(1, "Close");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
+ if (rc)
+ return rc;
+
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field */
+ iov[0].iov_len = get_rfc1002_length(req) + 4;
+
+ rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
+ rsp = (struct smb2_close_rsp *)iov[0].iov_base;
+
+ if (rc != 0) {
+ if (tcon)
+ cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
+ goto close_exit;
+ }
+
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto close_exit;
+ }
+
+ /* BB FIXME - decode close response, update inode for caching */
+
+close_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+}
+
+static int
+validate_buf(unsigned int offset, unsigned int buffer_length,
+ struct smb2_hdr *hdr, unsigned int min_buf_size)
+{
+ unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
+ char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
+ char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
+ char *end_of_buf = begin_of_buf + buffer_length;
+
+ if (buffer_length < min_buf_size) {
+ cERROR(1, "buffer length %d smaller than minimum size %d",
+ buffer_length, min_buf_size);
+ return -EINVAL;
+ }
+
+ /* check if beyond RFC1001 maximum length */
+ if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
+ cERROR(1, "buffer length %d or smb length %d too large",
+ buffer_length, smb_len);
+ return -EINVAL;
+ }
+
+ if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
+ cERROR(1, "illegal server response, bad offset to data");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * If SMB buffer fields are valid, copy into temporary buffer to hold result.
+ * Caller must free buffer.
+ */
+static int
+validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
+ struct smb2_hdr *hdr, unsigned int minbufsize,
+ char *data)
+{
+ char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
+ int rc;
+
+ if (!data)
+ return -EINVAL;
+
+ rc = validate_buf(offset, buffer_length, hdr, minbufsize);
+ if (rc)
+ return rc;
+
+ memcpy(data, begin_of_buf, buffer_length);
+
+ return 0;
+}
+
+int
+SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+ struct smb2_file_all_info *data)
+{
+ struct smb2_query_info_req *req;
+ struct smb2_query_info_rsp *rsp = NULL;
+ struct kvec iov[2];
+ int rc = 0;
+ int resp_buftype;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+
+ cFYI(1, "Query Info");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
+ if (rc)
+ return rc;
+
+ req->InfoType = SMB2_O_INFO_FILE;
+ req->FileInfoClass = FILE_ALL_INFORMATION;
+ req->PersistentFileId = persistent_fid;
+ req->VolatileFileId = volatile_fid;
+ /* 4 for rfc1002 length field and 1 for Buffer */
+ req->InputBufferOffset =
+ cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+ req->OutputBufferLength =
+ cpu_to_le32(sizeof(struct smb2_file_all_info) + MAX_NAME * 2);
+
+ iov[0].iov_base = (char *)req;
+ /* 4 for rfc1002 length field */
+ iov[0].iov_len = get_rfc1002_length(req) + 4;
+
+ rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
+ if (rc) {
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+ goto qinf_exit;
+ }
+
+ rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
+
+ rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength),
+ &rsp->hdr, sizeof(struct smb2_file_all_info),
+ (char *)data);
+
+qinf_exit:
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+}
+
+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches the request?
+ */
+static void
+smb2_echo_callback(struct mid_q_entry *mid)
+{
+ struct TCP_Server_Info *server = mid->callback_data;
+ struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
+ unsigned int credits_received = 1;
+
+ if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
+
+ DeleteMidQEntry(mid);
+ add_credits(server, credits_received, CIFS_ECHO_OP);
+}
+
+int
+SMB2_echo(struct TCP_Server_Info *server)
+{
+ struct smb2_echo_req *req;
+ int rc = 0;
+ struct kvec iov;
+
+ cFYI(1, "In echo request");
+
+ rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ if (rc)
+ return rc;
+
+ req->hdr.CreditRequest = cpu_to_le16(1);
+
+ iov.iov_base = (char *)req;
+ /* 4 for rfc1002 length field */
+ iov.iov_len = get_rfc1002_length(req) + 4;
+
+ rc = cifs_call_async(server, &iov, 1, NULL, smb2_echo_callback, server,
+ CIFS_ECHO_OP);
+ if (rc)
+ cFYI(1, "Echo request failed: %d", rc);
+
+ cifs_small_buf_release(req);
+ return rc;
+}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
new file mode 100644
index 000000000000..c5fbfac5d576
--- /dev/null
+++ b/fs/cifs/smb2pdu.h
@@ -0,0 +1,579 @@
+/*
+ * fs/cifs/smb2pdu.h
+ *
+ * Copyright (c) International Business Machines Corp., 2009, 2010
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _SMB2PDU_H
+#define _SMB2PDU_H
+
+#include <net/sock.h>
+
+/*
+ * Note that, due to trying to use names similar to the protocol specifications,
+ * there are many mixed case field names in the structures below. Although
+ * this does not match typical Linux kernel style, it is necessary to be
+ * able to match against the protocol specification.
+ *
+ * SMB2 commands
+ * Some commands have minimal (wct=0, bcc=0), or uninteresting, responses
+ * (i.e. no useful data other than the SMB error code itself) and are marked
+ * as such. Knowing this helps avoid response buffer allocations and copies
+ * in some cases.
+ */
+
+/* List of commands in host endian */
+#define SMB2_NEGOTIATE_HE 0x0000
+#define SMB2_SESSION_SETUP_HE 0x0001
+#define SMB2_LOGOFF_HE 0x0002 /* trivial request/resp */
+#define SMB2_TREE_CONNECT_HE 0x0003
+#define SMB2_TREE_DISCONNECT_HE 0x0004 /* trivial req/resp */
+#define SMB2_CREATE_HE 0x0005
+#define SMB2_CLOSE_HE 0x0006
+#define SMB2_FLUSH_HE 0x0007 /* trivial resp */
+#define SMB2_READ_HE 0x0008
+#define SMB2_WRITE_HE 0x0009
+#define SMB2_LOCK_HE 0x000A
+#define SMB2_IOCTL_HE 0x000B
+#define SMB2_CANCEL_HE 0x000C
+#define SMB2_ECHO_HE 0x000D
+#define SMB2_QUERY_DIRECTORY_HE 0x000E
+#define SMB2_CHANGE_NOTIFY_HE 0x000F
+#define SMB2_QUERY_INFO_HE 0x0010
+#define SMB2_SET_INFO_HE 0x0011
+#define SMB2_OPLOCK_BREAK_HE 0x0012
+
+/* The same list in little endian */
+#define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
+#define SMB2_SESSION_SETUP cpu_to_le16(SMB2_SESSION_SETUP_HE)
+#define SMB2_LOGOFF cpu_to_le16(SMB2_LOGOFF_HE)
+#define SMB2_TREE_CONNECT cpu_to_le16(SMB2_TREE_CONNECT_HE)
+#define SMB2_TREE_DISCONNECT cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
+#define SMB2_CREATE cpu_to_le16(SMB2_CREATE_HE)
+#define SMB2_CLOSE cpu_to_le16(SMB2_CLOSE_HE)
+#define SMB2_FLUSH cpu_to_le16(SMB2_FLUSH_HE)
+#define SMB2_READ cpu_to_le16(SMB2_READ_HE)
+#define SMB2_WRITE cpu_to_le16(SMB2_WRITE_HE)
+#define SMB2_LOCK cpu_to_le16(SMB2_LOCK_HE)
+#define SMB2_IOCTL cpu_to_le16(SMB2_IOCTL_HE)
+#define SMB2_CANCEL cpu_to_le16(SMB2_CANCEL_HE)
+#define SMB2_ECHO cpu_to_le16(SMB2_ECHO_HE)
+#define SMB2_QUERY_DIRECTORY cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
+#define SMB2_CHANGE_NOTIFY cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
+#define SMB2_QUERY_INFO cpu_to_le16(SMB2_QUERY_INFO_HE)
+#define SMB2_SET_INFO cpu_to_le16(SMB2_SET_INFO_HE)
+#define SMB2_OPLOCK_BREAK cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
+
+#define NUMBER_OF_SMB2_COMMANDS 0x0013
+
+/* BB FIXME - analyze following length BB */
+#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
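+/* i.e. 4 + 64 + (2 * 24) + 2 + 2 = 120 bytes = 0x78 */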
+
+#define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
+
+/*
+ * SMB2 Header Definition
+ *
+ * "MBZ" : Must be Zero
+ * "BB" : BugBug, Something to check/review/analyze later
+ * "PDU" : "Protocol Data Unit" (ie a network "frame")
+ *
+ */
+
+#define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64)
+
+struct smb2_hdr {
+ __be32 smb2_buf_length; /* big endian on wire */
+ /* the length is only two or three bytes, with a
+ one or two byte type preceding it that MBZ */
+ __u8 ProtocolId[4]; /* 0xFE 'S' 'M' 'B' */
+ __le16 StructureSize; /* 64 */
+ __le16 CreditCharge; /* MBZ */
+ __le32 Status; /* Error from server */
+ __le16 Command;
+ __le16 CreditRequest; /* CreditResponse */
+ __le32 Flags;
+ __le32 NextCommand;
+ __u64 MessageId; /* opaque - so can stay little endian */
+ __le32 ProcessId;
+ __u32 TreeId; /* opaque - so do not make little endian */
+ __u64 SessionId; /* opaque - so do not make little endian */
+ __u8 Signature[16];
+} __packed;
+
+struct smb2_pdu {
+ struct smb2_hdr hdr;
+ __le16 StructureSize2; /* size of wct area (varies, request specific) */
+} __packed;
+
+/*
+ * SMB2 flag definitions
+ */
+#define SMB2_FLAGS_SERVER_TO_REDIR __constant_cpu_to_le32(0x00000001)
+#define SMB2_FLAGS_ASYNC_COMMAND __constant_cpu_to_le32(0x00000002)
+#define SMB2_FLAGS_RELATED_OPERATIONS __constant_cpu_to_le32(0x00000004)
+#define SMB2_FLAGS_SIGNED __constant_cpu_to_le32(0x00000008)
+#define SMB2_FLAGS_DFS_OPERATIONS __constant_cpu_to_le32(0x10000000)
+
+/*
+ * Definitions for SMB2 Protocol Data Units (network frames)
+ *
+ * See MS-SMB2.PDF specification for protocol details.
+ * The naming convention is the lower case version of the SMB2
+ * command code name for the struct. Note that structures must be packed.
+ *
+ */
+
+#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
+
+struct smb2_err_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize;
+ __le16 Reserved; /* MBZ */
+ __le32 ByteCount; /* even if zero, at least one byte follows */
+ __u8 ErrorData[1]; /* variable length */
+} __packed;
+
+struct smb2_negotiate_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 36 */
+ __le16 DialectCount;
+ __le16 SecurityMode;
+ __le16 Reserved; /* MBZ */
+ __le32 Capabilities;
+ __u8 ClientGUID[16]; /* MBZ */
+ __le64 ClientStartTime; /* MBZ */
+ __le16 Dialects[2]; /* variable length */
+} __packed;
+
+/* SecurityMode flags */
+#define SMB2_NEGOTIATE_SIGNING_ENABLED 0x0001
+#define SMB2_NEGOTIATE_SIGNING_REQUIRED 0x0002
+/* Capabilities flags */
+#define SMB2_GLOBAL_CAP_DFS 0x00000001
+#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
+#define SMB2_GLOBAL_CAP_LARGE_MTU 0x00000004 /* Resp only New to SMB2.1 */
+/* Internal types */
+#define SMB2_NT_FIND 0x00100000
+#define SMB2_LARGE_FILES 0x00200000
+
+struct smb2_negotiate_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 65 */
+ __le16 SecurityMode;
+ __le16 DialectRevision;
+ __le16 Reserved; /* MBZ */
+ __u8 ServerGUID[16];
+ __le32 Capabilities;
+ __le32 MaxTransactSize;
+ __le32 MaxReadSize;
+ __le32 MaxWriteSize;
+ __le64 SystemTime; /* MBZ */
+ __le64 ServerStartTime;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __le32 Reserved2; /* may be any value, ignore */
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __packed;
+
+struct smb2_sess_setup_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 25 */
+ __u8 VcNumber;
+ __u8 SecurityMode;
+ __le32 Capabilities;
+ __le32 Channel;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __le64 PreviousSessionId;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __packed;
+
+/* Currently defined SessionFlags */
+#define SMB2_SESSION_FLAG_IS_GUEST 0x0001
+#define SMB2_SESSION_FLAG_IS_NULL 0x0002
+struct smb2_sess_setup_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 SessionFlags;
+ __le16 SecurityBufferOffset;
+ __le16 SecurityBufferLength;
+ __u8 Buffer[1]; /* variable length GSS security buffer */
+} __packed;
+
+struct smb2_logoff_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __packed;
+
+struct smb2_logoff_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __packed;
+
+struct smb2_tree_connect_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 Reserved;
+ __le16 PathOffset;
+ __le16 PathLength;
+ __u8 Buffer[1]; /* variable length */
+} __packed;
+
+struct smb2_tree_connect_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 16 */
+ __u8 ShareType; /* see below */
+ __u8 Reserved;
+ __le32 ShareFlags; /* see below */
+ __le32 Capabilities; /* see below */
+ __le32 MaximalAccess;
+} __packed;
+
+/* Possible ShareType values */
+#define SMB2_SHARE_TYPE_DISK 0x01
+#define SMB2_SHARE_TYPE_PIPE 0x02
+#define SMB2_SHARE_TYPE_PRINT 0x03
+
+/*
+ * Possible ShareFlags - exactly one of the first 4 caching flags must be
+ * set (any of the remaining, SHI1005, flags may be set individually or in
+ * combination); a decode sketch follows the flag list below.
+ */
+#define SMB2_SHAREFLAG_MANUAL_CACHING 0x00000000
+#define SMB2_SHAREFLAG_AUTO_CACHING 0x00000010
+#define SMB2_SHAREFLAG_VDO_CACHING 0x00000020
+#define SMB2_SHAREFLAG_NO_CACHING 0x00000030
+#define SHI1005_FLAGS_DFS 0x00000001
+#define SHI1005_FLAGS_DFS_ROOT 0x00000002
+#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS 0x00000100
+#define SHI1005_FLAGS_FORCE_SHARED_DELETE 0x00000200
+#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400
+#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800
+#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000
+#define SHI1005_FLAGS_ENABLE_HASH 0x00002000
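+
+/*
+ * A minimal sketch of decoding the caching policy from ShareFlags, assuming
+ * a hypothetical mask (the four caching policies above occupy bits 4-5 and
+ * are not individual bit flags):
+ */
+#define SMB2_SHAREFLAG_CACHING_MASK 0x00000030 /* hypothetical, for the sketch */
+
+static inline u32 smb2_share_caching_policy(__le32 share_flags)
+{
+ /* returns one of the four SMB2_SHAREFLAG_*_CACHING values above */
+ return le32_to_cpu(share_flags) & SMB2_SHAREFLAG_CACHING_MASK;
+}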
+
+/* Possible share capabilities */
+#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008)
+
+struct smb2_tree_disconnect_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __packed;
+
+struct smb2_tree_disconnect_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __le16 Reserved;
+} __packed;
+
+/* File Attributes */
+#define FILE_ATTRIBUTE_READONLY 0x00000001
+#define FILE_ATTRIBUTE_HIDDEN 0x00000002
+#define FILE_ATTRIBUTE_SYSTEM 0x00000004
+#define FILE_ATTRIBUTE_DIRECTORY 0x00000010
+#define FILE_ATTRIBUTE_ARCHIVE 0x00000020
+#define FILE_ATTRIBUTE_NORMAL 0x00000080
+#define FILE_ATTRIBUTE_TEMPORARY 0x00000100
+#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200
+#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400
+#define FILE_ATTRIBUTE_COMPRESSED 0x00000800
+#define FILE_ATTRIBUTE_OFFLINE 0x00001000
+#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000
+#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000
+
+/* Oplock levels */
+#define SMB2_OPLOCK_LEVEL_NONE 0x00
+#define SMB2_OPLOCK_LEVEL_II 0x01
+#define SMB2_OPLOCK_LEVEL_EXCLUSIVE 0x08
+#define SMB2_OPLOCK_LEVEL_BATCH 0x09
+#define SMB2_OPLOCK_LEVEL_LEASE 0xFF
+
+/* Desired Access Flags */
+#define FILE_READ_DATA_LE cpu_to_le32(0x00000001)
+#define FILE_WRITE_DATA_LE cpu_to_le32(0x00000002)
+#define FILE_APPEND_DATA_LE cpu_to_le32(0x00000004)
+#define FILE_READ_EA_LE cpu_to_le32(0x00000008)
+#define FILE_WRITE_EA_LE cpu_to_le32(0x00000010)
+#define FILE_EXECUTE_LE cpu_to_le32(0x00000020)
+#define FILE_READ_ATTRIBUTES_LE cpu_to_le32(0x00000080)
+#define FILE_WRITE_ATTRIBUTES_LE cpu_to_le32(0x00000100)
+#define FILE_DELETE_LE cpu_to_le32(0x00010000)
+#define FILE_READ_CONTROL_LE cpu_to_le32(0x00020000)
+#define FILE_WRITE_DAC_LE cpu_to_le32(0x00040000)
+#define FILE_WRITE_OWNER_LE cpu_to_le32(0x00080000)
+#define FILE_SYNCHRONIZE_LE cpu_to_le32(0x00100000)
+#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
+#define FILE_MAXIMAL_ACCESS_LE cpu_to_le32(0x02000000)
+#define FILE_GENERIC_ALL_LE cpu_to_le32(0x10000000)
+#define FILE_GENERIC_EXECUTE_LE cpu_to_le32(0x20000000)
+#define FILE_GENERIC_WRITE_LE cpu_to_le32(0x40000000)
+#define FILE_GENERIC_READ_LE cpu_to_le32(0x80000000)
+
+/* ShareAccess Flags */
+#define FILE_SHARE_READ_LE cpu_to_le32(0x00000001)
+#define FILE_SHARE_WRITE_LE cpu_to_le32(0x00000002)
+#define FILE_SHARE_DELETE_LE cpu_to_le32(0x00000004)
+#define FILE_SHARE_ALL_LE cpu_to_le32(0x00000007)
+
+/* CreateDisposition Flags */
+#define FILE_SUPERSEDE_LE cpu_to_le32(0x00000000)
+#define FILE_OPEN_LE cpu_to_le32(0x00000001)
+#define FILE_CREATE_LE cpu_to_le32(0x00000002)
+#define FILE_OPEN_IF_LE cpu_to_le32(0x00000003)
+#define FILE_OVERWRITE_LE cpu_to_le32(0x00000004)
+#define FILE_OVERWRITE_IF_LE cpu_to_le32(0x00000005)
+
+/* CreateOptions Flags */
+#define FILE_DIRECTORY_FILE_LE cpu_to_le32(0x00000001)
+/* same as #define CREATE_NOT_FILE_LE cpu_to_le32(0x00000001) */
+#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
+#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
+#define FILE_NO_INTERMEDIATE_BUFFERRING_LE cpu_to_le32(0x00000008)
+#define FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010)
+#define FILE_SYNCHRONOUS_IO_NON_ALERT_LE cpu_to_le32(0x00000020)
+#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
+#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
+#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
+#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
+#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
+#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
+#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
+#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
+#define FILE_RESERVE_OPFILTER_LE cpu_to_le32(0x00100000)
+#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
+#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
+#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE cpu_to_le32(0x00800000)
+
+#define FILE_READ_RIGHTS_LE (FILE_READ_DATA_LE | FILE_READ_EA_LE \
+ | FILE_READ_ATTRIBUTES_LE)
+#define FILE_WRITE_RIGHTS_LE (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE \
+ | FILE_WRITE_EA_LE | FILE_WRITE_ATTRIBUTES_LE)
+#define FILE_EXEC_RIGHTS_LE (FILE_EXECUTE_LE)
+
+/* Impersonation Levels */
+#define IL_ANONYMOUS cpu_to_le32(0x00000000)
+#define IL_IDENTIFICATION cpu_to_le32(0x00000001)
+#define IL_IMPERSONATION cpu_to_le32(0x00000002)
+#define IL_DELEGATE cpu_to_le32(0x00000003)
+
+/* Create Context Values */
+#define SMB2_CREATE_EA_BUFFER "ExtA" /* extended attributes */
+#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
+#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
+#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
+#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
+#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
+#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
+#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
+#define SMB2_CREATE_REQUEST_LEASE "RqLs"
+
+struct smb2_create_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 57 */
+ __u8 SecurityFlags;
+ __u8 RequestedOplockLevel;
+ __le32 ImpersonationLevel;
+ __le64 SmbCreateFlags;
+ __le64 Reserved;
+ __le32 DesiredAccess;
+ __le32 FileAttributes;
+ __le32 ShareAccess;
+ __le32 CreateDisposition;
+ __le32 CreateOptions;
+ __le16 NameOffset;
+ __le16 NameLength;
+ __le32 CreateContextsOffset;
+ __le32 CreateContextsLength;
+ __u8 Buffer[1];
+} __packed;
+
+struct smb2_create_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 89 */
+ __u8 OplockLevel;
+ __u8 Reserved;
+ __le32 CreateAction;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 AllocationSize;
+ __le64 EndofFile;
+ __le32 FileAttributes;
+ __le32 Reserved2;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __le32 CreateContextsOffset;
+ __le32 CreateContextsLength;
+ __u8 Buffer[1];
+} __packed;
+
+/* Currently defined values for close flags */
+#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
+struct smb2_close_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 24 */
+ __le16 Flags;
+ __le32 Reserved;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+} __packed;
+
+struct smb2_close_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* 60 */
+ __le16 Flags;
+ __le32 Reserved;
+ __le64 CreationTime;
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 EndOfFile;
+ __le32 Attributes;
+} __packed;
+
+struct smb2_echo_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __packed;
+
+struct smb2_echo_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 4 */
+ __u16 Reserved;
+} __packed;
+
+/* Possible InfoType values */
+#define SMB2_O_INFO_FILE 0x01
+#define SMB2_O_INFO_FILESYSTEM 0x02
+#define SMB2_O_INFO_SECURITY 0x03
+#define SMB2_O_INFO_QUOTA 0x04
+
+struct smb2_query_info_req {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 41 */
+ __u8 InfoType;
+ __u8 FileInfoClass;
+ __le32 OutputBufferLength;
+ __le16 InputBufferOffset;
+ __u16 Reserved;
+ __le32 InputBufferLength;
+ __le32 AdditionalInformation;
+ __le32 Flags;
+ __u64 PersistentFileId; /* opaque endianness */
+ __u64 VolatileFileId; /* opaque endianness */
+ __u8 Buffer[1];
+} __packed;
+
+struct smb2_query_info_rsp {
+ struct smb2_hdr hdr;
+ __le16 StructureSize; /* Must be 9 */
+ __le16 OutputBufferOffset;
+ __le32 OutputBufferLength;
+ __u8 Buffer[1];
+} __packed;
+
+/*
+ * PDU infolevel structure definitions
+ * BB consider moving to a different header
+ */
+
+/* partial list of QUERY INFO levels */
+#define FILE_DIRECTORY_INFORMATION 1
+#define FILE_FULL_DIRECTORY_INFORMATION 2
+#define FILE_BOTH_DIRECTORY_INFORMATION 3
+#define FILE_BASIC_INFORMATION 4
+#define FILE_STANDARD_INFORMATION 5
+#define FILE_INTERNAL_INFORMATION 6
+#define FILE_EA_INFORMATION 7
+#define FILE_ACCESS_INFORMATION 8
+#define FILE_NAME_INFORMATION 9
+#define FILE_RENAME_INFORMATION 10
+#define FILE_LINK_INFORMATION 11
+#define FILE_NAMES_INFORMATION 12
+#define FILE_DISPOSITION_INFORMATION 13
+#define FILE_POSITION_INFORMATION 14
+#define FILE_FULL_EA_INFORMATION 15
+#define FILE_MODE_INFORMATION 16
+#define FILE_ALIGNMENT_INFORMATION 17
+#define FILE_ALL_INFORMATION 18
+#define FILE_ALLOCATION_INFORMATION 19
+#define FILE_END_OF_FILE_INFORMATION 20
+#define FILE_ALTERNATE_NAME_INFORMATION 21
+#define FILE_STREAM_INFORMATION 22
+#define FILE_PIPE_INFORMATION 23
+#define FILE_PIPE_LOCAL_INFORMATION 24
+#define FILE_PIPE_REMOTE_INFORMATION 25
+#define FILE_MAILSLOT_QUERY_INFORMATION 26
+#define FILE_MAILSLOT_SET_INFORMATION 27
+#define FILE_COMPRESSION_INFORMATION 28
+#define FILE_OBJECT_ID_INFORMATION 29
+/* Number 30 not defined in documents */
+#define FILE_MOVE_CLUSTER_INFORMATION 31
+#define FILE_QUOTA_INFORMATION 32
+#define FILE_REPARSE_POINT_INFORMATION 33
+#define FILE_NETWORK_OPEN_INFORMATION 34
+#define FILE_ATTRIBUTE_TAG_INFORMATION 35
+#define FILE_TRACKING_INFORMATION 36
+#define FILEID_BOTH_DIRECTORY_INFORMATION 37
+#define FILEID_FULL_DIRECTORY_INFORMATION 38
+#define FILE_VALID_DATA_LENGTH_INFORMATION 39
+#define FILE_SHORT_NAME_INFORMATION 40
+#define FILE_SFIO_RESERVE_INFORMATION 44
+#define FILE_SFIO_VOLUME_INFORMATION 45
+#define FILE_HARD_LINK_INFORMATION 46
+#define FILE_NORMALIZED_NAME_INFORMATION 48
+#define FILEID_GLOBAL_TX_DIRECTORY_INFORMATION 50
+#define FILE_STANDARD_LINK_INFORMATION 54
+
+/*
+ * This level 18 struct, although it has the same name, is different from the
+ * cifs level 0x107 struct. Level 0x107 has an extra u64 between AccessFlags
+ * and CurrentByteOffset.
+ */
+struct smb2_file_all_info { /* data block encoding of response to level 18 */
+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+ __u32 Pad1; /* End of FILE_BASIC_INFO equivalent */
+ __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
+ __le64 EndOfFile; /* size ie offset to first free byte in file */
+ __le32 NumberOfLinks; /* hard links */
+ __u8 DeletePending;
+ __u8 Directory;
+ __u16 Pad2; /* End of FILE_STANDARD_INFO equivalent */
+ __le64 IndexNumber;
+ __le32 EASize;
+ __le32 AccessFlags;
+ __le64 CurrentByteOffset;
+ __le32 Mode;
+ __le32 AlignmentRequirement;
+ __le32 FileNameLength;
+ char FileName[1];
+} __packed; /* level 18 Query */
+
+#endif /* _SMB2PDU_H */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
new file mode 100644
index 000000000000..bfaa7b148afd
--- /dev/null
+++ b/fs/cifs/smb2proto.h
@@ -0,0 +1,86 @@
+/*
+ * fs/cifs/smb2proto.h
+ *
+ * Copyright (c) International Business Machines Corp., 2002, 2011
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _SMB2PROTO_H
+#define _SMB2PROTO_H
+#include <linux/nls.h>
+#include <linux/key-type.h>
+
+struct statfs;
+
+/*
+ *****************************************************************
+ * All Prototypes
+ *****************************************************************
+ */
+extern int map_smb2_to_linux_error(char *buf, bool log_err);
+extern int smb2_check_message(char *buf, unsigned int length);
+extern unsigned int smb2_calc_size(struct smb2_hdr *hdr);
+extern char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr);
+extern __le16 *cifs_convert_path_to_utf16(const char *from,
+ struct cifs_sb_info *cifs_sb);
+
+extern int smb2_check_receive(struct mid_q_entry *mid,
+ struct TCP_Server_Info *server, bool log_error);
+extern int smb2_setup_request(struct cifs_ses *ses, struct kvec *iov,
+ unsigned int nvec, struct mid_q_entry **ret_mid);
+extern int smb2_setup_async_request(struct TCP_Server_Info *server,
+ struct kvec *iov, unsigned int nvec,
+ struct mid_q_entry **ret_mid);
+extern void smb2_echo_request(struct work_struct *work);
+
+extern int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *full_path, FILE_ALL_INFO *data,
+ bool *adjust_tz);
+extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *name, struct cifs_sb_info *cifs_sb);
+extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
+ struct cifs_sb_info *cifs_sb,
+ struct cifs_tcon *tcon, const unsigned int xid);
+extern int smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *name, struct cifs_sb_info *cifs_sb);
+
+/*
+ * SMB2 Worker functions - most of protocol specific implementation details
+ * are contained within these calls.
+ */
+extern int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses);
+extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_cp);
+extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses);
+extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
+ const char *tree, struct cifs_tcon *tcon,
+ const struct nls_table *);
+extern int SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon);
+extern int SMB2_open(const unsigned int xid, struct cifs_tcon *tcon,
+ __le16 *path, u64 *persistent_fid, u64 *volatile_fid,
+ __u32 desired_access, __u32 create_disposition,
+ __u32 file_attributes, __u32 create_options);
+extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id);
+extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id,
+ struct smb2_file_all_info *data);
+extern int SMB2_echo(struct TCP_Server_Info *server);
+
+#endif /* _SMB2PROTO_H */
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
new file mode 100644
index 000000000000..3d5f62150de4
--- /dev/null
+++ b/fs/cifs/smb2status.h
@@ -0,0 +1,1782 @@
+/*
+ * fs/cifs/smb2status.h
+ *
+ * SMB2 Status code (network error) definitions
+ * Definitions are from MS-ERREF
+ *
+ * Copyright (c) International Business Machines Corp., 2009,2011
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * 0 1 2 3 4 5 6 7 8 9 0 A B C D E F 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * SEV C N <-------Facility--------> <------Error Status Code------>
+ *
+ * C is set if "customer defined" error, N bit is reserved and MBZ
+ */
+
+#define STATUS_SEVERITY_SUCCESS __constant_cpu_to_le32(0x0000)
+#define STATUS_SEVERITY_INFORMATIONAL __constant_cpu_to_le32(0x0001)
+#define STATUS_SEVERITY_WARNING __constant_cpu_to_le32(0x0002)
+#define STATUS_SEVERITY_ERROR __constant_cpu_to_le32(0x0003)
+
+struct ntstatus {
+ /* Facility is the high 12 bits of the following field */
+ __le32 Facility; /* low 2 bits Severity, next is Customer, then rsrvd */
+ __le32 Code;
+};
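+
+/*
+ * A minimal sketch (helper name chosen here only for illustration) of pulling
+ * the fields out of a wire-format status code, following the layout above:
+ * bits 31-30 severity, bit 29 customer-defined, bit 28 reserved, bits 27-16
+ * facility, bits 15-0 the error status code.
+ */
+static inline void smb2_decode_ntstatus(__le32 wire_status, u8 *sev,
+ u16 *facility, u16 *code)
+{
+ u32 s = le32_to_cpu(wire_status);
+
+ *sev = (s >> 30) & 0x3; /* 0 success, 1 info, 2 warning, 3 error */
+ *facility = (s >> 16) & 0xFFF;
+ *code = s & 0xFFFF;
+}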
+
+#define STATUS_SUCCESS __constant_cpu_to_le32(0x00000000)
+#define STATUS_WAIT_0 __constant_cpu_to_le32(0x00000000)
+#define STATUS_WAIT_1 __constant_cpu_to_le32(0x00000001)
+#define STATUS_WAIT_2 __constant_cpu_to_le32(0x00000002)
+#define STATUS_WAIT_3 __constant_cpu_to_le32(0x00000003)
+#define STATUS_WAIT_63 __constant_cpu_to_le32(0x0000003F)
+#define STATUS_ABANDONED __constant_cpu_to_le32(0x00000080)
+#define STATUS_ABANDONED_WAIT_0 __constant_cpu_to_le32(0x00000080)
+#define STATUS_ABANDONED_WAIT_63 __constant_cpu_to_le32(0x000000BF)
+#define STATUS_USER_APC __constant_cpu_to_le32(0x000000C0)
+#define STATUS_KERNEL_APC __constant_cpu_to_le32(0x00000100)
+#define STATUS_ALERTED __constant_cpu_to_le32(0x00000101)
+#define STATUS_TIMEOUT __constant_cpu_to_le32(0x00000102)
+#define STATUS_PENDING __constant_cpu_to_le32(0x00000103)
+#define STATUS_REPARSE __constant_cpu_to_le32(0x00000104)
+#define STATUS_MORE_ENTRIES __constant_cpu_to_le32(0x00000105)
+#define STATUS_NOT_ALL_ASSIGNED __constant_cpu_to_le32(0x00000106)
+#define STATUS_SOME_NOT_MAPPED __constant_cpu_to_le32(0x00000107)
+#define STATUS_OPLOCK_BREAK_IN_PROGRESS __constant_cpu_to_le32(0x00000108)
+#define STATUS_VOLUME_MOUNTED __constant_cpu_to_le32(0x00000109)
+#define STATUS_RXACT_COMMITTED __constant_cpu_to_le32(0x0000010A)
+#define STATUS_NOTIFY_CLEANUP __constant_cpu_to_le32(0x0000010B)
+#define STATUS_NOTIFY_ENUM_DIR __constant_cpu_to_le32(0x0000010C)
+#define STATUS_NO_QUOTAS_FOR_ACCOUNT __constant_cpu_to_le32(0x0000010D)
+#define STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED __constant_cpu_to_le32(0x0000010E)
+#define STATUS_PAGE_FAULT_TRANSITION __constant_cpu_to_le32(0x00000110)
+#define STATUS_PAGE_FAULT_DEMAND_ZERO __constant_cpu_to_le32(0x00000111)
+#define STATUS_PAGE_FAULT_COPY_ON_WRITE __constant_cpu_to_le32(0x00000112)
+#define STATUS_PAGE_FAULT_GUARD_PAGE __constant_cpu_to_le32(0x00000113)
+#define STATUS_PAGE_FAULT_PAGING_FILE __constant_cpu_to_le32(0x00000114)
+#define STATUS_CACHE_PAGE_LOCKED __constant_cpu_to_le32(0x00000115)
+#define STATUS_CRASH_DUMP __constant_cpu_to_le32(0x00000116)
+#define STATUS_BUFFER_ALL_ZEROS __constant_cpu_to_le32(0x00000117)
+#define STATUS_REPARSE_OBJECT __constant_cpu_to_le32(0x00000118)
+#define STATUS_RESOURCE_REQUIREMENTS_CHANGED __constant_cpu_to_le32(0x00000119)
+#define STATUS_TRANSLATION_COMPLETE __constant_cpu_to_le32(0x00000120)
+#define STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY __constant_cpu_to_le32(0x00000121)
+#define STATUS_NOTHING_TO_TERMINATE __constant_cpu_to_le32(0x00000122)
+#define STATUS_PROCESS_NOT_IN_JOB __constant_cpu_to_le32(0x00000123)
+#define STATUS_PROCESS_IN_JOB __constant_cpu_to_le32(0x00000124)
+#define STATUS_VOLSNAP_HIBERNATE_READY __constant_cpu_to_le32(0x00000125)
+#define STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY __constant_cpu_to_le32(0x00000126)
+#define STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED __constant_cpu_to_le32(0x00000127)
+#define STATUS_INTERRUPT_STILL_CONNECTED __constant_cpu_to_le32(0x00000128)
+#define STATUS_PROCESS_CLONED __constant_cpu_to_le32(0x00000129)
+#define STATUS_FILE_LOCKED_WITH_ONLY_READERS __constant_cpu_to_le32(0x0000012A)
+#define STATUS_FILE_LOCKED_WITH_WRITERS __constant_cpu_to_le32(0x0000012B)
+#define STATUS_RESOURCEMANAGER_READ_ONLY __constant_cpu_to_le32(0x00000202)
+#define STATUS_WAIT_FOR_OPLOCK __constant_cpu_to_le32(0x00000367)
+#define DBG_EXCEPTION_HANDLED __constant_cpu_to_le32(0x00010001)
+#define DBG_CONTINUE __constant_cpu_to_le32(0x00010002)
+#define STATUS_FLT_IO_COMPLETE __constant_cpu_to_le32(0x001C0001)
+#define STATUS_OBJECT_NAME_EXISTS __constant_cpu_to_le32(0x40000000)
+#define STATUS_THREAD_WAS_SUSPENDED __constant_cpu_to_le32(0x40000001)
+#define STATUS_WORKING_SET_LIMIT_RANGE __constant_cpu_to_le32(0x40000002)
+#define STATUS_IMAGE_NOT_AT_BASE __constant_cpu_to_le32(0x40000003)
+#define STATUS_RXACT_STATE_CREATED __constant_cpu_to_le32(0x40000004)
+#define STATUS_SEGMENT_NOTIFICATION __constant_cpu_to_le32(0x40000005)
+#define STATUS_LOCAL_USER_SESSION_KEY __constant_cpu_to_le32(0x40000006)
+#define STATUS_BAD_CURRENT_DIRECTORY __constant_cpu_to_le32(0x40000007)
+#define STATUS_SERIAL_MORE_WRITES __constant_cpu_to_le32(0x40000008)
+#define STATUS_REGISTRY_RECOVERED __constant_cpu_to_le32(0x40000009)
+#define STATUS_FT_READ_RECOVERY_FROM_BACKUP __constant_cpu_to_le32(0x4000000A)
+#define STATUS_FT_WRITE_RECOVERY __constant_cpu_to_le32(0x4000000B)
+#define STATUS_SERIAL_COUNTER_TIMEOUT __constant_cpu_to_le32(0x4000000C)
+#define STATUS_NULL_LM_PASSWORD __constant_cpu_to_le32(0x4000000D)
+#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH __constant_cpu_to_le32(0x4000000E)
+#define STATUS_RECEIVE_PARTIAL __constant_cpu_to_le32(0x4000000F)
+#define STATUS_RECEIVE_EXPEDITED __constant_cpu_to_le32(0x40000010)
+#define STATUS_RECEIVE_PARTIAL_EXPEDITED __constant_cpu_to_le32(0x40000011)
+#define STATUS_EVENT_DONE __constant_cpu_to_le32(0x40000012)
+#define STATUS_EVENT_PENDING __constant_cpu_to_le32(0x40000013)
+#define STATUS_CHECKING_FILE_SYSTEM __constant_cpu_to_le32(0x40000014)
+#define STATUS_FATAL_APP_EXIT __constant_cpu_to_le32(0x40000015)
+#define STATUS_PREDEFINED_HANDLE __constant_cpu_to_le32(0x40000016)
+#define STATUS_WAS_UNLOCKED __constant_cpu_to_le32(0x40000017)
+#define STATUS_SERVICE_NOTIFICATION __constant_cpu_to_le32(0x40000018)
+#define STATUS_WAS_LOCKED __constant_cpu_to_le32(0x40000019)
+#define STATUS_LOG_HARD_ERROR __constant_cpu_to_le32(0x4000001A)
+#define STATUS_ALREADY_WIN32 __constant_cpu_to_le32(0x4000001B)
+#define STATUS_WX86_UNSIMULATE __constant_cpu_to_le32(0x4000001C)
+#define STATUS_WX86_CONTINUE __constant_cpu_to_le32(0x4000001D)
+#define STATUS_WX86_SINGLE_STEP __constant_cpu_to_le32(0x4000001E)
+#define STATUS_WX86_BREAKPOINT __constant_cpu_to_le32(0x4000001F)
+#define STATUS_WX86_EXCEPTION_CONTINUE __constant_cpu_to_le32(0x40000020)
+#define STATUS_WX86_EXCEPTION_LASTCHANCE __constant_cpu_to_le32(0x40000021)
+#define STATUS_WX86_EXCEPTION_CHAIN __constant_cpu_to_le32(0x40000022)
+#define STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE __constant_cpu_to_le32(0x40000023)
+#define STATUS_NO_YIELD_PERFORMED __constant_cpu_to_le32(0x40000024)
+#define STATUS_TIMER_RESUME_IGNORED __constant_cpu_to_le32(0x40000025)
+#define STATUS_ARBITRATION_UNHANDLED __constant_cpu_to_le32(0x40000026)
+#define STATUS_CARDBUS_NOT_SUPPORTED __constant_cpu_to_le32(0x40000027)
+#define STATUS_WX86_CREATEWX86TIB __constant_cpu_to_le32(0x40000028)
+#define STATUS_MP_PROCESSOR_MISMATCH __constant_cpu_to_le32(0x40000029)
+#define STATUS_HIBERNATED __constant_cpu_to_le32(0x4000002A)
+#define STATUS_RESUME_HIBERNATION __constant_cpu_to_le32(0x4000002B)
+#define STATUS_FIRMWARE_UPDATED __constant_cpu_to_le32(0x4000002C)
+#define STATUS_DRIVERS_LEAKING_LOCKED_PAGES __constant_cpu_to_le32(0x4000002D)
+#define STATUS_MESSAGE_RETRIEVED __constant_cpu_to_le32(0x4000002E)
+#define STATUS_SYSTEM_POWERSTATE_TRANSITION __constant_cpu_to_le32(0x4000002F)
+#define STATUS_ALPC_CHECK_COMPLETION_LIST __constant_cpu_to_le32(0x40000030)
+#define STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION __constant_cpu_to_le32(0x40000031)
+#define STATUS_ACCESS_AUDIT_BY_POLICY __constant_cpu_to_le32(0x40000032)
+#define STATUS_ABANDON_HIBERFILE __constant_cpu_to_le32(0x40000033)
+#define STATUS_BIZRULES_NOT_ENABLED __constant_cpu_to_le32(0x40000034)
+#define STATUS_WAKE_SYSTEM __constant_cpu_to_le32(0x40000294)
+#define STATUS_DS_SHUTTING_DOWN __constant_cpu_to_le32(0x40000370)
+#define DBG_REPLY_LATER __constant_cpu_to_le32(0x40010001)
+#define DBG_UNABLE_TO_PROVIDE_HANDLE __constant_cpu_to_le32(0x40010002)
+#define DBG_TERMINATE_THREAD __constant_cpu_to_le32(0x40010003)
+#define DBG_TERMINATE_PROCESS __constant_cpu_to_le32(0x40010004)
+#define DBG_CONTROL_C __constant_cpu_to_le32(0x40010005)
+#define DBG_PRINTEXCEPTION_C __constant_cpu_to_le32(0x40010006)
+#define DBG_RIPEXCEPTION __constant_cpu_to_le32(0x40010007)
+#define DBG_CONTROL_BREAK __constant_cpu_to_le32(0x40010008)
+#define DBG_COMMAND_EXCEPTION __constant_cpu_to_le32(0x40010009)
+#define RPC_NT_UUID_LOCAL_ONLY __constant_cpu_to_le32(0x40020056)
+#define RPC_NT_SEND_INCOMPLETE __constant_cpu_to_le32(0x400200AF)
+#define STATUS_CTX_CDM_CONNECT __constant_cpu_to_le32(0x400A0004)
+#define STATUS_CTX_CDM_DISCONNECT __constant_cpu_to_le32(0x400A0005)
+#define STATUS_SXS_RELEASE_ACTIVATION_CONTEXT __constant_cpu_to_le32(0x4015000D)
+#define STATUS_RECOVERY_NOT_NEEDED __constant_cpu_to_le32(0x40190034)
+#define STATUS_RM_ALREADY_STARTED __constant_cpu_to_le32(0x40190035)
+#define STATUS_LOG_NO_RESTART __constant_cpu_to_le32(0x401A000C)
+#define STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST __constant_cpu_to_le32(0x401B00EC)
+#define STATUS_GRAPHICS_PARTIAL_DATA_POPULATED __constant_cpu_to_le32(0x401E000A)
+#define STATUS_GRAPHICS_DRIVER_MISMATCH __constant_cpu_to_le32(0x401E0117)
+#define STATUS_GRAPHICS_MODE_NOT_PINNED __constant_cpu_to_le32(0x401E0307)
+#define STATUS_GRAPHICS_NO_PREFERRED_MODE __constant_cpu_to_le32(0x401E031E)
+#define STATUS_GRAPHICS_DATASET_IS_EMPTY __constant_cpu_to_le32(0x401E034B)
+#define STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET __constant_cpu_to_le32(0x401E034C)
+#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED __constant_cpu_to_le32(0x401E0351)
+#define STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS __constant_cpu_to_le32(0x401E042F)
+#define STATUS_GRAPHICS_LEADLINK_START_DEFERRED __constant_cpu_to_le32(0x401E0437)
+#define STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY __constant_cpu_to_le32(0x401E0439)
+#define STATUS_GRAPHICS_START_DEFERRED __constant_cpu_to_le32(0x401E043A)
+#define STATUS_NDIS_INDICATION_REQUIRED __constant_cpu_to_le32(0x40230001)
+#define STATUS_GUARD_PAGE_VIOLATION __constant_cpu_to_le32(0x80000001)
+#define STATUS_DATATYPE_MISALIGNMENT __constant_cpu_to_le32(0x80000002)
+#define STATUS_BREAKPOINT __constant_cpu_to_le32(0x80000003)
+#define STATUS_SINGLE_STEP __constant_cpu_to_le32(0x80000004)
+#define STATUS_BUFFER_OVERFLOW __constant_cpu_to_le32(0x80000005)
+#define STATUS_NO_MORE_FILES __constant_cpu_to_le32(0x80000006)
+#define STATUS_WAKE_SYSTEM_DEBUGGER __constant_cpu_to_le32(0x80000007)
+#define STATUS_HANDLES_CLOSED __constant_cpu_to_le32(0x8000000A)
+#define STATUS_NO_INHERITANCE __constant_cpu_to_le32(0x8000000B)
+#define STATUS_GUID_SUBSTITUTION_MADE __constant_cpu_to_le32(0x8000000C)
+#define STATUS_PARTIAL_COPY __constant_cpu_to_le32(0x8000000D)
+#define STATUS_DEVICE_PAPER_EMPTY __constant_cpu_to_le32(0x8000000E)
+#define STATUS_DEVICE_POWERED_OFF __constant_cpu_to_le32(0x8000000F)
+#define STATUS_DEVICE_OFF_LINE __constant_cpu_to_le32(0x80000010)
+#define STATUS_DEVICE_BUSY __constant_cpu_to_le32(0x80000011)
+#define STATUS_NO_MORE_EAS __constant_cpu_to_le32(0x80000012)
+#define STATUS_INVALID_EA_NAME __constant_cpu_to_le32(0x80000013)
+#define STATUS_EA_LIST_INCONSISTENT __constant_cpu_to_le32(0x80000014)
+#define STATUS_INVALID_EA_FLAG __constant_cpu_to_le32(0x80000015)
+#define STATUS_VERIFY_REQUIRED __constant_cpu_to_le32(0x80000016)
+#define STATUS_EXTRANEOUS_INFORMATION __constant_cpu_to_le32(0x80000017)
+#define STATUS_RXACT_COMMIT_NECESSARY __constant_cpu_to_le32(0x80000018)
+#define STATUS_NO_MORE_ENTRIES __constant_cpu_to_le32(0x8000001A)
+#define STATUS_FILEMARK_DETECTED __constant_cpu_to_le32(0x8000001B)
+#define STATUS_MEDIA_CHANGED __constant_cpu_to_le32(0x8000001C)
+#define STATUS_BUS_RESET __constant_cpu_to_le32(0x8000001D)
+#define STATUS_END_OF_MEDIA __constant_cpu_to_le32(0x8000001E)
+#define STATUS_BEGINNING_OF_MEDIA __constant_cpu_to_le32(0x8000001F)
+#define STATUS_MEDIA_CHECK __constant_cpu_to_le32(0x80000020)
+#define STATUS_SETMARK_DETECTED __constant_cpu_to_le32(0x80000021)
+#define STATUS_NO_DATA_DETECTED __constant_cpu_to_le32(0x80000022)
+#define STATUS_REDIRECTOR_HAS_OPEN_HANDLES __constant_cpu_to_le32(0x80000023)
+#define STATUS_SERVER_HAS_OPEN_HANDLES __constant_cpu_to_le32(0x80000024)
+#define STATUS_ALREADY_DISCONNECTED __constant_cpu_to_le32(0x80000025)
+#define STATUS_LONGJUMP __constant_cpu_to_le32(0x80000026)
+#define STATUS_CLEANER_CARTRIDGE_INSTALLED __constant_cpu_to_le32(0x80000027)
+#define STATUS_PLUGPLAY_QUERY_VETOED __constant_cpu_to_le32(0x80000028)
+#define STATUS_UNWIND_CONSOLIDATE __constant_cpu_to_le32(0x80000029)
+#define STATUS_REGISTRY_HIVE_RECOVERED __constant_cpu_to_le32(0x8000002A)
+#define STATUS_DLL_MIGHT_BE_INSECURE __constant_cpu_to_le32(0x8000002B)
+#define STATUS_DLL_MIGHT_BE_INCOMPATIBLE __constant_cpu_to_le32(0x8000002C)
+#define STATUS_STOPPED_ON_SYMLINK __constant_cpu_to_le32(0x8000002D)
+#define STATUS_DEVICE_REQUIRES_CLEANING __constant_cpu_to_le32(0x80000288)
+#define STATUS_DEVICE_DOOR_OPEN __constant_cpu_to_le32(0x80000289)
+#define STATUS_DATA_LOST_REPAIR __constant_cpu_to_le32(0x80000803)
+#define DBG_EXCEPTION_NOT_HANDLED __constant_cpu_to_le32(0x80010001)
+#define STATUS_CLUSTER_NODE_ALREADY_UP __constant_cpu_to_le32(0x80130001)
+#define STATUS_CLUSTER_NODE_ALREADY_DOWN __constant_cpu_to_le32(0x80130002)
+#define STATUS_CLUSTER_NETWORK_ALREADY_ONLINE __constant_cpu_to_le32(0x80130003)
+#define STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE __constant_cpu_to_le32(0x80130004)
+#define STATUS_CLUSTER_NODE_ALREADY_MEMBER __constant_cpu_to_le32(0x80130005)
+#define STATUS_COULD_NOT_RESIZE_LOG __constant_cpu_to_le32(0x80190009)
+#define STATUS_NO_TXF_METADATA __constant_cpu_to_le32(0x80190029)
+#define STATUS_CANT_RECOVER_WITH_HANDLE_OPEN __constant_cpu_to_le32(0x80190031)
+#define STATUS_TXF_METADATA_ALREADY_PRESENT __constant_cpu_to_le32(0x80190041)
+#define STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET __constant_cpu_to_le32(0x80190042)
+#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED __constant_cpu_to_le32(0x801B00EB)
+#define STATUS_FLT_BUFFER_TOO_SMALL __constant_cpu_to_le32(0x801C0001)
+#define STATUS_FVE_PARTIAL_METADATA __constant_cpu_to_le32(0x80210001)
+#define STATUS_UNSUCCESSFUL __constant_cpu_to_le32(0xC0000001)
+#define STATUS_NOT_IMPLEMENTED __constant_cpu_to_le32(0xC0000002)
+#define STATUS_INVALID_INFO_CLASS __constant_cpu_to_le32(0xC0000003)
+#define STATUS_INFO_LENGTH_MISMATCH __constant_cpu_to_le32(0xC0000004)
+#define STATUS_ACCESS_VIOLATION __constant_cpu_to_le32(0xC0000005)
+#define STATUS_IN_PAGE_ERROR __constant_cpu_to_le32(0xC0000006)
+#define STATUS_PAGEFILE_QUOTA __constant_cpu_to_le32(0xC0000007)
+#define STATUS_INVALID_HANDLE __constant_cpu_to_le32(0xC0000008)
+#define STATUS_BAD_INITIAL_STACK __constant_cpu_to_le32(0xC0000009)
+#define STATUS_BAD_INITIAL_PC __constant_cpu_to_le32(0xC000000A)
+#define STATUS_INVALID_CID __constant_cpu_to_le32(0xC000000B)
+#define STATUS_TIMER_NOT_CANCELED __constant_cpu_to_le32(0xC000000C)
+#define STATUS_INVALID_PARAMETER __constant_cpu_to_le32(0xC000000D)
+#define STATUS_NO_SUCH_DEVICE __constant_cpu_to_le32(0xC000000E)
+#define STATUS_NO_SUCH_FILE __constant_cpu_to_le32(0xC000000F)
+#define STATUS_INVALID_DEVICE_REQUEST __constant_cpu_to_le32(0xC0000010)
+#define STATUS_END_OF_FILE __constant_cpu_to_le32(0xC0000011)
+#define STATUS_WRONG_VOLUME __constant_cpu_to_le32(0xC0000012)
+#define STATUS_NO_MEDIA_IN_DEVICE __constant_cpu_to_le32(0xC0000013)
+#define STATUS_UNRECOGNIZED_MEDIA __constant_cpu_to_le32(0xC0000014)
+#define STATUS_NONEXISTENT_SECTOR __constant_cpu_to_le32(0xC0000015)
+#define STATUS_MORE_PROCESSING_REQUIRED __constant_cpu_to_le32(0xC0000016)
+#define STATUS_NO_MEMORY __constant_cpu_to_le32(0xC0000017)
+#define STATUS_CONFLICTING_ADDRESSES __constant_cpu_to_le32(0xC0000018)
+#define STATUS_NOT_MAPPED_VIEW __constant_cpu_to_le32(0xC0000019)
+#define STATUS_UNABLE_TO_FREE_VM __constant_cpu_to_le32(0xC000001A)
+#define STATUS_UNABLE_TO_DELETE_SECTION __constant_cpu_to_le32(0xC000001B)
+#define STATUS_INVALID_SYSTEM_SERVICE __constant_cpu_to_le32(0xC000001C)
+#define STATUS_ILLEGAL_INSTRUCTION __constant_cpu_to_le32(0xC000001D)
+#define STATUS_INVALID_LOCK_SEQUENCE __constant_cpu_to_le32(0xC000001E)
+#define STATUS_INVALID_VIEW_SIZE __constant_cpu_to_le32(0xC000001F)
+#define STATUS_INVALID_FILE_FOR_SECTION __constant_cpu_to_le32(0xC0000020)
+#define STATUS_ALREADY_COMMITTED __constant_cpu_to_le32(0xC0000021)
+#define STATUS_ACCESS_DENIED __constant_cpu_to_le32(0xC0000022)
+#define STATUS_BUFFER_TOO_SMALL __constant_cpu_to_le32(0xC0000023)
+#define STATUS_OBJECT_TYPE_MISMATCH __constant_cpu_to_le32(0xC0000024)
+#define STATUS_NONCONTINUABLE_EXCEPTION __constant_cpu_to_le32(0xC0000025)
+#define STATUS_INVALID_DISPOSITION __constant_cpu_to_le32(0xC0000026)
+#define STATUS_UNWIND __constant_cpu_to_le32(0xC0000027)
+#define STATUS_BAD_STACK __constant_cpu_to_le32(0xC0000028)
+#define STATUS_INVALID_UNWIND_TARGET __constant_cpu_to_le32(0xC0000029)
+#define STATUS_NOT_LOCKED __constant_cpu_to_le32(0xC000002A)
+#define STATUS_PARITY_ERROR __constant_cpu_to_le32(0xC000002B)
+#define STATUS_UNABLE_TO_DECOMMIT_VM __constant_cpu_to_le32(0xC000002C)
+#define STATUS_NOT_COMMITTED __constant_cpu_to_le32(0xC000002D)
+#define STATUS_INVALID_PORT_ATTRIBUTES __constant_cpu_to_le32(0xC000002E)
+#define STATUS_PORT_MESSAGE_TOO_LONG __constant_cpu_to_le32(0xC000002F)
+#define STATUS_INVALID_PARAMETER_MIX __constant_cpu_to_le32(0xC0000030)
+#define STATUS_INVALID_QUOTA_LOWER __constant_cpu_to_le32(0xC0000031)
+#define STATUS_DISK_CORRUPT_ERROR __constant_cpu_to_le32(0xC0000032)
+#define STATUS_OBJECT_NAME_INVALID __constant_cpu_to_le32(0xC0000033)
+#define STATUS_OBJECT_NAME_NOT_FOUND __constant_cpu_to_le32(0xC0000034)
+#define STATUS_OBJECT_NAME_COLLISION __constant_cpu_to_le32(0xC0000035)
+#define STATUS_PORT_DISCONNECTED __constant_cpu_to_le32(0xC0000037)
+#define STATUS_DEVICE_ALREADY_ATTACHED __constant_cpu_to_le32(0xC0000038)
+#define STATUS_OBJECT_PATH_INVALID __constant_cpu_to_le32(0xC0000039)
+#define STATUS_OBJECT_PATH_NOT_FOUND __constant_cpu_to_le32(0xC000003A)
+#define STATUS_OBJECT_PATH_SYNTAX_BAD __constant_cpu_to_le32(0xC000003B)
+#define STATUS_DATA_OVERRUN __constant_cpu_to_le32(0xC000003C)
+#define STATUS_DATA_LATE_ERROR __constant_cpu_to_le32(0xC000003D)
+#define STATUS_DATA_ERROR __constant_cpu_to_le32(0xC000003E)
+#define STATUS_CRC_ERROR __constant_cpu_to_le32(0xC000003F)
+#define STATUS_SECTION_TOO_BIG __constant_cpu_to_le32(0xC0000040)
+#define STATUS_PORT_CONNECTION_REFUSED __constant_cpu_to_le32(0xC0000041)
+#define STATUS_INVALID_PORT_HANDLE __constant_cpu_to_le32(0xC0000042)
+#define STATUS_SHARING_VIOLATION __constant_cpu_to_le32(0xC0000043)
+#define STATUS_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000044)
+#define STATUS_INVALID_PAGE_PROTECTION __constant_cpu_to_le32(0xC0000045)
+#define STATUS_MUTANT_NOT_OWNED __constant_cpu_to_le32(0xC0000046)
+#define STATUS_SEMAPHORE_LIMIT_EXCEEDED __constant_cpu_to_le32(0xC0000047)
+#define STATUS_PORT_ALREADY_SET __constant_cpu_to_le32(0xC0000048)
+#define STATUS_SECTION_NOT_IMAGE __constant_cpu_to_le32(0xC0000049)
+#define STATUS_SUSPEND_COUNT_EXCEEDED __constant_cpu_to_le32(0xC000004A)
+#define STATUS_THREAD_IS_TERMINATING __constant_cpu_to_le32(0xC000004B)
+#define STATUS_BAD_WORKING_SET_LIMIT __constant_cpu_to_le32(0xC000004C)
+#define STATUS_INCOMPATIBLE_FILE_MAP __constant_cpu_to_le32(0xC000004D)
+#define STATUS_SECTION_PROTECTION __constant_cpu_to_le32(0xC000004E)
+#define STATUS_EAS_NOT_SUPPORTED __constant_cpu_to_le32(0xC000004F)
+#define STATUS_EA_TOO_LARGE __constant_cpu_to_le32(0xC0000050)
+#define STATUS_NONEXISTENT_EA_ENTRY __constant_cpu_to_le32(0xC0000051)
+#define STATUS_NO_EAS_ON_FILE __constant_cpu_to_le32(0xC0000052)
+#define STATUS_EA_CORRUPT_ERROR __constant_cpu_to_le32(0xC0000053)
+#define STATUS_FILE_LOCK_CONFLICT __constant_cpu_to_le32(0xC0000054)
+#define STATUS_LOCK_NOT_GRANTED __constant_cpu_to_le32(0xC0000055)
+#define STATUS_DELETE_PENDING __constant_cpu_to_le32(0xC0000056)
+#define STATUS_CTL_FILE_NOT_SUPPORTED __constant_cpu_to_le32(0xC0000057)
+#define STATUS_UNKNOWN_REVISION __constant_cpu_to_le32(0xC0000058)
+#define STATUS_REVISION_MISMATCH __constant_cpu_to_le32(0xC0000059)
+#define STATUS_INVALID_OWNER __constant_cpu_to_le32(0xC000005A)
+#define STATUS_INVALID_PRIMARY_GROUP __constant_cpu_to_le32(0xC000005B)
+#define STATUS_NO_IMPERSONATION_TOKEN __constant_cpu_to_le32(0xC000005C)
+#define STATUS_CANT_DISABLE_MANDATORY __constant_cpu_to_le32(0xC000005D)
+#define STATUS_NO_LOGON_SERVERS __constant_cpu_to_le32(0xC000005E)
+#define STATUS_NO_SUCH_LOGON_SESSION __constant_cpu_to_le32(0xC000005F)
+#define STATUS_NO_SUCH_PRIVILEGE __constant_cpu_to_le32(0xC0000060)
+#define STATUS_PRIVILEGE_NOT_HELD __constant_cpu_to_le32(0xC0000061)
+#define STATUS_INVALID_ACCOUNT_NAME __constant_cpu_to_le32(0xC0000062)
+#define STATUS_USER_EXISTS __constant_cpu_to_le32(0xC0000063)
+#define STATUS_NO_SUCH_USER __constant_cpu_to_le32(0xC0000064)
+#define STATUS_GROUP_EXISTS __constant_cpu_to_le32(0xC0000065)
+#define STATUS_NO_SUCH_GROUP __constant_cpu_to_le32(0xC0000066)
+#define STATUS_MEMBER_IN_GROUP __constant_cpu_to_le32(0xC0000067)
+#define STATUS_MEMBER_NOT_IN_GROUP __constant_cpu_to_le32(0xC0000068)
+#define STATUS_LAST_ADMIN __constant_cpu_to_le32(0xC0000069)
+#define STATUS_WRONG_PASSWORD __constant_cpu_to_le32(0xC000006A)
+#define STATUS_ILL_FORMED_PASSWORD __constant_cpu_to_le32(0xC000006B)
+#define STATUS_PASSWORD_RESTRICTION __constant_cpu_to_le32(0xC000006C)
+#define STATUS_LOGON_FAILURE __constant_cpu_to_le32(0xC000006D)
+#define STATUS_ACCOUNT_RESTRICTION __constant_cpu_to_le32(0xC000006E)
+#define STATUS_INVALID_LOGON_HOURS __constant_cpu_to_le32(0xC000006F)
+#define STATUS_INVALID_WORKSTATION __constant_cpu_to_le32(0xC0000070)
+#define STATUS_PASSWORD_EXPIRED __constant_cpu_to_le32(0xC0000071)
+#define STATUS_ACCOUNT_DISABLED __constant_cpu_to_le32(0xC0000072)
+#define STATUS_NONE_MAPPED __constant_cpu_to_le32(0xC0000073)
+#define STATUS_TOO_MANY_LUIDS_REQUESTED __constant_cpu_to_le32(0xC0000074)
+#define STATUS_LUIDS_EXHAUSTED __constant_cpu_to_le32(0xC0000075)
+#define STATUS_INVALID_SUB_AUTHORITY __constant_cpu_to_le32(0xC0000076)
+#define STATUS_INVALID_ACL __constant_cpu_to_le32(0xC0000077)
+#define STATUS_INVALID_SID __constant_cpu_to_le32(0xC0000078)
+#define STATUS_INVALID_SECURITY_DESCR __constant_cpu_to_le32(0xC0000079)
+#define STATUS_PROCEDURE_NOT_FOUND __constant_cpu_to_le32(0xC000007A)
+#define STATUS_INVALID_IMAGE_FORMAT __constant_cpu_to_le32(0xC000007B)
+#define STATUS_NO_TOKEN __constant_cpu_to_le32(0xC000007C)
+#define STATUS_BAD_INHERITANCE_ACL __constant_cpu_to_le32(0xC000007D)
+#define STATUS_RANGE_NOT_LOCKED __constant_cpu_to_le32(0xC000007E)
+#define STATUS_DISK_FULL __constant_cpu_to_le32(0xC000007F)
+#define STATUS_SERVER_DISABLED __constant_cpu_to_le32(0xC0000080)
+#define STATUS_SERVER_NOT_DISABLED __constant_cpu_to_le32(0xC0000081)
+#define STATUS_TOO_MANY_GUIDS_REQUESTED __constant_cpu_to_le32(0xC0000082)
+#define STATUS_GUIDS_EXHAUSTED __constant_cpu_to_le32(0xC0000083)
+#define STATUS_INVALID_ID_AUTHORITY __constant_cpu_to_le32(0xC0000084)
+#define STATUS_AGENTS_EXHAUSTED __constant_cpu_to_le32(0xC0000085)
+#define STATUS_INVALID_VOLUME_LABEL __constant_cpu_to_le32(0xC0000086)
+#define STATUS_SECTION_NOT_EXTENDED __constant_cpu_to_le32(0xC0000087)
+#define STATUS_NOT_MAPPED_DATA __constant_cpu_to_le32(0xC0000088)
+#define STATUS_RESOURCE_DATA_NOT_FOUND __constant_cpu_to_le32(0xC0000089)
+#define STATUS_RESOURCE_TYPE_NOT_FOUND __constant_cpu_to_le32(0xC000008A)
+#define STATUS_RESOURCE_NAME_NOT_FOUND __constant_cpu_to_le32(0xC000008B)
+#define STATUS_ARRAY_BOUNDS_EXCEEDED __constant_cpu_to_le32(0xC000008C)
+#define STATUS_FLOAT_DENORMAL_OPERAND __constant_cpu_to_le32(0xC000008D)
+#define STATUS_FLOAT_DIVIDE_BY_ZERO __constant_cpu_to_le32(0xC000008E)
+#define STATUS_FLOAT_INEXACT_RESULT __constant_cpu_to_le32(0xC000008F)
+#define STATUS_FLOAT_INVALID_OPERATION __constant_cpu_to_le32(0xC0000090)
+#define STATUS_FLOAT_OVERFLOW __constant_cpu_to_le32(0xC0000091)
+#define STATUS_FLOAT_STACK_CHECK __constant_cpu_to_le32(0xC0000092)
+#define STATUS_FLOAT_UNDERFLOW __constant_cpu_to_le32(0xC0000093)
+#define STATUS_INTEGER_DIVIDE_BY_ZERO __constant_cpu_to_le32(0xC0000094)
+#define STATUS_INTEGER_OVERFLOW __constant_cpu_to_le32(0xC0000095)
+#define STATUS_PRIVILEGED_INSTRUCTION __constant_cpu_to_le32(0xC0000096)
+#define STATUS_TOO_MANY_PAGING_FILES __constant_cpu_to_le32(0xC0000097)
+#define STATUS_FILE_INVALID __constant_cpu_to_le32(0xC0000098)
+#define STATUS_ALLOTTED_SPACE_EXCEEDED __constant_cpu_to_le32(0xC0000099)
+#define STATUS_INSUFFICIENT_RESOURCES __constant_cpu_to_le32(0xC000009A)
+#define STATUS_DFS_EXIT_PATH_FOUND __constant_cpu_to_le32(0xC000009B)
+#define STATUS_DEVICE_DATA_ERROR __constant_cpu_to_le32(0xC000009C)
+#define STATUS_DEVICE_NOT_CONNECTED __constant_cpu_to_le32(0xC000009D)
+#define STATUS_DEVICE_POWER_FAILURE __constant_cpu_to_le32(0xC000009E)
+#define STATUS_FREE_VM_NOT_AT_BASE __constant_cpu_to_le32(0xC000009F)
+#define STATUS_MEMORY_NOT_ALLOCATED __constant_cpu_to_le32(0xC00000A0)
+#define STATUS_WORKING_SET_QUOTA __constant_cpu_to_le32(0xC00000A1)
+#define STATUS_MEDIA_WRITE_PROTECTED __constant_cpu_to_le32(0xC00000A2)
+#define STATUS_DEVICE_NOT_READY __constant_cpu_to_le32(0xC00000A3)
+#define STATUS_INVALID_GROUP_ATTRIBUTES __constant_cpu_to_le32(0xC00000A4)
+#define STATUS_BAD_IMPERSONATION_LEVEL __constant_cpu_to_le32(0xC00000A5)
+#define STATUS_CANT_OPEN_ANONYMOUS __constant_cpu_to_le32(0xC00000A6)
+#define STATUS_BAD_VALIDATION_CLASS __constant_cpu_to_le32(0xC00000A7)
+#define STATUS_BAD_TOKEN_TYPE __constant_cpu_to_le32(0xC00000A8)
+#define STATUS_BAD_MASTER_BOOT_RECORD __constant_cpu_to_le32(0xC00000A9)
+#define STATUS_INSTRUCTION_MISALIGNMENT __constant_cpu_to_le32(0xC00000AA)
+#define STATUS_INSTANCE_NOT_AVAILABLE __constant_cpu_to_le32(0xC00000AB)
+#define STATUS_PIPE_NOT_AVAILABLE __constant_cpu_to_le32(0xC00000AC)
+#define STATUS_INVALID_PIPE_STATE __constant_cpu_to_le32(0xC00000AD)
+#define STATUS_PIPE_BUSY __constant_cpu_to_le32(0xC00000AE)
+#define STATUS_ILLEGAL_FUNCTION __constant_cpu_to_le32(0xC00000AF)
+#define STATUS_PIPE_DISCONNECTED __constant_cpu_to_le32(0xC00000B0)
+#define STATUS_PIPE_CLOSING __constant_cpu_to_le32(0xC00000B1)
+#define STATUS_PIPE_CONNECTED __constant_cpu_to_le32(0xC00000B2)
+#define STATUS_PIPE_LISTENING __constant_cpu_to_le32(0xC00000B3)
+#define STATUS_INVALID_READ_MODE __constant_cpu_to_le32(0xC00000B4)
+#define STATUS_IO_TIMEOUT __constant_cpu_to_le32(0xC00000B5)
+#define STATUS_FILE_FORCED_CLOSED __constant_cpu_to_le32(0xC00000B6)
+#define STATUS_PROFILING_NOT_STARTED __constant_cpu_to_le32(0xC00000B7)
+#define STATUS_PROFILING_NOT_STOPPED __constant_cpu_to_le32(0xC00000B8)
+#define STATUS_COULD_NOT_INTERPRET __constant_cpu_to_le32(0xC00000B9)
+#define STATUS_FILE_IS_A_DIRECTORY __constant_cpu_to_le32(0xC00000BA)
+#define STATUS_NOT_SUPPORTED __constant_cpu_to_le32(0xC00000BB)
+#define STATUS_REMOTE_NOT_LISTENING __constant_cpu_to_le32(0xC00000BC)
+#define STATUS_DUPLICATE_NAME __constant_cpu_to_le32(0xC00000BD)
+#define STATUS_BAD_NETWORK_PATH __constant_cpu_to_le32(0xC00000BE)
+#define STATUS_NETWORK_BUSY __constant_cpu_to_le32(0xC00000BF)
+#define STATUS_DEVICE_DOES_NOT_EXIST __constant_cpu_to_le32(0xC00000C0)
+#define STATUS_TOO_MANY_COMMANDS __constant_cpu_to_le32(0xC00000C1)
+#define STATUS_ADAPTER_HARDWARE_ERROR __constant_cpu_to_le32(0xC00000C2)
+#define STATUS_INVALID_NETWORK_RESPONSE __constant_cpu_to_le32(0xC00000C3)
+#define STATUS_UNEXPECTED_NETWORK_ERROR __constant_cpu_to_le32(0xC00000C4)
+#define STATUS_BAD_REMOTE_ADAPTER __constant_cpu_to_le32(0xC00000C5)
+#define STATUS_PRINT_QUEUE_FULL __constant_cpu_to_le32(0xC00000C6)
+#define STATUS_NO_SPOOL_SPACE __constant_cpu_to_le32(0xC00000C7)
+#define STATUS_PRINT_CANCELLED __constant_cpu_to_le32(0xC00000C8)
+#define STATUS_NETWORK_NAME_DELETED __constant_cpu_to_le32(0xC00000C9)
+#define STATUS_NETWORK_ACCESS_DENIED __constant_cpu_to_le32(0xC00000CA)
+#define STATUS_BAD_DEVICE_TYPE __constant_cpu_to_le32(0xC00000CB)
+#define STATUS_BAD_NETWORK_NAME __constant_cpu_to_le32(0xC00000CC)
+#define STATUS_TOO_MANY_NAMES __constant_cpu_to_le32(0xC00000CD)
+#define STATUS_TOO_MANY_SESSIONS __constant_cpu_to_le32(0xC00000CE)
+#define STATUS_SHARING_PAUSED __constant_cpu_to_le32(0xC00000CF)
+#define STATUS_REQUEST_NOT_ACCEPTED __constant_cpu_to_le32(0xC00000D0)
+#define STATUS_REDIRECTOR_PAUSED __constant_cpu_to_le32(0xC00000D1)
+#define STATUS_NET_WRITE_FAULT __constant_cpu_to_le32(0xC00000D2)
+#define STATUS_PROFILING_AT_LIMIT __constant_cpu_to_le32(0xC00000D3)
+#define STATUS_NOT_SAME_DEVICE __constant_cpu_to_le32(0xC00000D4)
+#define STATUS_FILE_RENAMED __constant_cpu_to_le32(0xC00000D5)
+#define STATUS_VIRTUAL_CIRCUIT_CLOSED __constant_cpu_to_le32(0xC00000D6)
+#define STATUS_NO_SECURITY_ON_OBJECT __constant_cpu_to_le32(0xC00000D7)
+#define STATUS_CANT_WAIT __constant_cpu_to_le32(0xC00000D8)
+#define STATUS_PIPE_EMPTY __constant_cpu_to_le32(0xC00000D9)
+#define STATUS_CANT_ACCESS_DOMAIN_INFO __constant_cpu_to_le32(0xC00000DA)
+#define STATUS_CANT_TERMINATE_SELF __constant_cpu_to_le32(0xC00000DB)
+#define STATUS_INVALID_SERVER_STATE __constant_cpu_to_le32(0xC00000DC)
+#define STATUS_INVALID_DOMAIN_STATE __constant_cpu_to_le32(0xC00000DD)
+#define STATUS_INVALID_DOMAIN_ROLE __constant_cpu_to_le32(0xC00000DE)
+#define STATUS_NO_SUCH_DOMAIN __constant_cpu_to_le32(0xC00000DF)
+#define STATUS_DOMAIN_EXISTS __constant_cpu_to_le32(0xC00000E0)
+#define STATUS_DOMAIN_LIMIT_EXCEEDED __constant_cpu_to_le32(0xC00000E1)
+#define STATUS_OPLOCK_NOT_GRANTED __constant_cpu_to_le32(0xC00000E2)
+#define STATUS_INVALID_OPLOCK_PROTOCOL __constant_cpu_to_le32(0xC00000E3)
+#define STATUS_INTERNAL_DB_CORRUPTION __constant_cpu_to_le32(0xC00000E4)
+#define STATUS_INTERNAL_ERROR __constant_cpu_to_le32(0xC00000E5)
+#define STATUS_GENERIC_NOT_MAPPED __constant_cpu_to_le32(0xC00000E6)
+#define STATUS_BAD_DESCRIPTOR_FORMAT __constant_cpu_to_le32(0xC00000E7)
+#define STATUS_INVALID_USER_BUFFER __constant_cpu_to_le32(0xC00000E8)
+#define STATUS_UNEXPECTED_IO_ERROR __constant_cpu_to_le32(0xC00000E9)
+#define STATUS_UNEXPECTED_MM_CREATE_ERR __constant_cpu_to_le32(0xC00000EA)
+#define STATUS_UNEXPECTED_MM_MAP_ERROR __constant_cpu_to_le32(0xC00000EB)
+#define STATUS_UNEXPECTED_MM_EXTEND_ERR __constant_cpu_to_le32(0xC00000EC)
+#define STATUS_NOT_LOGON_PROCESS __constant_cpu_to_le32(0xC00000ED)
+#define STATUS_LOGON_SESSION_EXISTS __constant_cpu_to_le32(0xC00000EE)
+#define STATUS_INVALID_PARAMETER_1 __constant_cpu_to_le32(0xC00000EF)
+#define STATUS_INVALID_PARAMETER_2 __constant_cpu_to_le32(0xC00000F0)
+#define STATUS_INVALID_PARAMETER_3 __constant_cpu_to_le32(0xC00000F1)
+#define STATUS_INVALID_PARAMETER_4 __constant_cpu_to_le32(0xC00000F2)
+#define STATUS_INVALID_PARAMETER_5 __constant_cpu_to_le32(0xC00000F3)
+#define STATUS_INVALID_PARAMETER_6 __constant_cpu_to_le32(0xC00000F4)
+#define STATUS_INVALID_PARAMETER_7 __constant_cpu_to_le32(0xC00000F5)
+#define STATUS_INVALID_PARAMETER_8 __constant_cpu_to_le32(0xC00000F6)
+#define STATUS_INVALID_PARAMETER_9 __constant_cpu_to_le32(0xC00000F7)
+#define STATUS_INVALID_PARAMETER_10 __constant_cpu_to_le32(0xC00000F8)
+#define STATUS_INVALID_PARAMETER_11 __constant_cpu_to_le32(0xC00000F9)
+#define STATUS_INVALID_PARAMETER_12 __constant_cpu_to_le32(0xC00000FA)
+#define STATUS_REDIRECTOR_NOT_STARTED __constant_cpu_to_le32(0xC00000FB)
+#define STATUS_REDIRECTOR_STARTED __constant_cpu_to_le32(0xC00000FC)
+#define STATUS_STACK_OVERFLOW __constant_cpu_to_le32(0xC00000FD)
+#define STATUS_NO_SUCH_PACKAGE __constant_cpu_to_le32(0xC00000FE)
+#define STATUS_BAD_FUNCTION_TABLE __constant_cpu_to_le32(0xC00000FF)
+#define STATUS_VARIABLE_NOT_FOUND __constant_cpu_to_le32(0xC0000100)
+#define STATUS_DIRECTORY_NOT_EMPTY __constant_cpu_to_le32(0xC0000101)
+#define STATUS_FILE_CORRUPT_ERROR __constant_cpu_to_le32(0xC0000102)
+#define STATUS_NOT_A_DIRECTORY __constant_cpu_to_le32(0xC0000103)
+#define STATUS_BAD_LOGON_SESSION_STATE __constant_cpu_to_le32(0xC0000104)
+#define STATUS_LOGON_SESSION_COLLISION __constant_cpu_to_le32(0xC0000105)
+#define STATUS_NAME_TOO_LONG __constant_cpu_to_le32(0xC0000106)
+#define STATUS_FILES_OPEN __constant_cpu_to_le32(0xC0000107)
+#define STATUS_CONNECTION_IN_USE __constant_cpu_to_le32(0xC0000108)
+#define STATUS_MESSAGE_NOT_FOUND __constant_cpu_to_le32(0xC0000109)
+#define STATUS_PROCESS_IS_TERMINATING __constant_cpu_to_le32(0xC000010A)
+#define STATUS_INVALID_LOGON_TYPE __constant_cpu_to_le32(0xC000010B)
+#define STATUS_NO_GUID_TRANSLATION __constant_cpu_to_le32(0xC000010C)
+#define STATUS_CANNOT_IMPERSONATE __constant_cpu_to_le32(0xC000010D)
+#define STATUS_IMAGE_ALREADY_LOADED __constant_cpu_to_le32(0xC000010E)
+#define STATUS_ABIOS_NOT_PRESENT __constant_cpu_to_le32(0xC000010F)
+#define STATUS_ABIOS_LID_NOT_EXIST __constant_cpu_to_le32(0xC0000110)
+#define STATUS_ABIOS_LID_ALREADY_OWNED __constant_cpu_to_le32(0xC0000111)
+#define STATUS_ABIOS_NOT_LID_OWNER __constant_cpu_to_le32(0xC0000112)
+#define STATUS_ABIOS_INVALID_COMMAND __constant_cpu_to_le32(0xC0000113)
+#define STATUS_ABIOS_INVALID_LID __constant_cpu_to_le32(0xC0000114)
+#define STATUS_ABIOS_SELECTOR_NOT_AVAILABLE __constant_cpu_to_le32(0xC0000115)
+#define STATUS_ABIOS_INVALID_SELECTOR __constant_cpu_to_le32(0xC0000116)
+#define STATUS_NO_LDT __constant_cpu_to_le32(0xC0000117)
+#define STATUS_INVALID_LDT_SIZE __constant_cpu_to_le32(0xC0000118)
+#define STATUS_INVALID_LDT_OFFSET __constant_cpu_to_le32(0xC0000119)
+#define STATUS_INVALID_LDT_DESCRIPTOR __constant_cpu_to_le32(0xC000011A)
+#define STATUS_INVALID_IMAGE_NE_FORMAT __constant_cpu_to_le32(0xC000011B)
+#define STATUS_RXACT_INVALID_STATE __constant_cpu_to_le32(0xC000011C)
+#define STATUS_RXACT_COMMIT_FAILURE __constant_cpu_to_le32(0xC000011D)
+#define STATUS_MAPPED_FILE_SIZE_ZERO __constant_cpu_to_le32(0xC000011E)
+#define STATUS_TOO_MANY_OPENED_FILES __constant_cpu_to_le32(0xC000011F)
+#define STATUS_CANCELLED __constant_cpu_to_le32(0xC0000120)
+#define STATUS_CANNOT_DELETE __constant_cpu_to_le32(0xC0000121)
+#define STATUS_INVALID_COMPUTER_NAME __constant_cpu_to_le32(0xC0000122)
+#define STATUS_FILE_DELETED __constant_cpu_to_le32(0xC0000123)
+#define STATUS_SPECIAL_ACCOUNT __constant_cpu_to_le32(0xC0000124)
+#define STATUS_SPECIAL_GROUP __constant_cpu_to_le32(0xC0000125)
+#define STATUS_SPECIAL_USER __constant_cpu_to_le32(0xC0000126)
+#define STATUS_MEMBERS_PRIMARY_GROUP __constant_cpu_to_le32(0xC0000127)
+#define STATUS_FILE_CLOSED __constant_cpu_to_le32(0xC0000128)
+#define STATUS_TOO_MANY_THREADS __constant_cpu_to_le32(0xC0000129)
+#define STATUS_THREAD_NOT_IN_PROCESS __constant_cpu_to_le32(0xC000012A)
+#define STATUS_TOKEN_ALREADY_IN_USE __constant_cpu_to_le32(0xC000012B)
+#define STATUS_PAGEFILE_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC000012C)
+#define STATUS_COMMITMENT_LIMIT __constant_cpu_to_le32(0xC000012D)
+#define STATUS_INVALID_IMAGE_LE_FORMAT __constant_cpu_to_le32(0xC000012E)
+#define STATUS_INVALID_IMAGE_NOT_MZ __constant_cpu_to_le32(0xC000012F)
+#define STATUS_INVALID_IMAGE_PROTECT __constant_cpu_to_le32(0xC0000130)
+#define STATUS_INVALID_IMAGE_WIN_16 __constant_cpu_to_le32(0xC0000131)
+#define STATUS_LOGON_SERVER_CONFLICT __constant_cpu_to_le32(0xC0000132)
+#define STATUS_TIME_DIFFERENCE_AT_DC __constant_cpu_to_le32(0xC0000133)
+#define STATUS_SYNCHRONIZATION_REQUIRED __constant_cpu_to_le32(0xC0000134)
+#define STATUS_DLL_NOT_FOUND __constant_cpu_to_le32(0xC0000135)
+#define STATUS_OPEN_FAILED __constant_cpu_to_le32(0xC0000136)
+#define STATUS_IO_PRIVILEGE_FAILED __constant_cpu_to_le32(0xC0000137)
+#define STATUS_ORDINAL_NOT_FOUND __constant_cpu_to_le32(0xC0000138)
+#define STATUS_ENTRYPOINT_NOT_FOUND __constant_cpu_to_le32(0xC0000139)
+#define STATUS_CONTROL_C_EXIT __constant_cpu_to_le32(0xC000013A)
+#define STATUS_LOCAL_DISCONNECT __constant_cpu_to_le32(0xC000013B)
+#define STATUS_REMOTE_DISCONNECT __constant_cpu_to_le32(0xC000013C)
+#define STATUS_REMOTE_RESOURCES __constant_cpu_to_le32(0xC000013D)
+#define STATUS_LINK_FAILED __constant_cpu_to_le32(0xC000013E)
+#define STATUS_LINK_TIMEOUT __constant_cpu_to_le32(0xC000013F)
+#define STATUS_INVALID_CONNECTION __constant_cpu_to_le32(0xC0000140)
+#define STATUS_INVALID_ADDRESS __constant_cpu_to_le32(0xC0000141)
+#define STATUS_DLL_INIT_FAILED __constant_cpu_to_le32(0xC0000142)
+#define STATUS_MISSING_SYSTEMFILE __constant_cpu_to_le32(0xC0000143)
+#define STATUS_UNHANDLED_EXCEPTION __constant_cpu_to_le32(0xC0000144)
+#define STATUS_APP_INIT_FAILURE __constant_cpu_to_le32(0xC0000145)
+#define STATUS_PAGEFILE_CREATE_FAILED __constant_cpu_to_le32(0xC0000146)
+#define STATUS_NO_PAGEFILE __constant_cpu_to_le32(0xC0000147)
+#define STATUS_INVALID_LEVEL __constant_cpu_to_le32(0xC0000148)
+#define STATUS_WRONG_PASSWORD_CORE __constant_cpu_to_le32(0xC0000149)
+#define STATUS_ILLEGAL_FLOAT_CONTEXT __constant_cpu_to_le32(0xC000014A)
+#define STATUS_PIPE_BROKEN __constant_cpu_to_le32(0xC000014B)
+#define STATUS_REGISTRY_CORRUPT __constant_cpu_to_le32(0xC000014C)
+#define STATUS_REGISTRY_IO_FAILED __constant_cpu_to_le32(0xC000014D)
+#define STATUS_NO_EVENT_PAIR __constant_cpu_to_le32(0xC000014E)
+#define STATUS_UNRECOGNIZED_VOLUME __constant_cpu_to_le32(0xC000014F)
+#define STATUS_SERIAL_NO_DEVICE_INITED __constant_cpu_to_le32(0xC0000150)
+#define STATUS_NO_SUCH_ALIAS __constant_cpu_to_le32(0xC0000151)
+#define STATUS_MEMBER_NOT_IN_ALIAS __constant_cpu_to_le32(0xC0000152)
+#define STATUS_MEMBER_IN_ALIAS __constant_cpu_to_le32(0xC0000153)
+#define STATUS_ALIAS_EXISTS __constant_cpu_to_le32(0xC0000154)
+#define STATUS_LOGON_NOT_GRANTED __constant_cpu_to_le32(0xC0000155)
+#define STATUS_TOO_MANY_SECRETS __constant_cpu_to_le32(0xC0000156)
+#define STATUS_SECRET_TOO_LONG __constant_cpu_to_le32(0xC0000157)
+#define STATUS_INTERNAL_DB_ERROR __constant_cpu_to_le32(0xC0000158)
+#define STATUS_FULLSCREEN_MODE __constant_cpu_to_le32(0xC0000159)
+#define STATUS_TOO_MANY_CONTEXT_IDS __constant_cpu_to_le32(0xC000015A)
+#define STATUS_LOGON_TYPE_NOT_GRANTED __constant_cpu_to_le32(0xC000015B)
+#define STATUS_NOT_REGISTRY_FILE __constant_cpu_to_le32(0xC000015C)
+#define STATUS_NT_CROSS_ENCRYPTION_REQUIRED __constant_cpu_to_le32(0xC000015D)
+#define STATUS_DOMAIN_CTRLR_CONFIG_ERROR __constant_cpu_to_le32(0xC000015E)
+#define STATUS_FT_MISSING_MEMBER __constant_cpu_to_le32(0xC000015F)
+#define STATUS_ILL_FORMED_SERVICE_ENTRY __constant_cpu_to_le32(0xC0000160)
+#define STATUS_ILLEGAL_CHARACTER __constant_cpu_to_le32(0xC0000161)
+#define STATUS_UNMAPPABLE_CHARACTER __constant_cpu_to_le32(0xC0000162)
+#define STATUS_UNDEFINED_CHARACTER __constant_cpu_to_le32(0xC0000163)
+#define STATUS_FLOPPY_VOLUME __constant_cpu_to_le32(0xC0000164)
+#define STATUS_FLOPPY_ID_MARK_NOT_FOUND __constant_cpu_to_le32(0xC0000165)
+#define STATUS_FLOPPY_WRONG_CYLINDER __constant_cpu_to_le32(0xC0000166)
+#define STATUS_FLOPPY_UNKNOWN_ERROR __constant_cpu_to_le32(0xC0000167)
+#define STATUS_FLOPPY_BAD_REGISTERS __constant_cpu_to_le32(0xC0000168)
+#define STATUS_DISK_RECALIBRATE_FAILED __constant_cpu_to_le32(0xC0000169)
+#define STATUS_DISK_OPERATION_FAILED __constant_cpu_to_le32(0xC000016A)
+#define STATUS_DISK_RESET_FAILED __constant_cpu_to_le32(0xC000016B)
+#define STATUS_SHARED_IRQ_BUSY __constant_cpu_to_le32(0xC000016C)
+#define STATUS_FT_ORPHANING __constant_cpu_to_le32(0xC000016D)
+#define STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT __constant_cpu_to_le32(0xC000016E)
+#define STATUS_PARTITION_FAILURE __constant_cpu_to_le32(0xC0000172)
+#define STATUS_INVALID_BLOCK_LENGTH __constant_cpu_to_le32(0xC0000173)
+#define STATUS_DEVICE_NOT_PARTITIONED __constant_cpu_to_le32(0xC0000174)
+#define STATUS_UNABLE_TO_LOCK_MEDIA __constant_cpu_to_le32(0xC0000175)
+#define STATUS_UNABLE_TO_UNLOAD_MEDIA __constant_cpu_to_le32(0xC0000176)
+#define STATUS_EOM_OVERFLOW __constant_cpu_to_le32(0xC0000177)
+#define STATUS_NO_MEDIA __constant_cpu_to_le32(0xC0000178)
+#define STATUS_NO_SUCH_MEMBER __constant_cpu_to_le32(0xC000017A)
+#define STATUS_INVALID_MEMBER __constant_cpu_to_le32(0xC000017B)
+#define STATUS_KEY_DELETED __constant_cpu_to_le32(0xC000017C)
+#define STATUS_NO_LOG_SPACE __constant_cpu_to_le32(0xC000017D)
+#define STATUS_TOO_MANY_SIDS __constant_cpu_to_le32(0xC000017E)
+#define STATUS_LM_CROSS_ENCRYPTION_REQUIRED __constant_cpu_to_le32(0xC000017F)
+#define STATUS_KEY_HAS_CHILDREN __constant_cpu_to_le32(0xC0000180)
+#define STATUS_CHILD_MUST_BE_VOLATILE __constant_cpu_to_le32(0xC0000181)
+#define STATUS_DEVICE_CONFIGURATION_ERROR __constant_cpu_to_le32(0xC0000182)
+#define STATUS_DRIVER_INTERNAL_ERROR __constant_cpu_to_le32(0xC0000183)
+#define STATUS_INVALID_DEVICE_STATE __constant_cpu_to_le32(0xC0000184)
+#define STATUS_IO_DEVICE_ERROR __constant_cpu_to_le32(0xC0000185)
+#define STATUS_DEVICE_PROTOCOL_ERROR __constant_cpu_to_le32(0xC0000186)
+#define STATUS_BACKUP_CONTROLLER __constant_cpu_to_le32(0xC0000187)
+#define STATUS_LOG_FILE_FULL __constant_cpu_to_le32(0xC0000188)
+#define STATUS_TOO_LATE __constant_cpu_to_le32(0xC0000189)
+#define STATUS_NO_TRUST_LSA_SECRET __constant_cpu_to_le32(0xC000018A)
+#define STATUS_NO_TRUST_SAM_ACCOUNT __constant_cpu_to_le32(0xC000018B)
+#define STATUS_TRUSTED_DOMAIN_FAILURE __constant_cpu_to_le32(0xC000018C)
+#define STATUS_TRUSTED_RELATIONSHIP_FAILURE __constant_cpu_to_le32(0xC000018D)
+#define STATUS_EVENTLOG_FILE_CORRUPT __constant_cpu_to_le32(0xC000018E)
+#define STATUS_EVENTLOG_CANT_START __constant_cpu_to_le32(0xC000018F)
+#define STATUS_TRUST_FAILURE __constant_cpu_to_le32(0xC0000190)
+#define STATUS_MUTANT_LIMIT_EXCEEDED __constant_cpu_to_le32(0xC0000191)
+#define STATUS_NETLOGON_NOT_STARTED __constant_cpu_to_le32(0xC0000192)
+#define STATUS_ACCOUNT_EXPIRED __constant_cpu_to_le32(0xC0000193)
+#define STATUS_POSSIBLE_DEADLOCK __constant_cpu_to_le32(0xC0000194)
+#define STATUS_NETWORK_CREDENTIAL_CONFLICT __constant_cpu_to_le32(0xC0000195)
+#define STATUS_REMOTE_SESSION_LIMIT __constant_cpu_to_le32(0xC0000196)
+#define STATUS_EVENTLOG_FILE_CHANGED __constant_cpu_to_le32(0xC0000197)
+#define STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT __constant_cpu_to_le32(0xC0000198)
+#define STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT __constant_cpu_to_le32(0xC0000199)
+#define STATUS_NOLOGON_SERVER_TRUST_ACCOUNT __constant_cpu_to_le32(0xC000019A)
+#define STATUS_DOMAIN_TRUST_INCONSISTENT __constant_cpu_to_le32(0xC000019B)
+#define STATUS_FS_DRIVER_REQUIRED __constant_cpu_to_le32(0xC000019C)
+#define STATUS_IMAGE_ALREADY_LOADED_AS_DLL __constant_cpu_to_le32(0xC000019D)
+#define STATUS_NETWORK_OPEN_RESTRICTION __constant_cpu_to_le32(0xC0000201)
+#define STATUS_NO_USER_SESSION_KEY __constant_cpu_to_le32(0xC0000202)
+#define STATUS_USER_SESSION_DELETED __constant_cpu_to_le32(0xC0000203)
+#define STATUS_RESOURCE_LANG_NOT_FOUND __constant_cpu_to_le32(0xC0000204)
+#define STATUS_INSUFF_SERVER_RESOURCES __constant_cpu_to_le32(0xC0000205)
+#define STATUS_INVALID_BUFFER_SIZE __constant_cpu_to_le32(0xC0000206)
+#define STATUS_INVALID_ADDRESS_COMPONENT __constant_cpu_to_le32(0xC0000207)
+#define STATUS_INVALID_ADDRESS_WILDCARD __constant_cpu_to_le32(0xC0000208)
+#define STATUS_TOO_MANY_ADDRESSES __constant_cpu_to_le32(0xC0000209)
+#define STATUS_ADDRESS_ALREADY_EXISTS __constant_cpu_to_le32(0xC000020A)
+#define STATUS_ADDRESS_CLOSED __constant_cpu_to_le32(0xC000020B)
+#define STATUS_CONNECTION_DISCONNECTED __constant_cpu_to_le32(0xC000020C)
+#define STATUS_CONNECTION_RESET __constant_cpu_to_le32(0xC000020D)
+#define STATUS_TOO_MANY_NODES __constant_cpu_to_le32(0xC000020E)
+#define STATUS_TRANSACTION_ABORTED __constant_cpu_to_le32(0xC000020F)
+#define STATUS_TRANSACTION_TIMED_OUT __constant_cpu_to_le32(0xC0000210)
+#define STATUS_TRANSACTION_NO_RELEASE __constant_cpu_to_le32(0xC0000211)
+#define STATUS_TRANSACTION_NO_MATCH __constant_cpu_to_le32(0xC0000212)
+#define STATUS_TRANSACTION_RESPONDED __constant_cpu_to_le32(0xC0000213)
+#define STATUS_TRANSACTION_INVALID_ID __constant_cpu_to_le32(0xC0000214)
+#define STATUS_TRANSACTION_INVALID_TYPE __constant_cpu_to_le32(0xC0000215)
+#define STATUS_NOT_SERVER_SESSION __constant_cpu_to_le32(0xC0000216)
+#define STATUS_NOT_CLIENT_SESSION __constant_cpu_to_le32(0xC0000217)
+#define STATUS_CANNOT_LOAD_REGISTRY_FILE __constant_cpu_to_le32(0xC0000218)
+#define STATUS_DEBUG_ATTACH_FAILED __constant_cpu_to_le32(0xC0000219)
+#define STATUS_SYSTEM_PROCESS_TERMINATED __constant_cpu_to_le32(0xC000021A)
+#define STATUS_DATA_NOT_ACCEPTED __constant_cpu_to_le32(0xC000021B)
+#define STATUS_NO_BROWSER_SERVERS_FOUND __constant_cpu_to_le32(0xC000021C)
+#define STATUS_VDM_HARD_ERROR __constant_cpu_to_le32(0xC000021D)
+#define STATUS_DRIVER_CANCEL_TIMEOUT __constant_cpu_to_le32(0xC000021E)
+#define STATUS_REPLY_MESSAGE_MISMATCH __constant_cpu_to_le32(0xC000021F)
+#define STATUS_MAPPED_ALIGNMENT __constant_cpu_to_le32(0xC0000220)
+#define STATUS_IMAGE_CHECKSUM_MISMATCH __constant_cpu_to_le32(0xC0000221)
+#define STATUS_LOST_WRITEBEHIND_DATA __constant_cpu_to_le32(0xC0000222)
+#define STATUS_CLIENT_SERVER_PARAMETERS_INVALID __constant_cpu_to_le32(0xC0000223)
+#define STATUS_PASSWORD_MUST_CHANGE __constant_cpu_to_le32(0xC0000224)
+#define STATUS_NOT_FOUND __constant_cpu_to_le32(0xC0000225)
+#define STATUS_NOT_TINY_STREAM __constant_cpu_to_le32(0xC0000226)
+#define STATUS_RECOVERY_FAILURE __constant_cpu_to_le32(0xC0000227)
+#define STATUS_STACK_OVERFLOW_READ __constant_cpu_to_le32(0xC0000228)
+#define STATUS_FAIL_CHECK __constant_cpu_to_le32(0xC0000229)
+#define STATUS_DUPLICATE_OBJECTID __constant_cpu_to_le32(0xC000022A)
+#define STATUS_OBJECTID_EXISTS __constant_cpu_to_le32(0xC000022B)
+#define STATUS_CONVERT_TO_LARGE __constant_cpu_to_le32(0xC000022C)
+#define STATUS_RETRY __constant_cpu_to_le32(0xC000022D)
+#define STATUS_FOUND_OUT_OF_SCOPE __constant_cpu_to_le32(0xC000022E)
+#define STATUS_ALLOCATE_BUCKET __constant_cpu_to_le32(0xC000022F)
+#define STATUS_PROPSET_NOT_FOUND __constant_cpu_to_le32(0xC0000230)
+#define STATUS_MARSHALL_OVERFLOW __constant_cpu_to_le32(0xC0000231)
+#define STATUS_INVALID_VARIANT __constant_cpu_to_le32(0xC0000232)
+#define STATUS_DOMAIN_CONTROLLER_NOT_FOUND __constant_cpu_to_le32(0xC0000233)
+#define STATUS_ACCOUNT_LOCKED_OUT __constant_cpu_to_le32(0xC0000234)
+#define STATUS_HANDLE_NOT_CLOSABLE __constant_cpu_to_le32(0xC0000235)
+#define STATUS_CONNECTION_REFUSED __constant_cpu_to_le32(0xC0000236)
+#define STATUS_GRACEFUL_DISCONNECT __constant_cpu_to_le32(0xC0000237)
+#define STATUS_ADDRESS_ALREADY_ASSOCIATED __constant_cpu_to_le32(0xC0000238)
+#define STATUS_ADDRESS_NOT_ASSOCIATED __constant_cpu_to_le32(0xC0000239)
+#define STATUS_CONNECTION_INVALID __constant_cpu_to_le32(0xC000023A)
+#define STATUS_CONNECTION_ACTIVE __constant_cpu_to_le32(0xC000023B)
+#define STATUS_NETWORK_UNREACHABLE __constant_cpu_to_le32(0xC000023C)
+#define STATUS_HOST_UNREACHABLE __constant_cpu_to_le32(0xC000023D)
+#define STATUS_PROTOCOL_UNREACHABLE __constant_cpu_to_le32(0xC000023E)
+#define STATUS_PORT_UNREACHABLE __constant_cpu_to_le32(0xC000023F)
+#define STATUS_REQUEST_ABORTED __constant_cpu_to_le32(0xC0000240)
+#define STATUS_CONNECTION_ABORTED __constant_cpu_to_le32(0xC0000241)
+#define STATUS_BAD_COMPRESSION_BUFFER __constant_cpu_to_le32(0xC0000242)
+#define STATUS_USER_MAPPED_FILE __constant_cpu_to_le32(0xC0000243)
+#define STATUS_AUDIT_FAILED __constant_cpu_to_le32(0xC0000244)
+#define STATUS_TIMER_RESOLUTION_NOT_SET __constant_cpu_to_le32(0xC0000245)
+#define STATUS_CONNECTION_COUNT_LIMIT __constant_cpu_to_le32(0xC0000246)
+#define STATUS_LOGIN_TIME_RESTRICTION __constant_cpu_to_le32(0xC0000247)
+#define STATUS_LOGIN_WKSTA_RESTRICTION __constant_cpu_to_le32(0xC0000248)
+#define STATUS_IMAGE_MP_UP_MISMATCH __constant_cpu_to_le32(0xC0000249)
+#define STATUS_INSUFFICIENT_LOGON_INFO __constant_cpu_to_le32(0xC0000250)
+#define STATUS_BAD_DLL_ENTRYPOINT __constant_cpu_to_le32(0xC0000251)
+#define STATUS_BAD_SERVICE_ENTRYPOINT __constant_cpu_to_le32(0xC0000252)
+#define STATUS_LPC_REPLY_LOST __constant_cpu_to_le32(0xC0000253)
+#define STATUS_IP_ADDRESS_CONFLICT1 __constant_cpu_to_le32(0xC0000254)
+#define STATUS_IP_ADDRESS_CONFLICT2 __constant_cpu_to_le32(0xC0000255)
+#define STATUS_REGISTRY_QUOTA_LIMIT __constant_cpu_to_le32(0xC0000256)
+#define STATUS_PATH_NOT_COVERED __constant_cpu_to_le32(0xC0000257)
+#define STATUS_NO_CALLBACK_ACTIVE __constant_cpu_to_le32(0xC0000258)
+#define STATUS_LICENSE_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000259)
+#define STATUS_PWD_TOO_SHORT __constant_cpu_to_le32(0xC000025A)
+#define STATUS_PWD_TOO_RECENT __constant_cpu_to_le32(0xC000025B)
+#define STATUS_PWD_HISTORY_CONFLICT __constant_cpu_to_le32(0xC000025C)
+#define STATUS_PLUGPLAY_NO_DEVICE __constant_cpu_to_le32(0xC000025E)
+#define STATUS_UNSUPPORTED_COMPRESSION __constant_cpu_to_le32(0xC000025F)
+#define STATUS_INVALID_HW_PROFILE __constant_cpu_to_le32(0xC0000260)
+#define STATUS_INVALID_PLUGPLAY_DEVICE_PATH __constant_cpu_to_le32(0xC0000261)
+#define STATUS_DRIVER_ORDINAL_NOT_FOUND __constant_cpu_to_le32(0xC0000262)
+#define STATUS_DRIVER_ENTRYPOINT_NOT_FOUND __constant_cpu_to_le32(0xC0000263)
+#define STATUS_RESOURCE_NOT_OWNED __constant_cpu_to_le32(0xC0000264)
+#define STATUS_TOO_MANY_LINKS __constant_cpu_to_le32(0xC0000265)
+#define STATUS_QUOTA_LIST_INCONSISTENT __constant_cpu_to_le32(0xC0000266)
+#define STATUS_FILE_IS_OFFLINE __constant_cpu_to_le32(0xC0000267)
+#define STATUS_EVALUATION_EXPIRATION __constant_cpu_to_le32(0xC0000268)
+#define STATUS_ILLEGAL_DLL_RELOCATION __constant_cpu_to_le32(0xC0000269)
+#define STATUS_LICENSE_VIOLATION __constant_cpu_to_le32(0xC000026A)
+#define STATUS_DLL_INIT_FAILED_LOGOFF __constant_cpu_to_le32(0xC000026B)
+#define STATUS_DRIVER_UNABLE_TO_LOAD __constant_cpu_to_le32(0xC000026C)
+#define STATUS_DFS_UNAVAILABLE __constant_cpu_to_le32(0xC000026D)
+#define STATUS_VOLUME_DISMOUNTED __constant_cpu_to_le32(0xC000026E)
+#define STATUS_WX86_INTERNAL_ERROR __constant_cpu_to_le32(0xC000026F)
+#define STATUS_WX86_FLOAT_STACK_CHECK __constant_cpu_to_le32(0xC0000270)
+#define STATUS_VALIDATE_CONTINUE __constant_cpu_to_le32(0xC0000271)
+#define STATUS_NO_MATCH __constant_cpu_to_le32(0xC0000272)
+#define STATUS_NO_MORE_MATCHES __constant_cpu_to_le32(0xC0000273)
+#define STATUS_NOT_A_REPARSE_POINT __constant_cpu_to_le32(0xC0000275)
+#define STATUS_IO_REPARSE_TAG_INVALID __constant_cpu_to_le32(0xC0000276)
+#define STATUS_IO_REPARSE_TAG_MISMATCH __constant_cpu_to_le32(0xC0000277)
+#define STATUS_IO_REPARSE_DATA_INVALID __constant_cpu_to_le32(0xC0000278)
+#define STATUS_IO_REPARSE_TAG_NOT_HANDLED __constant_cpu_to_le32(0xC0000279)
+#define STATUS_REPARSE_POINT_NOT_RESOLVED __constant_cpu_to_le32(0xC0000280)
+#define STATUS_DIRECTORY_IS_A_REPARSE_POINT __constant_cpu_to_le32(0xC0000281)
+#define STATUS_RANGE_LIST_CONFLICT __constant_cpu_to_le32(0xC0000282)
+#define STATUS_SOURCE_ELEMENT_EMPTY __constant_cpu_to_le32(0xC0000283)
+#define STATUS_DESTINATION_ELEMENT_FULL __constant_cpu_to_le32(0xC0000284)
+#define STATUS_ILLEGAL_ELEMENT_ADDRESS __constant_cpu_to_le32(0xC0000285)
+#define STATUS_MAGAZINE_NOT_PRESENT __constant_cpu_to_le32(0xC0000286)
+#define STATUS_REINITIALIZATION_NEEDED __constant_cpu_to_le32(0xC0000287)
+#define STATUS_ENCRYPTION_FAILED __constant_cpu_to_le32(0xC000028A)
+#define STATUS_DECRYPTION_FAILED __constant_cpu_to_le32(0xC000028B)
+#define STATUS_RANGE_NOT_FOUND __constant_cpu_to_le32(0xC000028C)
+#define STATUS_NO_RECOVERY_POLICY __constant_cpu_to_le32(0xC000028D)
+#define STATUS_NO_EFS __constant_cpu_to_le32(0xC000028E)
+#define STATUS_WRONG_EFS __constant_cpu_to_le32(0xC000028F)
+#define STATUS_NO_USER_KEYS __constant_cpu_to_le32(0xC0000290)
+#define STATUS_FILE_NOT_ENCRYPTED __constant_cpu_to_le32(0xC0000291)
+#define STATUS_NOT_EXPORT_FORMAT __constant_cpu_to_le32(0xC0000292)
+#define STATUS_FILE_ENCRYPTED __constant_cpu_to_le32(0xC0000293)
+#define STATUS_WMI_GUID_NOT_FOUND __constant_cpu_to_le32(0xC0000295)
+#define STATUS_WMI_INSTANCE_NOT_FOUND __constant_cpu_to_le32(0xC0000296)
+#define STATUS_WMI_ITEMID_NOT_FOUND __constant_cpu_to_le32(0xC0000297)
+#define STATUS_WMI_TRY_AGAIN __constant_cpu_to_le32(0xC0000298)
+#define STATUS_SHARED_POLICY __constant_cpu_to_le32(0xC0000299)
+#define STATUS_POLICY_OBJECT_NOT_FOUND __constant_cpu_to_le32(0xC000029A)
+#define STATUS_POLICY_ONLY_IN_DS __constant_cpu_to_le32(0xC000029B)
+#define STATUS_VOLUME_NOT_UPGRADED __constant_cpu_to_le32(0xC000029C)
+#define STATUS_REMOTE_STORAGE_NOT_ACTIVE __constant_cpu_to_le32(0xC000029D)
+#define STATUS_REMOTE_STORAGE_MEDIA_ERROR __constant_cpu_to_le32(0xC000029E)
+#define STATUS_NO_TRACKING_SERVICE __constant_cpu_to_le32(0xC000029F)
+#define STATUS_SERVER_SID_MISMATCH __constant_cpu_to_le32(0xC00002A0)
+#define STATUS_DS_NO_ATTRIBUTE_OR_VALUE __constant_cpu_to_le32(0xC00002A1)
+#define STATUS_DS_INVALID_ATTRIBUTE_SYNTAX __constant_cpu_to_le32(0xC00002A2)
+#define STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED __constant_cpu_to_le32(0xC00002A3)
+#define STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS __constant_cpu_to_le32(0xC00002A4)
+#define STATUS_DS_BUSY __constant_cpu_to_le32(0xC00002A5)
+#define STATUS_DS_UNAVAILABLE __constant_cpu_to_le32(0xC00002A6)
+#define STATUS_DS_NO_RIDS_ALLOCATED __constant_cpu_to_le32(0xC00002A7)
+#define STATUS_DS_NO_MORE_RIDS __constant_cpu_to_le32(0xC00002A8)
+#define STATUS_DS_INCORRECT_ROLE_OWNER __constant_cpu_to_le32(0xC00002A9)
+#define STATUS_DS_RIDMGR_INIT_ERROR __constant_cpu_to_le32(0xC00002AA)
+#define STATUS_DS_OBJ_CLASS_VIOLATION __constant_cpu_to_le32(0xC00002AB)
+#define STATUS_DS_CANT_ON_NON_LEAF __constant_cpu_to_le32(0xC00002AC)
+#define STATUS_DS_CANT_ON_RDN __constant_cpu_to_le32(0xC00002AD)
+#define STATUS_DS_CANT_MOD_OBJ_CLASS __constant_cpu_to_le32(0xC00002AE)
+#define STATUS_DS_CROSS_DOM_MOVE_FAILED __constant_cpu_to_le32(0xC00002AF)
+#define STATUS_DS_GC_NOT_AVAILABLE __constant_cpu_to_le32(0xC00002B0)
+#define STATUS_DIRECTORY_SERVICE_REQUIRED __constant_cpu_to_le32(0xC00002B1)
+#define STATUS_REPARSE_ATTRIBUTE_CONFLICT __constant_cpu_to_le32(0xC00002B2)
+#define STATUS_CANT_ENABLE_DENY_ONLY __constant_cpu_to_le32(0xC00002B3)
+#define STATUS_FLOAT_MULTIPLE_FAULTS __constant_cpu_to_le32(0xC00002B4)
+#define STATUS_FLOAT_MULTIPLE_TRAPS __constant_cpu_to_le32(0xC00002B5)
+#define STATUS_DEVICE_REMOVED __constant_cpu_to_le32(0xC00002B6)
+#define STATUS_JOURNAL_DELETE_IN_PROGRESS __constant_cpu_to_le32(0xC00002B7)
+#define STATUS_JOURNAL_NOT_ACTIVE __constant_cpu_to_le32(0xC00002B8)
+#define STATUS_NOINTERFACE __constant_cpu_to_le32(0xC00002B9)
+#define STATUS_DS_ADMIN_LIMIT_EXCEEDED __constant_cpu_to_le32(0xC00002C1)
+#define STATUS_DRIVER_FAILED_SLEEP __constant_cpu_to_le32(0xC00002C2)
+#define STATUS_MUTUAL_AUTHENTICATION_FAILED __constant_cpu_to_le32(0xC00002C3)
+#define STATUS_CORRUPT_SYSTEM_FILE __constant_cpu_to_le32(0xC00002C4)
+#define STATUS_DATATYPE_MISALIGNMENT_ERROR __constant_cpu_to_le32(0xC00002C5)
+#define STATUS_WMI_READ_ONLY __constant_cpu_to_le32(0xC00002C6)
+#define STATUS_WMI_SET_FAILURE __constant_cpu_to_le32(0xC00002C7)
+#define STATUS_COMMITMENT_MINIMUM __constant_cpu_to_le32(0xC00002C8)
+#define STATUS_REG_NAT_CONSUMPTION __constant_cpu_to_le32(0xC00002C9)
+#define STATUS_TRANSPORT_FULL __constant_cpu_to_le32(0xC00002CA)
+#define STATUS_DS_SAM_INIT_FAILURE __constant_cpu_to_le32(0xC00002CB)
+#define STATUS_ONLY_IF_CONNECTED __constant_cpu_to_le32(0xC00002CC)
+#define STATUS_DS_SENSITIVE_GROUP_VIOLATION __constant_cpu_to_le32(0xC00002CD)
+#define STATUS_PNP_RESTART_ENUMERATION __constant_cpu_to_le32(0xC00002CE)
+#define STATUS_JOURNAL_ENTRY_DELETED __constant_cpu_to_le32(0xC00002CF)
+#define STATUS_DS_CANT_MOD_PRIMARYGROUPID __constant_cpu_to_le32(0xC00002D0)
+#define STATUS_SYSTEM_IMAGE_BAD_SIGNATURE __constant_cpu_to_le32(0xC00002D1)
+#define STATUS_PNP_REBOOT_REQUIRED __constant_cpu_to_le32(0xC00002D2)
+#define STATUS_POWER_STATE_INVALID __constant_cpu_to_le32(0xC00002D3)
+#define STATUS_DS_INVALID_GROUP_TYPE __constant_cpu_to_le32(0xC00002D4)
+#define STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN __constant_cpu_to_le32(0xC00002D5)
+#define STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN __constant_cpu_to_le32(0xC00002D6)
+#define STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER __constant_cpu_to_le32(0xC00002D7)
+#define STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER __constant_cpu_to_le32(0xC00002D8)
+#define STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER __constant_cpu_to_le32(0xC00002D9)
+#define STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER __constant_cpu_to_le32(0xC00002DA)
+#define STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER __constant_cpu_to_le32(0xC00002DB)
+#define STATUS_DS_HAVE_PRIMARY_MEMBERS __constant_cpu_to_le32(0xC00002DC)
+#define STATUS_WMI_NOT_SUPPORTED __constant_cpu_to_le32(0xC00002DD)
+#define STATUS_INSUFFICIENT_POWER __constant_cpu_to_le32(0xC00002DE)
+#define STATUS_SAM_NEED_BOOTKEY_PASSWORD __constant_cpu_to_le32(0xC00002DF)
+#define STATUS_SAM_NEED_BOOTKEY_FLOPPY __constant_cpu_to_le32(0xC00002E0)
+#define STATUS_DS_CANT_START __constant_cpu_to_le32(0xC00002E1)
+#define STATUS_DS_INIT_FAILURE __constant_cpu_to_le32(0xC00002E2)
+#define STATUS_SAM_INIT_FAILURE __constant_cpu_to_le32(0xC00002E3)
+#define STATUS_DS_GC_REQUIRED __constant_cpu_to_le32(0xC00002E4)
+#define STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY __constant_cpu_to_le32(0xC00002E5)
+#define STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS __constant_cpu_to_le32(0xC00002E6)
+#define STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC00002E7)
+#define STATUS_MULTIPLE_FAULT_VIOLATION __constant_cpu_to_le32(0xC00002E8)
+#define STATUS_CURRENT_DOMAIN_NOT_ALLOWED __constant_cpu_to_le32(0xC00002E9)
+#define STATUS_CANNOT_MAKE __constant_cpu_to_le32(0xC00002EA)
+#define STATUS_SYSTEM_SHUTDOWN __constant_cpu_to_le32(0xC00002EB)
+#define STATUS_DS_INIT_FAILURE_CONSOLE __constant_cpu_to_le32(0xC00002EC)
+#define STATUS_DS_SAM_INIT_FAILURE_CONSOLE __constant_cpu_to_le32(0xC00002ED)
+#define STATUS_UNFINISHED_CONTEXT_DELETED __constant_cpu_to_le32(0xC00002EE)
+#define STATUS_NO_TGT_REPLY __constant_cpu_to_le32(0xC00002EF)
+#define STATUS_OBJECTID_NOT_FOUND __constant_cpu_to_le32(0xC00002F0)
+#define STATUS_NO_IP_ADDRESSES __constant_cpu_to_le32(0xC00002F1)
+#define STATUS_WRONG_CREDENTIAL_HANDLE __constant_cpu_to_le32(0xC00002F2)
+#define STATUS_CRYPTO_SYSTEM_INVALID __constant_cpu_to_le32(0xC00002F3)
+#define STATUS_MAX_REFERRALS_EXCEEDED __constant_cpu_to_le32(0xC00002F4)
+#define STATUS_MUST_BE_KDC __constant_cpu_to_le32(0xC00002F5)
+#define STATUS_STRONG_CRYPTO_NOT_SUPPORTED __constant_cpu_to_le32(0xC00002F6)
+#define STATUS_TOO_MANY_PRINCIPALS __constant_cpu_to_le32(0xC00002F7)
+#define STATUS_NO_PA_DATA __constant_cpu_to_le32(0xC00002F8)
+#define STATUS_PKINIT_NAME_MISMATCH __constant_cpu_to_le32(0xC00002F9)
+#define STATUS_SMARTCARD_LOGON_REQUIRED __constant_cpu_to_le32(0xC00002FA)
+#define STATUS_KDC_INVALID_REQUEST __constant_cpu_to_le32(0xC00002FB)
+#define STATUS_KDC_UNABLE_TO_REFER __constant_cpu_to_le32(0xC00002FC)
+#define STATUS_KDC_UNKNOWN_ETYPE __constant_cpu_to_le32(0xC00002FD)
+#define STATUS_SHUTDOWN_IN_PROGRESS __constant_cpu_to_le32(0xC00002FE)
+#define STATUS_SERVER_SHUTDOWN_IN_PROGRESS __constant_cpu_to_le32(0xC00002FF)
+#define STATUS_NOT_SUPPORTED_ON_SBS __constant_cpu_to_le32(0xC0000300)
+#define STATUS_WMI_GUID_DISCONNECTED __constant_cpu_to_le32(0xC0000301)
+#define STATUS_WMI_ALREADY_DISABLED __constant_cpu_to_le32(0xC0000302)
+#define STATUS_WMI_ALREADY_ENABLED __constant_cpu_to_le32(0xC0000303)
+#define STATUS_MFT_TOO_FRAGMENTED __constant_cpu_to_le32(0xC0000304)
+#define STATUS_COPY_PROTECTION_FAILURE __constant_cpu_to_le32(0xC0000305)
+#define STATUS_CSS_AUTHENTICATION_FAILURE __constant_cpu_to_le32(0xC0000306)
+#define STATUS_CSS_KEY_NOT_PRESENT __constant_cpu_to_le32(0xC0000307)
+#define STATUS_CSS_KEY_NOT_ESTABLISHED __constant_cpu_to_le32(0xC0000308)
+#define STATUS_CSS_SCRAMBLED_SECTOR __constant_cpu_to_le32(0xC0000309)
+#define STATUS_CSS_REGION_MISMATCH __constant_cpu_to_le32(0xC000030A)
+#define STATUS_CSS_RESETS_EXHAUSTED __constant_cpu_to_le32(0xC000030B)
+#define STATUS_PKINIT_FAILURE __constant_cpu_to_le32(0xC0000320)
+#define STATUS_SMARTCARD_SUBSYSTEM_FAILURE __constant_cpu_to_le32(0xC0000321)
+#define STATUS_NO_KERB_KEY __constant_cpu_to_le32(0xC0000322)
+#define STATUS_HOST_DOWN __constant_cpu_to_le32(0xC0000350)
+#define STATUS_UNSUPPORTED_PREAUTH __constant_cpu_to_le32(0xC0000351)
+#define STATUS_EFS_ALG_BLOB_TOO_BIG __constant_cpu_to_le32(0xC0000352)
+#define STATUS_PORT_NOT_SET __constant_cpu_to_le32(0xC0000353)
+#define STATUS_DEBUGGER_INACTIVE __constant_cpu_to_le32(0xC0000354)
+#define STATUS_DS_VERSION_CHECK_FAILURE __constant_cpu_to_le32(0xC0000355)
+#define STATUS_AUDITING_DISABLED __constant_cpu_to_le32(0xC0000356)
+#define STATUS_PRENT4_MACHINE_ACCOUNT __constant_cpu_to_le32(0xC0000357)
+#define STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER __constant_cpu_to_le32(0xC0000358)
+#define STATUS_INVALID_IMAGE_WIN_32 __constant_cpu_to_le32(0xC0000359)
+#define STATUS_INVALID_IMAGE_WIN_64 __constant_cpu_to_le32(0xC000035A)
+#define STATUS_BAD_BINDINGS __constant_cpu_to_le32(0xC000035B)
+#define STATUS_NETWORK_SESSION_EXPIRED __constant_cpu_to_le32(0xC000035C)
+#define STATUS_APPHELP_BLOCK __constant_cpu_to_le32(0xC000035D)
+#define STATUS_ALL_SIDS_FILTERED __constant_cpu_to_le32(0xC000035E)
+#define STATUS_NOT_SAFE_MODE_DRIVER __constant_cpu_to_le32(0xC000035F)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT __constant_cpu_to_le32(0xC0000361)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_PATH __constant_cpu_to_le32(0xC0000362)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER __constant_cpu_to_le32(0xC0000363)
+#define STATUS_ACCESS_DISABLED_BY_POLICY_OTHER __constant_cpu_to_le32(0xC0000364)
+#define STATUS_FAILED_DRIVER_ENTRY __constant_cpu_to_le32(0xC0000365)
+#define STATUS_DEVICE_ENUMERATION_ERROR __constant_cpu_to_le32(0xC0000366)
+#define STATUS_MOUNT_POINT_NOT_RESOLVED __constant_cpu_to_le32(0xC0000368)
+#define STATUS_INVALID_DEVICE_OBJECT_PARAMETER __constant_cpu_to_le32(0xC0000369)
+#define STATUS_MCA_OCCURED __constant_cpu_to_le32(0xC000036A)
+#define STATUS_DRIVER_BLOCKED_CRITICAL __constant_cpu_to_le32(0xC000036B)
+#define STATUS_DRIVER_BLOCKED __constant_cpu_to_le32(0xC000036C)
+#define STATUS_DRIVER_DATABASE_ERROR __constant_cpu_to_le32(0xC000036D)
+#define STATUS_SYSTEM_HIVE_TOO_LARGE __constant_cpu_to_le32(0xC000036E)
+#define STATUS_INVALID_IMPORT_OF_NON_DLL __constant_cpu_to_le32(0xC000036F)
+#define STATUS_NO_SECRETS __constant_cpu_to_le32(0xC0000371)
+#define STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY __constant_cpu_to_le32(0xC0000372)
+#define STATUS_FAILED_STACK_SWITCH __constant_cpu_to_le32(0xC0000373)
+#define STATUS_HEAP_CORRUPTION __constant_cpu_to_le32(0xC0000374)
+#define STATUS_SMARTCARD_WRONG_PIN __constant_cpu_to_le32(0xC0000380)
+#define STATUS_SMARTCARD_CARD_BLOCKED __constant_cpu_to_le32(0xC0000381)
+#define STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED __constant_cpu_to_le32(0xC0000382)
+#define STATUS_SMARTCARD_NO_CARD __constant_cpu_to_le32(0xC0000383)
+#define STATUS_SMARTCARD_NO_KEY_CONTAINER __constant_cpu_to_le32(0xC0000384)
+#define STATUS_SMARTCARD_NO_CERTIFICATE __constant_cpu_to_le32(0xC0000385)
+#define STATUS_SMARTCARD_NO_KEYSET __constant_cpu_to_le32(0xC0000386)
+#define STATUS_SMARTCARD_IO_ERROR __constant_cpu_to_le32(0xC0000387)
+#define STATUS_DOWNGRADE_DETECTED __constant_cpu_to_le32(0xC0000388)
+#define STATUS_SMARTCARD_CERT_REVOKED __constant_cpu_to_le32(0xC0000389)
+#define STATUS_ISSUING_CA_UNTRUSTED __constant_cpu_to_le32(0xC000038A)
+#define STATUS_REVOCATION_OFFLINE_C __constant_cpu_to_le32(0xC000038B)
+#define STATUS_PKINIT_CLIENT_FAILURE __constant_cpu_to_le32(0xC000038C)
+#define STATUS_SMARTCARD_CERT_EXPIRED __constant_cpu_to_le32(0xC000038D)
+#define STATUS_DRIVER_FAILED_PRIOR_UNLOAD __constant_cpu_to_le32(0xC000038E)
+#define STATUS_SMARTCARD_SILENT_CONTEXT __constant_cpu_to_le32(0xC000038F)
+#define STATUS_PER_USER_TRUST_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000401)
+#define STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000402)
+#define STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000403)
+#define STATUS_DS_NAME_NOT_UNIQUE __constant_cpu_to_le32(0xC0000404)
+#define STATUS_DS_DUPLICATE_ID_FOUND __constant_cpu_to_le32(0xC0000405)
+#define STATUS_DS_GROUP_CONVERSION_ERROR __constant_cpu_to_le32(0xC0000406)
+#define STATUS_VOLSNAP_PREPARE_HIBERNATE __constant_cpu_to_le32(0xC0000407)
+#define STATUS_USER2USER_REQUIRED __constant_cpu_to_le32(0xC0000408)
+#define STATUS_STACK_BUFFER_OVERRUN __constant_cpu_to_le32(0xC0000409)
+#define STATUS_NO_S4U_PROT_SUPPORT __constant_cpu_to_le32(0xC000040A)
+#define STATUS_CROSSREALM_DELEGATION_FAILURE __constant_cpu_to_le32(0xC000040B)
+#define STATUS_REVOCATION_OFFLINE_KDC __constant_cpu_to_le32(0xC000040C)
+#define STATUS_ISSUING_CA_UNTRUSTED_KDC __constant_cpu_to_le32(0xC000040D)
+#define STATUS_KDC_CERT_EXPIRED __constant_cpu_to_le32(0xC000040E)
+#define STATUS_KDC_CERT_REVOKED __constant_cpu_to_le32(0xC000040F)
+#define STATUS_PARAMETER_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000410)
+#define STATUS_HIBERNATION_FAILURE __constant_cpu_to_le32(0xC0000411)
+#define STATUS_DELAY_LOAD_FAILED __constant_cpu_to_le32(0xC0000412)
+#define STATUS_AUTHENTICATION_FIREWALL_FAILED __constant_cpu_to_le32(0xC0000413)
+#define STATUS_VDM_DISALLOWED __constant_cpu_to_le32(0xC0000414)
+#define STATUS_HUNG_DISPLAY_DRIVER_THREAD __constant_cpu_to_le32(0xC0000415)
+#define STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE __constant_cpu_to_le32(0xC0000416)
+#define STATUS_INVALID_CRUNTIME_PARAMETER __constant_cpu_to_le32(0xC0000417)
+#define STATUS_NTLM_BLOCKED __constant_cpu_to_le32(0xC0000418)
+#define STATUS_ASSERTION_FAILURE __constant_cpu_to_le32(0xC0000420)
+#define STATUS_VERIFIER_STOP __constant_cpu_to_le32(0xC0000421)
+#define STATUS_CALLBACK_POP_STACK __constant_cpu_to_le32(0xC0000423)
+#define STATUS_INCOMPATIBLE_DRIVER_BLOCKED __constant_cpu_to_le32(0xC0000424)
+#define STATUS_HIVE_UNLOADED __constant_cpu_to_le32(0xC0000425)
+#define STATUS_COMPRESSION_DISABLED __constant_cpu_to_le32(0xC0000426)
+#define STATUS_FILE_SYSTEM_LIMITATION __constant_cpu_to_le32(0xC0000427)
+#define STATUS_INVALID_IMAGE_HASH __constant_cpu_to_le32(0xC0000428)
+#define STATUS_NOT_CAPABLE __constant_cpu_to_le32(0xC0000429)
+#define STATUS_REQUEST_OUT_OF_SEQUENCE __constant_cpu_to_le32(0xC000042A)
+#define STATUS_IMPLEMENTATION_LIMIT __constant_cpu_to_le32(0xC000042B)
+#define STATUS_ELEVATION_REQUIRED __constant_cpu_to_le32(0xC000042C)
+#define STATUS_BEYOND_VDL __constant_cpu_to_le32(0xC0000432)
+#define STATUS_ENCOUNTERED_WRITE_IN_PROGRESS __constant_cpu_to_le32(0xC0000433)
+#define STATUS_PTE_CHANGED __constant_cpu_to_le32(0xC0000434)
+#define STATUS_PURGE_FAILED __constant_cpu_to_le32(0xC0000435)
+#define STATUS_CRED_REQUIRES_CONFIRMATION __constant_cpu_to_le32(0xC0000440)
+#define STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE __constant_cpu_to_le32(0xC0000441)
+#define STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER __constant_cpu_to_le32(0xC0000442)
+#define STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE __constant_cpu_to_le32(0xC0000443)
+#define STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE __constant_cpu_to_le32(0xC0000444)
+#define STATUS_CS_ENCRYPTION_FILE_NOT_CSE __constant_cpu_to_le32(0xC0000445)
+#define STATUS_INVALID_LABEL __constant_cpu_to_le32(0xC0000446)
+#define STATUS_DRIVER_PROCESS_TERMINATED __constant_cpu_to_le32(0xC0000450)
+#define STATUS_AMBIGUOUS_SYSTEM_DEVICE __constant_cpu_to_le32(0xC0000451)
+#define STATUS_SYSTEM_DEVICE_NOT_FOUND __constant_cpu_to_le32(0xC0000452)
+#define STATUS_RESTART_BOOT_APPLICATION __constant_cpu_to_le32(0xC0000453)
+#define STATUS_INVALID_TASK_NAME __constant_cpu_to_le32(0xC0000500)
+#define STATUS_INVALID_TASK_INDEX __constant_cpu_to_le32(0xC0000501)
+#define STATUS_THREAD_ALREADY_IN_TASK __constant_cpu_to_le32(0xC0000502)
+#define STATUS_CALLBACK_BYPASS __constant_cpu_to_le32(0xC0000503)
+#define STATUS_PORT_CLOSED __constant_cpu_to_le32(0xC0000700)
+#define STATUS_MESSAGE_LOST __constant_cpu_to_le32(0xC0000701)
+#define STATUS_INVALID_MESSAGE __constant_cpu_to_le32(0xC0000702)
+#define STATUS_REQUEST_CANCELED __constant_cpu_to_le32(0xC0000703)
+#define STATUS_RECURSIVE_DISPATCH __constant_cpu_to_le32(0xC0000704)
+#define STATUS_LPC_RECEIVE_BUFFER_EXPECTED __constant_cpu_to_le32(0xC0000705)
+#define STATUS_LPC_INVALID_CONNECTION_USAGE __constant_cpu_to_le32(0xC0000706)
+#define STATUS_LPC_REQUESTS_NOT_ALLOWED __constant_cpu_to_le32(0xC0000707)
+#define STATUS_RESOURCE_IN_USE __constant_cpu_to_le32(0xC0000708)
+#define STATUS_HARDWARE_MEMORY_ERROR __constant_cpu_to_le32(0xC0000709)
+#define STATUS_THREADPOOL_HANDLE_EXCEPTION __constant_cpu_to_le32(0xC000070A)
+#define STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED __constant_cpu_to_le32(0xC000070B)
+#define STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED __constant_cpu_to_le32(0xC000070C)
+#define STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED __constant_cpu_to_le32(0xC000070D)
+#define STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED __constant_cpu_to_le32(0xC000070E)
+#define STATUS_THREADPOOL_RELEASED_DURING_OPERATION __constant_cpu_to_le32(0xC000070F)
+#define STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING __constant_cpu_to_le32(0xC0000710)
+#define STATUS_APC_RETURNED_WHILE_IMPERSONATING __constant_cpu_to_le32(0xC0000711)
+#define STATUS_PROCESS_IS_PROTECTED __constant_cpu_to_le32(0xC0000712)
+#define STATUS_MCA_EXCEPTION __constant_cpu_to_le32(0xC0000713)
+#define STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE __constant_cpu_to_le32(0xC0000714)
+#define STATUS_SYMLINK_CLASS_DISABLED __constant_cpu_to_le32(0xC0000715)
+#define STATUS_INVALID_IDN_NORMALIZATION __constant_cpu_to_le32(0xC0000716)
+#define STATUS_NO_UNICODE_TRANSLATION __constant_cpu_to_le32(0xC0000717)
+#define STATUS_ALREADY_REGISTERED __constant_cpu_to_le32(0xC0000718)
+#define STATUS_CONTEXT_MISMATCH __constant_cpu_to_le32(0xC0000719)
+#define STATUS_PORT_ALREADY_HAS_COMPLETION_LIST __constant_cpu_to_le32(0xC000071A)
+#define STATUS_CALLBACK_RETURNED_THREAD_PRIORITY __constant_cpu_to_le32(0xC000071B)
+#define STATUS_INVALID_THREAD __constant_cpu_to_le32(0xC000071C)
+#define STATUS_CALLBACK_RETURNED_TRANSACTION __constant_cpu_to_le32(0xC000071D)
+#define STATUS_CALLBACK_RETURNED_LDR_LOCK __constant_cpu_to_le32(0xC000071E)
+#define STATUS_CALLBACK_RETURNED_LANG __constant_cpu_to_le32(0xC000071F)
+#define STATUS_CALLBACK_RETURNED_PRI_BACK __constant_cpu_to_le32(0xC0000720)
+#define STATUS_CALLBACK_RETURNED_THREAD_AFFINITY __constant_cpu_to_le32(0xC0000721)
+#define STATUS_DISK_REPAIR_DISABLED __constant_cpu_to_le32(0xC0000800)
+#define STATUS_DS_DOMAIN_RENAME_IN_PROGRESS __constant_cpu_to_le32(0xC0000801)
+#define STATUS_DISK_QUOTA_EXCEEDED __constant_cpu_to_le32(0xC0000802)
+#define STATUS_CONTENT_BLOCKED __constant_cpu_to_le32(0xC0000804)
+#define STATUS_BAD_CLUSTERS __constant_cpu_to_le32(0xC0000805)
+#define STATUS_VOLUME_DIRTY __constant_cpu_to_le32(0xC0000806)
+#define STATUS_FILE_CHECKED_OUT __constant_cpu_to_le32(0xC0000901)
+#define STATUS_CHECKOUT_REQUIRED __constant_cpu_to_le32(0xC0000902)
+#define STATUS_BAD_FILE_TYPE __constant_cpu_to_le32(0xC0000903)
+#define STATUS_FILE_TOO_LARGE __constant_cpu_to_le32(0xC0000904)
+#define STATUS_FORMS_AUTH_REQUIRED __constant_cpu_to_le32(0xC0000905)
+#define STATUS_VIRUS_INFECTED __constant_cpu_to_le32(0xC0000906)
+#define STATUS_VIRUS_DELETED __constant_cpu_to_le32(0xC0000907)
+#define STATUS_BAD_MCFG_TABLE __constant_cpu_to_le32(0xC0000908)
+#define STATUS_WOW_ASSERTION __constant_cpu_to_le32(0xC0009898)
+#define STATUS_INVALID_SIGNATURE __constant_cpu_to_le32(0xC000A000)
+#define STATUS_HMAC_NOT_SUPPORTED __constant_cpu_to_le32(0xC000A001)
+#define STATUS_IPSEC_QUEUE_OVERFLOW __constant_cpu_to_le32(0xC000A010)
+#define STATUS_ND_QUEUE_OVERFLOW __constant_cpu_to_le32(0xC000A011)
+#define STATUS_HOPLIMIT_EXCEEDED __constant_cpu_to_le32(0xC000A012)
+#define STATUS_PROTOCOL_NOT_SUPPORTED __constant_cpu_to_le32(0xC000A013)
+#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED __constant_cpu_to_le32(0xC000A080)
+#define STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR __constant_cpu_to_le32(0xC000A081)
+#define STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR __constant_cpu_to_le32(0xC000A082)
+#define STATUS_XML_PARSE_ERROR __constant_cpu_to_le32(0xC000A083)
+#define STATUS_XMLDSIG_ERROR __constant_cpu_to_le32(0xC000A084)
+#define STATUS_WRONG_COMPARTMENT __constant_cpu_to_le32(0xC000A085)
+#define STATUS_AUTHIP_FAILURE __constant_cpu_to_le32(0xC000A086)
+#define DBG_NO_STATE_CHANGE __constant_cpu_to_le32(0xC0010001)
+#define DBG_APP_NOT_IDLE __constant_cpu_to_le32(0xC0010002)
+#define RPC_NT_INVALID_STRING_BINDING __constant_cpu_to_le32(0xC0020001)
+#define RPC_NT_WRONG_KIND_OF_BINDING __constant_cpu_to_le32(0xC0020002)
+#define RPC_NT_INVALID_BINDING __constant_cpu_to_le32(0xC0020003)
+#define RPC_NT_PROTSEQ_NOT_SUPPORTED __constant_cpu_to_le32(0xC0020004)
+#define RPC_NT_INVALID_RPC_PROTSEQ __constant_cpu_to_le32(0xC0020005)
+#define RPC_NT_INVALID_STRING_UUID __constant_cpu_to_le32(0xC0020006)
+#define RPC_NT_INVALID_ENDPOINT_FORMAT __constant_cpu_to_le32(0xC0020007)
+#define RPC_NT_INVALID_NET_ADDR __constant_cpu_to_le32(0xC0020008)
+#define RPC_NT_NO_ENDPOINT_FOUND __constant_cpu_to_le32(0xC0020009)
+#define RPC_NT_INVALID_TIMEOUT __constant_cpu_to_le32(0xC002000A)
+#define RPC_NT_OBJECT_NOT_FOUND __constant_cpu_to_le32(0xC002000B)
+#define RPC_NT_ALREADY_REGISTERED __constant_cpu_to_le32(0xC002000C)
+#define RPC_NT_TYPE_ALREADY_REGISTERED __constant_cpu_to_le32(0xC002000D)
+#define RPC_NT_ALREADY_LISTENING __constant_cpu_to_le32(0xC002000E)
+#define RPC_NT_NO_PROTSEQS_REGISTERED __constant_cpu_to_le32(0xC002000F)
+#define RPC_NT_NOT_LISTENING __constant_cpu_to_le32(0xC0020010)
+#define RPC_NT_UNKNOWN_MGR_TYPE __constant_cpu_to_le32(0xC0020011)
+#define RPC_NT_UNKNOWN_IF __constant_cpu_to_le32(0xC0020012)
+#define RPC_NT_NO_BINDINGS __constant_cpu_to_le32(0xC0020013)
+#define RPC_NT_NO_PROTSEQS __constant_cpu_to_le32(0xC0020014)
+#define RPC_NT_CANT_CREATE_ENDPOINT __constant_cpu_to_le32(0xC0020015)
+#define RPC_NT_OUT_OF_RESOURCES __constant_cpu_to_le32(0xC0020016)
+#define RPC_NT_SERVER_UNAVAILABLE __constant_cpu_to_le32(0xC0020017)
+#define RPC_NT_SERVER_TOO_BUSY __constant_cpu_to_le32(0xC0020018)
+#define RPC_NT_INVALID_NETWORK_OPTIONS __constant_cpu_to_le32(0xC0020019)
+#define RPC_NT_NO_CALL_ACTIVE __constant_cpu_to_le32(0xC002001A)
+#define RPC_NT_CALL_FAILED __constant_cpu_to_le32(0xC002001B)
+#define RPC_NT_CALL_FAILED_DNE __constant_cpu_to_le32(0xC002001C)
+#define RPC_NT_PROTOCOL_ERROR __constant_cpu_to_le32(0xC002001D)
+#define RPC_NT_UNSUPPORTED_TRANS_SYN __constant_cpu_to_le32(0xC002001F)
+#define RPC_NT_UNSUPPORTED_TYPE __constant_cpu_to_le32(0xC0020021)
+#define RPC_NT_INVALID_TAG __constant_cpu_to_le32(0xC0020022)
+#define RPC_NT_INVALID_BOUND __constant_cpu_to_le32(0xC0020023)
+#define RPC_NT_NO_ENTRY_NAME __constant_cpu_to_le32(0xC0020024)
+#define RPC_NT_INVALID_NAME_SYNTAX __constant_cpu_to_le32(0xC0020025)
+#define RPC_NT_UNSUPPORTED_NAME_SYNTAX __constant_cpu_to_le32(0xC0020026)
+#define RPC_NT_UUID_NO_ADDRESS __constant_cpu_to_le32(0xC0020028)
+#define RPC_NT_DUPLICATE_ENDPOINT __constant_cpu_to_le32(0xC0020029)
+#define RPC_NT_UNKNOWN_AUTHN_TYPE __constant_cpu_to_le32(0xC002002A)
+#define RPC_NT_MAX_CALLS_TOO_SMALL __constant_cpu_to_le32(0xC002002B)
+#define RPC_NT_STRING_TOO_LONG __constant_cpu_to_le32(0xC002002C)
+#define RPC_NT_PROTSEQ_NOT_FOUND __constant_cpu_to_le32(0xC002002D)
+#define RPC_NT_PROCNUM_OUT_OF_RANGE __constant_cpu_to_le32(0xC002002E)
+#define RPC_NT_BINDING_HAS_NO_AUTH __constant_cpu_to_le32(0xC002002F)
+#define RPC_NT_UNKNOWN_AUTHN_SERVICE __constant_cpu_to_le32(0xC0020030)
+#define RPC_NT_UNKNOWN_AUTHN_LEVEL __constant_cpu_to_le32(0xC0020031)
+#define RPC_NT_INVALID_AUTH_IDENTITY __constant_cpu_to_le32(0xC0020032)
+#define RPC_NT_UNKNOWN_AUTHZ_SERVICE __constant_cpu_to_le32(0xC0020033)
+#define EPT_NT_INVALID_ENTRY __constant_cpu_to_le32(0xC0020034)
+#define EPT_NT_CANT_PERFORM_OP __constant_cpu_to_le32(0xC0020035)
+#define EPT_NT_NOT_REGISTERED __constant_cpu_to_le32(0xC0020036)
+#define RPC_NT_NOTHING_TO_EXPORT __constant_cpu_to_le32(0xC0020037)
+#define RPC_NT_INCOMPLETE_NAME __constant_cpu_to_le32(0xC0020038)
+#define RPC_NT_INVALID_VERS_OPTION __constant_cpu_to_le32(0xC0020039)
+#define RPC_NT_NO_MORE_MEMBERS __constant_cpu_to_le32(0xC002003A)
+#define RPC_NT_NOT_ALL_OBJS_UNEXPORTED __constant_cpu_to_le32(0xC002003B)
+#define RPC_NT_INTERFACE_NOT_FOUND __constant_cpu_to_le32(0xC002003C)
+#define RPC_NT_ENTRY_ALREADY_EXISTS __constant_cpu_to_le32(0xC002003D)
+#define RPC_NT_ENTRY_NOT_FOUND __constant_cpu_to_le32(0xC002003E)
+#define RPC_NT_NAME_SERVICE_UNAVAILABLE __constant_cpu_to_le32(0xC002003F)
+#define RPC_NT_INVALID_NAF_ID __constant_cpu_to_le32(0xC0020040)
+#define RPC_NT_CANNOT_SUPPORT __constant_cpu_to_le32(0xC0020041)
+#define RPC_NT_NO_CONTEXT_AVAILABLE __constant_cpu_to_le32(0xC0020042)
+#define RPC_NT_INTERNAL_ERROR __constant_cpu_to_le32(0xC0020043)
+#define RPC_NT_ZERO_DIVIDE __constant_cpu_to_le32(0xC0020044)
+#define RPC_NT_ADDRESS_ERROR __constant_cpu_to_le32(0xC0020045)
+#define RPC_NT_FP_DIV_ZERO __constant_cpu_to_le32(0xC0020046)
+#define RPC_NT_FP_UNDERFLOW __constant_cpu_to_le32(0xC0020047)
+#define RPC_NT_FP_OVERFLOW __constant_cpu_to_le32(0xC0020048)
+#define RPC_NT_CALL_IN_PROGRESS __constant_cpu_to_le32(0xC0020049)
+#define RPC_NT_NO_MORE_BINDINGS __constant_cpu_to_le32(0xC002004A)
+#define RPC_NT_GROUP_MEMBER_NOT_FOUND __constant_cpu_to_le32(0xC002004B)
+#define EPT_NT_CANT_CREATE __constant_cpu_to_le32(0xC002004C)
+#define RPC_NT_INVALID_OBJECT __constant_cpu_to_le32(0xC002004D)
+#define RPC_NT_NO_INTERFACES __constant_cpu_to_le32(0xC002004F)
+#define RPC_NT_CALL_CANCELLED __constant_cpu_to_le32(0xC0020050)
+#define RPC_NT_BINDING_INCOMPLETE __constant_cpu_to_le32(0xC0020051)
+#define RPC_NT_COMM_FAILURE __constant_cpu_to_le32(0xC0020052)
+#define RPC_NT_UNSUPPORTED_AUTHN_LEVEL __constant_cpu_to_le32(0xC0020053)
+#define RPC_NT_NO_PRINC_NAME __constant_cpu_to_le32(0xC0020054)
+#define RPC_NT_NOT_RPC_ERROR __constant_cpu_to_le32(0xC0020055)
+#define RPC_NT_SEC_PKG_ERROR __constant_cpu_to_le32(0xC0020057)
+#define RPC_NT_NOT_CANCELLED __constant_cpu_to_le32(0xC0020058)
+#define RPC_NT_INVALID_ASYNC_HANDLE __constant_cpu_to_le32(0xC0020062)
+#define RPC_NT_INVALID_ASYNC_CALL __constant_cpu_to_le32(0xC0020063)
+#define RPC_NT_PROXY_ACCESS_DENIED __constant_cpu_to_le32(0xC0020064)
+#define RPC_NT_NO_MORE_ENTRIES __constant_cpu_to_le32(0xC0030001)
+#define RPC_NT_SS_CHAR_TRANS_OPEN_FAIL __constant_cpu_to_le32(0xC0030002)
+#define RPC_NT_SS_CHAR_TRANS_SHORT_FILE __constant_cpu_to_le32(0xC0030003)
+#define RPC_NT_SS_IN_NULL_CONTEXT __constant_cpu_to_le32(0xC0030004)
+#define RPC_NT_SS_CONTEXT_MISMATCH __constant_cpu_to_le32(0xC0030005)
+#define RPC_NT_SS_CONTEXT_DAMAGED __constant_cpu_to_le32(0xC0030006)
+#define RPC_NT_SS_HANDLES_MISMATCH __constant_cpu_to_le32(0xC0030007)
+#define RPC_NT_SS_CANNOT_GET_CALL_HANDLE __constant_cpu_to_le32(0xC0030008)
+#define RPC_NT_NULL_REF_POINTER __constant_cpu_to_le32(0xC0030009)
+#define RPC_NT_ENUM_VALUE_OUT_OF_RANGE __constant_cpu_to_le32(0xC003000A)
+#define RPC_NT_BYTE_COUNT_TOO_SMALL __constant_cpu_to_le32(0xC003000B)
+#define RPC_NT_BAD_STUB_DATA __constant_cpu_to_le32(0xC003000C)
+#define RPC_NT_INVALID_ES_ACTION __constant_cpu_to_le32(0xC0030059)
+#define RPC_NT_WRONG_ES_VERSION __constant_cpu_to_le32(0xC003005A)
+#define RPC_NT_WRONG_STUB_VERSION __constant_cpu_to_le32(0xC003005B)
+#define RPC_NT_INVALID_PIPE_OBJECT __constant_cpu_to_le32(0xC003005C)
+#define RPC_NT_INVALID_PIPE_OPERATION __constant_cpu_to_le32(0xC003005D)
+#define RPC_NT_WRONG_PIPE_VERSION __constant_cpu_to_le32(0xC003005E)
+#define RPC_NT_PIPE_CLOSED __constant_cpu_to_le32(0xC003005F)
+#define RPC_NT_PIPE_DISCIPLINE_ERROR __constant_cpu_to_le32(0xC0030060)
+#define RPC_NT_PIPE_EMPTY __constant_cpu_to_le32(0xC0030061)
+#define STATUS_PNP_BAD_MPS_TABLE __constant_cpu_to_le32(0xC0040035)
+#define STATUS_PNP_TRANSLATION_FAILED __constant_cpu_to_le32(0xC0040036)
+#define STATUS_PNP_IRQ_TRANSLATION_FAILED __constant_cpu_to_le32(0xC0040037)
+#define STATUS_PNP_INVALID_ID __constant_cpu_to_le32(0xC0040038)
+#define STATUS_IO_REISSUE_AS_CACHED __constant_cpu_to_le32(0xC0040039)
+#define STATUS_CTX_WINSTATION_NAME_INVALID __constant_cpu_to_le32(0xC00A0001)
+#define STATUS_CTX_INVALID_PD __constant_cpu_to_le32(0xC00A0002)
+#define STATUS_CTX_PD_NOT_FOUND __constant_cpu_to_le32(0xC00A0003)
+#define STATUS_CTX_CLOSE_PENDING __constant_cpu_to_le32(0xC00A0006)
+#define STATUS_CTX_NO_OUTBUF __constant_cpu_to_le32(0xC00A0007)
+#define STATUS_CTX_MODEM_INF_NOT_FOUND __constant_cpu_to_le32(0xC00A0008)
+#define STATUS_CTX_INVALID_MODEMNAME __constant_cpu_to_le32(0xC00A0009)
+#define STATUS_CTX_RESPONSE_ERROR __constant_cpu_to_le32(0xC00A000A)
+#define STATUS_CTX_MODEM_RESPONSE_TIMEOUT __constant_cpu_to_le32(0xC00A000B)
+#define STATUS_CTX_MODEM_RESPONSE_NO_CARRIER __constant_cpu_to_le32(0xC00A000C)
+#define STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE __constant_cpu_to_le32(0xC00A000D)
+#define STATUS_CTX_MODEM_RESPONSE_BUSY __constant_cpu_to_le32(0xC00A000E)
+#define STATUS_CTX_MODEM_RESPONSE_VOICE __constant_cpu_to_le32(0xC00A000F)
+#define STATUS_CTX_TD_ERROR __constant_cpu_to_le32(0xC00A0010)
+#define STATUS_CTX_LICENSE_CLIENT_INVALID __constant_cpu_to_le32(0xC00A0012)
+#define STATUS_CTX_LICENSE_NOT_AVAILABLE __constant_cpu_to_le32(0xC00A0013)
+#define STATUS_CTX_LICENSE_EXPIRED __constant_cpu_to_le32(0xC00A0014)
+#define STATUS_CTX_WINSTATION_NOT_FOUND __constant_cpu_to_le32(0xC00A0015)
+#define STATUS_CTX_WINSTATION_NAME_COLLISION __constant_cpu_to_le32(0xC00A0016)
+#define STATUS_CTX_WINSTATION_BUSY __constant_cpu_to_le32(0xC00A0017)
+#define STATUS_CTX_BAD_VIDEO_MODE __constant_cpu_to_le32(0xC00A0018)
+#define STATUS_CTX_GRAPHICS_INVALID __constant_cpu_to_le32(0xC00A0022)
+#define STATUS_CTX_NOT_CONSOLE __constant_cpu_to_le32(0xC00A0024)
+#define STATUS_CTX_CLIENT_QUERY_TIMEOUT __constant_cpu_to_le32(0xC00A0026)
+#define STATUS_CTX_CONSOLE_DISCONNECT __constant_cpu_to_le32(0xC00A0027)
+#define STATUS_CTX_CONSOLE_CONNECT __constant_cpu_to_le32(0xC00A0028)
+#define STATUS_CTX_SHADOW_DENIED __constant_cpu_to_le32(0xC00A002A)
+#define STATUS_CTX_WINSTATION_ACCESS_DENIED __constant_cpu_to_le32(0xC00A002B)
+#define STATUS_CTX_INVALID_WD __constant_cpu_to_le32(0xC00A002E)
+#define STATUS_CTX_WD_NOT_FOUND __constant_cpu_to_le32(0xC00A002F)
+#define STATUS_CTX_SHADOW_INVALID __constant_cpu_to_le32(0xC00A0030)
+#define STATUS_CTX_SHADOW_DISABLED __constant_cpu_to_le32(0xC00A0031)
+#define STATUS_RDP_PROTOCOL_ERROR __constant_cpu_to_le32(0xC00A0032)
+#define STATUS_CTX_CLIENT_LICENSE_NOT_SET __constant_cpu_to_le32(0xC00A0033)
+#define STATUS_CTX_CLIENT_LICENSE_IN_USE __constant_cpu_to_le32(0xC00A0034)
+#define STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE __constant_cpu_to_le32(0xC00A0035)
+#define STATUS_CTX_SHADOW_NOT_RUNNING __constant_cpu_to_le32(0xC00A0036)
+#define STATUS_CTX_LOGON_DISABLED __constant_cpu_to_le32(0xC00A0037)
+#define STATUS_CTX_SECURITY_LAYER_ERROR __constant_cpu_to_le32(0xC00A0038)
+#define STATUS_TS_INCOMPATIBLE_SESSIONS __constant_cpu_to_le32(0xC00A0039)
+#define STATUS_MUI_FILE_NOT_FOUND __constant_cpu_to_le32(0xC00B0001)
+#define STATUS_MUI_INVALID_FILE __constant_cpu_to_le32(0xC00B0002)
+#define STATUS_MUI_INVALID_RC_CONFIG __constant_cpu_to_le32(0xC00B0003)
+#define STATUS_MUI_INVALID_LOCALE_NAME __constant_cpu_to_le32(0xC00B0004)
+#define STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME __constant_cpu_to_le32(0xC00B0005)
+#define STATUS_MUI_FILE_NOT_LOADED __constant_cpu_to_le32(0xC00B0006)
+#define STATUS_RESOURCE_ENUM_USER_STOP __constant_cpu_to_le32(0xC00B0007)
+#define STATUS_CLUSTER_INVALID_NODE __constant_cpu_to_le32(0xC0130001)
+#define STATUS_CLUSTER_NODE_EXISTS __constant_cpu_to_le32(0xC0130002)
+#define STATUS_CLUSTER_JOIN_IN_PROGRESS __constant_cpu_to_le32(0xC0130003)
+#define STATUS_CLUSTER_NODE_NOT_FOUND __constant_cpu_to_le32(0xC0130004)
+#define STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND __constant_cpu_to_le32(0xC0130005)
+#define STATUS_CLUSTER_NETWORK_EXISTS __constant_cpu_to_le32(0xC0130006)
+#define STATUS_CLUSTER_NETWORK_NOT_FOUND __constant_cpu_to_le32(0xC0130007)
+#define STATUS_CLUSTER_NETINTERFACE_EXISTS __constant_cpu_to_le32(0xC0130008)
+#define STATUS_CLUSTER_NETINTERFACE_NOT_FOUND __constant_cpu_to_le32(0xC0130009)
+#define STATUS_CLUSTER_INVALID_REQUEST __constant_cpu_to_le32(0xC013000A)
+#define STATUS_CLUSTER_INVALID_NETWORK_PROVIDER __constant_cpu_to_le32(0xC013000B)
+#define STATUS_CLUSTER_NODE_DOWN __constant_cpu_to_le32(0xC013000C)
+#define STATUS_CLUSTER_NODE_UNREACHABLE __constant_cpu_to_le32(0xC013000D)
+#define STATUS_CLUSTER_NODE_NOT_MEMBER __constant_cpu_to_le32(0xC013000E)
+#define STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS __constant_cpu_to_le32(0xC013000F)
+#define STATUS_CLUSTER_INVALID_NETWORK __constant_cpu_to_le32(0xC0130010)
+#define STATUS_CLUSTER_NO_NET_ADAPTERS __constant_cpu_to_le32(0xC0130011)
+#define STATUS_CLUSTER_NODE_UP __constant_cpu_to_le32(0xC0130012)
+#define STATUS_CLUSTER_NODE_PAUSED __constant_cpu_to_le32(0xC0130013)
+#define STATUS_CLUSTER_NODE_NOT_PAUSED __constant_cpu_to_le32(0xC0130014)
+#define STATUS_CLUSTER_NO_SECURITY_CONTEXT __constant_cpu_to_le32(0xC0130015)
+#define STATUS_CLUSTER_NETWORK_NOT_INTERNAL __constant_cpu_to_le32(0xC0130016)
+#define STATUS_CLUSTER_POISONED __constant_cpu_to_le32(0xC0130017)
+#define STATUS_ACPI_INVALID_OPCODE __constant_cpu_to_le32(0xC0140001)
+#define STATUS_ACPI_STACK_OVERFLOW __constant_cpu_to_le32(0xC0140002)
+#define STATUS_ACPI_ASSERT_FAILED __constant_cpu_to_le32(0xC0140003)
+#define STATUS_ACPI_INVALID_INDEX __constant_cpu_to_le32(0xC0140004)
+#define STATUS_ACPI_INVALID_ARGUMENT __constant_cpu_to_le32(0xC0140005)
+#define STATUS_ACPI_FATAL __constant_cpu_to_le32(0xC0140006)
+#define STATUS_ACPI_INVALID_SUPERNAME __constant_cpu_to_le32(0xC0140007)
+#define STATUS_ACPI_INVALID_ARGTYPE __constant_cpu_to_le32(0xC0140008)
+#define STATUS_ACPI_INVALID_OBJTYPE __constant_cpu_to_le32(0xC0140009)
+#define STATUS_ACPI_INVALID_TARGETTYPE __constant_cpu_to_le32(0xC014000A)
+#define STATUS_ACPI_INCORRECT_ARGUMENT_COUNT __constant_cpu_to_le32(0xC014000B)
+#define STATUS_ACPI_ADDRESS_NOT_MAPPED __constant_cpu_to_le32(0xC014000C)
+#define STATUS_ACPI_INVALID_EVENTTYPE __constant_cpu_to_le32(0xC014000D)
+#define STATUS_ACPI_HANDLER_COLLISION __constant_cpu_to_le32(0xC014000E)
+#define STATUS_ACPI_INVALID_DATA __constant_cpu_to_le32(0xC014000F)
+#define STATUS_ACPI_INVALID_REGION __constant_cpu_to_le32(0xC0140010)
+#define STATUS_ACPI_INVALID_ACCESS_SIZE __constant_cpu_to_le32(0xC0140011)
+#define STATUS_ACPI_ACQUIRE_GLOBAL_LOCK __constant_cpu_to_le32(0xC0140012)
+#define STATUS_ACPI_ALREADY_INITIALIZED __constant_cpu_to_le32(0xC0140013)
+#define STATUS_ACPI_NOT_INITIALIZED __constant_cpu_to_le32(0xC0140014)
+#define STATUS_ACPI_INVALID_MUTEX_LEVEL __constant_cpu_to_le32(0xC0140015)
+#define STATUS_ACPI_MUTEX_NOT_OWNED __constant_cpu_to_le32(0xC0140016)
+#define STATUS_ACPI_MUTEX_NOT_OWNER __constant_cpu_to_le32(0xC0140017)
+#define STATUS_ACPI_RS_ACCESS __constant_cpu_to_le32(0xC0140018)
+#define STATUS_ACPI_INVALID_TABLE __constant_cpu_to_le32(0xC0140019)
+#define STATUS_ACPI_REG_HANDLER_FAILED __constant_cpu_to_le32(0xC0140020)
+#define STATUS_ACPI_POWER_REQUEST_FAILED __constant_cpu_to_le32(0xC0140021)
+#define STATUS_SXS_SECTION_NOT_FOUND __constant_cpu_to_le32(0xC0150001)
+#define STATUS_SXS_CANT_GEN_ACTCTX __constant_cpu_to_le32(0xC0150002)
+#define STATUS_SXS_INVALID_ACTCTXDATA_FORMAT __constant_cpu_to_le32(0xC0150003)
+#define STATUS_SXS_ASSEMBLY_NOT_FOUND __constant_cpu_to_le32(0xC0150004)
+#define STATUS_SXS_MANIFEST_FORMAT_ERROR __constant_cpu_to_le32(0xC0150005)
+#define STATUS_SXS_MANIFEST_PARSE_ERROR __constant_cpu_to_le32(0xC0150006)
+#define STATUS_SXS_ACTIVATION_CONTEXT_DISABLED __constant_cpu_to_le32(0xC0150007)
+#define STATUS_SXS_KEY_NOT_FOUND __constant_cpu_to_le32(0xC0150008)
+#define STATUS_SXS_VERSION_CONFLICT __constant_cpu_to_le32(0xC0150009)
+#define STATUS_SXS_WRONG_SECTION_TYPE __constant_cpu_to_le32(0xC015000A)
+#define STATUS_SXS_THREAD_QUERIES_DISABLED __constant_cpu_to_le32(0xC015000B)
+#define STATUS_SXS_ASSEMBLY_MISSING __constant_cpu_to_le32(0xC015000C)
+#define STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET __constant_cpu_to_le32(0xC015000E)
+#define STATUS_SXS_EARLY_DEACTIVATION __constant_cpu_to_le32(0xC015000F)
+#define STATUS_SXS_INVALID_DEACTIVATION __constant_cpu_to_le32(0xC0150010)
+#define STATUS_SXS_MULTIPLE_DEACTIVATION __constant_cpu_to_le32(0xC0150011)
+#define STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY __constant_cpu_to_le32(0xC0150012)
+#define STATUS_SXS_PROCESS_TERMINATION_REQUESTED __constant_cpu_to_le32(0xC0150013)
+#define STATUS_SXS_CORRUPT_ACTIVATION_STACK __constant_cpu_to_le32(0xC0150014)
+#define STATUS_SXS_CORRUPTION __constant_cpu_to_le32(0xC0150015)
+#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE __constant_cpu_to_le32(0xC0150016)
+#define STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME __constant_cpu_to_le32(0xC0150017)
+#define STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE __constant_cpu_to_le32(0xC0150018)
+#define STATUS_SXS_IDENTITY_PARSE_ERROR __constant_cpu_to_le32(0xC0150019)
+#define STATUS_SXS_COMPONENT_STORE_CORRUPT __constant_cpu_to_le32(0xC015001A)
+#define STATUS_SXS_FILE_HASH_MISMATCH __constant_cpu_to_le32(0xC015001B)
+#define STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT __constant_cpu_to_le32(0xC015001C)
+#define STATUS_SXS_IDENTITIES_DIFFERENT __constant_cpu_to_le32(0xC015001D)
+#define STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT __constant_cpu_to_le32(0xC015001E)
+#define STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY __constant_cpu_to_le32(0xC015001F)
+#define STATUS_ADVANCED_INSTALLER_FAILED __constant_cpu_to_le32(0xC0150020)
+#define STATUS_XML_ENCODING_MISMATCH __constant_cpu_to_le32(0xC0150021)
+#define STATUS_SXS_MANIFEST_TOO_BIG __constant_cpu_to_le32(0xC0150022)
+#define STATUS_SXS_SETTING_NOT_REGISTERED __constant_cpu_to_le32(0xC0150023)
+#define STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE __constant_cpu_to_le32(0xC0150024)
+#define STATUS_SMI_PRIMITIVE_INSTALLER_FAILED __constant_cpu_to_le32(0xC0150025)
+#define STATUS_GENERIC_COMMAND_FAILED __constant_cpu_to_le32(0xC0150026)
+#define STATUS_SXS_FILE_HASH_MISSING __constant_cpu_to_le32(0xC0150027)
+#define STATUS_TRANSACTIONAL_CONFLICT __constant_cpu_to_le32(0xC0190001)
+#define STATUS_INVALID_TRANSACTION __constant_cpu_to_le32(0xC0190002)
+#define STATUS_TRANSACTION_NOT_ACTIVE __constant_cpu_to_le32(0xC0190003)
+#define STATUS_TM_INITIALIZATION_FAILED __constant_cpu_to_le32(0xC0190004)
+#define STATUS_RM_NOT_ACTIVE __constant_cpu_to_le32(0xC0190005)
+#define STATUS_RM_METADATA_CORRUPT __constant_cpu_to_le32(0xC0190006)
+#define STATUS_TRANSACTION_NOT_JOINED __constant_cpu_to_le32(0xC0190007)
+#define STATUS_DIRECTORY_NOT_RM __constant_cpu_to_le32(0xC0190008)
+#define STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE __constant_cpu_to_le32(0xC019000A)
+#define STATUS_LOG_RESIZE_INVALID_SIZE __constant_cpu_to_le32(0xC019000B)
+#define STATUS_REMOTE_FILE_VERSION_MISMATCH __constant_cpu_to_le32(0xC019000C)
+#define STATUS_CRM_PROTOCOL_ALREADY_EXISTS __constant_cpu_to_le32(0xC019000F)
+#define STATUS_TRANSACTION_PROPAGATION_FAILED __constant_cpu_to_le32(0xC0190010)
+#define STATUS_CRM_PROTOCOL_NOT_FOUND __constant_cpu_to_le32(0xC0190011)
+#define STATUS_TRANSACTION_SUPERIOR_EXISTS __constant_cpu_to_le32(0xC0190012)
+#define STATUS_TRANSACTION_REQUEST_NOT_VALID __constant_cpu_to_le32(0xC0190013)
+#define STATUS_TRANSACTION_NOT_REQUESTED __constant_cpu_to_le32(0xC0190014)
+#define STATUS_TRANSACTION_ALREADY_ABORTED __constant_cpu_to_le32(0xC0190015)
+#define STATUS_TRANSACTION_ALREADY_COMMITTED __constant_cpu_to_le32(0xC0190016)
+#define STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER __constant_cpu_to_le32(0xC0190017)
+#define STATUS_CURRENT_TRANSACTION_NOT_VALID __constant_cpu_to_le32(0xC0190018)
+#define STATUS_LOG_GROWTH_FAILED __constant_cpu_to_le32(0xC0190019)
+#define STATUS_OBJECT_NO_LONGER_EXISTS __constant_cpu_to_le32(0xC0190021)
+#define STATUS_STREAM_MINIVERSION_NOT_FOUND __constant_cpu_to_le32(0xC0190022)
+#define STATUS_STREAM_MINIVERSION_NOT_VALID __constant_cpu_to_le32(0xC0190023)
+#define STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION __constant_cpu_to_le32(0xC0190024)
+#define STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT __constant_cpu_to_le32(0xC0190025)
+#define STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS __constant_cpu_to_le32(0xC0190026)
+#define STATUS_HANDLE_NO_LONGER_VALID __constant_cpu_to_le32(0xC0190028)
+#define STATUS_LOG_CORRUPTION_DETECTED __constant_cpu_to_le32(0xC0190030)
+#define STATUS_RM_DISCONNECTED __constant_cpu_to_le32(0xC0190032)
+#define STATUS_ENLISTMENT_NOT_SUPERIOR __constant_cpu_to_le32(0xC0190033)
+#define STATUS_FILE_IDENTITY_NOT_PERSISTENT __constant_cpu_to_le32(0xC0190036)
+#define STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY __constant_cpu_to_le32(0xC0190037)
+#define STATUS_CANT_CROSS_RM_BOUNDARY __constant_cpu_to_le32(0xC0190038)
+#define STATUS_TXF_DIR_NOT_EMPTY __constant_cpu_to_le32(0xC0190039)
+#define STATUS_INDOUBT_TRANSACTIONS_EXIST __constant_cpu_to_le32(0xC019003A)
+#define STATUS_TM_VOLATILE __constant_cpu_to_le32(0xC019003B)
+#define STATUS_ROLLBACK_TIMER_EXPIRED __constant_cpu_to_le32(0xC019003C)
+#define STATUS_TXF_ATTRIBUTE_CORRUPT __constant_cpu_to_le32(0xC019003D)
+#define STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION __constant_cpu_to_le32(0xC019003E)
+#define STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED __constant_cpu_to_le32(0xC019003F)
+#define STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE __constant_cpu_to_le32(0xC0190040)
+#define STATUS_TRANSACTION_REQUIRED_PROMOTION __constant_cpu_to_le32(0xC0190043)
+#define STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION __constant_cpu_to_le32(0xC0190044)
+#define STATUS_TRANSACTIONS_NOT_FROZEN __constant_cpu_to_le32(0xC0190045)
+#define STATUS_TRANSACTION_FREEZE_IN_PROGRESS __constant_cpu_to_le32(0xC0190046)
+#define STATUS_NOT_SNAPSHOT_VOLUME __constant_cpu_to_le32(0xC0190047)
+#define STATUS_NO_SAVEPOINT_WITH_OPEN_FILES __constant_cpu_to_le32(0xC0190048)
+#define STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION __constant_cpu_to_le32(0xC0190049)
+#define STATUS_TM_IDENTITY_MISMATCH __constant_cpu_to_le32(0xC019004A)
+#define STATUS_FLOATED_SECTION __constant_cpu_to_le32(0xC019004B)
+#define STATUS_CANNOT_ACCEPT_TRANSACTED_WORK __constant_cpu_to_le32(0xC019004C)
+#define STATUS_CANNOT_ABORT_TRANSACTIONS __constant_cpu_to_le32(0xC019004D)
+#define STATUS_TRANSACTION_NOT_FOUND __constant_cpu_to_le32(0xC019004E)
+#define STATUS_RESOURCEMANAGER_NOT_FOUND __constant_cpu_to_le32(0xC019004F)
+#define STATUS_ENLISTMENT_NOT_FOUND __constant_cpu_to_le32(0xC0190050)
+#define STATUS_TRANSACTIONMANAGER_NOT_FOUND __constant_cpu_to_le32(0xC0190051)
+#define STATUS_TRANSACTIONMANAGER_NOT_ONLINE __constant_cpu_to_le32(0xC0190052)
+#define STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION __constant_cpu_to_le32(0xC0190053)
+#define STATUS_TRANSACTION_NOT_ROOT __constant_cpu_to_le32(0xC0190054)
+#define STATUS_TRANSACTION_OBJECT_EXPIRED __constant_cpu_to_le32(0xC0190055)
+#define STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION __constant_cpu_to_le32(0xC0190056)
+#define STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED __constant_cpu_to_le32(0xC0190057)
+#define STATUS_TRANSACTION_RECORD_TOO_LONG __constant_cpu_to_le32(0xC0190058)
+#define STATUS_NO_LINK_TRACKING_IN_TRANSACTION __constant_cpu_to_le32(0xC0190059)
+#define STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION __constant_cpu_to_le32(0xC019005A)
+#define STATUS_TRANSACTION_INTEGRITY_VIOLATED __constant_cpu_to_le32(0xC019005B)
+#define STATUS_LOG_SECTOR_INVALID __constant_cpu_to_le32(0xC01A0001)
+#define STATUS_LOG_SECTOR_PARITY_INVALID __constant_cpu_to_le32(0xC01A0002)
+#define STATUS_LOG_SECTOR_REMAPPED __constant_cpu_to_le32(0xC01A0003)
+#define STATUS_LOG_BLOCK_INCOMPLETE __constant_cpu_to_le32(0xC01A0004)
+#define STATUS_LOG_INVALID_RANGE __constant_cpu_to_le32(0xC01A0005)
+#define STATUS_LOG_BLOCKS_EXHAUSTED __constant_cpu_to_le32(0xC01A0006)
+#define STATUS_LOG_READ_CONTEXT_INVALID __constant_cpu_to_le32(0xC01A0007)
+#define STATUS_LOG_RESTART_INVALID __constant_cpu_to_le32(0xC01A0008)
+#define STATUS_LOG_BLOCK_VERSION __constant_cpu_to_le32(0xC01A0009)
+#define STATUS_LOG_BLOCK_INVALID __constant_cpu_to_le32(0xC01A000A)
+#define STATUS_LOG_READ_MODE_INVALID __constant_cpu_to_le32(0xC01A000B)
+#define STATUS_LOG_METADATA_CORRUPT __constant_cpu_to_le32(0xC01A000D)
+#define STATUS_LOG_METADATA_INVALID __constant_cpu_to_le32(0xC01A000E)
+#define STATUS_LOG_METADATA_INCONSISTENT __constant_cpu_to_le32(0xC01A000F)
+#define STATUS_LOG_RESERVATION_INVALID __constant_cpu_to_le32(0xC01A0010)
+#define STATUS_LOG_CANT_DELETE __constant_cpu_to_le32(0xC01A0011)
+#define STATUS_LOG_CONTAINER_LIMIT_EXCEEDED __constant_cpu_to_le32(0xC01A0012)
+#define STATUS_LOG_START_OF_LOG __constant_cpu_to_le32(0xC01A0013)
+#define STATUS_LOG_POLICY_ALREADY_INSTALLED __constant_cpu_to_le32(0xC01A0014)
+#define STATUS_LOG_POLICY_NOT_INSTALLED __constant_cpu_to_le32(0xC01A0015)
+#define STATUS_LOG_POLICY_INVALID __constant_cpu_to_le32(0xC01A0016)
+#define STATUS_LOG_POLICY_CONFLICT __constant_cpu_to_le32(0xC01A0017)
+#define STATUS_LOG_PINNED_ARCHIVE_TAIL __constant_cpu_to_le32(0xC01A0018)
+#define STATUS_LOG_RECORD_NONEXISTENT __constant_cpu_to_le32(0xC01A0019)
+#define STATUS_LOG_RECORDS_RESERVED_INVALID __constant_cpu_to_le32(0xC01A001A)
+#define STATUS_LOG_SPACE_RESERVED_INVALID __constant_cpu_to_le32(0xC01A001B)
+#define STATUS_LOG_TAIL_INVALID __constant_cpu_to_le32(0xC01A001C)
+#define STATUS_LOG_FULL __constant_cpu_to_le32(0xC01A001D)
+#define STATUS_LOG_MULTIPLEXED __constant_cpu_to_le32(0xC01A001E)
+#define STATUS_LOG_DEDICATED __constant_cpu_to_le32(0xC01A001F)
+#define STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS __constant_cpu_to_le32(0xC01A0020)
+#define STATUS_LOG_ARCHIVE_IN_PROGRESS __constant_cpu_to_le32(0xC01A0021)
+#define STATUS_LOG_EPHEMERAL __constant_cpu_to_le32(0xC01A0022)
+#define STATUS_LOG_NOT_ENOUGH_CONTAINERS __constant_cpu_to_le32(0xC01A0023)
+#define STATUS_LOG_CLIENT_ALREADY_REGISTERED __constant_cpu_to_le32(0xC01A0024)
+#define STATUS_LOG_CLIENT_NOT_REGISTERED __constant_cpu_to_le32(0xC01A0025)
+#define STATUS_LOG_FULL_HANDLER_IN_PROGRESS __constant_cpu_to_le32(0xC01A0026)
+#define STATUS_LOG_CONTAINER_READ_FAILED __constant_cpu_to_le32(0xC01A0027)
+#define STATUS_LOG_CONTAINER_WRITE_FAILED __constant_cpu_to_le32(0xC01A0028)
+#define STATUS_LOG_CONTAINER_OPEN_FAILED __constant_cpu_to_le32(0xC01A0029)
+#define STATUS_LOG_CONTAINER_STATE_INVALID __constant_cpu_to_le32(0xC01A002A)
+#define STATUS_LOG_STATE_INVALID __constant_cpu_to_le32(0xC01A002B)
+#define STATUS_LOG_PINNED __constant_cpu_to_le32(0xC01A002C)
+#define STATUS_LOG_METADATA_FLUSH_FAILED __constant_cpu_to_le32(0xC01A002D)
+#define STATUS_LOG_INCONSISTENT_SECURITY __constant_cpu_to_le32(0xC01A002E)
+#define STATUS_LOG_APPENDED_FLUSH_FAILED __constant_cpu_to_le32(0xC01A002F)
+#define STATUS_LOG_PINNED_RESERVATION __constant_cpu_to_le32(0xC01A0030)
+#define STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD __constant_cpu_to_le32(0xC01B00EA)
+#define STATUS_FLT_NO_HANDLER_DEFINED __constant_cpu_to_le32(0xC01C0001)
+#define STATUS_FLT_CONTEXT_ALREADY_DEFINED __constant_cpu_to_le32(0xC01C0002)
+#define STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST __constant_cpu_to_le32(0xC01C0003)
+#define STATUS_FLT_DISALLOW_FAST_IO __constant_cpu_to_le32(0xC01C0004)
+#define STATUS_FLT_INVALID_NAME_REQUEST __constant_cpu_to_le32(0xC01C0005)
+#define STATUS_FLT_NOT_SAFE_TO_POST_OPERATION __constant_cpu_to_le32(0xC01C0006)
+#define STATUS_FLT_NOT_INITIALIZED __constant_cpu_to_le32(0xC01C0007)
+#define STATUS_FLT_FILTER_NOT_READY __constant_cpu_to_le32(0xC01C0008)
+#define STATUS_FLT_POST_OPERATION_CLEANUP __constant_cpu_to_le32(0xC01C0009)
+#define STATUS_FLT_INTERNAL_ERROR __constant_cpu_to_le32(0xC01C000A)
+#define STATUS_FLT_DELETING_OBJECT __constant_cpu_to_le32(0xC01C000B)
+#define STATUS_FLT_MUST_BE_NONPAGED_POOL __constant_cpu_to_le32(0xC01C000C)
+#define STATUS_FLT_DUPLICATE_ENTRY __constant_cpu_to_le32(0xC01C000D)
+#define STATUS_FLT_CBDQ_DISABLED __constant_cpu_to_le32(0xC01C000E)
+#define STATUS_FLT_DO_NOT_ATTACH __constant_cpu_to_le32(0xC01C000F)
+#define STATUS_FLT_DO_NOT_DETACH __constant_cpu_to_le32(0xC01C0010)
+#define STATUS_FLT_INSTANCE_ALTITUDE_COLLISION __constant_cpu_to_le32(0xC01C0011)
+#define STATUS_FLT_INSTANCE_NAME_COLLISION __constant_cpu_to_le32(0xC01C0012)
+#define STATUS_FLT_FILTER_NOT_FOUND __constant_cpu_to_le32(0xC01C0013)
+#define STATUS_FLT_VOLUME_NOT_FOUND __constant_cpu_to_le32(0xC01C0014)
+#define STATUS_FLT_INSTANCE_NOT_FOUND __constant_cpu_to_le32(0xC01C0015)
+#define STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND __constant_cpu_to_le32(0xC01C0016)
+#define STATUS_FLT_INVALID_CONTEXT_REGISTRATION __constant_cpu_to_le32(0xC01C0017)
+#define STATUS_FLT_NAME_CACHE_MISS __constant_cpu_to_le32(0xC01C0018)
+#define STATUS_FLT_NO_DEVICE_OBJECT __constant_cpu_to_le32(0xC01C0019)
+#define STATUS_FLT_VOLUME_ALREADY_MOUNTED __constant_cpu_to_le32(0xC01C001A)
+#define STATUS_FLT_ALREADY_ENLISTED __constant_cpu_to_le32(0xC01C001B)
+#define STATUS_FLT_CONTEXT_ALREADY_LINKED __constant_cpu_to_le32(0xC01C001C)
+#define STATUS_FLT_NO_WAITER_FOR_REPLY __constant_cpu_to_le32(0xC01C0020)
+#define STATUS_MONITOR_NO_DESCRIPTOR __constant_cpu_to_le32(0xC01D0001)
+#define STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT __constant_cpu_to_le32(0xC01D0002)
+#define STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM __constant_cpu_to_le32(0xC01D0003)
+#define STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK __constant_cpu_to_le32(0xC01D0004)
+#define STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED __constant_cpu_to_le32(0xC01D0005)
+#define STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK __constant_cpu_to_le32(0xC01D0006)
+#define STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK __constant_cpu_to_le32(0xC01D0007)
+#define STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA __constant_cpu_to_le32(0xC01D0008)
+#define STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK __constant_cpu_to_le32(0xC01D0009)
+#define STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER __constant_cpu_to_le32(0xC01E0000)
+#define STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER __constant_cpu_to_le32(0xC01E0001)
+#define STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER __constant_cpu_to_le32(0xC01E0002)
+#define STATUS_GRAPHICS_ADAPTER_WAS_RESET __constant_cpu_to_le32(0xC01E0003)
+#define STATUS_GRAPHICS_INVALID_DRIVER_MODEL __constant_cpu_to_le32(0xC01E0004)
+#define STATUS_GRAPHICS_PRESENT_MODE_CHANGED __constant_cpu_to_le32(0xC01E0005)
+#define STATUS_GRAPHICS_PRESENT_OCCLUDED __constant_cpu_to_le32(0xC01E0006)
+#define STATUS_GRAPHICS_PRESENT_DENIED __constant_cpu_to_le32(0xC01E0007)
+#define STATUS_GRAPHICS_CANNOTCOLORCONVERT __constant_cpu_to_le32(0xC01E0008)
+#define STATUS_GRAPHICS_NO_VIDEO_MEMORY __constant_cpu_to_le32(0xC01E0100)
+#define STATUS_GRAPHICS_CANT_LOCK_MEMORY __constant_cpu_to_le32(0xC01E0101)
+#define STATUS_GRAPHICS_ALLOCATION_BUSY __constant_cpu_to_le32(0xC01E0102)
+#define STATUS_GRAPHICS_TOO_MANY_REFERENCES __constant_cpu_to_le32(0xC01E0103)
+#define STATUS_GRAPHICS_TRY_AGAIN_LATER __constant_cpu_to_le32(0xC01E0104)
+#define STATUS_GRAPHICS_TRY_AGAIN_NOW __constant_cpu_to_le32(0xC01E0105)
+#define STATUS_GRAPHICS_ALLOCATION_INVALID __constant_cpu_to_le32(0xC01E0106)
+#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE __constant_cpu_to_le32(0xC01E0107)
+#define STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED __constant_cpu_to_le32(0xC01E0108)
+#define STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION __constant_cpu_to_le32(0xC01E0109)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE __constant_cpu_to_le32(0xC01E0110)
+#define STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION __constant_cpu_to_le32(0xC01E0111)
+#define STATUS_GRAPHICS_ALLOCATION_CLOSED __constant_cpu_to_le32(0xC01E0112)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE __constant_cpu_to_le32(0xC01E0113)
+#define STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE __constant_cpu_to_le32(0xC01E0114)
+#define STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE __constant_cpu_to_le32(0xC01E0115)
+#define STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST __constant_cpu_to_le32(0xC01E0116)
+#define STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE __constant_cpu_to_le32(0xC01E0200)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY __constant_cpu_to_le32(0xC01E0300)
+#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0301)
+#define STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0302)
+#define STATUS_GRAPHICS_INVALID_VIDPN __constant_cpu_to_le32(0xC01E0303)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE __constant_cpu_to_le32(0xC01E0304)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET __constant_cpu_to_le32(0xC01E0305)
+#define STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0306)
+#define STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET __constant_cpu_to_le32(0xC01E0308)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET __constant_cpu_to_le32(0xC01E0309)
+#define STATUS_GRAPHICS_INVALID_FREQUENCY __constant_cpu_to_le32(0xC01E030A)
+#define STATUS_GRAPHICS_INVALID_ACTIVE_REGION __constant_cpu_to_le32(0xC01E030B)
+#define STATUS_GRAPHICS_INVALID_TOTAL_REGION __constant_cpu_to_le32(0xC01E030C)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE __constant_cpu_to_le32(0xC01E0310)
+#define STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE __constant_cpu_to_le32(0xC01E0311)
+#define STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET __constant_cpu_to_le32(0xC01E0312)
+#define STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY __constant_cpu_to_le32(0xC01E0313)
+#define STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET __constant_cpu_to_le32(0xC01E0314)
+#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET __constant_cpu_to_le32(0xC01E0315)
+#define STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET __constant_cpu_to_le32(0xC01E0316)
+#define STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET __constant_cpu_to_le32(0xC01E0317)
+#define STATUS_GRAPHICS_TARGET_ALREADY_IN_SET __constant_cpu_to_le32(0xC01E0318)
+#define STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH __constant_cpu_to_le32(0xC01E0319)
+#define STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY __constant_cpu_to_le32(0xC01E031A)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET __constant_cpu_to_le32(0xC01E031B)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE __constant_cpu_to_le32(0xC01E031C)
+#define STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET __constant_cpu_to_le32(0xC01E031D)
+#define STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET __constant_cpu_to_le32(0xC01E031F)
+#define STATUS_GRAPHICS_STALE_MODESET __constant_cpu_to_le32(0xC01E0320)
+#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET __constant_cpu_to_le32(0xC01E0321)
+#define STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE __constant_cpu_to_le32(0xC01E0322)
+#define STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN __constant_cpu_to_le32(0xC01E0323)
+#define STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE __constant_cpu_to_le32(0xC01E0324)
+#define STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION __constant_cpu_to_le32(0xC01E0325)
+#define STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES __constant_cpu_to_le32(0xC01E0326)
+#define STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY __constant_cpu_to_le32(0xC01E0327)
+#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE __constant_cpu_to_le32(0xC01E0328)
+#define STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET __constant_cpu_to_le32(0xC01E0329)
+#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET __constant_cpu_to_le32(0xC01E032A)
+#define STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR __constant_cpu_to_le32(0xC01E032B)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET __constant_cpu_to_le32(0xC01E032C)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET __constant_cpu_to_le32(0xC01E032D)
+#define STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE __constant_cpu_to_le32(0xC01E032E)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE __constant_cpu_to_le32(0xC01E032F)
+#define STATUS_GRAPHICS_RESOURCES_NOT_RELATED __constant_cpu_to_le32(0xC01E0330)
+#define STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE __constant_cpu_to_le32(0xC01E0331)
+#define STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE __constant_cpu_to_le32(0xC01E0332)
+#define STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET __constant_cpu_to_le32(0xC01E0333)
+#define STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER __constant_cpu_to_le32(0xC01E0334)
+#define STATUS_GRAPHICS_NO_VIDPNMGR __constant_cpu_to_le32(0xC01E0335)
+#define STATUS_GRAPHICS_NO_ACTIVE_VIDPN __constant_cpu_to_le32(0xC01E0336)
+#define STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY __constant_cpu_to_le32(0xC01E0337)
+#define STATUS_GRAPHICS_MONITOR_NOT_CONNECTED __constant_cpu_to_le32(0xC01E0338)
+#define STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY __constant_cpu_to_le32(0xC01E0339)
+#define STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE __constant_cpu_to_le32(0xC01E033A)
+#define STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE __constant_cpu_to_le32(0xC01E033B)
+#define STATUS_GRAPHICS_INVALID_STRIDE __constant_cpu_to_le32(0xC01E033C)
+#define STATUS_GRAPHICS_INVALID_PIXELFORMAT __constant_cpu_to_le32(0xC01E033D)
+#define STATUS_GRAPHICS_INVALID_COLORBASIS __constant_cpu_to_le32(0xC01E033E)
+#define STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE __constant_cpu_to_le32(0xC01E033F)
+#define STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY __constant_cpu_to_le32(0xC01E0340)
+#define STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT __constant_cpu_to_le32(0xC01E0341)
+#define STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE __constant_cpu_to_le32(0xC01E0342)
+#define STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN __constant_cpu_to_le32(0xC01E0343)
+#define STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL __constant_cpu_to_le32(0xC01E0344)
+#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION __constant_cpu_to_le32(0xC01E0345)
+#define STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0346)
+#define STATUS_GRAPHICS_INVALID_GAMMA_RAMP __constant_cpu_to_le32(0xC01E0347)
+#define STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0348)
+#define STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0349)
+#define STATUS_GRAPHICS_MODE_NOT_IN_MODESET __constant_cpu_to_le32(0xC01E034A)
+#define STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON __constant_cpu_to_le32(0xC01E034D)
+#define STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE __constant_cpu_to_le32(0xC01E034E)
+#define STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE __constant_cpu_to_le32(0xC01E034F)
+#define STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS __constant_cpu_to_le32(0xC01E0350)
+#define STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING __constant_cpu_to_le32(0xC01E0352)
+#define STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED __constant_cpu_to_le32(0xC01E0353)
+#define STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS __constant_cpu_to_le32(0xC01E0354)
+#define STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT __constant_cpu_to_le32(0xC01E0355)
+#define STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM __constant_cpu_to_le32(0xC01E0356)
+#define STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN __constant_cpu_to_le32(0xC01E0357)
+#define STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT __constant_cpu_to_le32(0xC01E0358)
+#define STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED __constant_cpu_to_le32(0xC01E0359)
+#define STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION __constant_cpu_to_le32(0xC01E035A)
+#define STATUS_GRAPHICS_INVALID_CLIENT_TYPE __constant_cpu_to_le32(0xC01E035B)
+#define STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET __constant_cpu_to_le32(0xC01E035C)
+#define STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED __constant_cpu_to_le32(0xC01E0400)
+#define STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0401)
+#define STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER __constant_cpu_to_le32(0xC01E0430)
+#define STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED __constant_cpu_to_le32(0xC01E0431)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED __constant_cpu_to_le32(0xC01E0432)
+#define STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY __constant_cpu_to_le32(0xC01E0433)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED __constant_cpu_to_le32(0xC01E0434)
+#define STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON __constant_cpu_to_le32(0xC01E0435)
+#define STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE __constant_cpu_to_le32(0xC01E0436)
+#define STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER __constant_cpu_to_le32(0xC01E0438)
+#define STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED __constant_cpu_to_le32(0xC01E043B)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS __constant_cpu_to_le32(0xC01E051C)
+#define STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST __constant_cpu_to_le32(0xC01E051D)
+#define STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR __constant_cpu_to_le32(0xC01E051E)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS __constant_cpu_to_le32(0xC01E051F)
+#define STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0520)
+#define STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST __constant_cpu_to_le32(0xC01E0521)
+#define STATUS_GRAPHICS_OPM_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0500)
+#define STATUS_GRAPHICS_COPP_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0501)
+#define STATUS_GRAPHICS_UAB_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0502)
+#define STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS __constant_cpu_to_le32(0xC01E0503)
+#define STATUS_GRAPHICS_OPM_PARAMETER_ARRAY_TOO_SMALL __constant_cpu_to_le32(0xC01E0504)
+#define STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST __constant_cpu_to_le32(0xC01E0505)
+#define STATUS_GRAPHICS_PVP_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME __constant_cpu_to_le32(0xC01E0506)
+#define STATUS_GRAPHICS_PVP_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP __constant_cpu_to_le32(0xC01E0507)
+#define STATUS_GRAPHICS_PVP_MIRRORING_DEVICES_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0508)
+#define STATUS_GRAPHICS_OPM_INVALID_POINTER __constant_cpu_to_le32(0xC01E050A)
+#define STATUS_GRAPHICS_OPM_INTERNAL_ERROR __constant_cpu_to_le32(0xC01E050B)
+#define STATUS_GRAPHICS_OPM_INVALID_HANDLE __constant_cpu_to_le32(0xC01E050C)
+#define STATUS_GRAPHICS_PVP_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE __constant_cpu_to_le32(0xC01E050D)
+#define STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH __constant_cpu_to_le32(0xC01E050E)
+#define STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED __constant_cpu_to_le32(0xC01E050F)
+#define STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED __constant_cpu_to_le32(0xC01E0510)
+#define STATUS_GRAPHICS_PVP_HFS_FAILED __constant_cpu_to_le32(0xC01E0511)
+#define STATUS_GRAPHICS_OPM_INVALID_SRM __constant_cpu_to_le32(0xC01E0512)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP __constant_cpu_to_le32(0xC01E0513)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP __constant_cpu_to_le32(0xC01E0514)
+#define STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA __constant_cpu_to_le32(0xC01E0515)
+#define STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET __constant_cpu_to_le32(0xC01E0516)
+#define STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH __constant_cpu_to_le32(0xC01E0517)
+#define STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE __constant_cpu_to_le32(0xC01E0518)
+#define STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS __constant_cpu_to_le32(0xC01E051A)
+#define STATUS_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS __constant_cpu_to_le32(0xC01E051B)
+#define STATUS_GRAPHICS_I2C_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0580)
+#define STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST __constant_cpu_to_le32(0xC01E0581)
+#define STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA __constant_cpu_to_le32(0xC01E0582)
+#define STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA __constant_cpu_to_le32(0xC01E0583)
+#define STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E0584)
+#define STATUS_GRAPHICS_DDCCI_INVALID_DATA __constant_cpu_to_le32(0xC01E0585)
+#define STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE __constant_cpu_to_le32(0xC01E0586)
+#define STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING __constant_cpu_to_le32(0xC01E0587)
+#define STATUS_GRAPHICS_MCA_INTERNAL_ERROR __constant_cpu_to_le32(0xC01E0588)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND __constant_cpu_to_le32(0xC01E0589)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH __constant_cpu_to_le32(0xC01E058A)
+#define STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM __constant_cpu_to_le32(0xC01E058B)
+#define STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE __constant_cpu_to_le32(0xC01E058C)
+#define STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS __constant_cpu_to_le32(0xC01E058D)
+#define STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED __constant_cpu_to_le32(0xC01E05E0)
+#define STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME __constant_cpu_to_le32(0xC01E05E1)
+#define STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP __constant_cpu_to_le32(0xC01E05E2)
+#define STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED __constant_cpu_to_le32(0xC01E05E3)
+#define STATUS_GRAPHICS_INVALID_POINTER __constant_cpu_to_le32(0xC01E05E4)
+#define STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE __constant_cpu_to_le32(0xC01E05E5)
+#define STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL __constant_cpu_to_le32(0xC01E05E6)
+#define STATUS_GRAPHICS_INTERNAL_ERROR __constant_cpu_to_le32(0xC01E05E7)
+#define STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS __constant_cpu_to_le32(0xC01E05E8)
+#define STATUS_FVE_LOCKED_VOLUME __constant_cpu_to_le32(0xC0210000)
+#define STATUS_FVE_NOT_ENCRYPTED __constant_cpu_to_le32(0xC0210001)
+#define STATUS_FVE_BAD_INFORMATION __constant_cpu_to_le32(0xC0210002)
+#define STATUS_FVE_TOO_SMALL __constant_cpu_to_le32(0xC0210003)
+#define STATUS_FVE_FAILED_WRONG_FS __constant_cpu_to_le32(0xC0210004)
+#define STATUS_FVE_FAILED_BAD_FS __constant_cpu_to_le32(0xC0210005)
+#define STATUS_FVE_FS_NOT_EXTENDED __constant_cpu_to_le32(0xC0210006)
+#define STATUS_FVE_FS_MOUNTED __constant_cpu_to_le32(0xC0210007)
+#define STATUS_FVE_NO_LICENSE __constant_cpu_to_le32(0xC0210008)
+#define STATUS_FVE_ACTION_NOT_ALLOWED __constant_cpu_to_le32(0xC0210009)
+#define STATUS_FVE_BAD_DATA __constant_cpu_to_le32(0xC021000A)
+#define STATUS_FVE_VOLUME_NOT_BOUND __constant_cpu_to_le32(0xC021000B)
+#define STATUS_FVE_NOT_DATA_VOLUME __constant_cpu_to_le32(0xC021000C)
+#define STATUS_FVE_CONV_READ_ERROR __constant_cpu_to_le32(0xC021000D)
+#define STATUS_FVE_CONV_WRITE_ERROR __constant_cpu_to_le32(0xC021000E)
+#define STATUS_FVE_OVERLAPPED_UPDATE __constant_cpu_to_le32(0xC021000F)
+#define STATUS_FVE_FAILED_SECTOR_SIZE __constant_cpu_to_le32(0xC0210010)
+#define STATUS_FVE_FAILED_AUTHENTICATION __constant_cpu_to_le32(0xC0210011)
+#define STATUS_FVE_NOT_OS_VOLUME __constant_cpu_to_le32(0xC0210012)
+#define STATUS_FVE_KEYFILE_NOT_FOUND __constant_cpu_to_le32(0xC0210013)
+#define STATUS_FVE_KEYFILE_INVALID __constant_cpu_to_le32(0xC0210014)
+#define STATUS_FVE_KEYFILE_NO_VMK __constant_cpu_to_le32(0xC0210015)
+#define STATUS_FVE_TPM_DISABLED __constant_cpu_to_le32(0xC0210016)
+#define STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO __constant_cpu_to_le32(0xC0210017)
+#define STATUS_FVE_TPM_INVALID_PCR __constant_cpu_to_le32(0xC0210018)
+#define STATUS_FVE_TPM_NO_VMK __constant_cpu_to_le32(0xC0210019)
+#define STATUS_FVE_PIN_INVALID __constant_cpu_to_le32(0xC021001A)
+#define STATUS_FVE_AUTH_INVALID_APPLICATION __constant_cpu_to_le32(0xC021001B)
+#define STATUS_FVE_AUTH_INVALID_CONFIG __constant_cpu_to_le32(0xC021001C)
+#define STATUS_FVE_DEBUGGER_ENABLED __constant_cpu_to_le32(0xC021001D)
+#define STATUS_FVE_DRY_RUN_FAILED __constant_cpu_to_le32(0xC021001E)
+#define STATUS_FVE_BAD_METADATA_POINTER __constant_cpu_to_le32(0xC021001F)
+#define STATUS_FVE_OLD_METADATA_COPY __constant_cpu_to_le32(0xC0210020)
+#define STATUS_FVE_REBOOT_REQUIRED __constant_cpu_to_le32(0xC0210021)
+#define STATUS_FVE_RAW_ACCESS __constant_cpu_to_le32(0xC0210022)
+#define STATUS_FVE_RAW_BLOCKED __constant_cpu_to_le32(0xC0210023)
+#define STATUS_FWP_CALLOUT_NOT_FOUND __constant_cpu_to_le32(0xC0220001)
+#define STATUS_FWP_CONDITION_NOT_FOUND __constant_cpu_to_le32(0xC0220002)
+#define STATUS_FWP_FILTER_NOT_FOUND __constant_cpu_to_le32(0xC0220003)
+#define STATUS_FWP_LAYER_NOT_FOUND __constant_cpu_to_le32(0xC0220004)
+#define STATUS_FWP_PROVIDER_NOT_FOUND __constant_cpu_to_le32(0xC0220005)
+#define STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND __constant_cpu_to_le32(0xC0220006)
+#define STATUS_FWP_SUBLAYER_NOT_FOUND __constant_cpu_to_le32(0xC0220007)
+#define STATUS_FWP_NOT_FOUND __constant_cpu_to_le32(0xC0220008)
+#define STATUS_FWP_ALREADY_EXISTS __constant_cpu_to_le32(0xC0220009)
+#define STATUS_FWP_IN_USE __constant_cpu_to_le32(0xC022000A)
+#define STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS __constant_cpu_to_le32(0xC022000B)
+#define STATUS_FWP_WRONG_SESSION __constant_cpu_to_le32(0xC022000C)
+#define STATUS_FWP_NO_TXN_IN_PROGRESS __constant_cpu_to_le32(0xC022000D)
+#define STATUS_FWP_TXN_IN_PROGRESS __constant_cpu_to_le32(0xC022000E)
+#define STATUS_FWP_TXN_ABORTED __constant_cpu_to_le32(0xC022000F)
+#define STATUS_FWP_SESSION_ABORTED __constant_cpu_to_le32(0xC0220010)
+#define STATUS_FWP_INCOMPATIBLE_TXN __constant_cpu_to_le32(0xC0220011)
+#define STATUS_FWP_TIMEOUT __constant_cpu_to_le32(0xC0220012)
+#define STATUS_FWP_NET_EVENTS_DISABLED __constant_cpu_to_le32(0xC0220013)
+#define STATUS_FWP_INCOMPATIBLE_LAYER __constant_cpu_to_le32(0xC0220014)
+#define STATUS_FWP_KM_CLIENTS_ONLY __constant_cpu_to_le32(0xC0220015)
+#define STATUS_FWP_LIFETIME_MISMATCH __constant_cpu_to_le32(0xC0220016)
+#define STATUS_FWP_BUILTIN_OBJECT __constant_cpu_to_le32(0xC0220017)
+#define STATUS_FWP_TOO_MANY_BOOTTIME_FILTERS __constant_cpu_to_le32(0xC0220018)
+#define STATUS_FWP_TOO_MANY_CALLOUTS __constant_cpu_to_le32(0xC0220018)
+#define STATUS_FWP_NOTIFICATION_DROPPED __constant_cpu_to_le32(0xC0220019)
+#define STATUS_FWP_TRAFFIC_MISMATCH __constant_cpu_to_le32(0xC022001A)
+#define STATUS_FWP_INCOMPATIBLE_SA_STATE __constant_cpu_to_le32(0xC022001B)
+#define STATUS_FWP_NULL_POINTER __constant_cpu_to_le32(0xC022001C)
+#define STATUS_FWP_INVALID_ENUMERATOR __constant_cpu_to_le32(0xC022001D)
+#define STATUS_FWP_INVALID_FLAGS __constant_cpu_to_le32(0xC022001E)
+#define STATUS_FWP_INVALID_NET_MASK __constant_cpu_to_le32(0xC022001F)
+#define STATUS_FWP_INVALID_RANGE __constant_cpu_to_le32(0xC0220020)
+#define STATUS_FWP_INVALID_INTERVAL __constant_cpu_to_le32(0xC0220021)
+#define STATUS_FWP_ZERO_LENGTH_ARRAY __constant_cpu_to_le32(0xC0220022)
+#define STATUS_FWP_NULL_DISPLAY_NAME __constant_cpu_to_le32(0xC0220023)
+#define STATUS_FWP_INVALID_ACTION_TYPE __constant_cpu_to_le32(0xC0220024)
+#define STATUS_FWP_INVALID_WEIGHT __constant_cpu_to_le32(0xC0220025)
+#define STATUS_FWP_MATCH_TYPE_MISMATCH __constant_cpu_to_le32(0xC0220026)
+#define STATUS_FWP_TYPE_MISMATCH __constant_cpu_to_le32(0xC0220027)
+#define STATUS_FWP_OUT_OF_BOUNDS __constant_cpu_to_le32(0xC0220028)
+#define STATUS_FWP_RESERVED __constant_cpu_to_le32(0xC0220029)
+#define STATUS_FWP_DUPLICATE_CONDITION __constant_cpu_to_le32(0xC022002A)
+#define STATUS_FWP_DUPLICATE_KEYMOD __constant_cpu_to_le32(0xC022002B)
+#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER __constant_cpu_to_le32(0xC022002C)
+#define STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER __constant_cpu_to_le32(0xC022002D)
+#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER __constant_cpu_to_le32(0xC022002E)
+#define STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT __constant_cpu_to_le32(0xC022002F)
+#define STATUS_FWP_INCOMPATIBLE_AUTH_METHOD __constant_cpu_to_le32(0xC0220030)
+#define STATUS_FWP_INCOMPATIBLE_DH_GROUP __constant_cpu_to_le32(0xC0220031)
+#define STATUS_FWP_EM_NOT_SUPPORTED __constant_cpu_to_le32(0xC0220032)
+#define STATUS_FWP_NEVER_MATCH __constant_cpu_to_le32(0xC0220033)
+#define STATUS_FWP_PROVIDER_CONTEXT_MISMATCH __constant_cpu_to_le32(0xC0220034)
+#define STATUS_FWP_INVALID_PARAMETER __constant_cpu_to_le32(0xC0220035)
+#define STATUS_FWP_TOO_MANY_SUBLAYERS __constant_cpu_to_le32(0xC0220036)
+#define STATUS_FWP_CALLOUT_NOTIFICATION_FAILED __constant_cpu_to_le32(0xC0220037)
+#define STATUS_FWP_INCOMPATIBLE_AUTH_CONFIG __constant_cpu_to_le32(0xC0220038)
+#define STATUS_FWP_INCOMPATIBLE_CIPHER_CONFIG __constant_cpu_to_le32(0xC0220039)
+#define STATUS_FWP_TCPIP_NOT_READY __constant_cpu_to_le32(0xC0220100)
+#define STATUS_FWP_INJECT_HANDLE_CLOSING __constant_cpu_to_le32(0xC0220101)
+#define STATUS_FWP_INJECT_HANDLE_STALE __constant_cpu_to_le32(0xC0220102)
+#define STATUS_FWP_CANNOT_PEND __constant_cpu_to_le32(0xC0220103)
+#define STATUS_NDIS_CLOSING __constant_cpu_to_le32(0xC0230002)
+#define STATUS_NDIS_BAD_VERSION __constant_cpu_to_le32(0xC0230004)
+#define STATUS_NDIS_BAD_CHARACTERISTICS __constant_cpu_to_le32(0xC0230005)
+#define STATUS_NDIS_ADAPTER_NOT_FOUND __constant_cpu_to_le32(0xC0230006)
+#define STATUS_NDIS_OPEN_FAILED __constant_cpu_to_le32(0xC0230007)
+#define STATUS_NDIS_DEVICE_FAILED __constant_cpu_to_le32(0xC0230008)
+#define STATUS_NDIS_MULTICAST_FULL __constant_cpu_to_le32(0xC0230009)
+#define STATUS_NDIS_MULTICAST_EXISTS __constant_cpu_to_le32(0xC023000A)
+#define STATUS_NDIS_MULTICAST_NOT_FOUND __constant_cpu_to_le32(0xC023000B)
+#define STATUS_NDIS_REQUEST_ABORTED __constant_cpu_to_le32(0xC023000C)
+#define STATUS_NDIS_RESET_IN_PROGRESS __constant_cpu_to_le32(0xC023000D)
+#define STATUS_NDIS_INVALID_PACKET __constant_cpu_to_le32(0xC023000F)
+#define STATUS_NDIS_INVALID_DEVICE_REQUEST __constant_cpu_to_le32(0xC0230010)
+#define STATUS_NDIS_ADAPTER_NOT_READY __constant_cpu_to_le32(0xC0230011)
+#define STATUS_NDIS_INVALID_LENGTH __constant_cpu_to_le32(0xC0230014)
+#define STATUS_NDIS_INVALID_DATA __constant_cpu_to_le32(0xC0230015)
+#define STATUS_NDIS_BUFFER_TOO_SHORT __constant_cpu_to_le32(0xC0230016)
+#define STATUS_NDIS_INVALID_OID __constant_cpu_to_le32(0xC0230017)
+#define STATUS_NDIS_ADAPTER_REMOVED __constant_cpu_to_le32(0xC0230018)
+#define STATUS_NDIS_UNSUPPORTED_MEDIA __constant_cpu_to_le32(0xC0230019)
+#define STATUS_NDIS_GROUP_ADDRESS_IN_USE __constant_cpu_to_le32(0xC023001A)
+#define STATUS_NDIS_FILE_NOT_FOUND __constant_cpu_to_le32(0xC023001B)
+#define STATUS_NDIS_ERROR_READING_FILE __constant_cpu_to_le32(0xC023001C)
+#define STATUS_NDIS_ALREADY_MAPPED __constant_cpu_to_le32(0xC023001D)
+#define STATUS_NDIS_RESOURCE_CONFLICT __constant_cpu_to_le32(0xC023001E)
+#define STATUS_NDIS_MEDIA_DISCONNECTED __constant_cpu_to_le32(0xC023001F)
+#define STATUS_NDIS_INVALID_ADDRESS __constant_cpu_to_le32(0xC0230022)
+#define STATUS_NDIS_PAUSED __constant_cpu_to_le32(0xC023002A)
+#define STATUS_NDIS_INTERFACE_NOT_FOUND __constant_cpu_to_le32(0xC023002B)
+#define STATUS_NDIS_UNSUPPORTED_REVISION __constant_cpu_to_le32(0xC023002C)
+#define STATUS_NDIS_INVALID_PORT __constant_cpu_to_le32(0xC023002D)
+#define STATUS_NDIS_INVALID_PORT_STATE __constant_cpu_to_le32(0xC023002E)
+#define STATUS_NDIS_LOW_POWER_STATE __constant_cpu_to_le32(0xC023002F)
+#define STATUS_NDIS_NOT_SUPPORTED __constant_cpu_to_le32(0xC02300BB)
+#define STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED __constant_cpu_to_le32(0xC0232000)
+#define STATUS_NDIS_DOT11_MEDIA_IN_USE __constant_cpu_to_le32(0xC0232001)
+#define STATUS_NDIS_DOT11_POWER_STATE_INVALID __constant_cpu_to_le32(0xC0232002)
+#define STATUS_IPSEC_BAD_SPI __constant_cpu_to_le32(0xC0360001)
+#define STATUS_IPSEC_SA_LIFETIME_EXPIRED __constant_cpu_to_le32(0xC0360002)
+#define STATUS_IPSEC_WRONG_SA __constant_cpu_to_le32(0xC0360003)
+#define STATUS_IPSEC_REPLAY_CHECK_FAILED __constant_cpu_to_le32(0xC0360004)
+#define STATUS_IPSEC_INVALID_PACKET __constant_cpu_to_le32(0xC0360005)
+#define STATUS_IPSEC_INTEGRITY_CHECK_FAILED __constant_cpu_to_le32(0xC0360006)
+#define STATUS_IPSEC_CLEAR_TEXT_DROP __constant_cpu_to_le32(0xC0360007)
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
new file mode 100644
index 000000000000..31f5d420b3ea
--- /dev/null
+++ b/fs/cifs/smb2transport.c
@@ -0,0 +1,172 @@
+/*
+ * fs/cifs/smb2transport.c
+ *
+ * Copyright (C) International Business Machines Corp., 2002, 2011
+ * Etersoft, 2012
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Jeremy Allison (jra@samba.org) 2006
+ * Pavel Shilovsky (pshilovsky@samba.org) 2012
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>
+#include <linux/mempool.h>
+#include "smb2pdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "smb2proto.h"
+#include "cifs_debug.h"
+#include "smb2status.h"
+
+/*
+ * Set message id for the request. Should be called after wait_for_free_request
+ * and when srv_mutex is held.
+ */
+static inline void
+smb2_seq_num_into_buf(struct TCP_Server_Info *server, struct smb2_hdr *hdr)
+{
+ hdr->MessageId = get_next_mid(server);
+}
+
+static struct mid_q_entry *
+smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
+ struct TCP_Server_Info *server)
+{
+ struct mid_q_entry *temp;
+
+ if (server == NULL) {
+ cERROR(1, "Null TCP session in smb2_mid_entry_alloc");
+ return NULL;
+ }
+
+ temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
+ if (temp == NULL)
+ return temp;
+ else {
+ memset(temp, 0, sizeof(struct mid_q_entry));
+ temp->mid = smb_buffer->MessageId; /* always LE */
+ temp->pid = current->pid;
+ temp->command = smb_buffer->Command; /* Always LE */
+ temp->when_alloc = jiffies;
+ temp->server = server;
+
+ /*
+ * The default is for the mid to be synchronous, so the
+ * default callback just wakes up the current task.
+ */
+ temp->callback = cifs_wake_up_task;
+ temp->callback_data = current;
+ }
+
+ atomic_inc(&midCount);
+ temp->mid_state = MID_REQUEST_ALLOCATED;
+ return temp;
+}
+
+static int
+smb2_get_mid_entry(struct cifs_ses *ses, struct smb2_hdr *buf,
+ struct mid_q_entry **mid)
+{
+ if (ses->server->tcpStatus == CifsExiting)
+ return -ENOENT;
+
+ if (ses->server->tcpStatus == CifsNeedReconnect) {
+ cFYI(1, "tcp session dead - return to caller to retry");
+ return -EAGAIN;
+ }
+
+ if (ses->status != CifsGood) {
+ /* check if SMB2 session is bad because we are setting it up */
+ if ((buf->Command != SMB2_SESSION_SETUP) &&
+ (buf->Command != SMB2_NEGOTIATE))
+ return -EAGAIN;
+ /* else ok - we are setting up session */
+ }
+ *mid = smb2_mid_entry_alloc(buf, ses->server);
+ if (*mid == NULL)
+ return -ENOMEM;
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&(*mid)->qhead, &ses->server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+ return 0;
+}
+
+int
+smb2_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+ bool log_error)
+{
+ unsigned int len = get_rfc1002_length(mid->resp_buf);
+
+ dump_smb(mid->resp_buf, min_t(u32, 80, len));
+ /* convert the length into a more usable form */
+ /* BB - uncomment with SMB2 signing implementation */
+ /* if ((len > 24) &&
+ (server->sec_mode & (SECMODE_SIGN_REQUIRED|SECMODE_SIGN_ENABLED))) {
+ if (smb2_verify_signature(mid->resp_buf, server))
+ cERROR(1, "Unexpected SMB signature");
+ } */
+
+ return map_smb2_to_linux_error(mid->resp_buf, log_error);
+}
+
+int
+smb2_setup_request(struct cifs_ses *ses, struct kvec *iov,
+ unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+ int rc;
+ struct smb2_hdr *hdr = (struct smb2_hdr *)iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ smb2_seq_num_into_buf(ses->server, hdr);
+
+ rc = smb2_get_mid_entry(ses, hdr, &mid);
+ if (rc)
+ return rc;
+ /* rc = smb2_sign_smb2(iov, nvec, ses->server);
+ if (rc)
+ delete_mid(mid); */
+ *ret_mid = mid;
+ return rc;
+}
+
+int
+smb2_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
+ unsigned int nvec, struct mid_q_entry **ret_mid)
+{
+ int rc = 0;
+ struct smb2_hdr *hdr = (struct smb2_hdr *)iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ smb2_seq_num_into_buf(server, hdr);
+
+ mid = smb2_mid_entry_alloc(hdr, server);
+ if (mid == NULL)
+ return -ENOMEM;
+
+ /* rc = smb2_sign_smb2(iov, nvec, server);
+ if (rc) {
+ DeleteMidQEntry(mid);
+ return rc;
+ }*/
+ *ret_mid = mid;
+ return rc;
+}
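
The new smb2 transport code above follows the same shape as the cifs transport: each request gets a mid entry from a mempool, is stamped with a sequence number while srv_mutex is held, given a default callback that simply wakes the issuing task, and queued on pending_mid_q under GlobalMid_Lock. A rough userspace sketch of that allocate/stamp/queue pattern (all names and types below are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct mid_entry {
	uint64_t mid;                          /* sequence number stamped at alloc */
	void (*callback)(struct mid_entry *);  /* default: wake the waiting task */
	struct mid_entry *next;                /* pending-queue link */
};

static uint64_t next_mid;                  /* per-server counter in the kernel */
static struct mid_entry *pending;          /* stands in for pending_mid_q */

static void wake_task(struct mid_entry *m)
{
	printf("wake task waiting on mid %llu\n", (unsigned long long)m->mid);
}

static struct mid_entry *mid_alloc(void)
{
	struct mid_entry *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	m->mid = next_mid++;                   /* get_next_mid() analogue */
	m->callback = wake_task;               /* synchronous by default */
	m->next = pending;                     /* queue for the demultiplexer */
	pending = m;
	return m;
}

int main(void)
{
	struct mid_entry *m = mid_alloc();

	if (m)
		m->callback(m);                    /* response arrival would do this */
	free(m);
	return 0;
}
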
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index d5cd9aa7eacc..a0a58fbe2c10 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -78,7 +78,7 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
tfm_des = crypto_alloc_blkcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm_des)) {
rc = PTR_ERR(tfm_des);
- cERROR(1, "could not allocate des crypto API\n");
+ cERROR(1, "could not allocate des crypto API");
goto smbhash_err;
}
@@ -91,7 +91,7 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
if (rc)
- cERROR(1, "could not encrypt crypt key rc: %d\n", rc);
+ cERROR(1, "could not encrypt crypt key rc: %d", rc);
crypto_free_blkcipher(tfm_des);
smbhash_err:
@@ -139,14 +139,14 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
md4 = crypto_alloc_shash("md4", 0, 0);
if (IS_ERR(md4)) {
rc = PTR_ERR(md4);
- cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
+ cERROR(1, "%s: Crypto md4 allocation error %d", __func__, rc);
return rc;
}
size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
sdescmd4 = kmalloc(size, GFP_KERNEL);
if (!sdescmd4) {
rc = -ENOMEM;
- cERROR(1, "%s: Memory allocation failure\n", __func__);
+ cERROR(1, "%s: Memory allocation failure", __func__);
goto mdfour_err;
}
sdescmd4->shash.tfm = md4;
@@ -154,17 +154,17 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
rc = crypto_shash_init(&sdescmd4->shash);
if (rc) {
- cERROR(1, "%s: Could not init md4 shash\n", __func__);
+ cERROR(1, "%s: Could not init md4 shash", __func__);
goto mdfour_err;
}
rc = crypto_shash_update(&sdescmd4->shash, link_str, link_len);
if (rc) {
- cERROR(1, "%s: Could not update with link_str\n", __func__);
+ cERROR(1, "%s: Could not update with link_str", __func__);
goto mdfour_err;
}
rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
if (rc)
- cERROR(1, "%s: Could not genereate md4 hash\n", __func__);
+ cERROR(1, "%s: Could not genereate md4 hash", __func__);
mdfour_err:
crypto_free_shash(md4);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index f25d4ea14be4..d9b639b95fa8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -35,10 +35,8 @@
#include "cifsproto.h"
#include "cifs_debug.h"
-extern mempool_t *cifs_mid_poolp;
-
-static void
-wake_up_task(struct mid_q_entry *mid)
+void
+cifs_wake_up_task(struct mid_q_entry *mid)
{
wake_up_process(mid->callback_data);
}
@@ -65,12 +63,13 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
/* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
/* when mid allocated can be before when sent */
temp->when_alloc = jiffies;
+ temp->server = server;
/*
* The default is for the mid to be synchronous, so the
* default callback just wakes up the current task.
*/
- temp->callback = wake_up_task;
+ temp->callback = cifs_wake_up_task;
temp->callback_data = current;
}
@@ -83,6 +82,7 @@ void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
+ __le16 command = midEntry->server->vals->lock_cmd;
unsigned long now;
#endif
midEntry->mid_state = MID_FREE;
@@ -96,8 +96,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
/* commands taking longer than one second are indications that
something is wrong, unless it is quite a slow link or server */
if ((now - midEntry->when_alloc) > HZ) {
- if ((cifsFYI & CIFS_TIMER) &&
- (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
+ if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
@@ -126,7 +125,6 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
int rc = 0;
int i = 0;
struct msghdr smb_msg;
- __be32 *buf_len = (__be32 *)(iov[0].iov_base);
unsigned int len = iov[0].iov_len;
unsigned int total_len;
int first_vec = 0;
@@ -235,9 +233,6 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
else
rc = 0;
- /* Don't want to modify the buffer as a side effect of this call. */
- *buf_len = cpu_to_be32(smb_buf_length);
-
return rc;
}
@@ -254,13 +249,13 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
}
static int
-wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
+wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
int *credits)
{
int rc;
spin_lock(&server->req_lock);
- if (optype == CIFS_ASYNC_OP) {
+ if (timeout == CIFS_ASYNC_OP) {
/* oplock breaks must not be held up */
server->in_flight++;
*credits -= 1;
@@ -290,7 +285,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
*/
/* update # of requests on the wire to server */
- if (optype != CIFS_BLOCKING_OP) {
+ if (timeout != CIFS_BLOCKING_OP) {
*credits -= 1;
server->in_flight++;
}
@@ -302,10 +297,11 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
}
static int
-wait_for_free_request(struct TCP_Server_Info *server, const int optype)
+wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
+ const int optype)
{
- return wait_for_free_credits(server, optype,
- server->ops->get_credits_field(server));
+ return wait_for_free_credits(server, timeout,
+ server->ops->get_credits_field(server, optype));
}
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -349,7 +345,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
return 0;
}
-static int
+int
cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, struct mid_q_entry **ret_mid)
{
@@ -365,7 +361,7 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
if (mid == NULL)
return -ENOMEM;
- rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
+ rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number);
if (rc) {
DeleteMidQEntry(mid);
return rc;
@@ -382,20 +378,23 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, mid_receive_t *receive,
- mid_callback_t *callback, void *cbdata, bool ignore_pend)
+ mid_callback_t *callback, void *cbdata, const int flags)
{
- int rc;
+ int rc, timeout, optype;
struct mid_q_entry *mid;
- rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
+ timeout = flags & CIFS_TIMEOUT_MASK;
+ optype = flags & CIFS_OP_MASK;
+
+ rc = wait_for_free_request(server, timeout, optype);
if (rc)
return rc;
mutex_lock(&server->srv_mutex);
- rc = cifs_setup_async_request(server, iov, nvec, &mid);
+ rc = server->ops->setup_async_request(server, iov, nvec, &mid);
if (rc) {
mutex_unlock(&server->srv_mutex);
- add_credits(server, 1);
+ add_credits(server, 1, optype);
wake_up(&server->request_q);
return rc;
}
@@ -421,7 +420,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
return 0;
delete_mid(mid);
- add_credits(server, 1);
+ add_credits(server, 1, optype);
wake_up(&server->request_q);
return rc;
}
@@ -504,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
/* convert the length into a more usable form */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
struct kvec iov;
+ int rc = 0;
iov.iov_base = mid->resp_buf;
iov.iov_len = len;
/* FIXME: add code to kill session */
- if (cifs_verify_signature(&iov, 1, server,
- mid->sequence_number + 1) != 0)
- cERROR(1, "Unexpected SMB signature");
+ rc = cifs_verify_signature(&iov, 1, server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned error = "
+ "%d", rc);
}
/* BB special case reconnect tid and uid here? */
@@ -528,7 +530,7 @@ cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
rc = allocate_mid(ses, hdr, &mid);
if (rc)
return rc;
- rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
+ rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number);
if (rc)
delete_mid(mid);
*ret_mid = mid;
@@ -537,17 +539,19 @@ cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
- struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
+ struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
const int flags)
{
int rc = 0;
- int long_op;
+ int timeout, optype;
struct mid_q_entry *midQ;
char *buf = iov[0].iov_base;
+ unsigned int credits = 1;
- long_op = flags & CIFS_TIMEOUT_MASK;
+ timeout = flags & CIFS_TIMEOUT_MASK;
+ optype = flags & CIFS_OP_MASK;
- *pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
+ *resp_buf_type = CIFS_NO_BUFFER; /* no response buf yet */
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(buf);
@@ -566,7 +570,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
* use ses->maxReq.
*/
- rc = wait_for_free_request(ses->server, long_op);
+ rc = wait_for_free_request(ses->server, timeout, optype);
if (rc) {
cifs_small_buf_release(buf);
return rc;
@@ -585,7 +589,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, optype);
return rc;
}
@@ -602,7 +606,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
goto out;
}
- if (long_op == CIFS_ASYNC_OP) {
+ if (timeout == CIFS_ASYNC_OP) {
cifs_small_buf_release(buf);
goto out;
}
@@ -615,7 +619,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(buf);
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, optype);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -625,7 +629,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, optype);
return rc;
}
@@ -639,9 +643,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
iov[0].iov_base = buf;
iov[0].iov_len = get_rfc1002_length(buf) + 4;
if (midQ->large_buf)
- *pRespBufType = CIFS_LARGE_BUFFER;
+ *resp_buf_type = CIFS_LARGE_BUFFER;
else
- *pRespBufType = CIFS_SMALL_BUFFER;
+ *resp_buf_type = CIFS_SMALL_BUFFER;
+
+ credits = ses->server->ops->get_credits(midQ);
rc = ses->server->ops->check_receive(midQ, ses->server,
flags & CIFS_LOG_ERROR);
@@ -651,7 +657,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
midQ->resp_buf = NULL;
out:
delete_mid(midQ);
- add_credits(ses->server, 1);
+ add_credits(ses->server, credits, optype);
return rc;
}
@@ -659,7 +665,7 @@ out:
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
- int *pbytes_returned, const int long_op)
+ int *pbytes_returned, const int timeout)
{
int rc = 0;
struct mid_q_entry *midQ;
@@ -687,7 +693,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- rc = wait_for_free_request(ses->server, long_op);
+ rc = wait_for_free_request(ses->server, timeout, 0);
if (rc)
return rc;
@@ -701,7 +707,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, 0);
return rc;
}
@@ -722,7 +728,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc < 0)
goto out;
- if (long_op == CIFS_ASYNC_OP)
+ if (timeout == CIFS_ASYNC_OP)
goto out;
rc = wait_for_response(ses->server, midQ);
@@ -733,7 +739,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, 0);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -741,7 +747,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, 0);
return rc;
}
@@ -757,7 +763,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
- add_credits(ses->server, 1);
+ add_credits(ses->server, 1, 0);
return rc;
}
@@ -822,7 +828,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
- rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
+ rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
if (rc)
return rc;
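
A recurring change in transport.c above is that the old single long_op argument becomes a flags word carrying both a timeout class (flags & CIFS_TIMEOUT_MASK) and an operation type (flags & CIFS_OP_MASK), with the op type forwarded to add_credits() so credits can be accounted per operation. A minimal userspace sketch of that mask-based split, with made-up constant values rather than the real cifsglob.h definitions:

#include <stdio.h>

/* illustrative values only; the kernel defines its own masks */
#define TIMEOUT_MASK 0x00f
#define OP_MASK      0x0f0

#define ASYNC_OP     0x001
#define BLOCKING_OP  0x002

static void send_receive(int flags)
{
	int timeout = flags & TIMEOUT_MASK;   /* how long to wait */
	int optype  = flags & OP_MASK;        /* how to account credits */

	printf("timeout class %#x, op type %#x\n", timeout, optype);
	if (timeout == ASYNC_OP)
		printf("async: do not wait for a response\n");
}

int main(void)
{
	send_receive(ASYNC_OP | 0x010);
	return 0;
}
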
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 10d92cf57ab6..5142f2c60278 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -39,7 +39,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
{
int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
@@ -60,7 +60,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -88,7 +88,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
}
remove_ea_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
#endif
return rc;
@@ -99,7 +99,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
{
int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
@@ -120,7 +120,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -221,7 +221,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
set_ea_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
#endif
return rc;
@@ -232,7 +232,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
@@ -253,7 +253,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -355,7 +355,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
get_ea_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
#endif
return rc;
@@ -365,7 +365,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
{
ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
- int xid;
+ unsigned int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
@@ -389,7 +389,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
- xid = GetXid();
+ xid = get_xid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -409,7 +409,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
list_ea_exit:
kfree(full_path);
- FreeXid(xid);
+ free_xid(xid);
cifs_put_tlink(tlink);
#endif
return rc;
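
The xattr.c hunks switch from GetXid()/FreeXid() to get_xid()/free_xid() and make the xid unsigned, but keep the usual acquire-at-entry, release-on-every-exit structure. A generic userspace model of that pattern (the helpers below are illustrative, not the cifs ones):

#include <stdio.h>

static unsigned int next_xid;

static unsigned int get_op_id(void)
{
	return next_xid++;            /* per-operation identifier */
}

static void put_op_id(unsigned int xid)
{
	printf("operation %u finished\n", xid);
}

static int do_xattr_op(const char *path)
{
	unsigned int xid = get_op_id();
	int rc = 0;

	if (!path) {                  /* every error path funnels to the release */
		rc = -1;
		goto out;
	}
	printf("op %u on %s\n", xid, path);
out:
	put_op_id(xid);
	return rc;
}

int main(void)
{
	do_xattr_op("file");
	do_xattr_op(NULL);
	return 0;
}
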
diff --git a/fs/compat.c b/fs/compat.c
index 6161255fac45..1bdb350ea5d3 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec,
struct file *file;
int fput_needed;
ssize_t ret;
+ loff_t pos;
file = fget_light(fd, &fput_needed);
if (!file)
return -EBADF;
- ret = compat_readv(file, vec, vlen, &file->f_pos);
+ pos = file->f_pos;
+ ret = compat_readv(file, vec, vlen, &pos);
+ file->f_pos = pos;
fput_light(file, fput_needed);
return ret;
}
@@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec,
struct file *file;
int fput_needed;
ssize_t ret;
+ loff_t pos;
file = fget_light(fd, &fput_needed);
if (!file)
return -EBADF;
- ret = compat_writev(file, vec, vlen, &file->f_pos);
+ pos = file->f_pos;
+ ret = compat_writev(file, vec, vlen, &pos);
+ file->f_pos = pos;
fput_light(file, fput_needed);
return ret;
}
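
The compat_sys_readv()/compat_sys_writev() hunks stop passing &file->f_pos directly: the position is copied into a local loff_t, the helper updates that copy, and the result is written back once the call returns. A trivial userspace model of the copy-then-publish pattern (types and names simplified):

#include <stdio.h>

typedef long long loff_model_t;

struct file_model {
	loff_model_t f_pos;
};

/* the helper advances *pos as it consumes data, like compat_readv() */
static long do_read(loff_model_t *pos, long len)
{
	*pos += len;
	return len;
}

static long readv_model(struct file_model *f, long len)
{
	loff_model_t pos = f->f_pos;   /* work on a private copy ...        */
	long ret = do_read(&pos, len);
	f->f_pos = pos;                /* ... and publish it back afterward */
	return ret;
}

int main(void)
{
	struct file_model f = { .f_pos = 0 };

	readv_model(&f, 100);
	printf("f_pos is now %lld\n", f.f_pos);
	return 0;
}
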
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1faf4cb56f39..f86c720dba0e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
unsigned long user_addr;
size_t bytes;
struct buffer_head map_bh = { 0, };
+ struct blk_plug plug;
if (rw & WRITE)
rw = WRITE_ODIRECT;
@@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
PAGE_SIZE - user_addr / PAGE_SIZE);
}
+ blk_start_plug(&plug);
+
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
sdio.size += bytes = iov[seg].iov_len;
@@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (sdio.bio)
dio_bio_submit(dio, &sdio);
+ blk_finish_plug(&plug);
+
/*
* It is possible that, we return short IO due to end of file.
* In that case, we need to release all the pages we got hold on.
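
The direct-io.c change brackets the per-segment bio submission loop with blk_start_plug()/blk_finish_plug(), letting bios accumulate while the request is built and be dispatched as a batch when the plug is released. The following userspace caricature only models that batching idea; it is not the block-layer API:

#include <stdio.h>

#define MAX_BATCH 16

struct plug_model {
	int pending[MAX_BATCH];
	int count;
};

static void submit(struct plug_model *p, int bio_no)
{
	if (p->count < MAX_BATCH)
		p->pending[p->count++] = bio_no;   /* queued, not yet dispatched */
}

static void finish_plug(struct plug_model *p)
{
	/* dispatch everything accumulated while "plugged" in one go */
	for (int i = 0; i < p->count; i++)
		printf("dispatch bio %d\n", p->pending[i]);
	p->count = 0;
}

int main(void)
{
	struct plug_model plug = { .count = 0 };

	for (int seg = 0; seg < 3; seg++)      /* the per-segment loop */
		submit(&plug, seg);
	finish_plug(&plug);                    /* blk_finish_plug() analogue */
	return 0;
}
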
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index e7e327d43fa5..9ccf7346834a 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -96,7 +96,6 @@ struct dlm_cluster {
unsigned int cl_tcp_port;
unsigned int cl_buffer_size;
unsigned int cl_rsbtbl_size;
- unsigned int cl_dirtbl_size;
unsigned int cl_recover_timer;
unsigned int cl_toss_secs;
unsigned int cl_scan_secs;
@@ -113,7 +112,6 @@ enum {
CLUSTER_ATTR_TCP_PORT = 0,
CLUSTER_ATTR_BUFFER_SIZE,
CLUSTER_ATTR_RSBTBL_SIZE,
- CLUSTER_ATTR_DIRTBL_SIZE,
CLUSTER_ATTR_RECOVER_TIMER,
CLUSTER_ATTR_TOSS_SECS,
CLUSTER_ATTR_SCAN_SECS,
@@ -189,7 +187,6 @@ __CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
CLUSTER_ATTR(tcp_port, 1);
CLUSTER_ATTR(buffer_size, 1);
CLUSTER_ATTR(rsbtbl_size, 1);
-CLUSTER_ATTR(dirtbl_size, 1);
CLUSTER_ATTR(recover_timer, 1);
CLUSTER_ATTR(toss_secs, 1);
CLUSTER_ATTR(scan_secs, 1);
@@ -204,7 +201,6 @@ static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
[CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
[CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
- [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr,
[CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
[CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
@@ -478,7 +474,6 @@ static struct config_group *make_cluster(struct config_group *g,
cl->cl_tcp_port = dlm_config.ci_tcp_port;
cl->cl_buffer_size = dlm_config.ci_buffer_size;
cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
- cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size;
cl->cl_recover_timer = dlm_config.ci_recover_timer;
cl->cl_toss_secs = dlm_config.ci_toss_secs;
cl->cl_scan_secs = dlm_config.ci_scan_secs;
@@ -1050,7 +1045,6 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_TCP_PORT 21064
#define DEFAULT_BUFFER_SIZE 4096
#define DEFAULT_RSBTBL_SIZE 1024
-#define DEFAULT_DIRTBL_SIZE 1024
#define DEFAULT_RECOVER_TIMER 5
#define DEFAULT_TOSS_SECS 10
#define DEFAULT_SCAN_SECS 5
@@ -1066,7 +1060,6 @@ struct dlm_config_info dlm_config = {
.ci_tcp_port = DEFAULT_TCP_PORT,
.ci_buffer_size = DEFAULT_BUFFER_SIZE,
.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
- .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE,
.ci_recover_timer = DEFAULT_RECOVER_TIMER,
.ci_toss_secs = DEFAULT_TOSS_SECS,
.ci_scan_secs = DEFAULT_SCAN_SECS,
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 9f5e3663bb0c..dbd35a08f3a5 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -27,7 +27,6 @@ struct dlm_config_info {
int ci_tcp_port;
int ci_buffer_size;
int ci_rsbtbl_size;
- int ci_dirtbl_size;
int ci_recover_timer;
int ci_toss_secs;
int ci_scan_secs;
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 1c9b08095f98..b969deef9ebb 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -344,6 +344,45 @@ static int print_format3(struct dlm_rsb *r, struct seq_file *s)
return rv;
}
+static int print_format4(struct dlm_rsb *r, struct seq_file *s)
+{
+ int our_nodeid = dlm_our_nodeid();
+ int print_name = 1;
+ int i, rv;
+
+ lock_rsb(r);
+
+ rv = seq_printf(s, "rsb %p %d %d %d %d %lu %lx %d ",
+ r,
+ r->res_nodeid,
+ r->res_master_nodeid,
+ r->res_dir_nodeid,
+ our_nodeid,
+ r->res_toss_time,
+ r->res_flags,
+ r->res_length);
+ if (rv)
+ goto out;
+
+ for (i = 0; i < r->res_length; i++) {
+ if (!isascii(r->res_name[i]) || !isprint(r->res_name[i]))
+ print_name = 0;
+ }
+
+ seq_printf(s, "%s", print_name ? "str " : "hex");
+
+ for (i = 0; i < r->res_length; i++) {
+ if (print_name)
+ seq_printf(s, "%c", r->res_name[i]);
+ else
+ seq_printf(s, " %02x", (unsigned char)r->res_name[i]);
+ }
+ rv = seq_printf(s, "\n");
+ out:
+ unlock_rsb(r);
+ return rv;
+}
+
struct rsbtbl_iter {
struct dlm_rsb *rsb;
unsigned bucket;
@@ -382,6 +421,13 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr)
}
rv = print_format3(ri->rsb, seq);
break;
+ case 4:
+ if (ri->header) {
+ seq_printf(seq, "version 4 rsb 2\n");
+ ri->header = 0;
+ }
+ rv = print_format4(ri->rsb, seq);
+ break;
}
return rv;
@@ -390,15 +436,18 @@ static int table_seq_show(struct seq_file *seq, void *iter_ptr)
static const struct seq_operations format1_seq_ops;
static const struct seq_operations format2_seq_ops;
static const struct seq_operations format3_seq_ops;
+static const struct seq_operations format4_seq_ops;
static void *table_seq_start(struct seq_file *seq, loff_t *pos)
{
+ struct rb_root *tree;
struct rb_node *node;
struct dlm_ls *ls = seq->private;
struct rsbtbl_iter *ri;
struct dlm_rsb *r;
loff_t n = *pos;
unsigned bucket, entry;
+ int toss = (seq->op == &format4_seq_ops);
bucket = n >> 32;
entry = n & ((1LL << 32) - 1);
@@ -417,11 +466,14 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
ri->format = 2;
if (seq->op == &format3_seq_ops)
ri->format = 3;
+ if (seq->op == &format4_seq_ops)
+ ri->format = 4;
+
+ tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
- for (node = rb_first(&ls->ls_rsbtbl[bucket].keep); node;
- node = rb_next(node)) {
+ if (!RB_EMPTY_ROOT(tree)) {
+ for (node = rb_first(tree); node; node = rb_next(node)) {
r = rb_entry(node, struct dlm_rsb, res_hashnode);
if (!entry--) {
dlm_hold_rsb(r);
@@ -449,10 +501,11 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
kfree(ri);
return NULL;
}
+ tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
- node = rb_first(&ls->ls_rsbtbl[bucket].keep);
+ if (!RB_EMPTY_ROOT(tree)) {
+ node = rb_first(tree);
r = rb_entry(node, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r);
ri->rsb = r;
@@ -469,10 +522,12 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
{
struct dlm_ls *ls = seq->private;
struct rsbtbl_iter *ri = iter_ptr;
+ struct rb_root *tree;
struct rb_node *next;
struct dlm_rsb *r, *rp;
loff_t n = *pos;
unsigned bucket;
+ int toss = (seq->op == &format4_seq_ops);
bucket = n >> 32;
@@ -511,10 +566,11 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
kfree(ri);
return NULL;
}
+ tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[bucket].keep)) {
- next = rb_first(&ls->ls_rsbtbl[bucket].keep);
+ if (!RB_EMPTY_ROOT(tree)) {
+ next = rb_first(tree);
r = rb_entry(next, struct dlm_rsb, res_hashnode);
dlm_hold_rsb(r);
ri->rsb = r;
@@ -558,9 +614,17 @@ static const struct seq_operations format3_seq_ops = {
.show = table_seq_show,
};
+static const struct seq_operations format4_seq_ops = {
+ .start = table_seq_start,
+ .next = table_seq_next,
+ .stop = table_seq_stop,
+ .show = table_seq_show,
+};
+
static const struct file_operations format1_fops;
static const struct file_operations format2_fops;
static const struct file_operations format3_fops;
+static const struct file_operations format4_fops;
static int table_open(struct inode *inode, struct file *file)
{
@@ -573,6 +637,8 @@ static int table_open(struct inode *inode, struct file *file)
ret = seq_open(file, &format2_seq_ops);
else if (file->f_op == &format3_fops)
ret = seq_open(file, &format3_seq_ops);
+ else if (file->f_op == &format4_fops)
+ ret = seq_open(file, &format4_seq_ops);
if (ret)
return ret;
@@ -606,6 +672,14 @@ static const struct file_operations format3_fops = {
.release = seq_release
};
+static const struct file_operations format4_fops = {
+ .owner = THIS_MODULE,
+ .open = table_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
/*
* dump lkb's on the ls_waiters list
*/
@@ -652,6 +726,8 @@ void dlm_delete_debug_file(struct dlm_ls *ls)
debugfs_remove(ls->ls_debug_locks_dentry);
if (ls->ls_debug_all_dentry)
debugfs_remove(ls->ls_debug_all_dentry);
+ if (ls->ls_debug_toss_dentry)
+ debugfs_remove(ls->ls_debug_toss_dentry);
}
int dlm_create_debug_file(struct dlm_ls *ls)
@@ -694,6 +770,19 @@ int dlm_create_debug_file(struct dlm_ls *ls)
if (!ls->ls_debug_all_dentry)
goto fail;
+ /* format 4 */
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_toss", ls->ls_name);
+
+ ls->ls_debug_toss_dentry = debugfs_create_file(name,
+ S_IFREG | S_IRUGO,
+ dlm_root,
+ ls,
+ &format4_fops);
+ if (!ls->ls_debug_toss_dentry)
+ goto fail;
+
memset(name, 0, sizeof(name));
snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name);
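
print_format4() above prints a resource name as text only when every byte is printable ASCII and otherwise dumps it as hex. A standalone version of that check (using a plain <= 0x7f test in place of the kernel's isascii()):

#include <stdio.h>
#include <ctype.h>

static void print_name(const unsigned char *name, int len)
{
	int printable = 1;
	int i;

	for (i = 0; i < len; i++)
		if (name[i] > 0x7f || !isprint(name[i]))
			printable = 0;

	printf("%s", printable ? "str " : "hex");
	for (i = 0; i < len; i++) {
		if (printable)
			printf("%c", name[i]);
		else
			printf(" %02x", name[i]);
	}
	printf("\n");
}

int main(void)
{
	print_name((const unsigned char *)"lockname", 8);
	print_name((const unsigned char *)"\x01\x02\xff", 3);
	return 0;
}
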
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index dc5eb598b81f..278a75cda446 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -23,50 +23,6 @@
#include "lock.h"
#include "dir.h"
-
-static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
-{
- spin_lock(&ls->ls_recover_list_lock);
- list_add(&de->list, &ls->ls_recover_list);
- spin_unlock(&ls->ls_recover_list_lock);
-}
-
-static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
-{
- int found = 0;
- struct dlm_direntry *de;
-
- spin_lock(&ls->ls_recover_list_lock);
- list_for_each_entry(de, &ls->ls_recover_list, list) {
- if (de->length == len) {
- list_del(&de->list);
- de->master_nodeid = 0;
- memset(de->name, 0, len);
- found = 1;
- break;
- }
- }
- spin_unlock(&ls->ls_recover_list_lock);
-
- if (!found)
- de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_NOFS);
- return de;
-}
-
-void dlm_clear_free_entries(struct dlm_ls *ls)
-{
- struct dlm_direntry *de;
-
- spin_lock(&ls->ls_recover_list_lock);
- while (!list_empty(&ls->ls_recover_list)) {
- de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
- list);
- list_del(&de->list);
- kfree(de);
- }
- spin_unlock(&ls->ls_recover_list_lock);
-}
-
/*
* We use the upper 16 bits of the hash value to select the directory node.
* Low bits are used for distribution of rsb's among hash buckets on each node.
@@ -78,144 +34,53 @@ void dlm_clear_free_entries(struct dlm_ls *ls)
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
{
- struct list_head *tmp;
- struct dlm_member *memb = NULL;
- uint32_t node, n = 0;
- int nodeid;
-
- if (ls->ls_num_nodes == 1) {
- nodeid = dlm_our_nodeid();
- goto out;
- }
+ uint32_t node;
- if (ls->ls_node_array) {
+ if (ls->ls_num_nodes == 1)
+ return dlm_our_nodeid();
+ else {
node = (hash >> 16) % ls->ls_total_weight;
- nodeid = ls->ls_node_array[node];
- goto out;
- }
-
- /* make_member_array() failed to kmalloc ls_node_array... */
-
- node = (hash >> 16) % ls->ls_num_nodes;
-
- list_for_each(tmp, &ls->ls_nodes) {
- if (n++ != node)
- continue;
- memb = list_entry(tmp, struct dlm_member, list);
- break;
+ return ls->ls_node_array[node];
}
-
- DLM_ASSERT(memb , printk("num_nodes=%u n=%u node=%u\n",
- ls->ls_num_nodes, n, node););
- nodeid = memb->nodeid;
- out:
- return nodeid;
}
int dlm_dir_nodeid(struct dlm_rsb *r)
{
- return dlm_hash2nodeid(r->res_ls, r->res_hash);
-}
-
-static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
-{
- uint32_t val;
-
- val = jhash(name, len, 0);
- val &= (ls->ls_dirtbl_size - 1);
-
- return val;
-}
-
-static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
-{
- uint32_t bucket;
-
- bucket = dir_hash(ls, de->name, de->length);
- list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
+ return r->res_dir_nodeid;
}
-static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
- int namelen, uint32_t bucket)
+void dlm_recover_dir_nodeid(struct dlm_ls *ls)
{
- struct dlm_direntry *de;
-
- list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
- if (de->length == namelen && !memcmp(name, de->name, namelen))
- goto out;
- }
- de = NULL;
- out:
- return de;
-}
-
-void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
-{
- struct dlm_direntry *de;
- uint32_t bucket;
-
- bucket = dir_hash(ls, name, namelen);
-
- spin_lock(&ls->ls_dirtbl[bucket].lock);
-
- de = search_bucket(ls, name, namelen, bucket);
-
- if (!de) {
- log_error(ls, "remove fr %u none", nodeid);
- goto out;
- }
-
- if (de->master_nodeid != nodeid) {
- log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
- goto out;
- }
-
- list_del(&de->list);
- kfree(de);
- out:
- spin_unlock(&ls->ls_dirtbl[bucket].lock);
-}
+ struct dlm_rsb *r;
-void dlm_dir_clear(struct dlm_ls *ls)
-{
- struct list_head *head;
- struct dlm_direntry *de;
- int i;
-
- DLM_ASSERT(list_empty(&ls->ls_recover_list), );
-
- for (i = 0; i < ls->ls_dirtbl_size; i++) {
- spin_lock(&ls->ls_dirtbl[i].lock);
- head = &ls->ls_dirtbl[i].list;
- while (!list_empty(head)) {
- de = list_entry(head->next, struct dlm_direntry, list);
- list_del(&de->list);
- put_free_de(ls, de);
- }
- spin_unlock(&ls->ls_dirtbl[i].lock);
+ down_read(&ls->ls_root_sem);
+ list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash);
}
+ up_read(&ls->ls_root_sem);
}
int dlm_recover_directory(struct dlm_ls *ls)
{
struct dlm_member *memb;
- struct dlm_direntry *de;
char *b, *last_name = NULL;
- int error = -ENOMEM, last_len, count = 0;
+ int error = -ENOMEM, last_len, nodeid, result;
uint16_t namelen;
+ unsigned int count = 0, count_match = 0, count_bad = 0, count_add = 0;
log_debug(ls, "dlm_recover_directory");
if (dlm_no_directory(ls))
goto out_status;
- dlm_dir_clear(ls);
-
last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS);
if (!last_name)
goto out;
list_for_each_entry(memb, &ls->ls_nodes, list) {
+ if (memb->nodeid == dlm_our_nodeid())
+ continue;
+
memset(last_name, 0, DLM_RESNAME_MAXLEN);
last_len = 0;
@@ -230,7 +95,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
if (error)
goto out_free;
- schedule();
+ cond_resched();
/*
* pick namelen/name pairs out of received buffer
@@ -267,87 +132,71 @@ int dlm_recover_directory(struct dlm_ls *ls)
if (namelen > DLM_RESNAME_MAXLEN)
goto out_free;
- error = -ENOMEM;
- de = get_free_de(ls, namelen);
- if (!de)
+ error = dlm_master_lookup(ls, memb->nodeid,
+ b, namelen,
+ DLM_LU_RECOVER_DIR,
+ &nodeid, &result);
+ if (error) {
+ log_error(ls, "recover_dir lookup %d",
+ error);
goto out_free;
+ }
+
+ /* The name was found in rsbtbl, but the
+ * master nodeid is different from
+ * memb->nodeid which says it is the master.
+ * This should not happen. */
+
+ if (result == DLM_LU_MATCH &&
+ nodeid != memb->nodeid) {
+ count_bad++;
+ log_error(ls, "recover_dir lookup %d "
+ "nodeid %d memb %d bad %u",
+ result, nodeid, memb->nodeid,
+ count_bad);
+ print_hex_dump_bytes("dlm_recover_dir ",
+ DUMP_PREFIX_NONE,
+ b, namelen);
+ }
+
+ /* The name was found in rsbtbl, and the
+ * master nodeid matches memb->nodeid. */
+
+ if (result == DLM_LU_MATCH &&
+ nodeid == memb->nodeid) {
+ count_match++;
+ }
+
+ /* The name was not found in rsbtbl and was
+ * added with memb->nodeid as the master. */
+
+ if (result == DLM_LU_ADD) {
+ count_add++;
+ }
- de->master_nodeid = memb->nodeid;
- de->length = namelen;
last_len = namelen;
- memcpy(de->name, b, namelen);
memcpy(last_name, b, namelen);
b += namelen;
left -= namelen;
-
- add_entry_to_hash(ls, de);
count++;
}
}
- done:
+ done:
;
}
out_status:
error = 0;
- log_debug(ls, "dlm_recover_directory %d entries", count);
+ dlm_set_recover_status(ls, DLM_RS_DIR);
+
+ log_debug(ls, "dlm_recover_directory %u in %u new",
+ count, count_add);
out_free:
kfree(last_name);
out:
- dlm_clear_free_entries(ls);
return error;
}
-static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
- int namelen, int *r_nodeid)
-{
- struct dlm_direntry *de, *tmp;
- uint32_t bucket;
-
- bucket = dir_hash(ls, name, namelen);
-
- spin_lock(&ls->ls_dirtbl[bucket].lock);
- de = search_bucket(ls, name, namelen, bucket);
- if (de) {
- *r_nodeid = de->master_nodeid;
- spin_unlock(&ls->ls_dirtbl[bucket].lock);
- if (*r_nodeid == nodeid)
- return -EEXIST;
- return 0;
- }
-
- spin_unlock(&ls->ls_dirtbl[bucket].lock);
-
- if (namelen > DLM_RESNAME_MAXLEN)
- return -EINVAL;
-
- de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_NOFS);
- if (!de)
- return -ENOMEM;
-
- de->master_nodeid = nodeid;
- de->length = namelen;
- memcpy(de->name, name, namelen);
-
- spin_lock(&ls->ls_dirtbl[bucket].lock);
- tmp = search_bucket(ls, name, namelen, bucket);
- if (tmp) {
- kfree(de);
- de = tmp;
- } else {
- list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
- }
- *r_nodeid = de->master_nodeid;
- spin_unlock(&ls->ls_dirtbl[bucket].lock);
- return 0;
-}
-
-int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
- int *r_nodeid)
-{
- return get_entry(ls, nodeid, name, namelen, r_nodeid);
-}
-
static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
{
struct dlm_rsb *r;
@@ -358,10 +207,10 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
bucket = hash & (ls->ls_rsbtbl_size - 1);
spin_lock(&ls->ls_rsbtbl[bucket].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, 0, &r);
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
if (rv)
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
- name, len, 0, &r);
+ name, len, &r);
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
if (!rv)
@@ -371,7 +220,7 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
if (len == r->res_length && !memcmp(name, r->res_name, len)) {
up_read(&ls->ls_root_sem);
- log_error(ls, "find_rsb_root revert to root_list %s",
+ log_debug(ls, "find_rsb_root revert to root_list %s",
r->res_name);
return r;
}
@@ -429,6 +278,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
be_namelen = cpu_to_be16(0);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
+ ls->ls_recover_dir_sent_msg++;
goto out;
}
@@ -437,6 +287,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
offset += sizeof(__be16);
memcpy(outbuf + offset, r->res_name, r->res_length);
offset += r->res_length;
+ ls->ls_recover_dir_sent_res++;
}
/*
@@ -449,8 +300,8 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
be_namelen = cpu_to_be16(0xFFFF);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
+ ls->ls_recover_dir_sent_msg++;
}
-
out:
up_read(&ls->ls_root_sem);
}
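
With the dedicated directory hash table removed, dlm_hash2nodeid() now derives the directory node purely from the resource-name hash: the upper 16 bits, taken modulo the total node weight, index the weighted node array. A toy version of that mapping, with an FNV-1a stand-in for jhash() and a made-up node array:

#include <stdio.h>
#include <stdint.h>

/* weighted node array: each nodeid appears once per unit of weight */
static const int node_array[] = { 1, 1, 2, 3 };   /* node 1 has weight 2 */
#define TOTAL_WEIGHT (sizeof(node_array) / sizeof(node_array[0]))

/* stand-in for jhash(name, len, 0); FNV-1a just for the demo */
static uint32_t toy_hash(const char *name)
{
	uint32_t h = 2166136261u;

	while (*name)
		h = (h ^ (unsigned char)*name++) * 16777619u;
	return h;
}

static int hash2nodeid(uint32_t hash)
{
	return node_array[(hash >> 16) % TOTAL_WEIGHT];   /* upper 16 bits */
}

int main(void)
{
	const char *name = "resource-A";

	printf("dir nodeid for %s: %d\n", name, hash2nodeid(toy_hash(name)));
	return 0;
}
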
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
index 0b0eb1267b6e..417506344456 100644
--- a/fs/dlm/dir.h
+++ b/fs/dlm/dir.h
@@ -14,15 +14,10 @@
#ifndef __DIR_DOT_H__
#define __DIR_DOT_H__
-
int dlm_dir_nodeid(struct dlm_rsb *rsb);
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
-void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int len);
-void dlm_dir_clear(struct dlm_ls *ls);
-void dlm_clear_free_entries(struct dlm_ls *ls);
+void dlm_recover_dir_nodeid(struct dlm_ls *ls);
int dlm_recover_directory(struct dlm_ls *ls);
-int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
- int *r_nodeid);
void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
char *outbuf, int outlen, int nodeid);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index bc342f7ac3af..9d3e485f88c8 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -55,8 +55,6 @@ struct dlm_lkb;
struct dlm_rsb;
struct dlm_member;
struct dlm_rsbtable;
-struct dlm_dirtable;
-struct dlm_direntry;
struct dlm_recover;
struct dlm_header;
struct dlm_message;
@@ -98,18 +96,6 @@ do { \
}
-struct dlm_direntry {
- struct list_head list;
- uint32_t master_nodeid;
- uint16_t length;
- char name[1];
-};
-
-struct dlm_dirtable {
- struct list_head list;
- spinlock_t lock;
-};
-
struct dlm_rsbtable {
struct rb_root keep;
struct rb_root toss;
@@ -283,6 +269,15 @@ struct dlm_lkb {
};
};
+/*
+ * res_master_nodeid is "normal": 0 is unset/invalid, non-zero is the real
+ * nodeid, even when nodeid is our_nodeid.
+ *
+ * res_nodeid is "odd": -1 is unset/invalid, zero means our_nodeid,
+ * greater than zero when another nodeid.
+ *
+ * (TODO: remove res_nodeid and only use res_master_nodeid)
+ */
struct dlm_rsb {
struct dlm_ls *res_ls; /* the lockspace */
@@ -291,6 +286,9 @@ struct dlm_rsb {
unsigned long res_flags;
int res_length; /* length of rsb name */
int res_nodeid;
+ int res_master_nodeid;
+ int res_dir_nodeid;
+ int res_id; /* for ls_recover_idr */
uint32_t res_lvbseq;
uint32_t res_hash;
uint32_t res_bucket; /* rsbtbl */
@@ -313,10 +311,21 @@ struct dlm_rsb {
char res_name[DLM_RESNAME_MAXLEN+1];
};
+/* dlm_master_lookup() flags */
+
+#define DLM_LU_RECOVER_DIR 1
+#define DLM_LU_RECOVER_MASTER 2
+
+/* dlm_master_lookup() results */
+
+#define DLM_LU_MATCH 1
+#define DLM_LU_ADD 2
+
/* find_rsb() flags */
-#define R_MASTER 1 /* only return rsb if it's a master */
-#define R_CREATE 2 /* create/add rsb if not found */
+#define R_REQUEST 0x00000001
+#define R_RECEIVE_REQUEST 0x00000002
+#define R_RECEIVE_RECOVER 0x00000004
/* rsb_flags */
@@ -489,6 +498,13 @@ struct rcom_lock {
char rl_lvb[0];
};
+/*
+ * The max number of resources per rsbtbl bucket that shrink will attempt
+ * to remove in each iteration.
+ */
+
+#define DLM_REMOVE_NAMES_MAX 8
+
struct dlm_ls {
struct list_head ls_list; /* list of lockspaces */
dlm_lockspace_t *ls_local_handle;
@@ -509,9 +525,6 @@ struct dlm_ls {
struct dlm_rsbtable *ls_rsbtbl;
uint32_t ls_rsbtbl_size;
- struct dlm_dirtable *ls_dirtbl;
- uint32_t ls_dirtbl_size;
-
struct mutex ls_waiters_mutex;
struct list_head ls_waiters; /* lkbs needing a reply */
@@ -525,6 +538,12 @@ struct dlm_ls {
int ls_new_rsb_count;
struct list_head ls_new_rsb; /* new rsb structs */
+ spinlock_t ls_remove_spin;
+ char ls_remove_name[DLM_RESNAME_MAXLEN+1];
+ char *ls_remove_names[DLM_REMOVE_NAMES_MAX];
+ int ls_remove_len;
+ int ls_remove_lens[DLM_REMOVE_NAMES_MAX];
+
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -545,6 +564,7 @@ struct dlm_ls {
struct dentry *ls_debug_waiters_dentry; /* debugfs */
struct dentry *ls_debug_locks_dentry; /* debugfs */
struct dentry *ls_debug_all_dentry; /* debugfs */
+ struct dentry *ls_debug_toss_dentry; /* debugfs */
wait_queue_head_t ls_uevent_wait; /* user part of join/leave */
int ls_uevent_result;
@@ -573,12 +593,16 @@ struct dlm_ls {
struct mutex ls_requestqueue_mutex;
struct dlm_rcom *ls_recover_buf;
int ls_recover_nodeid; /* for debugging */
+ unsigned int ls_recover_dir_sent_res; /* for log info */
+ unsigned int ls_recover_dir_sent_msg; /* for log info */
unsigned int ls_recover_locks_in; /* for log info */
uint64_t ls_rcom_seq;
spinlock_t ls_rcom_spin;
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
int ls_recover_list_count;
+ struct idr ls_recover_idr;
+ spinlock_t ls_recover_idr_lock;
wait_queue_head_t ls_wait_general;
struct mutex ls_clear_proc_locks;
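
The comment added to struct dlm_rsb documents two coexisting conventions: res_master_nodeid is 0 when unset and otherwise a real nodeid (possibly our own), while res_nodeid is -1 when unset, 0 when the local node is master, and positive for a remote master. A few lines showing the translation that the lock.c changes perform between the two (our_nodeid below is an arbitrary example value):

#include <stdio.h>

/* res_nodeid convention derived from the res_master_nodeid convention */
static int to_res_nodeid(int master_nodeid, int our_nodeid)
{
	if (master_nodeid == 0)
		return -1;                                  /* unknown */
	return master_nodeid == our_nodeid ? 0 : master_nodeid;
}

int main(void)
{
	int our_nodeid = 3;                             /* illustrative */

	printf("%d\n", to_res_nodeid(0, our_nodeid));   /* -1: unknown */
	printf("%d\n", to_res_nodeid(3, our_nodeid));   /*  0: local   */
	printf("%d\n", to_res_nodeid(7, our_nodeid));   /*  7: remote  */
	return 0;
}
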
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index bdafb65a5234..b56950758188 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -90,6 +90,7 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
+static void toss_rsb(struct kref *kref);
/*
* Lock compatibilty matrix - thanks Steve
@@ -170,9 +171,11 @@ void dlm_print_lkb(struct dlm_lkb *lkb)
static void dlm_print_rsb(struct dlm_rsb *r)
{
- printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
- r->res_nodeid, r->res_flags, r->res_first_lkid,
- r->res_recover_locks_count, r->res_name);
+ printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
+ "rlc %d name %s\n",
+ r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
+ r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
+ r->res_name);
}
void dlm_dump_rsb(struct dlm_rsb *r)
@@ -327,6 +330,37 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
* Basic operations on rsb's and lkb's
*/
+/* This is only called to add a reference when the code already holds
+ a valid reference to the rsb, so there's no need for locking. */
+
+static inline void hold_rsb(struct dlm_rsb *r)
+{
+ kref_get(&r->res_ref);
+}
+
+void dlm_hold_rsb(struct dlm_rsb *r)
+{
+ hold_rsb(r);
+}
+
+/* When all references to the rsb are gone it's transferred to
+ the tossed list for later disposal. */
+
+static void put_rsb(struct dlm_rsb *r)
+{
+ struct dlm_ls *ls = r->res_ls;
+ uint32_t bucket = r->res_bucket;
+
+ spin_lock(&ls->ls_rsbtbl[bucket].lock);
+ kref_put(&r->res_ref, toss_rsb);
+ spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+}
+
+void dlm_put_rsb(struct dlm_rsb *r)
+{
+ put_rsb(r);
+}
+
static int pre_rsb_struct(struct dlm_ls *ls)
{
struct dlm_rsb *r1, *r2;
@@ -411,11 +445,10 @@ static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
}
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
- unsigned int flags, struct dlm_rsb **r_ret)
+ struct dlm_rsb **r_ret)
{
struct rb_node *node = tree->rb_node;
struct dlm_rsb *r;
- int error = 0;
int rc;
while (node) {
@@ -432,10 +465,8 @@ int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
return -EBADR;
found:
- if (r->res_nodeid && (flags & R_MASTER))
- error = -ENOTBLK;
*r_ret = r;
- return error;
+ return 0;
}
static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
@@ -467,124 +498,587 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
return 0;
}
-static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
- unsigned int flags, struct dlm_rsb **r_ret)
+/*
+ * Find rsb in rsbtbl and potentially create/add one
+ *
+ * Delaying the release of rsb's has a similar benefit to applications keeping
+ * NL locks on an rsb, but without the guarantee that the cached master value
+ * will still be valid when the rsb is reused. Apps aren't always smart enough
+ * to keep NL locks on an rsb that they may lock again shortly; this can lead
+ * to excessive master lookups and removals if we don't delay the release.
+ *
+ * Searching for an rsb means looking through both the normal list and toss
+ * list. When found on the toss list the rsb is moved to the normal list with
+ * ref count of 1; when found on normal list the ref count is incremented.
+ *
+ * rsb's on the keep list are being used locally and refcounted.
+ * rsb's on the toss list are not being used locally, and are not refcounted.
+ *
+ * The toss list rsb's were either
+ * - previously used locally but not any more (were on keep list, then
+ * moved to toss list when last refcount dropped)
+ * - created and put on toss list as a directory record for a lookup
+ * (we are the dir node for the res, but are not using the res right now,
+ * but some other node is)
+ *
+ * The purpose of find_rsb() is to return a refcounted rsb for local use.
+ * So, if the given rsb is on the toss list, it is moved to the keep list
+ * before being returned.
+ *
+ * toss_rsb() happens when all local usage of the rsb is done, i.e. no
+ * more refcounts exist, so the rsb is moved from the keep list to the
+ * toss list.
+ *
+ * rsb's on both keep and toss lists are used for doing a name to master
+ * lookups. rsb's that are in use locally (and being refcounted) are on
+ * the keep list, rsb's that are not in use locally (not refcounted) and
+ * only exist for name/master lookups are on the toss list.
+ *
+ * rsb's on the toss list who's dir_nodeid is not local can have stale
+ * name/master mappings. So, remote requests on such rsb's can potentially
+ * return with an error, which means the mapping is stale and needs to
+ * be updated with a new lookup. (The idea behind MASTER UNCERTAIN and
+ * first_lkid is to keep only a single outstanding request on an rsb
+ * while that rsb has a potentially stale master.)
+ */
+
+static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
+ uint32_t hash, uint32_t b,
+ int dir_nodeid, int from_nodeid,
+ unsigned int flags, struct dlm_rsb **r_ret)
{
- struct dlm_rsb *r;
+ struct dlm_rsb *r = NULL;
+ int our_nodeid = dlm_our_nodeid();
+ int from_local = 0;
+ int from_other = 0;
+ int from_dir = 0;
+ int create = 0;
int error;
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, flags, &r);
- if (!error) {
- kref_get(&r->res_ref);
- goto out;
+ if (flags & R_RECEIVE_REQUEST) {
+ if (from_nodeid == dir_nodeid)
+ from_dir = 1;
+ else
+ from_other = 1;
+ } else if (flags & R_REQUEST) {
+ from_local = 1;
}
- if (error == -ENOTBLK)
- goto out;
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
+ /*
+ * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
+ * from_nodeid has sent us a lock in dlm_recover_locks, believing
+ * we're the new master. Our local recovery may not have set
+ * res_master_nodeid to our_nodeid yet, so allow either. Don't
+ * create the rsb; dlm_recover_process_copy() will handle EBADR
+ * by resending.
+ *
+ * If someone sends us a request, we are the dir node, and we do
+ * not find the rsb anywhere, then recreate it. This happens if
+ * someone sends us a request after we have removed/freed an rsb
+ * from our toss list. (They sent a request instead of lookup
+ * because they are using an rsb from their toss list.)
+ */
+
+ if (from_local || from_dir ||
+ (from_other && (dir_nodeid == our_nodeid))) {
+ create = 1;
+ }
+
+ retry:
+ if (create) {
+ error = pre_rsb_struct(ls);
+ if (error < 0)
+ goto out;
+ }
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
if (error)
- goto out;
+ goto do_toss;
+
+ /*
+ * rsb is active, so we can't check master_nodeid without lock_rsb.
+ */
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ kref_get(&r->res_ref);
+ error = 0;
+ goto out_unlock;
+
+
+ do_toss:
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
if (error)
- return error;
+ goto do_new;
- if (dlm_no_directory(ls))
- goto out;
+ /*
+ * rsb found inactive (master_nodeid may be out of date unless
+ * we are the dir_nodeid or were the master) No other thread
+ * is using this rsb because it's on the toss list, so we can
+ * look at or update res_master_nodeid without lock_rsb.
+ */
+
+ if ((r->res_master_nodeid != our_nodeid) && from_other) {
+ /* our rsb was not master, and another node (not the dir node)
+ has sent us a request */
+ log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
+ from_nodeid, r->res_master_nodeid, dir_nodeid,
+ r->res_name);
+ error = -ENOTBLK;
+ goto out_unlock;
+ }
- if (r->res_nodeid == -1) {
+ if ((r->res_master_nodeid != our_nodeid) && from_dir) {
+ /* don't think this should ever happen */
+ log_error(ls, "find_rsb toss from_dir %d master %d",
+ from_nodeid, r->res_master_nodeid);
+ dlm_print_rsb(r);
+ /* fix it and go on */
+ r->res_master_nodeid = our_nodeid;
+ r->res_nodeid = 0;
rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
r->res_first_lkid = 0;
- } else if (r->res_nodeid > 0) {
+ }
+
+ if (from_local && (r->res_master_nodeid != our_nodeid)) {
+ /* Because we have held no locks on this rsb,
+ res_master_nodeid could have become stale. */
rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
r->res_first_lkid = 0;
+ }
+
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ goto out_unlock;
+
+
+ do_new:
+ /*
+ * rsb not found
+ */
+
+ if (error == -EBADR && !create)
+ goto out_unlock;
+
+ error = get_rsb_struct(ls, name, len, &r);
+ if (error == -EAGAIN) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ goto retry;
+ }
+ if (error)
+ goto out_unlock;
+
+ r->res_hash = hash;
+ r->res_bucket = b;
+ r->res_dir_nodeid = dir_nodeid;
+ kref_init(&r->res_ref);
+
+ if (from_dir) {
+ /* want to see how often this happens */
+ log_debug(ls, "find_rsb new from_dir %d recreate %s",
+ from_nodeid, r->res_name);
+ r->res_master_nodeid = our_nodeid;
+ r->res_nodeid = 0;
+ goto out_add;
+ }
+
+ if (from_other && (dir_nodeid != our_nodeid)) {
+ /* should never happen */
+ log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
+ from_nodeid, dir_nodeid, our_nodeid, r->res_name);
+ dlm_free_rsb(r);
+ error = -ENOTBLK;
+ goto out_unlock;
+ }
+
+ if (from_other) {
+ log_debug(ls, "find_rsb new from_other %d dir %d %s",
+ from_nodeid, dir_nodeid, r->res_name);
+ }
+
+ if (dir_nodeid == our_nodeid) {
+ /* When we are the dir nodeid, we can set the master
+ node immediately */
+ r->res_master_nodeid = our_nodeid;
+ r->res_nodeid = 0;
} else {
- DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
- DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
+ /* set_master will send_lookup to dir_nodeid */
+ r->res_master_nodeid = 0;
+ r->res_nodeid = -1;
+ }
+
+ out_add:
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ out_unlock:
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ out:
+ *r_ret = r;
+ return error;
+}
+
+/* During recovery, other nodes can send us new MSTCPY locks (from
+ dlm_recover_locks) before we've made ourself master (in
+ dlm_recover_masters). */
+
+static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
+ uint32_t hash, uint32_t b,
+ int dir_nodeid, int from_nodeid,
+ unsigned int flags, struct dlm_rsb **r_ret)
+{
+ struct dlm_rsb *r = NULL;
+ int our_nodeid = dlm_our_nodeid();
+ int recover = (flags & R_RECEIVE_RECOVER);
+ int error;
+
+ retry:
+ error = pre_rsb_struct(ls);
+ if (error < 0)
+ goto out;
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ if (error)
+ goto do_toss;
+
+ /*
+ * rsb is active, so we can't check master_nodeid without lock_rsb.
+ */
+
+ kref_get(&r->res_ref);
+ goto out_unlock;
+
+
+ do_toss:
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (error)
+ goto do_new;
+
+ /*
+ * rsb found inactive. No other thread is using this rsb because
+ * it's on the toss list, so we can look at or update
+ * res_master_nodeid without lock_rsb.
+ */
+
+ if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
+ /* our rsb is not master, and another node has sent us a
+ request; this should never happen */
+ log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
+ from_nodeid, r->res_master_nodeid, dir_nodeid);
+ dlm_print_rsb(r);
+ error = -ENOTBLK;
+ goto out_unlock;
}
+
+ if (!recover && (r->res_master_nodeid != our_nodeid) &&
+ (dir_nodeid == our_nodeid)) {
+ /* our rsb is not master, and we are dir; may as well fix it;
+ this should never happen */
+ log_error(ls, "find_rsb toss our %d master %d dir %d",
+ our_nodeid, r->res_master_nodeid, dir_nodeid);
+ dlm_print_rsb(r);
+ r->res_master_nodeid = our_nodeid;
+ r->res_nodeid = 0;
+ }
+
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ goto out_unlock;
+
+
+ do_new:
+ /*
+ * rsb not found
+ */
+
+ error = get_rsb_struct(ls, name, len, &r);
+ if (error == -EAGAIN) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ goto retry;
+ }
+ if (error)
+ goto out_unlock;
+
+ r->res_hash = hash;
+ r->res_bucket = b;
+ r->res_dir_nodeid = dir_nodeid;
+ r->res_master_nodeid = dir_nodeid;
+ r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
+ kref_init(&r->res_ref);
+
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
+ out_unlock:
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
out:
*r_ret = r;
return error;
}
+static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
+ unsigned int flags, struct dlm_rsb **r_ret)
+{
+ uint32_t hash, b;
+ int dir_nodeid;
+
+ if (len > DLM_RESNAME_MAXLEN)
+ return -EINVAL;
+
+ hash = jhash(name, len, 0);
+ b = hash & (ls->ls_rsbtbl_size - 1);
+
+ dir_nodeid = dlm_hash2nodeid(ls, hash);
+
+ if (dlm_no_directory(ls))
+ return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
+ from_nodeid, flags, r_ret);
+ else
+ return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
+ from_nodeid, flags, r_ret);
+}
+
+/* we have received a request and found that res_master_nodeid != our_nodeid,
+ so we need to return an error or make ourself the master */
+
+static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
+ int from_nodeid)
+{
+ if (dlm_no_directory(ls)) {
+ log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
+ from_nodeid, r->res_master_nodeid,
+ r->res_dir_nodeid);
+ dlm_print_rsb(r);
+ return -ENOTBLK;
+ }
+
+ if (from_nodeid != r->res_dir_nodeid) {
+ /* our rsb is not master, and another node (not the dir node)
+ has sent us a request. this is much more common when our
+ master_nodeid is zero, so limit debug to non-zero. */
+
+ if (r->res_master_nodeid) {
+ log_debug(ls, "validate master from_other %d master %d "
+ "dir %d first %x %s", from_nodeid,
+ r->res_master_nodeid, r->res_dir_nodeid,
+ r->res_first_lkid, r->res_name);
+ }
+ return -ENOTBLK;
+ } else {
+ /* our rsb is not master, but the dir nodeid has sent us a
+ request; this could happen with master 0 / res_nodeid -1 */
+
+ if (r->res_master_nodeid) {
+ log_error(ls, "validate master from_dir %d master %d "
+ "first %x %s",
+ from_nodeid, r->res_master_nodeid,
+ r->res_first_lkid, r->res_name);
+ }
+
+ r->res_master_nodeid = dlm_our_nodeid();
+ r->res_nodeid = 0;
+ return 0;
+ }
+}
+
/*
- * Find rsb in rsbtbl and potentially create/add one
+ * We're the dir node for this res and another node wants to know the
+ * master nodeid. During normal operation (non recovery) this is only
+ * called from receive_lookup(); master lookups when the local node is
+ * the dir node are done by find_rsb().
*
- * Delaying the release of rsb's has a similar benefit to applications keeping
- * NL locks on an rsb, but without the guarantee that the cached master value
- * will still be valid when the rsb is reused. Apps aren't always smart enough
- * to keep NL locks on an rsb that they may lock again shortly; this can lead
- * to excessive master lookups and removals if we don't delay the release.
+ * normal operation, we are the dir node for a resource
+ * . _request_lock
+ * . set_master
+ * . send_lookup
+ * . receive_lookup
+ * . dlm_master_lookup flags 0
*
- * Searching for an rsb means looking through both the normal list and toss
- * list. When found on the toss list the rsb is moved to the normal list with
- * ref count of 1; when found on normal list the ref count is incremented.
+ * recover directory, we are rebuilding dir for all resources
+ * . dlm_recover_directory
+ * . dlm_rcom_names
+ * remote node sends back the rsb names it is master of and we are dir of
+ * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
+ * we either create new rsb setting remote node as master, or find existing
+ * rsb and set master to be the remote node.
+ *
+ * recover masters, we are finding the new master for resources
+ * . dlm_recover_masters
+ * . recover_master
+ * . dlm_send_rcom_lookup
+ * . receive_rcom_lookup
+ * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
*/
-static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
- unsigned int flags, struct dlm_rsb **r_ret)
+int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
+ unsigned int flags, int *r_nodeid, int *result)
{
struct dlm_rsb *r = NULL;
- uint32_t hash, bucket;
- int error;
+ uint32_t hash, b;
+ int from_master = (flags & DLM_LU_RECOVER_DIR);
+ int fix_master = (flags & DLM_LU_RECOVER_MASTER);
+ int our_nodeid = dlm_our_nodeid();
+ int dir_nodeid, error, toss_list = 0;
- if (namelen > DLM_RESNAME_MAXLEN) {
- error = -EINVAL;
- goto out;
+ if (len > DLM_RESNAME_MAXLEN)
+ return -EINVAL;
+
+ if (from_nodeid == our_nodeid) {
+ log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
+ our_nodeid, flags);
+ return -EINVAL;
}
- if (dlm_no_directory(ls))
- flags |= R_CREATE;
+ hash = jhash(name, len, 0);
+ b = hash & (ls->ls_rsbtbl_size - 1);
- hash = jhash(name, namelen, 0);
- bucket = hash & (ls->ls_rsbtbl_size - 1);
+ dir_nodeid = dlm_hash2nodeid(ls, hash);
+ if (dir_nodeid != our_nodeid) {
+ log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
+ from_nodeid, dir_nodeid, our_nodeid, hash,
+ ls->ls_num_nodes);
+ *r_nodeid = -1;
+ return -EINVAL;
+ }
retry:
- if (flags & R_CREATE) {
- error = pre_rsb_struct(ls);
- if (error < 0)
- goto out;
+ error = pre_rsb_struct(ls);
+ if (error < 0)
+ return error;
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ if (!error) {
+ /* because the rsb is active, we need to lock_rsb before
+ checking/changing res_master_nodeid */
+
+ hold_rsb(r);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ lock_rsb(r);
+ goto found;
}
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (error)
+ goto not_found;
- error = _search_rsb(ls, name, namelen, bucket, flags, &r);
- if (!error)
- goto out_unlock;
+ /* because the rsb is inactive (on toss list), it's not refcounted
+ and lock_rsb is not used, but is protected by the rsbtbl lock */
- if (error == -EBADR && !(flags & R_CREATE))
- goto out_unlock;
+ toss_list = 1;
+ found:
+ if (r->res_dir_nodeid != our_nodeid) {
+ /* should not happen, but may as well fix it and carry on */
+ log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
+ r->res_dir_nodeid, our_nodeid, r->res_name);
+ r->res_dir_nodeid = our_nodeid;
+ }
+
+ if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
+ /* Recovery uses this function to set a new master when
+ the previous master failed. Setting NEW_MASTER will
+ force dlm_recover_masters to call recover_master on this
+ rsb even though res_nodeid no longer refers to a removed node. */
+
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ rsb_set_flag(r, RSB_NEW_MASTER);
+
+ if (toss_list) {
+ /* I don't think we should ever find it on toss list. */
+ log_error(ls, "dlm_master_lookup fix_master on toss");
+ dlm_dump_rsb(r);
+ }
+ }
- /* the rsb was found but wasn't a master copy */
- if (error == -ENOTBLK)
- goto out_unlock;
+ if (from_master && (r->res_master_nodeid != from_nodeid)) {
+ /* this will happen if from_nodeid became master during
+ a previous recovery cycle, and we aborted the previous
+ cycle before recovering this master value */
+
+ log_limit(ls, "dlm_master_lookup from_master %d "
+ "master_nodeid %d res_nodeid %d first %x %s",
+ from_nodeid, r->res_master_nodeid, r->res_nodeid,
+ r->res_first_lkid, r->res_name);
+
+ if (r->res_master_nodeid == our_nodeid) {
+ log_error(ls, "from_master %d our_master", from_nodeid);
+ dlm_dump_rsb(r);
+ dlm_send_rcom_lookup_dump(r, from_nodeid);
+ goto out_found;
+ }
+
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ rsb_set_flag(r, RSB_NEW_MASTER);
+ }
+
+ if (!r->res_master_nodeid) {
+ /* this will happen if recovery happens while we're looking
+ up the master for this rsb */
+
+ log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
+ from_nodeid, r->res_first_lkid, r->res_name);
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ }
- error = get_rsb_struct(ls, name, namelen, &r);
+ if (!from_master && !fix_master &&
+ (r->res_master_nodeid == from_nodeid)) {
+ /* this can happen when the master sends remove, the dir node
+ finds the rsb on the keep list and ignores the remove,
+ and the former master sends a lookup */
+
+ log_limit(ls, "dlm_master_lookup from master %d flags %x "
+ "first %x %s", from_nodeid, flags,
+ r->res_first_lkid, r->res_name);
+ }
+
+ out_found:
+ *r_nodeid = r->res_master_nodeid;
+ if (result)
+ *result = DLM_LU_MATCH;
+
+ if (toss_list) {
+ r->res_toss_time = jiffies;
+ /* the rsb was inactive (on toss list) */
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ } else {
+ /* the rsb was active */
+ unlock_rsb(r);
+ put_rsb(r);
+ }
+ return 0;
+
+ not_found:
+ error = get_rsb_struct(ls, name, len, &r);
if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
goto retry;
}
if (error)
goto out_unlock;
r->res_hash = hash;
- r->res_bucket = bucket;
- r->res_nodeid = -1;
+ r->res_bucket = b;
+ r->res_dir_nodeid = our_nodeid;
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
kref_init(&r->res_ref);
+ r->res_toss_time = jiffies;
- /* With no directory, the master can be set immediately */
- if (dlm_no_directory(ls)) {
- int nodeid = dlm_dir_nodeid(r);
- if (nodeid == dlm_our_nodeid())
- nodeid = 0;
- r->res_nodeid = nodeid;
+ error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
+ if (error) {
+ /* should never happen */
+ dlm_free_rsb(r);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ goto retry;
}
- error = rsb_insert(r, &ls->ls_rsbtbl[bucket].keep);
+
+ if (result)
+ *result = DLM_LU_ADD;
+ *r_nodeid = from_nodeid;
+ error = 0;
out_unlock:
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- out:
- *r_ret = r;
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
return error;
}
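
(Editorial aside, not part of the patch: the comment block above maps dlm_master_lookup()'s flags onto its three call paths. As a quick, non-authoritative illustration, the following is a self-contained userspace sketch of just that flag decoding; the LU_* names and values are invented for the example and are not the kernel's DLM_LU_* definitions.)

#include <stdio.h>

#define LU_RECOVER_DIR    0x1   /* stand-in for DLM_LU_RECOVER_DIR */
#define LU_RECOVER_MASTER 0x2   /* stand-in for DLM_LU_RECOVER_MASTER */

static const char *lookup_context(unsigned int flags)
{
	int from_master = (flags & LU_RECOVER_DIR);    /* dir rebuild: sender is master */
	int fix_master = (flags & LU_RECOVER_MASTER);  /* master recovery: pick new master */

	if (from_master)
		return "recover directory: record the sending node as master";
	if (fix_master)
		return "recover masters: assign a new master for the rsb";
	return "normal operation: answer a receive_lookup() from another node";
}

int main(void)
{
	unsigned int cases[] = { 0, LU_RECOVER_DIR, LU_RECOVER_MASTER };

	for (int i = 0; i < 3; i++)
		printf("flags 0x%x -> %s\n", cases[i], lookup_context(cases[i]));
	return 0;
}

The real function additionally updates res_master_nodeid/res_nodeid and the toss list under the rsbtbl bucket lock, which the sketch deliberately omits.
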
@@ -605,17 +1099,27 @@ static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
}
}
-/* This is only called to add a reference when the code already holds
- a valid reference to the rsb, so there's no need for locking. */
-
-static inline void hold_rsb(struct dlm_rsb *r)
+void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
- kref_get(&r->res_ref);
-}
+ struct dlm_rsb *r = NULL;
+ uint32_t hash, b;
+ int error;
-void dlm_hold_rsb(struct dlm_rsb *r)
-{
- hold_rsb(r);
+ hash = jhash(name, len, 0);
+ b = hash & (ls->ls_rsbtbl_size - 1);
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ if (!error)
+ goto out_dump;
+
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (error)
+ goto out;
+ out_dump:
+ dlm_dump_rsb(r);
+ out:
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
}
static void toss_rsb(struct kref *kref)
@@ -634,24 +1138,6 @@ static void toss_rsb(struct kref *kref)
}
}
-/* When all references to the rsb are gone it's transferred to
- the tossed list for later disposal. */
-
-static void put_rsb(struct dlm_rsb *r)
-{
- struct dlm_ls *ls = r->res_ls;
- uint32_t bucket = r->res_bucket;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- kref_put(&r->res_ref, toss_rsb);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
-}
-
-void dlm_put_rsb(struct dlm_rsb *r)
-{
- put_rsb(r);
-}
-
/* See comment for unhold_lkb */
static void unhold_rsb(struct dlm_rsb *r)
@@ -1138,61 +1624,170 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
return error;
}
-static void dir_remove(struct dlm_rsb *r)
-{
- int to_nodeid;
-
- if (dlm_no_directory(r->res_ls))
- return;
+/* If there's an rsb for the same resource being removed, ensure
+ that the remove message is sent before the new lookup message.
+ It should be rare to need a delay here, but if not, then it may
+ be worthwhile to add a proper wait mechanism rather than a delay. */
- to_nodeid = dlm_dir_nodeid(r);
- if (to_nodeid != dlm_our_nodeid())
- send_remove(r);
- else
- dlm_dir_remove_entry(r->res_ls, to_nodeid,
- r->res_name, r->res_length);
+static void wait_pending_remove(struct dlm_rsb *r)
+{
+ struct dlm_ls *ls = r->res_ls;
+ restart:
+ spin_lock(&ls->ls_remove_spin);
+ if (ls->ls_remove_len &&
+ !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
+ log_debug(ls, "delay lookup for remove dir %d %s",
+ r->res_dir_nodeid, r->res_name);
+ spin_unlock(&ls->ls_remove_spin);
+ msleep(1);
+ goto restart;
+ }
+ spin_unlock(&ls->ls_remove_spin);
}
-/* FIXME: make this more efficient */
+/*
+ * ls_remove_spin protects ls_remove_name and ls_remove_len which are
+ * read by other threads in wait_pending_remove. ls_remove_names
+ * and ls_remove_lens are only used by the scan thread, so they do
+ * not need protection.
+ */
-static int shrink_bucket(struct dlm_ls *ls, int b)
+static void shrink_bucket(struct dlm_ls *ls, int b)
{
- struct rb_node *n;
+ struct rb_node *n, *next;
struct dlm_rsb *r;
- int count = 0, found;
+ char *name;
+ int our_nodeid = dlm_our_nodeid();
+ int remote_count = 0;
+ int i, len, rv;
+
+ memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+ for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
+ next = rb_next(n);
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
+
+ /* If we're the directory record for this rsb, and
+ we're not the master of it, then we need to wait
+ for the master node to send us a dir remove for it
+ before removing the dir record. */
+
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid)) {
+ continue;
+ }
+
+ if (!time_after_eq(jiffies, r->res_toss_time +
+ dlm_config.ci_toss_secs * HZ)) {
+ continue;
+ }
+
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid == our_nodeid) &&
+ (dlm_dir_nodeid(r) != our_nodeid)) {
+
+ /* We're the master of this rsb but we're not
+ the directory record, so we need to tell the
+ dir node to remove the dir record. */
+
+ ls->ls_remove_lens[remote_count] = r->res_length;
+ memcpy(ls->ls_remove_names[remote_count], r->res_name,
+ DLM_RESNAME_MAXLEN);
+ remote_count++;
+
+ if (remote_count >= DLM_REMOVE_NAMES_MAX)
+ break;
+ continue;
+ }
+
+ if (!kref_put(&r->res_ref, kill_rsb)) {
+ log_error(ls, "tossed rsb in use %s", r->res_name);
+ continue;
+ }
+
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ dlm_free_rsb(r);
+ }
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+
+ /*
+ * While searching for rsb's to free, we found some that require
+ * remote removal. We leave them in place and find them again here
+ * so there is a very small gap between removing them from the toss
+ * list and sending the removal. Keeping this gap small is
+ * important to keep us (the master node) from being out of sync
+ * with the remote dir node for very long.
+ *
+ * From the time the rsb is removed from toss until just after
+ * send_remove, the rsb name is saved in ls_remove_name. A new
+ * lookup checks this to ensure that a new lookup message for the
+ * same resource name is not sent just before the remove message.
+ */
+
+ for (i = 0; i < remote_count; i++) {
+ name = ls->ls_remove_names[i];
+ len = ls->ls_remove_lens[i];
- for (;;) {
- found = 0;
spin_lock(&ls->ls_rsbtbl[b].lock);
- for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ))
- continue;
- found = 1;
- break;
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (rv) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ log_debug(ls, "remove_name not toss %s", name);
+ continue;
}
- if (!found) {
+ if (r->res_master_nodeid != our_nodeid) {
spin_unlock(&ls->ls_rsbtbl[b].lock);
- break;
+ log_debug(ls, "remove_name master %d dir %d our %d %s",
+ r->res_master_nodeid, r->res_dir_nodeid,
+ our_nodeid, name);
+ continue;
}
- if (kref_put(&r->res_ref, kill_rsb)) {
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ if (r->res_dir_nodeid == our_nodeid) {
+ /* should never happen */
spin_unlock(&ls->ls_rsbtbl[b].lock);
+ log_error(ls, "remove_name dir %d master %d our %d %s",
+ r->res_dir_nodeid, r->res_master_nodeid,
+ our_nodeid, name);
+ continue;
+ }
- if (is_master(r))
- dir_remove(r);
- dlm_free_rsb(r);
- count++;
- } else {
+ if (!time_after_eq(jiffies, r->res_toss_time +
+ dlm_config.ci_toss_secs * HZ)) {
spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "tossed rsb in use %s", r->res_name);
+ log_debug(ls, "remove_name toss_time %lu now %lu %s",
+ r->res_toss_time, jiffies, name);
+ continue;
}
- }
- return count;
+ if (!kref_put(&r->res_ref, kill_rsb)) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ log_error(ls, "remove_name in use %s", name);
+ continue;
+ }
+
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+
+ /* block lookup of same name until we've sent remove */
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = len;
+ memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
+ spin_unlock(&ls->ls_remove_spin);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+
+ send_remove(r);
+
+ /* allow lookup of name again */
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = 0;
+ memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
+ spin_unlock(&ls->ls_remove_spin);
+
+ dlm_free_rsb(r);
+ }
}
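
(Editorial aside, not part of the patch: the comments in shrink_bucket() and wait_pending_remove() describe a small handshake in which the name being removed is published under ls_remove_spin, so a concurrent lookup for the same name is delayed until the remove message has gone out. The following is a minimal userspace model of that handshake, assuming invented names and a pthread mutex standing in for the spinlock; it is not kernel code.)

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t remove_lock = PTHREAD_MUTEX_INITIALIZER;
static char remove_name[64];
static int remove_len;

/* shrink side: publish the name before sending the remove message */
static void begin_remove(const char *name)
{
	pthread_mutex_lock(&remove_lock);
	remove_len = (int)strlen(name);
	snprintf(remove_name, sizeof(remove_name), "%s", name);
	pthread_mutex_unlock(&remove_lock);
}

/* shrink side: clear it once the remove has been sent */
static void end_remove(void)
{
	pthread_mutex_lock(&remove_lock);
	remove_len = 0;
	memset(remove_name, 0, sizeof(remove_name));
	pthread_mutex_unlock(&remove_lock);
}

/* lookup side: poll until no remove for this name is in flight */
static void wait_pending_remove_model(const char *name)
{
	for (;;) {
		pthread_mutex_lock(&remove_lock);
		int busy = remove_len && !strcmp(remove_name, name);
		pthread_mutex_unlock(&remove_lock);
		if (!busy)
			return;
		usleep(1000);	/* plays the role of msleep(1) */
	}
}

int main(void)
{
	begin_remove("resA");
	end_remove();
	wait_pending_remove_model("resA");	/* returns once the name is cleared */
	printf("lookup for resA may proceed\n");
	return 0;
}

In the kernel, the publish happens while the rsbtbl bucket lock is still held, which is what keeps the window between rb_erase() and send_remove() consistent; the model only captures the polling shape.
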
void dlm_scan_rsbs(struct dlm_ls *ls)
@@ -1684,10 +2279,14 @@ static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
* immediate request, it is 0 if called later, after the lock has been
* queued.
*
+ * recover is 1 if dlm_recover_grant() is trying to grant conversions
+ * after recovery.
+ *
* References are from chapter 6 of "VAXcluster Principles" by Roy Davis
*/
-static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
+static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
+ int recover)
{
int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
@@ -1719,7 +2318,7 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
*/
if (queue_conflict(&r->res_grantqueue, lkb))
- goto out;
+ return 0;
/*
* 6-3: By default, a conversion request is immediately granted if the
@@ -1728,7 +2327,24 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
*/
if (queue_conflict(&r->res_convertqueue, lkb))
- goto out;
+ return 0;
+
+ /*
+ * The RECOVER_GRANT flag means dlm_recover_grant() is granting
+ * locks for a recovered rsb, on which lkb's have been rebuilt.
+ * The lkb's may have been rebuilt on the queues in a different
+ * order than they were in on the previous master. So, granting
+ * queued conversions in order after recovery doesn't make sense
+ * since the order hasn't been preserved anyway. The new order
+ * could also have created a new "in place" conversion deadlock.
+ * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
+ * After recovery, there would be no granted locks, and possibly
+ * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
+ * recovery, grant conversions without considering order.
+ */
+
+ if (conv && recover)
+ return 1;
/*
* 6-5: But the default algorithm for deciding whether to grant or
@@ -1765,7 +2381,7 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
if (list_empty(&r->res_convertqueue))
return 1;
else
- goto out;
+ return 0;
}
/*
@@ -1811,12 +2427,12 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
if (!now && !conv && list_empty(&r->res_convertqueue) &&
first_in_list(lkb, &r->res_waitqueue))
return 1;
- out:
+
return 0;
}
static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
- int *err)
+ int recover, int *err)
{
int rv;
int8_t alt = 0, rqmode = lkb->lkb_rqmode;
@@ -1825,7 +2441,7 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
if (err)
*err = 0;
- rv = _can_be_granted(r, lkb, now);
+ rv = _can_be_granted(r, lkb, now, recover);
if (rv)
goto out;
@@ -1866,7 +2482,7 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
if (alt) {
lkb->lkb_rqmode = alt;
- rv = _can_be_granted(r, lkb, now);
+ rv = _can_be_granted(r, lkb, now, 0);
if (rv)
lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
else
@@ -1890,6 +2506,7 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
unsigned int *count)
{
struct dlm_lkb *lkb, *s;
+ int recover = rsb_flag(r, RSB_RECOVER_GRANT);
int hi, demoted, quit, grant_restart, demote_restart;
int deadlk;
@@ -1903,7 +2520,7 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
demoted = is_demoted(lkb);
deadlk = 0;
- if (can_be_granted(r, lkb, 0, &deadlk)) {
+ if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
grant_lock_pending(r, lkb);
grant_restart = 1;
if (count)
@@ -1947,7 +2564,7 @@ static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
struct dlm_lkb *lkb, *s;
list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
- if (can_be_granted(r, lkb, 0, NULL)) {
+ if (can_be_granted(r, lkb, 0, 0, NULL)) {
grant_lock_pending(r, lkb);
if (count)
(*count)++;
@@ -2078,8 +2695,7 @@ static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
- struct dlm_ls *ls = r->res_ls;
- int i, error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
+ int our_nodeid = dlm_our_nodeid();
if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
@@ -2093,53 +2709,37 @@ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
return 1;
}
- if (r->res_nodeid == 0) {
+ if (r->res_master_nodeid == our_nodeid) {
lkb->lkb_nodeid = 0;
return 0;
}
- if (r->res_nodeid > 0) {
- lkb->lkb_nodeid = r->res_nodeid;
+ if (r->res_master_nodeid) {
+ lkb->lkb_nodeid = r->res_master_nodeid;
return 0;
}
- DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
-
- dir_nodeid = dlm_dir_nodeid(r);
-
- if (dir_nodeid != our_nodeid) {
- r->res_first_lkid = lkb->lkb_id;
- send_lookup(r, lkb);
- return 1;
- }
-
- for (i = 0; i < 2; i++) {
- /* It's possible for dlm_scand to remove an old rsb for
- this same resource from the toss list, us to create
- a new one, look up the master locally, and find it
- already exists just before dlm_scand does the
- dir_remove() on the previous rsb. */
-
- error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
- r->res_length, &ret_nodeid);
- if (!error)
- break;
- log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
- schedule();
- }
- if (error && error != -EEXIST)
- return error;
-
- if (ret_nodeid == our_nodeid) {
- r->res_first_lkid = 0;
+ if (dlm_dir_nodeid(r) == our_nodeid) {
+ /* This is a somewhat unusual case; find_rsb will usually
+ have set res_master_nodeid when dir nodeid is local, but
+ there are cases where we become the dir node after we've
+ passed find_rsb and go through _request_lock again.
+ confirm_master() or process_lookup_list() needs to be
+ called after this. */
+ log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
+ lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
+ r->res_name);
+ r->res_master_nodeid = our_nodeid;
r->res_nodeid = 0;
lkb->lkb_nodeid = 0;
- } else {
- r->res_first_lkid = lkb->lkb_id;
- r->res_nodeid = ret_nodeid;
- lkb->lkb_nodeid = ret_nodeid;
+ return 0;
}
- return 0;
+
+ wait_pending_remove(r);
+
+ r->res_first_lkid = lkb->lkb_id;
+ send_lookup(r, lkb);
+ return 1;
}
static void process_lookup_list(struct dlm_rsb *r)
@@ -2464,7 +3064,7 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error = 0;
- if (can_be_granted(r, lkb, 1, NULL)) {
+ if (can_be_granted(r, lkb, 1, 0, NULL)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
@@ -2504,7 +3104,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
/* changing an existing lock may allow others to be granted */
- if (can_be_granted(r, lkb, 1, &deadlk)) {
+ if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
@@ -2530,7 +3130,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (is_demoted(lkb)) {
grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
- if (_can_be_granted(r, lkb, 1)) {
+ if (_can_be_granted(r, lkb, 1, 0)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
@@ -2584,7 +3184,7 @@ static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
}
/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
-
+
static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
@@ -2708,11 +3308,11 @@ static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
error = validate_lock_args(ls, lkb, args);
if (error)
- goto out;
+ return error;
- error = find_rsb(ls, name, len, R_CREATE, &r);
+ error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
if (error)
- goto out;
+ return error;
lock_rsb(r);
@@ -2723,8 +3323,6 @@ static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
unlock_rsb(r);
put_rsb(r);
-
- out:
return error;
}
@@ -3402,11 +4000,72 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
return error;
}
+static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
+{
+ char name[DLM_RESNAME_MAXLEN + 1];
+ struct dlm_message *ms;
+ struct dlm_mhandle *mh;
+ struct dlm_rsb *r;
+ uint32_t hash, b;
+ int rv, dir_nodeid;
+
+ memset(name, 0, sizeof(name));
+ memcpy(name, ms_name, len);
+
+ hash = jhash(name, len, 0);
+ b = hash & (ls->ls_rsbtbl_size - 1);
+
+ dir_nodeid = dlm_hash2nodeid(ls, hash);
+
+ log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ if (!rv) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ log_error(ls, "repeat_remove on keep %s", name);
+ return;
+ }
+
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (!rv) {
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ log_error(ls, "repeat_remove on toss %s", name);
+ return;
+ }
+
+ /* use ls->remove_name2 to avoid conflict with shrink? */
+
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = len;
+ memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
+ spin_unlock(&ls->ls_remove_spin);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+
+ rv = _create_message(ls, sizeof(struct dlm_message) + len,
+ dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
+ if (rv)
+ return;
+
+ memcpy(ms->m_extra, name, len);
+ ms->m_hash = hash;
+
+ send_message(mh, ms);
+
+ spin_lock(&ls->ls_remove_spin);
+ ls->ls_remove_len = 0;
+ memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
+ spin_unlock(&ls->ls_remove_spin);
+}
+
static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
- int error, namelen;
+ int from_nodeid;
+ int error, namelen = 0;
+
+ from_nodeid = ms->m_header.h_nodeid;
error = create_lkb(ls, &lkb);
if (error)
@@ -3420,9 +4079,16 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
goto fail;
}
+ /* The dir node is the authority on whether we are the master
+ for this rsb or not, so if the dir node sends us a request, we should
+ recreate the rsb if we've destroyed it. This race happens when we
+ send a remove message to the dir node at the same time that the dir
+ node sends us a request for the rsb. */
+
namelen = receive_extralen(ms);
- error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
+ error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
+ R_RECEIVE_REQUEST, &r);
if (error) {
__put_lkb(ls, lkb);
goto fail;
@@ -3430,6 +4096,16 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
lock_rsb(r);
+ if (r->res_master_nodeid != dlm_our_nodeid()) {
+ error = validate_master_nodeid(ls, r, from_nodeid);
+ if (error) {
+ unlock_rsb(r);
+ put_rsb(r);
+ __put_lkb(ls, lkb);
+ goto fail;
+ }
+ }
+
attach_lkb(r, lkb);
error = do_request(r, lkb);
send_request_reply(r, lkb, error);
@@ -3445,6 +4121,31 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
return 0;
fail:
+ /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
+ and do this receive_request again from process_lookup_list once
+ we get the lookup reply. This would avoid many repeated
+ ENOTBLK request failures when the lookup reply designating us
+ as master is delayed. */
+
+ /* We could repeatedly return -EBADR here if our send_remove() is
+ delayed in being sent/arriving/being processed on the dir node.
+ Another node would repeatedly look up the master, and the dir
+ node would continue returning our nodeid until our send_remove
+ took effect.
+
+ We send another remove message in case our previous send_remove
+ was lost/ignored/missed somehow. */
+
+ if (error != -ENOTBLK) {
+ log_limit(ls, "receive_request %x from %d %d",
+ ms->m_lkid, from_nodeid, error);
+ }
+
+ if (namelen && error == -EBADR) {
+ send_repeat_remove(ls, ms->m_extra, namelen);
+ msleep(1000);
+ }
+
setup_stub_lkb(ls, ms);
send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
return error;
@@ -3651,49 +4352,110 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
{
- int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
+ int len, error, ret_nodeid, from_nodeid, our_nodeid;
from_nodeid = ms->m_header.h_nodeid;
our_nodeid = dlm_our_nodeid();
len = receive_extralen(ms);
- dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
- if (dir_nodeid != our_nodeid) {
- log_error(ls, "lookup dir_nodeid %d from %d",
- dir_nodeid, from_nodeid);
- error = -EINVAL;
- ret_nodeid = -1;
- goto out;
- }
-
- error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
+ error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
+ &ret_nodeid, NULL);
/* Optimization: we're master so treat lookup as a request */
if (!error && ret_nodeid == our_nodeid) {
receive_request(ls, ms);
return;
}
- out:
send_lookup_reply(ls, ms, ret_nodeid, error);
}
static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
{
- int len, dir_nodeid, from_nodeid;
+ char name[DLM_RESNAME_MAXLEN+1];
+ struct dlm_rsb *r;
+ uint32_t hash, b;
+ int rv, len, dir_nodeid, from_nodeid;
from_nodeid = ms->m_header.h_nodeid;
len = receive_extralen(ms);
+ if (len > DLM_RESNAME_MAXLEN) {
+ log_error(ls, "receive_remove from %d bad len %d",
+ from_nodeid, len);
+ return;
+ }
+
dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
if (dir_nodeid != dlm_our_nodeid()) {
- log_error(ls, "remove dir entry dir_nodeid %d from %d",
- dir_nodeid, from_nodeid);
+ log_error(ls, "receive_remove from %d bad nodeid %d",
+ from_nodeid, dir_nodeid);
+ return;
+ }
+
+ /* Look for name on rsbtbl.toss, if it's there, kill it.
+ If it's on rsbtbl.keep, it's being used, and we should ignore this
+ message. This is an expected race between the dir node sending a
+ request to the master node at the same time as the master node sends
+ a remove to the dir node. The resolution to that race is for the
+ dir node to ignore the remove message, and the master node to
+ recreate the master rsb when it gets a request from the dir node for
+ an rsb it doesn't have. */
+
+ memset(name, 0, sizeof(name));
+ memcpy(name, ms->m_extra, len);
+
+ hash = jhash(name, len, 0);
+ b = hash & (ls->ls_rsbtbl_size - 1);
+
+ spin_lock(&ls->ls_rsbtbl[b].lock);
+
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ if (rv) {
+ /* verify the rsb is on keep list per comment above */
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ if (rv) {
+ /* should not happen */
+ log_error(ls, "receive_remove from %d not found %s",
+ from_nodeid, name);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ return;
+ }
+ if (r->res_master_nodeid != from_nodeid) {
+ /* should not happen */
+ log_error(ls, "receive_remove keep from %d master %d",
+ from_nodeid, r->res_master_nodeid);
+ dlm_print_rsb(r);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ return;
+ }
+
+ log_debug(ls, "receive_remove from %d master %d first %x %s",
+ from_nodeid, r->res_master_nodeid, r->res_first_lkid,
+ name);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ return;
+ }
+
+ if (r->res_master_nodeid != from_nodeid) {
+ log_error(ls, "receive_remove toss from %d master %d",
+ from_nodeid, r->res_master_nodeid);
+ dlm_print_rsb(r);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
return;
}
- dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
+ if (kref_put(&r->res_ref, kill_rsb)) {
+ rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ dlm_free_rsb(r);
+ } else {
+ log_error(ls, "receive_remove from %d rsb ref error",
+ from_nodeid);
+ dlm_print_rsb(r);
+ spin_unlock(&ls->ls_rsbtbl[b].lock);
+ }
}
static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
@@ -3706,6 +4468,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, mstype, result;
+ int from_nodeid = ms->m_header.h_nodeid;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error)
@@ -3723,8 +4486,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
if (error) {
log_error(ls, "receive_request_reply %x remote %d %x result %d",
- lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
- ms->m_result);
+ lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
dlm_dump_rsb(r);
goto out;
}
@@ -3732,8 +4494,9 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
/* Optimization: the dir node was also the master, so it took our
lookup as a request and sent request reply instead of lookup reply */
if (mstype == DLM_MSG_LOOKUP) {
- r->res_nodeid = ms->m_header.h_nodeid;
- lkb->lkb_nodeid = r->res_nodeid;
+ r->res_master_nodeid = from_nodeid;
+ r->res_nodeid = from_nodeid;
+ lkb->lkb_nodeid = from_nodeid;
}
/* this is the value returned from do_request() on the master */
@@ -3767,18 +4530,30 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
case -EBADR:
case -ENOTBLK:
/* find_rsb failed to find rsb or rsb wasn't master */
- log_debug(ls, "receive_request_reply %x %x master diff %d %d",
- lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
- r->res_nodeid = -1;
- lkb->lkb_nodeid = -1;
+ log_limit(ls, "receive_request_reply %x from %d %d "
+ "master %d dir %d first %x %s", lkb->lkb_id,
+ from_nodeid, result, r->res_master_nodeid,
+ r->res_dir_nodeid, r->res_first_lkid, r->res_name);
+
+ if (r->res_dir_nodeid != dlm_our_nodeid() &&
+ r->res_master_nodeid != dlm_our_nodeid()) {
+ /* cause _request_lock->set_master->send_lookup */
+ r->res_master_nodeid = 0;
+ r->res_nodeid = -1;
+ lkb->lkb_nodeid = -1;
+ }
if (is_overlap(lkb)) {
/* we'll ignore error in cancel/unlock reply */
queue_cast_overlap(r, lkb);
confirm_master(r, result);
unhold_lkb(lkb); /* undoes create_lkb() */
- } else
+ } else {
_request_lock(r, lkb);
+
+ if (r->res_master_nodeid == dlm_our_nodeid())
+ confirm_master(r, 0);
+ }
break;
default:
@@ -3994,6 +4769,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, ret_nodeid;
+ int do_lookup_list = 0;
error = find_lkb(ls, ms->m_lkid, &lkb);
if (error) {
@@ -4001,7 +4777,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
return;
}
- /* ms->m_result is the value returned by dlm_dir_lookup on dir node
+ /* ms->m_result is the value returned by dlm_master_lookup on dir node
FIXME: will a non-zero error ever be returned? */
r = lkb->lkb_resource;
@@ -4013,12 +4789,37 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
goto out;
ret_nodeid = ms->m_nodeid;
+
+ /* We sometimes receive a request from the dir node for this
+ rsb before we've received the dir node's lookup_reply for it.
+ The request from the dir node implies we're the master, so we set
+ ourself as master in receive_request_reply, and verify here that
+ we are indeed the master. */
+
+ if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
+ /* This should never happen */
+ log_error(ls, "receive_lookup_reply %x from %d ret %d "
+ "master %d dir %d our %d first %x %s",
+ lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
+ r->res_master_nodeid, r->res_dir_nodeid,
+ dlm_our_nodeid(), r->res_first_lkid, r->res_name);
+ }
+
if (ret_nodeid == dlm_our_nodeid()) {
+ r->res_master_nodeid = ret_nodeid;
r->res_nodeid = 0;
- ret_nodeid = 0;
+ do_lookup_list = 1;
r->res_first_lkid = 0;
+ } else if (ret_nodeid == -1) {
+ /* the remote node doesn't believe it's the dir node */
+ log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
+ lkb->lkb_id, ms->m_header.h_nodeid);
+ r->res_master_nodeid = 0;
+ r->res_nodeid = -1;
+ lkb->lkb_nodeid = -1;
} else {
- /* set_master() will copy res_nodeid to lkb_nodeid */
+ /* set_master() will set lkb_nodeid from r */
+ r->res_master_nodeid = ret_nodeid;
r->res_nodeid = ret_nodeid;
}
@@ -4033,7 +4834,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
_request_lock(r, lkb);
out_list:
- if (!ret_nodeid)
+ if (do_lookup_list)
process_lookup_list(r);
out:
unlock_rsb(r);
@@ -4047,7 +4848,7 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
int error = 0, noent = 0;
if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
- log_debug(ls, "ignore non-member message %d from %d %x %x %d",
+ log_limit(ls, "receive %d from non-member %d %x %x %d",
ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
ms->m_remid, ms->m_result);
return;
@@ -4174,6 +4975,15 @@ static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
int nodeid)
{
if (dlm_locking_stopped(ls)) {
+ /* If we were a member of this lockspace, left, and rejoined,
+ other nodes may still be sending us messages from the
+ lockspace generation before we left. */
+ if (!ls->ls_generation) {
+ log_limit(ls, "receive %d from %d ignore old gen",
+ ms->m_type, nodeid);
+ return;
+ }
+
dlm_add_requestqueue(ls, nodeid, ms);
} else {
dlm_wait_requestqueue(ls);
@@ -4651,9 +5461,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
if (!rsb_flag(r, RSB_RECOVER_GRANT))
continue;
- rsb_clear_flag(r, RSB_RECOVER_GRANT);
- if (!is_master(r))
+ if (!is_master(r)) {
+ rsb_clear_flag(r, RSB_RECOVER_GRANT);
continue;
+ }
hold_rsb(r);
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
return r;
@@ -4698,7 +5509,9 @@ void dlm_recover_grant(struct dlm_ls *ls)
rsb_count++;
count = 0;
lock_rsb(r);
+ /* the RECOVER_GRANT flag is checked in the grant path */
grant_pending_locks(r, &count);
+ rsb_clear_flag(r, RSB_RECOVER_GRANT);
lkb_count += count;
confirm_master(r, 0);
unlock_rsb(r);
@@ -4798,6 +5611,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
struct dlm_rsb *r;
struct dlm_lkb *lkb;
uint32_t remid = 0;
+ int from_nodeid = rc->rc_header.h_nodeid;
int error;
if (rl->rl_parent_lkid) {
@@ -4815,21 +5629,21 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
we make ourselves master, dlm_recover_masters() won't touch the
MSTCPY locks we've received early. */
- error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen), 0, &r);
+ error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
+ from_nodeid, R_RECEIVE_RECOVER, &r);
if (error)
goto out;
+ lock_rsb(r);
+
if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
- rc->rc_header.h_nodeid, remid);
+ from_nodeid, remid);
error = -EBADR;
- put_rsb(r);
- goto out;
+ goto out_unlock;
}
- lock_rsb(r);
-
- lkb = search_remid(r, rc->rc_header.h_nodeid, remid);
+ lkb = search_remid(r, from_nodeid, remid);
if (lkb) {
error = -EEXIST;
goto out_remid;
@@ -4866,7 +5680,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
out:
if (error && error != -EEXIST)
log_debug(ls, "dlm_recover_master_copy remote %d %x error %d",
- rc->rc_header.h_nodeid, remid, error);
+ from_nodeid, remid, error);
rl->rl_result = cpu_to_le32(error);
return error;
}
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index c8b226c62807..5e0c72e36a9b 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -14,6 +14,7 @@
#define __LOCK_DOT_H__
void dlm_dump_rsb(struct dlm_rsb *r);
+void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len);
void dlm_print_lkb(struct dlm_lkb *lkb);
void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
uint32_t saved_seq);
@@ -28,9 +29,11 @@ void dlm_unlock_recovery(struct dlm_ls *ls);
void dlm_scan_waiters(struct dlm_ls *ls);
void dlm_scan_timeout(struct dlm_ls *ls);
void dlm_adjust_timeouts(struct dlm_ls *ls);
+int dlm_master_lookup(struct dlm_ls *ls, int nodeid, char *name, int len,
+ unsigned int flags, int *r_nodeid, int *result);
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
- unsigned int flags, struct dlm_rsb **r_ret);
+ struct dlm_rsb **r_ret);
void dlm_recover_purge(struct dlm_ls *ls);
void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index ca506abbdd3b..952557d00ccd 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -506,20 +506,18 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_init(&ls->ls_rsbtbl[i].lock);
}
- idr_init(&ls->ls_lkbidr);
- spin_lock_init(&ls->ls_lkbidr_spin);
+ spin_lock_init(&ls->ls_remove_spin);
- size = dlm_config.ci_dirtbl_size;
- ls->ls_dirtbl_size = size;
-
- ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
- if (!ls->ls_dirtbl)
- goto out_lkbfree;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
- spin_lock_init(&ls->ls_dirtbl[i].lock);
+ for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+ ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
+ GFP_KERNEL);
+ if (!ls->ls_remove_names[i])
+ goto out_rsbtbl;
}
+ idr_init(&ls->ls_lkbidr);
+ spin_lock_init(&ls->ls_lkbidr_spin);
+
INIT_LIST_HEAD(&ls->ls_waiters);
mutex_init(&ls->ls_waiters_mutex);
INIT_LIST_HEAD(&ls->ls_orphans);
@@ -567,7 +565,7 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (!ls->ls_recover_buf)
- goto out_dirfree;
+ goto out_lkbidr;
ls->ls_slot = 0;
ls->ls_num_slots = 0;
@@ -576,6 +574,8 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
+ idr_init(&ls->ls_recover_idr);
+ spin_lock_init(&ls->ls_recover_idr_lock);
ls->ls_recover_list_count = 0;
ls->ls_local_handle = ls;
init_waitqueue_head(&ls->ls_wait_general);
@@ -647,11 +647,15 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
+ idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf);
- out_dirfree:
- vfree(ls->ls_dirtbl);
- out_lkbfree:
+ out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
+ for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
+ if (ls->ls_remove_names[i])
+ kfree(ls->ls_remove_names[i]);
+ }
+ out_rsbtbl:
vfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
@@ -779,13 +783,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
kfree(ls->ls_recover_buf);
/*
- * Free direntry structs.
- */
-
- dlm_dir_clear(ls);
- vfree(ls->ls_dirtbl);
-
- /*
* Free all lkb's in idr
*/
@@ -813,6 +810,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
vfree(ls->ls_rsbtbl);
+ for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
+ kfree(ls->ls_remove_names[i]);
+
while (!list_empty(&ls->ls_new_rsb)) {
rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
res_hashchain);
@@ -826,7 +826,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_purge_requestqueue(ls);
kfree(ls->ls_recover_args);
- dlm_clear_free_entries(ls);
dlm_clear_members(ls);
dlm_clear_members_gone(ls);
kfree(ls->ls_node_array);
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 64d3e2b958c7..87f1a56eab32 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -23,8 +23,6 @@
#include "memory.h"
#include "lock.h"
#include "util.h"
-#include "member.h"
-
static int rcom_response(struct dlm_ls *ls)
{
@@ -275,19 +273,9 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
int error = 0;
- int max_size = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom);
ls->ls_recover_nodeid = nodeid;
- if (nodeid == dlm_our_nodeid()) {
- ls->ls_recover_buf->rc_header.h_length =
- dlm_config.ci_buffer_size;
- dlm_copy_master_names(ls, last_name, last_len,
- ls->ls_recover_buf->rc_buf,
- max_size, nodeid);
- goto out;
- }
-
error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
if (error)
goto out;
@@ -337,7 +325,26 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
if (error)
goto out;
memcpy(rc->rc_buf, r->res_name, r->res_length);
- rc->rc_id = (unsigned long) r;
+ rc->rc_id = (unsigned long) r->res_id;
+
+ send_rcom(ls, mh, rc);
+ out:
+ return error;
+}
+
+int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid)
+{
+ struct dlm_rcom *rc;
+ struct dlm_mhandle *mh;
+ struct dlm_ls *ls = r->res_ls;
+ int error;
+
+ error = create_rcom(ls, to_nodeid, DLM_RCOM_LOOKUP, r->res_length,
+ &rc, &mh);
+ if (error)
+ goto out;
+ memcpy(rc->rc_buf, r->res_name, r->res_length);
+ rc->rc_id = 0xFFFFFFFF;
send_rcom(ls, mh, rc);
out:
@@ -355,7 +362,14 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
if (error)
return;
- error = dlm_dir_lookup(ls, nodeid, rc_in->rc_buf, len, &ret_nodeid);
+ if (rc_in->rc_id == 0xFFFFFFFF) {
+ log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
+ dlm_dump_rsb_name(ls, rc_in->rc_buf, len);
+ return;
+ }
+
+ error = dlm_master_lookup(ls, nodeid, rc_in->rc_buf, len,
+ DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL);
if (error)
ret_nodeid = error;
rc->rc_result = ret_nodeid;
@@ -486,17 +500,76 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
return 0;
}
+/*
+ * Ignore messages for stage Y before we set
+ * recover_status bit for stage X:
+ *
+ * recover_status = 0
+ *
+ * dlm_recover_members()
+ * - send nothing
+ * - recv nothing
+ * - ignore NAMES, NAMES_REPLY
+ * - ignore LOOKUP, LOOKUP_REPLY
+ * - ignore LOCK, LOCK_REPLY
+ *
+ * recover_status |= NODES
+ *
+ * dlm_recover_members_wait()
+ *
+ * dlm_recover_directory()
+ * - send NAMES
+ * - recv NAMES_REPLY
+ * - ignore LOOKUP, LOOKUP_REPLY
+ * - ignore LOCK, LOCK_REPLY
+ *
+ * recover_status |= DIR
+ *
+ * dlm_recover_directory_wait()
+ *
+ * dlm_recover_masters()
+ * - send LOOKUP
+ * - recv LOOKUP_REPLY
+ *
+ * dlm_recover_locks()
+ * - send LOCKS
+ * - recv LOCKS_REPLY
+ *
+ * recover_status |= LOCKS
+ *
+ * dlm_recover_locks_wait()
+ *
+ * recover_status |= DONE
+ */
+
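(Editorial aside, not part of the patch: a compact way to read the staging rules above is as a gate per message class. The following is a hypothetical standalone C model with illustrative bit values, not DLM's real DLM_RS_* constants; the real checks appear in dlm_receive_rcom() below.)

#include <stdio.h>

#define RS_NODES 0x1	/* stand-in for DLM_RS_NODES */
#define RS_DIR   0x2	/* stand-in for DLM_RS_DIR */

enum msg_class { MSG_NAMES, MSG_LOOKUP, MSG_LOCK };

static int ignore_rcom(unsigned int status, enum msg_class c)
{
	if (!(status & RS_NODES))		/* members not recovered yet: */
		return 1;			/* ignore NAMES, LOOKUP, LOCK */
	if (!(status & RS_DIR) && c != MSG_NAMES)
		return 1;			/* ignore LOOKUP, LOCK until dir is rebuilt */
	return 0;
}

int main(void)
{
	printf("LOOKUP before NODES: %s\n",
	       ignore_rcom(0, MSG_LOOKUP) ? "ignored" : "processed");
	printf("NAMES after NODES:   %s\n",
	       ignore_rcom(RS_NODES, MSG_NAMES) ? "ignored" : "processed");
	printf("LOCK after DIR:      %s\n",
	       ignore_rcom(RS_NODES | RS_DIR, MSG_LOCK) ? "ignored" : "processed");
	return 0;
}
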
/* Called by dlm_recv; corresponds to dlm_receive_message() but special
recovery-only comms are sent through here. */
void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock);
- int stop, reply = 0, lock = 0;
+ int stop, reply = 0, names = 0, lookup = 0, lock = 0;
uint32_t status;
uint64_t seq;
switch (rc->rc_type) {
+ case DLM_RCOM_STATUS_REPLY:
+ reply = 1;
+ break;
+ case DLM_RCOM_NAMES:
+ names = 1;
+ break;
+ case DLM_RCOM_NAMES_REPLY:
+ names = 1;
+ reply = 1;
+ break;
+ case DLM_RCOM_LOOKUP:
+ lookup = 1;
+ break;
+ case DLM_RCOM_LOOKUP_REPLY:
+ lookup = 1;
+ reply = 1;
+ break;
case DLM_RCOM_LOCK:
lock = 1;
break;
@@ -504,10 +577,6 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
lock = 1;
reply = 1;
break;
- case DLM_RCOM_STATUS_REPLY:
- case DLM_RCOM_NAMES_REPLY:
- case DLM_RCOM_LOOKUP_REPLY:
- reply = 1;
};
spin_lock(&ls->ls_recover_lock);
@@ -516,19 +585,17 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
seq = ls->ls_recover_seq;
spin_unlock(&ls->ls_recover_lock);
- if ((stop && (rc->rc_type != DLM_RCOM_STATUS)) ||
- (reply && (rc->rc_seq_reply != seq)) ||
- (lock && !(status & DLM_RS_DIR))) {
- log_limit(ls, "dlm_receive_rcom ignore msg %d "
- "from %d %llu %llu recover seq %llu sts %x gen %u",
- rc->rc_type,
- nodeid,
- (unsigned long long)rc->rc_seq,
- (unsigned long long)rc->rc_seq_reply,
- (unsigned long long)seq,
- status, ls->ls_generation);
- goto out;
- }
+ if (stop && (rc->rc_type != DLM_RCOM_STATUS))
+ goto ignore;
+
+ if (reply && (rc->rc_seq_reply != seq))
+ goto ignore;
+
+ if (!(status & DLM_RS_NODES) && (names || lookup || lock))
+ goto ignore;
+
+ if (!(status & DLM_RS_DIR) && (lookup || lock))
+ goto ignore;
switch (rc->rc_type) {
case DLM_RCOM_STATUS:
@@ -570,10 +637,20 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
default:
log_error(ls, "receive_rcom bad type %d", rc->rc_type);
}
-out:
+ return;
+
+ignore:
+ log_limit(ls, "dlm_receive_rcom ignore msg %d "
+ "from %d %llu %llu recover seq %llu sts %x gen %u",
+ rc->rc_type,
+ nodeid,
+ (unsigned long long)rc->rc_seq,
+ (unsigned long long)rc->rc_seq_reply,
+ (unsigned long long)seq,
+ status, ls->ls_generation);
return;
Eshort:
- log_error(ls, "recovery message %x from %d is too short",
- rc->rc_type, nodeid);
+ log_error(ls, "recovery message %d from %d is too short",
+ rc->rc_type, nodeid);
}
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
index 206723ab744d..f8e243463c15 100644
--- a/fs/dlm/rcom.h
+++ b/fs/dlm/rcom.h
@@ -17,6 +17,7 @@
int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags);
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
+int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid);
int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid);
int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in);
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 7554e4dac6bb..4a7a76e42fc3 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -36,30 +36,23 @@
* (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
* function thinks it could have completed the waited-on task, they should wake
* up ls_wait_general to get an immediate response rather than waiting for the
- * timer to detect the result. A timer wakes us up periodically while waiting
- * to see if we should abort due to a node failure. This should only be called
- * by the dlm_recoverd thread.
+ * timeout. This uses a timeout so it can check periodically if the wait
+ * should abort due to node failure (which doesn't cause a wake_up).
+ * This should only be called by the dlm_recoverd thread.
*/
-static void dlm_wait_timer_fn(unsigned long data)
-{
- struct dlm_ls *ls = (struct dlm_ls *) data;
- mod_timer(&ls->ls_timer, jiffies + (dlm_config.ci_recover_timer * HZ));
- wake_up(&ls->ls_wait_general);
-}
-
int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
int error = 0;
+ int rv;
- init_timer(&ls->ls_timer);
- ls->ls_timer.function = dlm_wait_timer_fn;
- ls->ls_timer.data = (long) ls;
- ls->ls_timer.expires = jiffies + (dlm_config.ci_recover_timer * HZ);
- add_timer(&ls->ls_timer);
-
- wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
- del_timer_sync(&ls->ls_timer);
+ while (1) {
+ rv = wait_event_timeout(ls->ls_wait_general,
+ testfn(ls) || dlm_recovery_stopped(ls),
+ dlm_config.ci_recover_timer * HZ);
+ if (rv)
+ break;
+ }
if (dlm_recovery_stopped(ls)) {
log_debug(ls, "dlm_wait_function aborted");
@@ -277,22 +270,6 @@ static void recover_list_del(struct dlm_rsb *r)
dlm_put_rsb(r);
}
-static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
-{
- struct dlm_rsb *r = NULL;
-
- spin_lock(&ls->ls_recover_list_lock);
-
- list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
- if (id == (unsigned long) r)
- goto out;
- }
- r = NULL;
- out:
- spin_unlock(&ls->ls_recover_list_lock);
- return r;
-}
-
static void recover_list_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r, *s;
@@ -313,6 +290,94 @@ static void recover_list_clear(struct dlm_ls *ls)
spin_unlock(&ls->ls_recover_list_lock);
}
+static int recover_idr_empty(struct dlm_ls *ls)
+{
+ int empty = 1;
+
+ spin_lock(&ls->ls_recover_idr_lock);
+ if (ls->ls_recover_list_count)
+ empty = 0;
+ spin_unlock(&ls->ls_recover_idr_lock);
+
+ return empty;
+}
+
+static int recover_idr_add(struct dlm_rsb *r)
+{
+ struct dlm_ls *ls = r->res_ls;
+ int rv, id;
+
+ rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
+ if (!rv)
+ return -ENOMEM;
+
+ spin_lock(&ls->ls_recover_idr_lock);
+ if (r->res_id) {
+ spin_unlock(&ls->ls_recover_idr_lock);
+ return -1;
+ }
+ rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
+ if (rv) {
+ spin_unlock(&ls->ls_recover_idr_lock);
+ return rv;
+ }
+ r->res_id = id;
+ ls->ls_recover_list_count++;
+ dlm_hold_rsb(r);
+ spin_unlock(&ls->ls_recover_idr_lock);
+ return 0;
+}
+
+static void recover_idr_del(struct dlm_rsb *r)
+{
+ struct dlm_ls *ls = r->res_ls;
+
+ spin_lock(&ls->ls_recover_idr_lock);
+ idr_remove(&ls->ls_recover_idr, r->res_id);
+ r->res_id = 0;
+ ls->ls_recover_list_count--;
+ spin_unlock(&ls->ls_recover_idr_lock);
+
+ dlm_put_rsb(r);
+}
+
+static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+{
+ struct dlm_rsb *r;
+
+ spin_lock(&ls->ls_recover_idr_lock);
+ r = idr_find(&ls->ls_recover_idr, (int)id);
+ spin_unlock(&ls->ls_recover_idr_lock);
+ return r;
+}
+
+static int recover_idr_clear_rsb(int id, void *p, void *data)
+{
+ struct dlm_ls *ls = data;
+ struct dlm_rsb *r = p;
+
+ r->res_id = 0;
+ r->res_recover_locks_count = 0;
+ ls->ls_recover_list_count--;
+
+ dlm_put_rsb(r);
+ return 0;
+}
+
+static void recover_idr_clear(struct dlm_ls *ls)
+{
+ spin_lock(&ls->ls_recover_idr_lock);
+ idr_for_each(&ls->ls_recover_idr, recover_idr_clear_rsb, ls);
+ idr_remove_all(&ls->ls_recover_idr);
+
+ if (ls->ls_recover_list_count != 0) {
+ log_error(ls, "warning: recover_list_count %d",
+ ls->ls_recover_list_count);
+ ls->ls_recover_list_count = 0;
+ }
+ spin_unlock(&ls->ls_recover_idr_lock);
+}
+
/* Master recovery: find new master node for rsb's that were
mastered on nodes that have been removed.
@@ -361,9 +426,8 @@ static void set_master_lkbs(struct dlm_rsb *r)
* rsb's to consider.
*/
-static void set_new_master(struct dlm_rsb *r, int nodeid)
+static void set_new_master(struct dlm_rsb *r)
{
- r->res_nodeid = nodeid;
set_master_lkbs(r);
rsb_set_flag(r, RSB_NEW_MASTER);
rsb_set_flag(r, RSB_NEW_MASTER2);
@@ -372,31 +436,48 @@ static void set_new_master(struct dlm_rsb *r, int nodeid)
/*
* We do async lookups on rsb's that need new masters. The rsb's
* waiting for a lookup reply are kept on the recover_list.
+ *
+ * Another node recovering the master may have sent us a rcom lookup,
+ * and our dlm_master_lookup() set it as the new master, along with
+ * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
+ * equals our_nodeid below).
*/
-static int recover_master(struct dlm_rsb *r)
+static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
struct dlm_ls *ls = r->res_ls;
- int error, ret_nodeid;
- int our_nodeid = dlm_our_nodeid();
- int dir_nodeid = dlm_dir_nodeid(r);
+ int our_nodeid, dir_nodeid;
+ int is_removed = 0;
+ int error;
+
+ if (is_master(r))
+ return 0;
+
+ is_removed = dlm_is_removed(ls, r->res_nodeid);
+
+ if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
+ return 0;
+
+ our_nodeid = dlm_our_nodeid();
+ dir_nodeid = dlm_dir_nodeid(r);
if (dir_nodeid == our_nodeid) {
- error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
- r->res_length, &ret_nodeid);
- if (error)
- log_error(ls, "recover dir lookup error %d", error);
+ if (is_removed) {
+ r->res_master_nodeid = our_nodeid;
+ r->res_nodeid = 0;
+ }
- if (ret_nodeid == our_nodeid)
- ret_nodeid = 0;
- lock_rsb(r);
- set_new_master(r, ret_nodeid);
- unlock_rsb(r);
+ /* set master of lkbs to ourself when is_removed, or to
+ another new master which we set along with NEW_MASTER
+ in dlm_master_lookup */
+ set_new_master(r);
+ error = 0;
} else {
- recover_list_add(r);
+ recover_idr_add(r);
error = dlm_send_rcom_lookup(r, dir_nodeid);
}
+ (*count)++;
return error;
}
@@ -415,7 +496,7 @@ static int recover_master(struct dlm_rsb *r)
* resent.
*/
-static int recover_master_static(struct dlm_rsb *r)
+static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
int dir_nodeid = dlm_dir_nodeid(r);
int new_master = dir_nodeid;
@@ -423,11 +504,12 @@ static int recover_master_static(struct dlm_rsb *r)
if (dir_nodeid == dlm_our_nodeid())
new_master = 0;
- lock_rsb(r);
dlm_purge_mstcpy_locks(r);
- set_new_master(r, new_master);
- unlock_rsb(r);
- return 1;
+ r->res_master_nodeid = dir_nodeid;
+ r->res_nodeid = new_master;
+ set_new_master(r);
+ (*count)++;
+ return 0;
}
/*
@@ -443,7 +525,10 @@ static int recover_master_static(struct dlm_rsb *r)
int dlm_recover_masters(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- int error = 0, count = 0;
+ unsigned int total = 0;
+ unsigned int count = 0;
+ int nodir = dlm_no_directory(ls);
+ int error;
log_debug(ls, "dlm_recover_masters");
@@ -455,50 +540,58 @@ int dlm_recover_masters(struct dlm_ls *ls)
goto out;
}
- if (dlm_no_directory(ls))
- count += recover_master_static(r);
- else if (!is_master(r) &&
- (dlm_is_removed(ls, r->res_nodeid) ||
- rsb_flag(r, RSB_NEW_MASTER))) {
- recover_master(r);
- count++;
- }
+ lock_rsb(r);
+ if (nodir)
+ error = recover_master_static(r, &count);
+ else
+ error = recover_master(r, &count);
+ unlock_rsb(r);
+ cond_resched();
+ total++;
- schedule();
+ if (error) {
+ up_read(&ls->ls_root_sem);
+ goto out;
+ }
}
up_read(&ls->ls_root_sem);
- log_debug(ls, "dlm_recover_masters %d resources", count);
+ log_debug(ls, "dlm_recover_masters %u of %u", count, total);
- error = dlm_wait_function(ls, &recover_list_empty);
+ error = dlm_wait_function(ls, &recover_idr_empty);
out:
if (error)
- recover_list_clear(ls);
+ recover_idr_clear(ls);
return error;
}
int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
struct dlm_rsb *r;
- int nodeid;
+ int ret_nodeid, new_master;
- r = recover_list_find(ls, rc->rc_id);
+ r = recover_idr_find(ls, rc->rc_id);
if (!r) {
log_error(ls, "dlm_recover_master_reply no id %llx",
(unsigned long long)rc->rc_id);
goto out;
}
- nodeid = rc->rc_result;
- if (nodeid == dlm_our_nodeid())
- nodeid = 0;
+ ret_nodeid = rc->rc_result;
+
+ if (ret_nodeid == dlm_our_nodeid())
+ new_master = 0;
+ else
+ new_master = ret_nodeid;
lock_rsb(r);
- set_new_master(r, nodeid);
+ r->res_master_nodeid = ret_nodeid;
+ r->res_nodeid = new_master;
+ set_new_master(r);
unlock_rsb(r);
- recover_list_del(r);
+ recover_idr_del(r);
- if (recover_list_empty(ls))
+ if (recover_idr_empty(ls))
wake_up(&ls->ls_wait_general);
out:
return 0;
@@ -711,6 +804,7 @@ static void recover_lvb(struct dlm_rsb *r)
static void recover_conversion(struct dlm_rsb *r)
{
+ struct dlm_ls *ls = r->res_ls;
struct dlm_lkb *lkb;
int grmode = -1;
@@ -725,10 +819,15 @@ static void recover_conversion(struct dlm_rsb *r)
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
if (lkb->lkb_grmode != DLM_LOCK_IV)
continue;
- if (grmode == -1)
+ if (grmode == -1) {
+ log_debug(ls, "recover_conversion %x set gr to rq %d",
+ lkb->lkb_id, lkb->lkb_rqmode);
lkb->lkb_grmode = lkb->lkb_rqmode;
- else
+ } else {
+ log_debug(ls, "recover_conversion %x set gr %d",
+ lkb->lkb_id, grmode);
lkb->lkb_grmode = grmode;
+ }
}
}
@@ -791,20 +890,8 @@ int dlm_create_root_list(struct dlm_ls *ls)
dlm_hold_rsb(r);
}
- /* If we're using a directory, add tossed rsbs to the root
- list; they'll have entries created in the new directory,
- but no other recovery steps should do anything with them. */
-
- if (dlm_no_directory(ls)) {
- spin_unlock(&ls->ls_rsbtbl[i].lock);
- continue;
- }
-
- for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- list_add(&r->res_root_list, &ls->ls_root_list);
- dlm_hold_rsb(r);
- }
+ if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
+ log_error(ls, "dlm_create_root_list toss not empty");
spin_unlock(&ls->ls_rsbtbl[i].lock);
}
out:
@@ -824,28 +911,26 @@ void dlm_release_root_list(struct dlm_ls *ls)
up_write(&ls->ls_root_sem);
}
-/* If not using a directory, clear the entire toss list, there's no benefit to
- caching the master value since it's fixed. If we are using a dir, keep the
- rsb's we're the master of. Recovery will add them to the root list and from
- there they'll be entered in the rebuilt directory. */
-
-void dlm_clear_toss_list(struct dlm_ls *ls)
+void dlm_clear_toss(struct dlm_ls *ls)
{
struct rb_node *n, *next;
- struct dlm_rsb *rsb;
+ struct dlm_rsb *r;
+ unsigned int count = 0;
int i;
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
spin_lock(&ls->ls_rsbtbl[i].lock);
for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
- next = rb_next(n);;
- rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
- if (dlm_no_directory(ls) || !is_master(rsb)) {
- rb_erase(n, &ls->ls_rsbtbl[i].toss);
- dlm_free_rsb(rsb);
- }
+ next = rb_next(n);
+ r = rb_entry(n, struct dlm_rsb, res_hashnode);
+ rb_erase(n, &ls->ls_rsbtbl[i].toss);
+ dlm_free_rsb(r);
+ count++;
}
spin_unlock(&ls->ls_rsbtbl[i].lock);
}
+
+ if (count)
+ log_debug(ls, "dlm_clear_toss %u done", count);
}
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
index ebd0363f1e08..d8c8738c70eb 100644
--- a/fs/dlm/recover.h
+++ b/fs/dlm/recover.h
@@ -27,7 +27,7 @@ int dlm_recover_locks(struct dlm_ls *ls);
void dlm_recovered_lock(struct dlm_rsb *r);
int dlm_create_root_list(struct dlm_ls *ls);
void dlm_release_root_list(struct dlm_ls *ls);
-void dlm_clear_toss_list(struct dlm_ls *ls);
+void dlm_clear_toss(struct dlm_ls *ls);
void dlm_recover_rsbs(struct dlm_ls *ls);
#endif /* __RECOVER_DOT_H__ */
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index f1a9073c0835..88ce65ff021e 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -60,12 +60,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_callback_suspend(ls);
- /*
- * Free non-master tossed rsb's. Master rsb's are kept on toss
- * list and put on root list to be included in resdir recovery.
- */
-
- dlm_clear_toss_list(ls);
+ dlm_clear_toss(ls);
/*
* This list of root rsb's will be the basis of most of the recovery
@@ -84,6 +79,10 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
goto fail;
}
+ dlm_recover_dir_nodeid(ls);
+
+ ls->ls_recover_dir_sent_res = 0;
+ ls->ls_recover_dir_sent_msg = 0;
ls->ls_recover_locks_in = 0;
dlm_set_recover_status(ls, DLM_RS_NODES);
@@ -115,6 +114,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
goto fail;
}
+ log_debug(ls, "dlm_recover_directory %u out %u messages",
+ ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);
+
/*
* We may have outstanding operations that are waiting for a reply from
* a failed node. Mark these to be resent after recovery. Unlock and
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 989e034f02bd..cfb4b9fed520 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -385,8 +385,6 @@ struct ecryptfs_msg_ctx {
struct mutex mux;
};
-struct ecryptfs_daemon;
-
struct ecryptfs_daemon {
#define ECRYPTFS_DAEMON_IN_READ 0x00000001
#define ECRYPTFS_DAEMON_IN_POLL 0x00000002
@@ -394,10 +392,7 @@ struct ecryptfs_daemon {
#define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008
u32 flags;
u32 num_queued_msg_ctx;
- struct pid *pid;
- uid_t euid;
- struct user_namespace *user_ns;
- struct task_struct *task;
+ struct file *file;
struct mutex mux;
struct list_head msg_ctx_out_queue;
wait_queue_head_t wait;
@@ -554,6 +549,8 @@ extern struct kmem_cache *ecryptfs_key_tfm_cache;
struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb);
void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
+int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode);
int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
size_t *decrypted_name_size,
struct dentry *ecryptfs_dentry,
@@ -607,13 +604,8 @@ int
ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags);
int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
-int ecryptfs_process_helo(uid_t euid, struct user_namespace *user_ns,
- struct pid *pid);
-int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
- struct pid *pid);
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
- struct user_namespace *user_ns, struct pid *pid,
- u32 seq);
+int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
+ struct ecryptfs_message *msg, u32 seq);
int ecryptfs_send_message(char *data, int data_len,
struct ecryptfs_msg_ctx **msg_ctx);
int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
@@ -658,8 +650,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
struct inode *ecryptfs_inode);
struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index);
int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
-int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
- struct user_namespace *user_ns);
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon);
int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
size_t *length_size);
int ecryptfs_write_packet_length(char *dest, size_t size,
@@ -671,8 +662,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
u16 msg_flags, struct ecryptfs_daemon *daemon);
void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx);
int
-ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
- struct user_namespace *user_ns, struct pid *pid);
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file);
int ecryptfs_init_kthread(void);
void ecryptfs_destroy_kthread(void);
int ecryptfs_privileged_open(struct file **lower_file,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 2b17f2f9b121..44ce5c6a541d 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -138,29 +138,50 @@ out:
return rc;
}
-static void ecryptfs_vma_close(struct vm_area_struct *vma)
-{
- filemap_write_and_wait(vma->vm_file->f_mapping);
-}
-
-static const struct vm_operations_struct ecryptfs_file_vm_ops = {
- .close = ecryptfs_vma_close,
- .fault = filemap_fault,
-};
+struct kmem_cache *ecryptfs_file_info_cache;
-static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+static int read_or_initialize_metadata(struct dentry *dentry)
{
+ struct inode *inode = dentry->d_inode;
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
+ struct ecryptfs_crypt_stat *crypt_stat;
int rc;
- rc = generic_file_mmap(file, vma);
+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+ mount_crypt_stat = &ecryptfs_superblock_to_private(
+ inode->i_sb)->mount_crypt_stat;
+ mutex_lock(&crypt_stat->cs_mutex);
+
+ if (crypt_stat->flags & ECRYPTFS_POLICY_APPLIED &&
+ crypt_stat->flags & ECRYPTFS_KEY_VALID) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = ecryptfs_read_metadata(dentry);
if (!rc)
- vma->vm_ops = &ecryptfs_file_vm_ops;
+ goto out;
+
+ if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED) {
+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
+ | ECRYPTFS_ENCRYPTED);
+ rc = 0;
+ goto out;
+ }
+ if (!(mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED) &&
+ !i_size_read(ecryptfs_inode_to_lower(inode))) {
+ rc = ecryptfs_initialize_file(dentry, inode);
+ if (!rc)
+ goto out;
+ }
+
+ rc = -EIO;
+out:
+ mutex_unlock(&crypt_stat->cs_mutex);
return rc;
}
-struct kmem_cache *ecryptfs_file_info_cache;
-
/**
* ecryptfs_open
* @inode: inode specifying file to open
@@ -236,32 +257,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
rc = 0;
goto out;
}
- mutex_lock(&crypt_stat->cs_mutex);
- if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
- || !(crypt_stat->flags & ECRYPTFS_KEY_VALID)) {
- rc = ecryptfs_read_metadata(ecryptfs_dentry);
- if (rc) {
- ecryptfs_printk(KERN_DEBUG,
- "Valid headers not found\n");
- if (!(mount_crypt_stat->flags
- & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
- rc = -EIO;
- printk(KERN_WARNING "Either the lower file "
- "is not in a valid eCryptfs format, "
- "or the key could not be retrieved. "
- "Plaintext passthrough mode is not "
- "enabled; returning -EIO\n");
- mutex_unlock(&crypt_stat->cs_mutex);
- goto out_put;
- }
- rc = 0;
- crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
- | ECRYPTFS_ENCRYPTED);
- mutex_unlock(&crypt_stat->cs_mutex);
- goto out;
- }
- }
- mutex_unlock(&crypt_stat->cs_mutex);
+ rc = read_or_initialize_metadata(ecryptfs_dentry);
+ if (rc)
+ goto out_put;
ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = "
"[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino,
(unsigned long long)i_size_read(inode));
@@ -292,15 +290,7 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
static int
ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- int rc = 0;
-
- rc = generic_file_fsync(file, start, end, datasync);
- if (rc)
- goto out;
- rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end,
- datasync);
-out:
- return rc;
+ return vfs_fsync(ecryptfs_file_to_lower(file), datasync);
}
static int ecryptfs_fasync(int fd, struct file *file, int flag)
@@ -369,7 +359,7 @@ const struct file_operations ecryptfs_main_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = ecryptfs_compat_ioctl,
#endif
- .mmap = ecryptfs_file_mmap,
+ .mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index ffa2be57804d..534b129ea676 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -143,6 +143,31 @@ static int ecryptfs_interpose(struct dentry *lower_dentry,
return 0;
}
+static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
+ struct dentry *lower_dir_dentry;
+ int rc;
+
+ dget(lower_dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+ rc = vfs_unlink(lower_dir_inode, lower_dentry);
+ if (rc) {
+ printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
+ goto out_unlock;
+ }
+ fsstack_copy_attr_times(dir, lower_dir_inode);
+ set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
+ inode->i_ctime = dir->i_ctime;
+ d_drop(dentry);
+out_unlock:
+ unlock_dir(lower_dir_dentry);
+ dput(lower_dentry);
+ return rc;
+}
+
/**
* ecryptfs_do_create
* @directory_inode: inode of the new file's dentry's parent in ecryptfs
@@ -182,8 +207,10 @@ ecryptfs_do_create(struct inode *directory_inode,
}
inode = __ecryptfs_get_inode(lower_dentry->d_inode,
directory_inode->i_sb);
- if (IS_ERR(inode))
+ if (IS_ERR(inode)) {
+ vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
goto out_lock;
+ }
fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
out_lock:
@@ -200,8 +227,8 @@ out:
*
* Returns zero on success
*/
-static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
- struct inode *ecryptfs_inode)
+int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
@@ -264,7 +291,9 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
* that this on disk file is prepared to be an ecryptfs file */
rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
- drop_nlink(ecryptfs_inode);
+ ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
+ ecryptfs_inode);
+ make_bad_inode(ecryptfs_inode);
unlock_new_inode(ecryptfs_inode);
iput(ecryptfs_inode);
goto out;
@@ -318,21 +347,20 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
struct vfsmount *lower_mnt;
int rc = 0;
- lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
- fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
- BUG_ON(!lower_dentry->d_count);
-
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
- ecryptfs_set_dentry_private(dentry, dentry_info);
if (!dentry_info) {
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to allocate ecryptfs_dentry_info struct\n",
__func__);
dput(lower_dentry);
- mntput(lower_mnt);
- d_drop(dentry);
return -ENOMEM;
}
+
+ lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
+ fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
+ BUG_ON(!lower_dentry->d_count);
+
+ ecryptfs_set_dentry_private(dentry, dentry_info);
ecryptfs_set_dentry_lower(dentry, lower_dentry);
ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
@@ -381,12 +409,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
struct dentry *lower_dir_dentry, *lower_dentry;
int rc = 0;
- if ((ecryptfs_dentry->d_name.len == 1
- && !strcmp(ecryptfs_dentry->d_name.name, "."))
- || (ecryptfs_dentry->d_name.len == 2
- && !strcmp(ecryptfs_dentry->d_name.name, ".."))) {
- goto out_d_drop;
- }
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
@@ -397,8 +419,8 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
- encrypted_and_encoded_name);
- goto out_d_drop;
+ ecryptfs_dentry->d_name.name);
+ goto out;
}
if (lower_dentry->d_inode)
goto interpose;
@@ -415,7 +437,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
- goto out_d_drop;
+ goto out;
}
mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
lower_dentry = lookup_one_len(encrypted_and_encoded_name,
@@ -427,14 +449,11 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
- goto out_d_drop;
+ goto out;
}
interpose:
rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry,
ecryptfs_dir_inode);
- goto out;
-out_d_drop:
- d_drop(ecryptfs_dentry);
out:
kfree(encrypted_and_encoded_name);
return ERR_PTR(rc);
@@ -476,27 +495,7 @@ out_lock:
static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
{
- int rc = 0;
- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
- struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
- struct dentry *lower_dir_dentry;
-
- dget(lower_dentry);
- lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_unlink(lower_dir_inode, lower_dentry);
- if (rc) {
- printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
- goto out_unlock;
- }
- fsstack_copy_attr_times(dir, lower_dir_inode);
- set_nlink(dentry->d_inode,
- ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink);
- dentry->d_inode->i_ctime = dir->i_ctime;
- d_drop(dentry);
-out_unlock:
- unlock_dir(lower_dir_dentry);
- dput(lower_dentry);
- return rc;
+ return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
}
static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
@@ -971,12 +970,6 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
goto out;
}
- if (S_ISREG(inode->i_mode)) {
- rc = filemap_write_and_wait(inode->i_mapping);
- if (rc)
- goto out;
- fsstack_copy_attr_all(inode, lower_inode);
- }
memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 1c0b3b6b75c6..2768138eefee 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -279,6 +279,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
char *fnek_src;
char *cipher_key_bytes_src;
char *fn_cipher_key_bytes_src;
+ u8 cipher_code;
*check_ruid = 0;
@@ -420,6 +421,18 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
&& !fn_cipher_key_bytes_set)
mount_crypt_stat->global_default_fn_cipher_key_bytes =
mount_crypt_stat->global_default_cipher_key_size;
+
+ cipher_code = ecryptfs_code_for_cipher_string(
+ mount_crypt_stat->global_default_cipher_name,
+ mount_crypt_stat->global_default_cipher_key_size);
+ if (!cipher_code) {
+ ecryptfs_printk(KERN_ERR,
+ "eCryptfs doesn't support cipher: %s",
+ mount_crypt_stat->global_default_cipher_name);
+ rc = -EINVAL;
+ goto out;
+ }
+
mutex_lock(&key_tfm_list_mutex);
if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
NULL)) {
@@ -540,6 +553,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
}
ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
+
+ /**
+ * Set the POSIX ACL flag based on whether ACLs are enabled in the lower
+ * mount. Force a read-only eCryptfs mount if the lower mount is ro.
+ * Allow a ro eCryptfs mount even when the lower mount is rw.
+ */
+ s->s_flags = flags & ~MS_POSIXACL;
+ s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
+
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
s->s_magic = ECRYPTFS_SUPER_MAGIC;
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index a750f957b145..b29bb8bfa8d9 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -32,8 +32,8 @@ static struct mutex ecryptfs_msg_ctx_lists_mux;
static struct hlist_head *ecryptfs_daemon_hash;
struct mutex ecryptfs_daemon_hash_mux;
static int ecryptfs_hash_bits;
-#define ecryptfs_uid_hash(uid) \
- hash_long((unsigned long)uid, ecryptfs_hash_bits)
+#define ecryptfs_current_euid_hash(uid) \
+ hash_long((unsigned long)current_euid(), ecryptfs_hash_bits)
static u32 ecryptfs_msg_counter;
static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
@@ -105,26 +105,24 @@ void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
/**
* ecryptfs_find_daemon_by_euid
- * @euid: The effective user id which maps to the desired daemon id
- * @user_ns: The namespace in which @euid applies
* @daemon: If return value is zero, points to the desired daemon pointer
*
* Must be called with ecryptfs_daemon_hash_mux held.
*
- * Search the hash list for the given user id.
+ * Search the hash list for the current effective user id.
*
* Returns zero if the user id exists in the list; non-zero otherwise.
*/
-int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
- struct user_namespace *user_ns)
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon)
{
struct hlist_node *elem;
int rc;
hlist_for_each_entry(*daemon, elem,
- &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)],
- euid_chain) {
- if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) {
+ &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()],
+ euid_chain) {
+ if ((*daemon)->file->f_cred->euid == current_euid() &&
+ (*daemon)->file->f_cred->user_ns == current_user_ns()) {
rc = 0;
goto out;
}
@@ -137,9 +135,7 @@ out:
/**
* ecryptfs_spawn_daemon - Create and initialize a new daemon struct
* @daemon: Pointer to set to newly allocated daemon struct
- * @euid: Effective user id for the daemon
- * @user_ns: The namespace in which @euid applies
- * @pid: Process id for the daemon
+ * @file: File used when opening /dev/ecryptfs
*
* Must be called while holding
* ecryptfs_daemon_hash_mux
@@ -147,8 +143,7 @@ out:
* Returns zero on success; non-zero otherwise
*/
int
-ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
- struct user_namespace *user_ns, struct pid *pid)
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, struct file *file)
{
int rc = 0;
@@ -159,16 +154,13 @@ ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
"GFP_KERNEL memory\n", __func__, sizeof(**daemon));
goto out;
}
- (*daemon)->euid = euid;
- (*daemon)->user_ns = get_user_ns(user_ns);
- (*daemon)->pid = get_pid(pid);
- (*daemon)->task = current;
+ (*daemon)->file = file;
mutex_init(&(*daemon)->mux);
INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue);
init_waitqueue_head(&(*daemon)->wait);
(*daemon)->num_queued_msg_ctx = 0;
hlist_add_head(&(*daemon)->euid_chain,
- &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]);
+ &ecryptfs_daemon_hash[ecryptfs_current_euid_hash()]);
out:
return rc;
}
@@ -188,9 +180,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ)
|| (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) {
rc = -EBUSY;
- printk(KERN_WARNING "%s: Attempt to destroy daemon with pid "
- "[0x%p], but it is in the midst of a read or a poll\n",
- __func__, daemon->pid);
mutex_unlock(&daemon->mux);
goto out;
}
@@ -203,12 +192,6 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
}
hlist_del(&daemon->euid_chain);
- if (daemon->task)
- wake_up_process(daemon->task);
- if (daemon->pid)
- put_pid(daemon->pid);
- if (daemon->user_ns)
- put_user_ns(daemon->user_ns);
mutex_unlock(&daemon->mux);
kzfree(daemon);
out:
@@ -216,42 +199,9 @@ out:
}
/**
- * ecryptfs_process_quit
- * @euid: The user ID owner of the message
- * @user_ns: The namespace in which @euid applies
- * @pid: The process ID for the userspace program that sent the
- * message
- *
- * Deletes the corresponding daemon for the given euid and pid, if
- * it is the registered that is requesting the deletion. Returns zero
- * after deleting the desired daemon; non-zero otherwise.
- */
-int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
- struct pid *pid)
-{
- struct ecryptfs_daemon *daemon;
- int rc;
-
- mutex_lock(&ecryptfs_daemon_hash_mux);
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns);
- if (rc || !daemon) {
- rc = -EINVAL;
- printk(KERN_ERR "Received request from user [%d] to "
- "unregister unrecognized daemon [0x%p]\n", euid, pid);
- goto out_unlock;
- }
- rc = ecryptfs_exorcise_daemon(daemon);
-out_unlock:
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- return rc;
-}
-
-/**
* ecryptfs_process_response
* @msg: The ecryptfs message received; the caller should sanity check
* msg->data_len and free the memory
- * @pid: The process ID of the userspace application that sent the
- * message
* @seq: The sequence number of the message; must match the sequence
* number for the existing message context waiting for this
* response
@@ -270,16 +220,11 @@ out_unlock:
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
- struct user_namespace *user_ns, struct pid *pid,
- u32 seq)
+int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
+ struct ecryptfs_message *msg, u32 seq)
{
- struct ecryptfs_daemon *uninitialized_var(daemon);
struct ecryptfs_msg_ctx *msg_ctx;
size_t msg_size;
- struct nsproxy *nsproxy;
- struct user_namespace *tsk_user_ns;
- uid_t ctx_euid;
int rc;
if (msg->index >= ecryptfs_message_buf_len) {
@@ -292,51 +237,6 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
}
msg_ctx = &ecryptfs_msg_ctx_arr[msg->index];
mutex_lock(&msg_ctx->mux);
- mutex_lock(&ecryptfs_daemon_hash_mux);
- rcu_read_lock();
- nsproxy = task_nsproxy(msg_ctx->task);
- if (nsproxy == NULL) {
- rc = -EBADMSG;
- printk(KERN_ERR "%s: Receiving process is a zombie. Dropping "
- "message.\n", __func__);
- rcu_read_unlock();
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- goto wake_up;
- }
- tsk_user_ns = __task_cred(msg_ctx->task)->user_ns;
- ctx_euid = task_euid(msg_ctx->task);
- rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns);
- rcu_read_unlock();
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- if (rc) {
- rc = -EBADMSG;
- printk(KERN_WARNING "%s: User [%d] received a "
- "message response from process [0x%p] but does "
- "not have a registered daemon\n", __func__,
- ctx_euid, pid);
- goto wake_up;
- }
- if (ctx_euid != euid) {
- rc = -EBADMSG;
- printk(KERN_WARNING "%s: Received message from user "
- "[%d]; expected message from user [%d]\n", __func__,
- euid, ctx_euid);
- goto unlock;
- }
- if (tsk_user_ns != user_ns) {
- rc = -EBADMSG;
- printk(KERN_WARNING "%s: Received message from user_ns "
- "[0x%p]; expected message from user_ns [0x%p]\n",
- __func__, user_ns, tsk_user_ns);
- goto unlock;
- }
- if (daemon->pid != pid) {
- rc = -EBADMSG;
- printk(KERN_ERR "%s: User [%d] sent a message response "
- "from an unrecognized process [0x%p]\n",
- __func__, ctx_euid, pid);
- goto unlock;
- }
if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) {
rc = -EINVAL;
printk(KERN_WARNING "%s: Desired context element is not "
@@ -359,9 +259,8 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
}
memcpy(msg_ctx->msg, msg, msg_size);
msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_DONE;
- rc = 0;
-wake_up:
wake_up_process(msg_ctx->task);
+ rc = 0;
unlock:
mutex_unlock(&msg_ctx->mux);
out:
@@ -383,14 +282,11 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type,
struct ecryptfs_msg_ctx **msg_ctx)
{
struct ecryptfs_daemon *daemon;
- uid_t euid = current_euid();
int rc;
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
+ rc = ecryptfs_find_daemon_by_euid(&daemon);
if (rc || !daemon) {
rc = -ENOTCONN;
- printk(KERN_ERR "%s: User [%d] does not have a daemon "
- "registered\n", __func__, euid);
goto out;
}
mutex_lock(&ecryptfs_msg_ctx_lists_mux);
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index c0038f6566d4..412e6eda25f8 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -33,7 +33,7 @@ static atomic_t ecryptfs_num_miscdev_opens;
/**
* ecryptfs_miscdev_poll
- * @file: dev file (ignored)
+ * @file: dev file
* @pt: dev poll table (ignored)
*
* Returns the poll mask
@@ -41,20 +41,10 @@ static atomic_t ecryptfs_num_miscdev_opens;
static unsigned int
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
- struct ecryptfs_daemon *daemon;
+ struct ecryptfs_daemon *daemon = file->private_data;
unsigned int mask = 0;
- uid_t euid = current_euid();
- int rc;
- mutex_lock(&ecryptfs_daemon_hash_mux);
- /* TODO: Just use file->private_data? */
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- if (rc || !daemon) {
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- return -EINVAL;
- }
mutex_lock(&daemon->mux);
- mutex_unlock(&ecryptfs_daemon_hash_mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
printk(KERN_WARNING "%s: Attempt to poll on zombified "
"daemon\n", __func__);
@@ -79,7 +69,7 @@ out_unlock_daemon:
/**
* ecryptfs_miscdev_open
* @inode: inode of miscdev handle (ignored)
- * @file: file for miscdev handle (ignored)
+ * @file: file for miscdev handle
*
* Returns zero on success; non-zero otherwise
*/
@@ -87,7 +77,6 @@ static int
ecryptfs_miscdev_open(struct inode *inode, struct file *file)
{
struct ecryptfs_daemon *daemon = NULL;
- uid_t euid = current_euid();
int rc;
mutex_lock(&ecryptfs_daemon_hash_mux);
@@ -98,30 +87,20 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
"count; rc = [%d]\n", __func__, rc);
goto out_unlock_daemon_list;
}
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- if (rc || !daemon) {
- rc = ecryptfs_spawn_daemon(&daemon, euid, current_user_ns(),
- task_pid(current));
- if (rc) {
- printk(KERN_ERR "%s: Error attempting to spawn daemon; "
- "rc = [%d]\n", __func__, rc);
- goto out_module_put_unlock_daemon_list;
- }
- }
- mutex_lock(&daemon->mux);
- if (daemon->pid != task_pid(current)) {
+ rc = ecryptfs_find_daemon_by_euid(&daemon);
+ if (!rc) {
rc = -EINVAL;
- printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], "
- "but pid [0x%p] has attempted to open the handle "
- "instead\n", __func__, daemon->pid, daemon->euid,
- task_pid(current));
- goto out_unlock_daemon;
+ goto out_unlock_daemon_list;
+ }
+ rc = ecryptfs_spawn_daemon(&daemon, file);
+ if (rc) {
+ printk(KERN_ERR "%s: Error attempting to spawn daemon; "
+ "rc = [%d]\n", __func__, rc);
+ goto out_module_put_unlock_daemon_list;
}
+ mutex_lock(&daemon->mux);
if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
rc = -EBUSY;
- printk(KERN_ERR "%s: Miscellaneous device handle may only be "
- "opened once per daemon; pid [0x%p] already has this "
- "handle open\n", __func__, daemon->pid);
goto out_unlock_daemon;
}
daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
@@ -140,7 +119,7 @@ out_unlock_daemon_list:
/**
* ecryptfs_miscdev_release
* @inode: inode of fs/ecryptfs/euid handle (ignored)
- * @file: file for fs/ecryptfs/euid handle (ignored)
+ * @file: file for fs/ecryptfs/euid handle
*
* This keeps the daemon registered until the daemon sends another
* ioctl to fs/ecryptfs/ctl or until the kernel module unregisters.
@@ -150,20 +129,18 @@ out_unlock_daemon_list:
static int
ecryptfs_miscdev_release(struct inode *inode, struct file *file)
{
- struct ecryptfs_daemon *daemon = NULL;
- uid_t euid = current_euid();
+ struct ecryptfs_daemon *daemon = file->private_data;
int rc;
- mutex_lock(&ecryptfs_daemon_hash_mux);
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- if (rc || !daemon)
- daemon = file->private_data;
mutex_lock(&daemon->mux);
BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
atomic_dec(&ecryptfs_num_miscdev_opens);
mutex_unlock(&daemon->mux);
+
+ mutex_lock(&ecryptfs_daemon_hash_mux);
rc = ecryptfs_exorcise_daemon(daemon);
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
if (rc) {
printk(KERN_CRIT "%s: Fatal error whilst attempting to "
"shut down daemon; rc = [%d]. Please report this "
@@ -171,7 +148,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
BUG();
}
module_put(THIS_MODULE);
- mutex_unlock(&ecryptfs_daemon_hash_mux);
return rc;
}
@@ -248,7 +224,7 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
/**
* ecryptfs_miscdev_read - format and send message from queue
- * @file: fs/ecryptfs/euid miscdevfs handle (ignored)
+ * @file: miscdevfs handle
* @buf: User buffer into which to copy the next message on the daemon queue
* @count: Amount of space available in @buf
* @ppos: Offset in file (ignored)
@@ -262,43 +238,27 @@ static ssize_t
ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
- struct ecryptfs_daemon *daemon;
+ struct ecryptfs_daemon *daemon = file->private_data;
struct ecryptfs_msg_ctx *msg_ctx;
size_t packet_length_size;
char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE];
size_t i;
size_t total_length;
- uid_t euid = current_euid();
int rc;
- mutex_lock(&ecryptfs_daemon_hash_mux);
- /* TODO: Just use file->private_data? */
- rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- if (rc || !daemon) {
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- return -EINVAL;
- }
mutex_lock(&daemon->mux);
- if (task_pid(current) != daemon->pid) {
- mutex_unlock(&daemon->mux);
- mutex_unlock(&ecryptfs_daemon_hash_mux);
- return -EPERM;
- }
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
rc = 0;
- mutex_unlock(&ecryptfs_daemon_hash_mux);
printk(KERN_WARNING "%s: Attempt to read from zombified "
"daemon\n", __func__);
goto out_unlock_daemon;
}
if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) {
rc = 0;
- mutex_unlock(&ecryptfs_daemon_hash_mux);
goto out_unlock_daemon;
}
/* This daemon will not go away so long as this flag is set */
daemon->flags |= ECRYPTFS_DAEMON_IN_READ;
- mutex_unlock(&ecryptfs_daemon_hash_mux);
check_list:
if (list_empty(&daemon->msg_ctx_out_queue)) {
mutex_unlock(&daemon->mux);
@@ -382,16 +342,12 @@ out_unlock_daemon:
* ecryptfs_miscdev_response - process response to message previously sent to daemon
* @data: Bytes comprising struct ecryptfs_message
* @data_size: sizeof(struct ecryptfs_message) + data len
- * @euid: Effective user id of miscdevess sending the miscdev response
- * @user_ns: The namespace in which @euid applies
- * @pid: Miscdevess id of miscdevess sending the miscdev response
* @seq: Sequence number for miscdev response packet
*
* Returns zero on success; non-zero otherwise
*/
-static int ecryptfs_miscdev_response(char *data, size_t data_size,
- uid_t euid, struct user_namespace *user_ns,
- struct pid *pid, u32 seq)
+static int ecryptfs_miscdev_response(struct ecryptfs_daemon *daemon, char *data,
+ size_t data_size, u32 seq)
{
struct ecryptfs_message *msg = (struct ecryptfs_message *)data;
int rc;
@@ -403,7 +359,7 @@ static int ecryptfs_miscdev_response(char *data, size_t data_size,
rc = -EINVAL;
goto out;
}
- rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq);
+ rc = ecryptfs_process_response(daemon, msg, seq);
if (rc)
printk(KERN_ERR
"Error processing response message; rc = [%d]\n", rc);
@@ -413,7 +369,7 @@ out:
/**
* ecryptfs_miscdev_write - handle write to daemon miscdev handle
- * @file: File for misc dev handle (ignored)
+ * @file: File for misc dev handle
* @buf: Buffer containing user data
* @count: Amount of data in @buf
* @ppos: Pointer to offset in file (ignored)
@@ -428,7 +384,6 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
u32 seq;
size_t packet_size, packet_size_length;
char *data;
- uid_t euid = current_euid();
unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE];
ssize_t rc;
@@ -488,10 +443,9 @@ memdup:
}
memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE);
seq = be32_to_cpu(counter_nbo);
- rc = ecryptfs_miscdev_response(
+ rc = ecryptfs_miscdev_response(file->private_data,
&data[PKT_LEN_OFFSET + packet_size_length],
- packet_size, euid, current_user_ns(),
- task_pid(current), seq);
+ packet_size, seq);
if (rc) {
printk(KERN_WARNING "%s: Failed to deliver miscdev "
"response to requesting operation; rc = [%zd]\n",
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index a46b3a8fee1e..bd1d57f98f74 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -66,18 +66,6 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
{
int rc;
- /*
- * Refuse to write the page out if we are called from reclaim context
- * since our writepage() path may potentially allocate memory when
- * calling into the lower fs vfs_write() which may in turn invoke
- * us again.
- */
- if (current->flags & PF_MEMALLOC) {
- redirty_page_for_writepage(wbc, page);
- rc = 0;
- goto out;
- }
-
rc = ecryptfs_encrypt_page(page);
if (rc) {
ecryptfs_printk(KERN_WARNING, "Error encrypting "
@@ -498,7 +486,6 @@ static int ecryptfs_write_end(struct file *file,
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc;
- int need_unlock_page = 1;
ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
"(page w/ index = [0x%.16lx], to = [%d])\n", index, to);
@@ -519,26 +506,26 @@ static int ecryptfs_write_end(struct file *file,
"zeros in page with index = [0x%.16lx]\n", index);
goto out;
}
- set_page_dirty(page);
- unlock_page(page);
- need_unlock_page = 0;
+ rc = ecryptfs_encrypt_page(page);
+ if (rc) {
+ ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
+ "index [0x%.16lx])\n", index);
+ goto out;
+ }
if (pos + copied > i_size_read(ecryptfs_inode)) {
i_size_write(ecryptfs_inode, pos + copied);
ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
"[0x%.16llx]\n",
(unsigned long long)i_size_read(ecryptfs_inode));
- balance_dirty_pages_ratelimited(mapping);
- rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
- if (rc) {
- printk(KERN_ERR "Error writing inode size to metadata; "
- "rc = [%d]\n", rc);
- goto out;
- }
}
- rc = copied;
+ rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode);
+ if (rc)
+ printk(KERN_ERR "Error writing inode size to metadata; "
+ "rc = [%d]\n", rc);
+ else
+ rc = copied;
out:
- if (need_unlock_page)
- unlock_page(page);
+ unlock_page(page);
page_cache_release(page);
return rc;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1c8b55670804..eedec84c1809 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
error = PTR_ERR(file);
goto out_free_fd;
}
- fd_install(fd, file);
ep->file = file;
+ fd_install(fd, file);
return fd;
out_free_fd:
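
The eventpoll hunk above is a one-line ordering fix: fd_install() makes the new descriptor visible to userspace immediately, so another thread of the same process can reach the epoll file before the next statement runs, which means ep->file must already be set. The safe shape is to finish every piece of initialization reachable through the struct file and only then call fd_install(), as the last step. A hedged sketch of that ordering using the anon-inode helpers, with hypothetical names:

#include <linux/module.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_ctx {
	struct file *file;		/* back-pointer, like ep->file */
};

static const struct file_operations demo_ctx_fops = {
	.owner = THIS_MODULE,		/* .read/.poll/.release elided in this sketch */
};

static int demo_create_fd(void)
{
	struct demo_ctx *ctx;
	struct file *file;
	int fd;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_free;

	file = anon_inode_getfile("[demo]", &demo_ctx_fops, ctx, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto out_free;
	}

	ctx->file = file;		/* complete all initialization first ... */
	fd_install(fd, file);		/* ... then publish the fd, as the last step */
	return fd;

out_free:
	kfree(ctx);
	return fd;
}
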
diff --git a/fs/exec.c b/fs/exec.c
index da27b91ff1e8..574cf4de4ec3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1020,7 +1020,7 @@ static void flush_old_files(struct files_struct * files)
unsigned long set, i;
j++;
- i = j * __NFDBITS;
+ i = j * BITS_PER_LONG;
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
@@ -2002,17 +2002,17 @@ static void coredump_finish(struct mm_struct *mm)
void set_dumpable(struct mm_struct *mm, int value)
{
switch (value) {
- case 0:
+ case SUID_DUMPABLE_DISABLED:
clear_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
- case 1:
+ case SUID_DUMPABLE_ENABLED:
set_bit(MMF_DUMPABLE, &mm->flags);
smp_wmb();
clear_bit(MMF_DUMP_SECURELY, &mm->flags);
break;
- case 2:
+ case SUID_DUMPABLE_SAFE:
set_bit(MMF_DUMP_SECURELY, &mm->flags);
smp_wmb();
set_bit(MMF_DUMPABLE, &mm->flags);
@@ -2025,7 +2025,7 @@ static int __get_dumpable(unsigned long mm_flags)
int ret;
ret = mm_flags & MMF_DUMPABLE_MASK;
- return (ret >= 2) ? 2 : ret;
+ return (ret > SUID_DUMPABLE_ENABLED) ? SUID_DUMPABLE_SAFE : ret;
}
int get_dumpable(struct mm_struct *mm)
@@ -2069,25 +2069,18 @@ static void wait_for_dump_helpers(struct file *file)
*/
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
- struct file *rp, *wp;
+ struct file *files[2];
struct fdtable *fdt;
struct coredump_params *cp = (struct coredump_params *)info->data;
struct files_struct *cf = current->files;
+ int err = create_pipe_files(files, 0);
+ if (err)
+ return err;
- wp = create_write_pipe(0);
- if (IS_ERR(wp))
- return PTR_ERR(wp);
-
- rp = create_read_pipe(wp, 0);
- if (IS_ERR(rp)) {
- free_write_pipe(wp);
- return PTR_ERR(rp);
- }
-
- cp->file = wp;
+ cp->file = files[1];
sys_close(0);
- fd_install(0, rp);
+ fd_install(0, files[0]);
spin_lock(&cf->file_lock);
fdt = files_fdtable(cf);
__set_open_fd(0, fdt);
@@ -2111,6 +2104,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
int retval = 0;
int flag = 0;
int ispipe;
+ bool need_nonrelative = false;
static atomic_t core_dump_count = ATOMIC_INIT(0);
struct coredump_params cprm = {
.signr = signr,
@@ -2136,14 +2130,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
if (!cred)
goto fail;
/*
- * We cannot trust fsuid as being the "true" uid of the
- * process nor do we know its entire history. We only know it
- * was tainted so we dump it as root in mode 2.
+ * We cannot trust fsuid as being the "true" uid of the process
+ * nor do we know its entire history. We only know it was tainted
+ * so we dump it as root in mode 2, and only into a controlled
+ * environment (pipe handler or fully qualified path).
*/
- if (__get_dumpable(cprm.mm_flags) == 2) {
+ if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
/* Setuid core dump mode */
flag = O_EXCL; /* Stop rewrite attacks */
cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
+ need_nonrelative = true;
}
retval = coredump_wait(exit_code, &core_state);
@@ -2171,15 +2167,16 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
}
if (cprm.limit == 1) {
- /*
+ /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
+ *
* Normally core limits are irrelevant to pipes, since
* we're not writing to the file system, but we use
- * cprm.limit of 1 here as a speacial value. Any
- * non-1 limit gets set to RLIM_INFINITY below, but
- * a limit of 0 skips the dump. This is a consistent
- * way to catch recursive crashes. We can still crash
- * if the core_pattern binary sets RLIM_CORE = !1
- * but it runs as root, and can do lots of stupid things
+ * cprm.limit of 1 here as a special value; this is a
+ * consistent way to catch recursive crashes.
+ * We can still crash if the core_pattern binary sets
+ * RLIMIT_CORE = !1, but it runs as root, and can do
+ * lots of stupid things.
+ *
* Note that we use task_tgid_vnr here to grab the pid
* of the process group leader. That way we get the
* right pid if a thread in a multi-threaded
@@ -2223,6 +2220,14 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
if (cprm.limit < binfmt->min_coredump)
goto fail_unlock;
+ if (need_nonrelative && cn.corename[0] != '/') {
+ printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+ "to fully qualified path!\n",
+ task_tgid_vnr(current), current->comm);
+ printk(KERN_WARNING "Skipping core dump\n");
+ goto fail_unlock;
+ }
+
cprm.file = filp_open(cn.corename,
O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
0600);
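
For the pipe case above, umh_pipe_setup() now creates both pipe ends with a single create_pipe_files() call, keeps the write end for the kernel's dump writer (cp->file) and installs the read end as fd 0 of the "|helper" named in core_pattern, so the helper simply reads the core image from stdin. The userspace sketch below reproduces that plumbing (the read end of a pipe becoming a child's stdin) to make the arrangement concrete; the "wc -c" child is just a hypothetical stand-in for a core handler.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	int fds[2];			/* fds[0] = read end, fds[1] = write end */
	pid_t pid;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	pid = fork();
	if (pid == 0) {
		/* Child: wire the read end to fd 0, the same effect as the
		 * kernel's fd_install(0, files[0]) for the core helper. */
		dup2(fds[0], 0);
		close(fds[0]);
		close(fds[1]);
		execlp("wc", "wc", "-c", (char *)NULL);
		_exit(127);
	}

	/* Parent: feed "core" data into the write end, like cp->file = files[1]. */
	close(fds[0]);
	dprintf(fds[1], "pretend this is a core image\n");
	close(fds[1]);
	waitpid(pid, NULL, 0);
	return 0;
}
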
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 5badb0c039de..1562c27a2fab 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -37,15 +37,12 @@
#define EXOFS_DBGMSG2(M...) do {} while (0)
-enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
-
unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
- unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
+ unsigned pages = min_t(unsigned, expected_pages,
+ layout->max_io_length / PAGE_SIZE);
- /* TODO: easily support bio chaining */
- pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
return pages;
}
@@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol)
* it might not end here. don't be left with nothing
*/
if (!pcol->expected_pages)
- pcol->expected_pages = MAX_PAGES_KMALLOC;
+ pcol->expected_pages =
+ exofs_max_io_pages(&pcol->sbi->layout, ~0);
}
static int pcol_try_alloc(struct page_collect *pcol)
@@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page)
size_t len;
int ret;
+ BUG_ON(!PageLocked(page));
+
/* FIXME: Just for debugging, will be removed */
if (PageUptodate(page))
EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
@@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
if (!pcol->that_locked_page ||
(pcol->that_locked_page->index != index)) {
- struct page *page = find_get_page(pcol->inode->i_mapping, index);
+ struct page *page;
+ loff_t i_size = i_size_read(pcol->inode);
+
+ if (offset >= i_size) {
+ *uptodate = true;
+ EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
+ return ZERO_PAGE(0);
+ }
+ page = find_get_page(pcol->inode->i_mapping, index);
if (!page) {
page = find_or_create_page(pcol->inode->i_mapping,
index, GFP_NOFS);
@@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page)
{
struct page_collect *pcol = priv;
- if (pcol->that_locked_page != page) {
+ if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
EXOFS_DBGMSG("index=0x%lx\n", page->index);
page_cache_release(page);
return;
}
- EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
+ EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
+ ZERO_PAGE(0) == page ? -1 : page->index);
}
static const struct _ore_r4w_op _r4w_op = {
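
In the exofs read-for-write path above, a source page that lies entirely beyond i_size can be satisfied from ZERO_PAGE(0), the kernel's single shared page of zeroes, and the matching put path must then never drop a reference on it (the new check in __r4w_put_page). A hedged, generic sketch of that pairing, not exofs code; it uses today's put_page(), where the code of this era spelled it page_cache_release():

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Pick a source page for a read-modify-write of one block. */
static struct page *rmw_get_page(struct address_space *mapping, pgoff_t index,
				 loff_t i_size)
{
	if ((loff_t)index << PAGE_SHIFT >= i_size)
		return ZERO_PAGE(0);		/* shared global zero page, no ref taken */
	return find_get_page(mapping, index);	/* may be NULL; takes a reference */
}

static void rmw_put_page(struct page *page)
{
	if (page && page != ZERO_PAGE(0))
		put_page(page);			/* only drop references we actually took */
}
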
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 24a49d47e935..1585db1aa365 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
bio->bi_rw |= REQ_WRITE;
}
- osd_req_write(or, _ios_obj(ios, dev), per_dev->offset,
- bio, per_dev->length);
+ osd_req_write(or, _ios_obj(ios, cur_comp),
+ per_dev->offset, bio, per_dev->length);
ORE_DBGMSG("write(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(per_dev->length), dev);
} else if (ios->kern_buff) {
@@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
(ios->si.unit_off + ios->length >
ios->layout->stripe_unit));
- ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev),
+ ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp),
per_dev->offset,
ios->kern_buff, ios->length);
if (unlikely(ret))
goto out;
ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
"length=0x%llx dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
_LLU(per_dev->offset),
_LLU(ios->length), per_dev->dev);
} else {
- osd_req_set_attributes(or, _ios_obj(ios, dev));
+ osd_req_set_attributes(or, _ios_obj(ios, cur_comp));
ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
- _LLU(_ios_obj(ios, dev)->id),
+ _LLU(_ios_obj(ios, cur_comp)->id),
ios->out_attr_len, dev);
}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 433783624d10..dde41a75c7c8 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait)
ret = ore_write(ios);
if (unlikely(ret))
EXOFS_ERR("%s: ore_write failed.\n", __func__);
- else
- sb->s_dirt = 0;
unlock_super(sb);
@@ -412,14 +410,6 @@ out:
return ret;
}
-static void exofs_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- exofs_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
-}
-
static void _exofs_print_device(const char *msg, const char *dev_path,
struct osd_dev *od, u64 pid)
{
@@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = {
.write_inode = exofs_write_inode,
.evict_inode = exofs_evict_inode,
.put_super = exofs_put_super,
- .write_super = exofs_write_super,
.sync_fs = exofs_sync_fs,
.statfs = exofs_statfs,
};
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 1c3613998862..376aa77f3ca7 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1444,19 +1444,9 @@ ext2_fsblk_t ext2_new_block(struct inode *inode, unsigned long goal, int *errp)
#ifdef EXT2FS_DEBUG
-static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
-
-unsigned long ext2_count_free (struct buffer_head * map, unsigned int numchars)
+unsigned long ext2_count_free(struct buffer_head *map, unsigned int numchars)
{
- unsigned int i;
- unsigned long sum = 0;
-
- if (!map)
- return (0);
- for (i = 0; i < numchars; i++)
- sum += nibblemap[map->b_data[i] & 0xf] +
- nibblemap[(map->b_data[i] >> 4) & 0xf];
- return (sum);
+ return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
}
#endif /* EXT2FS_DEBUG */
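
The debug-only counters in ext2 (and the matching ext3/ext4 hunks below) drop the per-nibble lookup table in favour of memweight(), the lib/memweight.c helper that counts set bits in a buffer: in an allocation bitmap a set bit means "in use", so free = total bits minus set bits. The small userspace check below demonstrates the identity with an ordinary popcount; only memweight() itself is kernel API, the rest is illustrative.

#include <stdio.h>
#include <stddef.h>

/* Count set bits in a byte buffer, which is what memweight() does. */
static unsigned long popweight(const unsigned char *buf, size_t bytes)
{
	unsigned long w = 0;
	size_t i;

	for (i = 0; i < bytes; i++)
		w += __builtin_popcount(buf[i]);
	return w;
}

int main(void)
{
	/* 16 bits of "allocation bitmap": set bits are blocks in use. */
	unsigned char bitmap[2] = { 0xf0, 0x01 };	/* 5 bits set */
	size_t bytes = sizeof(bitmap);
	unsigned long free_bits = bytes * 8 - popweight(bitmap, bytes);

	printf("free blocks = %lu\n", free_bits);	/* prints 11 */
	return 0;
}
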
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index c13eb7b91a11..8f370e012e61 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -644,6 +644,7 @@ unsigned long ext2_count_free_inodes (struct super_block * sb)
}
brelse(bitmap_bh);
printk("ext2_count_free_inodes: stored = %lu, computed = %lu, %lu\n",
+ (unsigned long)
percpu_counter_read(&EXT2_SB(sb)->s_freeinodes_counter),
desc_count, bitmap_count);
return desc_count;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 264d315f6c47..6363ac66fafa 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -79,6 +79,7 @@ void ext2_evict_inode(struct inode * inode)
truncate_inode_pages(&inode->i_data, 0);
if (want_delete) {
+ sb_start_intwrite(inode->i_sb);
/* set dtime */
EXT2_I(inode)->i_dtime = get_seconds();
mark_inode_dirty(inode);
@@ -98,8 +99,10 @@ void ext2_evict_inode(struct inode * inode)
if (unlikely(rsv))
kfree(rsv);
- if (want_delete)
+ if (want_delete) {
ext2_free_inode(inode);
+ sb_end_intwrite(inode->i_sb);
+ }
}
typedef struct {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 9f311d27b16f..af74d9e27b71 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -42,6 +42,8 @@ static void ext2_sync_super(struct super_block *sb,
static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
+static int ext2_freeze(struct super_block *sb);
+static int ext2_unfreeze(struct super_block *sb);
void ext2_error(struct super_block *sb, const char *function,
const char *fmt, ...)
@@ -305,6 +307,8 @@ static const struct super_operations ext2_sops = {
.evict_inode = ext2_evict_inode,
.put_super = ext2_put_super,
.sync_fs = ext2_sync_fs,
+ .freeze_fs = ext2_freeze,
+ .unfreeze_fs = ext2_unfreeze,
.statfs = ext2_statfs,
.remount_fs = ext2_remount,
.show_options = ext2_show_options,
@@ -1200,6 +1204,35 @@ static int ext2_sync_fs(struct super_block *sb, int wait)
return 0;
}
+static int ext2_freeze(struct super_block *sb)
+{
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ /*
+ * Open but unlinked files present? Keep EXT2_VALID_FS flag cleared
+ * because we have unattached inodes and thus the filesystem is not fully
+ * consistent.
+ */
+ if (atomic_long_read(&sb->s_remove_count)) {
+ ext2_sync_fs(sb, 1);
+ return 0;
+ }
+ /* Set EXT2_VALID_FS flag */
+ spin_lock(&sbi->s_lock);
+ sbi->s_es->s_state = cpu_to_le16(sbi->s_mount_state);
+ spin_unlock(&sbi->s_lock);
+ ext2_sync_super(sb, sbi->s_es, 1);
+
+ return 0;
+}
+
+static int ext2_unfreeze(struct super_block *sb)
+{
+ /* Just write sb to clear EXT2_VALID_FS flag */
+ ext2_write_super(sb);
+
+ return 0;
+}
void ext2_write_super(struct super_block *sb)
{
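
The new ext2 .freeze_fs/.unfreeze_fs hooks above are invoked by the generic freeze code when userspace freezes the filesystem: freezing writes the superblock with EXT2_VALID_FS set (unless open-but-unlinked inodes would make that a lie), so a block-level snapshot of the frozen device looks cleanly unmounted, and thawing writes it again to mark the filesystem as mounted. A minimal, hedged userspace example of driving that path with the FIFREEZE/FITHAW ioctls; the mount point is hypothetical and CAP_SYS_ADMIN is required.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* FIFREEZE, FITHAW */

int main(void)
{
	const char *mnt = "/mnt/ext2";		/* hypothetical ext2 mount point */
	int fd = open(mnt, O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FIFREEZE, 0) < 0) {	/* ends up in ext2_freeze() */
		perror("FIFREEZE");
		close(fd);
		return 1;
	}
	/* ... snapshot the underlying block device here ... */
	if (ioctl(fd, FITHAW, 0) < 0)		/* ends up in ext2_unfreeze() */
		perror("FITHAW");
	close(fd);
	return 0;
}
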
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 25cd60892116..90d901f0486b 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1813,7 +1813,7 @@ ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
brelse(bitmap_bh);
printk("ext3_count_free_blocks: stored = "E3FSBLK
", computed = "E3FSBLK", "E3FSBLK"\n",
- le32_to_cpu(es->s_free_blocks_count),
+ (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
desc_count, bitmap_count);
return bitmap_count;
#else
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
index 909d13e26560..ef9c643e8e9d 100644
--- a/fs/ext3/bitmap.c
+++ b/fs/ext3/bitmap.c
@@ -11,19 +11,9 @@
#ifdef EXT3FS_DEBUG
-static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
-
unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
{
- unsigned int i;
- unsigned long sum = 0;
-
- if (!map)
- return (0);
- for (i = 0; i < numchars; i++)
- sum += nibblemap[map->b_data[i] & 0xf] +
- nibblemap[(map->b_data[i] >> 4) & 0xf];
- return (sum);
+ return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
}
#endif /* EXT3FS_DEBUG */
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9a4a5c48b1c9..a07597307fd1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -3459,14 +3459,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index ff9bcdc5b0d5..8c892e93d8e7 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb);
/*
* Wrappers for journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
*/
handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
{
@@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
return journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext3_journal_stop(const char *where, handle_t *handle)
{
struct super_block *sb;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index cee7812cc3cf..1b5089067d01 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
return desc;
}
-static int ext4_valid_block_bitmap(struct super_block *sb,
- struct ext4_group_desc *desc,
- unsigned int block_group,
- struct buffer_head *bh)
+/*
+ * Return the block number which was discovered to be invalid, or 0 if
+ * the block bitmap is valid.
+ */
+static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
+ struct ext4_group_desc *desc,
+ unsigned int block_group,
+ struct buffer_head *bh)
{
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
- ext4_fsblk_t bitmap_blk;
+ ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
@@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
* or it has to also read the block group where the bitmaps
* are located to verify they are set.
*/
- return 1;
+ return 0;
}
group_first_block = ext4_group_first_block_no(sb, block_group);
/* check whether block bitmap block number is set */
- bitmap_blk = ext4_block_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_block_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode bitmap block number is set */
- bitmap_blk = ext4_inode_bitmap(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_bitmap(sb, desc);
+ offset = blk - group_first_block;
if (!ext4_test_bit(offset, bh->b_data))
/* bad block bitmap */
- goto err_out;
+ return blk;
/* check whether the inode table block number is set */
- bitmap_blk = ext4_inode_table(sb, desc);
- offset = bitmap_blk - group_first_block;
+ blk = ext4_inode_table(sb, desc);
+ offset = blk - group_first_block;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
offset + EXT4_SB(sb)->s_itb_per_group,
offset);
- if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group)
- /* good bitmap for inode tables */
- return 1;
-
-err_out:
- ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu",
- block_group, bitmap_blk);
+ if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group)
+ /* bad bitmap for inode tables */
+ return blk;
return 0;
}
@@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb,
unsigned int block_group,
struct buffer_head *bh)
{
+ ext4_fsblk_t blk;
+
if (buffer_verified(bh))
return;
ext4_lock_group(sb, block_group);
- if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
- ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
- EXT4_BLOCKS_PER_GROUP(sb) / 8))
- set_buffer_verified(bh);
+ blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
+ if (unlikely(blk != 0)) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
+ block_group, blk);
+ return;
+ }
+ if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
+ desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+ ext4_unlock_group(sb, block_group);
+ ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+ return;
+ }
+ set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
}
@@ -609,7 +621,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
if (bitmap_bh == NULL)
continue;
- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_BLOCKS_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
i, ext4_free_group_clusters(sb, gdp), x);
bitmap_count += x;
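
The reworked ext4_valid_block_bitmap() above separates checking from reporting: the check runs under ext4_lock_group() and only returns the offending block number (or 0 when the bitmap is fine), and ext4_validate_block_bitmap() drops the group lock before calling ext4_error(), which is far too heavyweight to run with the group spinlock held. A small hedged sketch of that "validate under the lock, report after unlocking" shape, with generic names and a pthread mutex standing in for the group lock:

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[4] = { 1, 1, 0, 1 };	/* toy "bitmap": 0 marks a corrupt entry */

/* Return the index of the first bad entry, or -1 if everything is valid. */
static int validate_locked(void)
{
	int i;

	for (i = 0; i < 4; i++)
		if (!table[i])
			return i;
	return -1;
}

static void check_group(void)
{
	int bad;

	pthread_mutex_lock(&group_lock);
	bad = validate_locked();	/* cheap check only, no reporting in here */
	pthread_mutex_unlock(&group_lock);

	if (bad >= 0)			/* report outside the lock, like the ext4_error() call */
		fprintf(stderr, "group check failed at entry %d\n", bad);
}

int main(void)
{
	check_group();
	return 0;
}
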
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index b319721da26a..5c2d1813ebe9 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -11,24 +11,11 @@
#include <linux/jbd2.h>
#include "ext4.h"
-#ifdef EXT4FS_DEBUG
-
-static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
-
-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
+unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
{
- unsigned int i, sum = 0;
-
- if (!map)
- return 0;
- for (i = 0; i < numchars; i++)
- sum += nibblemap[map->b_data[i] & 0xf] +
- nibblemap[(map->b_data[i] >> 4) & 0xf];
- return sum;
+ return numchars * BITS_PER_BYTE - memweight(bitmap, numchars);
}
-#endif /* EXT4FS_DEBUG */
-
int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
struct buffer_head *bh, int sz)
@@ -92,7 +79,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
if (provided == calculated)
return 1;
- ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
return 0;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index cfc4e01b3c83..c3411d4ce2da 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -571,6 +571,8 @@ enum {
#define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040
/* Request will not result in inode size update (user for fallocate) */
#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
+ /* Do not take i_data_sem locking in ext4_map_blocks */
+#define EXT4_GET_BLOCKS_NO_LOCK 0x0100
/*
* Flags used by ext4_free_blocks
@@ -1161,8 +1163,7 @@ struct ext4_sb_info {
unsigned long s_desc_per_block; /* Number of group descriptors per block */
ext4_group_t s_groups_count; /* Number of groups in the fs */
ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
- unsigned long s_overhead_last; /* Last calculated overhead */
- unsigned long s_blocks_last; /* Last seen block count */
+ unsigned long s_overhead; /* # of fs overhead clusters */
unsigned int s_cluster_ratio; /* Number of blocks per cluster */
unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
@@ -1314,6 +1315,8 @@ static inline struct timespec ext4_current_time(struct inode *inode)
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
+ ino == EXT4_USR_QUOTA_INO ||
+ ino == EXT4_GRP_QUOTA_INO ||
ino == EXT4_JOURNAL_INO ||
ino == EXT4_RESIZE_INO ||
(ino >= EXT4_FIRST_INO(sb) &&
@@ -1496,7 +1499,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)
+ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM|\
+ EXT4_FEATURE_RO_COMPAT_QUOTA)
/*
* Default values for user and/or group using reserved blocks
@@ -1663,10 +1667,12 @@ static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
{
struct {
struct shash_desc shash;
- char ctx[crypto_shash_descsize(sbi->s_chksum_driver)];
+ char ctx[4];
} desc;
int err;
+ BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
+
desc.shash.tfm = sbi->s_chksum_driver;
desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
@@ -1852,7 +1858,7 @@ struct mmpd_data {
# define NORET_AND noreturn,
/* bitmap.c */
-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
struct ext4_group_desc *gdp,
struct buffer_head *bh, int sz);
@@ -2037,6 +2043,7 @@ extern int ext4_group_extend(struct super_block *sb,
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
+extern int ext4_calculate_overhead(struct super_block *sb);
extern int ext4_superblock_csum_verify(struct super_block *sb,
struct ext4_super_block *es);
extern void ext4_superblock_csum_set(struct super_block *sb,
@@ -2321,15 +2328,6 @@ static inline void ext4_unlock_group(struct super_block *sb,
spin_unlock(ext4_group_lock_ptr(sb, group));
}
-static inline void ext4_mark_super_dirty(struct super_block *sb)
-{
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- ext4_superblock_csum_set(sb, es);
- if (EXT4_SB(sb)->s_journal == NULL)
- sb->s_dirt =1;
-}
-
/*
* Block validity checking
*/
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 90f7c2e84db1..bfa65b49d424 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -138,8 +138,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
}
int __ext4_handle_dirty_super(const char *where, unsigned int line,
- handle_t *handle, struct super_block *sb,
- int now)
+ handle_t *handle, struct super_block *sb)
{
struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
int err = 0;
@@ -151,11 +150,10 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
if (err)
ext4_journal_abort_handle(where, line, __func__,
bh, handle, err);
- } else if (now) {
+ } else {
ext4_superblock_csum_set(sb,
(struct ext4_super_block *)bh->b_data);
mark_buffer_dirty(bh);
- } else
- sb->s_dirt = 1;
+ }
return err;
}
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index f440e8f1841f..56d258c18303 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -87,14 +87,20 @@
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
* allocated so we need to update only data block */
-#define EXT4_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 1 : 0)
+#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
+ 1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
* but inode, sb and group updates are done only once */
-#define EXT4_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_INIT_REWRITE) : 0)
-
-#define EXT4_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
- (EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)+3+DQUOT_DEL_REWRITE) : 0)
+#define EXT4_QUOTA_INIT_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
+ (DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
+ +3+DQUOT_INIT_REWRITE) : 0)
+
+#define EXT4_QUOTA_DEL_BLOCKS(sb) ((test_opt(sb, QUOTA) ||\
+ EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) ?\
+ (DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
+ +3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
@@ -213,8 +219,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
struct buffer_head *bh);
int __ext4_handle_dirty_super(const char *where, unsigned int line,
- handle_t *handle, struct super_block *sb,
- int now);
+ handle_t *handle, struct super_block *sb);
#define ext4_journal_get_write_access(handle, bh) \
__ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
@@ -226,10 +231,8 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
#define ext4_handle_dirty_metadata(handle, inode, bh) \
__ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
(bh))
-#define ext4_handle_dirty_super_now(handle, sb) \
- __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1)
#define ext4_handle_dirty_super(handle, sb) \
- __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0)
+ __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
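
The three quota macros above all repeat the same guard: quota credits are needed when either the quota mount option is set or the filesystem carries the RO_COMPAT_QUOTA feature and therefore has hidden quota inodes. A hypothetical helper (not part of the patch; shown as it would read inside fs/ext4) states that condition once:

/* Hypothetical predicate equivalent to the test repeated in the macros above. */
static inline int ext4_journalled_quota_active(struct super_block *sb)
{
	return test_opt(sb, QUOTA) ||
	       EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA);
}
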
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 91341ec6e06a..aabbb3f53683 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -1891,11 +1891,10 @@ has_space:
nearex->ee_len = newext->ee_len;
merge:
- /* try to merge extents to the right */
+ /* try to merge extents */
if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(inode, path, nearex);
- /* try to merge extents to the left */
/* time to correct all indexes above */
err = ext4_ext_correct_indexes(handle, inode, path);
@@ -2570,10 +2569,10 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
{
struct super_block *sb = inode->i_sb;
int depth = ext_depth(inode);
- struct ext4_ext_path *path;
+ struct ext4_ext_path *path = NULL;
ext4_fsblk_t partial_cluster = 0;
handle_t *handle;
- int i, err;
+ int i = 0, err;
ext_debug("truncate since %u to %u\n", start, end);
@@ -2606,8 +2605,12 @@ again:
}
depth = ext_depth(inode);
ex = path[depth].p_ext;
- if (!ex)
+ if (!ex) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ path = NULL;
goto cont;
+ }
ee_block = le32_to_cpu(ex->ee_block);
@@ -2637,8 +2640,6 @@ again:
if (err < 0)
goto out;
}
- ext4_ext_drop_refs(path);
- kfree(path);
}
cont:
@@ -2647,19 +2648,28 @@ cont:
* after i_size and walking into the tree depth-wise.
*/
depth = ext_depth(inode);
- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
- if (path == NULL) {
- ext4_journal_stop(handle);
- return -ENOMEM;
- }
- path[0].p_depth = depth;
- path[0].p_hdr = ext_inode_hdr(inode);
+ if (path) {
+ int k = i = depth;
+ while (--k > 0)
+ path[k].p_block =
+ le16_to_cpu(path[k].p_hdr->eh_entries)+1;
+ } else {
+ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+ GFP_NOFS);
+ if (path == NULL) {
+ ext4_journal_stop(handle);
+ return -ENOMEM;
+ }
+ path[0].p_depth = depth;
+ path[0].p_hdr = ext_inode_hdr(inode);
+ i = 0;
- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
- err = -EIO;
- goto out;
+ if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+ err = -EIO;
+ goto out;
+ }
}
- i = err = 0;
+ err = 0;
while (i >= 0 && err == 0) {
if (i == depth) {
@@ -2773,8 +2783,10 @@ cont:
out:
ext4_ext_drop_refs(path);
kfree(path);
- if (err == -EAGAIN)
+ if (err == -EAGAIN) {
+ path = NULL;
goto again;
+ }
ext4_journal_stop(handle);
return err;
@@ -4420,6 +4432,8 @@ retry:
ext4_falloc_update_inode(inode, mode, new_size,
(map.m_flags & EXT4_MAP_NEW));
ext4_mark_inode_dirty(handle, inode);
+ if ((file->f_flags & O_SYNC) && ret >= max_blocks)
+ ext4_handle_sync(handle);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 782eecb57e43..3b0e3bdaabfc 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -90,11 +90,91 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
}
static ssize_t
+ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_mapping->host;
+ struct blk_plug plug;
+ int unaligned_aio = 0;
+ ssize_t ret;
+ int overwrite = 0;
+ size_t length = iov_length(iov, nr_segs);
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
+ !is_sync_kiocb(iocb))
+ unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+
+ /* Unaligned direct AIO must be serialized; see comment above */
+ if (unaligned_aio) {
+ static unsigned long unaligned_warn_time;
+
+ /* Warn about this once per day */
+ if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
+ ext4_msg(inode->i_sb, KERN_WARNING,
+ "Unaligned AIO/DIO on inode %ld by %s; "
+ "performance will be poor.",
+ inode->i_ino, current->comm);
+ mutex_lock(ext4_aio_mutex(inode));
+ ext4_aiodio_wait(inode);
+ }
+
+ BUG_ON(iocb->ki_pos != pos);
+
+ mutex_lock(&inode->i_mutex);
+ blk_start_plug(&plug);
+
+ iocb->private = &overwrite;
+
+ /* check whether we do a DIO overwrite or not */
+ if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
+ !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
+ struct ext4_map_blocks map;
+ unsigned int blkbits = inode->i_blkbits;
+ int err, len;
+
+ map.m_lblk = pos >> blkbits;
+ map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
+ - map.m_lblk;
+ len = map.m_len;
+
+ err = ext4_map_blocks(NULL, inode, &map, 0);
+ /*
+ * 'err == len' means that all of the blocks have been preallocated,
+ * whether or not they are initialized. To exclude uninitialized
+ * extents we also need to check m_flags; there are two conditions
+ * that indicate an initialized extent:
+ * 1) if we hit the extent cache, the EXT4_MAP_MAPPED flag is returned;
+ * 2) if we do a real lookup, no flags are returned.
+ * So we check both conditions.
+ */
+ if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
+ overwrite = 1;
+ }
+
+ ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ mutex_unlock(&inode->i_mutex);
+
+ if (ret > 0 || ret == -EIOCBQUEUED) {
+ ssize_t err;
+
+ err = generic_write_sync(file, pos, ret);
+ if (err < 0 && ret > 0)
+ ret = err;
+ }
+ blk_finish_plug(&plug);
+
+ if (unaligned_aio)
+ mutex_unlock(ext4_aio_mutex(inode));
+
+ return ret;
+}
+
+static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
- int unaligned_aio = 0;
ssize_t ret;
/*
@@ -114,29 +194,12 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
sbi->s_bitmap_maxbytes - pos);
}
- } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
- !is_sync_kiocb(iocb))) {
- unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
}
- /* Unaligned direct AIO must be serialized; see comment above */
- if (unaligned_aio) {
- static unsigned long unaligned_warn_time;
-
- /* Warn about this once per day */
- if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
- ext4_msg(inode->i_sb, KERN_WARNING,
- "Unaligned AIO/DIO on inode %ld by %s; "
- "performance will be poor.",
- inode->i_ino, current->comm);
- mutex_lock(ext4_aio_mutex(inode));
- ext4_aiodio_wait(inode);
- }
-
- ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
-
- if (unaligned_aio)
- mutex_unlock(ext4_aio_mutex(inode));
+ if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
+ ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+ else
+ ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
return ret;
}
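
The overwrite check inside ext4_file_dio_write() above combines four conditions before the range is mapped with ext4_map_blocks() to confirm that the blocks are already initialized. A hypothetical helper (illustration only, kernel-context code using the same helpers the hunk uses) restating those conditions:

/*
 * Hypothetical restatement of the overwrite precondition checked above:
 * dioread_nolock in effect, the AIO is aligned, no cached pages exist for
 * the file, and the write stays entirely below the current i_size.
 */
static int ext4_dio_overwrite_candidate(struct file *file, struct inode *inode,
					loff_t pos, size_t length,
					int unaligned_aio)
{
	return ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	       !file->f_mapping->nrpages &&
	       pos + length <= i_size_read(inode);
}
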
@@ -181,9 +244,21 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
path.dentry = mnt->mnt_root;
cp = d_path(&path, buf, sizeof(buf));
if (!IS_ERR(cp)) {
+ handle_t *handle;
+ int err;
+
+ handle = ext4_journal_start_sb(sb, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ err = ext4_journal_get_write_access(handle, sbi->s_sbh);
+ if (err) {
+ ext4_journal_stop(handle);
+ return err;
+ }
strlcpy(sbi->s_es->s_last_mounted, cp,
sizeof(sbi->s_es->s_last_mounted));
- ext4_mark_super_dirty(sb);
+ ext4_handle_dirty_super(handle, sb);
+ ext4_journal_stop(handle);
}
}
/*
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index d48e8b14928c..26154b81b836 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -315,7 +315,6 @@ out:
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!fatal)
fatal = err;
- ext4_mark_super_dirty(sb);
} else
ext4_error(sb, "bit already cleared for inode %lu", ino);
@@ -830,7 +829,6 @@ got:
percpu_counter_dec(&sbi->s_freeinodes_counter);
if (S_ISDIR(mode))
percpu_counter_inc(&sbi->s_dirs_counter);
- ext4_mark_super_dirty(sb);
if (sbi->s_log_groups_per_flex) {
flex_group = ext4_flex_group(sbi, group);
@@ -1054,7 +1052,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
if (!bitmap_bh)
continue;
- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_INODES_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
bitmap_count += x;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 02bc8cbe7281..dff171c3a123 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -233,6 +233,11 @@ void ext4_evict_inode(struct inode *inode)
if (is_bad_inode(inode))
goto no_delete;
+ /*
+ * Protect us against freezing - the caller of iput() need not hold any
+ * protection against it.
+ */
+ sb_start_intwrite(inode->i_sb);
handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
@@ -242,6 +247,7 @@ void ext4_evict_inode(struct inode *inode)
* cleaned up.
*/
ext4_orphan_del(NULL, inode);
+ sb_end_intwrite(inode->i_sb);
goto no_delete;
}
@@ -273,6 +279,7 @@ void ext4_evict_inode(struct inode *inode)
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
+ sb_end_intwrite(inode->i_sb);
goto no_delete;
}
}
@@ -301,6 +308,7 @@ void ext4_evict_inode(struct inode *inode)
else
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
+ sb_end_intwrite(inode->i_sb);
return;
no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
@@ -346,6 +354,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
used = ei->i_reserved_data_blocks;
}
+ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
+ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
+ "with only %d reserved metadata blocks\n", __func__,
+ inode->i_ino, ei->i_allocated_meta_blocks,
+ ei->i_reserved_meta_blocks);
+ WARN_ON(1);
+ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
+ }
+
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
@@ -544,7 +561,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
* Try to see if we can get the block without requesting a new
* file system block.
*/
- down_read((&EXT4_I(inode)->i_data_sem));
+ if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+ down_read((&EXT4_I(inode)->i_data_sem));
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
@@ -552,7 +570,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE);
}
- up_read((&EXT4_I(inode)->i_data_sem));
+ if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+ up_read((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
int ret = check_block_validity(inode, map);
@@ -1171,6 +1190,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
int ret;
+ ext4_lblk_t save_last_lblock;
+ int save_len;
+
+ /*
+ * We will charge metadata quota at writeout time; this saves
+ * us from metadata over-estimation, though we may go over by
+ * a small amount in the end. Here we just reserve for data.
+ */
+ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+ if (ret)
+ return ret;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1179,32 +1209,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
*/
repeat:
spin_lock(&ei->i_block_reservation_lock);
+ /*
+ * ext4_calc_metadata_amount() has side effects, which we have
+ * to be prepared to undo if we fail to claim space.
+ */
+ save_len = ei->i_da_metadata_calc_len;
+ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
md_needed = EXT4_NUM_B2C(sbi,
ext4_calc_metadata_amount(inode, lblock));
trace_ext4_da_reserve_space(inode, md_needed);
- spin_unlock(&ei->i_block_reservation_lock);
/*
- * We will charge metadata quota at writeout time; this saves
- * us from metadata over-estimation, though we may go over by
- * a small amount in the end. Here we just reserve for data.
- */
- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
- if (ret)
- return ret;
- /*
* We do still charge estimated metadata to the sb though;
* we cannot afford to run out of free blocks.
*/
if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ ei->i_da_metadata_calc_len = save_len;
+ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+ spin_unlock(&ei->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
+ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
- spin_lock(&ei->i_block_reservation_lock);
ei->i_reserved_data_blocks++;
ei->i_reserved_meta_blocks += md_needed;
spin_unlock(&ei->i_block_reservation_lock);
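
The reshuffled ext4_da_reserve_space() above snapshots i_da_metadata_calc_len and i_da_metadata_calc_last_lblock before calling the estimator, because ext4_calc_metadata_amount() mutates them as a side effect; if the cluster claim fails, the saved values are restored before the spinlock is dropped. A standalone sketch of that rollback pattern (all names hypothetical; the real code performs these steps under i_block_reservation_lock, which is not shown):

struct resv_state {
	long calc_len;		/* stands in for i_da_metadata_calc_len */
	long calc_last_lblk;	/* stands in for i_da_metadata_calc_last_lblock */
	long reserved;
};

static long estimate_metadata(struct resv_state *s, long lblk)
{
	s->calc_last_lblk = lblk;	/* side effects that must be undone */
	return ++s->calc_len;		/* if the claim below fails */
}

static int reserve_with_rollback(struct resv_state *s, long lblk,
				 int (*try_claim)(long need))
{
	long save_len = s->calc_len;
	long save_last = s->calc_last_lblk;
	long need = estimate_metadata(s, lblk);

	if (!try_claim(need + 1)) {
		s->calc_len = save_len;		/* undo the estimator */
		s->calc_last_lblk = save_last;
		return -1;			/* -ENOSPC in the real code */
	}
	s->reserved += need;
	return 0;
}
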
@@ -1941,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
* This function can get called via...
* - ext4_da_writepages after taking page lock (have journal handle)
* - journal_submit_inode_data_buffers (no journal handle)
- * - shrink_page_list via pdflush (no journal handle)
+ * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
* - grab_page_cache when doing write_begin (have journal handle)
*
* We don't do any block allocation in this function. If we have page with
@@ -2818,6 +2847,32 @@ static int ext4_get_block_write(struct inode *inode, sector_t iblock,
EXT4_GET_BLOCKS_IO_CREATE_EXT);
}
+static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int flags)
+{
+ handle_t *handle = ext4_journal_current_handle();
+ struct ext4_map_blocks map;
+ int ret = 0;
+
+ ext4_debug("ext4_get_block_write_nolock: inode %lu, flag %d\n",
+ inode->i_ino, flags);
+
+ flags = EXT4_GET_BLOCKS_NO_LOCK;
+
+ map.m_lblk = iblock;
+ map.m_len = bh_result->b_size >> inode->i_blkbits;
+
+ ret = ext4_map_blocks(handle, inode, &map, flags);
+ if (ret > 0) {
+ map_bh(bh_result, inode->i_sb, map.m_pblk);
+ bh_result->b_state = (bh_result->b_state & ~EXT4_MAP_FLAGS) |
+ map.m_flags;
+ bh_result->b_size = inode->i_sb->s_blocksize * map.m_len;
+ ret = 0;
+ }
+ return ret;
+}
+
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private, int ret,
bool is_async)
@@ -2966,6 +3021,18 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
loff_t final_size = offset + count;
if (rw == WRITE && final_size <= inode->i_size) {
+ int overwrite = 0;
+
+ BUG_ON(iocb->private == NULL);
+
+ /* If we do an overwrite DIO, i_mutex locking can be released */
+ overwrite = *((int *)iocb->private);
+
+ if (overwrite) {
+ down_read(&EXT4_I(inode)->i_data_sem);
+ mutex_unlock(&inode->i_mutex);
+ }
+
/*
* We could direct write to holes and fallocate.
*
@@ -2991,8 +3058,10 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
if (!is_sync_kiocb(iocb)) {
ext4_io_end_t *io_end =
ext4_init_io_end(inode, GFP_NOFS);
- if (!io_end)
- return -ENOMEM;
+ if (!io_end) {
+ ret = -ENOMEM;
+ goto retake_lock;
+ }
io_end->flag |= EXT4_IO_END_DIRECT;
iocb->private = io_end;
/*
@@ -3005,13 +3074,22 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
EXT4_I(inode)->cur_aio_dio = iocb->private;
}
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iov,
- offset, nr_segs,
- ext4_get_block_write,
- ext4_end_io_dio,
- NULL,
- DIO_LOCKING);
+ if (overwrite)
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ ext4_get_block_write_nolock,
+ ext4_end_io_dio,
+ NULL,
+ 0);
+ else
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ inode->i_sb->s_bdev, iov,
+ offset, nr_segs,
+ ext4_get_block_write,
+ ext4_end_io_dio,
+ NULL,
+ DIO_LOCKING);
if (iocb->private)
EXT4_I(inode)->cur_aio_dio = NULL;
/*
@@ -3031,7 +3109,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
ext4_free_io_end(iocb->private);
iocb->private = NULL;
- } else if (ret > 0 && ext4_test_inode_state(inode,
+ } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
int err;
/*
@@ -3044,6 +3122,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
ret = err;
ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
}
+
+ retake_lock:
+ /* take i_mutex locking again if we do an overwrite DIO */
+ if (overwrite) {
+ up_read(&EXT4_I(inode)->i_data_sem);
+ mutex_lock(&inode->i_mutex);
+ }
+
return ret;
}
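
Taken together, the file.c and inode.c hunks establish the locking handoff for the overwrite path: ext4_file_dio_write() detects the overwrite under i_mutex, ext4_ext_direct_IO() then trades i_mutex for a read hold on i_data_sem for the duration of the DIO, and EXT4_GET_BLOCKS_NO_LOCK tells ext4_map_blocks() not to take i_data_sem a second time. A schematic of the sequence (illustrative, not the literal call sites):

/*
 *   ext4_file_dio_write()              ext4_ext_direct_IO()
 *   ---------------------              --------------------
 *   mutex_lock(&inode->i_mutex);
 *   iocb->private = &overwrite;
 *   ...detect overwrite...             if (overwrite) {
 *                                          down_read(&EXT4_I(inode)->i_data_sem);
 *                                          mutex_unlock(&inode->i_mutex);
 *                                      }
 *                                      __blockdev_direct_IO(..., ext4_get_block_write_nolock, ..., 0);
 *                                        -> ext4_map_blocks(EXT4_GET_BLOCKS_NO_LOCK)
 *                                           (i_data_sem is already held for read)
 *                                      retake_lock:
 *                                          up_read(&EXT4_I(inode)->i_data_sem);
 *                                          mutex_lock(&inode->i_mutex);
 *   mutex_unlock(&inode->i_mutex);
 */
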
@@ -4034,7 +4120,7 @@ static int ext4_do_update_inode(handle_t *handle,
EXT4_SET_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
ext4_handle_sync(handle);
- err = ext4_handle_dirty_super_now(handle, sb);
+ err = ext4_handle_dirty_super(handle, sb);
}
}
raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -4503,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode,
* inode out, but prune_icache isn't a user-visible syncing function.
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
- *
- * Is this efficient/effective? Well, we're being nice to the system
- * by cleaning up our inodes proactively so they can be reaped
- * without I/O. But we are potentially leaving up to five seconds'
- * worth of inodes floating about which prune_icache wants us to
- * write out. One way to fix that would be to get prune_icache()
- * to do a write_super() to free up some memory. It has the desired
- * effect.
*/
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
@@ -4701,11 +4779,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
get_block_t *get_block;
int retries = 0;
- /*
- * This check is racy but catches the common case. We rely on
- * __block_page_mkwrite() to do a reliable check.
- */
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+ sb_start_pagefault(inode->i_sb);
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
@@ -4773,5 +4847,6 @@ retry_alloc:
out_ret:
ret = block_page_mkwrite_return(ret);
out:
+ sb_end_pagefault(inode->i_sb);
return ret;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 1cd6994fc446..8eae94771c45 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -969,7 +969,6 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
block++;
pnum = block / blocks_per_page;
- poff = block % blocks_per_page;
page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
if (!page)
return -EIO;
@@ -2077,8 +2076,9 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
struct super_block *sb = seq->private;
ext4_group_t group = (ext4_group_t) ((unsigned long) v);
int i;
- int err;
+ int err, buddy_loaded = 0;
struct ext4_buddy e4b;
+ struct ext4_group_info *grinfo;
struct sg {
struct ext4_group_info info;
ext4_grpblk_t counters[16];
@@ -2095,15 +2095,21 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
sizeof(struct ext4_group_info);
- err = ext4_mb_load_buddy(sb, group, &e4b);
- if (err) {
- seq_printf(seq, "#%-5u: I/O error\n", group);
- return 0;
+ grinfo = ext4_get_group_info(sb, group);
+ /* Load the group info in memory only if not already loaded. */
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
+ err = ext4_mb_load_buddy(sb, group, &e4b);
+ if (err) {
+ seq_printf(seq, "#%-5u: I/O error\n", group);
+ return 0;
+ }
+ buddy_loaded = 1;
}
- ext4_lock_group(sb, group);
+
memcpy(&sg, ext4_get_group_info(sb, group), i);
- ext4_unlock_group(sb, group);
- ext4_mb_unload_buddy(&e4b);
+
+ if (buddy_loaded)
+ ext4_mb_unload_buddy(&e4b);
seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg.info.bb_fragments, sg.info.bb_first_free);
@@ -2825,7 +2831,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
out_err:
- ext4_mark_super_dirty(sb);
brelse(bitmap_bh);
return err;
}
@@ -4694,7 +4699,6 @@ do_more:
put_bh(bitmap_bh);
goto do_more;
}
- ext4_mark_super_dirty(sb);
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, err);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index f99a1311e847..fe7c63f4717e 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -44,6 +44,11 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
{
struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
+ /*
+ * We protect against freezing so that we don't create dirty buffers
+ * on a frozen filesystem.
+ */
+ sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
mark_buffer_dirty(bh);
lock_buffer(bh);
@@ -51,6 +56,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
get_bh(bh);
submit_bh(WRITE_SYNC, bh);
wait_on_buffer(bh);
+ sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
return 1;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index d0d3f0e87f99..2a42cc04466f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2397,7 +2397,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
/* Insert this inode at the head of the on-disk orphan list... */
NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
- err = ext4_handle_dirty_super_now(handle, sb);
+ err = ext4_handle_dirty_super(handle, sb);
rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
if (!err)
err = rc;
@@ -2470,7 +2470,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
if (err)
goto out_brelse;
sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
- err = ext4_handle_dirty_super_now(handle, inode->i_sb);
+ err = ext4_handle_dirty_super(handle, inode->i_sb);
} else {
struct ext4_iloc iloc2;
struct inode *i_prev =
@@ -2918,8 +2918,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
cpu_to_le32(new_dir->i_ino);
BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
- retval = ext4_handle_dirty_dirent_node(handle, old_inode,
- dir_bh);
+ if (is_dx(old_inode)) {
+ retval = ext4_handle_dirty_dx_node(handle,
+ old_inode,
+ dir_bh);
+ } else {
+ retval = ext4_handle_dirty_dirent_node(handle,
+ old_inode,
+ dir_bh);
+ }
if (retval) {
ext4_std_error(old_dir->i_sb, retval);
goto end_rename;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 7ea6cbb44121..41f6ef68e2e1 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -798,7 +798,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
ext4_kvfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
- err = ext4_handle_dirty_super_now(handle, sb);
+ err = ext4_handle_dirty_super(handle, sb);
if (err)
ext4_std_error(sb, err);
@@ -1272,6 +1272,11 @@ static void ext4_update_super(struct super_block *sb,
&sbi->s_flex_groups[flex_group].free_inodes);
}
+ /*
+ * Update the fs overhead information
+ */
+ ext4_calculate_overhead(sb);
+
if (test_opt(sb, DEBUG))
printk(KERN_DEBUG "EXT4-fs: added group %u:"
"%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d8759401ecae..c6e0cb3d1f4a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -74,7 +74,6 @@ static const char *ext4_decode_error(struct super_block *sb, int errno,
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
-static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data);
@@ -327,38 +326,17 @@ static void ext4_put_nojournal(handle_t *handle)
/*
* Wrappers for jbd2_journal_start/end.
- *
- * The only special thing we need to do here is to make sure that all
- * journal_end calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- *
- * To avoid j_barrier hold in userspace when a user calls freeze(),
- * ext4 prevents a new handle from being started by s_frozen, which
- * is in an upper layer.
*/
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
journal_t *journal;
- handle_t *handle;
trace_ext4_journal_start(sb, nblocks, _RET_IP_);
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
+ WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
journal = EXT4_SB(sb)->s_journal;
- handle = ext4_journal_current_handle();
-
- /*
- * If a handle has been started, it should be allowed to
- * finish, otherwise deadlock could happen between freeze
- * and others(e.g. truncate) due to the restart of the
- * journal handle if the filesystem is forzen and active
- * handles are not stopped.
- */
- if (!handle)
- vfs_check_frozen(sb, SB_FREEZE_TRANS);
-
if (!journal)
return ext4_get_nojournal();
/*
@@ -373,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
return jbd2_journal_start(journal, nblocks);
}
-/*
- * The only special thing we need to do here is to make sure that all
- * jbd2_journal_stop calls result in the superblock being marked dirty, so
- * that sync() will call the filesystem's write_super callback if
- * appropriate.
- */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
struct super_block *sb;
@@ -896,7 +868,7 @@ static void ext4_put_super(struct super_block *sb)
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
es->s_state = cpu_to_le16(sbi->s_mount_state);
}
- if (sb->s_dirt || !(sb->s_flags & MS_RDONLY))
+ if (!(sb->s_flags & MS_RDONLY))
ext4_commit_super(sb, 1);
if (sbi->s_proc) {
@@ -976,6 +948,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
+ ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -1137,12 +1110,18 @@ static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
struct path *path);
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+ int format_id);
static int ext4_quota_off(struct super_block *sb, int type);
+static int ext4_quota_off_sysfile(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
+static int ext4_enable_quotas(struct super_block *sb);
static const struct dquot_operations ext4_quota_operations = {
.get_reserved_space = ext4_get_reserved_space,
@@ -1164,6 +1143,16 @@ static const struct quotactl_ops ext4_qctl_operations = {
.get_dqblk = dquot_get_dqblk,
.set_dqblk = dquot_set_dqblk
};
+
+static const struct quotactl_ops ext4_qctl_sysfile_operations = {
+ .quota_on_meta = ext4_quota_on_sysfile,
+ .quota_off = ext4_quota_off_sysfile,
+ .quota_sync = dquot_quota_sync,
+ .get_info = dquot_get_dqinfo,
+ .set_info = dquot_set_dqinfo,
+ .get_dqblk = dquot_get_dqblk,
+ .set_dqblk = dquot_set_dqblk
+};
#endif
static const struct super_operations ext4_sops = {
@@ -1194,7 +1183,6 @@ static const struct super_operations ext4_nojournal_sops = {
.dirty_inode = ext4_dirty_inode,
.drop_inode = ext4_drop_inode,
.evict_inode = ext4_evict_inode,
- .write_super = ext4_write_super,
.put_super = ext4_put_super,
.statfs = ext4_statfs,
.remount_fs = ext4_remount,
@@ -2661,6 +2649,16 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
"extents feature\n");
return 0;
}
+
+#ifndef CONFIG_QUOTA
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ !readonly) {
+ ext4_msg(sb, KERN_ERR,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return 0;
+ }
+#endif /* CONFIG_QUOTA */
return 1;
}
@@ -2723,6 +2721,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
sb = elr->lr_super;
ngroups = EXT4_SB(sb)->s_groups_count;
+ sb_start_write(sb);
for (group = elr->lr_next_group; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp) {
@@ -2749,6 +2748,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
elr->lr_next_sched = jiffies + elr->lr_timeout;
elr->lr_next_group = group + 1;
}
+ sb_end_write(sb);
return ret;
}
@@ -3085,6 +3085,118 @@ static int set_journal_csum_feature_set(struct super_block *sb)
return ret;
}
+/*
+ * Note: calculating the overhead so we can be compatible with
+ * historical BSD practice is quite difficult in the face of
+ * clusters/bigalloc. This is because multiple metadata blocks from
+ * different block group can end up in the same allocation cluster.
+ * Calculating the exact overhead in the face of clustered allocation
+ * requires either O(all block bitmaps) in memory or O(number of block
+ * groups**2) in time. We will still calculate the superblock for
+ * older file systems --- and if we come across a bigalloc file
+ * system with zero in s_overhead_clusters, the estimate will be close to
+ * correct, especially for very large cluster sizes --- but for newer
+ * file systems, it's better to calculate this figure once at mkfs
+ * time, and store it in the superblock. If the superblock value is
+ * present (even for non-bigalloc file systems), we will use it.
+ */
+static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ char *buf)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_desc *gdp;
+ ext4_fsblk_t first_block, last_block, b;
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ int s, j, count = 0;
+
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+ return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+ sbi->s_itb_per_group + 2);
+
+ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
+ (grp * EXT4_BLOCKS_PER_GROUP(sb));
+ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
+ for (i = 0; i < ngroups; i++) {
+ gdp = ext4_get_group_desc(sb, i, NULL);
+ b = ext4_block_bitmap(sb, gdp);
+ if (b >= first_block && b <= last_block) {
+ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+ count++;
+ }
+ b = ext4_inode_bitmap(sb, gdp);
+ if (b >= first_block && b <= last_block) {
+ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
+ count++;
+ }
+ b = ext4_inode_table(sb, gdp);
+ if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
+ for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
+ int c = EXT4_B2C(sbi, b - first_block);
+ ext4_set_bit(c, buf);
+ count++;
+ }
+ if (i != grp)
+ continue;
+ s = 0;
+ if (ext4_bg_has_super(sb, grp)) {
+ ext4_set_bit(s++, buf);
+ count++;
+ }
+ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+ count++;
+ }
+ }
+ if (!count)
+ return 0;
+ return EXT4_CLUSTERS_PER_GROUP(sb) -
+ ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
+}
+
+/*
+ * Compute the overhead and stash it in sbi->s_overhead
+ */
+int ext4_calculate_overhead(struct super_block *sb)
+{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+ ext4_fsblk_t overhead = 0;
+ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * Compute the overhead (FS structures). This is constant
+ * for a given filesystem unless the number of block groups
+ * changes so we cache the previous value until it does.
+ */
+
+ /*
+ * All of the blocks before first_data_block are overhead
+ */
+ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+
+ /*
+ * Add the overhead found in each block group
+ */
+ for (i = 0; i < ngroups; i++) {
+ int blks;
+
+ blks = count_overhead(sb, i, buf);
+ overhead += blks;
+ if (blks)
+ memset(buf, 0, PAGE_SIZE);
+ cond_resched();
+ }
+ sbi->s_overhead = overhead;
+ smp_wmb();
+ free_page((unsigned long) buf);
+ return 0;
+}
+
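
For the common non-bigalloc case the per-group overhead reduces to the first return statement in count_overhead(): superblock backup, group-descriptor blocks, inode table, plus the two bitmaps. A worked example under assumed mkfs defaults (4 KiB blocks, 256-byte inodes, 8192 inodes per group; these figures are illustrative, not taken from the patch):

/* Illustrative only: per-group overhead for a hypothetical non-bigalloc
 * group that carries a superblock backup and one group-descriptor block. */
static int example_group_overhead(void)
{
	int has_super = 1;			/* ext4_bg_has_super()	*/
	int gdb_blocks = 1;			/* ext4_bg_num_gdb()	*/
	int itb_per_group = 8192 * 256 / 4096;	/* inode table: 512 blocks */

	/* "+ 2" is the block bitmap plus the inode bitmap */
	return has_super + gdb_blocks + itb_per_group + 2;	/* = 516 */
}
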
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -3640,6 +3752,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
#ifdef CONFIG_QUOTA
sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
+
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ /* Use qctl operations for hidden quota files. */
+ sb->s_qcop = &ext4_qctl_sysfile_operations;
+ }
#endif
memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
@@ -3735,6 +3852,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
no_journal:
/*
+ * Get the # of file system overhead blocks from the
+ * superblock if present.
+ */
+ if (es->s_overhead_clusters)
+ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+ else {
+ ret = ext4_calculate_overhead(sb);
+ if (ret)
+ goto failed_mount_wq;
+ }
+
+ /*
* The maximum number of concurrent works can be high and
* concurrency isn't really necessary. Limit it to 1.
*/
@@ -3840,6 +3969,16 @@ no_journal:
} else
descr = "out journal";
+#ifdef CONFIG_QUOTA
+ /* Enable quota usage during mount. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ !(sb->s_flags & MS_RDONLY)) {
+ ret = ext4_enable_quotas(sb);
+ if (ret)
+ goto failed_mount7;
+ }
+#endif /* CONFIG_QUOTA */
+
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
*sbi->s_es->s_mount_opts ? "; " : "", orig_data);
@@ -4203,7 +4342,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
es->s_free_inodes_count =
cpu_to_le32(percpu_counter_sum_positive(
&EXT4_SB(sb)->s_freeinodes_counter));
- sb->s_dirt = 0;
BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb, es);
mark_buffer_dirty(sbh);
@@ -4286,6 +4424,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
ext4_commit_super(sb, 1);
jbd2_journal_clear_err(journal);
+ jbd2_journal_update_sb_errno(journal);
}
}
@@ -4302,21 +4441,12 @@ int ext4_force_commit(struct super_block *sb)
return 0;
journal = EXT4_SB(sb)->s_journal;
- if (journal) {
- vfs_check_frozen(sb, SB_FREEZE_TRANS);
+ if (journal)
ret = ext4_journal_force_commit(journal);
- }
return ret;
}
-static void ext4_write_super(struct super_block *sb)
-{
- lock_super(sb);
- ext4_commit_super(sb, 1);
- unlock_super(sb);
-}
-
static int ext4_sync_fs(struct super_block *sb, int wait)
{
int ret = 0;
@@ -4342,9 +4472,8 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
* gives us a chance to flush the journal completely and mark the fs clean.
*
* Note that only this function cannot bring a filesystem to be in a clean
- * state independently, because ext4 prevents a new handle from being started
- * by @sb->s_frozen, which stays in an upper layer. It thus needs help from
- * the upper layer.
+ * state independently. It relies on the upper layer to stop all data & metadata
+ * modifications.
*/
static int ext4_freeze(struct super_block *sb)
{
@@ -4371,7 +4500,7 @@ static int ext4_freeze(struct super_block *sb)
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
error = ext4_commit_super(sb, 1);
out:
- /* we rely on s_frozen to stop further updates */
+ /* we rely on the upper layer to stop further updates */
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return error;
}
@@ -4567,16 +4696,26 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal == NULL)
ext4_commit_super(sb, 1);
+ unlock_super(sb);
#ifdef CONFIG_QUOTA
/* Release old quota file names */
for (i = 0; i < MAXQUOTAS; i++)
if (old_opts.s_qf_names[i] &&
old_opts.s_qf_names[i] != sbi->s_qf_names[i])
kfree(old_opts.s_qf_names[i]);
+ if (enable_quota) {
+ if (sb_any_quota_suspended(sb))
+ dquot_resume(sb, -1);
+ else if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ err = ext4_enable_quotas(sb);
+ if (err) {
+ lock_super(sb);
+ goto restore_opts;
+ }
+ }
+ }
#endif
- unlock_super(sb);
- if (enable_quota)
- dquot_resume(sb, -1);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
@@ -4605,67 +4744,21 @@ restore_opts:
return err;
}
-/*
- * Note: calculating the overhead so we can be compatible with
- * historical BSD practice is quite difficult in the face of
- * clusters/bigalloc. This is because multiple metadata blocks from
- * different block group can end up in the same allocation cluster.
- * Calculating the exact overhead in the face of clustered allocation
- * requires either O(all block bitmaps) in memory or O(number of block
- * groups**2) in time. We will still calculate the superblock for
- * older file systems --- and if we come across with a bigalloc file
- * system with zero in s_overhead_clusters the estimate will be close to
- * correct especially for very large cluster sizes --- but for newer
- * file systems, it's better to calculate this figure once at mkfs
- * time, and store it in the superblock. If the superblock value is
- * present (even for non-bigalloc file systems), we will use it.
- */
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
- struct ext4_group_desc *gdp;
+ ext4_fsblk_t overhead = 0;
u64 fsid;
s64 bfree;
- if (test_opt(sb, MINIX_DF)) {
- sbi->s_overhead_last = 0;
- } else if (es->s_overhead_clusters) {
- sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
- } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
- ext4_group_t i, ngroups = ext4_get_groups_count(sb);
- ext4_fsblk_t overhead = 0;
-
- /*
- * Compute the overhead (FS structures). This is constant
- * for a given filesystem unless the number of block groups
- * changes so we cache the previous value until it does.
- */
-
- /*
- * All of the blocks before first_data_block are
- * overhead
- */
- overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
-
- /*
- * Add the overhead found in each block group
- */
- for (i = 0; i < ngroups; i++) {
- gdp = ext4_get_group_desc(sb, i, NULL);
- overhead += ext4_num_overhead_clusters(sb, i, gdp);
- cond_resched();
- }
- sbi->s_overhead_last = overhead;
- smp_wmb();
- sbi->s_blocks_last = ext4_blocks_count(es);
- }
+ if (!test_opt(sb, MINIX_DF))
+ overhead = sbi->s_overhead;
buf->f_type = EXT4_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = (ext4_blocks_count(es) -
- EXT4_C2B(sbi, sbi->s_overhead_last));
+ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
/* prevent underflow in case only a little free space is available */
@@ -4835,6 +4928,74 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
return dquot_quota_on(sb, type, format_id, path);
}
+static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ unsigned int flags)
+{
+ int err;
+ struct inode *qf_inode;
+ unsigned long qf_inums[MAXQUOTAS] = {
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+ };
+
+ BUG_ON(!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA));
+
+ if (!qf_inums[type])
+ return -EPERM;
+
+ qf_inode = ext4_iget(sb, qf_inums[type]);
+ if (IS_ERR(qf_inode)) {
+ ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
+ return PTR_ERR(qf_inode);
+ }
+
+ err = dquot_enable(qf_inode, type, format_id, flags);
+ iput(qf_inode);
+
+ return err;
+}
+
+/* Enable usage tracking for all quota types. */
+static int ext4_enable_quotas(struct super_block *sb)
+{
+ int type, err = 0;
+ unsigned long qf_inums[MAXQUOTAS] = {
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
+ };
+
+ sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
+ for (type = 0; type < MAXQUOTAS; type++) {
+ if (qf_inums[type]) {
+ err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
+ DQUOT_USAGE_ENABLED);
+ if (err) {
+ ext4_warning(sb,
+ "Failed to enable quota (type=%d) "
+ "tracking. Please run e2fsck to fix.",
+ type);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * quota_on function that is used when the QUOTA feature is set.
+ */
+static int ext4_quota_on_sysfile(struct super_block *sb, int type,
+ int format_id)
+{
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+ return -EINVAL;
+
+ /*
+ * USAGE was enabled at mount time. Only need to enable LIMITS now.
+ */
+ return ext4_quota_enable(sb, type, format_id, DQUOT_LIMITS_ENABLED);
+}
+
static int ext4_quota_off(struct super_block *sb, int type)
{
struct inode *inode = sb_dqopt(sb)->files[type];
@@ -4861,6 +5022,18 @@ out:
return dquot_quota_off(sb, type);
}
+/*
+ * quota_off function that is used when the QUOTA feature is set.
+ */
+static int ext4_quota_off_sysfile(struct super_block *sb, int type)
+{
+ if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
+ return -EINVAL;
+
+ /* Disable only the limits. */
+ return dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
+}
+
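
The three quota helpers above split persistent-quota state into two flag sets: usage tracking is switched on at mount and stays on, while limit enforcement is toggled by quotactl. A sketch of the lifecycle (the DQUOT_* names come from the generic quota layer; the Q_QUOTAON/Q_QUOTAOFF triggers are assumptions about the userspace entry points):

/*
 *   mount, RW + QUOTA feature   ext4_enable_quotas()      -> DQUOT_USAGE_ENABLED
 *   Q_QUOTAON                   ext4_quota_on_sysfile()   -> + DQUOT_LIMITS_ENABLED
 *   Q_QUOTAOFF                  ext4_quota_off_sysfile()  -> - DQUOT_LIMITS_ENABLED
 *                                                            (usage tracking stays on)
 */
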
/* Read data from quotafile - avoid pagecache and such because we cannot afford
* acquiring the locks... As quota files are never truncated and quota code
* itself serializes the operations (and no one else should touch the files)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e56c9ed7d6e3..2cdb98d62980 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -127,19 +127,16 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
struct ext4_xattr_header *hdr)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum, old;
old = hdr->h_checksum;
hdr->h_checksum = 0;
- if (le32_to_cpu(hdr->h_refcount) != 1) {
- block_nr = cpu_to_le64(block_nr);
- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
- sizeof(block_nr));
- } else
- csum = ei->i_csum_seed;
+ block_nr = cpu_to_le64(block_nr);
+ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+ sizeof(block_nr));
csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
EXT4_BLOCK_SIZE(inode->i_sb));
+
hdr->h_checksum = old;
return cpu_to_le32(csum);
}
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 6eaa28c98ad1..dc49ed2cbffa 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -35,6 +35,11 @@
#define FAT_MAX_UNI_CHARS ((MSDOS_SLOTS - 1) * 13 + 1)
#define FAT_MAX_UNI_SIZE (FAT_MAX_UNI_CHARS * sizeof(wchar_t))
+static inline unsigned char fat_tolower(unsigned char c)
+{
+ return ((c >= 'A') && (c <= 'Z')) ? c+32 : c;
+}
+
static inline loff_t fat_make_i_pos(struct super_block *sb,
struct buffer_head *bh,
struct msdos_dir_entry *de)
@@ -333,6 +338,124 @@ parse_long:
return 0;
}
+/**
+ * fat_parse_short - Parse MS-DOS (short) directory entry.
+ * @sb: superblock
+ * @de: directory entry to parse
+ * @name: FAT_MAX_SHORT_SIZE array in which to place extracted name
+ * @dot_hidden: Nonzero == prepend '.' to names with ATTR_HIDDEN
+ *
+ * Returns the number of characters extracted into 'name'.
+ */
+static int fat_parse_short(struct super_block *sb,
+ const struct msdos_dir_entry *de,
+ unsigned char *name, int dot_hidden)
+{
+ const struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ int isvfat = sbi->options.isvfat;
+ int nocase = sbi->options.nocase;
+ unsigned short opt_shortname = sbi->options.shortname;
+ struct nls_table *nls_disk = sbi->nls_disk;
+ wchar_t uni_name[14];
+ unsigned char c, work[MSDOS_NAME];
+ unsigned char *ptname = name;
+ int chi, chl, i, j, k;
+ int dotoffset = 0;
+ int name_len = 0, uni_len = 0;
+
+ if (!isvfat && dot_hidden && (de->attr & ATTR_HIDDEN)) {
+ *ptname++ = '.';
+ dotoffset = 1;
+ }
+
+ memcpy(work, de->name, sizeof(work));
+ /* see namei.c, msdos_format_name */
+ if (work[0] == 0x05)
+ work[0] = 0xE5;
+
+ /* Filename */
+ for (i = 0, j = 0; i < 8;) {
+ c = work[i];
+ if (!c)
+ break;
+ chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
+ &uni_name[j++], opt_shortname,
+ de->lcase & CASE_LOWER_BASE);
+ if (chl <= 1) {
+ if (!isvfat)
+ ptname[i] = nocase ? c : fat_tolower(c);
+ i++;
+ if (c != ' ') {
+ name_len = i;
+ uni_len = j;
+ }
+ } else {
+ uni_len = j;
+ if (isvfat)
+ i += min(chl, 8-i);
+ else {
+ for (chi = 0; chi < chl && i < 8; chi++, i++)
+ ptname[i] = work[i];
+ }
+ if (chl)
+ name_len = i;
+ }
+ }
+
+ i = name_len;
+ j = uni_len;
+ fat_short2uni(nls_disk, ".", 1, &uni_name[j++]);
+ if (!isvfat)
+ ptname[i] = '.';
+ i++;
+
+ /* Extension */
+ for (k = 8; k < MSDOS_NAME;) {
+ c = work[k];
+ if (!c)
+ break;
+ chl = fat_shortname2uni(nls_disk, &work[k], MSDOS_NAME - k,
+ &uni_name[j++], opt_shortname,
+ de->lcase & CASE_LOWER_EXT);
+ if (chl <= 1) {
+ k++;
+ if (!isvfat)
+ ptname[i] = nocase ? c : fat_tolower(c);
+ i++;
+ if (c != ' ') {
+ name_len = i;
+ uni_len = j;
+ }
+ } else {
+ uni_len = j;
+ if (isvfat) {
+ int offset = min(chl, MSDOS_NAME-k);
+ k += offset;
+ i += offset;
+ } else {
+ for (chi = 0; chi < chl && k < MSDOS_NAME;
+ chi++, i++, k++) {
+ ptname[i] = work[k];
+ }
+ }
+ if (chl)
+ name_len = i;
+ }
+ }
+
+ if (name_len > 0) {
+ name_len += dotoffset;
+
+ if (sbi->options.isvfat) {
+ uni_name[uni_len] = 0x0000;
+ name_len = fat_uni_to_x8(sb, uni_name, name,
+ FAT_MAX_SHORT_SIZE);
+ }
+ }
+
+ return name_len;
+}
+
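
fat_parse_short() above folds the two near-identical 8.3 decoding loops from fat_search_long() and __fat_readdir() into one place. A standalone sketch of the simplest path through that decoding, with NLS/Unicode handling, the 0x05/0xE5 fixup and the dotsOK '.' prefix omitted (the function names here are hypothetical):

#include <ctype.h>
#include <stdio.h>

/* Decode a plain-ASCII 8.3 on-disk name ("FOO     TXT") into "foo.txt". */
static int parse_short_sketch(const char raw[11], char *out)
{
	int len = 0, i, base = 8, ext = 11;

	while (base > 0 && raw[base - 1] == ' ')	/* trim base name */
		base--;
	while (ext > 8 && raw[ext - 1] == ' ')		/* trim extension */
		ext--;

	for (i = 0; i < base; i++)
		out[len++] = tolower((unsigned char)raw[i]);
	if (ext > 8) {
		out[len++] = '.';
		for (i = 8; i < ext; i++)
			out[len++] = tolower((unsigned char)raw[i]);
	}
	out[len] = '\0';
	return len;
}

int main(void)
{
	char name[13];

	parse_short_sketch("FOO     TXT", name);
	printf("%s\n", name);	/* prints: foo.txt */
	return 0;
}
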
/*
* Return values: negative -> error, 0 -> not found, positive -> found,
* value is the total amount of slots, including the shortname entry.
@@ -344,15 +467,11 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh = NULL;
struct msdos_dir_entry *de;
- struct nls_table *nls_disk = sbi->nls_disk;
unsigned char nr_slots;
- wchar_t bufuname[14];
wchar_t *unicode = NULL;
- unsigned char work[MSDOS_NAME];
unsigned char bufname[FAT_MAX_SHORT_SIZE];
- unsigned short opt_shortname = sbi->options.shortname;
loff_t cpos = 0;
- int chl, i, j, last_u, err, len;
+ int err, len;
err = -ENOENT;
while (1) {
@@ -380,47 +499,16 @@ parse_record:
goto end_of_dir;
}
- memcpy(work, de->name, sizeof(de->name));
- /* see namei.c, msdos_format_name */
- if (work[0] == 0x05)
- work[0] = 0xE5;
- for (i = 0, j = 0, last_u = 0; i < 8;) {
- if (!work[i])
- break;
- chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
- &bufuname[j++], opt_shortname,
- de->lcase & CASE_LOWER_BASE);
- if (chl <= 1) {
- if (work[i] != ' ')
- last_u = j;
- } else {
- last_u = j;
- }
- i += chl;
- }
- j = last_u;
- fat_short2uni(nls_disk, ".", 1, &bufuname[j++]);
- for (i = 8; i < MSDOS_NAME;) {
- if (!work[i])
- break;
- chl = fat_shortname2uni(nls_disk, &work[i],
- MSDOS_NAME - i,
- &bufuname[j++], opt_shortname,
- de->lcase & CASE_LOWER_EXT);
- if (chl <= 1) {
- if (work[i] != ' ')
- last_u = j;
- } else {
- last_u = j;
- }
- i += chl;
- }
- if (!last_u)
+ /* Never prepend '.' to hidden files here.
+ * That is done only for msdos mounts (and only when
+ * 'dotsOK=yes'); if we are executing here, it is in the
+ * context of a vfat mount.
+ */
+ len = fat_parse_short(sb, de, bufname, 0);
+ if (len == 0)
continue;
/* Compare shortname */
- bufuname[last_u] = 0x0000;
- len = fat_uni_to_x8(sb, bufuname, bufname, sizeof(bufname));
if (fat_name_match(sbi, name, name_len, bufname, len))
goto found;
@@ -469,20 +557,15 @@ static int __fat_readdir(struct inode *inode, struct file *filp, void *dirent,
struct msdos_sb_info *sbi = MSDOS_SB(sb);
struct buffer_head *bh;
struct msdos_dir_entry *de;
- struct nls_table *nls_disk = sbi->nls_disk;
unsigned char nr_slots;
- wchar_t bufuname[14];
wchar_t *unicode = NULL;
- unsigned char c, work[MSDOS_NAME];
- unsigned char bufname[FAT_MAX_SHORT_SIZE], *ptname = bufname;
- unsigned short opt_shortname = sbi->options.shortname;
+ unsigned char bufname[FAT_MAX_SHORT_SIZE];
int isvfat = sbi->options.isvfat;
- int nocase = sbi->options.nocase;
const char *fill_name = NULL;
unsigned long inum;
unsigned long lpos, dummy, *furrfu = &lpos;
loff_t cpos;
- int chi, chl, i, i2, j, last, last_u, dotoffset = 0, fill_len = 0;
+ int short_len = 0, fill_len = 0;
int ret = 0;
lock_super(sb);
@@ -556,74 +639,10 @@ parse_record:
}
}
- if (sbi->options.dotsOK) {
- ptname = bufname;
- dotoffset = 0;
- if (de->attr & ATTR_HIDDEN) {
- *ptname++ = '.';
- dotoffset = 1;
- }
- }
-
- memcpy(work, de->name, sizeof(de->name));
- /* see namei.c, msdos_format_name */
- if (work[0] == 0x05)
- work[0] = 0xE5;
- for (i = 0, j = 0, last = 0, last_u = 0; i < 8;) {
- if (!(c = work[i]))
- break;
- chl = fat_shortname2uni(nls_disk, &work[i], 8 - i,
- &bufuname[j++], opt_shortname,
- de->lcase & CASE_LOWER_BASE);
- if (chl <= 1) {
- ptname[i++] = (!nocase && c>='A' && c<='Z') ? c+32 : c;
- if (c != ' ') {
- last = i;
- last_u = j;
- }
- } else {
- last_u = j;
- for (chi = 0; chi < chl && i < 8; chi++) {
- ptname[i] = work[i];
- i++; last = i;
- }
- }
- }
- i = last;
- j = last_u;
- fat_short2uni(nls_disk, ".", 1, &bufuname[j++]);
- ptname[i++] = '.';
- for (i2 = 8; i2 < MSDOS_NAME;) {
- if (!(c = work[i2]))
- break;
- chl = fat_shortname2uni(nls_disk, &work[i2], MSDOS_NAME - i2,
- &bufuname[j++], opt_shortname,
- de->lcase & CASE_LOWER_EXT);
- if (chl <= 1) {
- i2++;
- ptname[i++] = (!nocase && c>='A' && c<='Z') ? c+32 : c;
- if (c != ' ') {
- last = i;
- last_u = j;
- }
- } else {
- last_u = j;
- for (chi = 0; chi < chl && i2 < MSDOS_NAME; chi++) {
- ptname[i++] = work[i2++];
- last = i;
- }
- }
- }
- if (!last)
+ short_len = fat_parse_short(sb, de, bufname, sbi->options.dotsOK);
+ if (short_len == 0)
goto record_end;
- i = last + dotoffset;
- j = last_u;
-
- if (isvfat) {
- bufuname[j] = 0x0000;
- i = fat_uni_to_x8(sb, bufuname, bufname, sizeof(bufname));
- }
if (nr_slots) {
/* hack for fat_ioctl_filldir() */
struct fat_ioctl_filldir_callback *p = dirent;
@@ -631,12 +650,12 @@ parse_record:
p->longname = fill_name;
p->long_len = fill_len;
p->shortname = bufname;
- p->short_len = i;
+ p->short_len = short_len;
fill_name = NULL;
fill_len = 0;
} else {
fill_name = bufname;
- fill_len = i;
+ fill_len = short_len;
}
start_filldir:
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index fc35c5c69136..2deeeb86f331 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -217,6 +217,21 @@ static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len)
#endif
}
+static inline int fat_get_start(const struct msdos_sb_info *sbi,
+ const struct msdos_dir_entry *de)
+{
+ int cluster = le16_to_cpu(de->start);
+ if (sbi->fat_bits == 32)
+ cluster |= (le16_to_cpu(de->starthi) << 16);
+ return cluster;
+}
+
+static inline void fat_set_start(struct msdos_dir_entry *de, int cluster)
+{
+ de->start = cpu_to_le16(cluster);
+ de->starthi = cpu_to_le16(cluster >> 16);
+}
+
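
fat_get_start()/fat_set_start() above centralize the FAT32 split of the starting cluster across the 16-bit start and starthi fields. A standalone round-trip check of the same arithmetic (endianness conversions such as le16_to_cpu are omitted; the model struct is hypothetical):

#include <assert.h>
#include <stdint.h>

/* Userspace model of the on-disk directory-entry fields. */
struct fat_dirent_model {
	uint16_t start;
	uint16_t starthi;
};

static void set_start(struct fat_dirent_model *de, uint32_t cluster)
{
	de->start   = (uint16_t)cluster;
	de->starthi = (uint16_t)(cluster >> 16);
}

static uint32_t get_start(const struct fat_dirent_model *de, int fat_bits)
{
	uint32_t cluster = de->start;

	if (fat_bits == 32)
		cluster |= (uint32_t)de->starthi << 16;
	return cluster;
}

int main(void)
{
	struct fat_dirent_model de;

	set_start(&de, 0x00123456u);
	assert(get_start(&de, 32) == 0x00123456u);	/* FAT32: full value */
	assert(get_start(&de, 16) == 0x3456u);		/* FAT12/16: low half only */
	return 0;
}
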
static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
{
#ifdef __BIG_ENDIAN
diff --git a/fs/fat/file.c b/fs/fat/file.c
index a71fe3715ee8..e007b8bd8e5e 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -43,10 +43,10 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
if (err)
goto out;
- mutex_lock(&inode->i_mutex);
err = mnt_want_write_file(file);
if (err)
- goto out_unlock_inode;
+ goto out;
+ mutex_lock(&inode->i_mutex);
/*
* ATTR_VOLUME and ATTR_DIR cannot be changed; this also
@@ -73,14 +73,14 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
/* The root directory has no attributes */
if (inode->i_ino == MSDOS_ROOT_INO && attr != ATTR_DIR) {
err = -EINVAL;
- goto out_drop_write;
+ goto out_unlock_inode;
}
if (sbi->options.sys_immutable &&
((attr | oldattr) & ATTR_SYS) &&
!capable(CAP_LINUX_IMMUTABLE)) {
err = -EPERM;
- goto out_drop_write;
+ goto out_unlock_inode;
}
/*
@@ -90,12 +90,12 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
*/
err = security_inode_setattr(file->f_path.dentry, &ia);
if (err)
- goto out_drop_write;
+ goto out_unlock_inode;
/* This MUST be done before doing anything irreversible... */
err = fat_setattr(file->f_path.dentry, &ia);
if (err)
- goto out_drop_write;
+ goto out_unlock_inode;
fsnotify_change(file->f_path.dentry, ia.ia_valid);
if (sbi->options.sys_immutable) {
@@ -107,10 +107,9 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
fat_save_attrs(inode, attr);
mark_inode_dirty(inode);
-out_drop_write:
- mnt_drop_write_file(file);
out_unlock_inode:
mutex_unlock(&inode->i_mutex);
+ mnt_drop_write_file(file);
out:
return err;
}
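The fat_ioctl_set_attributes() hunks above reorder the setup so write access is taken before i_mutex, and the error labels are adjusted so everything is still released in reverse acquisition order. The goto-unwind pattern in isolation, with pthread mutexes standing in for the kernel primitives (purely illustrative; the names below are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t write_access = PTHREAD_MUTEX_INITIALIZER; /* ~ mnt_want_write_file() */
static pthread_mutex_t inode_mutex  = PTHREAD_MUTEX_INITIALIZER; /* ~ inode->i_mutex        */

/* Take the locks in one order, release them in exact reverse order via
 * fall-through labels, so every early exit drops only what it took. */
static int do_ioctl_like_op(int fail_midway)
{
    int err = 0;

    if (pthread_mutex_lock(&write_access))
        return -1;
    if (pthread_mutex_lock(&inode_mutex)) {
        err = -1;
        goto out_drop_write;
    }

    if (fail_midway) {          /* e.g. a permission or validation failure */
        err = -1;
        goto out_unlock_inode;
    }
    /* ... the actual attribute update would go here ... */

out_unlock_inode:
    pthread_mutex_unlock(&inode_mutex);
out_drop_write:
    pthread_mutex_unlock(&write_access);
    return err;
}

int main(void)
{
    printf("ok path: %d, error path: %d\n",
           do_ioctl_like_op(0), do_ioctl_like_op(1));
    return 0;
}

Compile with -pthread.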
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 0038b32cb362..05e897fe9866 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -369,10 +369,7 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_op = sbi->dir_ops;
inode->i_fop = &fat_dir_operations;
- MSDOS_I(inode)->i_start = le16_to_cpu(de->start);
- if (sbi->fat_bits == 32)
- MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16);
-
+ MSDOS_I(inode)->i_start = fat_get_start(sbi, de);
MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
error = fat_calc_dir_size(inode);
if (error < 0)
@@ -385,9 +382,7 @@ static int fat_fill_inode(struct inode *inode, struct msdos_dir_entry *de)
inode->i_mode = fat_make_mode(sbi, de->attr,
((sbi->options.showexec && !is_exec(de->name + 8))
? S_IRUGO|S_IWUGO : S_IRWXUGO));
- MSDOS_I(inode)->i_start = le16_to_cpu(de->start);
- if (sbi->fat_bits == 32)
- MSDOS_I(inode)->i_start |= (le16_to_cpu(de->starthi) << 16);
+ MSDOS_I(inode)->i_start = fat_get_start(sbi, de);
MSDOS_I(inode)->i_logstart = MSDOS_I(inode)->i_start;
inode->i_size = le32_to_cpu(de->size);
@@ -613,8 +608,7 @@ retry:
else
raw_entry->size = cpu_to_le32(inode->i_size);
raw_entry->attr = fat_make_attrs(inode);
- raw_entry->start = cpu_to_le16(MSDOS_I(inode)->i_logstart);
- raw_entry->starthi = cpu_to_le16(MSDOS_I(inode)->i_logstart >> 16);
+ fat_set_start(raw_entry, MSDOS_I(inode)->i_logstart);
fat_time_unix2fat(sbi, &inode->i_mtime, &raw_entry->time,
&raw_entry->date, NULL);
if (sbi->options.isvfat) {
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 70d993a93805..b0e12bf9f4a1 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -246,8 +246,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
de.ctime_cs = 0;
de.time = time;
de.date = date;
- de.start = cpu_to_le16(cluster);
- de.starthi = cpu_to_le16(cluster >> 16);
+ fat_set_start(&de, cluster);
de.size = 0;
err = fat_add_entries(dir, &de, 1, sinfo);
@@ -530,9 +529,7 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
mark_inode_dirty(old_inode);
if (update_dotdot) {
- int start = MSDOS_I(new_dir)->i_logstart;
- dotdot_de->start = cpu_to_le16(start);
- dotdot_de->starthi = cpu_to_le16(start >> 16);
+ fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
@@ -572,9 +569,7 @@ error_dotdot:
corrupt = 1;
if (update_dotdot) {
- int start = MSDOS_I(old_dir)->i_logstart;
- dotdot_de->start = cpu_to_le16(start);
- dotdot_de->starthi = cpu_to_le16(start >> 16);
+ fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6cc480652433..6a6d8c0715a1 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -651,8 +651,7 @@ shortname:
de->time = de->ctime = time;
de->date = de->cdate = de->adate = date;
de->ctime_cs = time_cs;
- de->start = cpu_to_le16(cluster);
- de->starthi = cpu_to_le16(cluster >> 16);
+ fat_set_start(de, cluster);
de->size = 0;
out_free:
__putname(uname);
@@ -965,9 +964,7 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
mark_inode_dirty(old_inode);
if (update_dotdot) {
- int start = MSDOS_I(new_dir)->i_logstart;
- dotdot_de->start = cpu_to_le16(start);
- dotdot_de->starthi = cpu_to_le16(start >> 16);
+ fat_set_start(dotdot_de, MSDOS_I(new_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
@@ -1009,9 +1006,7 @@ error_dotdot:
corrupt = 1;
if (update_dotdot) {
- int start = MSDOS_I(old_dir)->i_logstart;
- dotdot_de->start = cpu_to_le16(start);
- dotdot_de->starthi = cpu_to_le16(start >> 16);
+ fat_set_start(dotdot_de, MSDOS_I(old_dir)->i_logstart);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 81b70e665bf0..887b5ba8c9b5 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -20,6 +20,7 @@
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
#include <asm/poll.h>
#include <asm/siginfo.h>
@@ -340,6 +341,31 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
return ret;
}
+#ifdef CONFIG_CHECKPOINT_RESTORE
+static int f_getowner_uids(struct file *filp, unsigned long arg)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ uid_t * __user dst = (void * __user)arg;
+ uid_t src[2];
+ int err;
+
+ read_lock(&filp->f_owner.lock);
+ src[0] = from_kuid(user_ns, filp->f_owner.uid);
+ src[1] = from_kuid(user_ns, filp->f_owner.euid);
+ read_unlock(&filp->f_owner.lock);
+
+ err = put_user(src[0], &dst[0]);
+ err |= put_user(src[1], &dst[1]);
+
+ return err;
+}
+#else
+static int f_getowner_uids(struct file *filp, unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif
+
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
@@ -396,6 +422,9 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
case F_SETOWN_EX:
err = f_setown_ex(filp, arg);
break;
+ case F_GETOWNER_UIDS:
+ err = f_getowner_uids(filp, arg);
+ break;
case F_GETSIG:
err = filp->f_owner.signum;
break;
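The new F_GETOWNER_UIDS command above copies the file owner's uid/euid pair out as two uid_t values, translated into the caller's user namespace; without CONFIG_CHECKPOINT_RESTORE it simply returns -EINVAL. A hedged userspace sketch of calling it (the fallback value 17 is what linux/fcntl.h defines around the time of this patch, but verify it against your own headers):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

#ifndef F_GETOWNER_UIDS
#define F_GETOWNER_UIDS 17   /* assumption: taken from linux/fcntl.h, check locally */
#endif

int main(void)
{
    uid_t uids[2];

    /* The kernel fills uids[0] with f_owner.uid and uids[1] with
     * f_owner.euid for the file the descriptor refers to. */
    if (fcntl(STDIN_FILENO, F_GETOWNER_UIDS, uids) == -1) {
        perror("fcntl(F_GETOWNER_UIDS)");  /* EINVAL without CONFIG_CHECKPOINT_RESTORE */
        return 1;
    }
    printf("owner uid=%u euid=%u\n", (unsigned)uids[0], (unsigned)uids[1]);
    return 0;
}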
diff --git a/fs/file_table.c b/fs/file_table.c
index b3fc4d67a26b..701985e4ccda 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -43,7 +43,7 @@ static struct kmem_cache *filp_cachep __read_mostly;
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
-static inline void file_free_rcu(struct rcu_head *head)
+static void file_free_rcu(struct rcu_head *head)
{
struct file *f = container_of(head, struct file, f_u.fu_rcuhead);
@@ -217,7 +217,7 @@ static void drop_file_write_access(struct file *file)
return;
if (file_check_writeable(file) != 0)
return;
- mnt_drop_write(mnt);
+ __mnt_drop_write(mnt);
file_release_write(file);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8f660dd6137a..be3efc4f64f4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -52,11 +52,6 @@ struct wb_writeback_work {
struct completion *done; /* set if the caller waits */
};
-/*
- * We don't actually have pdflush, but this one is exported though /proc...
- */
-int nr_pdflush_threads;
-
/**
* writeback_in_progress - determine whether there is writeback in progress
* @bdi: the device's backing_dev_info structure.
@@ -628,8 +623,8 @@ static long writeback_sb_inodes(struct super_block *sb,
}
/*
- * Don't bother with new inodes or inodes beeing freed, first
- * kind does not need peridic writeout yet, and for the latter
+ * Don't bother with new inodes or inodes being freed, first
+ * kind does not need periodic writeout yet, and for the latter
* kind writeout is handled by the freer.
*/
spin_lock(&inode->i_lock);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8964cf3999b2..324bc0850534 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -383,6 +383,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_entry_out outentry;
struct fuse_file *ff;
+ /* Userspace expects S_IFREG in create mode */
+ BUG_ON((mode & S_IFMT) != S_IFREG);
+
forget = fuse_alloc_forget();
err = -ENOMEM;
if (!forget)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b321a688cde7..aba15f1b7ad2 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
- if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
+ /*
+ * In auto invalidate mode, always update attributes on read.
+ * Otherwise, only update if we attempt to read past EOF (to ensure
+ * i_size is up to date).
+ */
+ if (fc->auto_inval_data ||
+ (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
int err;
- /*
- * If trying to read past EOF, make sure the i_size
- * attribute is up-to-date.
- */
err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
if (err)
return err;
@@ -944,9 +947,8 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
return err;
count = ocount;
-
+ sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
@@ -1004,6 +1006,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
out:
current->backing_dev_info = NULL;
mutex_unlock(&inode->i_mutex);
+ sb_end_write(inode->i_sb);
return written ? written : err;
}
@@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
- for (n = 0; n < count; n++) {
+ for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 771fb6322c07..e24dd74e3068 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -484,6 +484,9 @@ struct fuse_conn {
/** Is fallocate not implemented by fs? */
unsigned no_fallocate:1;
+ /** Use enhanced/automatic page cache invalidation. */
+ unsigned auto_inval_data:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1cd61652018c..ce0a2838ccd0 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
loff_t oldsize;
+ struct timespec old_mtime;
spin_lock(&fc->lock);
if (attr_version != 0 && fi->attr_version > attr_version) {
@@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
return;
}
+ old_mtime = inode->i_mtime;
fuse_change_attributes_common(inode, attr, attr_valid);
oldsize = inode->i_size;
i_size_write(inode, attr->size);
spin_unlock(&fc->lock);
- if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
- truncate_pagecache(inode, oldsize, attr->size);
- invalidate_inode_pages2(inode->i_mapping);
+ if (S_ISREG(inode->i_mode)) {
+ bool inval = false;
+
+ if (oldsize != attr->size) {
+ truncate_pagecache(inode, oldsize, attr->size);
+ inval = true;
+ } else if (fc->auto_inval_data) {
+ struct timespec new_mtime = {
+ .tv_sec = attr->mtime,
+ .tv_nsec = attr->mtimensec,
+ };
+
+ /*
+ * Auto inval mode also checks and invalidates if mtime
+ * has changed.
+ */
+ if (!timespec_equal(&old_mtime, &new_mtime))
+ inval = true;
+ }
+
+ if (inval)
+ invalidate_inode_pages2(inode->i_mapping);
}
}
@@ -834,6 +855,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->big_writes = 1;
if (arg->flags & FUSE_DONT_MASK)
fc->dont_mask = 1;
+ if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ fc->auto_inval_data = 1;
} else {
ra_pages = fc->max_read / PAGE_CACHE_SIZE;
fc->no_lock = 1;
@@ -859,7 +882,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
- FUSE_FLOCK_LOCKS;
+ FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+ FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA;
req->in.h.opcode = FUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(*arg);
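With FUSE_AUTO_INVAL_DATA negotiated, the fuse_change_attributes() hunk above invalidates cached pages not only when the size changes but also when the server-reported mtime moves. Reduced to a standalone predicate (a sketch; the timespec comparison is done the same way timespec_equal() does it):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool timespec_equal_(const struct timespec *a, const struct timespec *b)
{
    return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

/* Mirror of the hunk's decision: invalidate on a size change, or, in
 * auto-invalidate mode, on an mtime change reported by the server. */
static bool should_invalidate(long long oldsize, long long newsize,
                              const struct timespec *old_mtime,
                              const struct timespec *new_mtime,
                              bool auto_inval_data)
{
    if (oldsize != newsize)
        return true;
    if (auto_inval_data && !timespec_equal_(old_mtime, new_mtime))
        return true;
    return false;
}

int main(void)
{
    struct timespec t1 = { .tv_sec = 100, .tv_nsec = 0 };
    struct timespec t2 = { .tv_sec = 101, .tv_nsec = 0 };

    printf("%d %d %d\n",
           should_invalidate(4096, 8192, &t1, &t1, false),  /* size changed  */
           should_invalidate(4096, 4096, &t1, &t2, true),   /* mtime changed */
           should_invalidate(4096, 4096, &t1, &t2, false)); /* no auto-inval */
    return 0;
}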
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 9aa6af13823c..d1d791ef38de 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -373,11 +373,10 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
loff_t size;
int ret;
- /* Wait if fs is frozen. This is racy so we check again later on
- * and retry if the fs has been frozen after the page lock has
- * been acquired
- */
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+ sb_start_pagefault(inode->i_sb);
+
+ /* Update file times before taking page lock */
+ file_update_time(vma->vm_file);
ret = gfs2_rs_alloc(ip);
if (ret)
@@ -462,14 +461,9 @@ out:
gfs2_holder_uninit(&gh);
if (ret == 0) {
set_page_dirty(page);
- /* This check must be post dropping of transaction lock */
- if (inode->i_sb->s_frozen == SB_UNFROZEN) {
- wait_on_page_writeback(page);
- } else {
- ret = -EAGAIN;
- unlock_page(page);
- }
+ wait_on_page_writeback(page);
}
+ sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret);
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3a56c8d94de0..22255d96b27e 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
+	 * potentially cause a busy-wait loop from the flusher thread and kswapd
* activity, but those code paths have their own higher-level
* throttling.
*/
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index ad3e2fb763d7..adbd27875ef9 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -50,6 +50,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (revokes)
tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
sizeof(u64));
+ sb_start_intwrite(sdp->sd_vfs);
gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);
error = gfs2_glock_nq(&tr->tr_t_gh);
@@ -68,6 +69,7 @@ fail_gunlock:
gfs2_glock_dq(&tr->tr_t_gh);
fail_holder_uninit:
+ sb_end_intwrite(sdp->sd_vfs);
gfs2_holder_uninit(&tr->tr_t_gh);
kfree(tr);
@@ -116,6 +118,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
gfs2_holder_uninit(&tr->tr_t_gh);
kfree(tr);
}
+ sb_end_intwrite(sdp->sd_vfs);
return;
}
@@ -136,6 +139,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
gfs2_log_flush(sdp, NULL);
+ sb_end_intwrite(sdp->sd_vfs);
}
/**
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 5fd51a5833ff..b7ec224910c5 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -236,10 +236,10 @@ out:
* hfs_mdb_commit()
*
* Description:
- * This updates the MDB on disk (look also at hfs_write_super()).
+ * This updates the MDB on disk.
* It does not check, if the superblock has been modified, or
* if the filesystem has been mounted read-only. It is mainly
- * called by hfs_write_super() and hfs_btree_extend().
+ * called by hfs_sync_fs() and flush_mdb().
* Input Variable(s):
* struct hfs_mdb *mdb: Pointer to the hfs MDB
* int backup;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 473332098013..fdafb2d71654 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -365,7 +365,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
u64 last_fs_block, last_fs_page;
int err;
- err = -EINVAL;
+ err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto out;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e13e9bdb0bf5..8349a899912e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -416,8 +416,8 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
else
v_offset = 0;
- __unmap_hugepage_range(vma,
- vma->vm_start + v_offset, vma->vm_end, NULL);
+ unmap_hugepage_range(vma, vma->vm_start + v_offset,
+ vma->vm_end, NULL);
}
}
diff --git a/fs/inode.c b/fs/inode.c
index 775cbabd4fa5..ac8d904b3f16 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1542,18 +1542,24 @@ void touch_atime(struct path *path)
if (timespec_equal(&inode->i_atime, &now))
return;
- if (mnt_want_write(mnt))
+ if (!sb_start_write_trylock(inode->i_sb))
return;
+ if (__mnt_want_write(mnt))
+ goto skip_update;
/*
* File systems can error out when updating inodes if they need to
* allocate new space to modify an inode (such is the case for
* Btrfs), but since we touch atime while walking down the path we
* really don't care if we failed to update the atime of the file,
* so just ignore the return value.
+ * We may also fail on filesystems that have the ability to make parts
+ * of the fs read only, e.g. subvolumes in Btrfs.
*/
update_time(inode, &now, S_ATIME);
- mnt_drop_write(mnt);
+ __mnt_drop_write(mnt);
+skip_update:
+ sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);
@@ -1660,11 +1666,11 @@ int file_update_time(struct file *file)
return 0;
/* Finally allowed to write? Takes lock. */
- if (mnt_want_write_file(file))
+ if (__mnt_want_write_file(file))
return 0;
ret = update_time(inode, &now, sync_it);
- mnt_drop_write_file(file);
+ __mnt_drop_write_file(file);
return ret;
}
diff --git a/fs/internal.h b/fs/internal.h
index a6fd56c68b11..371bcc4b1697 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -61,6 +61,10 @@ extern void __init mnt_init(void);
extern struct lglock vfsmount_lock;
+extern int __mnt_want_write(struct vfsmount *);
+extern int __mnt_want_write_file(struct file *);
+extern void __mnt_drop_write(struct vfsmount *);
+extern void __mnt_drop_write_file(struct file *);
/*
* fs_struct.c
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 425c2f2cf170..a2862339323b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
@@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal)
BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
spin_lock(&journal->j_state_lock);
+ /* Is it already empty? */
+ if (sb->s_start == 0) {
+ spin_unlock(&journal->j_state_lock);
+ return;
+ }
jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 216f4299f65e..af5280fb579b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -349,12 +349,12 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
return;
sequence = cpu_to_be32(sequence);
- addr = kmap_atomic(page, KM_USER0);
+ addr = kmap_atomic(page);
csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
sizeof(sequence));
csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
bh->b_size);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
tag->t_checksum = cpu_to_be32(csum);
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9a3c4c85594..e149b99a7ffb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
ret = 1;
} else if (journal->j_committing_transaction) {
/*
- * If ext3_write_super() recently started a commit, then we
- * have to wait for completion of that transaction
+ * If commit has been started, then we have to wait for
+ * completion of that transaction.
*/
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
@@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
* Update a journal's errno. Write updated superblock to disk waiting for IO
* to complete.
*/
-static void jbd2_journal_update_sb_errno(journal_t *journal)
+void jbd2_journal_update_sb_errno(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;
@@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
jbd2_write_superblock(journal, WRITE_SYNC);
}
+EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
/*
* Read the superblock for a given journal, performing initial
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 8392cb85bd54..05d29124c6ab 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -156,12 +156,16 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
struct nlm_rqst *call;
int status;
- nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return -ENOMEM;
nlmclnt_locks_init_private(fl, host);
+ if (!fl->fl_u.nfs_fl.owner) {
+ /* lockowner allocation has failed */
+ nlmclnt_release_call(call);
+ return -ENOMEM;
+ }
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
@@ -185,9 +189,6 @@ EXPORT_SYMBOL_GPL(nlmclnt_proc);
/*
* Allocate an NLM RPC call struct
- *
- * Note: the caller must hold a reference to host. In case of failure,
- * this reference will be released.
*/
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
@@ -199,7 +200,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
atomic_set(&call->a_count, 1);
locks_init_lock(&call->a_args.lock.fl);
locks_init_lock(&call->a_res.lock.fl);
- call->a_host = host;
+ call->a_host = nlm_get_host(host);
return call;
}
if (signalled())
@@ -207,7 +208,6 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
printk("nlm_alloc_call: failed, waiting for memory\n");
schedule_timeout_interruptible(5*HZ);
}
- nlmclnt_release_host(host);
return NULL;
}
@@ -750,7 +750,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl
dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
" Attempting to cancel lock.\n");
- req = nlm_alloc_call(nlm_get_host(host));
+ req = nlm_alloc_call(host);
if (!req)
return -ENOMEM;
req->a_flags = RPC_TASK_ASYNC;
diff --git a/fs/lockd/grace.c b/fs/lockd/grace.c
index 183cc1f0af1c..6d1ee7204c88 100644
--- a/fs/lockd/grace.c
+++ b/fs/lockd/grace.c
@@ -4,8 +4,10 @@
#include <linux/module.h>
#include <linux/lockd/bind.h>
+#include <net/net_namespace.h>
+
+#include "netns.h"
-static LIST_HEAD(grace_list);
static DEFINE_SPINLOCK(grace_lock);
/**
@@ -19,10 +21,12 @@ static DEFINE_SPINLOCK(grace_lock);
*
* This function is called to start a grace period.
*/
-void locks_start_grace(struct lock_manager *lm)
+void locks_start_grace(struct net *net, struct lock_manager *lm)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
spin_lock(&grace_lock);
- list_add(&lm->list, &grace_list);
+ list_add(&lm->list, &ln->grace_list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
@@ -52,8 +56,10 @@ EXPORT_SYMBOL_GPL(locks_end_grace);
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
-int locks_in_grace(void)
+int locks_in_grace(struct net *net)
{
- return !list_empty(&grace_list);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ return !list_empty(&ln->grace_list);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index eb75ca7c2d6e..f9b22e58f78f 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -21,6 +21,8 @@
#include <net/ipv6.h>
+#include "netns.h"
+
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
#define NLM_HOST_NRHASH 32
#define NLM_HOST_REBIND (60 * HZ)
@@ -41,11 +43,10 @@ static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH];
hlist_for_each_entry_safe((host), (pos), (next), \
(chain), h_hash)
-static unsigned long next_gc;
static unsigned long nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);
-static void nlm_gc_hosts(void);
+static void nlm_gc_hosts(struct net *net);
struct nlm_lookup_host_info {
const int server; /* search for server|client */
@@ -172,6 +173,7 @@ out:
static void nlm_destroy_host_locked(struct nlm_host *host)
{
struct rpc_clnt *clnt;
+ struct lockd_net *ln = net_generic(host->net, lockd_net_id);
dprintk("lockd: destroy host %s\n", host->h_name);
@@ -188,6 +190,7 @@ static void nlm_destroy_host_locked(struct nlm_host *host)
rpc_shutdown_client(clnt);
kfree(host);
+ ln->nrhosts--;
nrhosts--;
}
@@ -228,6 +231,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
struct hlist_node *pos;
struct nlm_host *host;
struct nsm_handle *nsm = NULL;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
(hostname ? hostname : "<none>"), version,
@@ -262,6 +266,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
goto out;
hlist_add_head(&host->h_hash, chain);
+ ln->nrhosts++;
nrhosts++;
dprintk("lockd: %s created host %s (%s)\n", __func__,
@@ -326,7 +331,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
struct nsm_handle *nsm = NULL;
struct sockaddr *src_sap = svc_daddr(rqstp);
size_t src_len = rqstp->rq_daddrlen;
- struct net *net = rqstp->rq_xprt->xpt_net;
+ struct net *net = SVC_NET(rqstp);
struct nlm_lookup_host_info ni = {
.server = 1,
.sap = svc_addr(rqstp),
@@ -337,6 +342,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
.hostname_len = hostname_len,
.net = net,
};
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
@@ -344,8 +350,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
mutex_lock(&nlm_host_mutex);
- if (time_after_eq(jiffies, next_gc))
- nlm_gc_hosts();
+ if (time_after_eq(jiffies, ln->next_gc))
+ nlm_gc_hosts(net);
chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
@@ -382,6 +388,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
memcpy(nlm_srcaddr(host), src_sap, src_len);
host->h_srcaddrlen = src_len;
hlist_add_head(&host->h_hash, chain);
+ ln->nrhosts++;
nrhosts++;
dprintk("lockd: %s created host %s (%s)\n",
@@ -565,6 +572,35 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
nsm_release(nsm);
}
+static void nlm_complain_hosts(struct net *net)
+{
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+ struct nlm_host *host;
+
+ if (net) {
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ if (ln->nrhosts == 0)
+ return;
+ printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
+ dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+ } else {
+ if (nrhosts == 0)
+ return;
+ printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
+ dprintk("lockd: %lu hosts left:\n", nrhosts);
+ }
+
+ for_each_host(host, pos, chain, nlm_server_hosts) {
+ if (net && host->net != net)
+ continue;
+ dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires, host->net);
+ }
+}
+
void
nlm_shutdown_hosts_net(struct net *net)
{
@@ -572,11 +608,10 @@ nlm_shutdown_hosts_net(struct net *net)
struct hlist_node *pos;
struct nlm_host *host;
- dprintk("lockd: shutting down host module\n");
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
- dprintk("lockd: nuking all hosts...\n");
+ dprintk("lockd: nuking all hosts in net %p...\n", net);
for_each_host(host, pos, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
@@ -588,8 +623,10 @@ nlm_shutdown_hosts_net(struct net *net)
}
/* Then, perform a garbage collection pass */
- nlm_gc_hosts();
+ nlm_gc_hosts(net);
mutex_unlock(&nlm_host_mutex);
+
+ nlm_complain_hosts(net);
}
/*
@@ -599,22 +636,8 @@ nlm_shutdown_hosts_net(struct net *net)
void
nlm_shutdown_hosts(void)
{
- struct hlist_head *chain;
- struct hlist_node *pos;
- struct nlm_host *host;
-
+ dprintk("lockd: shutting down host module\n");
nlm_shutdown_hosts_net(NULL);
-
- /* complain if any hosts are left */
- if (nrhosts != 0) {
- printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
- dprintk("lockd: %lu hosts left:\n", nrhosts);
- for_each_host(host, pos, chain, nlm_server_hosts) {
- dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires, host->net);
- }
- }
}
/*
@@ -623,30 +646,39 @@ nlm_shutdown_hosts(void)
* mark & sweep for resources held by remote clients.
*/
static void
-nlm_gc_hosts(void)
+nlm_gc_hosts(struct net *net)
{
struct hlist_head *chain;
struct hlist_node *pos, *next;
struct nlm_host *host;
- dprintk("lockd: host garbage collection\n");
- for_each_host(host, pos, chain, nlm_server_hosts)
+ dprintk("lockd: host garbage collection for net %p\n", net);
+ for_each_host(host, pos, chain, nlm_server_hosts) {
+ if (net && host->net != net)
+ continue;
host->h_inuse = 0;
+ }
/* Mark all hosts that hold locks, blocks or shares */
- nlmsvc_mark_resources();
+ nlmsvc_mark_resources(net);
for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+ if (net && host->net != net)
+ continue;
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
- "(cnt %d use %d exp %ld)\n",
+ "(cnt %d use %d exp %ld net %p)\n",
host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
+ host->h_inuse, host->h_expires, host->net);
continue;
}
nlm_destroy_host_locked(host);
}
- next_gc = jiffies + NLM_HOST_COLLECT;
+ if (net) {
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ ln->next_gc = jiffies + NLM_HOST_COLLECT;
+ }
}
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index ce227e0fbc5c..4eee248ba96e 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -1,10 +1,17 @@
#ifndef __LOCKD_NETNS_H__
#define __LOCKD_NETNS_H__
+#include <linux/fs.h>
#include <net/netns/generic.h>
struct lockd_net {
unsigned int nlmsvc_users;
+ unsigned long next_gc;
+ unsigned long nrhosts;
+
+ struct delayed_work grace_period_end;
+ struct lock_manager lockd_manager;
+ struct list_head grace_list;
};
extern int lockd_net_id;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 80938fda67e0..31a63f87b806 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -87,32 +87,36 @@ static unsigned long get_lockd_grace_period(void)
return nlm_timeout * 5 * HZ;
}
-static struct lock_manager lockd_manager = {
-};
-
-static void grace_ender(struct work_struct *not_used)
+static void grace_ender(struct work_struct *grace)
{
- locks_end_grace(&lockd_manager);
-}
+ struct delayed_work *dwork = container_of(grace, struct delayed_work,
+ work);
+ struct lockd_net *ln = container_of(dwork, struct lockd_net,
+ grace_period_end);
-static DECLARE_DELAYED_WORK(grace_period_end, grace_ender);
+ locks_end_grace(&ln->lockd_manager);
+}
-static void set_grace_period(void)
+static void set_grace_period(struct net *net)
{
unsigned long grace_period = get_lockd_grace_period();
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
- locks_start_grace(&lockd_manager);
- cancel_delayed_work_sync(&grace_period_end);
- schedule_delayed_work(&grace_period_end, grace_period);
+ locks_start_grace(net, &ln->lockd_manager);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ schedule_delayed_work(&ln->grace_period_end, grace_period);
}
static void restart_grace(void)
{
if (nlmsvc_ops) {
- cancel_delayed_work_sync(&grace_period_end);
- locks_end_grace(&lockd_manager);
+ struct net *net = &init_net;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
nlmsvc_invalidate_all();
- set_grace_period();
+ set_grace_period(net);
}
}
@@ -137,8 +141,6 @@ lockd(void *vrqstp)
nlm_timeout = LOCKD_DFLT_TIMEO;
nlmsvc_timeout = nlm_timeout * HZ;
- set_grace_period();
-
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
@@ -184,8 +186,6 @@ lockd(void *vrqstp)
svc_process(rqstp);
}
flush_signals(current);
- cancel_delayed_work_sync(&grace_period_end);
- locks_end_grace(&lockd_manager);
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
@@ -266,6 +266,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
error = make_socks(serv, net);
if (error < 0)
goto err_socks;
+ set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
return 0;
@@ -283,6 +284,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
@@ -589,6 +592,10 @@ module_param(nlm_max_connections, uint, 0644);
static int lockd_init_net(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ INIT_LIST_HEAD(&ln->grace_list);
return 0;
}
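grace_ender() above recovers its per-net data from the bare work pointer with two container_of() steps: work_struct up to delayed_work, then delayed_work up to lockd_net. The same pointer arithmetic can be shown with an offsetof-based container_of in plain C (the structure layouts below are simplified stand-ins, not the kernel ones):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct delayed_work { struct work_struct work; long timer; };
struct lockd_net { int nlmsvc_users; struct delayed_work grace_period_end; };

/* Climb from the inner work_struct back out to the enclosing lockd_net,
 * the way the patched grace_ender() does. */
static void grace_ender_like(struct work_struct *grace)
{
    struct delayed_work *dwork = container_of(grace, struct delayed_work, work);
    struct lockd_net *ln = container_of(dwork, struct lockd_net, grace_period_end);

    printf("nlmsvc_users = %d\n", ln->nlmsvc_users);
}

int main(void)
{
    struct lockd_net ln = { .nlmsvc_users = 3 };

    grace_ender_like(&ln.grace_period_end.work);
    return 0;
}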
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 9a41fdc19511..b147d1ae71fd 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -11,6 +11,7 @@
#include <linux/time.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
+#include <linux/sunrpc/svc_xprt.h>
#define NLMDBG_FACILITY NLMDBG_CLIENT
@@ -151,7 +152,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(SVC_NET(rqstp))) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -161,7 +162,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
/* Try to cancel request. */
- resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
+ resp->status = nlmsvc_cancel_blocked(SVC_NET(rqstp), file, &argp->lock);
dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
nlmsvc_release_host(host);
@@ -184,7 +185,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept new lock requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(SVC_NET(rqstp))) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -194,7 +195,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
/* Now try to remove the lock */
- resp->status = nlmsvc_unlock(file, &argp->lock);
+ resp->status = nlmsvc_unlock(SVC_NET(rqstp), file, &argp->lock);
dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
nlmsvc_release_host(host);
@@ -256,6 +257,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
return rpc_system_err;
call = nlm_alloc_call(host);
+ nlmsvc_release_host(host);
if (call == NULL)
return rpc_system_err;
@@ -321,7 +323,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept new lock requests during grace period */
- if (locks_in_grace() && !argp->reclaim) {
+ if (locks_in_grace(SVC_NET(rqstp)) && !argp->reclaim) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -354,7 +356,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(SVC_NET(rqstp))) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e46353f41a42..fb1a2bedbe97 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -26,7 +26,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svc_xprt.h>
#include <linux/lockd/nlm.h>
#include <linux/lockd/lockd.h>
#include <linux/kthread.h>
@@ -219,7 +219,6 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host,
struct nlm_block *block;
struct nlm_rqst *call = NULL;
- nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return NULL;
@@ -447,11 +446,11 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
- if (locks_in_grace() && !reclaim) {
+ if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) {
ret = nlm_lck_denied_grace_period;
goto out;
}
- if (reclaim && !locks_in_grace()) {
+ if (reclaim && !locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
@@ -559,7 +558,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
goto out;
}
- if (locks_in_grace()) {
+ if (locks_in_grace(SVC_NET(rqstp))) {
ret = nlm_lck_denied_grace_period;
goto out;
}
@@ -603,7 +602,7 @@ out:
* must be removed.
*/
__be32
-nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
+nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
int error;
@@ -615,7 +614,7 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
(long long)lock->fl.fl_end);
/* First, cancel any lock that might be there */
- nlmsvc_cancel_blocked(file, lock);
+ nlmsvc_cancel_blocked(net, file, lock);
lock->fl.fl_type = F_UNLCK;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
@@ -631,7 +630,7 @@ nlmsvc_unlock(struct nlm_file *file, struct nlm_lock *lock)
* The calling procedure must check whether the file can be closed.
*/
__be32
-nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
+nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
int status = 0;
@@ -643,7 +642,7 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end);
- if (locks_in_grace())
+ if (locks_in_grace(net))
return nlm_lck_denied_grace_period;
mutex_lock(&file->f_mutex);
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index d27aab11f324..3009a365e082 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -11,6 +11,7 @@
#include <linux/time.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
+#include <linux/sunrpc/svc_xprt.h>
#define NLMDBG_FACILITY NLMDBG_CLIENT
@@ -175,13 +176,14 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
+ struct net *net = SVC_NET(rqstp);
dprintk("lockd: CANCEL called\n");
resp->cookie = argp->cookie;
/* Don't accept requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(net)) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -191,7 +193,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
/* Try to cancel request. */
- resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
+ resp->status = cast_status(nlmsvc_cancel_blocked(net, file, &argp->lock));
dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
nlmsvc_release_host(host);
@@ -208,13 +210,14 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
{
struct nlm_host *host;
struct nlm_file *file;
+ struct net *net = SVC_NET(rqstp);
dprintk("lockd: UNLOCK called\n");
resp->cookie = argp->cookie;
/* Don't accept new lock requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(net)) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -224,7 +227,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
/* Now try to remove the lock */
- resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
+ resp->status = cast_status(nlmsvc_unlock(net, file, &argp->lock));
dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
nlmsvc_release_host(host);
@@ -294,6 +297,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args
return rpc_system_err;
call = nlm_alloc_call(host);
+ nlmsvc_release_host(host);
if (call == NULL)
return rpc_system_err;
@@ -361,7 +365,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept new lock requests during grace period */
- if (locks_in_grace() && !argp->reclaim) {
+ if (locks_in_grace(SVC_NET(rqstp)) && !argp->reclaim) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
@@ -394,7 +398,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
resp->cookie = argp->cookie;
/* Don't accept requests during grace period */
- if (locks_in_grace()) {
+ if (locks_in_grace(SVC_NET(rqstp))) {
resp->status = nlm_lck_denied_grace_period;
return rpc_success;
}
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 2240d384d787..0deb5f6c9dd4 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -309,7 +309,8 @@ nlm_release_file(struct nlm_file *file)
* Helpers function for resource traversal
*
* nlmsvc_mark_host:
- * used by the garbage collector; simply sets h_inuse.
+ *	used by the garbage collector; simply sets h_inuse only for those
+ *	hosts that pass the network check.
* Always returns 0.
*
* nlmsvc_same_host:
@@ -320,12 +321,15 @@ nlm_release_file(struct nlm_file *file)
* returns 1 iff the host is a client.
* Used by nlmsvc_invalidate_all
*/
+
static int
-nlmsvc_mark_host(void *data, struct nlm_host *dummy)
+nlmsvc_mark_host(void *data, struct nlm_host *hint)
{
struct nlm_host *host = data;
- host->h_inuse = 1;
+ if ((hint->net == NULL) ||
+ (host->net == hint->net))
+ host->h_inuse = 1;
return 0;
}
@@ -358,10 +362,13 @@ nlmsvc_is_client(void *data, struct nlm_host *dummy)
* Mark all hosts that still hold resources
*/
void
-nlmsvc_mark_resources(void)
+nlmsvc_mark_resources(struct net *net)
{
- dprintk("lockd: nlmsvc_mark_resources\n");
- nlm_traverse_files(NULL, nlmsvc_mark_host, NULL);
+ struct nlm_host hint;
+
+ dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+ hint.net = net;
+ nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
}
/*
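nlmsvc_mark_resources() above now passes a hint host whose ->net field filters which hosts the mark pass touches; a NULL net still means "mark everything". Stripped of the lockd plumbing, the mark step is roughly the following (illustrative types only):

#include <stddef.h>
#include <stdio.h>

struct net { int id; };

struct host {
    const char *name;
    const struct net *net;
    int h_inuse;
};

/* Mark phase of the GC: set h_inuse only for hosts whose network namespace
 * matches the hint; a NULL hint matches every host. */
static void mark_hosts(struct host *hosts, size_t n, const struct net *hint_net)
{
    for (size_t i = 0; i < n; i++)
        if (hint_net == NULL || hosts[i].net == hint_net)
            hosts[i].h_inuse = 1;
}

int main(void)
{
    struct net net_a = { 1 }, net_b = { 2 };
    struct host hosts[] = {
        { "alpha", &net_a, 0 },
        { "beta",  &net_b, 0 },
    };

    mark_hosts(hosts, 2, &net_a);   /* only "alpha" gets marked */
    for (size_t i = 0; i < 2; i++)
        printf("%s inuse=%d\n", hosts[i].name, hosts[i].h_inuse);
    return 0;
}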
diff --git a/fs/locks.c b/fs/locks.c
index 82c353304f9e..7e81bfc75164 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -200,11 +200,7 @@ void locks_release_private(struct file_lock *fl)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
}
- if (fl->fl_lmops) {
- if (fl->fl_lmops->lm_release_private)
- fl->fl_lmops->lm_release_private(fl);
- fl->fl_lmops = NULL;
- }
+ fl->fl_lmops = NULL;
}
EXPORT_SYMBOL_GPL(locks_release_private);
@@ -427,18 +423,8 @@ static void lease_break_callback(struct file_lock *fl)
kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
}
-static void lease_release_private_callback(struct file_lock *fl)
-{
- if (!fl->fl_file)
- return;
-
- f_delown(fl->fl_file);
- fl->fl_file->f_owner.signum = 0;
-}
-
static const struct lock_manager_operations lease_manager_ops = {
.lm_break = lease_break_callback,
- .lm_release_private = lease_release_private_callback,
.lm_change = lease_modify,
};
@@ -580,12 +566,6 @@ static void locks_delete_lock(struct file_lock **thisfl_p)
fl->fl_next = NULL;
list_del_init(&fl->fl_link);
- fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
- if (fl->fl_fasync != NULL) {
- printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
- fl->fl_fasync = NULL;
- }
-
if (fl->fl_nspid) {
put_pid(fl->fl_nspid);
fl->fl_nspid = NULL;
@@ -1155,8 +1135,18 @@ int lease_modify(struct file_lock **before, int arg)
return error;
lease_clear_pending(fl, arg);
locks_wake_up_blocks(fl);
- if (arg == F_UNLCK)
+ if (arg == F_UNLCK) {
+ struct file *filp = fl->fl_file;
+
+ f_delown(filp);
+ filp->f_owner.signum = 0;
+ fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
+ if (fl->fl_fasync != NULL) {
+ printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
+ fl->fl_fasync = NULL;
+ }
locks_delete_lock(before);
+ }
return 0;
}
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index df0de27c2733..e784a217b500 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
struct completion complete;
bio_init(&bio);
+ bio.bi_max_vecs = 1;
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = PAGE_SIZE;
@@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
struct address_space *mapping = super->s_mapping_inode->i_mapping;
struct bio *bio;
struct page *page;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+ unsigned int max_pages;
int i;
- if (max_pages > BIO_MAX_PAGES)
- max_pages = BIO_MAX_PAGES;
+ max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
bio = bio_alloc(GFP_NOFS, max_pages);
BUG_ON(!bio);
@@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
{
struct logfs_super *super = logfs_super(sb);
struct bio *bio;
- struct request_queue *q = bdev_get_queue(sb->s_bdev);
- unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+ unsigned int max_pages;
int i;
- if (max_pages > BIO_MAX_PAGES)
- max_pages = BIO_MAX_PAGES;
+ max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+
bio = bio_alloc(GFP_NOFS, max_pages);
BUG_ON(!bio);
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
index a422f42238b2..6984562738d3 100644
--- a/fs/logfs/inode.c
+++ b/fs/logfs/inode.c
@@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, logfs_i_callback);
}
+static void __logfs_destroy_meta_inode(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ BUG_ON(li->li_block);
+ call_rcu(&inode->i_rcu, logfs_i_callback);
+}
+
static void logfs_destroy_inode(struct inode *inode)
{
struct logfs_inode *li = logfs_inode(inode);
+ if (inode->i_ino < LOGFS_RESERVED_INOS) {
+ /*
+ * The reserved inodes are never destroyed unless we are in
+		 * unmount path.
+ */
+ __logfs_destroy_meta_inode(inode);
+ return;
+ }
+
BUG_ON(list_empty(&li->li_freeing_list));
spin_lock(&logfs_inode_lock);
li->li_refcount--;
@@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
/* kill the meta-inodes */
- iput(super->s_master_inode);
iput(super->s_segfile_inode);
+ iput(super->s_master_inode);
iput(super->s_mapping_inode);
}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 1e1c369df22b..2a09b8d73989 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area,
index = ofs >> PAGE_SHIFT;
page_ofs = ofs & (PAGE_SIZE - 1);
- page = find_lock_page(mapping, index);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
BUG_ON(!page);
memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
unlock_page(page);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index f1cb512c5019..5be0abef603d 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode)
return;
}
- BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
page = inode_to_page(inode);
BUG_ON(!page); /* FIXME: Use emergency page */
logfs_put_write_page(page);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index e28d090c98d6..038da0991794 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb)
static void map_invalidatepage(struct page *page, unsigned long l)
{
- BUG();
+ return;
}
static int map_releasepage(struct page *page, gfp_t g)
diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c
index 13487ad16894..78e2d93e5c83 100644
--- a/fs/minix/itree_v2.c
+++ b/fs/minix/itree_v2.c
@@ -32,7 +32,8 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
if (block < 0) {
printk("MINIX-fs: block_to_path: block %ld < 0 on dev %s\n",
block, bdevname(sb->s_bdev, b));
- } else if (block >= (minix_sb(inode->i_sb)->s_max_size/sb->s_blocksize)) {
+ } else if ((u64)block * (u64)sb->s_blocksize >=
+ minix_sb(sb)->s_max_size) {
if (printk_ratelimit())
printk("MINIX-fs: block_to_path: "
"block %ld too big on dev %s\n",
diff --git a/fs/namei.c b/fs/namei.c
index 2ccc35c4dc24..dd1ed1b8e98e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask)
/**
* sb_permission - Check superblock-level permissions
* @sb: Superblock of inode to check permission on
+ * @inode: Inode to check permission on
* @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
* Separate out file-system wide checks from inode-specific permission checks.
@@ -650,6 +651,122 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki
path_put(link);
}
+int sysctl_protected_symlinks __read_mostly = 1;
+int sysctl_protected_hardlinks __read_mostly = 1;
+
+/**
+ * may_follow_link - Check symlink following for unsafe situations
+ * @link: The path of the symlink
+ * @nd: nameidata pathwalk data
+ *
+ * In the case of the sysctl_protected_symlinks sysctl being enabled,
+ * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
+ * in a sticky world-writable directory. This is to protect privileged
+ * processes from failing races against path names that may change out
+ * from under them by way of other users creating malicious symlinks.
+ * It will permit symlinks to be followed only when outside a sticky
+ * world-writable directory, or when the uid of the symlink and follower
+ * match, or when the directory owner matches the symlink's owner.
+ *
+ * Returns 0 if following the symlink is allowed, -ve on error.
+ */
+static inline int may_follow_link(struct path *link, struct nameidata *nd)
+{
+ const struct inode *inode;
+ const struct inode *parent;
+
+ if (!sysctl_protected_symlinks)
+ return 0;
+
+ /* Allowed if owner and follower match. */
+ inode = link->dentry->d_inode;
+ if (current_cred()->fsuid == inode->i_uid)
+ return 0;
+
+ /* Allowed if parent directory not sticky and world-writable. */
+ parent = nd->path.dentry->d_inode;
+ if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
+ return 0;
+
+ /* Allowed if parent directory and link owner match. */
+ if (parent->i_uid == inode->i_uid)
+ return 0;
+
+ path_put_conditional(link, nd);
+ path_put(&nd->path);
+ audit_log_link_denied("follow_link", link);
+ return -EACCES;
+}
+
+/**
+ * safe_hardlink_source - Check for safe hardlink conditions
+ * @inode: the source inode to hardlink from
+ *
+ * Return false if at least one of the following conditions holds:
+ * - inode is not a regular file
+ * - inode is setuid
+ * - inode is setgid and group-exec
+ * - access failure for read and write
+ *
+ * Otherwise returns true.
+ */
+static bool safe_hardlink_source(struct inode *inode)
+{
+ umode_t mode = inode->i_mode;
+
+ /* Special files should not get pinned to the filesystem. */
+ if (!S_ISREG(mode))
+ return false;
+
+ /* Setuid files should not get pinned to the filesystem. */
+ if (mode & S_ISUID)
+ return false;
+
+ /* Executable setgid files should not get pinned to the filesystem. */
+ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
+ return false;
+
+ /* Hardlinking to unreadable or unwritable sources is dangerous. */
+ if (inode_permission(inode, MAY_READ | MAY_WRITE))
+ return false;
+
+ return true;
+}
+
+/**
+ * may_linkat - Check permissions for creating a hardlink
+ * @link: the source to hardlink from
+ *
+ * Block hardlink when all of:
+ * - sysctl_protected_hardlinks enabled
+ * - fsuid does not match inode
+ * - hardlink source is unsafe (see safe_hardlink_source() above)
+ * - not CAP_FOWNER
+ *
+ * Returns 0 if successful, -ve on error.
+ */
+static int may_linkat(struct path *link)
+{
+ const struct cred *cred;
+ struct inode *inode;
+
+ if (!sysctl_protected_hardlinks)
+ return 0;
+
+ cred = current_cred();
+ inode = link->dentry->d_inode;
+
+ /* Source inode owner (or CAP_FOWNER) can hardlink all they like,
+ * otherwise, it must be a safe source.
+ */
+ if (cred->fsuid == inode->i_uid || safe_hardlink_source(inode) ||
+ capable(CAP_FOWNER))
+ return 0;
+
+ audit_log_link_denied("linkat", link);
+ return -EPERM;
+}
+
static __always_inline int
follow_link(struct path *link, struct nameidata *nd, void **p)
{
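may_follow_link() above only denies traversal when protected_symlinks is enabled, the link sits in a sticky world-writable directory, and neither the follower nor the directory owner owns the link. The policy can be expressed as a small standalone predicate over stat data (userspace sketch; the kernel works on inodes and credentials rather than struct stat):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Returns true when following the symlink would be permitted under the
 * protected_symlinks policy introduced by this patch. */
static bool symlink_follow_allowed(bool protected_symlinks, uid_t fsuid,
                                   const struct stat *link_st,
                                   const struct stat *dir_st)
{
    if (!protected_symlinks)
        return true;
    if (fsuid == link_st->st_uid)                  /* follower owns the link */
        return true;
    if ((dir_st->st_mode & (S_ISVTX | S_IWOTH)) != (S_ISVTX | S_IWOTH))
        return true;                               /* dir not sticky + world-writable */
    if (dir_st->st_uid == link_st->st_uid)         /* directory owner owns the link */
        return true;
    return false;                                  /* kernel returns -EACCES here */
}

int main(void)
{
    struct stat link_st = { .st_uid = 1000 };
    struct stat dir_st  = { .st_uid = 0,
                            .st_mode = S_IFDIR | S_ISVTX | S_IWOTH | 0777 };

    printf("owner following own link in a /tmp-like dir: %d\n",
           symlink_follow_allowed(true, 1000, &link_st, &dir_st));
    printf("someone else following that link there:      %d\n",
           symlink_follow_allowed(true, 2000, &link_st, &dir_st));
    return 0;
}

The companion may_linkat()/safe_hardlink_source() checks follow the same shape for hardlinks: allowed when the protection is off, when the caller owns the source or has CAP_FOWNER, or when the source is a plain, non-setuid, readable-and-writable regular file.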
@@ -1818,6 +1935,9 @@ static int path_lookupat(int dfd, const char *name,
while (err > 0) {
void *cookie;
struct path link = path;
+ err = may_follow_link(&link, nd);
+ if (unlikely(err))
+ break;
nd->flags |= LOOKUP_PARENT;
err = follow_link(&link, nd, &cookie);
if (err)
@@ -2277,7 +2397,7 @@ static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
struct path *path, struct file *file,
const struct open_flags *op,
- bool *want_write, bool need_lookup,
+ bool got_write, bool need_lookup,
int *opened)
{
struct inode *dir = nd->path.dentry->d_inode;
@@ -2296,11 +2416,11 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
goto out;
}
- mode = op->mode & S_IALLUGO;
+ mode = op->mode;
if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
mode &= ~current_umask();
- if (open_flag & O_EXCL) {
+ if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT)) {
open_flag &= ~O_TRUNC;
*opened |= FILE_CREATED;
}
@@ -2314,12 +2434,9 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
	 * Another problem is returning the "right" error value (e.g. for an
* O_EXCL open we want to return EEXIST not EROFS).
*/
- if ((open_flag & (O_CREAT | O_TRUNC)) ||
- (open_flag & O_ACCMODE) != O_RDONLY) {
- error = mnt_want_write(nd->path.mnt);
- if (!error) {
- *want_write = true;
- } else if (!(open_flag & O_CREAT)) {
+ if (((open_flag & (O_CREAT | O_TRUNC)) ||
+ (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
+ if (!(open_flag & O_CREAT)) {
/*
* No O_CREATE -> atomicity not a requirement -> fall
* back to lookup + open
@@ -2327,17 +2444,17 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
goto no_open;
} else if (open_flag & (O_EXCL | O_TRUNC)) {
/* Fall back and fail with the right error */
- create_error = error;
+ create_error = -EROFS;
goto no_open;
} else {
/* No side effects, safe to clear O_CREAT */
- create_error = error;
+ create_error = -EROFS;
open_flag &= ~O_CREAT;
}
}
if (open_flag & O_CREAT) {
- error = may_o_create(&nd->path, dentry, op->mode);
+ error = may_o_create(&nd->path, dentry, mode);
if (error) {
create_error = error;
if (open_flag & O_EXCL)
@@ -2374,6 +2491,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry,
dput(dentry);
dentry = file->f_path.dentry;
}
+ if (create_error && dentry->d_inode == NULL) {
+ error = create_error;
+ goto out;
+ }
goto looked_up;
}
@@ -2438,7 +2559,7 @@ looked_up:
static int lookup_open(struct nameidata *nd, struct path *path,
struct file *file,
const struct open_flags *op,
- bool *want_write, int *opened)
+ bool got_write, int *opened)
{
struct dentry *dir = nd->path.dentry;
struct inode *dir_inode = dir->d_inode;
@@ -2456,7 +2577,7 @@ static int lookup_open(struct nameidata *nd, struct path *path,
goto out_no_open;
if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
- return atomic_open(nd, dentry, path, file, op, want_write,
+ return atomic_open(nd, dentry, path, file, op, got_write,
need_lookup, opened);
}
@@ -2480,10 +2601,10 @@ static int lookup_open(struct nameidata *nd, struct path *path,
* a permanent write count is taken through
* the 'struct file' in finish_open().
*/
- error = mnt_want_write(nd->path.mnt);
- if (error)
+ if (!got_write) {
+ error = -EROFS;
goto out_dput;
- *want_write = true;
+ }
*opened |= FILE_CREATED;
error = security_path_mknod(&nd->path, dentry, mode, 0);
if (error)
@@ -2513,7 +2634,7 @@ static int do_last(struct nameidata *nd, struct path *path,
struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
bool will_truncate = (open_flag & O_TRUNC) != 0;
- bool want_write = false;
+ bool got_write = false;
int acc_mode = op->acc_mode;
struct inode *inode;
bool symlink_ok = false;
@@ -2582,8 +2703,18 @@ static int do_last(struct nameidata *nd, struct path *path,
}
retry_lookup:
+ if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
+ error = mnt_want_write(nd->path.mnt);
+ if (!error)
+ got_write = true;
+ /*
+ * do _not_ fail yet - we might not need that or fail with
+ * a different error; let lookup_open() decide; we'll be
+ * dropping this one anyway.
+ */
+ }
mutex_lock(&dir->d_inode->i_mutex);
- error = lookup_open(nd, path, file, op, &want_write, opened);
+ error = lookup_open(nd, path, file, op, got_write, opened);
mutex_unlock(&dir->d_inode->i_mutex);
if (error <= 0) {
@@ -2608,22 +2739,23 @@ retry_lookup:
}
/*
- * It already exists.
+ * create/update audit record if it already exists.
*/
- audit_inode(pathname, path->dentry);
+ if (path->dentry->d_inode)
+ audit_inode(pathname, path->dentry);
/*
* If atomic_open() acquired write access it is dropped now due to
* possible mount and symlink following (this might be optimized away if
* necessary...)
*/
- if (want_write) {
+ if (got_write) {
mnt_drop_write(nd->path.mnt);
- want_write = false;
+ got_write = false;
}
error = -EEXIST;
- if (open_flag & O_EXCL)
+ if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
goto exit_dput;
error = follow_managed(path, nd->flags);
@@ -2684,7 +2816,7 @@ finish_open:
error = mnt_want_write(nd->path.mnt);
if (error)
goto out;
- want_write = true;
+ got_write = true;
}
finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
@@ -2711,7 +2843,7 @@ opened:
goto exit_fput;
}
out:
- if (want_write)
+ if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
terminate_walk(nd);
@@ -2735,9 +2867,9 @@ stale_open:
nd->inode = dir->d_inode;
save_parent.mnt = NULL;
save_parent.dentry = NULL;
- if (want_write) {
+ if (got_write) {
mnt_drop_write(nd->path.mnt);
- want_write = false;
+ got_write = false;
}
retried = true;
goto retry_lookup;
@@ -2777,6 +2909,9 @@ static struct file *path_openat(int dfd, const char *pathname,
error = -ELOOP;
break;
}
+ error = may_follow_link(&link, nd);
+ if (unlikely(error))
+ break;
nd->flags |= LOOKUP_PARENT;
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
error = follow_link(&link, nd, &cookie);
@@ -2846,6 +2981,7 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
{
struct dentry *dentry = ERR_PTR(-EEXIST);
struct nameidata nd;
+ int err2;
int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd);
if (error)
return ERR_PTR(error);
@@ -2859,16 +2995,19 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
nd.flags &= ~LOOKUP_PARENT;
nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+ /* don't fail immediately if it's r/o, at least try to report other errors */
+ err2 = mnt_want_write(nd.path.mnt);
/*
* Do the final lookup.
*/
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
if (IS_ERR(dentry))
- goto fail;
+ goto unlock;
+ error = -EEXIST;
if (dentry->d_inode)
- goto eexist;
+ goto fail;
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
@@ -2876,23 +3015,37 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
* been asking for (non-existent) directory. -ENOENT for you.
*/
if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
- dput(dentry);
- dentry = ERR_PTR(-ENOENT);
+ error = -ENOENT;
+ goto fail;
+ }
+ if (unlikely(err2)) {
+ error = err2;
goto fail;
}
*path = nd.path;
return dentry;
-eexist:
- dput(dentry);
- dentry = ERR_PTR(-EEXIST);
fail:
+ dput(dentry);
+ dentry = ERR_PTR(error);
+unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ if (!err2)
+ mnt_drop_write(nd.path.mnt);
out:
path_put(&nd.path);
return dentry;
}
EXPORT_SYMBOL(kern_path_create);
+void done_path_create(struct path *path, struct dentry *dentry)
+{
+ dput(dentry);
+ mutex_unlock(&path->dentry->d_inode->i_mutex);
+ mnt_drop_write(path->mnt);
+ path_put(path);
+}
+EXPORT_SYMBOL(done_path_create);
+
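/*
 * A minimal usage sketch of the kern_path_create()/done_path_create()
 * pairing introduced above, assuming a caller that wants to create a
 * FIFO; the path, mode and dev values are illustrative placeholders.
 * done_path_create() drops the dentry, the parent i_mutex, the mount
 * write access taken by kern_path_create() and the path reference in
 * one call:
 *
 *	struct path path;
 *	struct dentry *dentry;
 *	int err;
 *
 *	dentry = kern_path_create(AT_FDCWD, "/tmp/fifo", &path, 0);
 *	if (IS_ERR(dentry))
 *		return PTR_ERR(dentry);
 *	err = security_path_mknod(&path, dentry, S_IFIFO | 0600, 0);
 *	if (!err)
 *		err = vfs_mknod(path.dentry->d_inode, dentry, S_IFIFO | 0600, 0);
 *	done_path_create(&path, dentry);
 *	return err;
 */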
struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir)
{
char *tmp = getname(pathname);
@@ -2956,8 +3109,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
struct path path;
int error;
- if (S_ISDIR(mode))
- return -EPERM;
+ error = may_mknod(mode);
+ if (error)
+ return error;
dentry = user_path_create(dfd, filename, &path, 0);
if (IS_ERR(dentry))
@@ -2965,15 +3119,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
- error = may_mknod(mode);
- if (error)
- goto out_dput;
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
error = security_path_mknod(&path, dentry, mode, dev);
if (error)
- goto out_drop_write;
+ goto out;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
error = vfs_create(path.dentry->d_inode,dentry,mode,true);
@@ -2986,13 +3134,8 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
break;
}
-out_drop_write:
- mnt_drop_write(path.mnt);
-out_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
-
+out:
+ done_path_create(&path, dentry);
return error;
}
@@ -3038,19 +3181,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
if (!IS_POSIXACL(path.dentry->d_inode))
mode &= ~current_umask();
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
error = security_path_mkdir(&path, dentry, mode);
- if (error)
- goto out_drop_write;
- error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
-out_drop_write:
- mnt_drop_write(path.mnt);
-out_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
+ if (!error)
+ error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ done_path_create(&path, dentry);
return error;
}
@@ -3144,6 +3278,9 @@ static long do_rmdir(int dfd, const char __user *pathname)
}
nd.flags &= ~LOOKUP_PARENT;
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit1;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
@@ -3154,19 +3291,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
error = -ENOENT;
goto exit3;
}
- error = mnt_want_write(nd.path.mnt);
- if (error)
- goto exit3;
error = security_path_rmdir(&nd.path, dentry);
if (error)
- goto exit4;
+ goto exit3;
error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
-exit4:
- mnt_drop_write(nd.path.mnt);
exit3:
dput(dentry);
exit2:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ mnt_drop_write(nd.path.mnt);
exit1:
path_put(&nd.path);
putname(name);
@@ -3233,6 +3366,9 @@ static long do_unlinkat(int dfd, const char __user *pathname)
goto exit1;
nd.flags &= ~LOOKUP_PARENT;
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit1;
mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
dentry = lookup_hash(&nd);
@@ -3245,21 +3381,17 @@ static long do_unlinkat(int dfd, const char __user *pathname)
if (!inode)
goto slashes;
ihold(inode);
- error = mnt_want_write(nd.path.mnt);
- if (error)
- goto exit2;
error = security_path_unlink(&nd.path, dentry);
if (error)
- goto exit3;
+ goto exit2;
error = vfs_unlink(nd.path.dentry->d_inode, dentry);
-exit3:
- mnt_drop_write(nd.path.mnt);
- exit2:
+exit2:
dput(dentry);
}
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
if (inode)
iput(inode); /* truncate the inode here */
+ mnt_drop_write(nd.path.mnt);
exit1:
path_put(&nd.path);
putname(name);
@@ -3324,19 +3456,10 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
if (IS_ERR(dentry))
goto out_putname;
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_dput;
error = security_path_symlink(&path, dentry, from);
- if (error)
- goto out_drop_write;
- error = vfs_symlink(path.dentry->d_inode, dentry, from);
-out_drop_write:
- mnt_drop_write(path.mnt);
-out_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
+ if (!error)
+ error = vfs_symlink(path.dentry->d_inode, dentry, from);
+ done_path_create(&path, dentry);
out_putname:
putname(from);
return error;
@@ -3436,19 +3559,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
error = -EXDEV;
if (old_path.mnt != new_path.mnt)
goto out_dput;
- error = mnt_want_write(new_path.mnt);
- if (error)
+ error = may_linkat(&old_path);
+ if (unlikely(error))
goto out_dput;
error = security_path_link(old_path.dentry, &new_path, new_dentry);
if (error)
- goto out_drop_write;
+ goto out_dput;
error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
-out_drop_write:
- mnt_drop_write(new_path.mnt);
out_dput:
- dput(new_dentry);
- mutex_unlock(&new_path.dentry->d_inode->i_mutex);
- path_put(&new_path);
+ done_path_create(&new_path, new_dentry);
out:
path_put(&old_path);
@@ -3644,6 +3763,10 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
if (newnd.last_type != LAST_NORM)
goto exit2;
+ error = mnt_want_write(oldnd.path.mnt);
+ if (error)
+ goto exit2;
+
oldnd.flags &= ~LOOKUP_PARENT;
newnd.flags &= ~LOOKUP_PARENT;
newnd.flags |= LOOKUP_RENAME_TARGET;
@@ -3679,23 +3802,19 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
if (new_dentry == trap)
goto exit5;
- error = mnt_want_write(oldnd.path.mnt);
- if (error)
- goto exit5;
error = security_path_rename(&oldnd.path, old_dentry,
&newnd.path, new_dentry);
if (error)
- goto exit6;
+ goto exit5;
error = vfs_rename(old_dir->d_inode, old_dentry,
new_dir->d_inode, new_dentry);
-exit6:
- mnt_drop_write(oldnd.path.mnt);
exit5:
dput(new_dentry);
exit4:
dput(old_dentry);
exit3:
unlock_rename(new_dir, old_dir);
+ mnt_drop_write(oldnd.path.mnt);
exit2:
path_put(&newnd.path);
putname(to);
diff --git a/fs/namespace.c b/fs/namespace.c
index c53d3381b0d0..4d31f73e2561 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -283,24 +283,22 @@ static int mnt_is_readonly(struct vfsmount *mnt)
}
/*
- * Most r/o checks on a fs are for operations that take
- * discrete amounts of time, like a write() or unlink().
- * We must keep track of when those operations start
- * (for permission checks) and when they end, so that
- * we can determine when writes are able to occur to
- * a filesystem.
+ * Most r/o & frozen checks on a fs are for operations that take discrete
+ * amounts of time, like a write() or unlink(). We must keep track of when
+ * those operations start (for permission checks) and when they end, so that we
+ * can determine when writes are able to occur to a filesystem.
*/
/**
- * mnt_want_write - get write access to a mount
+ * __mnt_want_write - get write access to a mount without freeze protection
* @m: the mount on which to take a write
*
- * This tells the low-level filesystem that a write is
- * about to be performed to it, and makes sure that
- * writes are allowed before returning success. When
- * the write operation is finished, mnt_drop_write()
- * must be called. This is effectively a refcount.
+ * This tells the low-level filesystem that a write is about to be performed to
+ * it, and makes sure that writes are allowed (mount is read-write) before
+ * returning success. This operation does not protect against the filesystem being
+ * frozen. When the write operation is finished, __mnt_drop_write() must be
+ * called. This is effectively a refcount.
*/
-int mnt_want_write(struct vfsmount *m)
+int __mnt_want_write(struct vfsmount *m)
{
struct mount *mnt = real_mount(m);
int ret = 0;
@@ -326,6 +324,27 @@ int mnt_want_write(struct vfsmount *m)
ret = -EROFS;
}
preempt_enable();
+
+ return ret;
+}
+
+/**
+ * mnt_want_write - get write access to a mount
+ * @m: the mount on which to take a write
+ *
+ * This tells the low-level filesystem that a write is about to be performed to
+ * it, and makes sure that writes are allowed (mount is read-write, filesystem
+ * is not frozen) before returning success. When the write operation is
+ * finished, mnt_drop_write() must be called. This is effectively a refcount.
+ */
+int mnt_want_write(struct vfsmount *m)
+{
+ int ret;
+
+ sb_start_write(m->mnt_sb);
+ ret = __mnt_want_write(m);
+ if (ret)
+ sb_end_write(m->mnt_sb);
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);
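/*
 * A minimal sketch of the pairing expected after this change, assuming a
 * caller that modifies the filesystem through a struct path. A successful
 * mnt_want_write() now also holds sb_start_write() freeze protection, so
 * it must still be balanced by mnt_drop_write(), which releases both:
 *
 *	error = mnt_want_write(path->mnt);
 *	if (error)
 *		return error;	/* read-only mount */
 *	... perform the modification ...
 *	mnt_drop_write(path->mnt);
 *
 * The __mnt_want_write()/__mnt_drop_write() variants skip the freeze
 * protection and are for callers that arrange sb_start_write() and
 * sb_end_write() themselves.
 */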
@@ -355,38 +374,76 @@ int mnt_clone_write(struct vfsmount *mnt)
EXPORT_SYMBOL_GPL(mnt_clone_write);
/**
- * mnt_want_write_file - get write access to a file's mount
+ * __mnt_want_write_file - get write access to a file's mount
* @file: the file whose mount to take a write on
*
- * This is like mnt_want_write, but it takes a file and can
+ * This is like __mnt_want_write, but it takes a file and can
* do some optimisations if the file is open for write already
*/
-int mnt_want_write_file(struct file *file)
+int __mnt_want_write_file(struct file *file)
{
struct inode *inode = file->f_dentry->d_inode;
+
if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
- return mnt_want_write(file->f_path.mnt);
+ return __mnt_want_write(file->f_path.mnt);
else
return mnt_clone_write(file->f_path.mnt);
}
+
+/**
+ * mnt_want_write_file - get write access to a file's mount
+ * @file: the file whose mount to take a write on
+ *
+ * This is like mnt_want_write, but it takes a file and can
+ * do some optimisations if the file is open for write already
+ */
+int mnt_want_write_file(struct file *file)
+{
+ int ret;
+
+ sb_start_write(file->f_path.mnt->mnt_sb);
+ ret = __mnt_want_write_file(file);
+ if (ret)
+ sb_end_write(file->f_path.mnt->mnt_sb);
+ return ret;
+}
EXPORT_SYMBOL_GPL(mnt_want_write_file);
/**
- * mnt_drop_write - give up write access to a mount
+ * __mnt_drop_write - give up write access to a mount
* @mnt: the mount on which to give up write access
*
* Tells the low-level filesystem that we are done
* performing writes to it. Must be matched with
- * mnt_want_write() call above.
+ * __mnt_want_write() call above.
*/
-void mnt_drop_write(struct vfsmount *mnt)
+void __mnt_drop_write(struct vfsmount *mnt)
{
preempt_disable();
mnt_dec_writers(real_mount(mnt));
preempt_enable();
}
+
+/**
+ * mnt_drop_write - give up write access to a mount
+ * @mnt: the mount on which to give up write access
+ *
+ * Tells the low-level filesystem that we are done performing writes to it and
+ * also allows the filesystem to be frozen again. Must be matched with
+ * mnt_want_write() call above.
+ */
+void mnt_drop_write(struct vfsmount *mnt)
+{
+ __mnt_drop_write(mnt);
+ sb_end_write(mnt->mnt_sb);
+}
EXPORT_SYMBOL_GPL(mnt_drop_write);
+void __mnt_drop_write_file(struct file *file)
+{
+ __mnt_drop_write(file->f_path.mnt);
+}
+
void mnt_drop_write_file(struct file *file)
{
mnt_drop_write(file->f_path.mnt);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index f90f4f5cd421..db7ad719628a 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -30,7 +30,7 @@ config NFS_FS
If unsure, say N.
config NFS_V2
- bool "NFS client support for NFS version 2"
+ tristate "NFS client support for NFS version 2"
depends on NFS_FS
default y
help
@@ -40,7 +40,7 @@ config NFS_V2
If unsure, say Y.
config NFS_V3
- bool "NFS client support for NFS version 3"
+ tristate "NFS client support for NFS version 3"
depends on NFS_FS
default y
help
@@ -72,7 +72,7 @@ config NFS_V3_ACL
If unsure, say N.
config NFS_V4
- bool "NFS client support for NFS version 4"
+ tristate "NFS client support for NFS version 4"
depends on NFS_FS
select SUNRPC_GSS
select KEYS
@@ -86,11 +86,18 @@ config NFS_V4
If unsure, say Y.
+config NFS_SWAP
+ bool "Provide swap over NFS support"
+ default n
+ depends on NFS_FS
+ select SUNRPC_SWAP
+ help
+ This option enables swapon to work on files located on NFS mounts.
+
config NFS_V4_1
bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
- depends on NFS_FS && NFS_V4 && EXPERIMENTAL
+ depends on NFS_V4 && EXPERIMENTAL
select SUNRPC_BACKCHANNEL
- select PNFS_FILE_LAYOUT
help
This option enables support for minor version 1 of the NFSv4 protocol
(RFC 5661) in the kernel's NFS client.
@@ -99,15 +106,17 @@ config NFS_V4_1
config PNFS_FILE_LAYOUT
tristate
+ depends on NFS_V4_1
+ default m
config PNFS_BLOCK
tristate
- depends on NFS_FS && NFS_V4_1 && BLK_DEV_DM
+ depends on NFS_V4_1 && BLK_DEV_DM
default m
config PNFS_OBJLAYOUT
tristate
- depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD
+ depends on NFS_V4_1 && SCSI_OSD_ULD
default m
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 7ddd45d9f170..b7db60897f91 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -9,17 +9,23 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
write.o namespace.o mount_clnt.o \
dns_resolve.o cache_lib.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
-nfs-$(CONFIG_NFS_V2) += proc.o nfs2xdr.o
-nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
-nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
-nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
- delegation.o idmap.o \
- callback.o callback_xdr.o callback_proc.o \
- nfs4namespace.o
-nfs-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
-nfs-$(CONFIG_SYSCTL) += sysctl.o
+nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+obj-$(CONFIG_NFS_V2) += nfsv2.o
+nfsv2-y := nfs2super.o proc.o nfs2xdr.o
+
+obj-$(CONFIG_NFS_V3) += nfsv3.o
+nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o
+nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
+
+obj-$(CONFIG_NFS_V4) += nfsv4.o
+nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
+ delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \
+ nfs4namespace.o nfs4getroot.o nfs4client.o
+nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
+nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+
obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7ae8a608956f..dd392ed5f2e2 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -228,6 +228,14 @@ bl_end_par_io_read(void *data, int unused)
schedule_work(&rdata->task.u.tk_work);
}
+static bool
+bl_check_alignment(u64 offset, u32 len, unsigned long blkmask)
+{
+ if ((offset & blkmask) || (len & blkmask))
+ return false;
+ return true;
+}
+
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
@@ -244,6 +252,9 @@ bl_read_pagelist(struct nfs_read_data *rdata)
dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
+ if (!bl_check_alignment(f_offset, rdata->args.count, PAGE_CACHE_MASK))
+ goto use_mds;
+
par = alloc_parallel(rdata);
if (!par)
goto use_mds;
@@ -552,7 +563,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
struct bio *bio = NULL;
struct pnfs_block_extent *be = NULL, *cow_read = NULL;
sector_t isect, last_isect = 0, extent_length = 0;
- struct parallel_io *par;
+ struct parallel_io *par = NULL;
loff_t offset = wdata->args.offset;
size_t count = wdata->args.count;
struct page **pages = wdata->args.pages;
@@ -563,6 +574,10 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
+ /* Check for alignment first */
+ if (!bl_check_alignment(offset, count, PAGE_CACHE_MASK))
+ goto out_mds;
+
/* At this point, wdata->pages is a (sequential) list of nfs_pages.
* We want to write each, and if there is an error set pnfs_error
* to have it redone using nfs.
@@ -996,14 +1011,32 @@ bl_clear_layoutdriver(struct nfs_server *server)
return 0;
}
+static void
+bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
+ nfs_pageio_reset_read_mds(pgio);
+ else
+ pnfs_generic_pg_init_read(pgio, req);
+}
+
+static void
+bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ if (!bl_check_alignment(req->wb_offset, req->wb_bytes, PAGE_CACHE_MASK))
+ nfs_pageio_reset_write_mds(pgio);
+ else
+ pnfs_generic_pg_init_write(pgio, req);
+}
+
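/*
 * The two pg_init hooks above are the fall-back path added by this patch:
 * a request that is not aligned to the page cache size has its pageio
 * descriptor reset to go through the MDS (nfs_pageio_reset_read_mds() /
 * nfs_pageio_reset_write_mds()) instead of attempting block-layout I/O,
 * mirroring the bl_check_alignment() checks added to bl_read_pagelist()
 * and bl_write_pagelist().
 */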
static const struct nfs_pageio_ops bl_pg_read_ops = {
- .pg_init = pnfs_generic_pg_init_read,
+ .pg_init = bl_pg_init_read,
.pg_test = pnfs_generic_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops bl_pg_write_ops = {
- .pg_init = pnfs_generic_pg_init_write,
+ .pg_init = bl_pg_init_write,
.pg_test = pnfs_generic_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
};
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 23ff18fe080a..4c8459e5bdee 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -37,31 +37,7 @@ static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
-unsigned int nfs_callback_set_tcpport;
-unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
-#define NFS_CALLBACK_MAXPORTNR (65535U)
-
-static int param_set_portnr(const char *val, const struct kernel_param *kp)
-{
- unsigned long num;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = strict_strtoul(val, 0, &num);
- if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
- return -EINVAL;
- *((unsigned int *)kp->arg) = num;
- return 0;
-}
-static struct kernel_param_ops param_ops_portnr = {
- .set = param_set_portnr,
- .get = param_get_uint,
-};
-#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
-
-module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
/*
* This is the NFSv4 callback kernel thread.
@@ -265,6 +241,10 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
ret = -ENOMEM;
goto out_err;
}
+ /* As there is only one thread we need to override the
+ * default maximum of 80 connections
+ */
+ serv->sv_maxconn = 1024;
ret = svc_bind(serv, net);
if (ret < 0) {
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index a5527c90a5aa..b44d7b128b71 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -192,7 +192,7 @@ extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
struct cb_process_state *cps);
extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
struct cb_process_state *cps);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
extern void nfs_callback_down(int minorversion);
extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index e64b01d2a338..742ff4ffced7 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -863,7 +863,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
.drc_status = 0,
.clp = NULL,
.slotid = NFS4_NO_SLOT,
- .net = rqstp->rq_xprt->xpt_net,
+ .net = SVC_NET(rqstp),
};
unsigned int nops = 0;
@@ -879,7 +879,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
return rpc_garbage_args;
if (hdr_arg.minorversion == 0) {
- cps.clp = nfs4_find_client_ident(rqstp->rq_xprt->xpt_net, hdr_arg.cb_ident);
+ cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
return rpc_drop_reply;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f005b5bebdc7..99694442b93f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -51,54 +51,23 @@
#include "internal.h"
#include "fscache.h"
#include "pnfs.h"
+#include "nfs.h"
#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
-#ifdef CONFIG_NFS_V4
-
-/*
- * Get a unique NFSv4.0 callback identifier which will be used
- * by the V4.0 callback service to lookup the nfs_client struct
- */
-static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
-{
- int ret = 0;
- struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
-
- if (clp->rpc_ops->version != 4 || minorversion != 0)
- return ret;
-retry:
- if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
- return -ENOMEM;
- spin_lock(&nn->nfs_client_lock);
- ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
- spin_unlock(&nn->nfs_client_lock);
- if (ret == -EAGAIN)
- goto retry;
- return ret;
-}
-#endif /* CONFIG_NFS_V4 */
-
-/*
- * Turn off NFSv4 uid/gid mapping when using AUTH_SYS
- */
-static bool nfs4_disable_idmapping = true;
+static DEFINE_SPINLOCK(nfs_version_lock);
+static DEFINE_MUTEX(nfs_version_mutex);
+static LIST_HEAD(nfs_versions);
/*
* RPC cruft for NFS
*/
static const struct rpc_version *nfs_version[5] = {
-#ifdef CONFIG_NFS_V2
- [2] = &nfs_version2,
-#endif
-#ifdef CONFIG_NFS_V3
- [3] = &nfs_version3,
-#endif
-#ifdef CONFIG_NFS_V4
- [4] = &nfs_version4,
-#endif
+ [2] = NULL,
+ [3] = NULL,
+ [4] = NULL,
};
const struct rpc_program nfs_program = {
@@ -114,32 +83,64 @@ struct rpc_stat nfs_rpcstat = {
.program = &nfs_program
};
+static struct nfs_subversion *find_nfs_version(unsigned int version)
+{
+ struct nfs_subversion *nfs;
+ spin_lock(&nfs_version_lock);
-#ifdef CONFIG_NFS_V3_ACL
-static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
-static const struct rpc_version *nfsacl_version[] = {
- [3] = &nfsacl_version3,
-};
+ list_for_each_entry(nfs, &nfs_versions, list) {
+ if (nfs->rpc_ops->version == version) {
+ spin_unlock(&nfs_version_lock);
+ return nfs;
+ }
+ }
-const struct rpc_program nfsacl_program = {
- .name = "nfsacl",
- .number = NFS_ACL_PROGRAM,
- .nrvers = ARRAY_SIZE(nfsacl_version),
- .version = nfsacl_version,
- .stats = &nfsacl_rpcstat,
-};
-#endif /* CONFIG_NFS_V3_ACL */
-
-struct nfs_client_initdata {
- unsigned long init_flags;
- const char *hostname;
- const struct sockaddr *addr;
- size_t addrlen;
- const struct nfs_rpc_ops *rpc_ops;
- int proto;
- u32 minorversion;
- struct net *net;
-};
+ spin_unlock(&nfs_version_lock);
+ return ERR_PTR(-EPROTONOSUPPORT);
+}
+
+struct nfs_subversion *get_nfs_version(unsigned int version)
+{
+ struct nfs_subversion *nfs = find_nfs_version(version);
+
+ if (IS_ERR(nfs)) {
+ mutex_lock(&nfs_version_mutex);
+ request_module("nfsv%d", version);
+ nfs = find_nfs_version(version);
+ mutex_unlock(&nfs_version_mutex);
+ }
+
+ if (!IS_ERR(nfs))
+ try_module_get(nfs->owner);
+ return nfs;
+}
+
+void put_nfs_version(struct nfs_subversion *nfs)
+{
+ module_put(nfs->owner);
+}
+
+void register_nfs_version(struct nfs_subversion *nfs)
+{
+ spin_lock(&nfs_version_lock);
+
+ list_add(&nfs->list, &nfs_versions);
+ nfs_version[nfs->rpc_ops->version] = nfs->rpc_vers;
+
+ spin_unlock(&nfs_version_lock);
+}
+EXPORT_SYMBOL_GPL(register_nfs_version);
+
+void unregister_nfs_version(struct nfs_subversion *nfs)
+{
+ spin_lock(&nfs_version_lock);
+
+ nfs_version[nfs->rpc_ops->version] = NULL;
+ list_del(&nfs->list);
+
+ spin_unlock(&nfs_version_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_nfs_version);
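/*
 * A minimal sketch of how a per-version module is expected to plug into
 * the registry above, assuming an NFSv3 module; only the .owner, .rpc_vers
 * and .rpc_ops fields used by the code above are shown, and the real
 * struct nfs_subversion may carry more:
 *
 *	static struct nfs_subversion nfs_v3 = {
 *		.owner    = THIS_MODULE,
 *		.rpc_vers = &nfs_version3,
 *		.rpc_ops  = &nfs_v3_clientops,
 *	};
 *
 *	static int __init init_nfs_v3(void)
 *	{
 *		register_nfs_version(&nfs_v3);
 *		return 0;
 *	}
 *
 *	static void __exit exit_nfs_v3(void)
 *	{
 *		unregister_nfs_version(&nfs_v3);
 *	}
 *
 *	module_init(init_nfs_v3);
 *	module_exit(exit_nfs_v3);
 *
 * get_nfs_version() above then maps a mount's protocol version to the
 * registered module, loading it on demand with request_module("nfsv%d").
 */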
/*
* Allocate a shared client record
@@ -147,7 +148,7 @@ struct nfs_client_initdata {
* Since these are allocated/deallocated very rarely, we don't
* bother putting them in a slab cache...
*/
-static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
+struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
{
struct nfs_client *clp;
struct rpc_cred *cred;
@@ -156,7 +157,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
- clp->rpc_ops = cl_init->rpc_ops;
+ clp->cl_nfs_mod = cl_init->nfs_mod;
+ try_module_get(clp->cl_nfs_mod->owner);
+
+ clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
atomic_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
@@ -177,18 +181,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_proto = cl_init->proto;
clp->cl_net = get_net(cl_init->net);
-#ifdef CONFIG_NFS_V4
- err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
- if (err)
- goto error_cleanup;
-
- spin_lock_init(&clp->cl_lock);
- INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
- rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
- clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
- clp->cl_minorversion = cl_init->minorversion;
- clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
-#endif
cred = rpc_lookup_machine_cred("*");
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
@@ -197,51 +189,14 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
return clp;
error_cleanup:
+ put_nfs_version(clp->cl_nfs_mod);
kfree(clp);
error_0:
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(nfs_alloc_client);
-#ifdef CONFIG_NFS_V4
-#ifdef CONFIG_NFS_V4_1
-static void nfs4_shutdown_session(struct nfs_client *clp)
-{
- if (nfs4_has_session(clp)) {
- nfs4_destroy_session(clp->cl_session);
- nfs4_destroy_clientid(clp);
- }
-
-}
-#else /* CONFIG_NFS_V4_1 */
-static void nfs4_shutdown_session(struct nfs_client *clp)
-{
-}
-#endif /* CONFIG_NFS_V4_1 */
-
-/*
- * Destroy the NFS4 callback service
- */
-static void nfs4_destroy_callback(struct nfs_client *clp)
-{
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down(clp->cl_mvops->minor_version);
-}
-
-static void nfs4_shutdown_client(struct nfs_client *clp)
-{
- if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
- nfs4_kill_renewd(clp);
- nfs4_shutdown_session(clp);
- nfs4_destroy_callback(clp);
- if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
- nfs_idmap_delete(clp);
-
- rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
- kfree(clp->cl_serverowner);
- kfree(clp->cl_serverscope);
- kfree(clp->cl_implid);
-}
-
+#if IS_ENABLED(CONFIG_NFS_V4)
/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
void nfs_cleanup_cb_ident_idr(struct net *net)
{
@@ -264,16 +219,7 @@ static void pnfs_init_server(struct nfs_server *server)
rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
}
-static void nfs4_destroy_server(struct nfs_server *server)
-{
- nfs4_purge_state_owners(server);
-}
-
#else
-static void nfs4_shutdown_client(struct nfs_client *clp)
-{
-}
-
void nfs_cleanup_cb_ident_idr(struct net *net)
{
}
@@ -291,12 +237,10 @@ static void pnfs_init_server(struct nfs_server *server)
/*
* Destroy a shared client record
*/
-static void nfs_free_client(struct nfs_client *clp)
+void nfs_free_client(struct nfs_client *clp)
{
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
- nfs4_shutdown_client(clp);
-
nfs_fscache_release_client_cookie(clp);
/* -EIO all pending I/O */
@@ -307,11 +251,13 @@ static void nfs_free_client(struct nfs_client *clp)
put_rpccred(clp->cl_machine_cred);
put_net(clp->cl_net);
+ put_nfs_version(clp->cl_nfs_mod);
kfree(clp->cl_hostname);
kfree(clp);
dprintk("<-- nfs_free_client()\n");
}
+EXPORT_SYMBOL_GPL(nfs_free_client);
/*
* Release a reference to a shared client record
@@ -333,7 +279,7 @@ void nfs_put_client(struct nfs_client *clp)
BUG_ON(!list_empty(&clp->cl_superblocks));
- nfs_free_client(clp);
+ clp->rpc_ops->free_client(clp);
}
}
EXPORT_SYMBOL_GPL(nfs_put_client);
@@ -412,8 +358,8 @@ static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1,
* Test if two socket addresses represent the same actual socket,
* by comparing (only) relevant fields, excluding the port number.
*/
-static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
- const struct sockaddr *sa2)
+int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
{
if (sa1->sa_family != sa2->sa_family)
return 0;
@@ -426,6 +372,7 @@ static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
}
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_sockaddr_match_ipaddr);
#endif /* CONFIG_NFS_V4_1 */
/*
@@ -447,33 +394,6 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1,
return 0;
}
-#if defined(CONFIG_NFS_V4_1)
-/* Common match routine for v4.0 and v4.1 callback services */
-static bool nfs4_cb_match_client(const struct sockaddr *addr,
- struct nfs_client *clp, u32 minorversion)
-{
- struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
-
- /* Don't match clients that failed to initialise */
- if (!(clp->cl_cons_state == NFS_CS_READY ||
- clp->cl_cons_state == NFS_CS_SESSION_INITING))
- return false;
-
- smp_rmb();
-
- /* Match the version and minorversion */
- if (clp->rpc_ops->version != 4 ||
- clp->cl_minorversion != minorversion)
- return false;
-
- /* Match only the IP address, not the port number */
- if (!nfs_sockaddr_match_ipaddr(addr, clap))
- return false;
-
- return true;
-}
-#endif /* CONFIG_NFS_V4_1 */
-
/*
* Find an nfs_client on the list that matches the initialisation data
* that is supplied.
@@ -491,7 +411,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
continue;
/* Different NFS versions cannot share the same nfs_client */
- if (clp->rpc_ops != data->rpc_ops)
+ if (clp->rpc_ops != data->nfs_mod->rpc_ops)
continue;
if (clp->cl_proto != data->proto)
@@ -519,6 +439,7 @@ int nfs_wait_client_init_complete(const struct nfs_client *clp)
return wait_event_killable(nfs_client_active_wq,
nfs_client_init_is_complete(clp));
}
+EXPORT_SYMBOL_GPL(nfs_wait_client_init_complete);
/*
* Found an existing client. Make sure it's ready before returning.
@@ -552,7 +473,7 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
* Look up a client by IP address and protocol version
* - creates a new record if one doesn't yet exist
*/
-static struct nfs_client *
+struct nfs_client *
nfs_get_client(const struct nfs_client_initdata *cl_init,
const struct rpc_timeout *timeparms,
const char *ip_addr,
@@ -560,9 +481,10 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
{
struct nfs_client *clp, *new = NULL;
struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
+ const struct nfs_rpc_ops *rpc_ops = cl_init->nfs_mod->rpc_ops;
dprintk("--> nfs_get_client(%s,v%u)\n",
- cl_init->hostname ?: "", cl_init->rpc_ops->version);
+ cl_init->hostname ?: "", rpc_ops->version);
/* see if the client already exists */
do {
@@ -572,27 +494,27 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
if (clp) {
spin_unlock(&nn->nfs_client_lock);
if (new)
- nfs_free_client(new);
+ new->rpc_ops->free_client(new);
return nfs_found_client(cl_init, clp);
}
if (new) {
list_add(&new->cl_share_link, &nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
new->cl_flags = cl_init->init_flags;
- return cl_init->rpc_ops->init_client(new,
- timeparms, ip_addr,
- authflavour);
+ return rpc_ops->init_client(new, timeparms, ip_addr,
+ authflavour);
}
spin_unlock(&nn->nfs_client_lock);
- new = nfs_alloc_client(cl_init);
+ new = rpc_ops->alloc_client(cl_init);
} while (!IS_ERR(new));
dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
cl_init->hostname ?: "", PTR_ERR(new));
return new;
}
+EXPORT_SYMBOL_GPL(nfs_get_client);
/*
* Mark a server as ready or failed
@@ -603,11 +525,12 @@ void nfs_mark_client_ready(struct nfs_client *clp, int state)
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
+EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
/*
* Initialise the timeout values for a connection
*/
-static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
unsigned int timeo, unsigned int retrans)
{
to->to_initval = timeo * HZ / 10;
@@ -644,13 +567,14 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
BUG();
}
}
+EXPORT_SYMBOL_GPL(nfs_init_timeout_values);
/*
* Create an RPC client handle
*/
-static int nfs_create_rpc_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- rpc_authflavor_t flavor)
+int nfs_create_rpc_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ rpc_authflavor_t flavor)
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
@@ -683,6 +607,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
clp->cl_rpcclient = clnt;
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_create_rpc_client);
/*
* Version 2 or 3 client destruction
@@ -735,39 +660,9 @@ static int nfs_start_lockd(struct nfs_server *server)
}
/*
- * Initialise an NFSv3 ACL client connection
- */
-#ifdef CONFIG_NFS_V3_ACL
-static void nfs_init_server_aclclient(struct nfs_server *server)
-{
- if (server->nfs_client->rpc_ops->version != 3)
- goto out_noacl;
- if (server->flags & NFS_MOUNT_NOACL)
- goto out_noacl;
-
- server->client_acl = rpc_bind_new_program(server->client, &nfsacl_program, 3);
- if (IS_ERR(server->client_acl))
- goto out_noacl;
-
- /* No errors! Assume that Sun nfsacls are supported */
- server->caps |= NFS_CAP_ACLS;
- return;
-
-out_noacl:
- server->caps &= ~NFS_CAP_ACLS;
-}
-#else
-static inline void nfs_init_server_aclclient(struct nfs_server *server)
-{
- server->flags &= ~NFS_MOUNT_NOACL;
- server->caps &= ~NFS_CAP_ACLS;
-}
-#endif
-
-/*
* Create a general RPC client
*/
-static int nfs_init_server_rpcclient(struct nfs_server *server,
+int nfs_init_server_rpcclient(struct nfs_server *server,
const struct rpc_timeout *timeo,
rpc_authflavor_t pseudoflavour)
{
@@ -799,6 +694,7 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);
/**
* nfs_init_client - Initialise an NFS2 or NFS3 client
@@ -838,18 +734,20 @@ error:
dprintk("<-- nfs_init_client() = xerror %d\n", error);
return ERR_PTR(error);
}
+EXPORT_SYMBOL_GPL(nfs_init_client);
/*
* Create a version 2 or 3 client
*/
static int nfs_init_server(struct nfs_server *server,
- const struct nfs_parsed_mount_data *data)
+ const struct nfs_parsed_mount_data *data,
+ struct nfs_subversion *nfs_mod)
{
struct nfs_client_initdata cl_init = {
.hostname = data->nfs_server.hostname,
.addr = (const struct sockaddr *)&data->nfs_server.address,
.addrlen = data->nfs_server.addrlen,
- .rpc_ops = NULL,
+ .nfs_mod = nfs_mod,
.proto = data->nfs_server.protocol,
.net = data->net,
};
@@ -859,21 +757,6 @@ static int nfs_init_server(struct nfs_server *server,
dprintk("--> nfs_init_server()\n");
- switch (data->version) {
-#ifdef CONFIG_NFS_V2
- case 2:
- cl_init.rpc_ops = &nfs_v2_clientops;
- break;
-#endif
-#ifdef CONFIG_NFS_V3
- case 3:
- cl_init.rpc_ops = &nfs_v3_clientops;
- break;
-#endif
- default:
- return -EPROTONOSUPPORT;
- }
-
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
data->timeo, data->retrans);
if (data->flags & NFS_MOUNT_NORESVPORT)
@@ -927,8 +810,6 @@ static int nfs_init_server(struct nfs_server *server,
server->mountd_protocol = data->mount_server.protocol;
server->namelen = data->namlen;
- /* Create a client RPC handle for the NFSv3 ACL management interface */
- nfs_init_server_aclclient(server);
dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
return 0;
@@ -975,7 +856,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
server->pnfs_blksize = fsinfo->blksize;
- set_pnfs_layoutdriver(server, mntfh, fsinfo->layouttype);
server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
@@ -1001,7 +881,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
/*
* Probe filesystem information, including the FSID on v2/v3
*/
-static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs_fattr *fattr)
+int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs_fattr *fattr)
{
struct nfs_fsinfo fsinfo;
struct nfs_client *clp = server->nfs_client;
@@ -1041,11 +921,12 @@ out_error:
dprintk("nfs_probe_fsinfo: error = %d\n", -error);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_probe_fsinfo);
/*
* Copy useful information when duplicating a server record
*/
-static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
+void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
{
target->flags = source->flags;
target->rsize = source->rsize;
@@ -1057,8 +938,9 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
target->caps = source->caps;
target->options = source->options;
}
+EXPORT_SYMBOL_GPL(nfs_server_copy_userdata);
-static void nfs_server_insert_lists(struct nfs_server *server)
+void nfs_server_insert_lists(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
@@ -1070,6 +952,7 @@ static void nfs_server_insert_lists(struct nfs_server *server)
spin_unlock(&nn->nfs_client_lock);
}
+EXPORT_SYMBOL_GPL(nfs_server_insert_lists);
static void nfs_server_remove_lists(struct nfs_server *server)
{
@@ -1092,7 +975,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
/*
* Allocate and initialise a server record
*/
-static struct nfs_server *nfs_alloc_server(void)
+struct nfs_server *nfs_alloc_server(void)
{
struct nfs_server *server;
@@ -1129,6 +1012,7 @@ static struct nfs_server *nfs_alloc_server(void)
return server;
}
+EXPORT_SYMBOL_GPL(nfs_alloc_server);
/*
* Free up a server record
@@ -1138,7 +1022,6 @@ void nfs_free_server(struct nfs_server *server)
dprintk("--> nfs_free_server()\n");
nfs_server_remove_lists(server);
- unset_pnfs_layoutdriver(server);
if (server->destroy != NULL)
server->destroy(server);
@@ -1158,13 +1041,14 @@ void nfs_free_server(struct nfs_server *server)
nfs_release_automount_timer();
dprintk("<-- nfs_free_server()\n");
}
+EXPORT_SYMBOL_GPL(nfs_free_server);
/*
* Create a version 2 or 3 volume record
* - keyed on server and FSID
*/
-struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
- struct nfs_fh *mntfh)
+struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
{
struct nfs_server *server;
struct nfs_fattr *fattr;
@@ -1180,7 +1064,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
goto error;
/* Get a client representation */
- error = nfs_init_server(server, data);
+ error = nfs_init_server(server, mount_info->parsed, nfs_mod);
if (error < 0)
goto error;
@@ -1189,13 +1073,13 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
/* Probe the root fh to retrieve its FSID */
- error = nfs_probe_fsinfo(server, mntfh, fattr);
+ error = nfs_probe_fsinfo(server, mount_info->mntfh, fattr);
if (error < 0)
goto error;
if (server->nfs_client->rpc_ops->version == 3) {
if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
server->namelen = NFS3_MAXNAMLEN;
- if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ if (!(mount_info->parsed->flags & NFS_MOUNT_NORDIRPLUS))
server->caps |= NFS_CAP_READDIRPLUS;
} else {
if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
@@ -1203,7 +1087,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
}
if (!(fattr->valid & NFS_ATTR_FATTR)) {
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
+ error = nfs_mod->rpc_ops->getattr(server, mount_info->mntfh, fattr);
if (error < 0) {
dprintk("nfs_create_server: getattr error = %d\n", -error);
goto error;
@@ -1225,522 +1109,7 @@ error:
nfs_free_server(server);
return ERR_PTR(error);
}
-
-#ifdef CONFIG_NFS_V4
-/*
- * NFSv4.0 callback thread helper
- *
- * Find a client by callback identifier
- */
-struct nfs_client *
-nfs4_find_client_ident(struct net *net, int cb_ident)
-{
- struct nfs_client *clp;
- struct nfs_net *nn = net_generic(net, nfs_net_id);
-
- spin_lock(&nn->nfs_client_lock);
- clp = idr_find(&nn->cb_ident_idr, cb_ident);
- if (clp)
- atomic_inc(&clp->cl_count);
- spin_unlock(&nn->nfs_client_lock);
- return clp;
-}
-
-#if defined(CONFIG_NFS_V4_1)
-/*
- * NFSv4.1 callback thread helper
- * For CB_COMPOUND calls, find a client by IP address, protocol version,
- * minorversion, and sessionID
- *
- * Returns NULL if no such client
- */
-struct nfs_client *
-nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
- struct nfs4_sessionid *sid)
-{
- struct nfs_client *clp;
- struct nfs_net *nn = net_generic(net, nfs_net_id);
-
- spin_lock(&nn->nfs_client_lock);
- list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
- if (nfs4_cb_match_client(addr, clp, 1) == false)
- continue;
-
- if (!nfs4_has_session(clp))
- continue;
-
- /* Match sessionid*/
- if (memcmp(clp->cl_session->sess_id.data,
- sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
- continue;
-
- atomic_inc(&clp->cl_count);
- spin_unlock(&nn->nfs_client_lock);
- return clp;
- }
- spin_unlock(&nn->nfs_client_lock);
- return NULL;
-}
-
-#else /* CONFIG_NFS_V4_1 */
-
-struct nfs_client *
-nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
- struct nfs4_sessionid *sid)
-{
- return NULL;
-}
-#endif /* CONFIG_NFS_V4_1 */
-
-/*
- * Initialize the NFS4 callback service
- */
-static int nfs4_init_callback(struct nfs_client *clp)
-{
- int error;
-
- if (clp->rpc_ops->version == 4) {
- struct rpc_xprt *xprt;
-
- xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt);
-
- if (nfs4_has_session(clp)) {
- error = xprt_setup_backchannel(xprt,
- NFS41_BC_MIN_CALLBACKS);
- if (error < 0)
- return error;
- }
-
- error = nfs_callback_up(clp->cl_mvops->minor_version, xprt);
- if (error < 0) {
- dprintk("%s: failed to start callback. Error = %d\n",
- __func__, error);
- return error;
- }
- __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
- }
- return 0;
-}
-
-/*
- * Initialize the minor version specific parts of an NFS4 client record
- */
-static int nfs4_init_client_minor_version(struct nfs_client *clp)
-{
-#if defined(CONFIG_NFS_V4_1)
- if (clp->cl_mvops->minor_version) {
- struct nfs4_session *session = NULL;
- /*
- * Create the session and mark it expired.
- * When a SEQUENCE operation encounters the expired session
- * it will do session recovery to initialize it.
- */
- session = nfs4_alloc_session(clp);
- if (!session)
- return -ENOMEM;
-
- clp->cl_session = session;
- /*
- * The create session reply races with the server back
- * channel probe. Mark the client NFS_CS_SESSION_INITING
- * so that the client back channel can find the
- * nfs_client struct
- */
- nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
- }
-#endif /* CONFIG_NFS_V4_1 */
-
- return nfs4_init_callback(clp);
-}
-
-/**
- * nfs4_init_client - Initialise an NFS4 client record
- *
- * @clp: nfs_client to initialise
- * @timeparms: timeout parameters for underlying RPC transport
- * @ip_addr: callback IP address in presentation format
- * @authflavor: authentication flavor for underlying RPC transport
- *
- * Returns pointer to an NFS client, or an ERR_PTR value.
- */
-struct nfs_client *nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr,
- rpc_authflavor_t authflavour)
-{
- char buf[INET6_ADDRSTRLEN + 1];
- int error;
-
- if (clp->cl_cons_state == NFS_CS_READY) {
- /* the client is initialised already */
- dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
- return clp;
- }
-
- /* Check NFS protocol revision and initialize RPC op vector */
- clp->rpc_ops = &nfs_v4_clientops;
-
- __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
- error = nfs_create_rpc_client(clp, timeparms, authflavour);
- if (error < 0)
- goto error;
-
- /* If no clientaddr= option was specified, find a usable cb address */
- if (ip_addr == NULL) {
- struct sockaddr_storage cb_addr;
- struct sockaddr *sap = (struct sockaddr *)&cb_addr;
-
- error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
- if (error < 0)
- goto error;
- error = rpc_ntop(sap, buf, sizeof(buf));
- if (error < 0)
- goto error;
- ip_addr = (const char *)buf;
- }
- strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
-
- error = nfs_idmap_new(clp);
- if (error < 0) {
- dprintk("%s: failed to create idmapper. Error = %d\n",
- __func__, error);
- goto error;
- }
- __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
-
- error = nfs4_init_client_minor_version(clp);
- if (error < 0)
- goto error;
-
- if (!nfs4_has_session(clp))
- nfs_mark_client_ready(clp, NFS_CS_READY);
- return clp;
-
-error:
- nfs_mark_client_ready(clp, error);
- nfs_put_client(clp);
- dprintk("<-- nfs4_init_client() = xerror %d\n", error);
- return ERR_PTR(error);
-}
-
-/*
- * Set up an NFS4 client
- */
-static int nfs4_set_client(struct nfs_server *server,
- const char *hostname,
- const struct sockaddr *addr,
- const size_t addrlen,
- const char *ip_addr,
- rpc_authflavor_t authflavour,
- int proto, const struct rpc_timeout *timeparms,
- u32 minorversion, struct net *net)
-{
- struct nfs_client_initdata cl_init = {
- .hostname = hostname,
- .addr = addr,
- .addrlen = addrlen,
- .rpc_ops = &nfs_v4_clientops,
- .proto = proto,
- .minorversion = minorversion,
- .net = net,
- };
- struct nfs_client *clp;
- int error;
-
- dprintk("--> nfs4_set_client()\n");
-
- if (server->flags & NFS_MOUNT_NORESVPORT)
- set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
-
- /* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
- if (IS_ERR(clp)) {
- error = PTR_ERR(clp);
- goto error;
- }
-
- /*
- * Query for the lease time on clientid setup or renewal
- *
- * Note that this will be set on nfs_clients that were created
- * only for the DS role and did not set this bit, but now will
- * serve a dual role.
- */
- set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
-
- server->nfs_client = clp;
- dprintk("<-- nfs4_set_client() = 0 [new %p]\n", clp);
- return 0;
-error:
- dprintk("<-- nfs4_set_client() = xerror %d\n", error);
- return error;
-}
-
-/*
- * Set up a pNFS Data Server client.
- *
- * Return any existing nfs_client that matches server address,port,version
- * and minorversion.
- *
- * For a new nfs_client, use a soft mount (default), a low retrans and a
- * low timeout interval so that if a connection is lost, we retry through
- * the MDS.
- */
-struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
- const struct sockaddr *ds_addr, int ds_addrlen,
- int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
-{
- struct nfs_client_initdata cl_init = {
- .addr = ds_addr,
- .addrlen = ds_addrlen,
- .rpc_ops = &nfs_v4_clientops,
- .proto = ds_proto,
- .minorversion = mds_clp->cl_minorversion,
- .net = mds_clp->cl_net,
- };
- struct rpc_timeout ds_timeout;
- struct nfs_client *clp;
-
- /*
- * Set an authflavor equual to the MDS value. Use the MDS nfs_client
- * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
- * (section 13.1 RFC 5661).
- */
- nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
- clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- mds_clp->cl_rpcclient->cl_auth->au_flavor);
-
- dprintk("<-- %s %p\n", __func__, clp);
- return clp;
-}
-EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
-
-/*
- * Session has been established, and the client marked ready.
- * Set the mount rsize and wsize with negotiated fore channel
- * attributes which will be bound checked in nfs_server_set_fsinfo.
- */
-static void nfs4_session_set_rwsize(struct nfs_server *server)
-{
-#ifdef CONFIG_NFS_V4_1
- struct nfs4_session *sess;
- u32 server_resp_sz;
- u32 server_rqst_sz;
-
- if (!nfs4_has_session(server->nfs_client))
- return;
- sess = server->nfs_client->cl_session;
- server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
- server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
-
- if (server->rsize > server_resp_sz)
- server->rsize = server_resp_sz;
- if (server->wsize > server_rqst_sz)
- server->wsize = server_rqst_sz;
-#endif /* CONFIG_NFS_V4_1 */
-}
-
-static int nfs4_server_common_setup(struct nfs_server *server,
- struct nfs_fh *mntfh)
-{
- struct nfs_fattr *fattr;
- int error;
-
- BUG_ON(!server->nfs_client);
- BUG_ON(!server->nfs_client->rpc_ops);
- BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
-
- /* data servers support only a subset of NFSv4.1 */
- if (is_ds_only_client(server->nfs_client))
- return -EPROTONOSUPPORT;
-
- fattr = nfs_alloc_fattr();
- if (fattr == NULL)
- return -ENOMEM;
-
- /* We must ensure the session is initialised first */
- error = nfs4_init_session(server);
- if (error < 0)
- goto out;
-
- /* Probe the root fh to retrieve its FSID and filehandle */
- error = nfs4_get_rootfh(server, mntfh);
- if (error < 0)
- goto out;
-
- dprintk("Server FSID: %llx:%llx\n",
- (unsigned long long) server->fsid.major,
- (unsigned long long) server->fsid.minor);
- dprintk("Mount FH: %d\n", mntfh->size);
-
- nfs4_session_set_rwsize(server);
-
- error = nfs_probe_fsinfo(server, mntfh, fattr);
- if (error < 0)
- goto out;
-
- if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
- server->namelen = NFS4_MAXNAMLEN;
-
- nfs_server_insert_lists(server);
- server->mount_time = jiffies;
- server->destroy = nfs4_destroy_server;
-out:
- nfs_free_fattr(fattr);
- return error;
-}
-
-/*
- * Create a version 4 volume record
- */
-static int nfs4_init_server(struct nfs_server *server,
- const struct nfs_parsed_mount_data *data)
-{
- struct rpc_timeout timeparms;
- int error;
-
- dprintk("--> nfs4_init_server()\n");
-
- nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
- data->timeo, data->retrans);
-
- /* Initialise the client representation from the mount data */
- server->flags = data->flags;
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|NFS_CAP_POSIX_LOCK;
- if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
- server->caps |= NFS_CAP_READDIRPLUS;
- server->options = data->options;
-
- /* Get a client record */
- error = nfs4_set_client(server,
- data->nfs_server.hostname,
- (const struct sockaddr *)&data->nfs_server.address,
- data->nfs_server.addrlen,
- data->client_address,
- data->auth_flavors[0],
- data->nfs_server.protocol,
- &timeparms,
- data->minorversion,
- data->net);
- if (error < 0)
- goto error;
-
- /*
- * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
- * authentication.
- */
- if (nfs4_disable_idmapping && data->auth_flavors[0] == RPC_AUTH_UNIX)
- server->caps |= NFS_CAP_UIDGID_NOMAP;
-
- if (data->rsize)
- server->rsize = nfs_block_size(data->rsize, NULL);
- if (data->wsize)
- server->wsize = nfs_block_size(data->wsize, NULL);
-
- server->acregmin = data->acregmin * HZ;
- server->acregmax = data->acregmax * HZ;
- server->acdirmin = data->acdirmin * HZ;
- server->acdirmax = data->acdirmax * HZ;
-
- server->port = data->nfs_server.port;
-
- error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
-
-error:
- /* Done */
- dprintk("<-- nfs4_init_server() = %d\n", error);
- return error;
-}
-
-/*
- * Create a version 4 volume record
- * - keyed on server and FSID
- */
-struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
- struct nfs_fh *mntfh)
-{
- struct nfs_server *server;
- int error;
-
- dprintk("--> nfs4_create_server()\n");
-
- server = nfs_alloc_server();
- if (!server)
- return ERR_PTR(-ENOMEM);
-
- /* set up the general RPC client */
- error = nfs4_init_server(server, data);
- if (error < 0)
- goto error;
-
- error = nfs4_server_common_setup(server, mntfh);
- if (error < 0)
- goto error;
-
- dprintk("<-- nfs4_create_server() = %p\n", server);
- return server;
-
-error:
- nfs_free_server(server);
- dprintk("<-- nfs4_create_server() = error %d\n", error);
- return ERR_PTR(error);
-}
-
-/*
- * Create an NFS4 referral server record
- */
-struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
- struct nfs_fh *mntfh)
-{
- struct nfs_client *parent_client;
- struct nfs_server *server, *parent_server;
- int error;
-
- dprintk("--> nfs4_create_referral_server()\n");
-
- server = nfs_alloc_server();
- if (!server)
- return ERR_PTR(-ENOMEM);
-
- parent_server = NFS_SB(data->sb);
- parent_client = parent_server->nfs_client;
-
- /* Initialise the client representation from the parent server */
- nfs_server_copy_userdata(server, parent_server);
- server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
-
- /* Get a client representation.
- * Note: NFSv4 always uses TCP, */
- error = nfs4_set_client(server, data->hostname,
- data->addr,
- data->addrlen,
- parent_client->cl_ipaddr,
- data->authflavor,
- rpc_protocol(parent_server->client),
- parent_server->client->cl_timeout,
- parent_client->cl_mvops->minor_version,
- parent_client->cl_net);
- if (error < 0)
- goto error;
-
- error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
- if (error < 0)
- goto error;
-
- error = nfs4_server_common_setup(server, mntfh);
- if (error < 0)
- goto error;
-
- dprintk("<-- nfs_create_referral_server() = %p\n", server);
- return server;
-
-error:
- nfs_free_server(server);
- dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
- return ERR_PTR(error);
-}
-
-#endif /* CONFIG_NFS_V4 */
+EXPORT_SYMBOL_GPL(nfs_create_server);
/*
* Clone an NFS2, NFS3 or NFS4 server record
@@ -1780,8 +1149,6 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
flavor);
if (error < 0)
goto out_free_server;
- if (!IS_ERR(source->client_acl))
- nfs_init_server_aclclient(server);
/* probe the filesystem info for this server filesystem */
error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
@@ -1812,6 +1179,7 @@ out_free_server:
dprintk("<-- nfs_clone_server() = error %d\n", error);
return ERR_PTR(error);
}
+EXPORT_SYMBOL_GPL(nfs_clone_server);
void nfs_clients_init(struct net *net)
{
@@ -1819,7 +1187,7 @@ void nfs_clients_init(struct net *net)
INIT_LIST_HEAD(&nn->nfs_client_list);
INIT_LIST_HEAD(&nn->nfs_volume_list);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
idr_init(&nn->cb_ident_idr);
#endif
spin_lock_init(&nn->nfs_client_lock);
@@ -2091,7 +1459,3 @@ void nfs_fs_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
-
-module_param(nfs4_disable_idmapping, bool, 0644);
-MODULE_PARM_DESC(nfs4_disable_idmapping,
- "Turn off NFSv4 idmapping when using 'sec=sys'");
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bd3a9601d32d..81c5eec3cf38 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -47,7 +47,7 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
*
* Returns one if inode has the indicated delegation, otherwise zero.
*/
-int nfs_have_delegation(struct inode *inode, fmode_t flags)
+int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
struct nfs_delegation *delegation;
int ret = 0;
@@ -388,7 +388,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
*
* Returns zero on success, or a negative errno value.
*/
-int nfs_inode_return_delegation(struct inode *inode)
+int nfs4_inode_return_delegation(struct inode *inode)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
@@ -417,9 +417,8 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
* @sb: sb to process
*
*/
-void nfs_super_return_all_delegations(struct super_block *sb)
+void nfs_server_return_all_delegations(struct nfs_server *server)
{
- struct nfs_server *server = NFS_SB(sb);
struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *delegation;
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 72709c4193fa..bbc6a4dba0d8 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -8,7 +8,7 @@
#ifndef FS_NFS_DELEGATION_H
#define FS_NFS_DELEGATION_H
-#if defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V4)
/*
* NFSv4 delegation
*/
@@ -33,12 +33,12 @@ enum {
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
-int nfs_inode_return_delegation(struct inode *inode);
+int nfs4_inode_return_delegation(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
-void nfs_super_return_all_delegations(struct super_block *sb);
+void nfs_server_return_all_delegations(struct nfs_server *);
void nfs_expire_all_delegations(struct nfs_client *clp);
void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
@@ -56,24 +56,13 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
-int nfs_have_delegation(struct inode *inode, fmode_t flags);
+int nfs4_have_delegation(struct inode *inode, fmode_t flags);
-#else
-static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
-{
- return 0;
-}
-
-static inline int nfs_inode_return_delegation(struct inode *inode)
-{
- nfs_wb_all(inode);
- return 0;
-}
#endif
static inline int nfs_have_delegated_attributes(struct inode *inode)
{
- return nfs_have_delegation(inode, FMODE_READ) &&
+ return NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) &&
!(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
}
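With nfs_have_delegation() and nfs_inode_return_delegation() renamed to their nfs4_* forms and the v4-only static inline fallbacks removed, callers now go through NFS_PROTO(inode)->have_delegation and ->return_delegation. A minimal sketch of the per-version side of that indirection; the v2/v3 stub names below are assumptions, only the v4 targets appear in this patch:

/* NFSv2/v3 never hold delegations, so their rpc_ops can use trivial stubs. */
static int nfs3_have_delegation(struct inode *inode, fmode_t flags)
{
	return 0;
}

static int nfs3_return_delegation(struct inode *inode)
{
	nfs_wb_all(inode);	/* same behaviour as the removed static inline */
	return 0;
}

/*
 * ...while the NFSv4 rpc_ops wire up the renamed helpers:
 *	.have_delegation	= nfs4_have_delegation,
 *	.return_delegation	= nfs4_inode_return_delegation,
 */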
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a6b1c7fb8232..627f108ede23 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -17,6 +17,7 @@
* 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
*/
+#include <linux/module.h>
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/stat.h>
@@ -46,16 +47,6 @@
static int nfs_opendir(struct inode *, struct file *);
static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
-static struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
-static int nfs_create(struct inode *, struct dentry *, umode_t, bool);
-static int nfs_mkdir(struct inode *, struct dentry *, umode_t);
-static int nfs_rmdir(struct inode *, struct dentry *);
-static int nfs_unlink(struct inode *, struct dentry *);
-static int nfs_symlink(struct inode *, struct dentry *, const char *);
-static int nfs_link(struct dentry *, struct inode *, struct dentry *);
-static int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
-static int nfs_rename(struct inode *, struct dentry *,
- struct inode *, struct dentry *);
static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);
static void nfs_readdir_clear_array(struct page*);
@@ -69,73 +60,10 @@ const struct file_operations nfs_dir_operations = {
.fsync = nfs_fsync_dir,
};
-const struct inode_operations nfs_dir_inode_operations = {
- .create = nfs_create,
- .lookup = nfs_lookup,
- .link = nfs_link,
- .unlink = nfs_unlink,
- .symlink = nfs_symlink,
- .mkdir = nfs_mkdir,
- .rmdir = nfs_rmdir,
- .mknod = nfs_mknod,
- .rename = nfs_rename,
- .permission = nfs_permission,
- .getattr = nfs_getattr,
- .setattr = nfs_setattr,
-};
-
const struct address_space_operations nfs_dir_aops = {
.freepage = nfs_readdir_clear_array,
};
-#ifdef CONFIG_NFS_V3
-const struct inode_operations nfs3_dir_inode_operations = {
- .create = nfs_create,
- .lookup = nfs_lookup,
- .link = nfs_link,
- .unlink = nfs_unlink,
- .symlink = nfs_symlink,
- .mkdir = nfs_mkdir,
- .rmdir = nfs_rmdir,
- .mknod = nfs_mknod,
- .rename = nfs_rename,
- .permission = nfs_permission,
- .getattr = nfs_getattr,
- .setattr = nfs_setattr,
- .listxattr = nfs3_listxattr,
- .getxattr = nfs3_getxattr,
- .setxattr = nfs3_setxattr,
- .removexattr = nfs3_removexattr,
-};
-#endif /* CONFIG_NFS_V3 */
-
-#ifdef CONFIG_NFS_V4
-
-static int nfs_atomic_open(struct inode *, struct dentry *,
- struct file *, unsigned, umode_t,
- int *);
-const struct inode_operations nfs4_dir_inode_operations = {
- .create = nfs_create,
- .lookup = nfs_lookup,
- .atomic_open = nfs_atomic_open,
- .link = nfs_link,
- .unlink = nfs_unlink,
- .symlink = nfs_symlink,
- .mkdir = nfs_mkdir,
- .rmdir = nfs_rmdir,
- .mknod = nfs_mknod,
- .rename = nfs_rename,
- .permission = nfs_permission,
- .getattr = nfs_getattr,
- .setattr = nfs_setattr,
- .getxattr = generic_getxattr,
- .setxattr = generic_setxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
-};
-
-#endif /* CONFIG_NFS_V4 */
-
static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
{
struct nfs_open_dir_context *ctx;
@@ -1008,6 +936,7 @@ void nfs_force_lookup_revalidate(struct inode *dir)
{
NFS_I(dir)->cache_change_attribute++;
}
+EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
/*
* A check for whether or not the parent directory has changed.
@@ -1128,7 +1057,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
goto out_bad;
}
- if (nfs_have_delegation(inode, FMODE_READ))
+ if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
goto out_set_verifier;
/* Force a full look up iff the parent directory has changed */
@@ -1269,8 +1198,9 @@ const struct dentry_operations nfs_dentry_operations = {
.d_automount = nfs_d_automount,
.d_release = nfs_d_release,
};
+EXPORT_SYMBOL_GPL(nfs_dentry_operations);
-static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
+struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *res;
struct dentry *parent;
@@ -1336,8 +1266,9 @@ out:
nfs_free_fhandle(fhandle);
return res;
}
+EXPORT_SYMBOL_GPL(nfs_lookup);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
@@ -1347,6 +1278,7 @@ const struct dentry_operations nfs4_dentry_operations = {
.d_automount = nfs_d_automount,
.d_release = nfs_d_release,
};
+EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
static fmode_t flags_to_mode(int flags)
{
@@ -1398,9 +1330,9 @@ out:
return err;
}
-static int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
- struct file *file, unsigned open_flags,
- umode_t mode, int *opened)
+int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ umode_t mode, int *opened)
{
struct nfs_open_context *ctx;
struct dentry *res;
@@ -1489,6 +1421,7 @@ no_open:
return finish_no_open(file, res);
}
+EXPORT_SYMBOL_GPL(nfs_atomic_open);
static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
@@ -1581,6 +1514,7 @@ out_error:
dput(parent);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_instantiate);
/*
* Following a failed create operation, we drop the dentry rather
@@ -1588,7 +1522,7 @@ out_error:
* that the operation succeeded on the server, but an error in the
* reply path made it appear to have failed.
*/
-static int nfs_create(struct inode *dir, struct dentry *dentry,
+int nfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
struct iattr attr;
@@ -1609,11 +1543,12 @@ out_err:
d_drop(dentry);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_create);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-static int
+int
nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct iattr attr;
@@ -1636,11 +1571,12 @@ out_err:
d_drop(dentry);
return status;
}
+EXPORT_SYMBOL_GPL(nfs_mknod);
/*
* See comments for nfs_proc_create regarding failed operations.
*/
-static int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct iattr attr;
int error;
@@ -1659,6 +1595,7 @@ out_err:
d_drop(dentry);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_mkdir);
static void nfs_dentry_handle_enoent(struct dentry *dentry)
{
@@ -1666,7 +1603,7 @@ static void nfs_dentry_handle_enoent(struct dentry *dentry)
d_delete(dentry);
}
-static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error;
@@ -1682,6 +1619,7 @@ static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
return error;
}
+EXPORT_SYMBOL_GPL(nfs_rmdir);
/*
* Remove a file after making sure there are no pending writes,
@@ -1706,7 +1644,7 @@ static int nfs_safe_remove(struct dentry *dentry)
}
if (inode != NULL) {
- nfs_inode_return_delegation(inode);
+ NFS_PROTO(inode)->return_delegation(inode);
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
/* The VFS may want to delete this inode */
if (error == 0)
@@ -1725,7 +1663,7 @@ out:
*
* If sillyrename() returns 0, we do nothing, otherwise we unlink.
*/
-static int nfs_unlink(struct inode *dir, struct dentry *dentry)
+int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
int error;
int need_rehash = 0;
@@ -1753,6 +1691,7 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
d_rehash(dentry);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_unlink);
/*
* To create a symbolic link, most file systems instantiate a new inode,
@@ -1769,7 +1708,7 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
* now have a new file handle and can instantiate an in-core NFS inode
* and move the raw page into its mapping.
*/
-static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
struct pagevec lru_pvec;
struct page *page;
@@ -1823,8 +1762,9 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_symlink);
-static int
+int
nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
@@ -1834,7 +1774,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
dentry->d_parent->d_name.name, dentry->d_name.name);
- nfs_inode_return_delegation(inode);
+ NFS_PROTO(inode)->return_delegation(inode);
d_drop(dentry);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
@@ -1844,6 +1784,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
}
return error;
}
+EXPORT_SYMBOL_GPL(nfs_link);
/*
* RENAME
@@ -1869,7 +1810,7 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
* If these conditions are met, we can drop the dentries before doing
* the rename.
*/
-static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *old_inode = old_dentry->d_inode;
@@ -1918,9 +1859,9 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- nfs_inode_return_delegation(old_inode);
+ NFS_PROTO(old_inode)->return_delegation(old_inode);
if (new_inode != NULL)
- nfs_inode_return_delegation(new_inode);
+ NFS_PROTO(new_inode)->return_delegation(new_inode);
error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name);
@@ -1942,6 +1883,7 @@ out:
dput(dentry);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_rename);
static DEFINE_SPINLOCK(nfs_access_lru_lock);
static LIST_HEAD(nfs_access_lru_list);
@@ -2042,6 +1984,7 @@ void nfs_access_zap_cache(struct inode *inode)
spin_unlock(&nfs_access_lru_lock);
nfs_access_free_list(&head);
}
+EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
{
@@ -2202,6 +2145,7 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
{
return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
}
+EXPORT_SYMBOL_GPL(nfs_may_open);
int nfs_permission(struct inode *inode, int mask)
{
@@ -2261,6 +2205,7 @@ out_notsup:
res = generic_permission(inode, mask);
goto out;
}
+EXPORT_SYMBOL_GPL(nfs_permission);
/*
* Local variables:
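The dir.c hunks un-static the generic directory methods (nfs_lookup, nfs_create, nfs_mkdir, ...) and export them, while the per-version inode_operations tables are dropped from this file. A sketch of how a version module can rebuild its table from the exported methods, reconstructed from the removed NFSv3 table; where this table finally lives is outside the hunks shown here:

const struct inode_operations nfs3_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_lookup,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs3_listxattr,	/* ACL xattr handlers stay with v3 */
	.getxattr	= nfs3_getxattr,
	.setxattr	= nfs3_setxattr,
	.removexattr	= nfs3_removexattr,
};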
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 48253372ab1d..1ba385b7c90d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -115,17 +115,28 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
* @nr_segs: size of iovec array
*
* The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, we shunt off direct
- * read and write requests before the VFS gets them, so this method
- * should never be called.
+ * the NFS client supports direct I/O. However, for most direct IO, we
+ * shunt off direct read and write requests before the VFS gets them,
+ * so this method is only ever called for swap.
*/
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
+#ifndef CONFIG_NFS_SWAP
dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
iocb->ki_filp->f_path.dentry->d_name.name,
(long long) pos, nr_segs);
return -EINVAL;
+#else
+ VM_BUG_ON(iocb->ki_left != PAGE_SIZE);
+ VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
+
+ if (rw == READ || rw == KERNEL_READ)
+ return nfs_file_direct_read(iocb, iov, nr_segs, pos,
+ rw == READ ? true : false);
+ return nfs_file_direct_write(iocb, iov, nr_segs, pos,
+ rw == WRITE ? true : false);
+#endif /* CONFIG_NFS_SWAP */
}
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -303,7 +314,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
*/
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -331,12 +342,20 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
GFP_KERNEL);
if (!pagevec)
break;
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
+ if (uio) {
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
npages, 1, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
+ } else {
+ WARN_ON(npages != 1);
+ result = get_kernel_page(user_addr, 1, pagevec);
+ if (WARN_ON(result != 1))
+ break;
+ }
+
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
@@ -386,21 +405,21 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_pageio_descriptor desc;
ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
- nfs_pageio_init_read(&desc, dreq->inode,
+ NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
&nfs_direct_read_completion_ops);
get_dreq(dreq);
desc.pg_dreq = dreq;
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_read_schedule_segment(&desc, vec, pos);
+ result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
if (result < 0)
break;
requested_bytes += result;
@@ -426,7 +445,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
}
static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -444,7 +463,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
+ result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
if (!result)
result = nfs_direct_wait(dreq);
NFS_I(inode)->read_io += result;
@@ -460,7 +479,7 @@ static void nfs_inode_dio_write_done(struct inode *inode)
inode_dio_done(inode);
}
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor desc;
@@ -478,7 +497,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
dreq->count = 0;
get_dreq(dreq);
- nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
+ NFS_PROTO(dreq->inode)->write_pageio_init(&desc, dreq->inode, FLUSH_STABLE,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
@@ -610,7 +629,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
*/
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
@@ -638,12 +657,19 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
if (!pagevec)
break;
- down_read(&current->mm->mmap_sem);
- result = get_user_pages(current, current->mm, user_addr,
- npages, 0, 0, pagevec, NULL);
- up_read(&current->mm->mmap_sem);
- if (result < 0)
- break;
+ if (uio) {
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ npages, 0, 0, pagevec, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (result < 0)
+ break;
+ } else {
+ WARN_ON(npages != 1);
+ result = get_kernel_page(user_addr, 0, pagevec);
+ if (WARN_ON(result != 1))
+ break;
+ }
if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
@@ -774,7 +800,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos)
+ loff_t pos, bool uio)
{
struct nfs_pageio_descriptor desc;
struct inode *inode = dreq->inode;
@@ -782,7 +808,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
size_t requested_bytes = 0;
unsigned long seg;
- nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
+ NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
&nfs_direct_write_completion_ops);
desc.pg_dreq = dreq;
get_dreq(dreq);
@@ -790,7 +816,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_write_schedule_segment(&desc, vec, pos);
+ result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
if (result < 0)
break;
requested_bytes += result;
@@ -818,7 +844,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos,
- size_t count)
+ size_t count, bool uio)
{
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -836,7 +862,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
+ result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -867,7 +893,7 @@ out:
* cache.
*/
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
@@ -892,7 +918,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
task_io_account_read(count);
- retval = nfs_direct_read(iocb, iov, nr_segs, pos);
+ retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
if (retval > 0)
iocb->ki_pos = pos + retval;
@@ -923,7 +949,7 @@ out:
* is no atomic O_APPEND write facility in the NFS protocol.
*/
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ unsigned long nr_segs, loff_t pos, bool uio)
{
ssize_t retval = -EINVAL;
struct file *file = iocb->ki_filp;
@@ -955,7 +981,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
task_io_account_write(count);
- retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+ retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
if (retval > 0) {
struct inode *inode = mapping->host;
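The new uio flag in direct.c only selects how the pages backing an iovec are pinned: true means a normal user-space buffer pinned via get_user_pages(), false means the single kernel-mapped page handed in by the swap-over-NFS path, wrapped via get_kernel_page(). A sketch of a shared helper capturing the pattern (this helper is not part of the patch; both schedule_segment paths open-code it):

static ssize_t nfs_direct_pin_pages(unsigned long user_addr,
				    unsigned int npages, int write_to_pages,
				    struct page **pagevec, bool uio)
{
	ssize_t result;

	if (uio) {
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, write_to_pages, 0,
					pagevec, NULL);
		up_read(&current->mm->mmap_sem);
	} else {
		/* The swap path always hands over exactly one kernel page. */
		WARN_ON(npages != 1);
		result = get_kernel_page(user_addr, write_to_pages, pagevec);
	}
	return result;
}

The read path passes write_to_pages=1 (data is copied into the pages) and the write path passes 0, matching the open-coded calls above.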
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index b3924b8a6000..31c26c4dcc23 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -8,6 +8,7 @@
#ifdef CONFIG_NFS_USE_KERNEL_DNS
+#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#include <linux/dns_resolver.h>
#include "dns_resolve.h"
@@ -27,9 +28,11 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
kfree(ip_addr);
return ret;
}
+EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
#else
+#include <linux/module.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
@@ -345,6 +348,7 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name,
ret = -ESRCH;
return ret;
}
+EXPORT_SYMBOL_GPL(nfs_dns_resolve_name);
int nfs_dns_resolver_cache_init(struct net *net)
{
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index a6708e6b438d..75d6d0a3d32e 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -16,6 +16,7 @@
* nfs regular file handling functions
*/
+#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -35,42 +36,24 @@
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
-#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_FILE
static const struct vm_operations_struct nfs_file_vm_ops;
-const struct inode_operations nfs_file_inode_operations = {
- .permission = nfs_permission,
- .getattr = nfs_getattr,
- .setattr = nfs_setattr,
-};
-
-#ifdef CONFIG_NFS_V3
-const struct inode_operations nfs3_file_inode_operations = {
- .permission = nfs_permission,
- .getattr = nfs_getattr,
- .setattr = nfs_setattr,
- .listxattr = nfs3_listxattr,
- .getxattr = nfs3_getxattr,
- .setxattr = nfs3_setxattr,
- .removexattr = nfs3_removexattr,
-};
-#endif /* CONFIG_NFS_v3 */
-
/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode) (0)
#endif
-static int nfs_check_flags(int flags)
+int nfs_check_flags(int flags)
{
if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_check_flags);
/*
* Open file
@@ -93,7 +76,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
return res;
}
-static int
+int
nfs_file_release(struct inode *inode, struct file *filp)
{
dprintk("NFS: release(%s/%s)\n",
@@ -103,6 +86,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
return nfs_release(inode, filp);
}
+EXPORT_SYMBOL_GPL(nfs_file_release);
/**
* nfs_revalidate_size - Revalidate the file size
@@ -135,7 +119,7 @@ force_reval:
return __nfs_revalidate_inode(server, inode);
}
-static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
{
dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
filp->f_path.dentry->d_parent->d_name.name,
@@ -156,11 +140,12 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
return generic_file_llseek(filp, offset, origin);
}
+EXPORT_SYMBOL_GPL(nfs_file_llseek);
/*
* Flush all dirty pages, and check for write errors.
*/
-static int
+int
nfs_file_flush(struct file *file, fl_owner_t id)
{
struct dentry *dentry = file->f_path.dentry;
@@ -178,14 +163,15 @@ nfs_file_flush(struct file *file, fl_owner_t id)
* If we're holding a write delegation, then just start the i/o
* but don't wait for completion (or send a commit).
*/
- if (nfs_have_delegation(inode, FMODE_WRITE))
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return filemap_fdatawrite(file->f_mapping);
/* Flush writes to the server and return any errors */
return vfs_fsync(file, 0);
}
+EXPORT_SYMBOL_GPL(nfs_file_flush);
-static ssize_t
+ssize_t
nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
@@ -194,7 +180,7 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_read(iocb, iov, nr_segs, pos);
+ return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
dprintk("NFS: read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -208,8 +194,9 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
}
return result;
}
+EXPORT_SYMBOL_GPL(nfs_file_read);
-static ssize_t
+ssize_t
nfs_file_splice_read(struct file *filp, loff_t *ppos,
struct pipe_inode_info *pipe, size_t count,
unsigned int flags)
@@ -230,8 +217,9 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos,
}
return res;
}
+EXPORT_SYMBOL_GPL(nfs_file_splice_read);
-static int
+int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
struct dentry *dentry = file->f_path.dentry;
@@ -251,6 +239,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
}
return status;
}
+EXPORT_SYMBOL_GPL(nfs_file_mmap);
/*
* Flush any dirty pages for this process, and check for write errors.
@@ -264,8 +253,8 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
* nfs_file_write() that a write error occurred, and hence cause it to
* fall back to doing a synchronous write.
*/
-static int
-nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+int
+nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file->f_path.dentry;
struct nfs_open_context *ctx = nfs_file_open_context(file);
@@ -277,9 +266,6 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
dentry->d_parent->d_name.name, dentry->d_name.name,
datasync);
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- mutex_lock(&inode->i_mutex);
-
nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
status = nfs_commit_inode(inode, FLUSH_SYNC);
@@ -290,10 +276,21 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = xchg(&ctx->error, 0);
if (!ret && status < 0)
ret = status;
- if (!ret && !datasync)
- /* application has asked for meta-data sync */
- ret = pnfs_layoutcommit_inode(inode, true);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_file_fsync_commit);
+
+static int
+nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ int ret;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ mutex_lock(&inode->i_mutex);
+ ret = nfs_file_fsync_commit(file, start, end, datasync);
mutex_unlock(&inode->i_mutex);
+
return ret;
}
@@ -442,7 +439,7 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
if (offset != 0)
return;
/* Cancel any unstarted writes on this page */
- nfs_wb_page_cancel(page->mapping->host, page);
+ nfs_wb_page_cancel(page_file_mapping(page)->host, page);
nfs_fscache_invalidate_page(page, page->mapping->host);
}
@@ -459,8 +456,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- /* Only do I/O if gfp is a superset of GFP_KERNEL */
- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
+ * doing this memory reclaim for a fs-related allocation.
+ */
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
+ !(current->flags & PF_FSTRANS)) {
int how = FLUSH_SYNC;
/* Don't let kswapd deadlock waiting for OOM RPC calls */
@@ -484,7 +484,7 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
*/
static int nfs_launder_page(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
@@ -494,6 +494,20 @@ static int nfs_launder_page(struct page *page)
return nfs_wb_page(inode, page);
}
+#ifdef CONFIG_NFS_SWAP
+static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{
+ *span = sis->pages;
+ return xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 1);
+}
+
+static void nfs_swap_deactivate(struct file *file)
+{
+ xs_swapper(NFS_CLIENT(file->f_mapping->host)->cl_xprt, 0);
+}
+#endif
+
const struct address_space_operations nfs_file_aops = {
.readpage = nfs_readpage,
.readpages = nfs_readpages,
@@ -508,6 +522,10 @@ const struct address_space_operations nfs_file_aops = {
.migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
.error_remove_page = generic_error_remove_page,
+#ifdef CONFIG_NFS_SWAP
+ .swap_activate = nfs_swap_activate,
+ .swap_deactivate = nfs_swap_deactivate,
+#endif
};
/*
@@ -533,7 +551,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
lock_page(page);
- mapping = page->mapping;
+ mapping = page_file_mapping(page);
if (mapping != dentry->d_inode->i_mapping)
goto out_unlock;
@@ -572,8 +590,8 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
return 0;
}
-static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t pos)
+ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
struct dentry * dentry = iocb->ki_filp->f_path.dentry;
struct inode * inode = dentry->d_inode;
@@ -582,7 +600,7 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
size_t count = iov_length(iov, nr_segs);
if (iocb->ki_filp->f_flags & O_DIRECT)
- return nfs_file_direct_write(iocb, iov, nr_segs, pos);
+ return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -623,10 +641,11 @@ out_swapfile:
printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
goto out;
}
+EXPORT_SYMBOL_GPL(nfs_file_write);
-static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
- struct file *filp, loff_t *ppos,
- size_t count, unsigned int flags)
+ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags)
{
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
@@ -654,6 +673,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
return ret;
}
+EXPORT_SYMBOL_GPL(nfs_file_splice_write);
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
@@ -670,7 +690,7 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
}
fl->fl_type = saved_type;
- if (nfs_have_delegation(inode, FMODE_READ))
+ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
goto out_noconflict;
if (is_local)
@@ -765,7 +785,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
* This makes locking act as a cache coherency point.
*/
nfs_sync_mapping(filp->f_mapping);
- if (!nfs_have_delegation(inode, FMODE_READ)) {
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
if (is_time_granular(&NFS_SERVER(inode)->time_delta))
__nfs_revalidate_inode(NFS_SERVER(inode), inode);
else
@@ -778,7 +798,7 @@ out:
/*
* Lock a (portion of) a file
*/
-static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int ret = -ENOLCK;
@@ -814,11 +834,12 @@ static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
out_err:
return ret;
}
+EXPORT_SYMBOL_GPL(nfs_lock);
/*
* Lock a (portion of) a file
*/
-static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int is_local = 0;
@@ -831,6 +852,15 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
+ /*
+ * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of
+ * any standard. In principle we might be able to support LOCK_MAND
+ * on NFSv2/3 since NLMv3/4 support DOS share modes, but for now the
+ * NFS code is not set up for it.
+ */
+ if (fl->fl_type & LOCK_MAND)
+ return -EINVAL;
+
if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
is_local = 1;
@@ -843,18 +873,20 @@ static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
return do_unlk(filp, cmd, fl, is_local);
return do_setlk(filp, cmd, fl, is_local);
}
+EXPORT_SYMBOL_GPL(nfs_flock);
/*
* There is no protocol support for leases, so we have no way to implement
* them correctly in the face of opens by other clients.
*/
-static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
{
dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name, arg);
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(nfs_setlease);
const struct file_operations nfs_file_operations = {
.llseek = nfs_file_llseek,
@@ -874,104 +906,4 @@ const struct file_operations nfs_file_operations = {
.check_flags = nfs_check_flags,
.setlease = nfs_setlease,
};
-
-#ifdef CONFIG_NFS_V4
-static int
-nfs4_file_open(struct inode *inode, struct file *filp)
-{
- struct nfs_open_context *ctx;
- struct dentry *dentry = filp->f_path.dentry;
- struct dentry *parent = NULL;
- struct inode *dir;
- unsigned openflags = filp->f_flags;
- struct iattr attr;
- int err;
-
- BUG_ON(inode != dentry->d_inode);
- /*
- * If no cached dentry exists or if it's negative, NFSv4 handled the
- * opens in ->lookup() or ->create().
- *
- * We only get this far for a cached positive dentry. We skipped
- * revalidation, so handle it here by dropping the dentry and returning
- * -EOPENSTALE. The VFS will retry the lookup/create/open.
- */
-
- dprintk("NFS: open file(%s/%s)\n",
- dentry->d_parent->d_name.name,
- dentry->d_name.name);
-
- if ((openflags & O_ACCMODE) == 3)
- openflags--;
-
- /* We can't create new files here */
- openflags &= ~(O_CREAT|O_EXCL);
-
- parent = dget_parent(dentry);
- dir = parent->d_inode;
-
- ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
- err = PTR_ERR(ctx);
- if (IS_ERR(ctx))
- goto out;
-
- attr.ia_valid = ATTR_OPEN;
- if (openflags & O_TRUNC) {
- attr.ia_valid |= ATTR_SIZE;
- attr.ia_size = 0;
- nfs_wb_all(inode);
- }
-
- inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
- if (IS_ERR(inode)) {
- err = PTR_ERR(inode);
- switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
- default:
- goto out_drop;
- }
- }
- iput(inode);
- if (inode != dentry->d_inode)
- goto out_drop;
-
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- nfs_file_set_open_context(filp, ctx);
- err = 0;
-
-out_put_ctx:
- put_nfs_open_context(ctx);
-out:
- dput(parent);
- return err;
-
-out_drop:
- d_drop(dentry);
- err = -EOPENSTALE;
- goto out_put_ctx;
-}
-
-const struct file_operations nfs4_file_operations = {
- .llseek = nfs_file_llseek,
- .read = do_sync_read,
- .write = do_sync_write,
- .aio_read = nfs_file_read,
- .aio_write = nfs_file_write,
- .mmap = nfs_file_mmap,
- .open = nfs4_file_open,
- .flush = nfs_file_flush,
- .release = nfs_file_release,
- .fsync = nfs_file_fsync,
- .lock = nfs_lock,
- .flock = nfs_flock,
- .splice_read = nfs_file_splice_read,
- .splice_write = nfs_file_splice_write,
- .check_flags = nfs_check_flags,
- .setlease = nfs_setlease,
-};
-#endif /* CONFIG_NFS_V4 */
+EXPORT_SYMBOL_GPL(nfs_file_operations);
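Splitting nfs_file_fsync() into an exported nfs_file_fsync_commit() plus a thin v2/v3 wrapper removes the pnfs_layoutcommit_inode() call, and with it the last v4 dependency, from the common path. A sketch of how a v4-specific wrapper can add that step back; its actual location and final form are outside the hunks shown here:

static int nfs4_file_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int ret;
	struct inode *inode = file->f_path.dentry->d_inode;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	mutex_lock(&inode->i_mutex);
	ret = nfs_file_fsync_commit(file, start, end, datasync);
	if (!ret && !datasync)
		/* application has asked for meta-data sync */
		ret = pnfs_layoutcommit_inode(inode, true);
	mutex_unlock(&inode->i_mutex);

	return ret;
}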
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index a67990f90bd7..4654ced096a6 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -23,21 +23,15 @@
#include <linux/sunrpc/stats.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
-#include <linux/nfs4_mount.h>
#include <linux/lockd/bind.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <asm/uaccess.h>
-#include "nfs4_fs.h"
-#include "delegation.h"
-#include "internal.h"
-
#define NFSDBG_FACILITY NFSDBG_CLIENT
/*
@@ -135,47 +129,3 @@ out:
nfs_free_fattr(fsinfo.fattr);
return ret;
}
-
-#ifdef CONFIG_NFS_V4
-
-int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
-{
- struct nfs_fsinfo fsinfo;
- int ret = -ENOMEM;
-
- dprintk("--> nfs4_get_rootfh()\n");
-
- fsinfo.fattr = nfs_alloc_fattr();
- if (fsinfo.fattr == NULL)
- goto out;
-
- /* Start by getting the root filehandle from the server */
- ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
- if (ret < 0) {
- dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
- goto out;
- }
-
- if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
- || !S_ISDIR(fsinfo.fattr->mode)) {
- printk(KERN_ERR "nfs4_get_rootfh:"
- " getroot encountered non-directory\n");
- ret = -ENOTDIR;
- goto out;
- }
-
- if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
- printk(KERN_ERR "nfs4_get_rootfh:"
- " getroot obtained referral\n");
- ret = -EREMOTE;
- goto out;
- }
-
- memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
-out:
- nfs_free_fattr(fsinfo.fattr);
- dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
- return ret;
-}
-
-#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 864c51e4b400..a850079467d8 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -52,8 +52,6 @@
#define NFS_UINT_MAXLEN 11
-/* Default cache timeout is 10 minutes */
-unsigned int nfs_idmap_cache_timeout = 600;
static const struct cred *id_resolver_cache;
static struct key_type key_type_id_resolver_legacy;
@@ -63,6 +61,12 @@ struct idmap {
struct mutex idmap_mutex;
};
+struct idmap_legacy_upcalldata {
+ struct rpc_pipe_msg pipe_msg;
+ struct idmap_msg idmap_msg;
+ struct idmap *idmap;
+};
+
/**
* nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
* @fattr: fully initialised struct nfs_fattr
@@ -205,12 +209,18 @@ static int nfs_idmap_init_keyring(void)
if (ret < 0)
goto failed_put_key;
+ ret = register_key_type(&key_type_id_resolver_legacy);
+ if (ret < 0)
+ goto failed_reg_legacy;
+
set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
cred->thread_keyring = keyring;
cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
id_resolver_cache = cred;
return 0;
+failed_reg_legacy:
+ unregister_key_type(&key_type_id_resolver);
failed_put_key:
key_put(keyring);
failed_put_cred:
@@ -222,6 +232,7 @@ static void nfs_idmap_quit_keyring(void)
{
key_revoke(id_resolver_cache->thread_keyring);
unregister_key_type(&key_type_id_resolver);
+ unregister_key_type(&key_type_id_resolver_legacy);
put_cred(id_resolver_cache);
}
@@ -319,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
name, namelen, type, data,
data_size, idmap);
+ idmap->idmap_key_cons = NULL;
mutex_unlock(&idmap->idmap_mutex);
}
return ret;
@@ -359,7 +371,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
}
/* idmap classic begins here */
-module_param(nfs_idmap_cache_timeout, int, 0644);
enum {
Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
@@ -376,16 +387,18 @@ static const match_table_t nfs_idmap_tokens = {
static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
size_t);
+static void idmap_release_pipe(struct inode *);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
static const struct rpc_pipe_ops idmap_upcall_ops = {
.upcall = rpc_pipe_generic_upcall,
.downcall = idmap_pipe_downcall,
+ .release_pipe = idmap_release_pipe,
.destroy_msg = idmap_pipe_destroy_msg,
};
static struct key_type key_type_id_resolver_legacy = {
- .name = "id_resolver",
+ .name = "id_legacy",
.instantiate = user_instantiate,
.match = user_match,
.revoke = user_revoke,
@@ -612,7 +625,8 @@ void nfs_idmap_quit(void)
nfs_idmap_quit_keyring();
}
-static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im,
+static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
+ struct idmap_msg *im,
struct rpc_pipe_msg *msg)
{
substring_t substr;
@@ -655,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
const char *op,
void *aux)
{
+ struct idmap_legacy_upcalldata *data;
struct rpc_pipe_msg *msg;
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
@@ -662,33 +677,33 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
int ret = -ENOMEM;
/* msg and im are freed in idmap_pipe_destroy_msg */
- msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg)
- goto out0;
-
- im = kmalloc(sizeof(*im), GFP_KERNEL);
- if (!im)
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
goto out1;
- ret = nfs_idmap_prepare_message(key->description, im, msg);
+ msg = &data->pipe_msg;
+ im = &data->idmap_msg;
+ data->idmap = idmap;
+
+ ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
if (ret < 0)
goto out2;
+ BUG_ON(idmap->idmap_key_cons != NULL);
idmap->idmap_key_cons = cons;
ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
if (ret < 0)
- goto out2;
+ goto out3;
return ret;
+out3:
+ idmap->idmap_key_cons = NULL;
out2:
- kfree(im);
+ kfree(data);
out1:
- kfree(msg);
-out0:
- key_revoke(cons->key);
- key_revoke(cons->authkey);
+ complete_request_key(cons, ret);
return ret;
}
@@ -722,11 +737,18 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
struct idmap *idmap = (struct idmap *)rpci->private;
- struct key_construction *cons = idmap->idmap_key_cons;
+ struct key_construction *cons;
struct idmap_msg im;
size_t namelen_in;
int ret;
+ /* If instantiation is successful, anyone waiting for key construction
+ * will have been woken up and someone else may now have used
+ * idmap_key_cons - so after this point we may no longer touch it.
+ */
+ cons = ACCESS_ONCE(idmap->idmap_key_cons);
+ idmap->idmap_key_cons = NULL;
+
if (mlen != sizeof(im)) {
ret = -ENOSPC;
goto out;
@@ -738,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
}
if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
- ret = mlen;
- complete_request_key(idmap->idmap_key_cons, -ENOKEY);
- goto out_incomplete;
+ ret = -ENOKEY;
+ goto out;
}
namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
@@ -756,17 +777,33 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
}
out:
- complete_request_key(idmap->idmap_key_cons, ret);
-out_incomplete:
+ complete_request_key(cons, ret);
return ret;
}
static void
idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
+ struct idmap_legacy_upcalldata *data = container_of(msg,
+ struct idmap_legacy_upcalldata,
+ pipe_msg);
+ struct idmap *idmap = data->idmap;
+ struct key_construction *cons;
+ if (msg->errno) {
+ cons = ACCESS_ONCE(idmap->idmap_key_cons);
+ idmap->idmap_key_cons = NULL;
+ complete_request_key(cons, msg->errno);
+ }
/* Free memory allocated in nfs_idmap_legacy_upcall() */
- kfree(msg->data);
- kfree(msg);
+ kfree(data);
+}
+
+static void
+idmap_release_pipe(struct inode *inode)
+{
+ struct rpc_inode *rpci = RPC_I(inode);
+ struct idmap *idmap = (struct idmap *)rpci->private;
+ idmap->idmap_key_cons = NULL;
}
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f7296983eba6..c6e895f0fbf3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -32,7 +32,6 @@
#include <linux/lockd/bind.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
-#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/nfs_xdr.h>
@@ -51,6 +50,7 @@
#include "fscache.h"
#include "dns_resolve.h"
#include "pnfs.h"
+#include "nfs.h"
#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -82,6 +82,7 @@ int nfs_wait_bit_killable(void *word)
freezable_schedule();
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
/**
* nfs_compat_user_ino64 - returns the user-visible inode number
@@ -106,7 +107,7 @@ u64 nfs_compat_user_ino64(u64 fileid)
return ino;
}
-static void nfs_clear_inode(struct inode *inode)
+void nfs_clear_inode(struct inode *inode)
{
/*
* The following should never happen...
@@ -117,6 +118,7 @@ static void nfs_clear_inode(struct inode *inode)
nfs_access_zap_cache(inode);
nfs_fscache_release_inode_cookie(inode);
}
+EXPORT_SYMBOL_GPL(nfs_clear_inode);
void nfs_evict_inode(struct inode *inode)
{
@@ -186,6 +188,7 @@ void nfs_zap_acl_cache(struct inode *inode)
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
spin_unlock(&inode->i_lock);
}
+EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
void nfs_invalidate_atime(struct inode *inode)
{
@@ -193,6 +196,7 @@ void nfs_invalidate_atime(struct inode *inode)
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
spin_unlock(&inode->i_lock);
}
+EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
/*
* Invalidate, but do not unhash, the inode.
@@ -391,6 +395,7 @@ out_no_inode:
dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
goto out;
}
+EXPORT_SYMBOL_GPL(nfs_fhget);
#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
@@ -430,7 +435,7 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
* Return any delegations if we're going to change ACLs
*/
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
- nfs_inode_return_delegation(inode);
+ NFS_PROTO(inode)->return_delegation(inode);
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
if (error == 0)
nfs_refresh_inode(inode, fattr);
@@ -438,6 +443,7 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
out:
return error;
}
+EXPORT_SYMBOL_GPL(nfs_setattr);
/**
* nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
@@ -496,6 +502,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
nfs_vmtruncate(inode, attr->ia_size);
}
}
+EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
@@ -535,6 +542,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
out:
return err;
}
+EXPORT_SYMBOL_GPL(nfs_getattr);
static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
{
@@ -623,6 +631,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
return;
nfs_revalidate_inode(server, inode);
}
+EXPORT_SYMBOL_GPL(nfs_close_context);
struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
{
@@ -649,6 +658,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
ctx->mdsthreshold = NULL;
return ctx;
}
+EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
@@ -656,6 +666,7 @@ struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
atomic_inc(&ctx->lock_context.count);
return ctx;
}
+EXPORT_SYMBOL_GPL(get_nfs_open_context);
static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
{
@@ -683,6 +694,7 @@ void put_nfs_open_context(struct nfs_open_context *ctx)
{
__put_nfs_open_context(ctx, 0);
}
+EXPORT_SYMBOL_GPL(put_nfs_open_context);
/*
* Ensure that mmap has a recent RPC credential for use when writing out
@@ -698,6 +710,7 @@ void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
+EXPORT_SYMBOL_GPL(nfs_file_set_open_context);
/*
* Given an inode, search for an open context with the desired characteristics
@@ -842,6 +855,7 @@ int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
return NFS_STALE(inode) ? -ESTALE : 0;
return __nfs_revalidate_inode(server, inode);
}
+EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
@@ -883,6 +897,10 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
+ /* swapfiles are not supposed to be shared. */
+ if (IS_SWAPFILE(inode))
+ goto out;
+
if (nfs_mapping_need_revalidate_inode(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
@@ -1028,6 +1046,7 @@ void nfs_fattr_init(struct nfs_fattr *fattr)
fattr->owner_name = NULL;
fattr->group_name = NULL;
}
+EXPORT_SYMBOL_GPL(nfs_fattr_init);
struct nfs_fattr *nfs_alloc_fattr(void)
{
@@ -1038,6 +1057,7 @@ struct nfs_fattr *nfs_alloc_fattr(void)
nfs_fattr_init(fattr);
return fattr;
}
+EXPORT_SYMBOL_GPL(nfs_alloc_fattr);
struct nfs_fh *nfs_alloc_fhandle(void)
{
@@ -1048,6 +1068,7 @@ struct nfs_fh *nfs_alloc_fhandle(void)
fh->size = 0;
return fh;
}
+EXPORT_SYMBOL_GPL(nfs_alloc_fhandle);
#ifdef NFS_DEBUG
/*
@@ -1168,6 +1189,7 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
return status;
}
+EXPORT_SYMBOL_GPL(nfs_refresh_inode);
static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
{
@@ -1204,6 +1226,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
spin_unlock(&inode->i_lock);
return status;
}
+EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
/**
* nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
@@ -1255,6 +1278,7 @@ out_noforce:
spin_unlock(&inode->i_lock);
return status;
}
+EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
/*
* Many nfs protocol calls return the new file attributes after
@@ -1457,7 +1481,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
- if (!nfs_have_delegation(inode, FMODE_READ) ||
+ if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
(save_cache_validity & NFS_INO_REVAL_FORCED))
nfsi->cache_validity |= invalid;
@@ -1472,27 +1496,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
return -ESTALE;
}
-
-#ifdef CONFIG_NFS_V4
-
-/*
- * Clean out any remaining NFSv4 state that might be left over due
- * to open() calls that passed nfs_atomic_lookup, but failed to call
- * nfs_open().
- */
-void nfs4_evict_inode(struct inode *inode)
-{
- truncate_inode_pages(&inode->i_data, 0);
- clear_inode(inode);
- pnfs_return_layout(inode);
- pnfs_destroy_layout(NFS_I(inode));
- /* If we are holding a delegation, return it! */
- nfs_inode_return_delegation_noreclaim(inode);
- /* First call standard NFS clear_inode() code */
- nfs_clear_inode(inode);
-}
-#endif
-
struct inode *nfs_alloc_inode(struct super_block *sb)
{
struct nfs_inode *nfsi;
@@ -1505,11 +1508,12 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
nfsi->acl_access = ERR_PTR(-EAGAIN);
nfsi->acl_default = ERR_PTR(-EAGAIN);
#endif
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
nfsi->nfs4_acl = NULL;
#endif /* CONFIG_NFS_V4 */
return &nfsi->vfs_inode;
}
+EXPORT_SYMBOL_GPL(nfs_alloc_inode);
static void nfs_i_callback(struct rcu_head *head)
{
@@ -1521,10 +1525,11 @@ void nfs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, nfs_i_callback);
}
+EXPORT_SYMBOL_GPL(nfs_destroy_inode);
static inline void nfs4_init_once(struct nfs_inode *nfsi)
{
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
INIT_LIST_HEAD(&nfsi->open_states);
nfsi->delegation = NULL;
nfsi->delegation_state = 0;
@@ -1570,6 +1575,7 @@ static void nfs_destroy_inodecache(void)
}
struct workqueue_struct *nfsiod_workqueue;
+EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
* start up the nfsiod workqueue
@@ -1628,81 +1634,76 @@ static int __init init_nfs_fs(void)
{
int err;
- err = nfs_idmap_init();
- if (err < 0)
- goto out10;
-
err = nfs_dns_resolver_init();
if (err < 0)
- goto out9;
+ goto out10;
err = register_pernet_subsys(&nfs_net_ops);
if (err < 0)
- goto out8;
+ goto out9;
err = nfs_fscache_register();
if (err < 0)
- goto out7;
+ goto out8;
err = nfsiod_start();
if (err)
- goto out6;
+ goto out7;
err = nfs_fs_proc_init();
if (err)
- goto out5;
+ goto out6;
err = nfs_init_nfspagecache();
if (err)
- goto out4;
+ goto out5;
err = nfs_init_inodecache();
if (err)
- goto out3;
+ goto out4;
err = nfs_init_readpagecache();
if (err)
- goto out2;
+ goto out3;
err = nfs_init_writepagecache();
if (err)
- goto out1;
+ goto out2;
err = nfs_init_directcache();
if (err)
- goto out0;
+ goto out1;
#ifdef CONFIG_PROC_FS
rpc_proc_register(&init_net, &nfs_rpcstat);
#endif
if ((err = register_nfs_fs()) != 0)
- goto out;
+ goto out0;
+
return 0;
-out:
+out0:
#ifdef CONFIG_PROC_FS
rpc_proc_unregister(&init_net, "nfs");
#endif
nfs_destroy_directcache();
-out0:
- nfs_destroy_writepagecache();
out1:
- nfs_destroy_readpagecache();
+ nfs_destroy_writepagecache();
out2:
- nfs_destroy_inodecache();
+ nfs_destroy_readpagecache();
out3:
- nfs_destroy_nfspagecache();
+ nfs_destroy_inodecache();
out4:
- nfs_fs_proc_exit();
+ nfs_destroy_nfspagecache();
out5:
- nfsiod_stop();
+ nfs_fs_proc_exit();
out6:
- nfs_fscache_unregister();
+ nfsiod_stop();
out7:
- unregister_pernet_subsys(&nfs_net_ops);
+ nfs_fscache_unregister();
out8:
- nfs_dns_resolver_destroy();
+ unregister_pernet_subsys(&nfs_net_ops);
out9:
- nfs_idmap_quit();
+ nfs_dns_resolver_destroy();
out10:
return err;
}
@@ -1717,7 +1718,6 @@ static void __exit exit_nfs_fs(void)
nfs_fscache_unregister();
unregister_pernet_subsys(&nfs_net_ops);
nfs_dns_resolver_destroy();
- nfs_idmap_quit();
#ifdef CONFIG_PROC_FS
rpc_proc_unregister(&init_net, "nfs");
#endif
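The large block of label churn in init_nfs_fs() is just the usual goto-unwind ladder being renumbered once nfs_idmap_init() moves out of the generic module: each failure jumps to the label that undoes the previously successful step, so removing one step shifts every later label up by one. The idiom, with placeholder step names, looks like this:

static int __init example_init(void)
{
	int err;

	err = step_a_init();		/* placeholder names */
	if (err)
		goto out2;
	err = step_b_init();
	if (err)
		goto out1;
	return 0;
out1:
	step_a_exit();
out2:
	return err;
}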
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 18f99ef71343..31fdb03225cd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -85,6 +85,17 @@ struct nfs_clone_mount {
*/
#define NFS_MAX_READDIR_PAGES 8
+struct nfs_client_initdata {
+ unsigned long init_flags;
+ const char *hostname;
+ const struct sockaddr *addr;
+ size_t addrlen;
+ struct nfs_subversion *nfs_mod;
+ int proto;
+ u32 minorversion;
+ struct net *net;
+};
+
/*
* In-kernel mount arguments
*/
@@ -142,25 +153,45 @@ struct nfs_mount_request {
struct net *net;
};
+struct nfs_mount_info {
+ void (*fill_super)(struct super_block *, struct nfs_mount_info *);
+ int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
+ struct nfs_parsed_mount_data *parsed;
+ struct nfs_clone_mount *cloned;
+ struct nfs_fh *mntfh;
+};
+
extern int nfs_mount(struct nfs_mount_request *info);
extern void nfs_umount(const struct nfs_mount_request *info);
/* client.c */
extern const struct rpc_program nfs_program;
extern void nfs_clients_init(struct net *net);
+extern struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *);
+int nfs_create_rpc_client(struct nfs_client *, const struct rpc_timeout *, rpc_authflavor_t);
+struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
+ const struct rpc_timeout *, const char *,
+ rpc_authflavor_t);
+int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
+void nfs_server_insert_lists(struct nfs_server *);
+void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
+ rpc_authflavor_t);
+struct nfs_server *nfs_alloc_server(void);
+void nfs_server_copy_userdata(struct nfs_server *, struct nfs_server *);
extern void nfs_cleanup_cb_ident_idr(struct net *);
extern void nfs_put_client(struct nfs_client *);
+extern void nfs_free_client(struct nfs_client *);
extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
extern struct nfs_client *
nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
struct nfs4_sessionid *);
-extern struct nfs_server *nfs_create_server(
- const struct nfs_parsed_mount_data *,
- struct nfs_fh *);
+extern struct nfs_server *nfs_create_server(struct nfs_mount_info *,
+ struct nfs_subversion *);
extern struct nfs_server *nfs4_create_server(
- const struct nfs_parsed_mount_data *,
- struct nfs_fh *);
+ struct nfs_mount_info *,
+ struct nfs_subversion *);
extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
struct nfs_fh *);
extern void nfs_free_server(struct nfs_server *server);
@@ -188,6 +219,17 @@ static inline void nfs_fs_proc_exit(void)
}
#endif
+#ifdef CONFIG_NFS_V4_1
+int nfs_sockaddr_match_ipaddr(const struct sockaddr *, const struct sockaddr *);
+#endif
+
+/* nfs3client.c */
+#if IS_ENABLED(CONFIG_NFS_V3)
+struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subversion *);
+struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, rpc_authflavor_t);
+#endif
+
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
extern struct svc_version nfs4_callback_version4;
@@ -220,7 +262,7 @@ extern int nfs3_decode_dirent(struct xdr_stream *,
struct nfs_entry *, int);
/* nfs4xdr.c */
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs4_decode_dirent(struct xdr_stream *,
struct nfs_entry *, int);
#endif
@@ -230,7 +272,7 @@ extern const u32 nfs41_maxwrite_overhead;
#endif
/* nfs4proc.c */
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern struct rpc_procinfo nfs4_procedures[];
#endif
@@ -245,25 +287,63 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
/* dir.c */
extern int nfs_access_cache_shrinker(struct shrinker *shrink,
struct shrink_control *sc);
+struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
+int nfs_create(struct inode *, struct dentry *, umode_t, bool);
+int nfs_mkdir(struct inode *, struct dentry *, umode_t);
+int nfs_rmdir(struct inode *, struct dentry *);
+int nfs_unlink(struct inode *, struct dentry *);
+int nfs_symlink(struct inode *, struct dentry *, const char *);
+int nfs_link(struct dentry *, struct inode *, struct dentry *);
+int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+
+/* file.c */
+int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
+loff_t nfs_file_llseek(struct file *, loff_t, int);
+int nfs_file_flush(struct file *, fl_owner_t);
+ssize_t nfs_file_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
+ size_t, unsigned int);
+int nfs_file_mmap(struct file *, struct vm_area_struct *);
+ssize_t nfs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+int nfs_file_release(struct inode *, struct file *);
+int nfs_lock(struct file *, int, struct file_lock *);
+int nfs_flock(struct file *, int, struct file_lock *);
+ssize_t nfs_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *,
+ size_t, unsigned int);
+int nfs_check_flags(int);
+int nfs_setlease(struct file *, long, struct file_lock **);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *, struct writeback_control *);
+extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
-#ifdef CONFIG_NFS_V4
-extern void nfs4_evict_inode(struct inode *);
-#endif
void nfs_zap_acl_cache(struct inode *inode);
extern int nfs_wait_bit_killable(void *word);
/* super.c */
+extern const struct super_operations nfs_sops;
+extern struct file_system_type nfs_fs_type;
extern struct file_system_type nfs_xdev_fs_type;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern struct file_system_type nfs4_xdev_fs_type;
extern struct file_system_type nfs4_referral_fs_type;
#endif
+struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *,
+ struct nfs_subversion *);
+void nfs_initialise_sb(struct super_block *);
+int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
+int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *);
+struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *,
+ struct nfs_mount_info *, struct nfs_subversion *);
+struct dentry *nfs_fs_mount(struct file_system_type *, int, const char *, void *);
+struct dentry *nfs_xdev_mount_common(struct file_system_type *, int,
+ const char *, struct nfs_mount_info *);
+void nfs_kill_super(struct super_block *);
+void nfs_fill_super(struct super_block *, struct nfs_mount_info *);
extern struct rpc_stat nfs_rpcstat;
@@ -284,7 +364,7 @@ struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
/* getroot.c */
extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
const char *);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
const char *);
@@ -304,12 +384,23 @@ extern int nfs_initiate_read(struct rpc_clnt *clnt,
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
-extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode,
const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
+/* super.c */
+void nfs_clone_super(struct super_block *, struct nfs_mount_info *);
+void nfs_umount_begin(struct super_block *);
+int nfs_statfs(struct dentry *, struct kstatfs *);
+int nfs_show_options(struct seq_file *, struct dentry *);
+int nfs_show_devname(struct seq_file *, struct dentry *);
+int nfs_show_path(struct seq_file *, struct dentry *);
+int nfs_show_stats(struct seq_file *, struct dentry *);
+void nfs_put_super(struct super_block *);
+int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+
/* write.c */
extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags,
@@ -318,7 +409,7 @@ extern struct nfs_write_header *nfs_writehdr_alloc(void);
extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
-extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags,
const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
@@ -463,13 +554,14 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
static inline
unsigned int nfs_page_length(struct page *page)
{
- loff_t i_size = i_size_read(page->mapping->host);
+ loff_t i_size = i_size_read(page_file_mapping(page)->host);
if (i_size > 0) {
+ pgoff_t page_index = page_file_index(page);
pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
- if (page->index < end_index)
+ if (page_index < end_index)
return PAGE_CACHE_SIZE;
- if (page->index == end_index)
+ if (page_index == end_index)
return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
}
return 0;
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index 08b9c93675da..655925373b91 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -7,6 +7,7 @@
* NFS namespace
*/
+#include <linux/module.h>
#include <linux/dcache.h>
#include <linux/gfp.h>
#include <linux/mount.h>
@@ -112,6 +113,7 @@ Elong_unlock:
Elong:
return ERR_PTR(-ENAMETOOLONG);
}
+EXPORT_SYMBOL_GPL(nfs_path);
/*
* nfs_d_automount - Handle crossing a mountpoint on the server
@@ -195,20 +197,7 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
const char *devname,
struct nfs_clone_mount *mountdata)
{
-#ifdef CONFIG_NFS_V4
- struct vfsmount *mnt = ERR_PTR(-EINVAL);
- switch (server->nfs_client->rpc_ops->version) {
- case 2:
- case 3:
- mnt = vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
- break;
- case 4:
- mnt = vfs_kern_mount(&nfs4_xdev_fs_type, 0, devname, mountdata);
- }
- return mnt;
-#else
return vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
-#endif
}
/**
@@ -253,6 +242,7 @@ out:
dprintk("<-- nfs_do_submount() = %p\n", mnt);
return mnt;
}
+EXPORT_SYMBOL_GPL(nfs_do_submount);
struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
struct nfs_fh *fh, struct nfs_fattr *fattr)
@@ -268,3 +258,4 @@ struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
}
+EXPORT_SYMBOL_GPL(nfs_submount);
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index 8a6394edb8b0..0539de1b8d1f 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -20,7 +20,7 @@ struct nfs_net {
wait_queue_head_t bl_wq;
struct list_head nfs_client_list;
struct list_head nfs_volume_list;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
struct idr cb_ident_idr; /* Protected by nfs_client_lock */
#endif
spinlock_t nfs_client_lock;
diff --git a/fs/nfs/nfs.h b/fs/nfs/nfs.h
new file mode 100644
index 000000000000..43679df56cd0
--- /dev/null
+++ b/fs/nfs/nfs.h
@@ -0,0 +1,29 @@
+/*
+ * Functions and structures exported by the NFS module
+ *
+ * Function and structures exported by the NFS module
+ * for use by NFS version-specific modules.
+ */
+#ifndef __LINUX_INTERNAL_NFS_H
+#define __LINUX_INTERNAL_NFS_H
+
+#include <linux/fs.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/nfs_xdr.h>
+
+struct nfs_subversion {
+ struct module *owner; /* THIS_MODULE pointer */
+ struct file_system_type *nfs_fs; /* NFS filesystem type */
+ const struct rpc_version *rpc_vers; /* NFS version information */
+ const struct nfs_rpc_ops *rpc_ops; /* NFS operations */
+ const struct super_operations *sops; /* NFS Super operations */
+ const struct xattr_handler **xattr; /* NFS xattr handlers */
+ struct list_head list; /* List of NFS versions */
+};
+
+struct nfs_subversion *get_nfs_version(unsigned int);
+void put_nfs_version(struct nfs_subversion *);
+void register_nfs_version(struct nfs_subversion *);
+void unregister_nfs_version(struct nfs_subversion *);
+
+#endif /* __LINUX_INTERNAL_NFS_H */
diff --git a/fs/nfs/nfs2super.c b/fs/nfs/nfs2super.c
new file mode 100644
index 000000000000..0a9782c9171a
--- /dev/null
+++ b/fs/nfs/nfs2super.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012 Netapp, Inc. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/nfs_fs.h>
+#include "internal.h"
+#include "nfs.h"
+
+static struct nfs_subversion nfs_v2 = {
+ .owner = THIS_MODULE,
+ .nfs_fs = &nfs_fs_type,
+ .rpc_vers = &nfs_version2,
+ .rpc_ops = &nfs_v2_clientops,
+ .sops = &nfs_sops,
+};
+
+static int __init init_nfs_v2(void)
+{
+ register_nfs_version(&nfs_v2);
+ return 0;
+}
+
+static void __exit exit_nfs_v2(void)
+{
+ unregister_nfs_version(&nfs_v2);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_nfs_v2);
+module_exit(exit_nfs_v2);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index baf759bccd05..d04f0df7be55 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -106,19 +106,16 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
{
u32 recvd, count;
- size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
if (unlikely(p == NULL))
goto out_overflow;
count = be32_to_cpup(p);
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
+ recvd = xdr_read_pages(xdr, count);
if (unlikely(count > recvd))
goto out_cheating;
out:
- xdr_read_pages(xdr, count);
result->eof = 0; /* NFSv2 does not pass EOF flag on the wire. */
result->count = count;
return count;
@@ -440,7 +437,6 @@ static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
static int decode_path(struct xdr_stream *xdr)
{
u32 length, recvd;
- size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
@@ -449,12 +445,9 @@ static int decode_path(struct xdr_stream *xdr)
length = be32_to_cpup(p);
if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
goto out_size;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
+ recvd = xdr_read_pages(xdr, length);
if (unlikely(length > recvd))
goto out_cheating;
-
- xdr_read_pages(xdr, length);
xdr_terminate_string(xdr->buf, length);
return 0;
out_size:
@@ -972,22 +965,7 @@ out_overflow:
*/
static int decode_readdirok(struct xdr_stream *xdr)
{
- u32 recvd, pglen;
- size_t hdrlen;
-
- pglen = xdr->buf->page_len;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
- if (unlikely(pglen > recvd))
- goto out_cheating;
-out:
- xdr_read_pages(xdr, pglen);
- return pglen;
-out_cheating:
- dprintk("NFS: server cheating in readdir result: "
- "pglen %u > recvd %u\n", pglen, recvd);
- pglen = recvd;
- goto out;
+ return xdr_read_pages(xdr, xdr->buf->page_len);
}
static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
new file mode 100644
index 000000000000..b3fc65ef39ca
--- /dev/null
+++ b/fs/nfs/nfs3client.c
@@ -0,0 +1,65 @@
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include "internal.h"
+
+#ifdef CONFIG_NFS_V3_ACL
+static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
+static const struct rpc_version *nfsacl_version[] = {
+ [3] = &nfsacl_version3,
+};
+
+const struct rpc_program nfsacl_program = {
+ .name = "nfsacl",
+ .number = NFS_ACL_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfsacl_version),
+ .version = nfsacl_version,
+ .stats = &nfsacl_rpcstat,
+};
+
+/*
+ * Initialise an NFSv3 ACL client connection
+ */
+static void nfs_init_server_aclclient(struct nfs_server *server)
+{
+ if (server->flags & NFS_MOUNT_NOACL)
+ goto out_noacl;
+
+ server->client_acl = rpc_bind_new_program(server->client, &nfsacl_program, 3);
+ if (IS_ERR(server->client_acl))
+ goto out_noacl;
+
+ /* No errors! Assume that Sun nfsacls are supported */
+ server->caps |= NFS_CAP_ACLS;
+ return;
+
+out_noacl:
+ server->caps &= ~NFS_CAP_ACLS;
+}
+#else
+static inline void nfs_init_server_aclclient(struct nfs_server *server)
+{
+ server->flags &= ~NFS_MOUNT_NOACL;
+ server->caps &= ~NFS_CAP_ACLS;
+}
+#endif
+
+struct nfs_server *nfs3_create_server(struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
+{
+ struct nfs_server *server = nfs_create_server(mount_info, nfs_mod);
+ /* Create a client RPC handle for the NFS v3 ACL management interface */
+ if (!IS_ERR(server))
+ nfs_init_server_aclclient(server);
+ return server;
+}
+
+struct nfs_server *nfs3_clone_server(struct nfs_server *source,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
+{
+ struct nfs_server *server = nfs_clone_server(source, fh, fattr, flavor);
+ if (!IS_ERR(server) && !IS_ERR(source->client_acl))
+ nfs_init_server_aclclient(server);
+ return server;
+}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 3187e24e8f78..d6b3b5f2d779 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
nfs_fattr_init(info->fattr);
status = rpc_call_sync(client, &msg, 0);
dprintk("%s: reply fsinfo: %d\n", __func__, status);
- if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+ if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_resp = info->fattr;
status = rpc_call_sync(client, &msg, 0);
@@ -877,6 +877,46 @@ nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}
+static int nfs3_have_delegation(struct inode *inode, fmode_t flags)
+{
+ return 0;
+}
+
+static int nfs3_return_delegation(struct inode *inode)
+{
+ nfs_wb_all(inode);
+ return 0;
+}
+
+static const struct inode_operations nfs3_dir_inode_operations = {
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .listxattr = nfs3_listxattr,
+ .getxattr = nfs3_getxattr,
+ .setxattr = nfs3_setxattr,
+ .removexattr = nfs3_removexattr,
+};
+
+static const struct inode_operations nfs3_file_inode_operations = {
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .listxattr = nfs3_listxattr,
+ .getxattr = nfs3_getxattr,
+ .setxattr = nfs3_setxattr,
+ .removexattr = nfs3_removexattr,
+};
+
const struct nfs_rpc_ops nfs_v3_clientops = {
.version = 3, /* protocol version */
.dentry_ops = &nfs_dentry_operations,
@@ -885,6 +925,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.file_ops = &nfs_file_operations,
.getroot = nfs3_proc_get_root,
.submount = nfs_submount,
+ .try_mount = nfs_try_mount,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
.lookup = nfs3_proc_lookup,
@@ -910,9 +951,11 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.pathconf = nfs3_proc_pathconf,
.decode_dirent = nfs3_decode_dirent,
.read_setup = nfs3_proc_read_setup,
+ .read_pageio_init = nfs_pageio_init_read,
.read_rpc_prepare = nfs3_proc_read_rpc_prepare,
.read_done = nfs3_read_done,
.write_setup = nfs3_proc_write_setup,
+ .write_pageio_init = nfs_pageio_init_write,
.write_rpc_prepare = nfs3_proc_write_rpc_prepare,
.write_done = nfs3_write_done,
.commit_setup = nfs3_proc_commit_setup,
@@ -921,5 +964,11 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.lock = nfs3_proc_lock,
.clear_acl_cache = nfs3_forget_cached_acls,
.close_context = nfs_close_context,
+ .have_delegation = nfs3_have_delegation,
+ .return_delegation = nfs3_return_delegation,
+ .alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
+ .free_client = nfs_free_client,
+ .create_server = nfs3_create_server,
+ .clone_server = nfs3_clone_server,
};
diff --git a/fs/nfs/nfs3super.c b/fs/nfs/nfs3super.c
new file mode 100644
index 000000000000..cc471c725230
--- /dev/null
+++ b/fs/nfs/nfs3super.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2012 Netapp, Inc. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/nfs_fs.h>
+#include "internal.h"
+#include "nfs.h"
+
+static struct nfs_subversion nfs_v3 = {
+ .owner = THIS_MODULE,
+ .nfs_fs = &nfs_fs_type,
+ .rpc_vers = &nfs_version3,
+ .rpc_ops = &nfs_v3_clientops,
+ .sops = &nfs_sops,
+};
+
+static int __init init_nfs_v3(void)
+{
+ register_nfs_version(&nfs_v3);
+ return 0;
+}
+
+static void __exit exit_nfs_v3(void)
+{
+ unregister_nfs_version(&nfs_v3);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_nfs_v3);
+module_exit(exit_nfs_v3);
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 902de489ec9b..6cbe89400dfc 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -246,7 +246,6 @@ static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
static int decode_nfspath3(struct xdr_stream *xdr)
{
u32 recvd, count;
- size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4);
@@ -255,12 +254,9 @@ static int decode_nfspath3(struct xdr_stream *xdr)
count = be32_to_cpup(p);
if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
goto out_nametoolong;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
+ recvd = xdr_read_pages(xdr, count);
if (unlikely(count > recvd))
goto out_cheating;
-
- xdr_read_pages(xdr, count);
xdr_terminate_string(xdr->buf, count);
return 0;
@@ -329,14 +325,14 @@ static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
memcpy(p, verifier, NFS3_CREATEVERFSIZE);
}
-static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+static int decode_writeverf3(struct xdr_stream *xdr, struct nfs_write_verifier *verifier)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
if (unlikely(p == NULL))
goto out_overflow;
- memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+ memcpy(verifier->data, p, NFS3_WRITEVERFSIZE);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -1587,7 +1583,6 @@ static int decode_read3resok(struct xdr_stream *xdr,
struct nfs_readres *result)
{
u32 eof, count, ocount, recvd;
- size_t hdrlen;
__be32 *p;
p = xdr_inline_decode(xdr, 4 + 4 + 4);
@@ -1598,13 +1593,10 @@ static int decode_read3resok(struct xdr_stream *xdr,
ocount = be32_to_cpup(p++);
if (unlikely(ocount != count))
goto out_mismatch;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
+ recvd = xdr_read_pages(xdr, count);
if (unlikely(count > recvd))
goto out_cheating;
-
out:
- xdr_read_pages(xdr, count);
result->eof = eof;
result->count = count;
return count;
@@ -1676,20 +1668,22 @@ static int decode_write3resok(struct xdr_stream *xdr,
{
__be32 *p;
- p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+ p = xdr_inline_decode(xdr, 4 + 4);
if (unlikely(p == NULL))
goto out_overflow;
result->count = be32_to_cpup(p++);
result->verf->committed = be32_to_cpup(p++);
if (unlikely(result->verf->committed > NFS_FILE_SYNC))
goto out_badvalue;
- memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+ if (decode_writeverf3(xdr, &result->verf->verifier))
+ goto out_eio;
return result->count;
out_badvalue:
dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
return -EIO;
out_overflow:
print_overflow_msg(__func__, xdr);
+out_eio:
return -EIO;
}
@@ -2039,22 +2033,7 @@ out_truncated:
*/
static int decode_dirlist3(struct xdr_stream *xdr)
{
- u32 recvd, pglen;
- size_t hdrlen;
-
- pglen = xdr->buf->page_len;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
- recvd = xdr->buf->len - hdrlen;
- if (unlikely(pglen > recvd))
- goto out_cheating;
-out:
- xdr_read_pages(xdr, pglen);
- return pglen;
-out_cheating:
- dprintk("NFS: server cheating in readdir result: "
- "pglen %u > recvd %u\n", pglen, recvd);
- pglen = recvd;
- goto out;
+ return xdr_read_pages(xdr, xdr->buf->page_len);
}
static int decode_readdir3resok(struct xdr_stream *xdr,
@@ -2337,7 +2316,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
goto out;
if (status != NFS3_OK)
goto out_status;
- error = decode_writeverf3(xdr, result->verf->verifier);
+ error = decode_writeverf3(xdr, &result->verf->verifier);
out:
return error;
out_status:
@@ -2364,7 +2343,7 @@ static inline int decode_getacl3resok(struct xdr_stream *xdr,
if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
goto out;
- hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ hdrlen = xdr_stream_pos(xdr);
acl = NULL;
if (result->mask & NFS_ACL)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index cc5900ac61b5..da0618aeeadb 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -9,7 +9,7 @@
#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
struct idmap;
@@ -200,7 +200,13 @@ struct nfs4_state_maintenance_ops {
};
extern const struct dentry_operations nfs4_dentry_operations;
-extern const struct inode_operations nfs4_dir_inode_operations;
+
+/* dir.c */
+int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
+ unsigned, umode_t, int *);
+
+/* super.c */
+extern struct file_system_type nfs4_fs_type;
/* nfs4namespace.c */
rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
@@ -301,6 +307,10 @@ extern const u32 nfs4_pathconf_bitmap[2];
extern const u32 nfs4_fsinfo_bitmap[3];
extern const u32 nfs4_fs_locations_bitmap[2];
+void nfs4_free_client(struct nfs_client *);
+
+struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *);
+
/* nfs4renewd.c */
extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
@@ -354,6 +364,29 @@ extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_sta
extern const nfs4_stateid zero_stateid;
+/* nfs4super.c */
+struct nfs_mount_info;
+extern struct nfs_subversion nfs_v4;
+struct dentry *nfs4_try_mount(int, const char *, struct nfs_mount_info *, struct nfs_subversion *);
+extern bool nfs4_disable_idmapping;
+extern unsigned short max_session_slots;
+extern unsigned short send_implementation_id;
+
+/* nfs4sysctl.c */
+#ifdef CONFIG_SYSCTL
+int nfs4_register_sysctl(void);
+void nfs4_unregister_sysctl(void);
+#else
+static inline int nfs4_register_sysctl(void)
+{
+ return 0;
+}
+
+static inline void nfs4_unregister_sysctl(void)
+{
+}
+#endif
+
/* nfs4xdr.c */
extern struct rpc_procinfo nfs4_procedures[];
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
new file mode 100644
index 000000000000..24eb663f8ed5
--- /dev/null
+++ b/fs/nfs/nfs4client.c
@@ -0,0 +1,656 @@
+/*
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#include <linux/module.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_idmap.h>
+#include <linux/nfs_mount.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include "internal.h"
+#include "callback.h"
+#include "delegation.h"
+#include "pnfs.h"
+#include "netns.h"
+
+#define NFSDBG_FACILITY NFSDBG_CLIENT
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to lookup the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+ int ret = 0;
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+
+ if (clp->rpc_ops->version != 4 || minorversion != 0)
+ return ret;
+retry:
+ if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock(&nn->nfs_client_lock);
+ ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
+ spin_unlock(&nn->nfs_client_lock);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+}
+
+#ifdef CONFIG_NFS_V4_1
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp)) {
+ nfs4_destroy_session(clp->cl_session);
+ nfs4_destroy_clientid(clp);
+ }
+
+}
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
+{
+ int err;
+ struct nfs_client *clp = nfs_alloc_client(cl_init);
+ if (IS_ERR(clp))
+ return clp;
+
+ err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+ if (err)
+ goto error;
+
+ spin_lock_init(&clp->cl_lock);
+ INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
+ rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
+ clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+ clp->cl_minorversion = cl_init->minorversion;
+ clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
+ return clp;
+
+error:
+ nfs_free_client(clp);
+ return ERR_PTR(err);
+}
+
+/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+ if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+ nfs_callback_down(clp->cl_mvops->minor_version);
+}
+
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+ if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+ nfs4_kill_renewd(clp);
+ nfs4_shutdown_session(clp);
+ nfs4_destroy_callback(clp);
+ if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+ nfs_idmap_delete(clp);
+
+ rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+ kfree(clp->cl_serverowner);
+ kfree(clp->cl_serverscope);
+ kfree(clp->cl_implid);
+}
+
+void nfs4_free_client(struct nfs_client *clp)
+{
+ nfs4_shutdown_client(clp);
+ nfs_free_client(clp);
+}
+
+/*
+ * Initialize the NFS4 callback service
+ */
+static int nfs4_init_callback(struct nfs_client *clp)
+{
+ int error;
+
+ if (clp->rpc_ops->version == 4) {
+ struct rpc_xprt *xprt;
+
+ xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt);
+
+ if (nfs4_has_session(clp)) {
+ error = xprt_setup_backchannel(xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ if (error < 0)
+ return error;
+ }
+
+ error = nfs_callback_up(clp->cl_mvops->minor_version, xprt);
+ if (error < 0) {
+ dprintk("%s: failed to start callback. Error = %d\n",
+ __func__, error);
+ return error;
+ }
+ __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
+ }
+ return 0;
+}
+
+/*
+ * Initialize the minor version specific parts of an NFS4 client record
+ */
+static int nfs4_init_client_minor_version(struct nfs_client *clp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_mvops->minor_version) {
+ struct nfs4_session *session = NULL;
+ /*
+ * Create the session and mark it expired.
+ * When a SEQUENCE operation encounters the expired session
+ * it will do session recovery to initialize it.
+ */
+ session = nfs4_alloc_session(clp);
+ if (!session)
+ return -ENOMEM;
+
+ clp->cl_session = session;
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct
+ */
+ nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
+ }
+#endif /* CONFIG_NFS_V4_1 */
+
+ return nfs4_init_callback(clp);
+}
+
+/**
+ * nfs4_init_client - Initialise an NFS4 client record
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: callback IP address in presentation format
+ * @authflavour: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
+ */
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour)
+{
+ char buf[INET6_ADDRSTRLEN + 1];
+ int error;
+
+ if (clp->cl_cons_state == NFS_CS_READY) {
+ /* the client is initialised already */
+ dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
+ return clp;
+ }
+
+ /* Check NFS protocol revision and initialize RPC op vector */
+ clp->rpc_ops = &nfs_v4_clientops;
+
+ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ error = nfs_create_rpc_client(clp, timeparms, authflavour);
+ if (error < 0)
+ goto error;
+
+ /* If no clientaddr= option was specified, find a usable cb address */
+ if (ip_addr == NULL) {
+ struct sockaddr_storage cb_addr;
+ struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+
+ error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+ if (error < 0)
+ goto error;
+ error = rpc_ntop(sap, buf, sizeof(buf));
+ if (error < 0)
+ goto error;
+ ip_addr = (const char *)buf;
+ }
+ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+
+ error = nfs_idmap_new(clp);
+ if (error < 0) {
+ dprintk("%s: failed to create idmapper. Error = %d\n",
+ __func__, error);
+ goto error;
+ }
+ __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
+
+ if (!nfs4_has_session(clp))
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ return clp;
+
+error:
+ nfs_mark_client_ready(clp, error);
+ nfs_put_client(clp);
+ dprintk("<-- nfs4_init_client() = xerror %d\n", error);
+ return ERR_PTR(error);
+}
+
+static void nfs4_destroy_server(struct nfs_server *server)
+{
+ nfs_server_return_all_delegations(server);
+ unset_pnfs_layoutdriver(server);
+ nfs4_purge_state_owners(server);
+}
+
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(struct net *net, int cb_ident)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ spin_lock(&nn->nfs_client_lock);
+ clp = idr_find(&nn->cb_ident_idr, cb_ident);
+ if (clp)
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/* Common match routine for v4.0 and v4.1 callback services */
+static bool nfs4_cb_match_client(const struct sockaddr *addr,
+ struct nfs_client *clp, u32 minorversion)
+{
+ struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+
+ /* Don't match clients that failed to initialise */
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
+ return false;
+
+ smp_rmb();
+
+ /* Match the version and minorversion */
+ if (clp->rpc_ops->version != 4 ||
+ clp->cl_minorversion != minorversion)
+ return false;
+
+ /* Match only the IP address, not the port number */
+ if (!nfs_sockaddr_match_ipaddr(addr, clap))
+ return false;
+
+ return true;
+}
+
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
+ struct nfs4_sessionid *sid)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ if (nfs4_cb_match_client(addr, clp, 1) == false)
+ continue;
+
+ if (!nfs4_has_session(clp))
+ continue;
+
+ /* Match sessionid */
+ if (memcmp(clp->cl_session->sess_id.data,
+ sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
+ continue;
+
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ return clp;
+ }
+ spin_unlock(&nn->nfs_client_lock);
+ return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
+ struct nfs4_sessionid *sid)
+{
+ return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Set up an NFS4 client
+ */
+static int nfs4_set_client(struct nfs_server *server,
+ const char *hostname,
+ const struct sockaddr *addr,
+ const size_t addrlen,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int proto, const struct rpc_timeout *timeparms,
+ u32 minorversion, struct net *net)
+{
+ struct nfs_client_initdata cl_init = {
+ .hostname = hostname,
+ .addr = addr,
+ .addrlen = addrlen,
+ .nfs_mod = &nfs_v4,
+ .proto = proto,
+ .minorversion = minorversion,
+ .net = net,
+ };
+ struct nfs_client *clp;
+ int error;
+
+ dprintk("--> nfs4_set_client()\n");
+
+ if (server->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
+ /* Allocate or find a client reference we can use */
+ clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
+ if (IS_ERR(clp)) {
+ error = PTR_ERR(clp);
+ goto error;
+ }
+
+ /*
+ * Query for the lease time on clientid setup or renewal
+ *
+ * Note that this will be set on nfs_clients that were created
+ * only for the DS role and did not set this bit, but now will
+ * serve a dual role.
+ */
+ set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
+
+ server->nfs_client = clp;
+ dprintk("<-- nfs4_set_client() = 0 [new %p]\n", clp);
+ return 0;
+error:
+ dprintk("<-- nfs4_set_client() = xerror %d\n", error);
+ return error;
+}
+
+/*
+ * Set up a pNFS Data Server client.
+ *
+ * Return any existing nfs_client that matches server address, port, version
+ * and minorversion.
+ *
+ * For a new nfs_client, use a soft mount (default), a low retrans and a
+ * low timeout interval so that if a connection is lost, we retry through
+ * the MDS.
+ */
+struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
+{
+ struct nfs_client_initdata cl_init = {
+ .addr = ds_addr,
+ .addrlen = ds_addrlen,
+ .nfs_mod = &nfs_v4,
+ .proto = ds_proto,
+ .minorversion = mds_clp->cl_minorversion,
+ .net = mds_clp->cl_net,
+ };
+ struct rpc_timeout ds_timeout;
+ struct nfs_client *clp;
+
+ /*
+ * Set an authflavor equal to the MDS value. Use the MDS nfs_client
+ * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+ * (section 13.1 RFC 5661).
+ */
+ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
+ clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
+ mds_clp->cl_rpcclient->cl_auth->au_flavor);
+
+ dprintk("<-- %s %p\n", __func__, clp);
+ return clp;
+}
+EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
+
+/*
+ * Session has been established, and the client marked ready.
+ * Set the mount rsize and wsize with negotiated fore channel
+ * attributes which will be bound checked in nfs_server_set_fsinfo.
+ */
+static void nfs4_session_set_rwsize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_1
+ struct nfs4_session *sess;
+ u32 server_resp_sz;
+ u32 server_rqst_sz;
+
+ if (!nfs4_has_session(server->nfs_client))
+ return;
+ sess = server->nfs_client->cl_session;
+ server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+
+ if (server->rsize > server_resp_sz)
+ server->rsize = server_resp_sz;
+ if (server->wsize > server_rqst_sz)
+ server->wsize = server_rqst_sz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+static int nfs4_server_common_setup(struct nfs_server *server,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_fattr *fattr;
+ int error;
+
+ BUG_ON(!server->nfs_client);
+ BUG_ON(!server->nfs_client->rpc_ops);
+ BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+
+ /* data servers support only a subset of NFSv4.1 */
+ if (is_ds_only_client(server->nfs_client))
+ return -EPROTONOSUPPORT;
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return -ENOMEM;
+
+ /* We must ensure the session is initialised first */
+ error = nfs4_init_session(server);
+ if (error < 0)
+ goto out;
+
+ /* Probe the root fh to retrieve its FSID and filehandle */
+ error = nfs4_get_rootfh(server, mntfh);
+ if (error < 0)
+ goto out;
+
+ dprintk("Server FSID: %llx:%llx\n",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+ dprintk("Mount FH: %d\n", mntfh->size);
+
+ nfs4_session_set_rwsize(server);
+
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
+ if (error < 0)
+ goto out;
+
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
+ nfs_server_insert_lists(server);
+ server->mount_time = jiffies;
+ server->destroy = nfs4_destroy_server;
+out:
+ nfs_free_fattr(fattr);
+ return error;
+}
+
+/*
+ * Create a version 4 volume record
+ */
+static int nfs4_init_server(struct nfs_server *server,
+ const struct nfs_parsed_mount_data *data)
+{
+ struct rpc_timeout timeparms;
+ int error;
+
+ dprintk("--> nfs4_init_server()\n");
+
+ nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
+ data->timeo, data->retrans);
+
+ /* Initialise the client representation from the mount data */
+ server->flags = data->flags;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|NFS_CAP_POSIX_LOCK;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ server->options = data->options;
+
+ /* Get a client record */
+ error = nfs4_set_client(server,
+ data->nfs_server.hostname,
+ (const struct sockaddr *)&data->nfs_server.address,
+ data->nfs_server.addrlen,
+ data->client_address,
+ data->auth_flavors[0],
+ data->nfs_server.protocol,
+ &timeparms,
+ data->minorversion,
+ data->net);
+ if (error < 0)
+ goto error;
+
+ /*
+ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+ * authentication.
+ */
+ if (nfs4_disable_idmapping && data->auth_flavors[0] == RPC_AUTH_UNIX)
+ server->caps |= NFS_CAP_UIDGID_NOMAP;
+
+ if (data->rsize)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+ if (data->wsize)
+ server->wsize = nfs_block_size(data->wsize, NULL);
+
+ server->acregmin = data->acregmin * HZ;
+ server->acregmax = data->acregmax * HZ;
+ server->acdirmin = data->acdirmin * HZ;
+ server->acdirmax = data->acdirmax * HZ;
+
+ server->port = data->nfs_server.port;
+
+ error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
+
+error:
+ /* Done */
+ dprintk("<-- nfs4_init_server() = %d\n", error);
+ return error;
+}
+
+/*
+ * Create a version 4 volume record
+ * - keyed on server and FSID
+ */
+struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
+{
+ struct nfs_server *server;
+ int error;
+
+ dprintk("--> nfs4_create_server()\n");
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ /* set up the general RPC client */
+ error = nfs4_init_server(server, mount_info->parsed);
+ if (error < 0)
+ goto error;
+
+ error = nfs4_server_common_setup(server, mount_info->mntfh);
+ if (error < 0)
+ goto error;
+
+ dprintk("<-- nfs4_create_server() = %p\n", server);
+ return server;
+
+error:
+ nfs_free_server(server);
+ dprintk("<-- nfs4_create_server() = error %d\n", error);
+ return ERR_PTR(error);
+}
+
+/*
+ * Create an NFS4 referral server record
+ */
+struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_client *parent_client;
+ struct nfs_server *server, *parent_server;
+ int error;
+
+ dprintk("--> nfs4_create_referral_server()\n");
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ parent_server = NFS_SB(data->sb);
+ parent_client = parent_server->nfs_client;
+
+ /* Initialise the client representation from the parent server */
+ nfs_server_copy_userdata(server, parent_server);
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+
+ /* Get a client representation.
+ * Note: NFSv4 always uses TCP. */
+ error = nfs4_set_client(server, data->hostname,
+ data->addr,
+ data->addrlen,
+ parent_client->cl_ipaddr,
+ data->authflavor,
+ rpc_protocol(parent_server->client),
+ parent_server->client->cl_timeout,
+ parent_client->cl_mvops->minor_version,
+ parent_client->cl_net);
+ if (error < 0)
+ goto error;
+
+ error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
+ if (error < 0)
+ goto error;
+
+ error = nfs4_server_common_setup(server, mntfh);
+ if (error < 0)
+ goto error;
+
+ dprintk("<-- nfs4_create_referral_server() = %p\n", server);
+ return server;
+
+error:
+ nfs_free_server(server);
+ dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
+ return ERR_PTR(error);
+}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
new file mode 100644
index 000000000000..acb65e7887f8
--- /dev/null
+++ b/fs/nfs/nfs4file.c
@@ -0,0 +1,126 @@
+/*
+ * linux/fs/nfs/file.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ */
+#include <linux/nfs_fs.h>
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_FILE
+
+static int
+nfs4_file_open(struct inode *inode, struct file *filp)
+{
+ struct nfs_open_context *ctx;
+ struct dentry *dentry = filp->f_path.dentry;
+ struct dentry *parent = NULL;
+ struct inode *dir;
+ unsigned openflags = filp->f_flags;
+ struct iattr attr;
+ int err;
+
+ BUG_ON(inode != dentry->d_inode);
+ /*
+ * If no cached dentry exists or if it's negative, NFSv4 handled the
+ * opens in ->lookup() or ->create().
+ *
+ * We only get this far for a cached positive dentry. We skipped
+ * revalidation, so handle it here by dropping the dentry and returning
+ * -EOPENSTALE. The VFS will retry the lookup/create/open.
+ */
+
+ dprintk("NFS: open file(%s/%s)\n",
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+
+ if ((openflags & O_ACCMODE) == 3)
+ openflags--;
+
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
+
+ parent = dget_parent(dentry);
+ dir = parent->d_inode;
+
+ ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ err = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
+ attr.ia_valid = ATTR_OPEN;
+ if (openflags & O_TRUNC) {
+ attr.ia_valid |= ATTR_SIZE;
+ attr.ia_size = 0;
+ nfs_wb_all(inode);
+ }
+
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ switch (err) {
+ case -EPERM:
+ case -EACCES:
+ case -EDQUOT:
+ case -ENOSPC:
+ case -EROFS:
+ goto out_put_ctx;
+ default:
+ goto out_drop;
+ }
+ }
+ iput(inode);
+ if (inode != dentry->d_inode)
+ goto out_drop;
+
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_file_set_open_context(filp, ctx);
+ err = 0;
+
+out_put_ctx:
+ put_nfs_open_context(ctx);
+out:
+ dput(parent);
+ return err;
+
+out_drop:
+ d_drop(dentry);
+ err = -EOPENSTALE;
+ goto out_put_ctx;
+}
+
+static int
+nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ int ret;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ mutex_lock(&inode->i_mutex);
+ ret = nfs_file_fsync_commit(file, start, end, datasync);
+ if (!ret && !datasync)
+ /* application has asked for meta-data sync */
+ ret = pnfs_layoutcommit_inode(inode, true);
+ mutex_unlock(&inode->i_mutex);
+
+ return ret;
+}
+
+const struct file_operations nfs4_file_operations = {
+ .llseek = nfs_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = nfs_file_read,
+ .aio_write = nfs_file_write,
+ .mmap = nfs_file_mmap,
+ .open = nfs4_file_open,
+ .flush = nfs_file_flush,
+ .release = nfs_file_release,
+ .fsync = nfs4_file_fsync,
+ .lock = nfs_lock,
+ .flock = nfs_flock,
+ .splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
+ .check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
+};
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index e1340293872c..53f94d915bd1 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -205,9 +205,9 @@ static int filelayout_async_handle_error(struct rpc_task *task,
case -EPIPE:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
- if (!filelayout_test_devid_invalid(devid))
- _pnfs_return_layout(inode);
filelayout_mark_devid_invalid(devid);
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
+ _pnfs_return_layout(inode);
rpc_wake_up(&tbl->slot_tbl_waitq);
nfs4_ds_disconnect(clp);
/* fall through */
@@ -351,9 +351,9 @@ static void prepare_to_resend_writes(struct nfs_commit_data *data)
struct nfs_page *first = nfs_list_entry(data->pages.next);
data->task.tk_status = 0;
- memcpy(data->verf.verifier, first->wb_verf.verifier,
- sizeof(first->wb_verf.verifier));
- data->verf.verifier[0]++; /* ensure verifier mismatch */
+ memcpy(&data->verf.verifier, &first->wb_verf,
+ sizeof(data->verf.verifier));
+ data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}
static int filelayout_commit_done_cb(struct rpc_task *task,
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index a1fab8da7f03..f81231f30d94 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -728,7 +728,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla
pdev->layout_type = LAYOUT_NFSV4_1_FILES;
pdev->pages = pages;
pdev->pgbase = 0;
- pdev->pglen = PAGE_SIZE * max_pages;
+ pdev->pglen = max_resp_sz;
pdev->mincount = 0;
rc = nfs4_proc_getdeviceinfo(server, pdev);
diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c
new file mode 100644
index 000000000000..6a83780e0ce6
--- /dev/null
+++ b/fs/nfs/nfs4getroot.c
@@ -0,0 +1,49 @@
+/*
+* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+* Written by David Howells (dhowells@redhat.com)
+*/
+
+#include <linux/nfs_fs.h>
+#include "nfs4_fs.h"
+
+#define NFSDBG_FACILITY NFSDBG_CLIENT
+
+int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
+{
+ struct nfs_fsinfo fsinfo;
+ int ret = -ENOMEM;
+
+ dprintk("--> nfs4_get_rootfh()\n");
+
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ goto out;
+
+ /* Start by getting the root filehandle from the server */
+ ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
+ if (ret < 0) {
+ dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
+ goto out;
+ }
+
+ if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
+ || !S_ISDIR(fsinfo.fattr->mode)) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
+ " getroot encountered non-directory\n");
+ ret = -ENOTDIR;
+ goto out;
+ }
+
+ if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
+ " getroot obtained referral\n");
+ ret = -EREMOTE;
+ goto out;
+ }
+
+ memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
+ return ret;
+}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c157b2089b47..635274140b18 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -43,7 +43,6 @@
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/gss_api.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
@@ -73,8 +72,6 @@
#define NFS4_MAX_LOOP_ON_RECOVER (10)
-static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
-
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
@@ -259,7 +256,12 @@ static int nfs4_wait_clnt_recover(struct nfs_client *clp)
res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
- return res;
+ if (res)
+ return res;
+
+ if (clp->cl_cons_state < 0)
+ return clp->cl_cons_state;
+ return 0;
}
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
@@ -294,8 +296,8 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
case 0:
return 0;
case -NFS4ERR_OPENMODE:
- if (inode && nfs_have_delegation(inode, FMODE_READ)) {
- nfs_inode_return_delegation(inode);
+ if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
+ nfs4_inode_return_delegation(inode);
exception->retry = 1;
return 0;
}
@@ -1065,7 +1067,7 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
return;
}
rcu_read_unlock();
- nfs_inode_return_delegation(inode);
+ nfs4_inode_return_delegation(inode);
}
static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
@@ -1756,33 +1758,70 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
}
#if defined(CONFIG_NFS_V4_1)
-static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
+static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
{
- int status = NFS_OK;
struct nfs_server *server = NFS_SERVER(state->inode);
+ nfs4_stateid *stateid = &state->stateid;
+ int status;
+
+ /* If a state reset has been done, test_stateid is unneeded */
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
+ return;
- if (state->flags & flags) {
- status = nfs41_test_stateid(server, stateid);
- if (status != NFS_OK) {
+ status = nfs41_test_stateid(server, stateid);
+ if (status != NFS_OK) {
+ /* Free the stateid unless the server explicitly
+ * informs us the stateid is unrecognized. */
+ if (status != -NFS4ERR_BAD_STATEID)
nfs41_free_stateid(server, stateid);
- state->flags &= ~flags;
- }
+
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ }
+}
+
+/**
+ * nfs41_check_open_stateid - possibly free an open stateid
+ *
+ * @state: NFSv4 state for an inode
+ *
+ * Returns NFS_OK if recovery for this stateid is now finished.
+ * Otherwise a negative NFS4ERR value is returned.
+ */
+static int nfs41_check_open_stateid(struct nfs4_state *state)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ nfs4_stateid *stateid = &state->stateid;
+ int status;
+
+ /* If a state reset has been done, test_stateid is unneeded */
+ if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
+ (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
+ (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
+ return -NFS4ERR_BAD_STATEID;
+
+ status = nfs41_test_stateid(server, stateid);
+ if (status != NFS_OK) {
+ /* Free the stateid unless the server explicitly
+ * informs us the stateid is unrecognized. */
+ if (status != -NFS4ERR_BAD_STATEID)
+ nfs41_free_stateid(server, stateid);
+
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
}
return status;
}
static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
- int deleg_status, open_status;
- int deleg_flags = 1 << NFS_DELEGATED_STATE;
- int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
-
- deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
- open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
+ int status;
- if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
- return NFS_OK;
- return nfs4_open_expired(sp, state);
+ nfs41_clear_delegation_stateid(state);
+ status = nfs41_check_open_stateid(state);
+ if (status != NFS_OK)
+ status = nfs4_open_expired(sp, state);
+ return status;
}
#endif
@@ -2375,11 +2414,15 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
int i, len, status = 0;
rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
- len = gss_mech_list_pseudoflavors(&flav_array[0]);
- flav_array[len] = RPC_AUTH_NULL;
- len += 1;
+ len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
+ BUG_ON(len < 0);
for (i = 0; i < len; i++) {
+ /* AUTH_UNIX is the default flavor if none was specified,
+ * thus has already been tried. */
+ if (flav_array[i] == RPC_AUTH_UNIX)
+ continue;
+
status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
continue;
@@ -2766,9 +2809,7 @@ static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
*
* In the case of WRITE, we also want to put the GETATTR after
* the operation -- in this case because we want to make sure
- * we get the post-operation mtime and size. This means that
- * we can't use xdr_encode_pages() as written: we need a variant
- * of it which would leave room in the 'tail' iovec.
+ * we get the post-operation mtime and size.
*
* Both of these changes to the XDR layer would in fact be quite
* minor, but I decided to leave them for a subsequent patch.
@@ -2821,7 +2862,9 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
return PTR_ERR(ctx);
sattr->ia_mode &= ~current_umask();
- state = nfs4_do_open(dir, dentry, ctx->mode, flags, sattr, ctx->cred, NULL);
+ state = nfs4_do_open(dir, dentry, ctx->mode,
+ flags, sattr, ctx->cred,
+ &ctx->mdsthreshold);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
@@ -3315,8 +3358,14 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str
static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
{
+ int error;
+
nfs_fattr_init(fsinfo->fattr);
- return nfs4_do_fsinfo(server, fhandle, fsinfo);
+ error = nfs4_do_fsinfo(server, fhandle, fsinfo);
+ if (error == 0)
+ set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
+
+ return error;
}
static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -3443,7 +3492,7 @@ bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
/* Otherwise, request attributes if and only if we don't hold
* a delegation
*/
- return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
+ return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
}
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
@@ -3688,9 +3737,10 @@ out:
static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
{
struct nfs4_cached_acl *acl;
+ size_t buflen = sizeof(*acl) + acl_len;
- if (pages && acl_len <= PAGE_SIZE) {
- acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
+ if (pages && buflen <= PAGE_SIZE) {
+ acl = kmalloc(buflen, GFP_KERNEL);
if (acl == NULL)
goto out;
acl->cached = 1;
@@ -3732,7 +3782,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.rpc_argp = &args,
.rpc_resp = &res,
};
- int ret = -ENOMEM, npages, i, acl_len = 0;
+ int ret = -ENOMEM, npages, i;
+ size_t acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
@@ -3769,7 +3820,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
if (ret)
goto out_free;
- acl_len = res.acl_len - res.acl_data_offset;
+ acl_len = res.acl_len;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, 0, acl_len);
else
@@ -3847,7 +3898,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
if (i < 0)
return i;
- nfs_inode_return_delegation(inode);
+ nfs4_inode_return_delegation(inode);
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
/*
@@ -3961,6 +4012,16 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
+/**
+ * nfs4_proc_setclientid - Negotiate client ID
+ * @clp: state data structure
+ * @program: RPC program for NFSv4 callback service
+ * @port: IP port number for NFSv4 callback service
+ * @cred: RPC credential to use for this call
+ * @res: where to place the result
+ *
+ * Returns zero, a negative errno, or a negative NFS4ERR status code.
+ */
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
unsigned short port, struct rpc_cred *cred,
struct nfs4_setclientid_res *res)
@@ -3977,44 +4038,44 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
.rpc_resp = res,
.rpc_cred = cred,
};
- int loop = 0;
int status;
+ /* nfs_client_id4 */
nfs4_init_boot_verifier(clp, &sc_verifier);
-
- for(;;) {
- rcu_read_lock();
- setclientid.sc_name_len = scnprintf(setclientid.sc_name,
- sizeof(setclientid.sc_name), "%s/%s %s %s %u",
- clp->cl_ipaddr,
- rpc_peeraddr2str(clp->cl_rpcclient,
- RPC_DISPLAY_ADDR),
- rpc_peeraddr2str(clp->cl_rpcclient,
- RPC_DISPLAY_PROTO),
- clp->cl_rpcclient->cl_auth->au_ops->au_name,
- clp->cl_id_uniquifier);
- setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
+ rcu_read_lock();
+ setclientid.sc_name_len = scnprintf(setclientid.sc_name,
+ sizeof(setclientid.sc_name), "%s/%s %s",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_PROTO));
+ /* cb_client4 */
+ setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_NETID));
- setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
+ rcu_read_unlock();
+ setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
sizeof(setclientid.sc_uaddr), "%s.%u.%u",
clp->cl_ipaddr, port >> 8, port & 255);
- rcu_read_unlock();
- status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
- if (status != -NFS4ERR_CLID_INUSE)
- break;
- if (loop != 0) {
- ++clp->cl_id_uniquifier;
- break;
- }
- ++loop;
- ssleep(clp->cl_lease_time / HZ + 1);
- }
+ dprintk("NFS call setclientid auth=%s, '%.*s'\n",
+ clp->cl_rpcclient->cl_auth->au_ops->au_name,
+ setclientid.sc_name_len, setclientid.sc_name);
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ dprintk("NFS reply setclientid: %d\n", status);
return status;
}
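The rewritten nfs4_proc_setclientid() above drops the retry loop and the uniquifier, and now composes the nfs_client_id4 string from just the client address, the server address, and the transport name. As a rough user-space illustration of how the two identifier strings are put together (the addresses, port, and buffer sizes below are invented for the example and are not taken from the patch):

#include <stdio.h>

/* Hypothetical stand-ins for values the kernel reads from the RPC
 * client under rcu_read_lock(); real code queries the transport. */
#define CL_IPADDR  "192.168.1.10"       /* client's own address  */
#define SRV_ADDR   "192.168.1.200"      /* server address string */
#define SRV_PROTO  "tcp"                /* transport protocol    */

int main(void)
{
        char sc_name[128];              /* kernel buffer is sized differently */
        char sc_uaddr[64];
        unsigned short port = 45327;    /* example callback port */
        int len;

        /* nfs_client_id4: "<client-ip>/<server-addr> <proto>" */
        len = snprintf(sc_name, sizeof(sc_name), "%s/%s %s",
                       CL_IPADDR, SRV_ADDR, SRV_PROTO);

        /* cb_client4 universal address: "<ip>.<port-hi>.<port-lo>" */
        snprintf(sc_uaddr, sizeof(sc_uaddr), "%s.%u.%u",
                 CL_IPADDR, (unsigned)(port >> 8), (unsigned)(port & 255));

        printf("sc_name (%d bytes): %s\n", len, sc_name);
        printf("sc_uaddr: %s\n", sc_uaddr);
        return 0;
}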
+/**
+ * nfs4_proc_setclientid_confirm - Confirm client ID
+ * @clp: state data structure
+ * @res: result of a previous SETCLIENTID
+ * @cred: RPC credential to use for this call
+ *
+ * Returns zero, a negative errno, or a negative NFS4ERR status code.
+ */
int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
struct nfs4_setclientid_res *arg,
struct rpc_cred *cred)
@@ -4029,6 +4090,9 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
unsigned long now;
int status;
+ dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
+ clp->cl_rpcclient->cl_auth->au_ops->au_name,
+ clp->cl_clientid);
now = jiffies;
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status == 0) {
@@ -4037,6 +4101,7 @@ int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
+ dprintk("NFS reply setclientid_confirm: %d\n", status);
return status;
}
@@ -4681,9 +4746,17 @@ out:
}
#if defined(CONFIG_NFS_V4_1)
+/**
+ * nfs41_check_expired_locks - possibly free a lock stateid
+ *
+ * @state: NFSv4 state for an inode
+ *
+ * Returns NFS_OK if recovery for this stateid is now finished.
+ * Otherwise a negative NFS4ERR value is returned.
+ */
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
- int status, ret = NFS_OK;
+ int status, ret = -NFS4ERR_BAD_STATEID;
struct nfs4_lock_state *lsp;
struct nfs_server *server = NFS_SERVER(state->inode);
@@ -4691,7 +4764,11 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
status = nfs41_test_stateid(server, &lsp->ls_stateid);
if (status != NFS_OK) {
- nfs41_free_stateid(server, &lsp->ls_stateid);
+ /* Free the stateid unless the server
+ * informs us the stateid is unrecognized. */
+ if (status != -NFS4ERR_BAD_STATEID)
+ nfs41_free_stateid(server,
+ &lsp->ls_stateid);
lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
ret = status;
}
@@ -4707,9 +4784,9 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques
if (test_bit(LK_STATE_IN_USE, &state->flags))
status = nfs41_check_expired_locks(state);
- if (status == NFS_OK)
- return status;
- return nfs4_lock_expired(state, request);
+ if (status != NFS_OK)
+ status = nfs4_lock_expired(state, request);
+ return status;
}
#endif
@@ -4807,7 +4884,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
* Don't rely on the VFS having checked the file open mode,
* since it won't do this for flock() locks.
*/
- switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
+ switch (request->fl_type) {
case F_RDLCK:
if (!(filp->f_mode & FMODE_READ))
return -EBADF;
@@ -5168,6 +5245,8 @@ out:
/*
* nfs4_proc_exchange_id()
*
+ * Returns zero, a negative errno, or a negative NFS4ERR status code.
+ *
* Since the clientid has expired, all compounds using sessions
* associated with the stale clientid will be returning
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
@@ -5192,16 +5271,14 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
.rpc_cred = cred,
};
- dprintk("--> %s\n", __func__);
- BUG_ON(clp == NULL);
-
nfs4_init_boot_verifier(clp, &verifier);
-
args.id_len = scnprintf(args.id, sizeof(args.id),
- "%s/%s/%u",
+ "%s/%s",
clp->cl_ipaddr,
- clp->cl_rpcclient->cl_nodename,
- clp->cl_rpcclient->cl_auth->au_flavor);
+ clp->cl_rpcclient->cl_nodename);
+ dprintk("NFS call exchange_id auth=%s, '%.*s'\n",
+ clp->cl_rpcclient->cl_auth->au_ops->au_name,
+ args.id_len, args.id);
res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
GFP_NOFS);
@@ -5264,12 +5341,12 @@ out_server_scope:
kfree(res.server_scope);
out:
if (clp->cl_implid != NULL)
- dprintk("%s: Server Implementation ID: "
+ dprintk("NFS reply exchange_id: Server Implementation ID: "
"domain: %s, name: %s, date: %llu,%u\n",
- __func__, clp->cl_implid->domain, clp->cl_implid->name,
+ clp->cl_implid->domain, clp->cl_implid->name,
clp->cl_implid->date.seconds,
clp->cl_implid->date.nseconds);
- dprintk("<-- %s status= %d\n", __func__, status);
+ dprintk("NFS reply exchange_id: %d\n", status);
return status;
}
@@ -6147,11 +6224,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
dprintk("<-- %s\n", __func__);
}
+static size_t max_response_pages(struct nfs_server *server)
+{
+ u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ return nfs_page_array_len(0, max_resp_sz);
+}
+
+static void nfs4_free_pages(struct page **pages, size_t size)
+{
+ int i;
+
+ if (!pages)
+ return;
+
+ for (i = 0; i < size; i++) {
+ if (!pages[i])
+ break;
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+}
+
+static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
+{
+ struct page **pages;
+ int i;
+
+ pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+ if (!pages) {
+ dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
+ return NULL;
+ }
+
+ for (i = 0; i < size; i++) {
+ pages[i] = alloc_page(gfp_flags);
+ if (!pages[i]) {
+ dprintk("%s: failed to allocate page\n", __func__);
+ nfs4_free_pages(pages, size);
+ return NULL;
+ }
+ }
+
+ return pages;
+}
+
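nfs4_alloc_pages() and nfs4_free_pages() above implement an all-or-nothing array allocation: a failure part-way through releases everything allocated so far, and the free helper stops at the first NULL slot so it can also clean up a partially filled array. A minimal user-space sketch of the same pattern using malloc/free (the count and buffer size are arbitrary):

#include <stdlib.h>

/* Free an array of buffers, stopping at the first NULL slot,
 * then free the array itself (mirrors nfs4_free_pages()). */
static void free_bufs(char **bufs, size_t n)
{
        size_t i;

        if (!bufs)
                return;
        for (i = 0; i < n; i++) {
                if (!bufs[i])
                        break;
                free(bufs[i]);
        }
        free(bufs);
}

/* Allocate n buffers of bufsz bytes; on any failure release the
 * partially built array and return NULL (mirrors nfs4_alloc_pages()). */
static char **alloc_bufs(size_t n, size_t bufsz)
{
        char **bufs = calloc(n, sizeof(*bufs));
        size_t i;

        if (!bufs)
                return NULL;
        for (i = 0; i < n; i++) {
                bufs[i] = malloc(bufsz);
                if (!bufs[i]) {
                        free_bufs(bufs, n);
                        return NULL;
                }
        }
        return bufs;
}

int main(void)
{
        char **bufs = alloc_bufs(8, 4096);

        if (bufs)
                free_bufs(bufs, 8);
        return 0;
}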
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ size_t max_pages = max_response_pages(server);
dprintk("--> %s\n", __func__);
+ nfs4_free_pages(lgp->args.layout.pages, max_pages);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -6163,9 +6287,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
.rpc_release = nfs4_layoutget_release,
};
-int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ size_t max_pages = max_response_pages(server);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
@@ -6183,12 +6308,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
dprintk("--> %s\n", __func__);
+ lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
+ if (!lgp->args.layout.pages) {
+ nfs4_layoutget_release(lgp);
+ return;
+ }
+ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
- return PTR_ERR(task);
+ return;
status = nfs4_wait_for_completion_rpc_task(task);
if (status == 0)
status = task->tk_status;
@@ -6196,7 +6328,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
status = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
- return status;
+ return;
}
static void
@@ -6228,12 +6360,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
return;
}
spin_lock(&lo->plh_inode->i_lock);
- if (task->tk_status == 0) {
- if (lrp->res.lrs_present) {
- pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
- } else
- BUG_ON(!list_empty(&lo->plh_segs));
- }
+ if (task->tk_status == 0 && lrp->res.lrs_present)
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
dprintk("<-- %s\n", __func__);
@@ -6570,22 +6698,36 @@ static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
.rpc_resp = &res,
};
+ dprintk("NFS call test_stateid %p\n", stateid);
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
-
- if (status == NFS_OK)
- return res.status;
- return status;
+ if (status != NFS_OK) {
+ dprintk("NFS reply test_stateid: failed, %d\n", status);
+ return status;
+ }
+ dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
+ return -res.status;
}
+/**
+ * nfs41_test_stateid - perform a TEST_STATEID operation
+ *
+ * @server: server / transport on which to perform the operation
+ * @stateid: state ID to test
+ *
+ * Returns NFS_OK if the server recognizes that "stateid" is valid.
+ * Otherwise a negative NFS4ERR value is returned if the operation
+ * failed or the state ID is not currently valid.
+ */
static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
int err;
do {
- err = nfs4_handle_exception(server,
- _nfs41_test_stateid(server, stateid),
- &exception);
+ err = _nfs41_test_stateid(server, stateid);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
@@ -6601,19 +6743,34 @@ static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
.rpc_argp = &args,
.rpc_resp = &res,
};
+ int status;
+ dprintk("NFS call free_stateid %p\n", stateid);
nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
- return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
+ status = nfs4_call_sync_sequence(server->client, server, &msg,
+ &args.seq_args, &res.seq_res, 1);
+ dprintk("NFS reply free_stateid: %d\n", status);
+ return status;
}
+/**
+ * nfs41_free_stateid - perform a FREE_STATEID operation
+ *
+ * @server: server / transport on which to perform the operation
+ * @stateid: state ID to release
+ *
+ * Returns NFS_OK if the server freed "stateid". Otherwise a
+ * negative NFS4ERR value is returned.
+ */
static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
int err;
do {
- err = nfs4_handle_exception(server,
- _nfs4_free_stateid(server, stateid),
- &exception);
+ err = _nfs4_free_stateid(server, stateid);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
return err;
}
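Both retry loops above (nfs41_test_stateid() and nfs41_free_stateid()) now keep retrying only while the server answers NFS4ERR_DELAY; any other result terminates the loop at once. A small user-space model of that retry policy (the error codes and the simulated operation are stand-ins, not kernel interfaces):

#include <stdio.h>

#define SIM_OK      0
#define SIM_DELAY  (-1)   /* "try again later", like NFS4ERR_DELAY */
#define SIM_BAD    (-2)   /* any other error: give up immediately  */

/* Pretend operation: reports "delay" twice, then succeeds. */
static int do_op(void)
{
        static int calls;

        return (++calls < 3) ? SIM_DELAY : SIM_OK;
}

/* Retry only while the callee reports a transient delay. */
static int op_with_retry(void)
{
        int err;

        for (;;) {
                err = do_op();
                if (err != SIM_DELAY)
                        break;
                /* the kernel sleeps here via nfs4_handle_exception() */
        }
        return err;
}

int main(void)
{
        printf("result: %d\n", op_with_retry());
        return 0;
}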
@@ -6725,6 +6882,26 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
#endif
};
+const struct inode_operations nfs4_dir_inode_operations = {
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .atomic_open = nfs_atomic_open,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .getxattr = generic_getxattr,
+ .setxattr = generic_setxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+};
+
static const struct inode_operations nfs4_file_inode_operations = {
.permission = nfs_permission,
.getattr = nfs_getattr,
@@ -6743,6 +6920,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
.submount = nfs4_submount,
+ .try_mount = nfs4_try_mount,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
@@ -6769,9 +6947,11 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.set_capabilities = nfs4_server_capabilities,
.decode_dirent = nfs4_decode_dirent,
.read_setup = nfs4_proc_read_setup,
+ .read_pageio_init = pnfs_pageio_init_read,
.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
.read_done = nfs4_read_done,
.write_setup = nfs4_proc_write_setup,
+ .write_pageio_init = pnfs_pageio_init_write,
.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
@@ -6781,7 +6961,13 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
+ .have_delegation = nfs4_have_delegation,
+ .return_delegation = nfs4_inode_return_delegation,
+ .alloc_client = nfs4_alloc_client,
.init_client = nfs4_init_client,
+ .free_client = nfs4_free_client,
+ .create_server = nfs4_create_server,
+ .clone_server = nfs_clone_server,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
@@ -6796,10 +6982,6 @@ const struct xattr_handler *nfs4_xattr_handlers[] = {
NULL
};
-module_param(max_session_slots, ushort, 0644);
-MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
- "requests the client will negotiate");
-
/*
* Local variables:
* c-basic-offset: 8
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f38300e9f171..55148def5540 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1606,10 +1606,15 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
return -ESERVERFAULT;
/* Lease confirmation error: retry after purging the lease */
ssleep(1);
- case -NFS4ERR_CLID_INUSE:
case -NFS4ERR_STALE_CLIENTID:
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
break;
+ case -NFS4ERR_CLID_INUSE:
+ pr_err("NFS: Server %s reports our clientid is in use\n",
+ clp->cl_hostname);
+ nfs_mark_client_ready(clp, -EPERM);
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ return -EPERM;
case -EACCES:
if (clp->cl_machine_cred == NULL)
return -EACCES;
@@ -1642,7 +1647,7 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
return 0;
}
-static int nfs4_reclaim_lease(struct nfs_client *clp)
+static int nfs4_establish_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
const struct nfs4_state_recovery_ops *ops =
@@ -1655,7 +1660,41 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
status = ops->establish_clid(clp, cred);
put_rpccred(cred);
if (status != 0)
+ return status;
+ pnfs_destroy_all_layouts(clp);
+ return 0;
+}
+
+/*
+ * Returns zero or a negative errno. NFS4ERR values are converted
+ * to local errno values.
+ */
+static int nfs4_reclaim_lease(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_establish_lease(clp);
+ if (status < 0)
+ return nfs4_handle_reclaim_lease_error(clp, status);
+ if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state))
+ nfs4_state_start_reclaim_nograce(clp);
+ if (!test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ return 0;
+}
+
+static int nfs4_purge_lease(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_establish_lease(clp);
+ if (status < 0)
return nfs4_handle_reclaim_lease_error(clp, status);
+ clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_nograce(clp);
return 0;
}
@@ -1764,6 +1803,8 @@ static int nfs4_reset_session(struct nfs_client *clp)
struct rpc_cred *cred;
int status;
+ if (!nfs4_has_session(clp))
+ return 0;
nfs4_begin_drain_session(clp);
cred = nfs4_get_exchange_id_cred(clp);
status = nfs4_proc_destroy_session(clp->cl_session, cred);
@@ -1792,12 +1833,14 @@ out:
static int nfs4_recall_slot(struct nfs_client *clp)
{
- struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
- struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
+ struct nfs4_slot_table *fc_tbl;
struct nfs4_slot *new, *old;
int i;
+ if (!nfs4_has_session(clp))
+ return 0;
nfs4_begin_drain_session(clp);
+ fc_tbl = &clp->cl_session->fc_slot_table;
new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
GFP_NOFS);
if (!new)
@@ -1810,11 +1853,10 @@ static int nfs4_recall_slot(struct nfs_client *clp)
fc_tbl->slots = new;
fc_tbl->max_slots = fc_tbl->target_max_slots;
fc_tbl->target_max_slots = 0;
- fc_attrs->max_reqs = fc_tbl->max_slots;
+ clp->cl_session->fc_attrs.max_reqs = fc_tbl->max_slots;
spin_unlock(&fc_tbl->slot_tbl_lock);
kfree(old);
- nfs4_end_drain_session(clp);
return 0;
}
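The hunk above also shows the shape of nfs4_recall_slot(): the replacement slot array is allocated before the slot table lock is taken, swapped in under the lock, and the old array is freed only after the lock is released. A generic user-space sketch of that resize-and-swap pattern (the element type, sizes, and copy step are placeholders, not the kernel's slot layout):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct slot_table {
        pthread_mutex_t lock;
        unsigned int *slots;
        size_t max_slots;
};

/* Allocate outside the lock, swap the pointer under the lock,
 * free the old array after unlocking. */
static int resize_slot_table(struct slot_table *tbl, size_t target)
{
        unsigned int *new, *old;

        new = calloc(target, sizeof(*new));
        if (!new)
                return -1;

        pthread_mutex_lock(&tbl->lock);
        old = tbl->slots;
        if (old)
                memcpy(new, old, sizeof(*new) *
                       (target < tbl->max_slots ? target : tbl->max_slots));
        tbl->slots = new;
        tbl->max_slots = target;
        pthread_mutex_unlock(&tbl->lock);

        free(old);
        return 0;
}

int main(void)
{
        struct slot_table tbl = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .slots = calloc(4, sizeof(unsigned int)),
                .max_slots = 4,
        };

        resize_slot_table(&tbl, 16);
        free(tbl.slots);
        return 0;
}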
@@ -1823,6 +1865,8 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
struct rpc_cred *cred;
int ret;
+ if (!nfs4_has_session(clp))
+ return 0;
nfs4_begin_drain_session(clp);
cred = nfs4_get_exchange_id_cred(clp);
ret = nfs4_proc_bind_conn_to_session(clp, cred);
@@ -1857,37 +1901,29 @@ static int nfs4_bind_conn_to_session(struct nfs_client *clp)
static void nfs4_state_manager(struct nfs_client *clp)
{
int status = 0;
+ const char *section = "", *section_sep = "";
/* Ensure exclusive access to NFSv4 state */
do {
if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
- status = nfs4_reclaim_lease(clp);
+ section = "purge state";
+ status = nfs4_purge_lease(clp);
if (status < 0)
goto out_error;
- clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ continue;
}
- if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
+ section = "lease expired";
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
if (status < 0)
goto out_error;
- if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
- continue;
- clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
-
- if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
- &clp->cl_state))
- nfs4_state_start_reclaim_nograce(clp);
- else
- set_bit(NFS4CLNT_RECLAIM_REBOOT,
- &clp->cl_state);
-
- pnfs_destroy_all_layouts(clp);
+ continue;
}
if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
+ section = "check lease";
status = nfs4_check_lease(clp);
if (status < 0)
goto out_error;
@@ -1896,8 +1932,8 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
/* Initialize or reset the session */
- if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
- && nfs4_has_session(clp)) {
+ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
+ section = "reset session";
status = nfs4_reset_session(clp);
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
continue;
@@ -1907,15 +1943,26 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Send BIND_CONN_TO_SESSION */
if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
- &clp->cl_state) && nfs4_has_session(clp)) {
+ &clp->cl_state)) {
+ section = "bind conn to session";
status = nfs4_bind_conn_to_session(clp);
if (status < 0)
goto out_error;
continue;
}
+ /* Recall session slots */
+ if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)) {
+ section = "recall slot";
+ status = nfs4_recall_slot(clp);
+ if (status < 0)
+ goto out_error;
+ continue;
+ }
+
/* First recover reboot state... */
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+ section = "reclaim reboot";
status = nfs4_do_reclaim(clp,
clp->cl_mvops->reboot_recovery_ops);
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
@@ -1930,6 +1977,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ section = "reclaim nograce";
status = nfs4_do_reclaim(clp,
clp->cl_mvops->nograce_recovery_ops);
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
@@ -1945,15 +1993,6 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs_client_return_marked_delegations(clp);
continue;
}
- /* Recall session slots */
- if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
- && nfs4_has_session(clp)) {
- status = nfs4_recall_slot(clp);
- if (status < 0)
- goto out_error;
- continue;
- }
-
nfs4_clear_state_manager_bit(clp);
/* Did we race with an attempt to give us more work? */
@@ -1964,8 +2003,11 @@ static void nfs4_state_manager(struct nfs_client *clp)
} while (atomic_read(&clp->cl_count) > 1);
return;
out_error:
- pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s"
- " with error %d\n", clp->cl_hostname, -status);
+ if (strlen(section))
+ section_sep = ": ";
+ pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
+ " with error %d\n", section_sep, section,
+ clp->cl_hostname, -status);
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
}
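The state manager loop now records a short section name for each recovery phase so the ratelimited warning in out_error can report which step failed. A compact user-space sketch of that "label the current phase for error reporting" structure (the phase list and the step chosen to fail are illustrative only):

#include <stdio.h>
#include <string.h>

/* Pretend one phase fails so the error path has something to report. */
static int run_phase(const char *name)
{
        return strcmp(name, "reclaim reboot") ? 0 : -5;
}

int main(void)
{
        static const char *phases[] = {
                "purge state", "lease expired", "check lease",
                "reset session", "reclaim reboot", "reclaim nograce",
        };
        const char *section = "";
        int status = 0;
        size_t i;

        for (i = 0; i < sizeof(phases) / sizeof(phases[0]); i++) {
                section = phases[i];
                status = run_phase(section);
                if (status < 0)
                        goto out_error;
        }
        return 0;

out_error:
        fprintf(stderr, "state manager%s%s failed with error %d\n",
                strlen(section) ? ": " : "", section, -status);
        return 1;
}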
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
new file mode 100644
index 000000000000..bd61221ad2c5
--- /dev/null
+++ b/fs/nfs/nfs4super.c
@@ -0,0 +1,357 @@
+/*
+ * Copyright (c) 2012 Bryan Schumaker <bjschuma@netapp.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nfs_idmap.h>
+#include <linux/nfs4_mount.h>
+#include <linux/nfs_fs.h>
+#include "delegation.h"
+#include "internal.h"
+#include "nfs4_fs.h"
+#include "pnfs.h"
+#include "nfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc);
+static void nfs4_evict_inode(struct inode *inode);
+static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+
+static struct file_system_type nfs4_remote_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_remote_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static struct file_system_type nfs4_remote_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_remote_referral_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+struct file_system_type nfs4_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_referral_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static const struct super_operations nfs4_sops = {
+ .alloc_inode = nfs_alloc_inode,
+ .destroy_inode = nfs_destroy_inode,
+ .write_inode = nfs4_write_inode,
+ .put_super = nfs_put_super,
+ .statfs = nfs_statfs,
+ .evict_inode = nfs4_evict_inode,
+ .umount_begin = nfs_umount_begin,
+ .show_options = nfs_show_options,
+ .show_devname = nfs_show_devname,
+ .show_path = nfs_show_path,
+ .show_stats = nfs_show_stats,
+ .remount_fs = nfs_remount,
+};
+
+struct nfs_subversion nfs_v4 = {
+ .owner = THIS_MODULE,
+ .nfs_fs = &nfs4_fs_type,
+ .rpc_vers = &nfs_version4,
+ .rpc_ops = &nfs_v4_clientops,
+ .sops = &nfs4_sops,
+ .xattr = nfs4_xattr_handlers,
+};
+
+static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ int ret = nfs_write_inode(inode, wbc);
+
+ if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
+ int status;
+ bool sync = true;
+
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ sync = false;
+
+ status = pnfs_layoutcommit_inode(inode, sync);
+ if (status < 0)
+ return status;
+ }
+ return ret;
+}
+
+/*
+ * Clean out any remaining NFSv4 state that might be left over due
+ * to open() calls that passed nfs_atomic_lookup, but failed to call
+ * nfs_open().
+ */
+static void nfs4_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ clear_inode(inode);
+ pnfs_return_layout(inode);
+ pnfs_destroy_layout(NFS_I(inode));
+ /* If we are holding a delegation, return it! */
+ nfs_inode_return_delegation_noreclaim(inode);
+ /* First call standard NFS clear_inode() code */
+ nfs_clear_inode(inode);
+}
+
+/*
+ * Get the superblock for the NFS4 root partition
+ */
+static struct dentry *
+nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *info)
+{
+ struct nfs_mount_info *mount_info = info;
+ struct nfs_server *server;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
+
+ mount_info->set_security = nfs_set_sb_security;
+
+ /* Get a volume representation */
+ server = nfs4_create_server(mount_info, &nfs_v4);
+ if (IS_ERR(server)) {
+ mntroot = ERR_CAST(server);
+ goto out;
+ }
+
+ mntroot = nfs_fs_mount_common(server, flags, dev_name, mount_info, &nfs_v4);
+
+out:
+ return mntroot;
+}
+
+static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ int flags, void *data, const char *hostname)
+{
+ struct vfsmount *root_mnt;
+ char *root_devname;
+ size_t len;
+
+ len = strlen(hostname) + 5;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+ /* Does the hostname need to be enclosed in brackets? */
+ if (strchr(hostname, ':'))
+ snprintf(root_devname, len, "[%s]:/", hostname);
+ else
+ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+}
+
+struct nfs_referral_count {
+ struct list_head list;
+ const struct task_struct *task;
+ unsigned int referral_count;
+};
+
+static LIST_HEAD(nfs_referral_count_list);
+static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
+
+static struct nfs_referral_count *nfs_find_referral_count(void)
+{
+ struct nfs_referral_count *p;
+
+ list_for_each_entry(p, &nfs_referral_count_list, list) {
+ if (p->task == current)
+ return p;
+ }
+ return NULL;
+}
+
+#define NFS_MAX_NESTED_REFERRALS 2
+
+static int nfs_referral_loop_protect(void)
+{
+ struct nfs_referral_count *p, *new;
+ int ret = -ENOMEM;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out;
+ new->task = current;
+ new->referral_count = 1;
+
+ ret = 0;
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ if (p != NULL) {
+ if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
+ ret = -ELOOP;
+ else
+ p->referral_count++;
+ } else {
+ list_add(&new->list, &nfs_referral_count_list);
+ new = NULL;
+ }
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(new);
+out:
+ return ret;
+}
+
+static void nfs_referral_loop_unprotect(void)
+{
+ struct nfs_referral_count *p;
+
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ p->referral_count--;
+ if (p->referral_count == 0)
+ list_del(&p->list);
+ else
+ p = NULL;
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(p);
+}
+
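nfs_referral_loop_protect()/nfs_referral_loop_unprotect() above bound how deeply referral mounts may nest for the current task, returning -ELOOP past NFS_MAX_NESTED_REFERRALS. A simplified user-space analogue using a per-thread depth counter (the kernel instead keeps a (task, count) list under a spinlock; only the cap of 2 is taken from the patch):

#include <errno.h>
#include <stdio.h>

#define MAX_NESTED_REFERRALS 2          /* same cap as the patch */

/* Per-thread nesting depth. */
static __thread unsigned int referral_depth;

static int referral_protect(void)
{
        if (referral_depth >= MAX_NESTED_REFERRALS)
                return -ELOOP;
        referral_depth++;
        return 0;
}

static void referral_unprotect(void)
{
        referral_depth--;
}

/* Each referral traversal may trigger another one underneath it. */
static int follow_referral(int depth)
{
        int err = referral_protect();

        if (err)
                return err;
        if (depth > 0)
                err = follow_referral(depth - 1);
        referral_unprotect();
        return err;
}

int main(void)
{
        printf("two nested referrals: %d\n", follow_referral(1));  /* within the cap   */
        printf("three nested referrals: %d\n", follow_referral(2)); /* exceeds: -ELOOP */
        return 0;
}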
+static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
+ const char *export_path)
+{
+ struct dentry *dentry;
+ int err;
+
+ if (IS_ERR(root_mnt))
+ return ERR_CAST(root_mnt);
+
+ err = nfs_referral_loop_protect();
+ if (err) {
+ mntput(root_mnt);
+ return ERR_PTR(err);
+ }
+
+ dentry = mount_subtree(root_mnt, export_path);
+ nfs_referral_loop_unprotect();
+
+ return dentry;
+}
+
+struct dentry *nfs4_try_mount(int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
+{
+ char *export_path;
+ struct vfsmount *root_mnt;
+ struct dentry *res;
+ struct nfs_parsed_mount_data *data = mount_info->parsed;
+
+ dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ res = nfs_follow_remote_path(root_mnt, export_path);
+
+ dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
+ IS_ERR(res) ? PTR_ERR(res) : 0,
+ IS_ERR(res) ? " [error]" : "");
+ return res;
+}
+
+static struct dentry *
+nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_fill_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
+ };
+ struct nfs_server *server;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
+
+ dprintk("--> nfs4_referral_get_sb()\n");
+
+ mount_info.mntfh = nfs_alloc_fhandle();
+ if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
+ goto out;
+
+ /* create a new volume representation */
+ server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
+ if (IS_ERR(server)) {
+ mntroot = ERR_CAST(server);
+ goto out;
+ }
+
+ mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, &nfs_v4);
+out:
+ nfs_free_fhandle(mount_info.mntfh);
+ return mntroot;
+}
+
+/*
+ * Create an NFS4 server record on referral traversal
+ */
+static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_clone_mount *data = raw_data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ struct dentry *res;
+
+ dprintk("--> nfs4_referral_mount()\n");
+
+ export_path = data->mnt_path;
+ data->mnt_path = "/";
+
+ root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
+ flags, data, data->hostname);
+ data->mnt_path = export_path;
+
+ res = nfs_follow_remote_path(root_mnt, export_path);
+ dprintk("<-- nfs4_referral_mount() = %ld%s\n",
+ IS_ERR(res) ? PTR_ERR(res) : 0,
+ IS_ERR(res) ? " [error]" : "");
+ return res;
+}
+
+
+static int __init init_nfs_v4(void)
+{
+ int err;
+
+ err = nfs_idmap_init();
+ if (err)
+ goto out;
+
+ err = nfs4_register_sysctl();
+ if (err)
+ goto out1;
+
+ register_nfs_version(&nfs_v4);
+ return 0;
+out1:
+ nfs_idmap_quit();
+out:
+ return err;
+}
+
+static void __exit exit_nfs_v4(void)
+{
+ unregister_nfs_version(&nfs_v4);
+ nfs4_unregister_sysctl();
+ nfs_idmap_quit();
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_nfs_v4);
+module_exit(exit_nfs_v4);
diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
new file mode 100644
index 000000000000..5729bc8aa75d
--- /dev/null
+++ b/fs/nfs/nfs4sysctl.c
@@ -0,0 +1,68 @@
+/*
+ * linux/fs/nfs/nfs4sysctl.c
+ *
+ * Sysctl interface to NFS v4 parameters
+ *
+ * Copyright (c) 2006 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+#include <linux/sysctl.h>
+#include <linux/nfs_idmap.h>
+#include <linux/nfs_fs.h>
+
+#include "callback.h"
+
+static const int nfs_set_port_min = 0;
+static const int nfs_set_port_max = 65535;
+static struct ctl_table_header *nfs4_callback_sysctl_table;
+
+static ctl_table nfs4_cb_sysctls[] = {
+ {
+ .procname = "nfs_callback_tcpport",
+ .data = &nfs_callback_set_tcpport,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (int *)&nfs_set_port_min,
+ .extra2 = (int *)&nfs_set_port_max,
+ },
+ {
+ .procname = "idmap_cache_timeout",
+ .data = &nfs_idmap_cache_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ { }
+};
+
+static ctl_table nfs4_cb_sysctl_dir[] = {
+ {
+ .procname = "nfs",
+ .mode = 0555,
+ .child = nfs4_cb_sysctls,
+ },
+ { }
+};
+
+static ctl_table nfs4_cb_sysctl_root[] = {
+ {
+ .procname = "fs",
+ .mode = 0555,
+ .child = nfs4_cb_sysctl_dir,
+ },
+ { }
+};
+
+int nfs4_register_sysctl(void)
+{
+ nfs4_callback_sysctl_table = register_sysctl_table(nfs4_cb_sysctl_root);
+ if (nfs4_callback_sysctl_table == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfs4_unregister_sysctl(void)
+{
+ unregister_sysctl_table(nfs4_callback_sysctl_table);
+ nfs4_callback_sysctl_table = NULL;
+}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 18fae29b0301..1bfbd67c556d 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -852,12 +852,6 @@ const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
XDR_UNIT);
#endif /* CONFIG_NFS_V4_1 */
-static unsigned short send_implementation_id = 1;
-
-module_param(send_implementation_id, ushort, 0644);
-MODULE_PARM_DESC(send_implementation_id,
- "Send implementation ID with NFSv4.1 exchange_id");
-
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
[NF4REG] = S_IFREG,
@@ -1236,7 +1230,7 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
static inline int nfs4_lock_type(struct file_lock *fl, int block)
{
- if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK)
+ if (fl->fl_type == F_RDLCK)
return block ? NFS4_READW_LT : NFS4_READ_LT;
return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
}
@@ -3078,7 +3072,7 @@ out_overflow:
return -EIO;
}
-static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
+static int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, unsigned int *savep)
{
__be32 *p;
@@ -3086,7 +3080,7 @@ static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen,
if (unlikely(!p))
goto out_overflow;
*attrlen = be32_to_cpup(p);
- *savep = xdr->p;
+ *savep = xdr_stream_pos(xdr);
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -4068,10 +4062,10 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
return status;
}
-static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrlen)
+static int verify_attr_len(struct xdr_stream *xdr, unsigned int savep, uint32_t attrlen)
{
unsigned int attrwords = XDR_QUADLEN(attrlen);
- unsigned int nwords = xdr->p - savep;
+ unsigned int nwords = (xdr_stream_pos(xdr) - savep) >> 2;
if (unlikely(attrwords != nwords)) {
dprintk("%s: server returned incorrect attribute length: "
@@ -4158,13 +4152,18 @@ static int decode_verifier(struct xdr_stream *xdr, void *verifier)
return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
}
+static int decode_write_verifier(struct xdr_stream *xdr, struct nfs_write_verifier *verifier)
+{
+ return decode_opaque_fixed(xdr, verifier->data, NFS4_VERIFIER_SIZE);
+}
+
static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
{
int status;
status = decode_op_hdr(xdr, OP_COMMIT);
if (!status)
- status = decode_verifier(xdr, res->verf->verifier);
+ status = decode_write_verifier(xdr, &res->verf->verifier);
return status;
}
@@ -4193,7 +4192,7 @@ out_overflow:
static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
{
- __be32 *savep;
+ unsigned int savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
@@ -4222,7 +4221,7 @@ xdr_error:
static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
{
- __be32 *savep;
+ unsigned int savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
@@ -4254,7 +4253,7 @@ xdr_error:
static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf)
{
- __be32 *savep;
+ unsigned int savep;
uint32_t attrlen, bitmap[3] = {0};
int status;
@@ -4299,7 +4298,8 @@ out_overflow:
static int decode_first_threshold_item4(struct xdr_stream *xdr,
struct nfs4_threshold *res)
{
- __be32 *p, *savep;
+ __be32 *p;
+ unsigned int savep;
uint32_t bitmap[3] = {0,}, attrlen;
int status;
@@ -4503,7 +4503,7 @@ static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fat
struct nfs_fh *fh, struct nfs4_fs_locations *fs_loc,
const struct nfs_server *server)
{
- __be32 *savep;
+ unsigned int savep;
uint32_t attrlen,
bitmap[3] = {0};
int status;
@@ -4615,7 +4615,7 @@ static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
{
- __be32 *savep;
+ unsigned int savep;
uint32_t attrlen, bitmap[3];
int status;
@@ -4920,9 +4920,8 @@ static int decode_putrootfh(struct xdr_stream *xdr)
static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
{
- struct kvec *iov = req->rq_rcv_buf.head;
__be32 *p;
- uint32_t count, eof, recvd, hdrlen;
+ uint32_t count, eof, recvd;
int status;
status = decode_op_hdr(xdr, OP_READ);
@@ -4933,15 +4932,13 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_
goto out_overflow;
eof = be32_to_cpup(p++);
count = be32_to_cpup(p);
- hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
- recvd = req->rq_rcv_buf.len - hdrlen;
+ recvd = xdr_read_pages(xdr, count);
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %u > recvd %u\n", count, recvd);
count = recvd;
eof = 0;
}
- xdr_read_pages(xdr, count);
res->eof = eof;
res->count = count;
return 0;
@@ -4952,10 +4949,6 @@ out_overflow:
static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
{
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- size_t hdrlen;
- u32 recvd, pglen = rcvbuf->page_len;
int status;
__be32 verf[2];
@@ -4967,22 +4960,12 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
memcpy(verf, readdir->verifier.data, sizeof(verf));
dprintk("%s: verifier = %08x:%08x\n",
__func__, verf[0], verf[1]);
-
- hdrlen = (char *) xdr->p - (char *) iov->iov_base;
- recvd = rcvbuf->len - hdrlen;
- if (pglen > recvd)
- pglen = recvd;
- xdr_read_pages(xdr, pglen);
-
-
- return pglen;
+ return xdr_read_pages(xdr, xdr->buf->page_len);
}
static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- size_t hdrlen;
u32 len, recvd;
__be32 *p;
int status;
@@ -5000,14 +4983,12 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
}
- hdrlen = (char *) xdr->p - (char *) iov->iov_base;
- recvd = req->rq_rcv_buf.len - hdrlen;
+ recvd = xdr_read_pages(xdr, len);
if (recvd < len) {
dprintk("NFS: server cheating in readlink reply: "
"count %u > recvd %u\n", len, recvd);
return -EIO;
}
- xdr_read_pages(xdr, len);
/*
* The XDR encode routine has set things up so that
* the link text will be copied directly into the
@@ -5063,23 +5044,20 @@ decode_restorefh(struct xdr_stream *xdr)
static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
struct nfs_getaclres *res)
{
- __be32 *savep, *bm_p;
+ unsigned int savep;
uint32_t attrlen,
bitmap[3] = {0};
- struct kvec *iov = req->rq_rcv_buf.head;
int status;
- size_t page_len = xdr->buf->page_len;
+ unsigned int pg_offset;
res->acl_len = 0;
if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
goto out;
- bm_p = xdr->p;
- res->acl_data_offset = be32_to_cpup(bm_p) + 2;
- res->acl_data_offset <<= 2;
- /* Check if the acl data starts beyond the allocated buffer */
- if (res->acl_data_offset > page_len)
- return -ERANGE;
+ xdr_enter_page(xdr, xdr->buf->page_len);
+
+ /* Calculate the offset of the page data */
+ pg_offset = xdr->buf->head[0].iov_len;
if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
goto out;
@@ -5089,29 +5067,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
- size_t hdrlen;
/* The bitmap (xdr len + bitmaps) and the attr xdr len words
* are stored with the acl data to handle the problem of
* variable length bitmaps.*/
- xdr->p = bm_p;
+ res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
- hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
- attrlen += res->acl_data_offset;
- if (attrlen > page_len) {
+ res->acl_len = attrlen;
+ if (attrlen > (xdr->nwords << 2)) {
if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
/* getxattr interface called with a NULL buf */
- res->acl_len = attrlen;
goto out;
}
- dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
- attrlen, page_len);
+ dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
+ attrlen, xdr->nwords << 2);
return -EINVAL;
}
- xdr_read_pages(xdr, attrlen);
- res->acl_len = attrlen;
} else
status = -EOPNOTSUPP;
@@ -5212,13 +5185,12 @@ static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
if (status)
return status;
- p = xdr_inline_decode(xdr, 16);
+ p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
res->count = be32_to_cpup(p++);
res->verf->committed = be32_to_cpup(p++);
- memcpy(res->verf->verifier, p, NFS4_VERIFIER_SIZE);
- return 0;
+ return decode_write_verifier(xdr, &res->verf->verifier);
out_overflow:
print_overflow_msg(__func__, xdr);
return -EIO;
@@ -5599,7 +5571,7 @@ static int decode_getdevicelist(struct xdr_stream *xdr,
{
__be32 *p;
int status, i;
- struct nfs_writeverf verftemp;
+ nfs4_verifier verftemp;
status = decode_op_hdr(xdr, OP_GETDEVICELIST);
if (status)
@@ -5613,7 +5585,7 @@ static int decode_getdevicelist(struct xdr_stream *xdr,
p += 2;
/* Read verifier */
- p = xdr_decode_opaque_fixed(p, verftemp.verifier, NFS4_VERIFIER_SIZE);
+ p = xdr_decode_opaque_fixed(p, verftemp.data, NFS4_VERIFIER_SIZE);
res->num_devs = be32_to_cpup(p);
@@ -5707,9 +5679,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
__be32 *p;
int status;
u32 layout_count;
- struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
- struct kvec *iov = rcvbuf->head;
- u32 hdrlen, recvd;
+ u32 recvd;
status = decode_op_hdr(xdr, OP_LAYOUTGET);
if (status)
@@ -5746,8 +5716,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
res->type,
res->layoutp->len);
- hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
- recvd = req->rq_rcv_buf.len - hdrlen;
+ recvd = xdr_read_pages(xdr, res->layoutp->len);
if (res->layoutp->len > recvd) {
dprintk("NFS: server cheating in layoutget reply: "
"layout len %u > recvd %u\n",
@@ -5755,8 +5724,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
return -EINVAL;
}
- xdr_read_pages(xdr, res->layoutp->len);
-
if (layout_count > 1) {
/* We only handle a length one array at the moment. Any
* further entries are just ignored. Note that this means
@@ -7103,6 +7070,7 @@ out:
int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
int plus)
{
+ unsigned int savep;
uint32_t bitmap[3] = {0};
uint32_t len;
__be32 *p = xdr_inline_decode(xdr, 4);
@@ -7141,7 +7109,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
if (decode_attr_bitmap(xdr, bitmap) < 0)
goto out_overflow;
- if (decode_attr_length(xdr, &len, &p) < 0)
+ if (decode_attr_length(xdr, &len, &savep) < 0)
goto out_overflow;
if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index f50d3e8d6f22..ea6d111b03e9 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
return false;
return pgio->pg_count + req->wb_bytes <=
- OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+ (unsigned long)pgio->pg_layout_private;
+}
+
+void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ pnfs_generic_pg_init_read(pgio, req);
+ if (unlikely(pgio->pg_lseg == NULL))
+ return; /* Not pNFS */
+
+ pgio->pg_layout_private = (void *)
+ OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+}
+
+static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout,
+ unsigned long *stripe_end)
+{
+ u32 stripe_off;
+ unsigned stripe_size;
+
+ if (layout->raid_algorithm == PNFS_OSD_RAID_0)
+ return true;
+
+ stripe_size = layout->stripe_unit *
+ (layout->group_width - layout->parity);
+
+ div_u64_rem(offset, stripe_size, &stripe_off);
+ if (!stripe_off)
+ return true;
+
+ *stripe_end = stripe_size - stripe_off;
+ return false;
+}
+
+void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ unsigned long stripe_end = 0;
+
+ pnfs_generic_pg_init_write(pgio, req);
+ if (unlikely(pgio->pg_lseg == NULL))
+ return; /* Not pNFS */
+
+ if (req->wb_offset ||
+ !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE,
+ &OBJIO_LSEG(pgio->pg_lseg)->layout,
+ &stripe_end)) {
+ pgio->pg_layout_private = (void *)stripe_end;
+ } else {
+ pgio->pg_layout_private = (void *)
+ OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+ }
}
static const struct nfs_pageio_ops objio_pg_read_ops = {
- .pg_init = pnfs_generic_pg_init_read,
+ .pg_init = objio_init_read,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops objio_pg_write_ops = {
- .pg_init = pnfs_generic_pg_init_write,
+ .pg_init = objio_init_write,
.pg_test = objio_pg_test,
.pg_doio = pnfs_generic_pg_writepages,
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index aed913c833f4..311a79681e2b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -49,11 +49,13 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
hdr->io_start = req_offset(hdr->req);
hdr->good_bytes = desc->pg_count;
hdr->dreq = desc->pg_dreq;
+ hdr->layout_private = desc->pg_layout_private;
hdr->release = release;
hdr->completion_ops = desc->pg_completion_ops;
if (hdr->completion_ops->init_hdr)
hdr->completion_ops->init_hdr(hdr);
}
+EXPORT_SYMBOL_GPL(nfs_pgheader_init);
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
@@ -70,7 +72,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
static inline struct nfs_page *
nfs_page_alloc(void)
{
- struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
if (p)
INIT_LIST_HEAD(&p->wb_list);
return p;
@@ -117,7 +119,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
- req->wb_index = page->index;
+ req->wb_index = page_file_index(page);
page_cache_get(page);
req->wb_offset = offset;
req->wb_pgbase = offset;
@@ -267,7 +269,9 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_error = 0;
desc->pg_lseg = NULL;
desc->pg_dreq = NULL;
+ desc->pg_layout_private = NULL;
}
+EXPORT_SYMBOL_GPL(nfs_pageio_init);
/**
* nfs_can_coalesce_requests - test two requests for compatibility
@@ -409,6 +413,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
} while (ret);
return ret;
}
+EXPORT_SYMBOL_GPL(nfs_pageio_add_request);
/**
* nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
@@ -424,6 +429,7 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
break;
}
}
+EXPORT_SYMBOL_GPL(nfs_pageio_complete);
/**
* nfs_pageio_cond_complete - Conditional I/O completion
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index bbc49caa7a82..2e00feacd4be 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
struct pnfs_layout_segment *lseg = NULL;
- struct page **pages = NULL;
- int i;
- u32 max_resp_sz, max_pages;
dprintk("--> %s\n", __func__);
@@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo,
if (lgp == NULL)
return NULL;
- /* allocate pages for xdr post processing */
- max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
- max_pages = nfs_page_array_len(0, max_resp_sz);
-
- pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
- if (!pages)
- goto out_err_free;
-
- for (i = 0; i < max_pages; i++) {
- pages[i] = alloc_page(gfp_flags);
- if (!pages[i])
- goto out_err_free;
- }
-
lgp->args.minlength = PAGE_CACHE_SIZE;
if (lgp->args.minlength > range->length)
lgp->args.minlength = range->length;
@@ -616,42 +599,29 @@ send_layoutget(struct pnfs_layout_hdr *lo,
lgp->args.type = server->pnfs_curr_ld->id;
lgp->args.inode = ino;
lgp->args.ctx = get_nfs_open_context(ctx);
- lgp->args.layout.pages = pages;
- lgp->args.layout.pglen = max_pages * PAGE_SIZE;
lgp->lsegpp = &lseg;
lgp->gfp_flags = gfp_flags;
/* Synchronously retrieve layout information from server and
* store in lseg.
*/
- nfs4_proc_layoutget(lgp);
+ nfs4_proc_layoutget(lgp, gfp_flags);
if (!lseg) {
/* remember that LAYOUTGET failed and suspend trying */
set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
}
- /* free xdr pages */
- for (i = 0; i < max_pages; i++)
- __free_page(pages[i]);
- kfree(pages);
-
return lseg;
-
-out_err_free:
- /* free any allocated xdr pages, lgp as it's not used */
- if (pages) {
- for (i = 0; i < max_pages; i++) {
- if (!pages[i])
- break;
- __free_page(pages[i]);
- }
- kfree(pages);
- }
- kfree(lgp);
- return NULL;
}
-/* Initiates a LAYOUTRETURN(FILE) */
+/*
+ * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
+ * when the layout segment list is empty.
+ *
+ * Note that a pnfs_layout_hdr can exist with an empty layout segment
+ * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
+ * deviceid is marked invalid.
+ */
int
_pnfs_return_layout(struct inode *ino)
{
@@ -660,22 +630,31 @@ _pnfs_return_layout(struct inode *ino)
LIST_HEAD(tmp_list);
struct nfs4_layoutreturn *lrp;
nfs4_stateid stateid;
- int status = 0;
+ int status = 0, empty;
- dprintk("--> %s\n", __func__);
+ dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
spin_lock(&ino->i_lock);
lo = nfsi->layout;
- if (!lo) {
+ if (!lo || pnfs_test_layout_returned(lo)) {
spin_unlock(&ino->i_lock);
- dprintk("%s: no layout to return\n", __func__);
- return status;
+ dprintk("NFS: %s no layout to return\n", __func__);
+ goto out;
}
stateid = nfsi->layout->plh_stateid;
/* Reference matched in nfs4_layoutreturn_release */
get_layout_hdr(lo);
+ empty = list_empty(&lo->plh_segs);
mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ /* Don't send a LAYOUTRETURN if list was initially empty */
+ if (empty) {
+ spin_unlock(&ino->i_lock);
+ put_layout_hdr(lo);
+ dprintk("NFS: %s no layout segments to return\n", __func__);
+ goto out;
+ }
lo->plh_block_lgets++;
+ pnfs_mark_layout_returned(lo);
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&tmp_list);
@@ -686,6 +665,7 @@ _pnfs_return_layout(struct inode *ino)
status = -ENOMEM;
set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
+ pnfs_clear_layout_returned(lo);
put_layout_hdr(lo);
goto out;
}
@@ -1075,6 +1055,10 @@ pnfs_update_layout(struct inode *ino,
get_layout_hdr(lo);
if (list_empty(&lo->plh_segs))
first = true;
+
+ /* Enable LAYOUTRETURNs */
+ pnfs_clear_layout_returned(lo);
+
spin_unlock(&ino->i_lock);
if (first) {
/* The lo must be on the clp list if there is any
@@ -1209,7 +1193,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
-bool
+void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
const struct nfs_pgio_completion_ops *compl_ops)
{
@@ -1217,13 +1201,12 @@ pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
- return false;
- nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
- server->rsize, 0);
- return true;
+ nfs_pageio_init_read(pgio, inode, compl_ops);
+ else
+ nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}
-bool
+void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
int ioflags,
const struct nfs_pgio_completion_ops *compl_ops)
@@ -1232,10 +1215,9 @@ pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
- return false;
- nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
- server->wsize, ioflags);
- return true;
+ nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
+ else
+ nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}
bool
@@ -1272,7 +1254,7 @@ int pnfs_write_done_resend_to_mds(struct inode *inode,
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
+ nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1388,6 +1370,7 @@ static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
put_lseg(hdr->lseg);
nfs_writehdr_free(hdr);
}
+EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
@@ -1427,7 +1410,7 @@ int pnfs_read_done_resend_to_mds(struct inode *inode,
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
+ nfs_pageio_init_read(&pgio, inode, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1542,6 +1525,7 @@ static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
put_lseg(hdr->lseg);
nfs_readhdr_free(hdr);
}
+EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 64f90d845f6a..745aa1b39e7c 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -64,6 +64,7 @@ enum {
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
NFS_LAYOUT_INVALID, /* layout is being destroyed */
+ NFS_LAYOUT_RETURNED, /* layout has already been returned */
};
enum layoutdriver_policy_flags {
@@ -171,16 +172,16 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server,
struct pnfs_devicelist *devlist);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev);
-extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
/* pnfs.c */
void get_layout_hdr(struct pnfs_layout_hdr *lo);
void put_lseg(struct pnfs_layout_segment *lseg);
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+void pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
const struct nfs_pgio_completion_ops *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+void pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
int, const struct nfs_pgio_completion_ops *);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
@@ -255,6 +256,24 @@ struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *
bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *);
void nfs4_deviceid_purge_client(const struct nfs_client *);
+static inline void
+pnfs_mark_layout_returned(struct pnfs_layout_hdr *lo)
+{
+ set_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
+}
+
+static inline void
+pnfs_clear_layout_returned(struct pnfs_layout_hdr *lo)
+{
+ clear_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
+}
+
+static inline bool
+pnfs_test_layout_returned(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_RETURNED, &lo->plh_flags);
+}
+
static inline int lo_fail_bit(u32 iomode)
{
return iomode == IOMODE_RW ?
@@ -438,16 +457,16 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
-static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+static inline void pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
const struct nfs_pgio_completion_ops *compl_ops)
{
- return false;
+ nfs_pageio_init_read(pgio, inode, compl_ops);
}
-static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
+static inline void pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
const struct nfs_pgio_completion_ops *compl_ops)
{
- return false;
+ nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
}
static inline int
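
The pnfs.h hunk introduces an NFS_LAYOUT_RETURNED flag together with a set/clear/test helper trio built on the kernel's atomic bit operations. The stand-alone sketch below shows the same helper-trio shape with plain (non-atomic) bit arithmetic; the bit position and the structure name are arbitrary stand-ins.

/* Illustrative sketch of the mark/clear/test helpers added for the new
 * NFS_LAYOUT_RETURNED flag.  The kernel uses atomic set_bit()/clear_bit()/
 * test_bit() on lo->plh_flags; this version is plain C and not atomic. */
#include <stdbool.h>
#include <stdio.h>

enum { LAYOUT_RETURNED_BIT = 3 };       /* bit position is arbitrary here */

struct layout_hdr { unsigned long flags; };

static inline void mark_layout_returned(struct layout_hdr *lo)
{
        lo->flags |= 1UL << LAYOUT_RETURNED_BIT;
}

static inline void clear_layout_returned(struct layout_hdr *lo)
{
        lo->flags &= ~(1UL << LAYOUT_RETURNED_BIT);
}

static inline bool test_layout_returned(const struct layout_hdr *lo)
{
        return lo->flags & (1UL << LAYOUT_RETURNED_BIT);
}

int main(void)
{
        struct layout_hdr lo = { 0 };

        mark_layout_returned(&lo);
        printf("returned? %d\n", test_layout_returned(&lo));
        clear_layout_returned(&lo);
        printf("returned? %d\n", test_layout_returned(&lo));
        return 0;
}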
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 4433806e116f..50a88c3546ed 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -734,6 +734,38 @@ out_einval:
return -EINVAL;
}
+static int nfs_have_delegation(struct inode *inode, fmode_t flags)
+{
+ return 0;
+}
+
+static int nfs_return_delegation(struct inode *inode)
+{
+ nfs_wb_all(inode);
+ return 0;
+}
+
+static const struct inode_operations nfs_dir_inode_operations = {
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+};
+
+static const struct inode_operations nfs_file_inode_operations = {
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+};
+
const struct nfs_rpc_ops nfs_v2_clientops = {
.version = 2, /* protocol version */
.dentry_ops = &nfs_dentry_operations,
@@ -742,6 +774,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.file_ops = &nfs_file_operations,
.getroot = nfs_proc_get_root,
.submount = nfs_submount,
+ .try_mount = nfs_try_mount,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
.lookup = nfs_proc_lookup,
@@ -767,9 +800,11 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.pathconf = nfs_proc_pathconf,
.decode_dirent = nfs2_decode_dirent,
.read_setup = nfs_proc_read_setup,
+ .read_pageio_init = nfs_pageio_init_read,
.read_rpc_prepare = nfs_proc_read_rpc_prepare,
.read_done = nfs_read_done,
.write_setup = nfs_proc_write_setup,
+ .write_pageio_init = nfs_pageio_init_write,
.write_rpc_prepare = nfs_proc_write_rpc_prepare,
.write_done = nfs_write_done,
.commit_setup = nfs_proc_commit_setup,
@@ -777,5 +812,11 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.lock = nfs_proc_lock,
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
+ .have_delegation = nfs_have_delegation,
+ .return_delegation = nfs_return_delegation,
+ .alloc_client = nfs_alloc_client,
.init_client = nfs_init_client,
+ .free_client = nfs_free_client,
+ .create_server = nfs_create_server,
+ .clone_server = nfs_clone_server,
};
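
The proc.c hunk grows the nfs_rpc_ops table with per-version hooks (try_mount, read_pageio_init, write_pageio_init, have_delegation, return_delegation, and the client/server constructors), with NFSv2 supplying trivial stubs where the feature does not exist. The sketch below shows the ops-table pattern in isolation: protocol-neutral code calls through function pointers instead of #ifdefs. All names in it are simplified stand-ins, and only two of the hooks are modelled.

/* Illustrative ops-table sketch, not the kernel's struct nfs_rpc_ops. */
#include <stdio.h>

struct rpc_ops {
        int  version;
        int  (*have_delegation)(void);
        void (*return_delegation)(void);
};

static int  v2_have_delegation(void)   { return 0; }   /* NFSv2: none */
static void v2_return_delegation(void) { /* nothing to return for v2 */ }

static const struct rpc_ops nfs_v2_ops = {
        .version           = 2,
        .have_delegation   = v2_have_delegation,
        .return_delegation = v2_return_delegation,
};

static void generic_rename_prepare(const struct rpc_ops *ops)
{
        /* protocol-neutral caller, in the spirit of nfs_sillyrename() */
        if (ops->have_delegation())
                ops->return_delegation();
        printf("rename prepared for NFSv%d\n", ops->version);
}

int main(void)
{
        generic_rename_prepare(&nfs_v2_ops);
        return 0;
}

The unlink.c hunk further down uses exactly this indirection when it replaces the direct nfs_inode_return_delegation() call with NFS_PROTO(dentry->d_inode)->return_delegation().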
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 86ced7836214..b6bdb18e892c 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -20,8 +20,6 @@
#include <linux/nfs_page.h>
#include <linux/module.h>
-#include "pnfs.h"
-
#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
@@ -50,6 +48,7 @@ struct nfs_read_header *nfs_readhdr_alloc(void)
}
return rhdr;
}
+EXPORT_SYMBOL_GPL(nfs_readhdr_alloc);
static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount)
@@ -82,6 +81,7 @@ void nfs_readhdr_free(struct nfs_pgio_header *hdr)
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
+EXPORT_SYMBOL_GPL(nfs_readhdr_free);
void nfs_readdata_release(struct nfs_read_data *rdata)
{
@@ -98,6 +98,7 @@ void nfs_readdata_release(struct nfs_read_data *rdata)
if (atomic_dec_and_test(&hdr->refcnt))
hdr->completion_ops->completion(hdr);
}
+EXPORT_SYMBOL_GPL(nfs_readdata_release);
static
int nfs_return_empty_page(struct page *page)
@@ -108,13 +109,14 @@ int nfs_return_empty_page(struct page *page)
return 0;
}
-void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode,
const struct nfs_pgio_completion_ops *compl_ops)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
NFS_SERVER(inode)->rsize, 0);
}
+EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
@@ -123,14 +125,6 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode,
- const struct nfs_pgio_completion_ops *compl_ops)
-{
- if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
- nfs_pageio_init_read_mds(pgio, inode, compl_ops);
-}
-
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct page *page)
{
@@ -149,7 +143,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
- nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
+ NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
NFS_I(inode)->read_io += pgio.pg_bytes_written;
@@ -407,6 +401,7 @@ int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
return nfs_pagein_multi(desc, hdr);
return nfs_pagein_one(desc, hdr);
}
+EXPORT_SYMBOL_GPL(nfs_generic_pagein);
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
@@ -532,11 +527,11 @@ static const struct rpc_call_ops nfs_read_common_ops = {
int nfs_readpage(struct file *file, struct page *page)
{
struct nfs_open_context *ctx;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int error;
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
- page, PAGE_CACHE_SIZE, page->index);
+ page, PAGE_CACHE_SIZE, page_file_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
nfs_add_stats(inode, NFSIOS_READPAGES, 1);
@@ -590,7 +585,7 @@ static int
readpage_async_filler(void *data, struct page *page)
{
struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *new;
unsigned int len;
int error;
@@ -652,7 +647,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
+ NFS_PROTO(inode)->read_pageio_init(&pgio, inode, &nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
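
Several read.c hunks replace page->mapping and page->index with page_file_mapping() and page_file_index(). As far as can be read from the diff, this prepares for pages that sit in the swap cache, where the mapping pointer cannot be used directly, so an accessor picks the right source. The sketch below is only a user-space analogy; struct fake_page and page_mapping_for_io() are invented names and deliberately far simpler than the kernel's struct page helpers.

/* Illustrative analogy for the page_file_mapping() substitution above. */
#include <stdio.h>

struct address_space { const char *name; };

struct fake_page {
        int swapcache;                    /* 1 if the page backs swap   */
        struct address_space *mapping;    /* valid for pagecache pages  */
        struct address_space *swap_space; /* used when swapcache == 1   */
        unsigned long index;
};

static struct address_space *page_mapping_for_io(struct fake_page *p)
{
        return p->swapcache ? p->swap_space : p->mapping;
}

int main(void)
{
        struct address_space file = { "file mapping" };
        struct address_space swap = { "swap mapping" };
        struct fake_page a = { 0, &file, &swap, 7 };
        struct fake_page b = { 1, NULL,  &swap, 9 };

        printf("%s, index %lu\n", page_mapping_for_io(&a)->name, a.index);
        printf("%s, index %lu\n", page_mapping_for_io(&b)->name, b.index);
        return 0;
}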
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 8b2a2977b720..239aff7338eb 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -64,11 +64,12 @@
#include "internal.h"
#include "fscache.h"
#include "pnfs.h"
+#include "nfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
#define NFS_TEXT_DATA 1
-#ifdef CONFIG_NFS_V3
+#if IS_ENABLED(CONFIG_NFS_V3)
#define NFS_DEFAULT_VERSION 3
#else
#define NFS_DEFAULT_VERSION 2
@@ -278,37 +279,17 @@ static match_table_t nfs_vers_tokens = {
{ Opt_vers_err, NULL }
};
-struct nfs_mount_info {
- void (*fill_super)(struct super_block *, struct nfs_mount_info *);
- int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
- struct nfs_parsed_mount_data *parsed;
- struct nfs_clone_mount *cloned;
- struct nfs_fh *mntfh;
-};
-
-static void nfs_umount_begin(struct super_block *);
-static int nfs_statfs(struct dentry *, struct kstatfs *);
-static int nfs_show_options(struct seq_file *, struct dentry *);
-static int nfs_show_devname(struct seq_file *, struct dentry *);
-static int nfs_show_path(struct seq_file *, struct dentry *);
-static int nfs_show_stats(struct seq_file *, struct dentry *);
-static struct dentry *nfs_fs_mount_common(struct file_system_type *,
- struct nfs_server *, int, const char *, struct nfs_mount_info *);
-static struct dentry *nfs_fs_mount(struct file_system_type *,
- int, const char *, void *);
static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data);
-static void nfs_put_super(struct super_block *);
-static void nfs_kill_super(struct super_block *);
-static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
-static struct file_system_type nfs_fs_type = {
+struct file_system_type nfs_fs_type = {
.owner = THIS_MODULE,
.name = "nfs",
.mount = nfs_fs_mount,
.kill_sb = nfs_kill_super,
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+EXPORT_SYMBOL_GPL(nfs_fs_type);
struct file_system_type nfs_xdev_fs_type = {
.owner = THIS_MODULE,
@@ -318,7 +299,7 @@ struct file_system_type nfs_xdev_fs_type = {
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
-static const struct super_operations nfs_sops = {
+const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
@@ -332,77 +313,40 @@ static const struct super_operations nfs_sops = {
.show_stats = nfs_show_stats,
.remount_fs = nfs_remount,
};
+EXPORT_SYMBOL_GPL(nfs_sops);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
static int nfs4_validate_mount_data(void *options,
struct nfs_parsed_mount_data *args, const char *dev_name);
-static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_mount_info *mount_info);
-static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
-static void nfs4_kill_super(struct super_block *sb);
-
-static struct file_system_type nfs4_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs_fs_mount,
- .kill_sb = nfs4_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
-static struct file_system_type nfs4_remote_fs_type = {
+struct file_system_type nfs4_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
- .mount = nfs4_remote_mount,
- .kill_sb = nfs4_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
-
-struct file_system_type nfs4_xdev_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_xdev_mount,
- .kill_sb = nfs4_kill_super,
+ .mount = nfs_fs_mount,
+ .kill_sb = nfs_kill_super,
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+EXPORT_SYMBOL_GPL(nfs4_fs_type);
-static struct file_system_type nfs4_remote_referral_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_remote_referral_mount,
- .kill_sb = nfs4_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
+static int __init register_nfs4_fs(void)
+{
+ return register_filesystem(&nfs4_fs_type);
+}
-struct file_system_type nfs4_referral_fs_type = {
- .owner = THIS_MODULE,
- .name = "nfs4",
- .mount = nfs4_referral_mount,
- .kill_sb = nfs4_kill_super,
- .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
-};
+static void unregister_nfs4_fs(void)
+{
+ unregister_filesystem(&nfs4_fs_type);
+}
+#else
+static int __init register_nfs4_fs(void)
+{
+ return 0;
+}
-static const struct super_operations nfs4_sops = {
- .alloc_inode = nfs_alloc_inode,
- .destroy_inode = nfs_destroy_inode,
- .write_inode = nfs_write_inode,
- .put_super = nfs_put_super,
- .statfs = nfs_statfs,
- .evict_inode = nfs4_evict_inode,
- .umount_begin = nfs_umount_begin,
- .show_options = nfs_show_options,
- .show_devname = nfs_show_devname,
- .show_path = nfs_show_path,
- .show_stats = nfs_show_stats,
- .remount_fs = nfs_remount,
-};
+static void unregister_nfs4_fs(void)
+{
+}
#endif
static struct shrinker acl_shrinker = {
@@ -421,21 +365,18 @@ int __init register_nfs_fs(void)
if (ret < 0)
goto error_0;
- ret = nfs_register_sysctl();
+ ret = register_nfs4_fs();
if (ret < 0)
goto error_1;
-#ifdef CONFIG_NFS_V4
- ret = register_filesystem(&nfs4_fs_type);
+
+ ret = nfs_register_sysctl();
if (ret < 0)
goto error_2;
-#endif
register_shrinker(&acl_shrinker);
return 0;
-#ifdef CONFIG_NFS_V4
error_2:
- nfs_unregister_sysctl();
-#endif
+ unregister_nfs4_fs();
error_1:
unregister_filesystem(&nfs_fs_type);
error_0:
@@ -448,10 +389,8 @@ error_0:
void __exit unregister_nfs_fs(void)
{
unregister_shrinker(&acl_shrinker);
-#ifdef CONFIG_NFS_V4
- unregister_filesystem(&nfs4_fs_type);
-#endif
nfs_unregister_sysctl();
+ unregister_nfs4_fs();
unregister_filesystem(&nfs_fs_type);
}
@@ -462,6 +401,7 @@ void nfs_sb_active(struct super_block *sb)
if (atomic_inc_return(&server->active) == 1)
atomic_inc(&sb->s_active);
}
+EXPORT_SYMBOL_GPL(nfs_sb_active);
void nfs_sb_deactive(struct super_block *sb)
{
@@ -470,11 +410,12 @@ void nfs_sb_deactive(struct super_block *sb)
if (atomic_dec_and_test(&server->active))
deactivate_super(sb);
}
+EXPORT_SYMBOL_GPL(nfs_sb_deactive);
/*
* Deliver file system statistics to userspace
*/
-static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct nfs_server *server = NFS_SB(dentry->d_sb);
unsigned char blockbits;
@@ -535,6 +476,7 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
dprintk("%s: statfs error = %d\n", __func__, -error);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_statfs);
/*
* Map the security flavour number to a name
@@ -640,7 +582,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
nfs_show_mountd_netid(m, nfss, showdefaults);
}
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
int showdefaults)
{
@@ -757,7 +699,7 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
/*
* Describe the mount options on this VFS mountpoint
*/
-static int nfs_show_options(struct seq_file *m, struct dentry *root)
+int nfs_show_options(struct seq_file *m, struct dentry *root)
{
struct nfs_server *nfss = NFS_SB(root->d_sb);
@@ -771,8 +713,9 @@ static int nfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_show_options);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
#ifdef CONFIG_NFS_V4_1
static void show_sessions(struct seq_file *m, struct nfs_server *server)
{
@@ -805,7 +748,7 @@ static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
}
}
#else
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
static void show_pnfs(struct seq_file *m, struct nfs_server *server)
{
}
@@ -815,7 +758,7 @@ static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
}
#endif
-static int nfs_show_devname(struct seq_file *m, struct dentry *root)
+int nfs_show_devname(struct seq_file *m, struct dentry *root)
{
char *page = (char *) __get_free_page(GFP_KERNEL);
char *devname, *dummy;
@@ -830,17 +773,19 @@ static int nfs_show_devname(struct seq_file *m, struct dentry *root)
free_page((unsigned long)page);
return err;
}
+EXPORT_SYMBOL_GPL(nfs_show_devname);
-static int nfs_show_path(struct seq_file *m, struct dentry *dentry)
+int nfs_show_path(struct seq_file *m, struct dentry *dentry)
{
seq_puts(m, "/");
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_show_path);
/*
* Present statistical information for this VFS mountpoint
*/
-static int nfs_show_stats(struct seq_file *m, struct dentry *root)
+int nfs_show_stats(struct seq_file *m, struct dentry *root)
{
int i, cpu;
struct nfs_server *nfss = NFS_SB(root->d_sb);
@@ -870,7 +815,7 @@ static int nfs_show_stats(struct seq_file *m, struct dentry *root)
seq_printf(m, ",bsize=%u", nfss->bsize);
seq_printf(m, ",namlen=%u", nfss->namelen);
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
if (nfss->nfs_client->rpc_ops->version == 4) {
seq_printf(m, "\n\tnfsv4:\t");
seq_printf(m, "bm0=0x%x", nfss->attr_bitmask[0]);
@@ -928,12 +873,13 @@ static int nfs_show_stats(struct seq_file *m, struct dentry *root)
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_show_stats);
/*
* Begin unmount by attempting to remove all automounted mountpoints we added
* in response to xdev traversals and referrals
*/
-static void nfs_umount_begin(struct super_block *sb)
+void nfs_umount_begin(struct super_block *sb)
{
struct nfs_server *server;
struct rpc_clnt *rpc;
@@ -947,6 +893,7 @@ static void nfs_umount_begin(struct super_block *sb)
if (!IS_ERR(rpc))
rpc_killall_tasks(rpc);
}
+EXPORT_SYMBOL_GPL(nfs_umount_begin);
static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
{
@@ -1748,8 +1695,9 @@ static int nfs_request_mount(struct nfs_parsed_mount_data *args,
return nfs_walk_authlist(args, &request);
}
-static struct dentry *nfs_try_mount(int flags, const char *dev_name,
- struct nfs_mount_info *mount_info)
+struct dentry *nfs_try_mount(int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
{
int status;
struct nfs_server *server;
@@ -1761,12 +1709,13 @@ static struct dentry *nfs_try_mount(int flags, const char *dev_name,
}
/* Get a volume representation */
- server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
+ server = nfs_mod->rpc_ops->create_server(mount_info, nfs_mod);
if (IS_ERR(server))
return ERR_CAST(server);
- return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
+ return nfs_fs_mount_common(server, flags, dev_name, mount_info, nfs_mod);
}
+EXPORT_SYMBOL_GPL(nfs_try_mount);
/*
* Split "dev_name" into "hostname:export_path".
@@ -1970,7 +1919,7 @@ static int nfs23_validate_mount_data(void *options,
return NFS_TEXT_DATA;
}
-#ifndef CONFIG_NFS_V3
+#if !IS_ENABLED(CONFIG_NFS_V3)
if (args->version == 3)
goto out_v3_not_compiled;
#endif /* !CONFIG_NFS_V3 */
@@ -1990,7 +1939,7 @@ out_no_sec:
dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n");
return -EINVAL;
-#ifndef CONFIG_NFS_V3
+#if !IS_ENABLED(CONFIG_NFS_V3)
out_v3_not_compiled:
dfprintk(MOUNT, "NFS: NFSv3 is not compiled into kernel\n");
return -EPROTONOSUPPORT;
@@ -2009,7 +1958,7 @@ out_invalid_fh:
return -EINVAL;
}
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
static int nfs_validate_mount_data(struct file_system_type *fs_type,
void *options,
struct nfs_parsed_mount_data *args,
@@ -2047,7 +1996,7 @@ static int nfs_validate_text_mount_data(void *options,
goto out_no_address;
if (args->version == 4) {
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
port = NFS_PORT;
max_namelen = NFS4_MAXNAMLEN;
max_pathlen = NFS4_MAXPATHLEN;
@@ -2070,7 +2019,7 @@ static int nfs_validate_text_mount_data(void *options,
&args->nfs_server.export_path,
max_pathlen);
-#ifndef CONFIG_NFS_V4
+#if !IS_ENABLED(CONFIG_NFS_V4)
out_v4_not_compiled:
dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
return -EPROTONOSUPPORT;
@@ -2108,7 +2057,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
return 0;
}
-static int
+int
nfs_remount(struct super_block *sb, int *flags, char *raw_data)
{
int error;
@@ -2169,11 +2118,12 @@ out:
kfree(data);
return error;
}
+EXPORT_SYMBOL_GPL(nfs_remount);
/*
* Initialise the common bits of the superblock
*/
-static inline void nfs_initialise_sb(struct super_block *sb)
+inline void nfs_initialise_sb(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
@@ -2195,18 +2145,19 @@ static inline void nfs_initialise_sb(struct super_block *sb)
/*
* Finish setting up an NFS2/3 superblock
*/
-static void nfs_fill_super(struct super_block *sb,
- struct nfs_mount_info *mount_info)
+void nfs_fill_super(struct super_block *sb, struct nfs_mount_info *mount_info)
{
struct nfs_parsed_mount_data *data = mount_info->parsed;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = 0;
sb->s_blocksize = 0;
- if (data->bsize)
+ sb->s_xattr = server->nfs_client->cl_nfs_mod->xattr;
+ sb->s_op = server->nfs_client->cl_nfs_mod->sops;
+ if (data && data->bsize)
sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
- if (server->nfs_client->rpc_ops->version == 3) {
+ if (server->nfs_client->rpc_ops->version != 2) {
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
@@ -2214,15 +2165,14 @@ static void nfs_fill_super(struct super_block *sb,
sb->s_time_gran = 1;
}
- sb->s_op = &nfs_sops;
nfs_initialise_sb(sb);
}
+EXPORT_SYMBOL_GPL(nfs_fill_super);
/*
- * Finish setting up a cloned NFS2/3 superblock
+ * Finish setting up a cloned NFS2/3/4 superblock
*/
-static void nfs_clone_super(struct super_block *sb,
- struct nfs_mount_info *mount_info)
+void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info)
{
const struct super_block *old_sb = mount_info->cloned->sb;
struct nfs_server *server = NFS_SB(sb);
@@ -2230,16 +2180,17 @@ static void nfs_clone_super(struct super_block *sb,
sb->s_blocksize_bits = old_sb->s_blocksize_bits;
sb->s_blocksize = old_sb->s_blocksize;
sb->s_maxbytes = old_sb->s_maxbytes;
+ sb->s_xattr = old_sb->s_xattr;
+ sb->s_op = old_sb->s_op;
+ sb->s_time_gran = 1;
- if (server->nfs_client->rpc_ops->version == 3) {
+ if (server->nfs_client->rpc_ops->version != 2) {
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
sb->s_flags |= MS_POSIXACL;
- sb->s_time_gran = 1;
}
- sb->s_op = old_sb->s_op;
nfs_initialise_sb(sb);
}
@@ -2381,14 +2332,15 @@ static int nfs_bdi_register(struct nfs_server *server)
return bdi_register_dev(&server->backing_dev_info, server->s_dev);
}
-static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
- struct nfs_mount_info *mount_info)
+int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
{
return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
}
+EXPORT_SYMBOL_GPL(nfs_set_sb_security);
-static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
- struct nfs_mount_info *mount_info)
+int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
{
/* clone any lsm security options from the parent to the new sb */
security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
@@ -2396,11 +2348,12 @@ static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
return -ESTALE;
return 0;
}
+EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
-static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
- struct nfs_server *server,
- int flags, const char *dev_name,
- struct nfs_mount_info *mount_info)
+struct dentry *nfs_fs_mount_common(struct nfs_server *server,
+ int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info,
+ struct nfs_subversion *nfs_mod)
{
struct super_block *s;
struct dentry *mntroot = ERR_PTR(-ENOMEM);
@@ -2419,7 +2372,7 @@ static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
sb_mntdata.mntflags |= MS_SYNCHRONOUS;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, flags, &sb_mntdata);
+ s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
if (IS_ERR(s)) {
mntroot = ERR_CAST(s);
goto out_err_nosb;
@@ -2469,8 +2422,9 @@ error_splat_bdi:
deactivate_locked_super(s);
goto out;
}
+EXPORT_SYMBOL_GPL(nfs_fs_mount_common);
-static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data)
{
struct nfs_mount_info mount_info = {
@@ -2478,6 +2432,7 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
.set_security = nfs_set_sb_security,
};
struct dentry *mntroot = ERR_PTR(-ENOMEM);
+ struct nfs_subversion *nfs_mod;
int error;
mount_info.parsed = nfs_alloc_parsed_mount_data();
@@ -2494,34 +2449,38 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
goto out;
}
-#ifdef CONFIG_NFS_V4
- if (mount_info.parsed->version == 4)
- mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
- else
-#endif /* CONFIG_NFS_V4 */
- mntroot = nfs_try_mount(flags, dev_name, &mount_info);
+ nfs_mod = get_nfs_version(mount_info.parsed->version);
+ if (IS_ERR(nfs_mod)) {
+ mntroot = ERR_CAST(nfs_mod);
+ goto out;
+ }
+
+ mntroot = nfs_mod->rpc_ops->try_mount(flags, dev_name, &mount_info, nfs_mod);
+ put_nfs_version(nfs_mod);
out:
nfs_free_parsed_mount_data(mount_info.parsed);
nfs_free_fhandle(mount_info.mntfh);
return mntroot;
}
+EXPORT_SYMBOL_GPL(nfs_fs_mount);
/*
* Ensure that we unregister the bdi before kill_anon_super
* releases the device name
*/
-static void nfs_put_super(struct super_block *s)
+void nfs_put_super(struct super_block *s)
{
struct nfs_server *server = NFS_SB(s);
bdi_unregister(&server->backing_dev_info);
}
+EXPORT_SYMBOL_GPL(nfs_put_super);
/*
* Destroy an NFS2/3 superblock
*/
-static void nfs_kill_super(struct super_block *s)
+void nfs_kill_super(struct super_block *s)
{
struct nfs_server *server = NFS_SB(s);
@@ -2529,31 +2488,38 @@ static void nfs_kill_super(struct super_block *s)
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
+EXPORT_SYMBOL_GPL(nfs_kill_super);
/*
* Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
*/
-static struct dentry *
-nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
- const char *dev_name, struct nfs_mount_info *mount_info)
+struct dentry *
+nfs_xdev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
{
- struct nfs_clone_mount *data = mount_info->cloned;
+ struct nfs_clone_mount *data = raw_data;
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_clone_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = data,
+ };
struct nfs_server *server;
struct dentry *mntroot = ERR_PTR(-ENOMEM);
+ struct nfs_subversion *nfs_mod = NFS_SB(data->sb)->nfs_client->cl_nfs_mod;
int error;
dprintk("--> nfs_xdev_mount_common()\n");
- mount_info->mntfh = data->fh;
+ mount_info.mntfh = mount_info.cloned->fh;
/* create a new volume representation */
- server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
+ server = nfs_mod->rpc_ops->clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
if (IS_ERR(server)) {
error = PTR_ERR(server);
goto out_err;
}
- mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
+ mntroot = nfs_fs_mount_common(server, flags, dev_name, &mount_info, nfs_mod);
dprintk("<-- nfs_xdev_mount_common() = 0\n");
out:
return mntroot;
@@ -2563,60 +2529,7 @@ out_err:
goto out;
}
-/*
- * Clone an NFS2/3 server record on xdev traversal (FSID-change)
- */
-static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
-{
- struct nfs_mount_info mount_info = {
- .fill_super = nfs_clone_super,
- .set_security = nfs_clone_sb_security,
- .cloned = raw_data,
- };
- return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
-}
-
-#ifdef CONFIG_NFS_V4
-
-/*
- * Finish setting up a cloned NFS4 superblock
- */
-static void nfs4_clone_super(struct super_block *sb,
- struct nfs_mount_info *mount_info)
-{
- const struct super_block *old_sb = mount_info->cloned->sb;
- sb->s_blocksize_bits = old_sb->s_blocksize_bits;
- sb->s_blocksize = old_sb->s_blocksize;
- sb->s_maxbytes = old_sb->s_maxbytes;
- sb->s_time_gran = 1;
- sb->s_op = old_sb->s_op;
- /*
- * The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
- */
- sb->s_flags |= MS_POSIXACL;
- sb->s_xattr = old_sb->s_xattr;
- nfs_initialise_sb(sb);
-}
-
-/*
- * Set up an NFS4 superblock
- */
-static void nfs4_fill_super(struct super_block *sb,
- struct nfs_mount_info *mount_info)
-{
- sb->s_time_gran = 1;
- sb->s_op = &nfs4_sops;
- /*
- * The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
- */
- sb->s_flags |= MS_POSIXACL;
- sb->s_xattr = nfs4_xattr_handlers;
- nfs_initialise_sb(sb);
-}
+#if IS_ENABLED(CONFIG_NFS_V4)
static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
{
@@ -2716,249 +2629,57 @@ out_no_address:
}
/*
- * Get the superblock for the NFS4 root partition
+ * NFS v4 module parameters need to stay in the
+ * NFS client for backwards compatibility
*/
-static struct dentry *
-nfs4_remote_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *info)
-{
- struct nfs_mount_info *mount_info = info;
- struct nfs_server *server;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
- mount_info->fill_super = nfs4_fill_super;
- mount_info->set_security = nfs_set_sb_security;
-
- /* Get a volume representation */
- server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
- }
-
- mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
-
-out:
- return mntroot;
-}
-
-static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
- int flags, void *data, const char *hostname)
-{
- struct vfsmount *root_mnt;
- char *root_devname;
- size_t len;
+unsigned int nfs_callback_set_tcpport;
+unsigned short nfs_callback_tcpport;
+/* Default cache timeout is 10 minutes */
+unsigned int nfs_idmap_cache_timeout = 600;
+/* Turn off NFSv4 uid/gid mapping when using AUTH_SYS */
+bool nfs4_disable_idmapping = true;
+unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
+unsigned short send_implementation_id = 1;
+
+EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
+EXPORT_SYMBOL_GPL(nfs_callback_tcpport);
+EXPORT_SYMBOL_GPL(nfs_idmap_cache_timeout);
+EXPORT_SYMBOL_GPL(nfs4_disable_idmapping);
+EXPORT_SYMBOL_GPL(max_session_slots);
+EXPORT_SYMBOL_GPL(send_implementation_id);
+
+#define NFS_CALLBACK_MAXPORTNR (65535U)
+
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
+{
+ unsigned long num;
+ int ret;
- len = strlen(hostname) + 5;
- root_devname = kmalloc(len, GFP_KERNEL);
- if (root_devname == NULL)
- return ERR_PTR(-ENOMEM);
- /* Does hostname needs to be enclosed in brackets? */
- if (strchr(hostname, ':'))
- snprintf(root_devname, len, "[%s]:/", hostname);
- else
- snprintf(root_devname, len, "%s:/", hostname);
- root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
- kfree(root_devname);
- return root_mnt;
+ if (!val)
+ return -EINVAL;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
+ return -EINVAL;
+ *((unsigned int *)kp->arg) = num;
+ return 0;
}
-
-struct nfs_referral_count {
- struct list_head list;
- const struct task_struct *task;
- unsigned int referral_count;
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
};
-
-static LIST_HEAD(nfs_referral_count_list);
-static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
-
-static struct nfs_referral_count *nfs_find_referral_count(void)
-{
- struct nfs_referral_count *p;
-
- list_for_each_entry(p, &nfs_referral_count_list, list) {
- if (p->task == current)
- return p;
- }
- return NULL;
-}
-
-#define NFS_MAX_NESTED_REFERRALS 2
-
-static int nfs_referral_loop_protect(void)
-{
- struct nfs_referral_count *p, *new;
- int ret = -ENOMEM;
-
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- goto out;
- new->task = current;
- new->referral_count = 1;
-
- ret = 0;
- spin_lock(&nfs_referral_count_list_lock);
- p = nfs_find_referral_count();
- if (p != NULL) {
- if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
- ret = -ELOOP;
- else
- p->referral_count++;
- } else {
- list_add(&new->list, &nfs_referral_count_list);
- new = NULL;
- }
- spin_unlock(&nfs_referral_count_list_lock);
- kfree(new);
-out:
- return ret;
-}
-
-static void nfs_referral_loop_unprotect(void)
-{
- struct nfs_referral_count *p;
-
- spin_lock(&nfs_referral_count_list_lock);
- p = nfs_find_referral_count();
- p->referral_count--;
- if (p->referral_count == 0)
- list_del(&p->list);
- else
- p = NULL;
- spin_unlock(&nfs_referral_count_list_lock);
- kfree(p);
-}
-
-static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
- const char *export_path)
-{
- struct dentry *dentry;
- int err;
-
- if (IS_ERR(root_mnt))
- return ERR_CAST(root_mnt);
-
- err = nfs_referral_loop_protect();
- if (err) {
- mntput(root_mnt);
- return ERR_PTR(err);
- }
-
- dentry = mount_subtree(root_mnt, export_path);
- nfs_referral_loop_unprotect();
-
- return dentry;
-}
-
-static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_mount_info *mount_info)
-{
- char *export_path;
- struct vfsmount *root_mnt;
- struct dentry *res;
- struct nfs_parsed_mount_data *data = mount_info->parsed;
-
- dfprintk(MOUNT, "--> nfs4_try_mount()\n");
-
- mount_info->fill_super = nfs4_fill_super;
-
- export_path = data->nfs_server.export_path;
- data->nfs_server.export_path = "/";
- root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
- data->nfs_server.hostname);
- data->nfs_server.export_path = export_path;
-
- res = nfs_follow_remote_path(root_mnt, export_path);
-
- dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
- IS_ERR(res) ? PTR_ERR(res) : 0,
- IS_ERR(res) ? " [error]" : "");
- return res;
-}
-
-static void nfs4_kill_super(struct super_block *sb)
-{
- struct nfs_server *server = NFS_SB(sb);
-
- dprintk("--> %s\n", __func__);
- nfs_super_return_all_delegations(sb);
- kill_anon_super(sb);
- nfs_fscache_release_super_cookie(sb);
- nfs_free_server(server);
- dprintk("<-- %s\n", __func__);
-}
-
-/*
- * Clone an NFS4 server record on xdev traversal (FSID-change)
- */
-static struct dentry *
-nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
-{
- struct nfs_mount_info mount_info = {
- .fill_super = nfs4_clone_super,
- .set_security = nfs_clone_sb_security,
- .cloned = raw_data,
- };
- return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
-}
-
-static struct dentry *
-nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
-{
- struct nfs_mount_info mount_info = {
- .fill_super = nfs4_fill_super,
- .set_security = nfs_clone_sb_security,
- .cloned = raw_data,
- };
- struct nfs_server *server;
- struct dentry *mntroot = ERR_PTR(-ENOMEM);
-
- dprintk("--> nfs4_referral_get_sb()\n");
-
- mount_info.mntfh = nfs_alloc_fhandle();
- if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
- goto out;
-
- /* create a new volume representation */
- server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
- }
-
- mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
-out:
- nfs_free_fhandle(mount_info.mntfh);
- return mntroot;
-}
-
-/*
- * Create an NFS4 server record on referral traversal
- */
-static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
-{
- struct nfs_clone_mount *data = raw_data;
- char *export_path;
- struct vfsmount *root_mnt;
- struct dentry *res;
-
- dprintk("--> nfs4_referral_mount()\n");
-
- export_path = data->mnt_path;
- data->mnt_path = "/";
-
- root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
- flags, data, data->hostname);
- data->mnt_path = export_path;
-
- res = nfs_follow_remote_path(root_mnt, export_path);
- dprintk("<-- nfs4_referral_mount() = %ld%s\n",
- IS_ERR(res) ? PTR_ERR(res) : 0,
- IS_ERR(res) ? " [error]" : "");
- return res;
-}
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+
+module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
+module_param(nfs_idmap_cache_timeout, int, 0644);
+module_param(nfs4_disable_idmapping, bool, 0644);
+MODULE_PARM_DESC(nfs4_disable_idmapping,
+ "Turn off NFSv4 idmapping when using 'sec=sys'");
+module_param(max_session_slots, ushort, 0644);
+MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
+ "requests the client will negotiate");
+module_param(send_implementation_id, ushort, 0644);
+MODULE_PARM_DESC(send_implementation_id,
+ "Send implementation ID with NFSv4.1 exchange_id");
+MODULE_ALIAS("nfs4");
#endif /* CONFIG_NFS_V4 */
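
The tail of the super.c diff keeps the NFSv4 module parameters in the core client for backwards compatibility and adds a custom parameter type so callback_tcpport is range-checked when it is set. The user-space sketch below mirrors only the validation step: parse an unsigned value and refuse anything above 65535. strtoul stands in for the kernel's string-to-unsigned helper, and set_portnr() here is a made-up name.

/* Illustrative, user-space version of the port validation idea above. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAXPORTNR 65535UL

static int set_portnr(const char *val, unsigned int *out)
{
        char *end;
        unsigned long num;

        if (!val)
                return -EINVAL;
        errno = 0;
        num = strtoul(val, &end, 0);
        if (errno || *end != '\0' || num > MAXPORTNR)
                return -EINVAL;
        *out = (unsigned int)num;
        return 0;
}

int main(void)
{
        unsigned int port = 0;

        printf("\"2049\"  -> %d (port=%u)\n", set_portnr("2049", &port), port);
        printf("\"70000\" -> %d\n", set_portnr("70000", &port));
        printf("\"abc\"   -> %d\n", set_portnr("abc", &port));
        return 0;
}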
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index ad4d2e787b20..6b3f2535a3ec 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -9,37 +9,11 @@
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/module.h>
-#include <linux/nfs4.h>
-#include <linux/nfs_idmap.h>
#include <linux/nfs_fs.h>
-#include "callback.h"
-
-#ifdef CONFIG_NFS_V4
-static const int nfs_set_port_min = 0;
-static const int nfs_set_port_max = 65535;
-#endif
static struct ctl_table_header *nfs_callback_sysctl_table;
static ctl_table nfs_cb_sysctls[] = {
-#ifdef CONFIG_NFS_V4
- {
- .procname = "nfs_callback_tcpport",
- .data = &nfs_callback_set_tcpport,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = (int *)&nfs_set_port_min,
- .extra2 = (int *)&nfs_set_port_max,
- },
- {
- .procname = "idmap_cache_timeout",
- .data = &nfs_idmap_cache_timeout,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
-#endif
{
.procname = "nfs_mountpoint_timeout",
.data = &nfs_mountpoint_expiry_timeout,
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 3210a03342f9..13cea637eff8 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -501,7 +501,7 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
(unsigned long long)NFS_FILEID(dentry->d_inode));
/* Return delegation in anticipation of the rename */
- nfs_inode_return_delegation(dentry->d_inode);
+ NFS_PROTO(dentry->d_inode)->return_delegation(dentry->d_inode);
sdentry = NULL;
do {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4d6861c0dc14..e3b55372726c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -52,7 +52,7 @@ static mempool_t *nfs_commit_mempool;
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
- struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+ struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
if (p) {
memset(p, 0, sizeof(*p));
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(nfs_commit_free);
struct nfs_write_header *nfs_writehdr_alloc(void)
{
- struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+ struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);
if (p) {
struct nfs_pgio_header *hdr = &p->header;
@@ -84,6 +84,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void)
}
return p;
}
+EXPORT_SYMBOL_GPL(nfs_writehdr_alloc);
static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount)
@@ -115,6 +116,7 @@ void nfs_writehdr_free(struct nfs_pgio_header *hdr)
struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
mempool_free(whdr, nfs_wdata_mempool);
}
+EXPORT_SYMBOL_GPL(nfs_writehdr_free);
void nfs_writedata_release(struct nfs_write_data *wdata)
{
@@ -131,6 +133,7 @@ void nfs_writedata_release(struct nfs_write_data *wdata)
if (atomic_dec_and_test(&hdr->refcnt))
hdr->completion_ops->completion(hdr);
}
+EXPORT_SYMBOL_GPL(nfs_writedata_release);
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
@@ -139,25 +142,38 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}
-static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+static struct nfs_page *
+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
{
struct nfs_page *req = NULL;
- if (PagePrivate(page)) {
+ if (PagePrivate(page))
req = (struct nfs_page *)page_private(page);
- if (req != NULL)
- kref_get(&req->wb_kref);
+ else if (unlikely(PageSwapCache(page))) {
+ struct nfs_page *freq, *t;
+
+ /* Linearly search the commit list for the correct req */
+ list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
+ if (freq->wb_page == page) {
+ req = freq;
+ break;
+ }
+ }
}
+
+ if (req)
+ kref_get(&req->wb_kref);
+
return req;
}
static struct nfs_page *nfs_page_find_request(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req = NULL;
spin_lock(&inode->i_lock);
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
spin_unlock(&inode->i_lock);
return req;
}
@@ -165,16 +181,16 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
loff_t end, i_size;
pgoff_t end_index;
spin_lock(&inode->i_lock);
i_size = i_size_read(inode);
end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
- if (i_size > 0 && page->index < end_index)
+ if (i_size > 0 && page_file_index(page) < end_index)
goto out;
- end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+ end = page_file_offset(page) + ((loff_t)offset+count);
if (i_size >= end)
goto out;
i_size_write(inode, end);
@@ -187,7 +203,7 @@ out:
static void nfs_set_pageerror(struct page *page)
{
SetPageError(page);
- nfs_zap_mapping(page->mapping->host, page->mapping);
+ nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}
/* We can set the PG_uptodate flag if we see that a write request
@@ -228,7 +244,7 @@ static int nfs_set_page_writeback(struct page *page)
int ret = test_set_page_writeback(page);
if (!ret) {
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_server *nfss = NFS_SERVER(inode);
if (atomic_long_inc_return(&nfss->writeback) >
@@ -242,7 +258,7 @@ static int nfs_set_page_writeback(struct page *page)
static void nfs_end_page_writeback(struct page *page)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
@@ -252,13 +268,13 @@ static void nfs_end_page_writeback(struct page *page)
static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req;
int ret;
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
if (req == NULL)
break;
if (nfs_lock_request(req))
@@ -313,13 +329,13 @@ out:
static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int ret;
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
- nfs_pageio_cond_complete(pgio, page->index);
+ nfs_pageio_cond_complete(pgio, page_file_index(page));
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
@@ -336,8 +352,10 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
struct nfs_pageio_descriptor pgio;
int err;
- nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
- &nfs_async_write_completion_ops);
+ NFS_PROTO(page_file_mapping(page)->host)->write_pageio_init(&pgio,
+ page->mapping->host,
+ wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
nfs_pageio_complete(&pgio);
if (err < 0)
@@ -380,8 +398,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
- &nfs_async_write_completion_ops);
+ NFS_PROTO(inode)->write_pageio_init(&pgio, inode, wb_priority(wbc), &nfs_async_write_completion_ops);
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
@@ -410,11 +427,17 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
nfs_lock_request(req);
spin_lock(&inode->i_lock);
- if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
+ if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
inode->i_version++;
- set_bit(PG_MAPPED, &req->wb_flags);
- SetPagePrivate(req->wb_page);
- set_page_private(req->wb_page, (unsigned long)req);
+ /*
+ * Swap-space should not get truncated. Hence no need to plug the race
+ * with invalidate/truncate.
+ */
+ if (likely(!PageSwapCache(req->wb_page))) {
+ set_bit(PG_MAPPED, &req->wb_flags);
+ SetPagePrivate(req->wb_page);
+ set_page_private(req->wb_page, (unsigned long)req);
+ }
nfsi->npages++;
kref_get(&req->wb_kref);
spin_unlock(&inode->i_lock);
@@ -431,9 +454,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
BUG_ON (!NFS_WBACK_BUSY(req));
spin_lock(&inode->i_lock);
- set_page_private(req->wb_page, 0);
- ClearPagePrivate(req->wb_page);
- clear_bit(PG_MAPPED, &req->wb_flags);
+ if (likely(!PageSwapCache(req->wb_page))) {
+ set_page_private(req->wb_page, 0);
+ ClearPagePrivate(req->wb_page);
+ clear_bit(PG_MAPPED, &req->wb_flags);
+ }
nfsi->npages--;
spin_unlock(&inode->i_lock);
nfs_release_request(req);
@@ -445,7 +470,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
__set_page_dirty_nobuffers(req->wb_page);
}
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
/**
* nfs_request_add_commit_list - add request to a commit list
* @req: pointer to a struct nfs_page
@@ -470,7 +495,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
spin_unlock(cinfo->lock);
if (!cinfo->dreq) {
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
BDI_RECLAIMABLE);
__mark_inode_dirty(req->wb_context->dentry->d_inode,
I_DIRTY_DATASYNC);
@@ -537,7 +562,7 @@ static void
nfs_clear_page_commit(struct page *page)
{
dec_zone_page_state(page, NR_UNSTABLE_NFS);
- dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
}
static void
@@ -620,7 +645,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
goto next;
}
if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
- memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
+ memcpy(&req->wb_verf, &hdr->verf->verifier, sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo);
goto next;
}
@@ -635,7 +660,7 @@ out:
hdr->release(hdr);
}
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
@@ -729,7 +754,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
spin_lock(&inode->i_lock);
for (;;) {
- req = nfs_page_find_request_locked(page);
+ req = nfs_page_find_request_locked(NFS_I(inode), page);
if (req == NULL)
goto out_unlock;
@@ -788,7 +813,7 @@ out_err:
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
struct page *page, unsigned int offset, unsigned int bytes)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
struct nfs_page *req;
req = nfs_try_to_update_request(inode, page, offset, bytes);
@@ -841,7 +866,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
nfs_release_request(req);
if (!do_flush)
return 0;
- status = nfs_wb_page(page->mapping->host, page);
+ status = nfs_wb_page(page_file_mapping(page)->host, page);
} while (status == 0);
return status;
}
@@ -871,7 +896,7 @@ int nfs_updatepage(struct file *file, struct page *page,
unsigned int offset, unsigned int count)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = page_file_mapping(page)->host;
int status = 0;
nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
@@ -879,7 +904,7 @@ int nfs_updatepage(struct file *file, struct page *page,
dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
file->f_path.dentry->d_parent->d_name.name,
file->f_path.dentry->d_name.name, count,
- (long long)(page_offset(page) + offset));
+ (long long)(page_file_offset(page) + offset));
/* If we're not using byte range locks, and we know the page
* is up to date, it may be more efficient to extend the write
@@ -1172,6 +1197,7 @@ int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
return nfs_flush_multi(desc, hdr);
return nfs_flush_one(desc, hdr);
}
+EXPORT_SYMBOL_GPL(nfs_generic_flush);
static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
@@ -1202,13 +1228,14 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
.pg_doio = nfs_generic_pg_writepages,
};
-void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags,
const struct nfs_pgio_completion_ops *compl_ops)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
NFS_SERVER(inode)->wsize, ioflags);
}
+EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
@@ -1217,13 +1244,6 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
-void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags,
- const struct nfs_pgio_completion_ops *compl_ops)
-{
- if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
- nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
-}
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
@@ -1303,7 +1323,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
return;
nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
/* We tried a write call, but the server did not
* commit data to stable storage even though we
@@ -1363,7 +1383,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
}
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
int ret;
@@ -1475,7 +1495,7 @@ void nfs_retry_commit(struct list_head *page_list,
nfs_mark_request_commit(req, lseg, cinfo);
if (!cinfo->dreq) {
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
BDI_RECLAIMABLE);
}
nfs_unlock_and_release_request(req);
@@ -1547,7 +1567,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */
- if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
+ if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
/* We have a match */
nfs_inode_remove_request(req);
dprintk(" OK\n");
@@ -1677,22 +1697,9 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
- int ret;
-
- ret = nfs_commit_unstable_pages(inode, wbc);
- if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
- int status;
- bool sync = true;
-
- if (wbc->sync_mode == WB_SYNC_NONE)
- sync = false;
-
- status = pnfs_layoutcommit_inode(inode, sync);
- if (status < 0)
- return status;
- }
- return ret;
+ return nfs_commit_unstable_pages(inode, wbc);
}
+EXPORT_SYMBOL_GPL(nfs_write_inode);
/*
* flush the inode to disk.
@@ -1708,6 +1715,7 @@ int nfs_wb_all(struct inode *inode)
return sync_inode(inode, &wbc);
}
+EXPORT_SYMBOL_GPL(nfs_wb_all);
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
@@ -1744,7 +1752,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
*/
int nfs_wb_page(struct inode *inode, struct page *page)
{
- loff_t range_start = page_offset(page);
+ loff_t range_start = page_file_offset(page);
loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
@@ -1806,19 +1814,19 @@ int __init nfs_init_writepagecache(void)
nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
nfs_wdata_cachep);
if (nfs_wdata_mempool == NULL)
- return -ENOMEM;
+ goto out_destroy_write_cache;
nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
sizeof(struct nfs_commit_data),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_cdata_cachep == NULL)
- return -ENOMEM;
+ goto out_destroy_write_mempool;
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
- return -ENOMEM;
+ goto out_destroy_commit_cache;
/*
* NFS congestion size, scale with available memory.
@@ -1841,11 +1849,20 @@ int __init nfs_init_writepagecache(void)
nfs_congestion_kb = 256*1024;
return 0;
+
+out_destroy_commit_cache:
+ kmem_cache_destroy(nfs_cdata_cachep);
+out_destroy_write_mempool:
+ mempool_destroy(nfs_wdata_mempool);
+out_destroy_write_cache:
+ kmem_cache_destroy(nfs_wdata_cachep);
+ return -ENOMEM;
}
void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
+ kmem_cache_destroy(nfs_cdata_cachep);
mempool_destroy(nfs_wdata_mempool);
kmem_cache_destroy(nfs_wdata_cachep);
}
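
The final write.c hunks convert nfs_init_writepagecache() from returning -ENOMEM directly at each failure point to a goto-based unwind that frees whatever was already allocated, and nfs_destroy_writepagecache() learns to destroy the new commit-data cache as well. The stand-alone sketch below shows the unwind pattern with malloc/free standing in for the slab-cache and mempool calls; all names are illustrative only.

/* Illustrative goto-unwind sketch; not the kernel allocation calls. */
#include <stdio.h>
#include <stdlib.h>

static int init_caches(void **a, void **b, void **c)
{
        *a = malloc(32);
        if (!*a)
                return -1;

        *b = malloc(64);
        if (!*b)
                goto out_free_a;

        *c = malloc(128);
        if (!*c)
                goto out_free_b;

        return 0;                       /* everything allocated */

out_free_b:
        free(*b);
out_free_a:
        free(*a);
        return -1;                      /* -ENOMEM in the kernel code */
}

int main(void)
{
        void *a, *b, *c;

        if (init_caches(&a, &b, &c) == 0) {
                puts("all pools allocated");
                free(c); free(b); free(a);
        }
        return 0;
}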
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index ba233499b9a5..a3946cf13fc8 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -398,7 +398,7 @@ fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc)
int migrated, i, err;
/* listsize */
- err = get_int(mesg, &fsloc->locations_count);
+ err = get_uint(mesg, &fsloc->locations_count);
if (err)
return err;
if (fsloc->locations_count > MAX_FS_LOCATIONS)
@@ -456,7 +456,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
return -EINVAL;
for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
- err = get_int(mesg, &f->pseudoflavor);
+ err = get_uint(mesg, &f->pseudoflavor);
if (err)
return err;
/*
@@ -465,7 +465,7 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
* problem at export time instead of when a client fails
* to authenticate.
*/
- err = get_int(mesg, &f->flags);
+ err = get_uint(mesg, &f->flags);
if (err)
return err;
/* Only some flags are allowed to differ between flavors: */
@@ -929,7 +929,7 @@ struct svc_export *
rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
- struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
@@ -960,7 +960,7 @@ struct svc_export *
rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
{
struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
- struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct cache_detail *cd = nn->svc_export_cache;
if (rqstp->rq_client == NULL)
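
The export.c hunks switch fsloc_parse() and secinfo_parse() from get_int() to get_uint() for counts and flag words. Read as a hedged interpretation of the diff rather than a statement of the author's intent: a signed parse lets a value such as -1 through the subsequent upper-bound check. The sketch below shows that failure mode in plain user-space C; get_int()/get_uint() themselves are nfsd helpers and are not reproduced here.

/* Illustrative only: why an unsigned parse is safer for a count field. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_FS_LOCATIONS 128

int main(void)
{
        const char *input = "-1";

        /* get_int()-style parse: -1 is accepted, and -1 > 128 is false,
         * so the bogus count slips past the limit check. */
        int count_signed = (int)strtol(input, NULL, 10);
        printf("signed:   count=%d, over limit? %s\n", count_signed,
               count_signed > MAX_FS_LOCATIONS ? "yes" : "no");

        /* get_uint()-style parse: reject a leading '-' before converting. */
        if (input[0] == '-') {
                puts("unsigned: rejected negative count");
                return 0;
        }
        printf("unsigned: count=%u\n", (unsigned int)strtoul(input, NULL, 10));
        return 0;
}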
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 39365636b244..65c2431ea32f 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -34,6 +34,10 @@ struct nfsd_net {
struct cache_detail *idtoname_cache;
struct cache_detail *nametoid_cache;
+
+ struct lock_manager nfsd4_manager;
+ bool grace_ended;
+ time_t boot_time;
};
extern int nfsd_net_id;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index a5fd6b982f27..4c7bd35b1876 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
- (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
+ (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
return -EINVAL;
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog,
args.protocol = XPRT_TRANSPORT_TCP;
- args.authflavor = clp->cl_flavor;
+ args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
if (!conn->cb_xprt)
@@ -756,7 +756,6 @@ static void do_probe_callback(struct nfs4_client *clp)
*/
void nfsd4_probe_callback(struct nfs4_client *clp)
{
- /* XXX: atomicity? Also, should we be using cl_flags? */
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
do_probe_callback(clp);
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index dae36f1dee95..fdc91a6fc9c4 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -546,7 +546,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
.type = type,
};
int ret;
- struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
if (namelen + 1 > sizeof(key.name))
return nfserr_badowner;
@@ -571,7 +571,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
.type = type,
};
int ret;
- struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 987e719fbae8..c9c1c0a25417 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -354,10 +354,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
- if (locks_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
+ if (locks_in_grace(SVC_NET(rqstp)) && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
status = nfserr_no_grace;
- if (!locks_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
+ if (!locks_in_grace(SVC_NET(rqstp)) && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
goto out;
switch (open->op_claim_type) {
@@ -686,7 +686,8 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
/* check stateid */
- if ((status = nfs4_preprocess_stateid_op(cstate, &read->rd_stateid,
+ if ((status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
+ cstate, &read->rd_stateid,
RD_STATE, &read->rd_filp))) {
dprintk("NFSD: nfsd4_read: couldn't process stateid!\n");
goto out;
@@ -741,7 +742,7 @@ nfsd4_remove(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
__be32 status;
- if (locks_in_grace())
+ if (locks_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
status = nfsd_unlink(rqstp, &cstate->current_fh, 0,
remove->rm_name, remove->rm_namelen);
@@ -760,8 +761,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!cstate->save_fh.fh_dentry)
return status;
- if (locks_in_grace() && !(cstate->save_fh.fh_export->ex_flags
- & NFSEXP_NOSUBTREECHECK))
+ if (locks_in_grace(SVC_NET(rqstp)) &&
+ !(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK))
return nfserr_grace;
status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname,
rename->rn_snamelen, &cstate->current_fh,
@@ -845,7 +846,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (setattr->sa_iattr.ia_valid & ATTR_SIZE) {
nfs4_lock_state();
- status = nfs4_preprocess_stateid_op(cstate,
+ status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), cstate,
&setattr->sa_stateid, WR_STATE, NULL);
nfs4_unlock_state();
if (status) {
@@ -890,7 +891,8 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_inval;
nfs4_lock_state();
- status = nfs4_preprocess_stateid_op(cstate, stateid, WR_STATE, &filp);
+ status = nfs4_preprocess_stateid_op(SVC_NET(rqstp),
+ cstate, stateid, WR_STATE, &filp);
if (filp)
get_file(filp);
nfs4_unlock_state();
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 5ff0b7b9fc08..43295d45cc2b 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -154,6 +154,10 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
if (status < 0)
return;
+ status = mnt_want_write_file(rec_file);
+ if (status)
+ return;
+
dir = rec_file->f_path.dentry;
/* lock the parent */
mutex_lock(&dir->d_inode->i_mutex);
@@ -173,11 +177,7 @@ nfsd4_create_clid_dir(struct nfs4_client *clp)
* as well be forgiving and just succeed silently.
*/
goto out_put;
- status = mnt_want_write_file(rec_file);
- if (status)
- goto out_put;
status = vfs_mkdir(dir->d_inode, dentry, S_IRWXU);
- mnt_drop_write_file(rec_file);
out_put:
dput(dentry);
out_unlock:
@@ -189,6 +189,7 @@ out_unlock:
" (err %d); please check that %s exists"
" and is writeable", status,
user_recovery_dirname);
+ mnt_drop_write_file(rec_file);
nfs4_reset_creds(original_cred);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 94effd5bc4a1..cc894eda385a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -38,18 +38,21 @@
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/clnt.h>
#include "xdr4.h"
#include "vfs.h"
#include "current_stateid.h"
+#include "fault_inject.h"
+
+#include "netns.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
/* Globals */
time_t nfsd4_lease = 90; /* default lease time */
time_t nfsd4_grace = 90;
-static time_t boot_time;
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
@@ -862,6 +865,11 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses,
if (ret)
/* oops; xprt is already down: */
nfsd4_conn_lost(&conn->cn_xpt_user);
+ if (ses->se_client->cl_cb_state == NFSD4_CB_DOWN &&
+ dir & NFS4_CDFC4_BACK) {
+ /* callback channel may be back up */
+ nfsd4_probe_callback(ses->se_client);
+ }
return nfs_ok;
}
@@ -1047,12 +1055,12 @@ renew_client(struct nfs4_client *clp)
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
-STALE_CLIENTID(clientid_t *clid)
+STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
- if (clid->cl_boot == boot_time)
+ if (clid->cl_boot == nn->boot_time)
return 0;
dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
- clid->cl_boot, clid->cl_id, boot_time);
+ clid->cl_boot, clid->cl_id, nn->boot_time);
return 1;
}
@@ -1215,7 +1223,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
return true;
}
-static int
+static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
if ((cr1->cr_flavor != cr2->cr_flavor)
@@ -1227,14 +1235,15 @@ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
return true;
if (!cr1->cr_principal || !cr2->cr_principal)
return false;
- return 0 == strcmp(cr1->cr_principal, cr1->cr_principal);
+ return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}
static void gen_clid(struct nfs4_client *clp)
{
static u32 current_clientid = 1;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
- clp->cl_clientid.cl_boot = boot_time;
+ clp->cl_clientid.cl_boot = nn->boot_time;
clp->cl_clientid.cl_id = current_clientid++;
}
@@ -2217,8 +2226,9 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
nfs4_verifier confirm = setclientid_confirm->sc_confirm;
clientid_t * clid = &setclientid_confirm->sc_clientid;
__be32 status;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
- if (STALE_CLIENTID(clid))
+ if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
nfs4_lock_state();
@@ -2577,8 +2587,9 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
unsigned int strhashval;
struct nfs4_openowner *oo = NULL;
__be32 status;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
- if (STALE_CLIENTID(&open->op_clientid))
+ if (STALE_CLIENTID(&open->op_clientid, nn))
return nfserr_stale_clientid;
/*
* In case we need it later, after we've already created the
@@ -2876,7 +2887,8 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
* Attempt to hand out a delegation.
*/
static void
-nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
+nfs4_open_delegation(struct net *net, struct svc_fh *fh,
+ struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
struct nfs4_delegation *dp;
struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
@@ -2897,7 +2909,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_
case NFS4_OPEN_CLAIM_NULL:
/* Let's not give out any delegations till everyone's
* had the chance to reclaim theirs.... */
- if (locks_in_grace())
+ if (locks_in_grace(net))
goto out;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out;
@@ -3007,14 +3019,12 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
if (status)
goto out;
+ status = nfsd4_truncate(rqstp, current_fh, open);
+ if (status)
+ goto out;
stp = open->op_stp;
open->op_stp = NULL;
init_open_stateid(stp, fp, open);
- status = nfsd4_truncate(rqstp, current_fh, open);
- if (status) {
- release_open_stateid(stp);
- goto out;
- }
}
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -3033,7 +3043,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
* Attempt to hand out a delegation. No error return, because the
* OPEN succeeds even if we fail.
*/
- nfs4_open_delegation(current_fh, open, stp);
+ nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
nodeleg:
status = nfs_ok;
@@ -3087,12 +3097,13 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfs4_client *clp;
__be32 status;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
nfs4_lock_state();
dprintk("process_renew(%08x/%08x): starting\n",
clid->cl_boot, clid->cl_id);
status = nfserr_stale_clientid;
- if (STALE_CLIENTID(clid))
+ if (STALE_CLIENTID(clid, nn))
goto out;
clp = find_confirmed_client(clid);
status = nfserr_expired;
@@ -3111,22 +3122,19 @@ out:
return status;
}
-static struct lock_manager nfsd4_manager = {
-};
-
-static bool grace_ended;
-
static void
-nfsd4_end_grace(void)
+nfsd4_end_grace(struct net *net)
{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
/* do nothing if grace period already ended */
- if (grace_ended)
+ if (nn->grace_ended)
return;
dprintk("NFSD: end of grace period\n");
- grace_ended = true;
- nfsd4_record_grace_done(&init_net, boot_time);
- locks_end_grace(&nfsd4_manager);
+ nn->grace_ended = true;
+ nfsd4_record_grace_done(net, nn->boot_time);
+ locks_end_grace(&nn->nfsd4_manager);
/*
* Now that every NFSv4 client has had the chance to recover and
* to see the (possibly new, possibly shorter) lease time, we
@@ -3149,7 +3157,7 @@ nfs4_laundromat(void)
nfs4_lock_state();
dprintk("NFSD: laundromat service - starting\n");
- nfsd4_end_grace();
+ nfsd4_end_grace(&init_net);
INIT_LIST_HEAD(&reaplist);
spin_lock(&client_lock);
list_for_each_safe(pos, next, &client_lru) {
@@ -3231,9 +3239,9 @@ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *s
}
static int
-STALE_STATEID(stateid_t *stateid)
+STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
{
- if (stateid->si_opaque.so_clid.cl_boot == boot_time)
+ if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
return 0;
dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
STATEID_VAL(stateid));
@@ -3273,11 +3281,11 @@ out:
}
static inline __be32
-check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
+check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
if (ONE_STATEID(stateid) && (flags & RD_STATE))
return nfs_ok;
- else if (locks_in_grace()) {
+ else if (locks_in_grace(net)) {
/* Answer in remaining cases depends on existence of
* conflicting state; so we must wait out the grace period. */
return nfserr_grace;
@@ -3294,9 +3302,9 @@ check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
* that are not able to provide mandatory locking.
*/
static inline int
-grace_disallows_io(struct inode *inode)
+grace_disallows_io(struct net *net, struct inode *inode)
{
- return locks_in_grace() && mandatory_lock(inode);
+ return locks_in_grace(net) && mandatory_lock(inode);
}
/* Returns true iff a is later than b: */
@@ -3333,18 +3341,26 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
return nfserr_old_stateid;
}
-__be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
struct nfs4_stid *s;
struct nfs4_ol_stateid *ols;
__be32 status;
- if (STALE_STATEID(stateid))
- return nfserr_stale_stateid;
-
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ return nfserr_bad_stateid;
+ /* Client debugging aid. */
+ if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
+ char addr_str[INET6_ADDRSTRLEN];
+ rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
+ sizeof(addr_str));
+ pr_warn_ratelimited("NFSD: client %s testing state ID "
+ "with incorrect client ID\n", addr_str);
+ return nfserr_bad_stateid;
+ }
s = find_stateid(cl, stateid);
if (!s)
- return nfserr_stale_stateid;
+ return nfserr_bad_stateid;
status = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (status)
return status;
@@ -3360,10 +3376,11 @@ __be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
{
struct nfs4_client *cl;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
return nfserr_bad_stateid;
- if (STALE_STATEID(stateid))
+ if (STALE_STATEID(stateid, nn))
return nfserr_stale_stateid;
cl = find_confirmed_client(&stateid->si_opaque.so_clid);
if (!cl)
@@ -3379,7 +3396,7 @@ static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, s
* Checks for stateid operations
*/
__be32
-nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
+nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filpp)
{
struct nfs4_stid *s;
@@ -3392,11 +3409,11 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
if (filpp)
*filpp = NULL;
- if (grace_disallows_io(ino))
+ if (grace_disallows_io(net, ino))
return nfserr_grace;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
- return check_special_stateids(current_fh, stateid, flags);
+ return check_special_stateids(net, current_fh, stateid, flags);
status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
if (status)
@@ -3463,7 +3480,8 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
- stateid->ts_id_status = nfs4_validate_stateid(cl, &stateid->ts_id_stateid);
+ stateid->ts_id_status =
+ nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
nfs4_unlock_state();
return nfs_ok;
@@ -3750,12 +3768,19 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_close_open_stateid(stp);
oo->oo_last_closed_stid = stp;
- /* place unused nfs4_stateowners on so_close_lru list to be
- * released by the laundromat service after the lease period
- * to enable us to handle CLOSE replay
- */
- if (list_empty(&oo->oo_owner.so_stateids))
- move_to_close_lru(oo);
+ if (list_empty(&oo->oo_owner.so_stateids)) {
+ if (cstate->minorversion) {
+ release_openowner(oo);
+ cstate->replay_owner = NULL;
+ } else {
+ /*
+ * In the 4.0 case we need to keep the owners around a
+ * little while to handle CLOSE replay.
+ */
+ if (list_empty(&oo->oo_owner.so_stateids))
+ move_to_close_lru(oo);
+ }
+ }
out:
if (!cstate->replay_owner)
nfs4_unlock_state();
@@ -4027,6 +4052,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
bool new_state = false;
int lkflg;
int err;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
(long long) lock->lk_offset,
@@ -4044,11 +4070,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
if (lock->lk_is_new) {
- /*
- * Client indicates that this is a new lockowner.
- * Use open owner and open stateid to create lock owner and
- * lock stateid.
- */
struct nfs4_ol_stateid *open_stp = NULL;
if (nfsd4_has_session(cstate))
@@ -4058,7 +4079,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
sizeof(clientid_t));
status = nfserr_stale_clientid;
- if (STALE_CLIENTID(&lock->lk_new_clientid))
+ if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
goto out;
/* validate and update open stateid and open seqid */
@@ -4075,17 +4096,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
status = lookup_or_create_lock_state(cstate, open_stp, lock,
&lock_stp, &new_state);
- if (status)
- goto out;
- } else {
- /* lock (lock owner + lock stateid) already exists */
+ } else
status = nfs4_preprocess_seqid_op(cstate,
lock->lk_old_lock_seqid,
&lock->lk_old_lock_stateid,
NFS4_LOCK_STID, &lock_stp);
- if (status)
- goto out;
- }
+ if (status)
+ goto out;
lock_sop = lockowner(lock_stp->st_stateowner);
lkflg = setlkflg(lock->lk_type);
@@ -4094,10 +4111,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
status = nfserr_grace;
- if (locks_in_grace() && !lock->lk_reclaim)
+ if (locks_in_grace(SVC_NET(rqstp)) && !lock->lk_reclaim)
goto out;
status = nfserr_no_grace;
- if (!locks_in_grace() && lock->lk_reclaim)
+ if (!locks_in_grace(SVC_NET(rqstp)) && lock->lk_reclaim)
goto out;
locks_init_lock(&file_lock);
@@ -4196,8 +4213,9 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct file_lock file_lock;
struct nfs4_lockowner *lo;
__be32 status;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
- if (locks_in_grace())
+ if (locks_in_grace(SVC_NET(rqstp)))
return nfserr_grace;
if (check_lock_length(lockt->lt_offset, lockt->lt_length))
@@ -4206,7 +4224,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfs4_lock_state();
status = nfserr_stale_clientid;
- if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
+ if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid, nn))
goto out;
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
@@ -4355,6 +4373,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct list_head matches;
unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
__be32 status;
+ struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
@@ -4362,7 +4381,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
/* XXX check for lease expiration */
status = nfserr_stale_clientid;
- if (STALE_CLIENTID(clid))
+ if (STALE_CLIENTID(clid, nn))
return status;
nfs4_lock_state();
@@ -4564,7 +4583,7 @@ void nfsd_forget_openowners(u64 num)
printk(KERN_INFO "NFSD: Forgot %d open owners", count);
}
-int nfsd_process_n_delegations(u64 num, void (*deleg_func)(struct nfs4_delegation *))
+int nfsd_process_n_delegations(u64 num, struct list_head *list)
{
int i, count = 0;
struct nfs4_file *fp, *fnext;
@@ -4573,7 +4592,7 @@ int nfsd_process_n_delegations(u64 num, void (*deleg_func)(struct nfs4_delegatio
for (i = 0; i < FILE_HASH_SIZE; i++) {
list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) {
list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) {
- deleg_func(dp);
+ list_move(&dp->dl_recall_lru, list);
if (++count == num)
return count;
}
@@ -4586,9 +4605,16 @@ int nfsd_process_n_delegations(u64 num, void (*deleg_func)(struct nfs4_delegatio
void nfsd_forget_delegations(u64 num)
{
unsigned int count;
+ LIST_HEAD(victims);
+ struct nfs4_delegation *dp, *dnext;
+
+ spin_lock(&recall_lock);
+ count = nfsd_process_n_delegations(num, &victims);
+ spin_unlock(&recall_lock);
nfs4_lock_state();
- count = nfsd_process_n_delegations(num, unhash_delegation);
+ list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru)
+ unhash_delegation(dp);
nfs4_unlock_state();
printk(KERN_INFO "NFSD: Forgot %d delegations", count);
@@ -4597,12 +4623,16 @@ void nfsd_forget_delegations(u64 num)
void nfsd_recall_delegations(u64 num)
{
unsigned int count;
+ LIST_HEAD(victims);
+ struct nfs4_delegation *dp, *dnext;
- nfs4_lock_state();
spin_lock(&recall_lock);
- count = nfsd_process_n_delegations(num, nfsd_break_one_deleg);
+ count = nfsd_process_n_delegations(num, &victims);
+ list_for_each_entry_safe(dp, dnext, &victims, dl_recall_lru) {
+ list_del(&dp->dl_recall_lru);
+ nfsd_break_one_deleg(dp);
+ }
spin_unlock(&recall_lock);
- nfs4_unlock_state();
printk(KERN_INFO "NFSD: Recalled %d delegations", count);
}
@@ -4665,6 +4695,8 @@ set_max_delegations(void)
int
nfs4_state_start(void)
{
+ struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int ret;
/*
@@ -4674,11 +4706,11 @@ nfs4_state_start(void)
* to that instead and then do most of the rest of this on a per-net
* basis.
*/
- get_net(&init_net);
- nfsd4_client_tracking_init(&init_net);
- boot_time = get_seconds();
- locks_start_grace(&nfsd4_manager);
- grace_ended = false;
+ get_net(net);
+ nfsd4_client_tracking_init(net);
+ nn->boot_time = get_seconds();
+ locks_start_grace(net, &nn->nfsd4_manager);
+ nn->grace_ended = false;
printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
nfsd4_grace);
ret = set_callback_cred();
@@ -4700,8 +4732,8 @@ nfs4_state_start(void)
out_free_laundry:
destroy_workqueue(laundry_wq);
out_recovery:
- nfsd4_client_tracking_exit(&init_net);
- put_net(&init_net);
+ nfsd4_client_tracking_exit(net);
+ put_net(net);
return ret;
}
@@ -4742,9 +4774,12 @@ __nfs4_state_shutdown(void)
void
nfs4_state_shutdown(void)
{
+ struct net *net = &init_net;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
cancel_delayed_work_sync(&laundromat_work);
destroy_workqueue(laundry_wq);
- locks_end_grace(&nfsd4_manager);
+ locks_end_grace(&nn->nfsd4_manager);
nfs4_lock_state();
__nfs4_state_shutdown();
nfs4_unlock_state();
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 4949667c84ea..6322df36031f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2259,7 +2259,7 @@ out_acl:
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(1);
+ WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
if ((buflen -= 4) < 0)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index c55298ed5772..fa49cff5ee65 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -673,9 +673,7 @@ static ssize_t __write_ports_addfd(char *buf)
err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
if (err < 0) {
- if (nfsd_serv->sv_nrthreads == 1)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv);
+ nfsd_destroy(net);
return err;
}
@@ -744,9 +742,7 @@ out_close:
svc_xprt_put(xprt);
}
out_err:
- if (nfsd_serv->sv_nrthreads == 1)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv);
+ nfsd_destroy(net);
return err;
}
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 1671429ffa66..2244222368ab 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -72,6 +72,19 @@ int nfsd_nrthreads(void);
int nfsd_nrpools(void);
int nfsd_get_nrthreads(int n, int *);
int nfsd_set_nrthreads(int n, int *);
+int nfsd_pool_stats_open(struct inode *, struct file *);
+int nfsd_pool_stats_release(struct inode *, struct file *);
+
+static inline void nfsd_destroy(struct net *net)
+{
+ int destroy = (nfsd_serv->sv_nrthreads == 1);
+
+ if (destroy)
+ svc_shutdown_net(nfsd_serv, net);
+ svc_destroy(nfsd_serv);
+ if (destroy)
+ nfsd_serv = NULL;
+}
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
#ifdef CONFIG_NFSD_V2_ACL
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index cc793005a87c..032af381b3aa 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -635,6 +635,7 @@ fh_put(struct svc_fh *fhp)
fhp->fh_post_saved = 0;
#endif
}
+ fh_drop_write(fhp);
if (exp) {
exp_put(exp);
fhp->fh_export = NULL;
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index e15dc45fc5ec..aad6d457b9e8 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -196,6 +196,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
struct dentry *dchild;
int type, mode;
__be32 nfserr;
+ int hosterr;
dev_t rdev = 0, wanted = new_decode_dev(attr->ia_size);
dprintk("nfsd: CREATE %s %.*s\n",
@@ -214,6 +215,12 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
nfserr = nfserr_exist;
if (isdotent(argp->name, argp->len))
goto done;
+ hosterr = fh_want_write(dirfhp);
+ if (hosterr) {
+ nfserr = nfserrno(hosterr);
+ goto done;
+ }
+
fh_lock_nested(dirfhp, I_MUTEX_PARENT);
dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
if (IS_ERR(dchild)) {
@@ -330,7 +337,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
out_unlock:
/* We don't really need to unlock, as fh_put does it. */
fh_unlock(dirfhp);
-
+ fh_drop_write(dirfhp);
done:
fh_put(dirfhp);
return nfsd_return_dirop(nfserr, resp);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ee709fc8f58b..240473cb708f 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -254,8 +254,6 @@ static void nfsd_shutdown(void)
static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
- /* When last nfsd thread exits we need to do some clean-up */
- nfsd_serv = NULL;
nfsd_shutdown();
svc_rpcb_cleanup(serv, net);
@@ -332,6 +330,7 @@ static int nfsd_get_default_max_blksize(void)
int nfsd_create_serv(void)
{
int error;
+ struct net *net = current->nsproxy->net_ns;
WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nfsd_serv) {
@@ -346,7 +345,7 @@ int nfsd_create_serv(void)
if (nfsd_serv == NULL)
return -ENOMEM;
- error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
+ error = svc_bind(nfsd_serv, net);
if (error < 0) {
svc_destroy(nfsd_serv);
return error;
@@ -427,11 +426,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
if (err)
break;
}
-
- if (nfsd_serv->sv_nrthreads == 1)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv);
-
+ nfsd_destroy(net);
return err;
}
@@ -478,9 +473,7 @@ out_shutdown:
if (error < 0 && !nfsd_up_before)
nfsd_shutdown();
out_destroy:
- if (nfsd_serv->sv_nrthreads == 1)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv); /* Release server */
+ nfsd_destroy(net); /* Release server */
out:
mutex_unlock(&nfsd_mutex);
return error;
@@ -563,12 +556,13 @@ nfsd(void *vrqstp)
nfsdstats.th_cnt --;
out:
- if (rqstp->rq_server->sv_nrthreads == 1)
- svc_shutdown_net(rqstp->rq_server, &init_net);
+ rqstp->rq_server = NULL;
/* Release the thread */
svc_exit_thread(rqstp);
+ nfsd_destroy(&init_net);
+
/* Release module */
mutex_unlock(&nfsd_mutex);
module_put_and_exit(0);
@@ -682,9 +676,7 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
mutex_lock(&nfsd_mutex);
/* this function really, really should have been called svc_put() */
- if (nfsd_serv->sv_nrthreads == 1)
- svc_shutdown_net(nfsd_serv, net);
- svc_destroy(nfsd_serv);
+ nfsd_destroy(net);
mutex_unlock(&nfsd_mutex);
return ret;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 849091e16ea6..22bd0a66c356 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -231,7 +231,6 @@ struct nfs4_client {
nfs4_verifier cl_verifier; /* generated by client */
time_t cl_time; /* time of last lease renewal */
struct sockaddr_storage cl_addr; /* client ipaddress */
- u32 cl_flavor; /* setclientid pseudoflavor */
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
@@ -450,8 +449,10 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
#define WR_STATE 0x00000020
struct nfsd4_compound_state;
+struct nfsd_net;
-extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
+extern __be32 nfs4_preprocess_stateid_op(struct net *net,
+ struct nfsd4_compound_state *cstate,
stateid_t *stateid, int flags, struct file **filp);
extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void);
@@ -475,7 +476,6 @@ extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern int nfs4_client_to_reclaim(const char *name);
extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
extern void release_session_client(struct nfsd4_session *);
-extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
/* nfs4recover operations */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 4700a0a929d7..a9269f142cc4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -757,8 +757,16 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* If we get here, then the client has already done an "open",
* and (hopefully) checked permission - so allow OWNER_OVERRIDE
* in case a chmod has now revoked permission.
+ *
+ * Arguably we should also allow the owner override for
+ * directories, but we never have and it doesn't seem to have
+ * caused anyone a problem. If we were to change this, note
+ * also that our filldir callbacks would need a variant of
+ * lookup_one_len that doesn't check permissions.
*/
- err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
+ if (type == S_IFREG)
+ may_flags |= NFSD_MAY_OWNER_OVERRIDE;
+ err = fh_verify(rqstp, fhp, type, may_flags);
if (err)
goto out;
@@ -1276,6 +1284,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
* If it has, the parent directory should already be locked.
*/
if (!resfhp->fh_dentry) {
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ goto out_nfserr;
+
/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
fh_lock_nested(fhp, I_MUTEX_PARENT);
dchild = lookup_one_len(fname, dentry, flen);
@@ -1319,14 +1331,11 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
-
/*
* Get the dir op function pointer.
*/
err = 0;
+ host_err = 0;
switch (type) {
case S_IFREG:
host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
@@ -1343,10 +1352,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
break;
}
- if (host_err < 0) {
- fh_drop_write(fhp);
+ if (host_err < 0)
goto out_nfserr;
- }
err = nfsd_create_setattr(rqstp, resfhp, iap);
@@ -1358,7 +1365,6 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
err2 = nfserrno(commit_metadata(fhp));
if (err2)
err = err2;
- fh_drop_write(fhp);
/*
* Update the file handle to get the new inode info.
*/
@@ -1417,6 +1423,11 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = nfserr_notdir;
if (!dirp->i_op->lookup)
goto out;
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ goto out_nfserr;
+
fh_lock_nested(fhp, I_MUTEX_PARENT);
/*
@@ -1449,9 +1460,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
v_atime = verifier[1]&0x7fffffff;
}
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
if (dchild->d_inode) {
err = 0;
@@ -1522,7 +1530,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!err)
err = nfserrno(commit_metadata(fhp));
- fh_drop_write(fhp);
/*
* Update the filehandle to get the new inode info.
*/
@@ -1533,6 +1540,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
fh_unlock(fhp);
if (dchild && !IS_ERR(dchild))
dput(dchild);
+ fh_drop_write(fhp);
return err;
out_nfserr:
@@ -1613,6 +1621,11 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
if (err)
goto out;
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ goto out_nfserr;
+
fh_lock(fhp);
dentry = fhp->fh_dentry;
dnew = lookup_one_len(fname, dentry, flen);
@@ -1620,10 +1633,6 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (IS_ERR(dnew))
goto out_nfserr;
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
-
if (unlikely(path[plen] != 0)) {
char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
if (path_alloced == NULL)
@@ -1683,6 +1692,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (isdotent(name, len))
goto out;
+ host_err = fh_want_write(tfhp);
+ if (host_err) {
+ err = nfserrno(host_err);
+ goto out;
+ }
+
fh_lock_nested(ffhp, I_MUTEX_PARENT);
ddir = ffhp->fh_dentry;
dirp = ddir->d_inode;
@@ -1694,18 +1709,13 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
dold = tfhp->fh_dentry;
- host_err = fh_want_write(tfhp);
- if (host_err) {
- err = nfserrno(host_err);
- goto out_dput;
- }
err = nfserr_noent;
if (!dold->d_inode)
- goto out_drop_write;
+ goto out_dput;
host_err = nfsd_break_lease(dold->d_inode);
if (host_err) {
err = nfserrno(host_err);
- goto out_drop_write;
+ goto out_dput;
}
host_err = vfs_link(dold, dirp, dnew);
if (!host_err) {
@@ -1718,12 +1728,11 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
else
err = nfserrno(host_err);
}
-out_drop_write:
- fh_drop_write(tfhp);
out_dput:
dput(dnew);
out_unlock:
fh_unlock(ffhp);
+ fh_drop_write(tfhp);
out:
return err;
@@ -1766,6 +1775,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+ host_err = fh_want_write(ffhp);
+ if (host_err) {
+ err = nfserrno(host_err);
+ goto out;
+ }
+
/* cannot use fh_lock as we need deadlock protective ordering
* so do it by hand */
trap = lock_rename(tdentry, fdentry);
@@ -1796,17 +1811,14 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
host_err = -EXDEV;
if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
goto out_dput_new;
- host_err = fh_want_write(ffhp);
- if (host_err)
- goto out_dput_new;
host_err = nfsd_break_lease(odentry->d_inode);
if (host_err)
- goto out_drop_write;
+ goto out_dput_new;
if (ndentry->d_inode) {
host_err = nfsd_break_lease(ndentry->d_inode);
if (host_err)
- goto out_drop_write;
+ goto out_dput_new;
}
host_err = vfs_rename(fdir, odentry, tdir, ndentry);
if (!host_err) {
@@ -1814,8 +1826,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!host_err)
host_err = commit_metadata(ffhp);
}
-out_drop_write:
- fh_drop_write(ffhp);
out_dput_new:
dput(ndentry);
out_dput_old:
@@ -1831,6 +1841,7 @@ out_drop_write:
fill_post_wcc(tfhp);
unlock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = 0;
+ fh_drop_write(ffhp);
out:
return err;
@@ -1856,6 +1867,10 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (err)
goto out;
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ goto out_nfserr;
+
fh_lock_nested(fhp, I_MUTEX_PARENT);
dentry = fhp->fh_dentry;
dirp = dentry->d_inode;
@@ -1874,21 +1889,15 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
if (!type)
type = rdentry->d_inode->i_mode & S_IFMT;
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_put;
-
host_err = nfsd_break_lease(rdentry->d_inode);
if (host_err)
- goto out_drop_write;
+ goto out_put;
if (type != S_IFDIR)
host_err = vfs_unlink(dirp, rdentry);
else
host_err = vfs_rmdir(dirp, rdentry);
if (!host_err)
host_err = commit_metadata(fhp);
-out_drop_write:
- fh_drop_write(fhp);
out_put:
dput(rdentry);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index ec0611b2b738..359594c393d2 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -110,12 +110,19 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
static inline int fh_want_write(struct svc_fh *fh)
{
- return mnt_want_write(fh->fh_export->ex_path.mnt);
+ int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
+
+ if (!ret)
+ fh->fh_want_write = 1;
+ return ret;
}
static inline void fh_drop_write(struct svc_fh *fh)
{
- mnt_drop_write(fh->fh_export->ex_path.mnt);
+ if (fh->fh_want_write) {
+ fh->fh_want_write = 0;
+ mnt_drop_write(fh->fh_export->ex_path.mnt);
+ }
}
#endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index f5fde36b9e28..fb7238100548 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -76,15 +76,23 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
#define nilfs_clear_bit_atomic ext2_clear_bit_atomic
#define nilfs_find_next_zero_bit find_next_zero_bit_le
-/*
- * persistent object allocator cache
+/**
+ * struct nilfs_bh_assoc - block offset and buffer head association
+ * @blkoff: block offset
+ * @bh: buffer head
*/
-
struct nilfs_bh_assoc {
unsigned long blkoff;
struct buffer_head *bh;
};
+/**
+ * struct nilfs_palloc_cache - persistent object allocator cache
+ * @lock: cache protecting lock
+ * @prev_desc: blockgroup descriptors cache
+ * @prev_bitmap: blockgroup bitmap cache
+ * @prev_entry: translation entries cache
+ */
struct nilfs_palloc_cache {
spinlock_t lock;
struct nilfs_bh_assoc prev_desc;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index 40d9f453d31c..b89e68076adc 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -135,6 +135,13 @@ struct nilfs_bmap {
/* state */
#define NILFS_BMAP_DIRTY 0x00000001
+/**
+ * struct nilfs_bmap_store - shadow copy of bmap state
+ * @data: cached raw block mapping of on-disk inode
+ * @last_allocated_key: cached value of last allocated key for data block
+ * @last_allocated_ptr: cached value of last allocated ptr for data block
+ * @state: cached value of state field of bmap structure
+ */
struct nilfs_bmap_store {
__le64 data[NILFS_BMAP_SIZE / sizeof(__le64)];
__u64 last_allocated_key;
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 3a4dd2d8d3fc..d876b565ce64 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -29,7 +29,13 @@
#include <linux/fs.h>
#include <linux/backing-dev.h>
-
+/**
+ * struct nilfs_btnode_chkey_ctxt - change key context
+ * @oldkey: old key of block's moving content
+ * @newkey: new key for block's content
+ * @bh: buffer head of old buffer
+ * @newbh: buffer head of new buffer
+ */
struct nilfs_btnode_chkey_ctxt {
__u64 oldkey;
__u64 newkey;
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index dab5c4c6dfaf..deaa3d33a0aa 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -286,7 +286,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
__u64 cno;
void *kaddr;
unsigned long tnicps;
- int ret, ncps, nicps, count, i;
+ int ret, ncps, nicps, nss, count, i;
if (unlikely(start == 0 || start > end)) {
printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
@@ -301,6 +301,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
if (ret < 0)
goto out_sem;
tnicps = 0;
+ nss = 0;
for (cno = start; cno < end; cno += ncps) {
ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
@@ -318,8 +319,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
cpfile, cno, cp_bh, kaddr);
nicps = 0;
for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
- WARN_ON(nilfs_checkpoint_snapshot(cp));
- if (!nilfs_checkpoint_invalid(cp)) {
+ if (nilfs_checkpoint_snapshot(cp)) {
+ nss++;
+ } else if (!nilfs_checkpoint_invalid(cp)) {
nilfs_checkpoint_set_invalid(cp);
nicps++;
}
@@ -364,6 +366,8 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
}
brelse(header_bh);
+ if (nss > 0)
+ ret = -EBUSY;
out_sem:
up_write(&NILFS_MDT(cpfile)->mi_sem);
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index b5c13f3576b9..fa0f80308c2d 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -33,6 +33,12 @@
#define NILFS_CNO_MIN ((__u64)1)
#define NILFS_CNO_MAX (~(__u64)0)
+/**
+ * struct nilfs_dat_info - on-memory private data of DAT file
+ * @mi: on-memory private data of metadata file
+ * @palloc_cache: persistent object allocator cache of DAT file
+ * @shadow: shadow map of DAT file
+ */
struct nilfs_dat_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
diff --git a/fs/nilfs2/export.h b/fs/nilfs2/export.h
index a71cc412b651..19ccbf9522ab 100644
--- a/fs/nilfs2/export.h
+++ b/fs/nilfs2/export.h
@@ -5,6 +5,14 @@
extern const struct export_operations nilfs_export_ops;
+/**
+ * struct nilfs_fid - NILFS file id type
+ * @cno: checkpoint number
+ * @ino: inode number
+ * @gen: file generation (version) for NFS
+ * @parent_gen: parent generation (version) for NFS
+ * @parent_ino: parent inode number
+ */
struct nilfs_fid {
u64 cno;
u64 ino;
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 62cebc8e1a1f..a4d56ac02e6c 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -69,16 +69,18 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page = vmf->page;
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct nilfs_transaction_info ti;
- int ret;
+ int ret = 0;
if (unlikely(nilfs_near_disk_full(inode->i_sb->s_fs_info)))
return VM_FAULT_SIGBUS; /* -ENOSPC */
+ sb_start_pagefault(inode->i_sb);
lock_page(page);
if (page->mapping != inode->i_mapping ||
page_offset(page) >= i_size_read(inode) || !PageUptodate(page)) {
unlock_page(page);
- return VM_FAULT_NOPAGE; /* make the VM retry the fault */
+ ret = -EFAULT; /* make the VM retry the fault */
+ goto out;
}
/*
@@ -112,19 +114,21 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = nilfs_transaction_begin(inode->i_sb, &ti, 1);
/* never returns -ENOMEM, but may return -ENOSPC */
if (unlikely(ret))
- return VM_FAULT_SIGBUS;
+ goto out;
- ret = block_page_mkwrite(vma, vmf, nilfs_get_block);
- if (ret != VM_FAULT_LOCKED) {
+ ret = __block_page_mkwrite(vma, vmf, nilfs_get_block);
+ if (ret) {
nilfs_transaction_abort(inode->i_sb);
- return ret;
+ goto out;
}
nilfs_set_file_dirty(inode, 1 << (PAGE_SHIFT - inode->i_blkbits));
nilfs_transaction_commit(inode->i_sb);
mapped:
wait_on_page_writeback(page);
- return VM_FAULT_LOCKED;
+ out:
+ sb_end_pagefault(inode->i_sb);
+ return block_page_mkwrite_return(ret);
}
static const struct vm_operations_struct nilfs_file_vm_ops = {
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 5a48df79d674..d8e65bde083c 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -29,7 +29,11 @@
#include "alloc.h"
#include "ifile.h"
-
+/**
+ * struct nilfs_ifile_info - on-memory private data of ifile
+ * @mi: on-memory private data of metadata file
+ * @palloc_cache: persistent object allocator cache of ifile
+ */
struct nilfs_ifile_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 7cc64465ec26..6e2c3db976b2 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -34,6 +34,13 @@
#include "cpfile.h"
#include "ifile.h"
+/**
+ * struct nilfs_iget_args - arguments used during comparison between inodes
+ * @ino: inode number
+ * @cno: checkpoint number
+ * @root: pointer on NILFS root object (mounted checkpoint)
+ * @for_gc: inode for GC flag
+ */
struct nilfs_iget_args {
u64 ino;
__u64 cno;
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 06658caa18bd..fdb180769485 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
goto out;
- down_read(&inode->i_sb->s_umount);
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
- up_read(&inode->i_sb->s_umount);
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
out:
mnt_drop_write_file(filp);
return ret;
@@ -660,8 +660,6 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
goto out_free;
}
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
if (ret < 0)
printk(KERN_ERR "NILFS: GC failed during preparation: "
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index ab20a4baa50f..ab172e8549c5 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -28,6 +28,13 @@
#include "nilfs.h"
#include "page.h"
+/**
+ * struct nilfs_shadow_map - shadow mapping of meta data file
+ * @bmap_store: shadow copy of bmap state
+ * @frozen_data: shadowed dirty data pages
+ * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @frozen_buffers: list of frozen buffers
+ */
struct nilfs_shadow_map {
struct nilfs_bmap_store bmap_store;
struct address_space frozen_data;
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 250add84da76..74cece80e9a3 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -32,8 +32,21 @@
#include "the_nilfs.h"
#include "bmap.h"
-/*
- * nilfs inode data in memory
+/**
+ * struct nilfs_inode_info - nilfs inode data in memory
+ * @i_flags: inode flags
+ * @i_state: dynamic state flags
+ * @i_bmap: pointer on i_bmap_data
+ * @i_bmap_data: raw block mapping
+ * @i_xattr: <TODO>
+ * @i_dir_start_lookup: page index of last successful search
+ * @i_cno: checkpoint number for GC inode
+ * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_dirty: list for connecting dirty files
+ * @xattr_sem: semaphore for extended attributes processing
+ * @i_bh: buffer contains disk inode
+ * @i_root: root object of the current filesystem tree
+ * @vfs_inode: VFS inode object
*/
struct nilfs_inode_info {
__u32 i_flags;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 88e11fb346b6..a5752a589932 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -189,7 +189,7 @@ int nilfs_transaction_begin(struct super_block *sb,
if (ret > 0)
return 0;
- vfs_check_frozen(sb, SB_FREEZE_WRITE);
+ sb_start_intwrite(sb);
nilfs = sb->s_fs_info;
down_read(&nilfs->ns_segctor_sem);
@@ -205,6 +205,7 @@ int nilfs_transaction_begin(struct super_block *sb,
current->journal_info = ti->ti_save;
if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
kmem_cache_free(nilfs_transaction_cachep, ti);
+ sb_end_intwrite(sb);
return ret;
}
@@ -246,6 +247,7 @@ int nilfs_transaction_commit(struct super_block *sb)
err = nilfs_construct_segment(sb);
if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
kmem_cache_free(nilfs_transaction_cachep, ti);
+ sb_end_intwrite(sb);
return err;
}
@@ -264,6 +266,7 @@ void nilfs_transaction_abort(struct super_block *sb)
current->journal_info = ti->ti_save;
if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
kmem_cache_free(nilfs_transaction_cachep, ti);
+ sb_end_intwrite(sb);
}
void nilfs_relax_pressure_in_lock(struct super_block *sb)
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index c5b7653a4391..3127e9f438a7 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -30,7 +30,13 @@
#include "mdt.h"
#include "sufile.h"
-
+/**
+ * struct nilfs_sufile_info - on-memory private data of sufile
+ * @mi: on-memory private data of metadata file
+ * @ncleansegs: number of clean segments
+ * @allocmin: lower limit of allocatable segment range
+ * @allocmax: upper limit of allocatable segment range
+ */
struct nilfs_sufile_info {
struct nilfs_mdt_info mi;
unsigned long ncleansegs;/* number of clean segments */
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index d57c42f974ea..6a10812711c1 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -676,20 +676,13 @@ static const struct super_operations nilfs_sops = {
.alloc_inode = nilfs_alloc_inode,
.destroy_inode = nilfs_destroy_inode,
.dirty_inode = nilfs_dirty_inode,
- /* .write_inode = nilfs_write_inode, */
- /* .put_inode = nilfs_put_inode, */
- /* .drop_inode = nilfs_drop_inode, */
.evict_inode = nilfs_evict_inode,
.put_super = nilfs_put_super,
- /* .write_super = nilfs_write_super, */
.sync_fs = nilfs_sync_fs,
.freeze_fs = nilfs_freeze,
.unfreeze_fs = nilfs_unfreeze,
- /* .write_super_lockfs */
- /* .unlockfs */
.statfs = nilfs_statfs,
.remount_fs = nilfs_remount,
- /* .umount_begin */
.show_options = nilfs_show_options
};
@@ -948,6 +941,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
struct nilfs_root *root;
int ret;
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
up_read(&nilfs->ns_segctor_sem);
@@ -972,6 +967,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
ret = nilfs_get_root_dentry(s, root, root_dentry);
nilfs_put_root(root);
out:
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
return ret;
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 501b7f8b739f..41e6a04a561f 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
nilfs->ns_bdev = bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
+ mutex_init(&nilfs->ns_snapshot_mount_mutex);
INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_inode_lock);
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 9992b11312ff..be1267a34cea 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -47,11 +47,13 @@ enum {
* @ns_flags: flags
* @ns_bdev: block device
* @ns_sem: semaphore for shared states
+ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
* @ns_sbwtime: previous write time of super block
* @ns_sbwcount: write count of super block
* @ns_sbsize: size of valid data in super block
+ * @ns_mount_state: file system state
* @ns_seg_seq: segment sequence counter
* @ns_segnum: index number of the latest full segment.
* @ns_nextnum: index number of the full segment index to be used next
@@ -99,13 +101,12 @@ struct the_nilfs {
struct block_device *ns_bdev;
struct rw_semaphore ns_sem;
+ struct mutex ns_snapshot_mount_mutex;
/*
* used for
* - loading the latest checkpoint exclusively.
* - allocating a new full segment.
- * - protecting s_dirt in the super_block struct
- * (see nilfs_write_super) and the following fields.
*/
struct buffer_head *ns_sbh[2];
struct nilfs_super_block *ns_sbp[2];
@@ -229,9 +230,8 @@ THE_NILFS_FNS(SB_DIRTY, sb_dirty)
* @count: refcount of this structure
* @nilfs: nilfs object
* @ifile: inode file
- * @root: root inode
* @inodes_count: number of inodes
- * @blocks_count: number of blocks (Reserved)
+ * @blocks_count: number of blocks
*/
struct nilfs_root {
__u64 cno;
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 7389d2d5e51d..1ecf46448f85 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2084,7 +2084,6 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
if (err)
return err;
pos = *ppos;
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
/* We can write back this queue in page reclaim. */
current->backing_dev_info = mapping->backing_dev_info;
written = 0;
@@ -2119,6 +2118,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
BUG_ON(iocb->ki_pos != pos);
+ sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
@@ -2127,6 +2127,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0)
ret = err;
}
+ sb_end_write(inode->i_sb);
return ret;
}
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index b341492542ca..2bc149d6a784 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2660,31 +2660,14 @@ static const struct super_operations ntfs_sops = {
.alloc_inode = ntfs_alloc_big_inode, /* VFS: Allocate new inode. */
.destroy_inode = ntfs_destroy_big_inode, /* VFS: Deallocate inode. */
#ifdef NTFS_RW
- //.dirty_inode = NULL, /* VFS: Called from
- // __mark_inode_dirty(). */
.write_inode = ntfs_write_inode, /* VFS: Write dirty inode to
disk. */
- //.drop_inode = NULL, /* VFS: Called just after the
- // inode reference count has
- // been decreased to zero.
- // NOTE: The inode lock is
- // held. See fs/inode.c::
- // generic_drop_inode(). */
- //.delete_inode = NULL, /* VFS: Delete inode from disk.
- // Called when i_count becomes
- // 0 and i_nlink is also 0. */
- //.write_super = NULL, /* Flush dirty super block to
- // disk. */
- //.sync_fs = NULL, /* ? */
- //.write_super_lockfs = NULL, /* ? */
- //.unlockfs = NULL, /* ? */
#endif /* NTFS_RW */
.put_super = ntfs_put_super, /* Syscall: umount. */
.statfs = ntfs_statfs, /* Syscall: statfs */
.remount_fs = ntfs_remount, /* Syscall: mount -o remount. */
.evict_inode = ntfs_evict_big_inode, /* VFS: Called when an inode is
removed from memory. */
- //.umount_begin = NULL, /* Forced umount. */
.show_options = ntfs_show_options, /* Show mount options in
proc. */
};
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 7602783d7f41..46a1f6d75104 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1971,6 +1971,7 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
{
struct inode *inode = file->f_path.dentry->d_inode;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ int ret;
if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
!ocfs2_writes_unwritten_extents(osb))
@@ -1985,7 +1986,12 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd,
if (!(file->f_mode & FMODE_WRITE))
return -EBADF;
- return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+ ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
+ mnt_drop_write_file(file);
+ return ret;
}
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
@@ -2261,7 +2267,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
if (iocb->ki_left == 0)
return 0;
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+ sb_start_write(inode->i_sb);
appending = file->f_flags & O_APPEND ? 1 : 0;
direct_io = file->f_flags & O_DIRECT ? 1 : 0;
@@ -2436,6 +2442,7 @@ out_sems:
ocfs2_iocb_clear_sem_locked(iocb);
mutex_unlock(&inode->i_mutex);
+ sb_end_write(inode->i_sb);
if (written)
ret = written;
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index d96f7f81d8dd..f20edcbfe700 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -928,7 +928,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (get_user(new_clusters, (int __user *)arg))
return -EFAULT;
- return ocfs2_group_extend(inode, new_clusters);
+ status = mnt_want_write_file(filp);
+ if (status)
+ return status;
+ status = ocfs2_group_extend(inode, new_clusters);
+ mnt_drop_write_file(filp);
+ return status;
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
if (!capable(CAP_SYS_RESOURCE))
@@ -937,7 +942,12 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&input, (int __user *) arg, sizeof(input)))
return -EFAULT;
- return ocfs2_group_add(inode, &input);
+ status = mnt_want_write_file(filp);
+ if (status)
+ return status;
+ status = ocfs2_group_add(inode, &input);
+ mnt_drop_write_file(filp);
+ return status;
case OCFS2_IOC_REFLINK:
if (copy_from_user(&args, argp, sizeof(args)))
return -EFAULT;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 0a42ae96dca7..2dd36af79e26 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -355,11 +355,14 @@ handle_t *ocfs2_start_trans(struct ocfs2_super *osb, int max_buffs)
if (journal_current_handle())
return jbd2_journal_start(journal, max_buffs);
+ sb_start_intwrite(osb->sb);
+
down_read(&osb->journal->j_trans_barrier);
handle = jbd2_journal_start(journal, max_buffs);
if (IS_ERR(handle)) {
up_read(&osb->journal->j_trans_barrier);
+ sb_end_intwrite(osb->sb);
mlog_errno(PTR_ERR(handle));
@@ -388,8 +391,10 @@ int ocfs2_commit_trans(struct ocfs2_super *osb,
if (ret < 0)
mlog_errno(ret);
- if (!nested)
+ if (!nested) {
up_read(&journal->j_trans_barrier);
+ sb_end_intwrite(osb->sb);
+ }
return ret;
}
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 210c35237548..a9f78c74d687 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -784,14 +784,10 @@ bail:
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
- int i;
- u8 *buffer;
- u32 count = 0;
+ u32 count;
struct ocfs2_local_alloc *la = OCFS2_LOCAL_ALLOC(alloc);
- buffer = la->la_bitmap;
- for (i = 0; i < le16_to_cpu(la->la_size); i++)
- count += hweight8(buffer[i]);
+ count = memweight(la->la_bitmap, le16_to_cpu(la->la_size));
trace_ocfs2_local_alloc_count_bits(count);
return count;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 9cd41083e991..d150372fd81d 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -136,6 +136,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sigset_t oldset;
int ret;
+ sb_start_pagefault(inode->i_sb);
ocfs2_block_signals(&oldset);
/*
@@ -165,6 +166,7 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
out:
ocfs2_unblock_signals(&oldset);
+ sb_end_pagefault(inode->i_sb);
return ret;
}
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9f32d7cbb7a3..30a055049e16 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4466,20 +4466,11 @@ int ocfs2_reflink_ioctl(struct inode *inode,
goto out_dput;
}
- error = mnt_want_write(new_path.mnt);
- if (error) {
- mlog_errno(error);
- goto out_dput;
- }
-
error = ocfs2_vfs_reflink(old_path.dentry,
new_path.dentry->d_inode,
new_dentry, preserve);
- mnt_drop_write(new_path.mnt);
out_dput:
- dput(new_dentry);
- mutex_unlock(&new_path.dentry->d_inode->i_mutex);
- path_put(&new_path);
+ done_path_create(&new_path, new_dentry);
out:
path_put(&old_path);
diff --git a/fs/open.c b/fs/open.c
index 1e914b397e12..e1f2cdb91a4d 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -164,11 +164,13 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
if (IS_APPEND(inode))
goto out_putf;
+ sb_start_write(inode->i_sb);
error = locks_verify_truncate(inode, file, length);
if (!error)
error = security_path_truncate(&file->f_path);
if (!error)
error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
+ sb_end_write(inode->i_sb);
out_putf:
fput(file);
out:
@@ -266,7 +268,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (!file->f_op->fallocate)
return -EOPNOTSUPP;
- return file->f_op->fallocate(file, mode, offset, len);
+ sb_start_write(inode->i_sb);
+ ret = file->f_op->fallocate(file, mode, offset, len);
+ sb_end_write(inode->i_sb);
+ return ret;
}
SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len)
@@ -620,7 +625,7 @@ static inline int __get_file_write_access(struct inode *inode,
/*
* Balanced in __fput()
*/
- error = mnt_want_write(mnt);
+ error = __mnt_want_write(mnt);
if (error)
put_write_access(inode);
}
@@ -654,6 +659,7 @@ static int do_dentry_open(struct file *f,
if (unlikely(f->f_flags & O_PATH))
f->f_mode = FMODE_PATH;
+ path_get(&f->f_path);
inode = f->f_path.dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
error = __get_file_write_access(inode, f->f_path.mnt);
@@ -711,7 +717,7 @@ cleanup_all:
* here, so just reset the state.
*/
file_reset_write(f);
- mnt_drop_write(f->f_path.mnt);
+ __mnt_drop_write(f->f_path.mnt);
}
}
cleanup_file:
@@ -739,9 +745,7 @@ int finish_open(struct file *file, struct dentry *dentry,
int error;
BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
- mntget(file->f_path.mnt);
- file->f_path.dentry = dget(dentry);
-
+ file->f_path.dentry = dentry;
error = do_dentry_open(file, open, current_cred());
if (!error)
*opened |= FILE_OPENED;
@@ -784,7 +788,6 @@ struct file *dentry_open(const struct path *path, int flags,
f->f_flags = flags;
f->f_path = *path;
- path_get(&f->f_path);
error = do_dentry_open(f, NULL, cred);
if (!error) {
error = open_check_o_direct(f);
@@ -849,9 +852,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
- if (!(flags & O_CREAT))
- mode = 0;
- op->mode = mode;
+ if (flags & O_CREAT)
+ op->mode = (mode & S_IALLUGO) | S_IFREG;
+ else
+ op->mode = 0;
/* Must never be set by userspace */
flags &= ~FMODE_NONOTIFY;
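The fs/open.c hunks above wrap ftruncate() and fallocate() in the new write freeze protection. A minimal sketch of the pairing every such write path is expected to follow, with example_do_write() as a hypothetical stand-in for the real work:

#include <linux/fs.h>

/* Hypothetical stand-in for the operation that actually dirties the filesystem. */
static int example_do_write(struct file *file, loff_t offset, loff_t len)
{
	return 0;
}

static int example_write_op(struct file *file, loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int ret;

	sb_start_write(inode->i_sb);	/* blocks while the superblock is frozen */
	ret = example_do_write(file, offset, len);
	sb_end_write(inode->i_sb);	/* wakes up freeze_super() waiters */
	return ret;
}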
diff --git a/fs/pipe.c b/fs/pipe.c
index 49c1065256fd..8d85d7068c1e 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -224,7 +224,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
* and the caller has to be careful not to fault before calling
* the unmap function.
*
- * Note that this function occupies KM_USER0 if @atomic != 0.
+ * Note that this function calls kmap_atomic() if @atomic != 0.
*/
void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
struct pipe_buffer *buf, int atomic)
@@ -1016,18 +1016,16 @@ fail_inode:
return NULL;
}
-struct file *create_write_pipe(int flags)
+int create_pipe_files(struct file **res, int flags)
{
int err;
- struct inode *inode;
+ struct inode *inode = get_pipe_inode();
struct file *f;
struct path path;
- struct qstr name = { .name = "" };
+ static struct qstr name = { .name = "" };
- err = -ENFILE;
- inode = get_pipe_inode();
if (!inode)
- goto err;
+ return -ENFILE;
err = -ENOMEM;
path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
@@ -1041,62 +1039,43 @@ struct file *create_write_pipe(int flags)
f = alloc_file(&path, FMODE_WRITE, &write_pipefifo_fops);
if (!f)
goto err_dentry;
- f->f_mapping = inode->i_mapping;
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
- f->f_version = 0;
- return f;
+ res[0] = alloc_file(&path, FMODE_READ, &read_pipefifo_fops);
+ if (!res[0])
+ goto err_file;
+
+ path_get(&path);
+ res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
+ res[1] = f;
+ return 0;
- err_dentry:
+err_file:
+ put_filp(f);
+err_dentry:
free_pipe_info(inode);
path_put(&path);
- return ERR_PTR(err);
+ return err;
- err_inode:
+err_inode:
free_pipe_info(inode);
iput(inode);
- err:
- return ERR_PTR(err);
-}
-
-void free_write_pipe(struct file *f)
-{
- free_pipe_info(f->f_dentry->d_inode);
- path_put(&f->f_path);
- put_filp(f);
-}
-
-struct file *create_read_pipe(struct file *wrf, int flags)
-{
- /* Grab pipe from the writer */
- struct file *f = alloc_file(&wrf->f_path, FMODE_READ,
- &read_pipefifo_fops);
- if (!f)
- return ERR_PTR(-ENFILE);
-
- path_get(&wrf->f_path);
- f->f_flags = O_RDONLY | (flags & O_NONBLOCK);
-
- return f;
+ return err;
}
int do_pipe_flags(int *fd, int flags)
{
- struct file *fw, *fr;
+ struct file *files[2];
int error;
int fdw, fdr;
if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
return -EINVAL;
- fw = create_write_pipe(flags);
- if (IS_ERR(fw))
- return PTR_ERR(fw);
- fr = create_read_pipe(fw, flags);
- error = PTR_ERR(fr);
- if (IS_ERR(fr))
- goto err_write_pipe;
+ error = create_pipe_files(files, flags);
+ if (error)
+ return error;
error = get_unused_fd_flags(flags);
if (error < 0)
@@ -1109,8 +1088,8 @@ int do_pipe_flags(int *fd, int flags)
fdw = error;
audit_fd_pair(fdr, fdw);
- fd_install(fdr, fr);
- fd_install(fdw, fw);
+ fd_install(fdr, files[0]);
+ fd_install(fdw, files[1]);
fd[0] = fdr;
fd[1] = fdw;
@@ -1119,10 +1098,8 @@ int do_pipe_flags(int *fd, int flags)
err_fdr:
put_unused_fd(fdr);
err_read_pipe:
- path_put(&fr->f_path);
- put_filp(fr);
- err_write_pipe:
- free_write_pipe(fw);
+ fput(files[0]);
+ fput(files[1]);
return error;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2772208338f8..1b6c84cbdb73 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -695,8 +695,6 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
mmput(mm);
}
- /* OK to pass negative loff_t, we can catch out-of-range */
- file->f_mode |= FMODE_UNSIGNED_OFFSET;
file->private_data = mm;
return 0;
@@ -704,7 +702,12 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
static int mem_open(struct inode *inode, struct file *file)
{
- return __mem_open(inode, file, PTRACE_MODE_ATTACH);
+ int ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
+
+ /* OK to pass negative loff_t, we can catch out-of-range */
+ file->f_mode |= FMODE_UNSIGNED_OFFSET;
+
+ return ret;
}
static ssize_t mem_rw(struct file *file, char __user *buf,
@@ -827,15 +830,16 @@ static ssize_t environ_read(struct file *file, char __user *buf,
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
while (count > 0) {
- int this_len, retval, max_len;
-
- this_len = mm->env_end - (mm->env_start + src);
+ size_t this_len, max_len;
+ int retval;
- if (this_len <= 0)
+ if (src >= (mm->env_end - mm->env_start))
break;
- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
- this_len = (this_len > max_len) ? max_len : this_len;
+ this_len = mm->env_end - (mm->env_start + src);
+
+ max_len = min_t(size_t, PAGE_SIZE, count);
+ this_len = min(max_len, this_len);
retval = access_remote_vm(mm, (mm->env_start + src),
page, this_len, 0);
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 22e0d60e53ef..76a7a697b778 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -17,23 +17,6 @@
#include <linux/bitops.h>
#include "qnx4.h"
-static void count_bits(register const char *bmPart, register int size,
- int *const tf)
-{
- char b;
- int tot = *tf;
-
- if (size > QNX4_BLOCK_SIZE) {
- size = QNX4_BLOCK_SIZE;
- }
- do {
- b = *bmPart++;
- tot += 8 - hweight8(b);
- size--;
- } while (size != 0);
- *tf = tot;
-}
-
unsigned long qnx4_count_free_blocks(struct super_block *sb)
{
int start = le32_to_cpu(qnx4_sb(sb)->BitMap->di_first_xtnt.xtnt_blk) - 1;
@@ -44,13 +27,16 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
struct buffer_head *bh;
while (total < size) {
+ int bytes = min(size - total, QNX4_BLOCK_SIZE);
+
if ((bh = sb_bread(sb, start + offset)) == NULL) {
printk(KERN_ERR "qnx4: I/O error in counting free blocks\n");
break;
}
- count_bits(bh->b_data, size - total, &total_free);
+ total_free += bytes * BITS_PER_BYTE -
+ memweight(bh->b_data, bytes);
brelse(bh);
- total += QNX4_BLOCK_SIZE;
+ total += bytes;
offset++;
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 36a29b753c79..c495a3055e2a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
goto out;
}
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warn[cnt].w_type = QUOTA_NL_NOWARN;
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!dquots[cnt])
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 4c0c7d163d15..a98b7740a0fc 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
else if (bitmap == 0)
block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;
- reiserfs_write_unlock(sb);
bh = sb_bread(sb, block);
- reiserfs_write_lock(sb);
if (bh == NULL)
reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
"reading failed", __func__, block);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a6d4268fb6c1..855da58db145 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode)
;
}
out:
+ reiserfs_write_unlock_once(inode->i_sb, depth);
clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
- reiserfs_write_unlock_once(inode->i_sb, depth);
return;
no_delete:
diff --git a/fs/select.c b/fs/select.c
index bae321569dfa..db14c781335e 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -345,8 +345,8 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
struct fdtable *fdt;
/* handle last in-complete long-word first */
- set = ~(~0UL << (n & (__NFDBITS-1)));
- n /= __NFDBITS;
+ set = ~(~0UL << (n & (BITS_PER_LONG-1)));
+ n /= BITS_PER_LONG;
fdt = files_fdtable(current->files);
open_fds = fdt->open_fds + n;
max = 0;
@@ -373,7 +373,7 @@ get_max:
max++;
set >>= 1;
} while (set);
- max += n * __NFDBITS;
+ max += n * BITS_PER_LONG;
}
return max;
@@ -435,11 +435,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
in = *inp++; out = *outp++; ex = *exp++;
all_bits = in | out | ex;
if (all_bits == 0) {
- i += __NFDBITS;
+ i += BITS_PER_LONG;
continue;
}
- for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
+ for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
int fput_needed;
if (i >= n)
break;
diff --git a/fs/splice.c b/fs/splice.c
index 7bf08fa22ec9..41514dd89462 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -996,6 +996,8 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
};
ssize_t ret;
+ sb_start_write(inode->i_sb);
+
pipe_lock(pipe);
splice_from_pipe_begin(&sd);
@@ -1034,6 +1036,7 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
*ppos += ret;
balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
}
+ sb_end_write(inode->i_sb);
return ret;
}
diff --git a/fs/super.c b/fs/super.c
index c743fb3be4b8..0902cfa6a12e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -33,12 +33,19 @@
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
+#include <linux/lockdep.h>
#include "internal.h"
LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);
+static char *sb_writers_name[SB_FREEZE_LEVELS] = {
+ "sb_writers",
+ "sb_pagefaults",
+ "sb_internal",
+};
+
/*
* One thing we have to be careful of with a per-sb shrinker is that we don't
* drop the last active reference to the superblock from within the shrinker.
@@ -62,7 +69,7 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
return -1;
if (!grab_super_passive(sb))
- return !sc->nr_to_scan ? 0 : -1;
+ return -1;
if (sb->s_op && sb->s_op->nr_cached_objects)
fs_objects = sb->s_op->nr_cached_objects(sb);
@@ -102,6 +109,35 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
return total_objects;
}
+static int init_sb_writers(struct super_block *s, struct file_system_type *type)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < SB_FREEZE_LEVELS; i++) {
+ err = percpu_counter_init(&s->s_writers.counter[i], 0);
+ if (err < 0)
+ goto err_out;
+ lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
+ &type->s_writers_key[i], 0);
+ }
+ init_waitqueue_head(&s->s_writers.wait);
+ init_waitqueue_head(&s->s_writers.wait_unfrozen);
+ return 0;
+err_out:
+ while (--i >= 0)
+ percpu_counter_destroy(&s->s_writers.counter[i]);
+ return err;
+}
+
+static void destroy_sb_writers(struct super_block *s)
+{
+ int i;
+
+ for (i = 0; i < SB_FREEZE_LEVELS; i++)
+ percpu_counter_destroy(&s->s_writers.counter[i]);
+}
+
/**
* alloc_super - create new superblock
* @type: filesystem type superblock should belong to
@@ -117,18 +153,19 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
if (s) {
if (security_sb_alloc(s)) {
+ /*
+ * We cannot call security_sb_free() without
+ * security_sb_alloc() succeeding. So bail out manually
+ */
kfree(s);
s = NULL;
goto out;
}
#ifdef CONFIG_SMP
s->s_files = alloc_percpu(struct list_head);
- if (!s->s_files) {
- security_sb_free(s);
- kfree(s);
- s = NULL;
- goto out;
- } else {
+ if (!s->s_files)
+ goto err_out;
+ else {
int i;
for_each_possible_cpu(i)
@@ -137,6 +174,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
#else
INIT_LIST_HEAD(&s->s_files);
#endif
+ if (init_sb_writers(s, type))
+ goto err_out;
s->s_flags = flags;
s->s_bdi = &default_backing_dev_info;
INIT_HLIST_NODE(&s->s_instances);
@@ -178,7 +217,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
mutex_init(&s->s_dquot.dqio_mutex);
mutex_init(&s->s_dquot.dqonoff_mutex);
init_rwsem(&s->s_dquot.dqptr_sem);
- init_waitqueue_head(&s->s_wait_unfrozen);
s->s_maxbytes = MAX_NON_LFS;
s->s_op = &default_op;
s->s_time_gran = 1000000000;
@@ -190,6 +228,16 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
}
out:
return s;
+err_out:
+ security_sb_free(s);
+#ifdef CONFIG_SMP
+ if (s->s_files)
+ free_percpu(s->s_files);
+#endif
+ destroy_sb_writers(s);
+ kfree(s);
+ s = NULL;
+ goto out;
}
/**
@@ -203,6 +251,7 @@ static inline void destroy_super(struct super_block *s)
#ifdef CONFIG_SMP
free_percpu(s->s_files);
#endif
+ destroy_sb_writers(s);
security_sb_free(s);
WARN_ON(!list_empty(&s->s_mounts));
kfree(s->s_subtype);
@@ -320,7 +369,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
/*
* grab_super_passive - acquire a passive reference
- * @s: reference we are trying to grab
+ * @sb: reference we are trying to grab
*
* Tries to acquire a passive reference. This is used in places where we
* cannot take an active reference but we need to ensure that the
@@ -488,46 +537,6 @@ void drop_super(struct super_block *sb)
EXPORT_SYMBOL(drop_super);
/**
- * sync_supers - helper for periodic superblock writeback
- *
- * Call the write_super method if present on all dirty superblocks in
- * the system. This is for the periodic writeback used by most older
- * filesystems. For data integrity superblock writeback use
- * sync_filesystems() instead.
- *
- * Note: check the dirty flag before waiting, so we don't
- * hold up the sync while mounting a device. (The newly
- * mounted device won't need syncing.)
- */
-void sync_supers(void)
-{
- struct super_block *sb, *p = NULL;
-
- spin_lock(&sb_lock);
- list_for_each_entry(sb, &super_blocks, s_list) {
- if (hlist_unhashed(&sb->s_instances))
- continue;
- if (sb->s_op->write_super && sb->s_dirt) {
- sb->s_count++;
- spin_unlock(&sb_lock);
-
- down_read(&sb->s_umount);
- if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN))
- sb->s_op->write_super(sb);
- up_read(&sb->s_umount);
-
- spin_lock(&sb_lock);
- if (p)
- __put_super(p);
- p = sb;
- }
- }
- if (p)
- __put_super(p);
- spin_unlock(&sb_lock);
-}
-
-/**
* iterate_supers - call function for all active superblocks
* @f: function to call
* @arg: argument to pass to it
@@ -651,10 +660,11 @@ struct super_block *get_super_thawed(struct block_device *bdev)
{
while (1) {
struct super_block *s = get_super(bdev);
- if (!s || s->s_frozen == SB_UNFROZEN)
+ if (!s || s->s_writers.frozen == SB_UNFROZEN)
return s;
up_read(&s->s_umount);
- vfs_check_frozen(s, SB_FREEZE_WRITE);
+ wait_event(s->s_writers.wait_unfrozen,
+ s->s_writers.frozen == SB_UNFROZEN);
put_super(s);
}
}
@@ -732,7 +742,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
int retval;
int remount_ro;
- if (sb->s_frozen != SB_UNFROZEN)
+ if (sb->s_writers.frozen != SB_UNFROZEN)
return -EBUSY;
#ifdef CONFIG_BLOCK
@@ -1163,6 +1173,120 @@ out:
return ERR_PTR(error);
}
+/*
+ * This is an internal function, please use sb_end_{write,pagefault,intwrite}
+ * instead.
+ */
+void __sb_end_write(struct super_block *sb, int level)
+{
+ percpu_counter_dec(&sb->s_writers.counter[level-1]);
+ /*
+ * Make sure s_writers are updated before we wake up waiters in
+ * freeze_super().
+ */
+ smp_mb();
+ if (waitqueue_active(&sb->s_writers.wait))
+ wake_up(&sb->s_writers.wait);
+ rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
+}
+EXPORT_SYMBOL(__sb_end_write);
+
+#ifdef CONFIG_LOCKDEP
+/*
+ * We want lockdep to tell us about possible deadlocks with freezing but
+ * it's a bit tricky to properly instrument it. Getting freeze protection
+ * works like getting a read lock but there are subtle problems. XFS, for
+ * example, gets freeze protection on the internal level twice in some cases,
+ * which is OK only because we already hold freeze protection on a higher
+ * level. Due to these cases we have to tell lockdep we are doing a trylock
+ * when we already hold freeze protection for a higher freeze level.
+ */
+static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
+ unsigned long ip)
+{
+ int i;
+
+ if (!trylock) {
+ for (i = 0; i < level - 1; i++)
+ if (lock_is_held(&sb->s_writers.lock_map[i])) {
+ trylock = true;
+ break;
+ }
+ }
+ rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
+}
+#endif
+
+/*
+ * This is an internal function, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+int __sb_start_write(struct super_block *sb, int level, bool wait)
+{
+retry:
+ if (unlikely(sb->s_writers.frozen >= level)) {
+ if (!wait)
+ return 0;
+ wait_event(sb->s_writers.wait_unfrozen,
+ sb->s_writers.frozen < level);
+ }
+
+#ifdef CONFIG_LOCKDEP
+ acquire_freeze_lock(sb, level, !wait, _RET_IP_);
+#endif
+ percpu_counter_inc(&sb->s_writers.counter[level-1]);
+ /*
+ * Make sure counter is updated before we check for frozen.
+ * freeze_super() first sets frozen and then checks the counter.
+ */
+ smp_mb();
+ if (unlikely(sb->s_writers.frozen >= level)) {
+ __sb_end_write(sb, level);
+ goto retry;
+ }
+ return 1;
+}
+EXPORT_SYMBOL(__sb_start_write);
+
+/**
+ * sb_wait_write - wait until all writers to given file system finish
+ * @sb: the super for which we wait
+ * @level: type of writers we wait for (normal vs page fault)
+ *
+ * This function waits until there are no writers of the given type to the
+ * given file system. The caller should make sure there can be no new writers
+ * of type @level before calling this function, otherwise it can livelock.
+ */
+static void sb_wait_write(struct super_block *sb, int level)
+{
+ s64 writers;
+
+ /*
+ * We just cycle through the lock acquisition in lockdep here so that it
+ * does not complain about returning to userspace with the lock still held.
+ */
+ rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
+ rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);
+
+ do {
+ DEFINE_WAIT(wait);
+
+ /*
+ * We use a barrier in prepare_to_wait() to separate setting
+ * of frozen and checking of the counter
+ */
+ prepare_to_wait(&sb->s_writers.wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+
+ writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
+ if (writers)
+ schedule();
+
+ finish_wait(&sb->s_writers.wait, &wait);
+ } while (writers);
+}
+
/**
* freeze_super - lock the filesystem and force it into a consistent state
* @sb: the super to lock
@@ -1170,6 +1294,31 @@ out:
* Syncs the super to make sure the filesystem is consistent and calls the fs's
* freeze_fs. Subsequent calls to this without first thawing the fs will return
* -EBUSY.
+ *
+ * During this function, sb->s_writers.frozen goes through these values:
+ *
+ * SB_UNFROZEN: File system is normal, all writes progress as usual.
+ *
+ * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
+ * writes should be blocked, though page faults are still allowed. We wait for
+ * all writes to complete and then proceed to the next stage.
+ *
+ * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
+ * but internal fs threads can still modify the filesystem (although they
+ * should not dirty new pages or inodes), writeback can run etc. After waiting
+ * for all running page faults we sync the filesystem which will clean all
+ * dirty pages and inodes (no new dirty pages or inodes can be created when
+ * sync is running).
+ *
+ * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
+ * modification are blocked (e.g. XFS preallocation truncation on inode
+ * reclaim). This is usually implemented by blocking new transactions for
+ * filesystems that have them and need this additional guard. After all
+ * internal writers are finished we call ->freeze_fs() to finish filesystem
+ * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
+ * mostly auxiliary for filesystems to verify they do not modify a frozen fs.
+ *
+ * sb->s_writers.frozen is protected by sb->s_umount.
*/
int freeze_super(struct super_block *sb)
{
@@ -1177,7 +1326,7 @@ int freeze_super(struct super_block *sb)
atomic_inc(&sb->s_active);
down_write(&sb->s_umount);
- if (sb->s_frozen) {
+ if (sb->s_writers.frozen != SB_UNFROZEN) {
deactivate_locked_super(sb);
return -EBUSY;
}
@@ -1188,33 +1337,53 @@ int freeze_super(struct super_block *sb)
}
if (sb->s_flags & MS_RDONLY) {
- sb->s_frozen = SB_FREEZE_TRANS;
- smp_wmb();
+ /* Nothing to do really... */
+ sb->s_writers.frozen = SB_FREEZE_COMPLETE;
up_write(&sb->s_umount);
return 0;
}
- sb->s_frozen = SB_FREEZE_WRITE;
+ /* From now on, no new normal writers can start */
+ sb->s_writers.frozen = SB_FREEZE_WRITE;
smp_wmb();
+ /* Release s_umount to preserve sb_start_write -> s_umount ordering */
+ up_write(&sb->s_umount);
+
+ sb_wait_write(sb, SB_FREEZE_WRITE);
+
+ /* Now we go and block page faults... */
+ down_write(&sb->s_umount);
+ sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
+ smp_wmb();
+
+ sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
+
+ /* All writers are done so after syncing there won't be dirty data */
sync_filesystem(sb);
- sb->s_frozen = SB_FREEZE_TRANS;
+ /* Now wait for internal filesystem counter */
+ sb->s_writers.frozen = SB_FREEZE_FS;
smp_wmb();
+ sb_wait_write(sb, SB_FREEZE_FS);
- sync_blockdev(sb->s_bdev);
if (sb->s_op->freeze_fs) {
ret = sb->s_op->freeze_fs(sb);
if (ret) {
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
- sb->s_frozen = SB_UNFROZEN;
+ sb->s_writers.frozen = SB_UNFROZEN;
smp_wmb();
- wake_up(&sb->s_wait_unfrozen);
+ wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
return ret;
}
}
+ /*
+ * This is just for debugging purposes so that the fs can warn if it
+ * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
+ */
+ sb->s_writers.frozen = SB_FREEZE_COMPLETE;
up_write(&sb->s_umount);
return 0;
}
@@ -1231,7 +1400,7 @@ int thaw_super(struct super_block *sb)
int error;
down_write(&sb->s_umount);
- if (sb->s_frozen == SB_UNFROZEN) {
+ if (sb->s_writers.frozen == SB_UNFROZEN) {
up_write(&sb->s_umount);
return -EINVAL;
}
@@ -1244,16 +1413,15 @@ int thaw_super(struct super_block *sb)
if (error) {
printk(KERN_ERR
"VFS:Filesystem thaw failed\n");
- sb->s_frozen = SB_FREEZE_TRANS;
up_write(&sb->s_umount);
return error;
}
}
out:
- sb->s_frozen = SB_UNFROZEN;
+ sb->s_writers.frozen = SB_UNFROZEN;
smp_wmb();
- wake_up(&sb->s_wait_unfrozen);
+ wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
return 0;
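The fs/super.c changes above introduce three nested freeze-protection levels, and freeze_super() waits for each in turn. A minimal sketch of where a filesystem is expected to take each level: normal writes use sb_start_write() (see the fs/open.c hunks earlier), page faults use sb_start_pagefault() (as the ocfs2 mmap hunk does), and internal transactions use sb_start_intwrite() (as the ocfs2 journal hunk does). The example_* helpers are hypothetical placeholders:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical placeholders for real filesystem work. */
static int example_fill_page(struct vm_fault *vmf) { return 0; }
static int example_run_transaction(struct super_block *sb) { return 0; }

static int example_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret;

	sb_start_pagefault(inode->i_sb);	/* SB_FREEZE_PAGEFAULT level */
	ret = example_fill_page(vmf);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static int example_internal_write(struct super_block *sb)
{
	int ret;

	sb_start_intwrite(sb);			/* SB_FREEZE_FS level */
	ret = example_run_transaction(sb);
	sb_end_intwrite(sb);
	return ret;
}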
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index a4759833d62d..614b2b544880 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -228,6 +228,8 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = 0;
if (bb->vm_ops->page_mkwrite)
ret = bb->vm_ops->page_mkwrite(vma, vmf);
+ else
+ file_update_time(file);
sysfs_put_active(attr_sd);
return ret;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 8b8cc4e945f4..760de723dadb 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -167,7 +167,7 @@ struct ubifs_global_debug_info {
#define ubifs_dbg_msg(type, fmt, ...) \
pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__)
-#define DBG_KEY_BUF_LEN 32
+#define DBG_KEY_BUF_LEN 48
#define ubifs_dbg_msg_key(type, key, fmt, ...) do { \
char __tmp_key_buf[DBG_KEY_BUF_LEN]; \
pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 35389ca2d267..7bd6e72afd11 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -37,11 +37,11 @@
*
* A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
* implement. However, this is not true for 'ubifs_writepage()', which may be
- * called with @i_mutex unlocked. For example, when pdflush is doing background
- * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
- * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
- * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
- * we are only guaranteed that the page is locked.
+ * called with @i_mutex unlocked. For example, when flusher thread is doing
+ * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
+ * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
+ * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
+ * 'ubifs_writepage()' we are only guaranteed that the page is locked.
*
* Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
* read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ce33b2beb151..8640920766ed 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr)
return 0;
out_err:
- ubifs_lpt_free(c, 0);
+ if (wr)
+ ubifs_lpt_free(c, 1);
+ if (rd)
+ ubifs_lpt_free(c, 0);
return err;
}
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index c30d976b4be8..edeec499c048 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
corrupted_rescan:
/* Re-scan the corrupted data with verbose messages */
- ubifs_err("corruptio %d", ret);
+ ubifs_err("corruption %d", ret);
ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
corrupted:
ubifs_scanned_corruption(c, lnum, offs, buf);
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index eba46d4a7619..94d78fc5d4e0 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c)
c->replaying = 1;
lnum = c->ltail_lnum = c->lhead_lnum;
- lnum = UBIFS_LOG_LNUM;
do {
err = replay_log_leb(c, lnum, 0, c->sbuf);
if (err == 1)
@@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c)
if (err)
goto out;
lnum = ubifs_next_log_lnum(c, lnum);
- } while (lnum != UBIFS_LOG_LNUM);
+ } while (lnum != c->ltail_lnum);
err = replay_buds(c);
if (err)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1c766c39c038..71a197f0f93d 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc)
mutex_lock(&ui->ui_mutex);
/*
* Due to races between write-back forced by budgeting
- * (see 'sync_some_inodes()') and pdflush write-back, the inode may
+ * (see 'sync_some_inodes()') and background write-back, the inode may
* have already been synchronized, do not do this again. This might
* also happen if it was synchronized in an VFS operation, e.g.
* 'ubifs_link()'.
@@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c)
*
* This function mounts UBIFS file system. Returns zero in case of success and
* a negative error code in case of failure.
- *
- * Note, the function does not de-allocate resources it it fails half way
- * through, and the caller has to do this instead.
*/
static int mount_ubifs(struct ubifs_info *c)
{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fafaad795cd6..aa233469b3c1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize)
if (err)
return err;
down_write(&iinfo->i_data_sem);
- } else
+ } else {
iinfo->i_lenAlloc = newsize;
+ goto set_size;
+ }
}
err = udf_extend_file(inode, newsize);
if (err) {
up_write(&iinfo->i_data_sem);
return err;
}
+set_size:
truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index dcbf98722afc..18fc038a438d 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
+ ret = 1;
goto out_bh;
}
@@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
if (udf_load_sparable_map(sb, map,
- (struct sparablePartitionMap *)gpm) < 0)
+ (struct sparablePartitionMap *)gpm) < 0) {
+ ret = 1;
goto out_bh;
+ }
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
@@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!silent)
pr_notice("Rescanning with blocksize %d\n",
UDF_DEFAULT_BLOCKSIZE);
+ brelse(sbi->s_lvid_bh);
+ sbi->s_lvid_bh = NULL;
uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
}
diff --git a/fs/xattr.c b/fs/xattr.c
index 1d7ac3790458..4d45b7189e7e 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -427,6 +427,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
{
ssize_t error;
void *kvalue = NULL;
+ void *vvalue = NULL;
char kname[XATTR_NAME_MAX + 1];
error = strncpy_from_user(kname, name, sizeof(kname));
@@ -438,9 +439,13 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
if (size) {
if (size > XATTR_SIZE_MAX)
size = XATTR_SIZE_MAX;
- kvalue = kzalloc(size, GFP_KERNEL);
- if (!kvalue)
- return -ENOMEM;
+ kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!kvalue) {
+ vvalue = vmalloc(size);
+ if (!vvalue)
+ return -ENOMEM;
+ kvalue = vvalue;
+ }
}
error = vfs_getxattr(d, kname, kvalue, size);
@@ -452,7 +457,10 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
than XATTR_SIZE_MAX bytes. Not possible. */
error = -E2BIG;
}
- kfree(kvalue);
+ if (vvalue)
+ vfree(vvalue);
+ else
+ kfree(kvalue);
return error;
}
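The getxattr() hunk above allocates the value buffer with kzalloc(GFP_KERNEL | __GFP_NOWARN) and quietly falls back to vmalloc() when the slab allocator cannot satisfy the request, remembering which allocator was used so the matching free is called. A minimal sketch of that pattern with hypothetical helper names:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static void *example_alloc_buf(size_t size, bool *is_vmalloc)
{
	void *buf = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	*is_vmalloc = false;
	if (!buf) {
		buf = vmalloc(size);	/* note: not zeroed, unlike kzalloc() */
		*is_vmalloc = true;
	}
	return buf;
}

static void example_free_buf(void *buf, bool is_vmalloc)
{
	if (is_vmalloc)
		vfree(buf);
	else
		kfree(buf);
}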
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index a6caa0022c9b..359fb86ed876 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -51,20 +51,6 @@ typedef struct xfs_alloc_rec_incore {
typedef __be32 xfs_alloc_ptr_t;
/*
- * Minimum and maximum blocksize and sectorsize.
- * The blocksize upper limit is pretty much arbitrary.
- * The sectorsize upper limit is due to sizeof(sb_sectsize).
- */
-#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
-#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
-#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
-#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
-#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
-#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
-#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
-#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
-
-/*
* Block numbers in the AG:
* SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
*/
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 8dad722c0041..e562dd43f41f 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -124,6 +124,12 @@ xfs_setfilesize_trans_alloc(
ioend->io_append_trans = tp;
/*
+ * We will pass freeze protection with a transaction. So tell lockdep
+ * we released it.
+ */
+ rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 1, _THIS_IP_);
+ /*
* We hand off the transaction to the completion thread now, so
* clear the flag here.
*/
@@ -179,7 +185,7 @@ xfs_finish_ioend(
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
- if (ioend->io_type == IO_UNWRITTEN)
+ if (ioend->io_type == XFS_IO_UNWRITTEN)
queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
else if (ioend->io_append_trans)
queue_work(mp->m_data_workqueue, &ioend->io_work);
@@ -199,6 +205,15 @@ xfs_end_io(
struct xfs_inode *ip = XFS_I(ioend->io_inode);
int error = 0;
+ if (ioend->io_append_trans) {
+ /*
+ * We've got freeze protection passed with the transaction.
+ * Tell lockdep about it.
+ */
+ rwsem_acquire_read(
+ &ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
+ }
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
ioend->io_error = -EIO;
goto done;
@@ -210,7 +225,7 @@ xfs_end_io(
* For unwritten extents we need to issue transactions to convert a
* range to normal written extens after the data I/O has finished.
*/
- if (ioend->io_type == IO_UNWRITTEN) {
+ if (ioend->io_type == XFS_IO_UNWRITTEN) {
/*
* For buffered I/O we never preallocate a transaction when
* doing the unwritten extent conversion, but for direct I/O
@@ -312,7 +327,7 @@ xfs_map_blocks(
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
- if (type == IO_UNWRITTEN)
+ if (type == XFS_IO_UNWRITTEN)
bmapi_flags |= XFS_BMAPI_IGSTATE;
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
@@ -323,10 +338,10 @@ xfs_map_blocks(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
- ASSERT(offset <= mp->m_maxioffset);
+ ASSERT(offset <= mp->m_super->s_maxbytes);
- if (offset + count > mp->m_maxioffset)
- count = mp->m_maxioffset - offset;
+ if (offset + count > mp->m_super->s_maxbytes)
+ count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
@@ -336,7 +351,7 @@ xfs_map_blocks(
if (error)
return -XFS_ERROR(error);
- if (type == IO_DELALLOC &&
+ if (type == XFS_IO_DELALLOC &&
(!nimaps || isnullstartblock(imap->br_startblock))) {
error = xfs_iomap_write_allocate(ip, offset, count, imap);
if (!error)
@@ -345,7 +360,7 @@ xfs_map_blocks(
}
#ifdef DEBUG
- if (type == IO_UNWRITTEN) {
+ if (type == XFS_IO_UNWRITTEN) {
ASSERT(nimaps);
ASSERT(imap->br_startblock != HOLESTARTBLOCK);
ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
@@ -634,11 +649,11 @@ xfs_check_page_type(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
- acceptable += (type == IO_UNWRITTEN);
+ acceptable += (type == XFS_IO_UNWRITTEN);
else if (buffer_delay(bh))
- acceptable += (type == IO_DELALLOC);
+ acceptable += (type == XFS_IO_DELALLOC);
else if (buffer_dirty(bh) && buffer_mapped(bh))
- acceptable += (type == IO_OVERWRITE);
+ acceptable += (type == XFS_IO_OVERWRITE);
else
break;
} while ((bh = bh->b_this_page) != head);
@@ -721,11 +736,11 @@ xfs_convert_page(
if (buffer_unwritten(bh) || buffer_delay(bh) ||
buffer_mapped(bh)) {
if (buffer_unwritten(bh))
- type = IO_UNWRITTEN;
+ type = XFS_IO_UNWRITTEN;
else if (buffer_delay(bh))
- type = IO_DELALLOC;
+ type = XFS_IO_DELALLOC;
else
- type = IO_OVERWRITE;
+ type = XFS_IO_OVERWRITE;
if (!xfs_imap_valid(inode, imap, offset)) {
done = 1;
@@ -733,7 +748,7 @@ xfs_convert_page(
}
lock_buffer(bh);
- if (type != IO_OVERWRITE)
+ if (type != XFS_IO_OVERWRITE)
xfs_map_at_offset(inode, bh, imap, offset);
xfs_add_to_ioend(inode, bh, offset, type,
ioendp, done);
@@ -831,7 +846,7 @@ xfs_aops_discard_page(
struct buffer_head *bh, *head;
loff_t offset = page_offset(page);
- if (!xfs_check_page_type(page, IO_DELALLOC))
+ if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -927,11 +942,26 @@ xfs_vm_writepage(
end_index = offset >> PAGE_CACHE_SHIFT;
last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
if (page->index >= end_index) {
- if ((page->index >= end_index + 1) ||
- !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
+ unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
+
+ /*
+ * Just skip the page if it is fully outside i_size, e.g. due
+ * to a truncate operation that is in progress.
+ */
+ if (page->index >= end_index + 1 || offset_into_page == 0) {
unlock_page(page);
return 0;
}
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each
+ * and every writepage invocation because it may be mmapped.
+ * "A file is mapped in multiples of the page size. For a file
+ * that is not a multiple of the page size, the remaining
+ * memory is zeroed when mapped, and writes to that region are
+ * not written out to the file."
+ */
+ zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
}
end_offset = min_t(unsigned long long,
@@ -941,7 +971,7 @@ xfs_vm_writepage(
bh = head = page_buffers(page);
offset = page_offset(page);
- type = IO_OVERWRITE;
+ type = XFS_IO_OVERWRITE;
if (wbc->sync_mode == WB_SYNC_NONE)
nonblocking = 1;
@@ -966,18 +996,18 @@ xfs_vm_writepage(
}
if (buffer_unwritten(bh)) {
- if (type != IO_UNWRITTEN) {
- type = IO_UNWRITTEN;
+ if (type != XFS_IO_UNWRITTEN) {
+ type = XFS_IO_UNWRITTEN;
imap_valid = 0;
}
} else if (buffer_delay(bh)) {
- if (type != IO_DELALLOC) {
- type = IO_DELALLOC;
+ if (type != XFS_IO_DELALLOC) {
+ type = XFS_IO_DELALLOC;
imap_valid = 0;
}
} else if (buffer_uptodate(bh)) {
- if (type != IO_OVERWRITE) {
- type = IO_OVERWRITE;
+ if (type != XFS_IO_OVERWRITE) {
+ type = XFS_IO_OVERWRITE;
imap_valid = 0;
}
} else {
@@ -1013,7 +1043,7 @@ xfs_vm_writepage(
}
if (imap_valid) {
lock_buffer(bh);
- if (type != IO_OVERWRITE)
+ if (type != XFS_IO_OVERWRITE)
xfs_map_at_offset(inode, bh, &imap, offset);
xfs_add_to_ioend(inode, bh, offset, type, &ioend,
new_ioend);
@@ -1054,7 +1084,7 @@ xfs_vm_writepage(
* Reserve log space if we might write beyond the on-disk
* inode size.
*/
- if (ioend->io_type != IO_UNWRITTEN &&
+ if (ioend->io_type != XFS_IO_UNWRITTEN &&
xfs_ioend_is_append(ioend)) {
err = xfs_setfilesize_trans_alloc(ioend);
if (err)
@@ -1162,9 +1192,9 @@ __xfs_get_blocks(
lockmode = xfs_ilock_map_shared(ip);
}
- ASSERT(offset <= mp->m_maxioffset);
- if (offset + size > mp->m_maxioffset)
- size = mp->m_maxioffset - offset;
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+ if (offset + size > mp->m_super->s_maxbytes)
+ size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1351,7 +1381,7 @@ xfs_end_io_direct_write(
ioend->io_iocb = iocb;
ioend->io_result = ret;
if (private && size > 0)
- ioend->io_type = IO_UNWRITTEN;
+ ioend->io_type = XFS_IO_UNWRITTEN;
if (is_async) {
ioend->io_isasync = 1;
@@ -1383,7 +1413,7 @@ xfs_vm_direct_IO(
* and converts at least on unwritten extent we will cancel
* the still clean transaction after the I/O has finished.
*/
- iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
+ iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
if (offset + size > XFS_I(inode)->i_d.di_size) {
ret = xfs_setfilesize_trans_alloc(ioend);
if (ret)
@@ -1410,6 +1440,9 @@ out_trans_cancel:
if (ioend->io_append_trans) {
current_set_flags_nested(&ioend->io_append_trans->t_pflags,
PF_FSTRANS);
+ rwsem_acquire_read(
+ &inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
+ 0, 1, _THIS_IP_);
xfs_trans_cancel(ioend->io_append_trans, 0);
}
out_destroy_ioend:
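The xfs_aops.c comments above describe passing freeze protection to another thread along with a transaction: the submitting context tells lockdep it released the SB_FREEZE_FS-level map, and the completion worker re-acquires it (flagged as a trylock) before finishing or cancelling the transaction. A minimal sketch of that hand-off with the transaction handling elided:

#include <linux/fs.h>
#include <linux/lockdep.h>

static void example_pass_freeze_protection(struct super_block *sb)
{
	/* Submitting thread: the protection logically travels with the
	 * transaction, so tell lockdep this context no longer holds it. */
	rwsem_release(&sb->s_writers.lock_map[SB_FREEZE_FS - 1], 1, _THIS_IP_);
}

static void example_resume_freeze_protection(struct super_block *sb)
{
	/* Worker thread: tell lockdep this context now holds the freeze
	 * protection, marked as a trylock to match the hunks above. */
	rwsem_acquire_read(&sb->s_writers.lock_map[SB_FREEZE_FS - 1],
			   0, 1, _THIS_IP_);
}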
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 84eafbcb0d9d..c325abb8d61a 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -24,17 +24,17 @@ extern mempool_t *xfs_ioend_pool;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
- IO_DIRECT = 0, /* special case for direct I/O ioends */
- IO_DELALLOC, /* mapping covers delalloc region */
- IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
- IO_OVERWRITE, /* mapping covers already allocated extent */
+ XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
+ XFS_IO_DELALLOC, /* covers delalloc region */
+ XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
+ XFS_IO_OVERWRITE, /* covers already allocated extent */
};
#define XFS_IO_TYPES \
{ 0, "" }, \
- { IO_DELALLOC, "delalloc" }, \
- { IO_UNWRITTEN, "unwritten" }, \
- { IO_OVERWRITE, "overwrite" }
+ { XFS_IO_DELALLOC, "delalloc" }, \
+ { XFS_IO_UNWRITTEN, "unwritten" }, \
+ { XFS_IO_OVERWRITE, "overwrite" }
/*
* xfs_ioend struct manages large extent writes for XFS.
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index a17ff01b5adf..0ca1f0be62d2 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -893,7 +893,7 @@ STATIC int
xfs_attr_leaf_addname(xfs_da_args_t *args)
{
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int retval, error, committed, forkoff;
trace_xfs_attr_leaf_addname(args);
@@ -915,11 +915,11 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
*/
retval = xfs_attr_leaf_lookup_int(bp, args);
if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
return(retval);
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE) { /* pure create op */
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
return(retval);
}
@@ -937,7 +937,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* if required.
*/
retval = xfs_attr_leaf_add(bp, args);
- xfs_da_buf_done(bp);
if (retval == ENOSPC) {
/*
* Promote the attribute list to the Btree format, then
@@ -1065,8 +1064,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
- } else
- xfs_da_buf_done(bp);
+ }
/*
* Commit the remove and start the next trans in series.
@@ -1092,7 +1090,7 @@ STATIC int
xfs_attr_leaf_removename(xfs_da_args_t *args)
{
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error, committed, forkoff;
trace_xfs_attr_leaf_removename(args);
@@ -1111,7 +1109,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
ASSERT(bp != NULL);
error = xfs_attr_leaf_lookup_int(bp, args);
if (error == ENOATTR) {
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
return(error);
}
@@ -1141,8 +1139,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
- } else
- xfs_da_buf_done(bp);
+ }
return(0);
}
@@ -1155,7 +1152,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
args->blkno = 0;
@@ -1167,11 +1164,11 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
error = xfs_attr_leaf_lookup_int(bp, args);
if (error != EEXIST) {
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
return(error);
}
error = xfs_attr_leaf_getvalue(bp, args);
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
error = xfs_attr_rmtval_get(args);
}
@@ -1186,23 +1183,23 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
xfs_attr_leafblock_t *leaf;
int error;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
context->cursor->blkno = 0;
error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
if (error)
return XFS_ERROR(error);
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return XFS_ERROR(EFSCORRUPTED);
}
error = xfs_attr_leaf_list_int(bp, context);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return XFS_ERROR(error);
}
@@ -1489,7 +1486,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
xfs_da_state_t *state;
xfs_da_state_blk_t *blk;
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int retval, error, committed, forkoff;
trace_xfs_attr_node_removename(args);
@@ -1601,14 +1598,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
*/
ASSERT(state->path.active == 1);
ASSERT(state->path.blk[0].bp);
- xfs_da_buf_done(state->path.blk[0].bp);
state->path.blk[0].bp = NULL;
error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
XFS_ATTR_FORK);
if (error)
goto out;
- ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) ==
+ ASSERT((((xfs_attr_leafblock_t *)bp->b_addr)->hdr.info.magic) ==
cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
@@ -1635,7 +1631,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
} else
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
}
error = 0;
@@ -1665,8 +1661,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = xfs_da_blkno(blk->bp);
- xfs_da_buf_done(blk->bp);
+ blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
@@ -1681,8 +1676,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
- blk->disk_blkno = xfs_da_blkno(blk->bp);
- xfs_da_buf_done(blk->bp);
+ blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
@@ -1792,7 +1786,7 @@ xfs_attr_node_get(xfs_da_args_t *args)
* If not in a transaction, we have to release all the buffers.
*/
for (i = 0; i < state->path.active; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
+ xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
@@ -1808,7 +1802,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
xfs_da_intnode_t *node;
xfs_da_node_entry_t *btree;
int error, i;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
cursor = context->cursor;
cursor->initted = 1;
@@ -1825,30 +1819,30 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
if ((error != 0) && (error != EFSCORRUPTED))
return(error);
if (bp) {
- node = bp->data;
+ node = bp->b_addr;
switch (be16_to_cpu(node->hdr.info.magic)) {
case XFS_DA_NODE_MAGIC:
trace_xfs_attr_list_wrong_blk(context);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
bp = NULL;
break;
case XFS_ATTR_LEAF_MAGIC:
- leaf = bp->data;
+ leaf = bp->b_addr;
if (cursor->hashval > be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
bp = NULL;
} else if (cursor->hashval <=
be32_to_cpu(leaf->entries[0].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
bp = NULL;
}
break;
default:
trace_xfs_attr_list_wrong_blk(context);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
bp = NULL;
}
}
@@ -1873,7 +1867,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
context->dp->i_mount);
return(XFS_ERROR(EFSCORRUPTED));
}
- node = bp->data;
+ node = bp->b_addr;
if (node->hdr.info.magic ==
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))
break;
@@ -1883,7 +1877,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
XFS_ERRLEVEL_LOW,
context->dp->i_mount,
node);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return(XFS_ERROR(EFSCORRUPTED));
}
btree = node->btree;
@@ -1898,10 +1892,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
}
}
if (i == be16_to_cpu(node->hdr.count)) {
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return(0);
}
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
}
}
ASSERT(bp != NULL);
@@ -1912,24 +1906,24 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
* adding the information.
*/
for (;;) {
- leaf = bp->data;
+ leaf = bp->b_addr;
if (unlikely(leaf->hdr.info.magic !=
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return(XFS_ERROR(EFSCORRUPTED));
}
error = xfs_attr_leaf_list_int(bp, context);
if (error) {
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return error;
}
if (context->seen_enough || leaf->hdr.info.forw == 0)
break;
cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
&bp, XFS_ATTR_FORK);
if (error)
@@ -1941,7 +1935,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(XFS_ERROR(EFSCORRUPTED));
}
}
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return(0);
}
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 7d89d800f517..d330111ca738 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -54,10 +54,10 @@
* Routines used for growing the Btree.
*/
STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
- xfs_dabuf_t **bpp);
-STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
- int freemap_index);
-STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer);
+ struct xfs_buf **bpp);
+STATIC int xfs_attr_leaf_add_work(struct xfs_buf *leaf_buffer,
+ xfs_da_args_t *args, int freemap_index);
+STATIC void xfs_attr_leaf_compact(xfs_trans_t *tp, struct xfs_buf *leaf_buffer);
STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *blk1,
xfs_da_state_blk_t *blk2);
@@ -71,9 +71,9 @@ STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
* Routines used for shrinking the Btree.
*/
STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dabuf_t *bp, int level);
+ struct xfs_buf *bp, int level);
STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
- xfs_dabuf_t *bp);
+ struct xfs_buf *bp);
STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dablk_t blkno, int blkcnt);
@@ -480,7 +480,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
char *tmpbuffer;
int error, i, size;
xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
xfs_ifork_t *ifp;
trace_xfs_attr_sf_to_leaf(args);
@@ -550,8 +550,6 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
error = 0;
out:
- if(bp)
- xfs_da_buf_done(bp);
kmem_free(tmpbuffer);
return(error);
}
@@ -737,14 +735,16 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
* a shortform attribute list.
*/
int
-xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
+xfs_attr_shortform_allfit(
+ struct xfs_buf *bp,
+ struct xfs_inode *dp)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
xfs_attr_leaf_name_local_t *name_loc;
int bytes, i;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
entry = &leaf->entries[0];
@@ -774,7 +774,10 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
* Convert a leaf attribute list to shortform attribute list
*/
int
-xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
+xfs_attr_leaf_to_shortform(
+ struct xfs_buf *bp,
+ xfs_da_args_t *args,
+ int forkoff)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
@@ -791,10 +794,10 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
ASSERT(tmpbuffer != NULL);
ASSERT(bp != NULL);
- memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
+ memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(dp->i_mount));
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
- memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
+ memset(bp->b_addr, 0, XFS_LBSIZE(dp->i_mount));
/*
* Clean out the prior contents of the attribute list.
@@ -855,7 +858,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
xfs_attr_leafblock_t *leaf;
xfs_da_intnode_t *node;
xfs_inode_t *dp;
- xfs_dabuf_t *bp1, *bp2;
+ struct xfs_buf *bp1, *bp2;
xfs_dablk_t blkno;
int error;
@@ -877,10 +880,9 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
if (error)
goto out;
ASSERT(bp2 != NULL);
- memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
- xfs_da_buf_done(bp1);
+ memcpy(bp2->b_addr, bp1->b_addr, XFS_LBSIZE(dp->i_mount));
bp1 = NULL;
- xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
+ xfs_trans_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);
/*
* Set up the new root node.
@@ -888,21 +890,17 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
if (error)
goto out;
- node = bp1->data;
- leaf = bp2->data;
+ node = bp1->b_addr;
+ leaf = bp2->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
/* both on-disk, don't endian-flip twice */
node->btree[0].hashval =
leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
node->btree[0].before = cpu_to_be32(blkno);
node->hdr.count = cpu_to_be16(1);
- xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
+ xfs_trans_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
error = 0;
out:
- if (bp1)
- xfs_da_buf_done(bp1);
- if (bp2)
- xfs_da_buf_done(bp2);
return(error);
}
@@ -916,12 +914,15 @@ out:
* or a leaf in a node attribute list.
*/
STATIC int
-xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
+xfs_attr_leaf_create(
+ xfs_da_args_t *args,
+ xfs_dablk_t blkno,
+ struct xfs_buf **bpp)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_hdr_t *hdr;
xfs_inode_t *dp;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
trace_xfs_attr_leaf_create(args);
@@ -933,7 +934,7 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
if (error)
return(error);
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
hdr = &leaf->hdr;
hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
@@ -947,7 +948,7 @@ xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
sizeof(xfs_attr_leaf_hdr_t));
- xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
+ xfs_trans_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);
*bpp = bp;
return(0);
@@ -1014,7 +1015,9 @@ xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
* Add a name to the leaf attribute list structure.
*/
int
-xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr_leaf_add(
+ struct xfs_buf *bp,
+ struct xfs_da_args *args)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_hdr_t *hdr;
@@ -1023,7 +1026,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
trace_xfs_attr_leaf_add(args);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT((args->index >= 0)
&& (args->index <= be16_to_cpu(leaf->hdr.count)));
@@ -1085,7 +1088,10 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
* Add a name to a leaf attribute list structure.
*/
STATIC int
-xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
+xfs_attr_leaf_add_work(
+ struct xfs_buf *bp,
+ xfs_da_args_t *args,
+ int mapindex)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_hdr_t *hdr;
@@ -1096,7 +1102,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
xfs_mount_t *mp;
int tmp, i;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
hdr = &leaf->hdr;
ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
@@ -1110,7 +1116,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
tmp = be16_to_cpu(hdr->count) - args->index;
tmp *= sizeof(xfs_attr_leaf_entry_t);
memmove((char *)(entry+1), (char *)entry, tmp);
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
}
be16_add_cpu(&hdr->count, 1);
@@ -1142,7 +1148,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
args->index2++;
}
}
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
ASSERT((args->index == 0) ||
(be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
@@ -1174,7 +1180,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
args->rmtblkno = 1;
args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
}
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
xfs_attr_leaf_entsize(leaf, args->index)));
@@ -1198,7 +1204,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
}
}
be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
return(0);
}
@@ -1207,7 +1213,9 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
* Garbage collect a leaf attribute list block by copying it to a new buffer.
*/
STATIC void
-xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
+xfs_attr_leaf_compact(
+ struct xfs_trans *trans,
+ struct xfs_buf *bp)
{
xfs_attr_leafblock_t *leaf_s, *leaf_d;
xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
@@ -1217,14 +1225,14 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
mp = trans->t_mountp;
tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
ASSERT(tmpbuffer != NULL);
- memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
- memset(bp->data, 0, XFS_LBSIZE(mp));
+ memcpy(tmpbuffer, bp->b_addr, XFS_LBSIZE(mp));
+ memset(bp->b_addr, 0, XFS_LBSIZE(mp));
/*
* Copy basic information
*/
leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
- leaf_d = bp->data;
+ leaf_d = bp->b_addr;
hdr_s = &leaf_s->hdr;
hdr_d = &leaf_d->hdr;
hdr_d->info = hdr_s->info; /* struct copy */
@@ -1247,7 +1255,7 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
*/
xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
be16_to_cpu(hdr_s->count), mp);
- xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
+ xfs_trans_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
kmem_free(tmpbuffer);
}
@@ -1279,8 +1287,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
*/
ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
args = state->args;
@@ -1298,8 +1306,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
tmp_blk = blk1;
blk1 = blk2;
blk2 = tmp_blk;
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
swap = 1;
}
hdr1 = &leaf1->hdr;
@@ -1346,8 +1354,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
leaf2, 0, count, state->mp);
- xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
- xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
+ xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
+ xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
} else if (count > be16_to_cpu(hdr1->count)) {
/*
* I assert that since all callers pass in an empty
@@ -1378,8 +1386,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
xfs_attr_leaf_moveents(leaf2, 0, leaf1,
be16_to_cpu(hdr1->count), count, state->mp);
- xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
- xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
+ xfs_trans_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
+ xfs_trans_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
}
/*
@@ -1448,8 +1456,8 @@ xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
/*
* Set up environment.
*/
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
hdr1 = &leaf1->hdr;
hdr2 = &leaf2->hdr;
foundit = 0;
@@ -1551,7 +1559,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
xfs_da_blkinfo_t *info;
int count, bytes, forward, error, retval, i;
xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
/*
* Check for the degenerate case of the block being over 50% full.
@@ -1559,7 +1567,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* to coalesce with a sibling.
*/
blk = &state->path.blk[ state->path.active-1 ];
- info = blk->bp->data;
+ info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
leaf = (xfs_attr_leafblock_t *)info;
count = be16_to_cpu(leaf->hdr.count);
@@ -1622,13 +1630,13 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
count = be16_to_cpu(leaf->hdr.count);
bytes = state->blocksize - (state->blocksize>>2);
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
count += be16_to_cpu(leaf->hdr.count);
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
bytes -= count * sizeof(xfs_attr_leaf_entry_t);
bytes -= sizeof(xfs_attr_leaf_hdr_t);
- xfs_da_brelse(state->args->trans, bp);
+ xfs_trans_brelse(state->args->trans, bp);
if (bytes >= 0)
break; /* fits with at least 25% to spare */
}
@@ -1666,7 +1674,9 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
* If two leaves are 37% full, when combined they will leave 25% free.
*/
int
-xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr_leaf_remove(
+ struct xfs_buf *bp,
+ xfs_da_args_t *args)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_hdr_t *hdr;
@@ -1676,7 +1686,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
int tablesize, tmp, i;
xfs_mount_t *mp;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
hdr = &leaf->hdr;
mp = args->trans->t_mountp;
@@ -1769,7 +1779,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
*/
memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize);
be16_add_cpu(&hdr->usedbytes, -entsize);
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
entsize));
@@ -1777,7 +1787,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
* sizeof(xfs_attr_leaf_entry_t);
memmove((char *)entry, (char *)(entry+1), tmp);
be16_add_cpu(&hdr->count, -1);
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
entry = &leaf->entries[be16_to_cpu(hdr->count)];
memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));
@@ -1807,7 +1817,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
} else {
hdr->holes = 1; /* mark as needing compaction */
}
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
/*
@@ -1840,8 +1850,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
mp = state->mp;
ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
- drop_leaf = drop_blk->bp->data;
- save_leaf = save_blk->bp->data;
+ drop_leaf = drop_blk->bp->b_addr;
+ save_leaf = save_blk->bp->b_addr;
ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
drop_hdr = &drop_leaf->hdr;
@@ -1906,7 +1916,7 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
kmem_free(tmpbuffer);
}
- xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
+ xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
state->blocksize - 1);
/*
@@ -1934,7 +1944,9 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
* Don't change the args->value unless we find the attribute.
*/
int
-xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr_leaf_lookup_int(
+ struct xfs_buf *bp,
+ xfs_da_args_t *args)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
@@ -1945,7 +1957,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
trace_xfs_attr_leaf_lookup(args);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count)
< (XFS_LBSIZE(args->dp->i_mount)/8));
@@ -2041,7 +2053,9 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
* list structure.
*/
int
-xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
+xfs_attr_leaf_getvalue(
+ struct xfs_buf *bp,
+ xfs_da_args_t *args)
{
int valuelen;
xfs_attr_leafblock_t *leaf;
@@ -2049,7 +2063,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
xfs_attr_leaf_name_local_t *name_loc;
xfs_attr_leaf_name_remote_t *name_rmt;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count)
< (XFS_LBSIZE(args->dp->i_mount)/8));
@@ -2247,12 +2261,14 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
* Return 0 unless leaf2 should go before leaf1.
*/
int
-xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
+xfs_attr_leaf_order(
+ struct xfs_buf *leaf1_bp,
+ struct xfs_buf *leaf2_bp)
{
xfs_attr_leafblock_t *leaf1, *leaf2;
- leaf1 = leaf1_bp->data;
- leaf2 = leaf2_bp->data;
+ leaf1 = leaf1_bp->b_addr;
+ leaf2 = leaf2_bp->b_addr;
ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) &&
(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)));
if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
@@ -2272,11 +2288,13 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
* Pick up the last hashvalue from a leaf block.
*/
xfs_dahash_t
-xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
+xfs_attr_leaf_lasthash(
+ struct xfs_buf *bp,
+ int *count)
{
xfs_attr_leafblock_t *leaf;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if (count)
*count = be16_to_cpu(leaf->hdr.count);
@@ -2337,7 +2355,9 @@ xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
* Copy out attribute list entries for attr_list(), for leaf attribute lists.
*/
int
-xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
+xfs_attr_leaf_list_int(
+ struct xfs_buf *bp,
+ xfs_attr_list_context_t *context)
{
attrlist_cursor_kern_t *cursor;
xfs_attr_leafblock_t *leaf;
@@ -2345,7 +2365,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
int retval, i;
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
cursor = context->cursor;
cursor->initted = 1;
@@ -2463,7 +2483,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
#ifdef DEBUG
xfs_attr_leaf_name_local_t *name_loc;
@@ -2482,7 +2502,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
}
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
ASSERT(args->index >= 0);
@@ -2505,7 +2525,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
#endif /* DEBUG */
entry->flags &= ~XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
if (args->rmtblkno) {
@@ -2513,10 +2533,9 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
name_rmt->valuelen = cpu_to_be32(args->valuelen);
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp);
/*
* Commit the flag value change and start the next trans in series.
@@ -2533,7 +2552,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
trace_xfs_attr_leaf_setflag(args);
@@ -2548,7 +2567,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
}
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
ASSERT(args->index >= 0);
@@ -2556,16 +2575,15 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
entry->flags |= XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
- xfs_da_log_buf(args->trans, bp,
+ xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp);
/*
* Commit the flag value change and start the next trans in series.
@@ -2586,7 +2604,7 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
xfs_attr_leafblock_t *leaf1, *leaf2;
xfs_attr_leaf_entry_t *entry1, *entry2;
xfs_attr_leaf_name_remote_t *name_rmt;
- xfs_dabuf_t *bp1, *bp2;
+ struct xfs_buf *bp1, *bp2;
int error;
#ifdef DEBUG
xfs_attr_leaf_name_local_t *name_loc;
@@ -2620,13 +2638,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
bp2 = bp1;
}
- leaf1 = bp1->data;
+ leaf1 = bp1->b_addr;
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
ASSERT(args->index >= 0);
entry1 = &leaf1->entries[ args->index ];
- leaf2 = bp2->data;
+ leaf2 = bp2->b_addr;
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
ASSERT(args->index2 >= 0);
@@ -2660,30 +2678,27 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
entry1->flags &= ~XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp1,
+ xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
if (args->rmtblkno) {
ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
name_rmt->valuelen = cpu_to_be32(args->valuelen);
- xfs_da_log_buf(args->trans, bp1,
+ xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
}
entry2->flags |= XFS_ATTR_INCOMPLETE;
- xfs_da_log_buf(args->trans, bp2,
+ xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
- xfs_da_log_buf(args->trans, bp2,
+ xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
}
- xfs_da_buf_done(bp1);
- if (bp1 != bp2)
- xfs_da_buf_done(bp2);
/*
* Commit the flag value change and start the next trans in series.
@@ -2706,7 +2721,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
{
xfs_da_blkinfo_t *info;
xfs_daddr_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
/*
@@ -2718,20 +2733,20 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
if (error)
return(error);
- blkno = xfs_da_blkno(bp);
+ blkno = XFS_BUF_ADDR(bp);
/*
* Invalidate the tree, even if the "tree" is only a single leaf block.
* This is a depth-first traversal!
*/
- info = bp->data;
+ info = bp->b_addr;
if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
error = xfs_attr_node_inactive(trans, dp, bp, 1);
} else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
error = xfs_attr_leaf_inactive(trans, dp, bp);
} else {
error = XFS_ERROR(EIO);
- xfs_da_brelse(*trans, bp);
+ xfs_trans_brelse(*trans, bp);
}
if (error)
return(error);
@@ -2742,7 +2757,7 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
if (error)
return(error);
- xfs_da_binval(*trans, bp); /* remove from cache */
+ xfs_trans_binval(*trans, bp); /* remove from cache */
/*
* Commit the invalidate and start the next transaction.
*/
@@ -2756,34 +2771,37 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
* We're doing a depth-first traversal in order to invalidate everything.
*/
STATIC int
-xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
- int level)
+xfs_attr_node_inactive(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp,
+ int level)
{
xfs_da_blkinfo_t *info;
xfs_da_intnode_t *node;
xfs_dablk_t child_fsb;
xfs_daddr_t parent_blkno, child_blkno;
int error, count, i;
- xfs_dabuf_t *child_bp;
+ struct xfs_buf *child_bp;
/*
* Since this code is recursive (gasp!) we must protect ourselves.
*/
if (level > XFS_DA_NODE_MAXDEPTH) {
- xfs_da_brelse(*trans, bp); /* no locks for later trans */
+ xfs_trans_brelse(*trans, bp); /* no locks for later trans */
return(XFS_ERROR(EIO));
}
- node = bp->data;
+ node = bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
- parent_blkno = xfs_da_blkno(bp); /* save for re-read later */
+ parent_blkno = XFS_BUF_ADDR(bp); /* save for re-read later */
count = be16_to_cpu(node->hdr.count);
if (!count) {
- xfs_da_brelse(*trans, bp);
+ xfs_trans_brelse(*trans, bp);
return(0);
}
child_fsb = be32_to_cpu(node->btree[0].before);
- xfs_da_brelse(*trans, bp); /* no locks for later trans */
+ xfs_trans_brelse(*trans, bp); /* no locks for later trans */
/*
* If this is the node level just above the leaves, simply loop
@@ -2803,12 +2821,12 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
return(error);
if (child_bp) {
/* save for re-read later */
- child_blkno = xfs_da_blkno(child_bp);
+ child_blkno = XFS_BUF_ADDR(child_bp);
/*
* Invalidate the subtree, however we have to.
*/
- info = child_bp->data;
+ info = child_bp->b_addr;
if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
error = xfs_attr_node_inactive(trans, dp,
child_bp, level+1);
@@ -2817,7 +2835,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
child_bp);
} else {
error = XFS_ERROR(EIO);
- xfs_da_brelse(*trans, child_bp);
+ xfs_trans_brelse(*trans, child_bp);
}
if (error)
return(error);
@@ -2830,7 +2848,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
&child_bp, XFS_ATTR_FORK);
if (error)
return(error);
- xfs_da_binval(*trans, child_bp);
+ xfs_trans_binval(*trans, child_bp);
}
/*
@@ -2843,7 +2861,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
if (error)
return(error);
child_fsb = be32_to_cpu(node->btree[i+1].before);
- xfs_da_brelse(*trans, bp);
+ xfs_trans_brelse(*trans, bp);
}
/*
* Atomically commit the whole invalidate stuff.
@@ -2863,7 +2881,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
* caught holding something that the logging code wants to flush to disk.
*/
STATIC int
-xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
+xfs_attr_leaf_inactive(
+ struct xfs_trans **trans,
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
{
xfs_attr_leafblock_t *leaf;
xfs_attr_leaf_entry_t *entry;
@@ -2871,7 +2892,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
xfs_attr_inactive_list_t *list, *lp;
int error, count, size, tmp, i;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
/*
@@ -2892,7 +2913,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
* If there are no "remote" values, we're done.
*/
if (count == 0) {
- xfs_da_brelse(*trans, bp);
+ xfs_trans_brelse(*trans, bp);
return(0);
}
@@ -2919,7 +2940,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
}
}
}
- xfs_da_brelse(*trans, bp); /* unlock for trans. in freextent() */
+ xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */
/*
* Invalidate each of the "remote" value extents.
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 9c7d22fdcf4d..dea17722945e 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -31,7 +31,6 @@
struct attrlist;
struct attrlist_cursor_kern;
struct xfs_attr_list_context;
-struct xfs_dabuf;
struct xfs_da_args;
struct xfs_da_state;
struct xfs_da_state_blk;
@@ -215,7 +214,7 @@ int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
int xfs_attr_shortform_remove(struct xfs_da_args *args);
int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
-int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp);
+int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
@@ -223,7 +222,7 @@ int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
* Internal routines when attribute fork size == XFS_LBSIZE(mp).
*/
int xfs_attr_leaf_to_node(struct xfs_da_args *args);
-int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp,
+int xfs_attr_leaf_to_shortform(struct xfs_buf *bp,
struct xfs_da_args *args, int forkoff);
int xfs_attr_leaf_clearflag(struct xfs_da_args *args);
int xfs_attr_leaf_setflag(struct xfs_da_args *args);
@@ -235,14 +234,14 @@ int xfs_attr_leaf_flipflags(xfs_da_args_t *args);
int xfs_attr_leaf_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk);
-int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf,
+int xfs_attr_leaf_lookup_int(struct xfs_buf *leaf,
struct xfs_da_args *args);
-int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args);
-int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer,
+int xfs_attr_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
+int xfs_attr_leaf_add(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer,
+int xfs_attr_leaf_remove(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
-int xfs_attr_leaf_list_int(struct xfs_dabuf *bp,
+int xfs_attr_leaf_list_int(struct xfs_buf *bp,
struct xfs_attr_list_context *context);
/*
@@ -257,9 +256,9 @@ int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
/*
* Utility routines.
*/
-xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count);
-int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
- struct xfs_dabuf *leaf2_bp);
+xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
+int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
+ struct xfs_buf *leaf2_bp);
int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
int *local);
#endif /* __XFS_ATTR_LEAF_H__ */
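The xfs_attr_leaf changes above follow one mechanical pattern: callers that used to hold an xfs_dabuf_t now hold a struct xfs_buf directly, reach the block contents through b_addr instead of ->data, log dirty ranges with xfs_trans_log_buf(), release via xfs_trans_brelse(), and drop the xfs_da_buf_done() calls entirely. A minimal sketch of the post-conversion calling pattern (the helper name is invented for illustration; only calls already visible in this diff are used):

static int
example_read_attr_leaf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp)
{
	struct xfs_buf		*bp;
	xfs_attr_leafblock_t	*leaf;
	int			error;

	/* read attr fork block 0; -1 means no pre-mapped disk address */
	error = xfs_da_read_buf(tp, dp, 0, -1, &bp, XFS_ATTR_FORK);
	if (error)
		return error;

	/* block contents are reached through b_addr, not the old dabuf ->data */
	leaf = bp->b_addr;
	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));

	/*
	 * A read-only caller simply hands the buffer back to the transaction.
	 * A modifying caller would instead dirty it with xfs_trans_log_buf()
	 * and leave it attached; there is no xfs_da_buf_done() any more.
	 */
	xfs_trans_brelse(tp, bp);
	return 0;
}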
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 58b815ec8c91..848ffa77707b 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5517,7 +5517,7 @@ xfs_getbmap(
if (xfs_get_extsz_hint(ip) ||
ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
prealloced = 1;
- fixlen = XFS_MAXIOFFSET(mp);
+ fixlen = mp->m_super->s_maxbytes;
} else {
prealloced = 0;
fixlen = XFS_ISIZE(ip);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 269b35c084da..d7a9dd735e1e 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -164,14 +164,49 @@ xfs_buf_stale(
ASSERT(atomic_read(&bp->b_hold) >= 1);
}
+static int
+xfs_buf_get_maps(
+ struct xfs_buf *bp,
+ int map_count)
+{
+ ASSERT(bp->b_maps == NULL);
+ bp->b_map_count = map_count;
+
+ if (map_count == 1) {
+ bp->b_maps = &bp->b_map;
+ return 0;
+ }
+
+ bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
+ KM_NOFS);
+ if (!bp->b_maps)
+ return ENOMEM;
+ return 0;
+}
+
+/*
+ * Frees b_maps if it was allocated separately from the embedded b_map.
+ */
+static void
+xfs_buf_free_maps(
+ struct xfs_buf *bp)
+{
+ if (bp->b_maps != &bp->b_map) {
+ kmem_free(bp->b_maps);
+ bp->b_maps = NULL;
+ }
+}
+
struct xfs_buf *
-xfs_buf_alloc(
+_xfs_buf_alloc(
struct xfs_buftarg *target,
- xfs_daddr_t blkno,
- size_t numblks,
+ struct xfs_buf_map *map,
+ int nmaps,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
+ int error;
+ int i;
bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
if (unlikely(!bp))
@@ -192,16 +227,28 @@ xfs_buf_alloc(
sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
+ bp->b_flags = flags;
/*
* Set length and io_length to the same value initially.
* I/O routines should use io_length, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
- bp->b_length = numblks;
- bp->b_io_length = numblks;
- bp->b_flags = flags;
- bp->b_bn = blkno;
+ error = xfs_buf_get_maps(bp, nmaps);
+ if (error) {
+ kmem_zone_free(xfs_buf_zone, bp);
+ return NULL;
+ }
+
+ bp->b_bn = map[0].bm_bn;
+ bp->b_length = 0;
+ for (i = 0; i < nmaps; i++) {
+ bp->b_maps[i].bm_bn = map[i].bm_bn;
+ bp->b_maps[i].bm_len = map[i].bm_len;
+ bp->b_length += map[i].bm_len;
+ }
+ bp->b_io_length = bp->b_length;
+
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
@@ -280,6 +327,7 @@ xfs_buf_free(
} else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
+ xfs_buf_free_maps(bp);
kmem_zone_free(xfs_buf_zone, bp);
}
@@ -327,8 +375,9 @@ xfs_buf_allocate_memory(
}
use_alloc_page:
- start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
+ end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
page_count = end - start;
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
@@ -425,8 +474,8 @@ _xfs_buf_map_pages(
xfs_buf_t *
_xfs_buf_find(
struct xfs_buftarg *btp,
- xfs_daddr_t blkno,
- size_t numblks,
+ struct xfs_buf_map *map,
+ int nmaps,
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
@@ -435,7 +484,12 @@ _xfs_buf_find(
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
+ xfs_daddr_t blkno = map[0].bm_bn;
+ int numblks = 0;
+ int i;
+ for (i = 0; i < nmaps; i++)
+ numblks += map[i].bm_len;
numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
@@ -527,31 +581,31 @@ found:
* more hits than misses.
*/
struct xfs_buf *
-xfs_buf_get(
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- size_t numblks,
+xfs_buf_get_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
- bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
+ bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
if (likely(bp))
goto found;
- new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
+ new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
if (unlikely(!new_bp))
return NULL;
error = xfs_buf_allocate_memory(new_bp, flags);
if (error) {
- kmem_zone_free(xfs_buf_zone, new_bp);
+ xfs_buf_free(new_bp);
return NULL;
}
- bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
+ bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
if (!bp) {
xfs_buf_free(new_bp);
return NULL;
@@ -560,8 +614,6 @@ xfs_buf_get(
if (bp != new_bp)
xfs_buf_free(new_bp);
- bp->b_io_length = bp->b_length;
-
found:
if (!bp->b_addr) {
error = _xfs_buf_map_pages(bp, flags);
@@ -584,7 +636,7 @@ _xfs_buf_read(
xfs_buf_flags_t flags)
{
ASSERT(!(flags & XBF_WRITE));
- ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+ ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
@@ -596,17 +648,17 @@ _xfs_buf_read(
}
xfs_buf_t *
-xfs_buf_read(
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- size_t numblks,
+xfs_buf_read_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
xfs_buf_flags_t flags)
{
- xfs_buf_t *bp;
+ struct xfs_buf *bp;
flags |= XBF_READ;
- bp = xfs_buf_get(target, blkno, numblks, flags);
+ bp = xfs_buf_get_map(target, map, nmaps, flags);
if (bp) {
trace_xfs_buf_read(bp, flags, _RET_IP_);
@@ -634,15 +686,15 @@ xfs_buf_read(
* safe manner.
*/
void
-xfs_buf_readahead(
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- size_t numblks)
+xfs_buf_readahead_map(
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps)
{
if (bdi_read_congested(target->bt_bdi))
return;
- xfs_buf_read(target, blkno, numblks,
+ xfs_buf_read_map(target, map, nmaps,
XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
}
@@ -665,8 +717,10 @@ xfs_buf_read_uncached(
return NULL;
/* set up the buffer for a read IO */
- XFS_BUF_SET_ADDR(bp, daddr);
- XFS_BUF_READ(bp);
+ ASSERT(bp->b_map_count == 1);
+ bp->b_bn = daddr;
+ bp->b_maps[0].bm_bn = daddr;
+ bp->b_flags |= XBF_READ;
xfsbdstrat(target->bt_mount, bp);
error = xfs_buf_iowait(bp);
@@ -694,7 +748,11 @@ xfs_buf_set_empty(
bp->b_addr = NULL;
bp->b_length = numblks;
bp->b_io_length = numblks;
+
+ ASSERT(bp->b_map_count == 1);
bp->b_bn = XFS_BUF_DADDR_NULL;
+ bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
+ bp->b_maps[0].bm_len = bp->b_length;
}
static inline struct page *
@@ -758,9 +816,10 @@ xfs_buf_get_uncached(
{
unsigned long page_count;
int error, i;
- xfs_buf_t *bp;
+ struct xfs_buf *bp;
+ DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
- bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
+ bp = _xfs_buf_alloc(target, &map, 1, 0);
if (unlikely(bp == NULL))
goto fail;
@@ -791,6 +850,7 @@ xfs_buf_get_uncached(
__free_page(bp->b_pages[i]);
_xfs_buf_free_pages(bp);
fail_free_buf:
+ xfs_buf_free_maps(bp);
kmem_zone_free(xfs_buf_zone, bp);
fail:
return NULL;
@@ -1144,36 +1204,39 @@ xfs_buf_bio_end_io(
bio_put(bio);
}
-STATIC void
-_xfs_buf_ioapply(
- xfs_buf_t *bp)
+static void
+xfs_buf_ioapply_map(
+ struct xfs_buf *bp,
+ int map,
+ int *buf_offset,
+ int *count,
+ int rw)
{
- int rw, map_i, total_nr_pages, nr_pages;
- struct bio *bio;
- int offset = bp->b_offset;
- int size = BBTOB(bp->b_io_length);
- sector_t sector = bp->b_bn;
+ int page_index;
+ int total_nr_pages = bp->b_page_count;
+ int nr_pages;
+ struct bio *bio;
+ sector_t sector = bp->b_maps[map].bm_bn;
+ int size;
+ int offset;
total_nr_pages = bp->b_page_count;
- map_i = 0;
- if (bp->b_flags & XBF_WRITE) {
- if (bp->b_flags & XBF_SYNCIO)
- rw = WRITE_SYNC;
- else
- rw = WRITE;
- if (bp->b_flags & XBF_FUA)
- rw |= REQ_FUA;
- if (bp->b_flags & XBF_FLUSH)
- rw |= REQ_FLUSH;
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- rw = READA;
- } else {
- rw = READ;
+ /* skip the pages in the buffer before the start offset */
+ page_index = 0;
+ offset = *buf_offset;
+ while (offset >= PAGE_SIZE) {
+ page_index++;
+ offset -= PAGE_SIZE;
}
- /* we only use the buffer cache for meta-data */
- rw |= REQ_META;
+ /*
+ * Limit the IO size to the length of the current vector, and update the
+ * remaining IO count for the next time around.
+ */
+ size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
+ *count -= size;
+ *buf_offset += size;
next_chunk:
atomic_inc(&bp->b_io_remaining);
@@ -1188,13 +1251,14 @@ next_chunk:
bio->bi_private = bp;
- for (; size && nr_pages; nr_pages--, map_i++) {
+ for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
if (nbytes > size)
nbytes = size;
- rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
+ rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
+ offset);
if (rbytes < nbytes)
break;
@@ -1216,6 +1280,54 @@ next_chunk:
xfs_buf_ioerror(bp, EIO);
bio_put(bio);
}
+
+}
+
+STATIC void
+_xfs_buf_ioapply(
+ struct xfs_buf *bp)
+{
+ struct blk_plug plug;
+ int rw;
+ int offset;
+ int size;
+ int i;
+
+ if (bp->b_flags & XBF_WRITE) {
+ if (bp->b_flags & XBF_SYNCIO)
+ rw = WRITE_SYNC;
+ else
+ rw = WRITE;
+ if (bp->b_flags & XBF_FUA)
+ rw |= REQ_FUA;
+ if (bp->b_flags & XBF_FLUSH)
+ rw |= REQ_FLUSH;
+ } else if (bp->b_flags & XBF_READ_AHEAD) {
+ rw = READA;
+ } else {
+ rw = READ;
+ }
+
+ /* we only use the buffer cache for meta-data */
+ rw |= REQ_META;
+
+ /*
+ * Walk all the vectors issuing IO on them. Set up the initial offset
+ * into the buffer and the desired IO size before we start -
+ * xfs_buf_ioapply_map() will modify them appropriately for each
+ * subsequent call.
+ */
+ offset = bp->b_offset;
+ size = BBTOB(bp->b_io_length);
+ blk_start_plug(&plug);
+ for (i = 0; i < bp->b_map_count; i++) {
+ xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+ if (bp->b_error)
+ break;
+ if (size <= 0)
+ break; /* all done */
+ }
+ blk_finish_plug(&plug);
}
void
@@ -1557,7 +1669,7 @@ xfs_buf_cmp(
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
xfs_daddr_t diff;
- diff = ap->b_bn - bp->b_bn;
+ diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
if (diff < 0)
return -1;
if (diff > 0)
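The new xfs_buf_get_maps()/xfs_buf_free_maps() pair above uses a small-object optimisation that the buf log item code later repeats: a single-map buffer points b_maps at the embedded b_map and allocates nothing, while only genuine compound buffers pay for a heap array. A stripped-down sketch of the idiom, using a made-up structure name and nothing XFS-specific beyond the kmem helpers:

struct demo_map {
	xfs_daddr_t	start;
	int		len;
};

struct demo {
	struct demo_map	*maps;		/* points at 'map' or a heap array */
	struct demo_map	map;		/* embedded single-entry fast path */
	int		map_count;
};

static int
demo_get_maps(struct demo *d, int count)
{
	d->map_count = count;
	if (count == 1) {
		d->maps = &d->map;	/* common case: no allocation at all */
		return 0;
	}
	d->maps = kmem_zalloc(count * sizeof(*d->maps), KM_NOFS);
	return d->maps ? 0 : ENOMEM;
}

static void
demo_put_maps(struct demo *d)
{
	if (d->maps != &d->map) {	/* only free what was heap allocated */
		kmem_free(d->maps);
		d->maps = NULL;
	}
}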
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 79344c48008e..d03b73b9604e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -58,6 +58,7 @@ typedef enum {
#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
+#define _XBF_COMPOUND (1 << 23)/* compound buffer */
typedef unsigned int xfs_buf_flags_t;
@@ -75,7 +76,8 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
- { _XBF_DELWRI_Q, "DELWRI_Q" }
+ { _XBF_DELWRI_Q, "DELWRI_Q" }, \
+ { _XBF_COMPOUND, "COMPOUND" }
typedef struct xfs_buftarg {
dev_t bt_dev;
@@ -98,6 +100,14 @@ typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
#define XB_PAGES 2
+struct xfs_buf_map {
+ xfs_daddr_t bm_bn; /* block number for I/O */
+ int bm_len; /* size of I/O */
+};
+
+#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
+ struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
+
typedef struct xfs_buf {
/*
* first cacheline holds all the fields needed for an uncontended cache
@@ -107,7 +117,7 @@ typedef struct xfs_buf {
* fast-path on locking.
*/
struct rb_node b_rbnode; /* rbtree node */
- xfs_daddr_t b_bn; /* block number for I/O */
+ xfs_daddr_t b_bn; /* block number of buffer */
int b_length; /* size of buffer in BBs */
atomic_t b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */
@@ -127,12 +137,16 @@ typedef struct xfs_buf {
struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
+ struct xfs_buf_map *b_maps; /* compound buffer map */
+ struct xfs_buf_map b_map; /* inline compound buffer map */
+ int b_map_count;
int b_io_length; /* IO size in BBs */
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
unsigned short b_error; /* error code on I/O */
+
#ifdef XFS_BUF_LOCK_TRACKING
int b_last_holder;
#endif
@@ -140,22 +154,78 @@ typedef struct xfs_buf {
/* Finding and Reading Buffers */
-struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
- size_t numblks, xfs_buf_flags_t flags,
- struct xfs_buf *new_bp);
-#define xfs_incore(buftarg,blkno,len,lockit) \
- _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
-
-struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
- size_t numblks, xfs_buf_flags_t flags);
-struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno,
- size_t numblks, xfs_buf_flags_t flags);
-void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
- size_t numblks);
+struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags, struct xfs_buf *new_bp);
+
+static inline struct xfs_buf *
+xfs_incore(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return _xfs_buf_find(target, &map, 1, flags, NULL);
+}
+
+struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags);
+
+static inline struct xfs_buf *
+xfs_buf_alloc(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return _xfs_buf_alloc(target, &map, 1, flags);
+}
+
+struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags);
+struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags);
+void xfs_buf_readahead_map(struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps);
+
+static inline struct xfs_buf *
+xfs_buf_get(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_get_map(target, &map, 1, flags);
+}
+
+static inline struct xfs_buf *
+xfs_buf_read(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks,
+ xfs_buf_flags_t flags)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_read_map(target, &map, 1, flags);
+}
+
+static inline void
+xfs_buf_readahead(
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ size_t numblks)
+{
+ DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
+ return xfs_buf_readahead_map(target, &map, 1);
+}
struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
-struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
- size_t numblks, xfs_buf_flags_t flags);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
@@ -232,8 +302,18 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
-#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
-#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
+/*
+ * These macros use the IO block map rather than b_bn. b_bn is now really
+ * just for the buffer cache index for cached buffers. As IO does not use b_bn
+ * anymore, uncached buffers do not use b_bn at all and hence must modify the IO
+ * map directly. Uncached buffers are not allowed to be discontiguous, so this
+ * is safe to do.
+ *
+ * In future, uncached buffers will pass the block number directly to the io
+ * request function and hence these macros will go away at that point.
+ */
+#define XFS_BUF_ADDR(bp) ((bp)->b_map.bm_bn)
+#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
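With the header changes above, the *_map entry points take an array of xfs_buf_map, and the old single-extent calls survive as inline wrappers built on DEFINE_SINGLE_BUF_MAP. A hedged sketch of how a caller might ask for a two-extent compound buffer (block numbers and lengths are invented for illustration; only functions declared above are used):

static struct xfs_buf *
example_read_compound(struct xfs_buftarg *target)
{
	/* two discontiguous disk extents presented as one logical buffer */
	struct xfs_buf_map map[2] = {
		{ .bm_bn = 1024, .bm_len = 8 },	/* 8 BBs at daddr 1024 */
		{ .bm_bn = 4096, .bm_len = 8 },	/* 8 BBs at daddr 4096 */
	};

	return xfs_buf_read_map(target, map, 2, XBF_TRYLOCK);
}

The single-block case degenerates to xfs_buf_read(target, blkno, numblks, flags), which expands to the same call with nmaps == 1.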
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index d9e451115f98..a8d0ed911196 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -153,33 +153,25 @@ STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp);
* If the XFS_BLI_STALE flag has been set, then log nothing.
*/
STATIC uint
-xfs_buf_item_size(
- struct xfs_log_item *lip)
+xfs_buf_item_size_segment(
+ struct xfs_buf_log_item *bip,
+ struct xfs_buf_log_format *blfp)
{
- struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
uint nvecs;
int next_bit;
int last_bit;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- if (bip->bli_flags & XFS_BLI_STALE) {
- /*
- * The buffer is stale, so all we need to log
- * is the buf log format structure with the
- * cancel flag in it.
- */
- trace_xfs_buf_item_size_stale(bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
- return 1;
- }
+ last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+ if (last_bit == -1)
+ return 0;
+
+ /*
+ * initial count for a dirty buffer is 2 vectors - the format structure
+ * and the first dirty region.
+ */
+ nvecs = 2;
- ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
- nvecs = 1;
- last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size, 0);
- ASSERT(last_bit != -1);
- nvecs++;
while (last_bit != -1) {
/*
* This takes the bit number to start looking from and
@@ -187,16 +179,15 @@ xfs_buf_item_size(
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
- next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size,
- last_bit + 1);
+ next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+ last_bit + 1);
/*
* If we run out of bits, leave the loop,
* else if we find a new set of bits bump the number of vecs,
* else keep scanning the current set of bits.
*/
if (next_bit == -1) {
- last_bit = -1;
+ break;
} else if (next_bit != last_bit + 1) {
last_bit = next_bit;
nvecs++;
@@ -210,22 +201,73 @@ xfs_buf_item_size(
}
}
- trace_xfs_buf_item_size(bip);
return nvecs;
}
/*
- * This is called to fill in the vector of log iovecs for the
- * given log buf item. It fills the first entry with a buf log
- * format structure, and the rest point to contiguous chunks
- * within the buffer.
+ * This returns the number of log iovecs needed to log the given buf log item.
+ *
+ * It calculates this as 1 iovec for the buf log format structure and 1 for each
+ * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged
+ * in a single iovec.
+ *
+ * Discontiguous buffers need a format structure per region that is being
+ * logged. This makes the changes in the buffer appear to log recovery as though
+ * they came from separate buffers, just like would occur if multiple buffers
+ * were used instead of a single discontiguous buffer. This enables
+ * discontiguous buffers to be in-memory constructs, completely transparent to
+ * what ends up on disk.
+ *
+ * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
+ * format structures.
*/
-STATIC void
-xfs_buf_item_format(
- struct xfs_log_item *lip,
- struct xfs_log_iovec *vecp)
+STATIC uint
+xfs_buf_item_size(
+ struct xfs_log_item *lip)
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ uint nvecs;
+ int i;
+
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ if (bip->bli_flags & XFS_BLI_STALE) {
+ /*
+ * The buffer is stale, so all we need to log
+ * is the buf log format structure with the
+ * cancel flag in it.
+ */
+ trace_xfs_buf_item_size_stale(bip);
+ ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
+ return bip->bli_format_count;
+ }
+
+ ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
+
+ /*
+ * the vector count is based on the number of buffer vectors we have
+ * dirty bits in. This will only be greater than one when we have a
+ * compound buffer with more than one segment dirty. Hence for compound
+ * buffers we need to track which segment the dirty bits correspond to,
+ * and when we move from one segment to the next increment the vector
+ * count for the extra buf log format structure that will need to be
+ * written.
+ */
+ nvecs = 0;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ nvecs += xfs_buf_item_size_segment(bip, &bip->bli_formats[i]);
+ }
+
+ trace_xfs_buf_item_size(bip);
+ return nvecs;
+}
+
+static struct xfs_log_iovec *
+xfs_buf_item_format_segment(
+ struct xfs_buf_log_item *bip,
+ struct xfs_log_iovec *vecp,
+ uint offset,
+ struct xfs_buf_log_format *blfp)
+{
struct xfs_buf *bp = bip->bli_buf;
uint base_size;
uint nvecs;
@@ -235,40 +277,22 @@ xfs_buf_item_format(
uint nbits;
uint buffer_offset;
- ASSERT(atomic_read(&bip->bli_refcount) > 0);
- ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
- (bip->bli_flags & XFS_BLI_STALE));
+ /* copy the flags across from the base format item */
+ blfp->blf_flags = bip->bli_format.blf_flags;
/*
- * The size of the base structure is the size of the
- * declared structure plus the space for the extra words
- * of the bitmap. We subtract one from the map size, because
- * the first element of the bitmap is accounted for in the
- * size of the base structure.
+ * Base size is the actual size of the ondisk structure - it reflects
+ * the actual size of the dirty bitmap rather than the size of the in
+ * memory structure.
*/
- base_size =
- (uint)(sizeof(xfs_buf_log_format_t) +
- ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
- vecp->i_addr = &bip->bli_format;
+ base_size = offsetof(struct xfs_buf_log_format, blf_data_map) +
+ (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
+ vecp->i_addr = blfp;
vecp->i_len = base_size;
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
nvecs = 1;
- /*
- * If it is an inode buffer, transfer the in-memory state to the
- * format flags and clear the in-memory state. We do not transfer
- * this state if the inode buffer allocation has not yet been committed
- * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
- * correct replay of the inode allocation.
- */
- if (bip->bli_flags & XFS_BLI_INODE_BUF) {
- if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
- xfs_log_item_in_current_chkpt(lip)))
- bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
- bip->bli_flags &= ~XFS_BLI_INODE_BUF;
- }
-
if (bip->bli_flags & XFS_BLI_STALE) {
/*
* The buffer is stale, so all we need to log
@@ -276,16 +300,15 @@ xfs_buf_item_format(
* cancel flag in it.
*/
trace_xfs_buf_item_format_stale(bip);
- ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
- bip->bli_format.blf_size = nvecs;
- return;
+ ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
+ blfp->blf_size = nvecs;
+ return vecp;
}
/*
* Fill in an iovec for each set of contiguous chunks.
*/
- first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size, 0);
+ first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
ASSERT(first_bit != -1);
last_bit = first_bit;
nbits = 1;
@@ -296,9 +319,8 @@ xfs_buf_item_format(
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
- next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
- bip->bli_format.blf_map_size,
- (uint)last_bit + 1);
+ next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+ (uint)last_bit + 1);
/*
* If we run out of bits fill in the last iovec and get
* out of the loop.
@@ -309,14 +331,14 @@ xfs_buf_item_format(
* keep counting and scanning.
*/
if (next_bit == -1) {
- buffer_offset = first_bit * XFS_BLF_CHUNK;
+ buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
break;
} else if (next_bit != last_bit + 1) {
- buffer_offset = first_bit * XFS_BLF_CHUNK;
+ buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
@@ -325,14 +347,17 @@ xfs_buf_item_format(
first_bit = next_bit;
last_bit = next_bit;
nbits = 1;
- } else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
- (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
+ } else if (xfs_buf_offset(bp, offset +
+ (next_bit << XFS_BLF_SHIFT)) !=
+ (xfs_buf_offset(bp, offset +
+ (last_bit << XFS_BLF_SHIFT)) +
XFS_BLF_CHUNK)) {
- buffer_offset = first_bit * XFS_BLF_CHUNK;
+ buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
-/* You would think we need to bump the nvecs here too, but we do not
+/*
+ * You would think we need to bump the nvecs here too, but we do not
* this number is used by recovery, and it gets confused by the boundary
* split here
* nvecs++;
@@ -347,6 +372,48 @@ xfs_buf_item_format(
}
}
bip->bli_format.blf_size = nvecs;
+ return vecp;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given log buf item. It fills the first entry with a buf log
+ * format structure, and the rest point to contiguous chunks
+ * within the buffer.
+ */
+STATIC void
+xfs_buf_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_iovec *vecp)
+{
+ struct xfs_buf_log_item *bip = BUF_ITEM(lip);
+ struct xfs_buf *bp = bip->bli_buf;
+ uint offset = 0;
+ int i;
+
+ ASSERT(atomic_read(&bip->bli_refcount) > 0);
+ ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
+ (bip->bli_flags & XFS_BLI_STALE));
+
+ /*
+ * If it is an inode buffer, transfer the in-memory state to the
+ * format flags and clear the in-memory state. We do not transfer
+ * this state if the inode buffer allocation has not yet been committed
+ * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
+ * correct replay of the inode allocation.
+ */
+ if (bip->bli_flags & XFS_BLI_INODE_BUF) {
+ if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
+ xfs_log_item_in_current_chkpt(lip)))
+ bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
+ bip->bli_flags &= ~XFS_BLI_INODE_BUF;
+ }
+
+ for (i = 0; i < bip->bli_format_count; i++) {
+ vecp = xfs_buf_item_format_segment(bip, vecp, offset,
+ &bip->bli_formats[i]);
+ offset += bp->b_maps[i].bm_len;
+ }
/*
* Check to make sure everything is consistent.
@@ -622,6 +689,35 @@ static const struct xfs_item_ops xfs_buf_item_ops = {
.iop_committing = xfs_buf_item_committing
};
+STATIC int
+xfs_buf_item_get_format(
+ struct xfs_buf_log_item *bip,
+ int count)
+{
+ ASSERT(bip->bli_formats == NULL);
+ bip->bli_format_count = count;
+
+ if (count == 1) {
+ bip->bli_formats = &bip->bli_format;
+ return 0;
+ }
+
+ bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
+ KM_SLEEP);
+ if (!bip->bli_formats)
+ return ENOMEM;
+ return 0;
+}
+
+STATIC void
+xfs_buf_item_free_format(
+ struct xfs_buf_log_item *bip)
+{
+ if (bip->bli_formats != &bip->bli_format) {
+ kmem_free(bip->bli_formats);
+ bip->bli_formats = NULL;
+ }
+}
/*
* Allocate a new buf log item to go with the given buffer.
@@ -639,6 +735,8 @@ xfs_buf_item_init(
xfs_buf_log_item_t *bip;
int chunks;
int map_size;
+ int error;
+ int i;
/*
* Check to see if there is already a buf log item for
@@ -650,25 +748,33 @@ xfs_buf_item_init(
if (lip != NULL && lip->li_type == XFS_LI_BUF)
return;
- /*
- * chunks is the number of XFS_BLF_CHUNK size pieces
- * the buffer can be divided into. Make sure not to
- * truncate any pieces. map_size is the size of the
- * bitmap needed to describe the chunks of the buffer.
- */
- chunks = (int)((BBTOB(bp->b_length) + (XFS_BLF_CHUNK - 1)) >>
- XFS_BLF_SHIFT);
- map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
-
- bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
- KM_SLEEP);
+ bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
xfs_buf_hold(bp);
- bip->bli_format.blf_type = XFS_LI_BUF;
- bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
- bip->bli_format.blf_len = (ushort)bp->b_length;
- bip->bli_format.blf_map_size = map_size;
+
+ /*
+ * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
+ * can be divided into. Make sure not to truncate any pieces.
+ * map_size is the size of the bitmap needed to describe the
+ * chunks of the buffer.
+ *
+ * Discontiguous buffer support follows the layout of the underlying
+ * buffer. This makes the implementation as simple as possible.
+ */
+ error = xfs_buf_item_get_format(bip, bp->b_map_count);
+ ASSERT(error == 0);
+
+ for (i = 0; i < bip->bli_format_count; i++) {
+ chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
+ XFS_BLF_CHUNK);
+ map_size = DIV_ROUND_UP(chunks, NBWORD);
+
+ bip->bli_formats[i].blf_type = XFS_LI_BUF;
+ bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
+ bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
+ bip->bli_formats[i].blf_map_size = map_size;
+ }
#ifdef XFS_TRANS_DEBUG
/*
@@ -699,10 +805,11 @@ xfs_buf_item_init(
* item's bitmap.
*/
void
-xfs_buf_item_log(
- xfs_buf_log_item_t *bip,
+xfs_buf_item_log_segment(
+ struct xfs_buf_log_item *bip,
uint first,
- uint last)
+ uint last,
+ uint *map)
{
uint first_bit;
uint last_bit;
@@ -715,12 +822,6 @@ xfs_buf_item_log(
uint mask;
/*
- * Mark the item as having some dirty data for
- * quick reference in xfs_buf_item_dirty.
- */
- bip->bli_flags |= XFS_BLI_DIRTY;
-
- /*
* Convert byte offsets to bit numbers.
*/
first_bit = first >> XFS_BLF_SHIFT;
@@ -736,7 +837,7 @@ xfs_buf_item_log(
* to set a bit in.
*/
word_num = first_bit >> BIT_TO_WORD_SHIFT;
- wordp = &(bip->bli_format.blf_data_map[word_num]);
+ wordp = &map[word_num];
/*
* Calculate the starting bit in the first word.
@@ -783,6 +884,51 @@ xfs_buf_item_log(
xfs_buf_item_log_debug(bip, first, last);
}
+/*
+ * Mark bytes first through last inclusive as dirty in the buf
+ * item's bitmap.
+ */
+void
+xfs_buf_item_log(
+ xfs_buf_log_item_t *bip,
+ uint first,
+ uint last)
+{
+ int i;
+ uint start;
+ uint end;
+ struct xfs_buf *bp = bip->bli_buf;
+
+ /*
+ * Mark the item as having some dirty data for
+ * quick reference in xfs_buf_item_dirty.
+ */
+ bip->bli_flags |= XFS_BLI_DIRTY;
+
+ /*
+ * walk each buffer segment and mark them dirty appropriately.
+ */
+ start = 0;
+ for (i = 0; i < bip->bli_format_count; i++) {
+ if (start > last)
+ break;
+ end = start + BBTOB(bp->b_maps[i].bm_len);
+ if (first > end) {
+ start += BBTOB(bp->b_maps[i].bm_len);
+ continue;
+ }
+ if (first < start)
+ first = start;
+ if (end > last)
+ end = last;
+
+ xfs_buf_item_log_segment(bip, first, end,
+ &bip->bli_formats[i].blf_data_map[0]);
+
+ start += bp->b_maps[i].bm_len;
+ }
+}
+
/*
* Return 1 if the buffer has some data that has been logged (at any
@@ -804,6 +950,7 @@ xfs_buf_item_free(
kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */
+ xfs_buf_item_free_format(bip);
kmem_zone_free(xfs_buf_item_zone, bip);
}
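xfs_buf_item_size_segment() above charges one iovec for a segment's format header plus one per contiguous run of dirty chunks in its bitmap; a fully clean segment contributes nothing. Stripped of the xfs_next_bit() helper, the run counting amounts to the following standalone sketch (not the kernel code, just the shape of the calculation):

/* count contiguous runs of set bits in a chunk-dirty bitmap */
static int
count_dirty_runs(const unsigned int *map, int nbits)
{
	int	i;
	int	runs = 0;
	int	in_run = 0;

	for (i = 0; i < nbits; i++) {
		int set = (map[i / NBWORD] >> (i % NBWORD)) & 1;

		if (set && !in_run)
			runs++;		/* a new dirty region starts here */
		in_run = set;
	}
	/* a dirty segment needs 1 header iovec plus 'runs' data iovecs */
	return runs;
}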
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index b6ecd2061e7c..6850f49f4af3 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -21,23 +21,6 @@
extern kmem_zone_t *xfs_buf_item_zone;
/*
- * This is the structure used to lay out a buf log item in the
- * log. The data map describes which 128 byte chunks of the buffer
- * have been logged.
- * For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
- */
-typedef struct xfs_buf_log_format {
- unsigned short blf_type; /* buf log item type indicator */
- unsigned short blf_size; /* size of this item */
- ushort blf_flags; /* misc state */
- ushort blf_len; /* number of blocks in this buf */
- __int64_t blf_blkno; /* starting blkno of this buf */
- unsigned int blf_map_size; /* size of data bitmap in words */
- unsigned int blf_data_map[1];/* variable size bitmap of */
- /* regions of buffer in this item */
-} xfs_buf_log_format_t;
-
-/*
* This flag indicates that the buffer contains on disk inodes
* and requires special recovery handling.
*/
@@ -61,6 +44,23 @@ typedef struct xfs_buf_log_format {
#define NBWORD (NBBY * sizeof(unsigned int))
/*
+ * This is the structure used to lay out a buf log item in the
+ * log. The data map describes which 128 byte chunks of the buffer
+ * have been logged.
+ */
+#define XFS_BLF_DATAMAP_SIZE ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
+
+typedef struct xfs_buf_log_format {
+ unsigned short blf_type; /* buf log item type indicator */
+ unsigned short blf_size; /* size of this item */
+ ushort blf_flags; /* misc state */
+ ushort blf_len; /* number of blocks in this buf */
+ __int64_t blf_blkno; /* starting blkno of this buf */
+ unsigned int blf_map_size; /* used size of data bitmap in words */
+ unsigned int blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
+} xfs_buf_log_format_t;
+
+/*
* buf log item flags
*/
#define XFS_BLI_HOLD 0x01
@@ -102,7 +102,9 @@ typedef struct xfs_buf_log_item {
char *bli_orig; /* original buffer copy */
char *bli_logged; /* bytes logged (bitmap) */
#endif
- xfs_buf_log_format_t bli_format; /* in-log header */
+ int bli_format_count; /* count of headers */
+ struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
+ struct xfs_buf_log_format bli_format; /* embedded in-log header */
} xfs_buf_log_item_t;
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 015b946c5808..7bfb7dd334fc 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -83,9 +83,9 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
/*
* Utility routines.
*/
-STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
-STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
-STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
+STATIC uint xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
+STATIC int xfs_da_node_order(struct xfs_buf *node1_bp,
+ struct xfs_buf *node2_bp);
STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
@@ -100,10 +100,10 @@ STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
*/
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
- xfs_dabuf_t **bpp, int whichfork)
+ struct xfs_buf **bpp, int whichfork)
{
xfs_da_intnode_t *node;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
xfs_trans_t *tp;
@@ -114,7 +114,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
if (error)
return(error);
ASSERT(bp != NULL);
- node = bp->data;
+ node = bp->b_addr;
node->hdr.info.forw = 0;
node->hdr.info.back = 0;
node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
@@ -122,7 +122,7 @@ xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
node->hdr.count = 0;
node->hdr.level = cpu_to_be16(level);
- xfs_da_log_buf(tp, bp,
+ xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
*bpp = bp;
@@ -138,7 +138,7 @@ xfs_da_split(xfs_da_state_t *state)
{
xfs_da_state_blk_t *oldblk, *newblk, *addblk;
xfs_da_intnode_t *node;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int max, action, error, i;
trace_xfs_da_split(state->args);
@@ -203,7 +203,6 @@ xfs_da_split(xfs_da_state_t *state)
case XFS_DA_NODE_MAGIC:
error = xfs_da_node_split(state, oldblk, newblk, addblk,
max - i, &action);
- xfs_da_buf_done(addblk->bp);
addblk->bp = NULL;
if (error)
return(error); /* GROT: dir is inconsistent */
@@ -221,13 +220,6 @@ xfs_da_split(xfs_da_state_t *state)
* Update the btree to show the new hashval for this child.
*/
xfs_da_fixhashpath(state, &state->path);
- /*
- * If we won't need this block again, it's getting dropped
- * from the active path by the loop control, so we need
- * to mark it done now.
- */
- if (i > 0 || !addblk)
- xfs_da_buf_done(oldblk->bp);
}
if (!addblk)
return(0);
@@ -239,8 +231,6 @@ xfs_da_split(xfs_da_state_t *state)
oldblk = &state->path.blk[0];
error = xfs_da_root_split(state, oldblk, addblk);
if (error) {
- xfs_da_buf_done(oldblk->bp);
- xfs_da_buf_done(addblk->bp);
addblk->bp = NULL;
return(error); /* GROT: dir is inconsistent */
}
@@ -252,7 +242,7 @@ xfs_da_split(xfs_da_state_t *state)
* and the original block 0 could be at any position in the list.
*/
- node = oldblk->bp->data;
+ node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
bp = addblk->bp;
@@ -260,13 +250,13 @@ xfs_da_split(xfs_da_state_t *state)
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
- node = bp->data;
+ node = bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
- xfs_da_log_buf(state->args->trans, bp,
+ xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
- node = oldblk->bp->data;
+ node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
bp = addblk->bp;
@@ -274,14 +264,12 @@ xfs_da_split(xfs_da_state_t *state)
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
- node = bp->data;
+ node = bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
- xfs_da_log_buf(state->args->trans, bp,
+ xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
- xfs_da_buf_done(oldblk->bp);
- xfs_da_buf_done(addblk->bp);
addblk->bp = NULL;
return(0);
}
@@ -298,7 +286,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
xfs_da_intnode_t *node, *oldroot;
xfs_da_args_t *args;
xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error, size;
xfs_inode_t *dp;
xfs_trans_t *tp;
@@ -323,8 +311,8 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
if (error)
return(error);
ASSERT(bp != NULL);
- node = bp->data;
- oldroot = blk1->bp->data;
+ node = bp->b_addr;
+ oldroot = blk1->bp->b_addr;
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
(char *)oldroot);
@@ -335,8 +323,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
(char *)leaf);
}
memcpy(node, oldroot, size);
- xfs_da_log_buf(tp, bp, 0, size - 1);
- xfs_da_buf_done(blk1->bp);
+ xfs_trans_log_buf(tp, bp, 0, size - 1);
blk1->bp = bp;
blk1->blkno = blkno;
@@ -348,7 +335,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
if (error)
return(error);
- node = bp->data;
+ node = bp->b_addr;
node->btree[0].hashval = cpu_to_be32(blk1->hashval);
node->btree[0].before = cpu_to_be32(blk1->blkno);
node->btree[1].hashval = cpu_to_be32(blk2->hashval);
@@ -365,10 +352,9 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
#endif
/* Header is already logged by xfs_da_node_create */
- xfs_da_log_buf(tp, bp,
+ xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, node->btree,
sizeof(xfs_da_node_entry_t) * 2));
- xfs_da_buf_done(bp);
return(0);
}
@@ -389,7 +375,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
trace_xfs_da_node_split(state->args);
- node = oldblk->bp->data;
+ node = oldblk->bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
/*
@@ -436,7 +422,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
*
* If we had double-split op below us, then add the extra block too.
*/
- node = oldblk->bp->data;
+ node = oldblk->bp->b_addr;
if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
oldblk->index++;
xfs_da_node_add(state, oldblk, addblk);
@@ -477,8 +463,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
trace_xfs_da_node_rebalance(state->args);
- node1 = blk1->bp->data;
- node2 = blk2->bp->data;
+ node1 = blk1->bp->b_addr;
+ node2 = blk2->bp->b_addr;
/*
* Figure out how many entries need to move, and in which direction.
* Swap the nodes around if that makes it simpler.
@@ -532,7 +518,7 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
memcpy(btree_d, btree_s, tmp);
be16_add_cpu(&node1->hdr.count, count);
- xfs_da_log_buf(tp, blk1->bp,
+ xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
/*
@@ -549,9 +535,9 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
/*
* Log header of node 1 and all current bits of node 2.
*/
- xfs_da_log_buf(tp, blk1->bp,
+ xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
- xfs_da_log_buf(tp, blk2->bp,
+ xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
sizeof(node2->hdr) +
sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
@@ -560,8 +546,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
* Record the last hashval from each block for upward propagation.
* (note: don't use the swapped node pointers)
*/
- node1 = blk1->bp->data;
- node2 = blk2->bp->data;
+ node1 = blk1->bp->b_addr;
+ node2 = blk2->bp->b_addr;
blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
@@ -587,7 +573,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
trace_xfs_da_node_add(state->args);
- node = oldblk->bp->data;
+ node = oldblk->bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
ASSERT(newblk->blkno != 0);
@@ -606,10 +592,10 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
}
btree->hashval = cpu_to_be32(newblk->hashval);
btree->before = cpu_to_be32(newblk->blkno);
- xfs_da_log_buf(state->args->trans, oldblk->bp,
+ xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
be16_add_cpu(&node->hdr.count, 1);
- xfs_da_log_buf(state->args->trans, oldblk->bp,
+ xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
/*
@@ -735,7 +721,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
xfs_da_intnode_t *oldroot;
xfs_da_args_t *args;
xfs_dablk_t child;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
trace_xfs_da_root_join(state->args);
@@ -743,7 +729,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
args = state->args;
ASSERT(args != NULL);
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
- oldroot = root_blk->bp->data;
+ oldroot = root_blk->bp->b_addr;
ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
ASSERT(!oldroot->hdr.info.forw);
ASSERT(!oldroot->hdr.info.back);
@@ -765,11 +751,11 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
if (error)
return(error);
ASSERT(bp != NULL);
- xfs_da_blkinfo_onlychild_validate(bp->data,
+ xfs_da_blkinfo_onlychild_validate(bp->b_addr,
be16_to_cpu(oldroot->hdr.level));
- memcpy(root_blk->bp->data, bp->data, state->blocksize);
- xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
+ memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
+ xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
}
@@ -791,7 +777,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
xfs_da_blkinfo_t *info;
int count, forward, error, retval, i;
xfs_dablk_t blkno;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
/*
* Check for the degenerate case of the block being over 50% full.
@@ -799,7 +785,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
* to coalesce with a sibling.
*/
blk = &state->path.blk[ state->path.active-1 ];
- info = blk->bp->data;
+ info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
node = (xfs_da_intnode_t *)info;
count = be16_to_cpu(node->hdr.count);
@@ -859,10 +845,10 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
count = state->node_ents;
count -= state->node_ents >> 2;
count -= be16_to_cpu(node->hdr.count);
- node = bp->data;
+ node = bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
count -= be16_to_cpu(node->hdr.count);
- xfs_da_brelse(state->args->trans, bp);
+ xfs_trans_brelse(state->args->trans, bp);
if (count >= 0)
break; /* fits with at least 25% to spare */
}
@@ -934,14 +920,14 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
break;
}
for (blk--, level--; level >= 0; blk--, level--) {
- node = blk->bp->data;
+ node = blk->bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
btree = &node->btree[ blk->index ];
if (be32_to_cpu(btree->hashval) == lasthash)
break;
blk->hashval = lasthash;
btree->hashval = cpu_to_be32(lasthash);
- xfs_da_log_buf(state->args->trans, blk->bp,
+ xfs_trans_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
@@ -960,7 +946,7 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
trace_xfs_da_node_remove(state->args);
- node = drop_blk->bp->data;
+ node = drop_blk->bp->b_addr;
ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
ASSERT(drop_blk->index >= 0);
@@ -972,15 +958,15 @@ xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
memmove(btree, btree + 1, tmp);
- xfs_da_log_buf(state->args->trans, drop_blk->bp,
+ xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, tmp));
btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
}
memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
- xfs_da_log_buf(state->args->trans, drop_blk->bp,
+ xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
be16_add_cpu(&node->hdr.count, -1);
- xfs_da_log_buf(state->args->trans, drop_blk->bp,
+ xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
/*
@@ -1005,8 +991,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
trace_xfs_da_node_unbalance(state->args);
- drop_node = drop_blk->bp->data;
- save_node = save_blk->bp->data;
+ drop_node = drop_blk->bp->b_addr;
+ save_node = save_blk->bp->b_addr;
ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
tp = state->args->trans;
@@ -1023,13 +1009,13 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
memmove(btree, &save_node->btree[0], tmp);
btree = &save_node->btree[0];
- xfs_da_log_buf(tp, save_blk->bp,
+ xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, btree,
(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
sizeof(xfs_da_node_entry_t)));
} else {
btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
- xfs_da_log_buf(tp, save_blk->bp,
+ xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, btree,
be16_to_cpu(drop_node->hdr.count) *
sizeof(xfs_da_node_entry_t)));
@@ -1042,7 +1028,7 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
memcpy(btree, &drop_node->btree[0], tmp);
be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
- xfs_da_log_buf(tp, save_blk->bp,
+ xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
sizeof(save_node->hdr)));
@@ -1100,7 +1086,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
state->path.active--;
return(error);
}
- curr = blk->bp->data;
+ curr = blk->bp->b_addr;
blk->magic = be16_to_cpu(curr->magic);
ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
blk->magic == XFS_DIR2_LEAFN_MAGIC ||
@@ -1110,7 +1096,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
* Search an intermediate node for a match.
*/
if (blk->magic == XFS_DA_NODE_MAGIC) {
- node = blk->bp->data;
+ node = blk->bp->b_addr;
max = be16_to_cpu(node->hdr.count);
blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
@@ -1216,15 +1202,15 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
xfs_da_args_t *args;
int before=0, error;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
/*
* Set up environment.
*/
args = state->args;
ASSERT(args != NULL);
- old_info = old_blk->bp->data;
- new_info = new_blk->bp->data;
+ old_info = old_blk->bp->b_addr;
+ new_info = new_blk->bp->b_addr;
ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
old_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1261,12 +1247,11 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
if (error)
return(error);
ASSERT(bp != NULL);
- tmp_info = bp->data;
+ tmp_info = bp->b_addr;
ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
tmp_info->forw = cpu_to_be32(new_blk->blkno);
- xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
- xfs_da_buf_done(bp);
+ xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->back = cpu_to_be32(new_blk->blkno);
} else {
@@ -1283,18 +1268,17 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
if (error)
return(error);
ASSERT(bp != NULL);
- tmp_info = bp->data;
+ tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == old_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
tmp_info->back = cpu_to_be32(new_blk->blkno);
- xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
- xfs_da_buf_done(bp);
+ xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->forw = cpu_to_be32(new_blk->blkno);
}
- xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
- xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
+ xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
+ xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
return(0);
}
@@ -1302,12 +1286,14 @@ xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
* Compare two intermediate nodes for "order".
*/
STATIC int
-xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
+xfs_da_node_order(
+ struct xfs_buf *node1_bp,
+ struct xfs_buf *node2_bp)
{
xfs_da_intnode_t *node1, *node2;
- node1 = node1_bp->data;
- node2 = node2_bp->data;
+ node1 = node1_bp->b_addr;
+ node2 = node2_bp->b_addr;
ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
@@ -1324,11 +1310,13 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
* Pick up the last hashvalue from an intermediate node.
*/
STATIC uint
-xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
+xfs_da_node_lasthash(
+ struct xfs_buf *bp,
+ int *count)
{
xfs_da_intnode_t *node;
- node = bp->data;
+ node = bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if (count)
*count = be16_to_cpu(node->hdr.count);
@@ -1346,7 +1334,7 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
{
xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
xfs_da_args_t *args;
- xfs_dabuf_t *bp;
+ struct xfs_buf *bp;
int error;
/*
@@ -1354,8 +1342,8 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
*/
args = state->args;
ASSERT(args != NULL);
- save_info = save_blk->bp->data;
- drop_info = drop_blk->bp->data;
+ save_info = save_blk->bp->b_addr;
+ drop_info = drop_blk->bp->b_addr;
ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
save_blk->magic == XFS_ATTR_LEAF_MAGIC);
@@ -1380,13 +1368,12 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
if (error)
return(error);
ASSERT(bp != NULL);
- tmp_info = bp->data;
+ tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
tmp_info->forw = cpu_to_be32(save_blk->blkno);
- xfs_da_log_buf(args->trans, bp, 0,
+ xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
- xfs_da_buf_done(bp);
}
} else {
trace_xfs_da_unlink_forward(args);
@@ -1398,17 +1385,16 @@ xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
if (error)
return(error);
ASSERT(bp != NULL);
- tmp_info = bp->data;
+ tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
tmp_info->back = cpu_to_be32(save_blk->blkno);
- xfs_da_log_buf(args->trans, bp, 0,
+ xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
- xfs_da_buf_done(bp);
}
}
- xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
+ xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
return(0);
}
@@ -1443,7 +1429,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
ASSERT(blk->bp != NULL);
- node = blk->bp->data;
+ node = blk->bp->b_addr;
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
blk->index++;
@@ -1471,7 +1457,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
* (if it's dirty, trans won't actually let go)
*/
if (release)
- xfs_da_brelse(args->trans, blk->bp);
+ xfs_trans_brelse(args->trans, blk->bp);
/*
* Read the next child block.
@@ -1482,7 +1468,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
if (error)
return(error);
ASSERT(blk->bp != NULL);
- info = blk->bp->data;
+ info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
@@ -1702,11 +1688,13 @@ xfs_da_grow_inode(
* a bmap btree split to do that.
*/
STATIC int
-xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
- xfs_dabuf_t **dead_bufp)
+xfs_da_swap_lastblock(
+ xfs_da_args_t *args,
+ xfs_dablk_t *dead_blknop,
+ struct xfs_buf **dead_bufp)
{
xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
- xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
+ struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
xfs_fileoff_t lastoff;
xfs_inode_t *ip;
xfs_trans_t *tp;
@@ -1744,9 +1732,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
/*
* Copy the last block into the dead buffer and log it.
*/
- memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
- xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
- dead_info = dead_buf->data;
+ memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
+ xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
+ dead_info = dead_buf->b_addr;
/*
* Get values from the moved block.
*/
@@ -1767,7 +1755,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
goto done;
- sib_info = sib_buf->data;
+ sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->forw) != last_blkno ||
sib_info->magic != dead_info->magic)) {
@@ -1777,10 +1765,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
goto done;
}
sib_info->forw = cpu_to_be32(dead_blkno);
- xfs_da_log_buf(tp, sib_buf,
+ xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
sizeof(sib_info->forw)));
- xfs_da_buf_done(sib_buf);
sib_buf = NULL;
}
/*
@@ -1789,7 +1776,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
goto done;
- sib_info = sib_buf->data;
+ sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->back) != last_blkno ||
sib_info->magic != dead_info->magic)) {
@@ -1799,10 +1786,9 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
goto done;
}
sib_info->back = cpu_to_be32(dead_blkno);
- xfs_da_log_buf(tp, sib_buf,
+ xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->back,
sizeof(sib_info->back)));
- xfs_da_buf_done(sib_buf);
sib_buf = NULL;
}
par_blkno = mp->m_dirleafblk;
@@ -1813,7 +1799,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
for (;;) {
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
goto done;
- par_node = par_buf->data;
+ par_node = par_buf->b_addr;
if (unlikely(par_node->hdr.info.magic !=
cpu_to_be16(XFS_DA_NODE_MAGIC) ||
(level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
@@ -1837,7 +1823,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
par_blkno = be32_to_cpu(par_node->btree[entno].before);
if (level == dead_level + 1)
break;
- xfs_da_brelse(tp, par_buf);
+ xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
}
/*
@@ -1853,7 +1839,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
if (entno < be16_to_cpu(par_node->hdr.count))
break;
par_blkno = be32_to_cpu(par_node->hdr.info.forw);
- xfs_da_brelse(tp, par_buf);
+ xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
if (unlikely(par_blkno == 0)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
@@ -1863,7 +1849,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
}
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
goto done;
- par_node = par_buf->data;
+ par_node = par_buf->b_addr;
if (unlikely(
be16_to_cpu(par_node->hdr.level) != level ||
par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
@@ -1878,20 +1864,18 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
* Update the parent entry pointing to the moved block.
*/
par_node->btree[entno].before = cpu_to_be32(dead_blkno);
- xfs_da_log_buf(tp, par_buf,
+ xfs_trans_log_buf(tp, par_buf,
XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
sizeof(par_node->btree[entno].before)));
- xfs_da_buf_done(par_buf);
- xfs_da_buf_done(dead_buf);
*dead_blknop = last_blkno;
*dead_bufp = last_buf;
return 0;
done:
if (par_buf)
- xfs_da_brelse(tp, par_buf);
+ xfs_trans_brelse(tp, par_buf);
if (sib_buf)
- xfs_da_brelse(tp, sib_buf);
- xfs_da_brelse(tp, last_buf);
+ xfs_trans_brelse(tp, sib_buf);
+ xfs_trans_brelse(tp, last_buf);
return error;
}
@@ -1899,8 +1883,10 @@ done:
* Remove a btree block from a directory or attribute.
*/
int
-xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
- xfs_dabuf_t *dead_buf)
+xfs_da_shrink_inode(
+ xfs_da_args_t *args,
+ xfs_dablk_t dead_blkno,
+ struct xfs_buf *dead_buf)
{
xfs_inode_t *dp;
int done, error, w, count;
@@ -1935,7 +1921,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
break;
}
}
- xfs_da_binval(tp, dead_buf);
+ xfs_trans_binval(tp, dead_buf);
return error;
}
@@ -1967,35 +1953,75 @@ xfs_da_map_covers_blocks(
}
/*
- * Make a dabuf.
- * Used for get_buf, read_buf, read_bufr, and reada_buf.
+ * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
+ *
+ * For the single map case, it is assumed that the caller has provided a pointer
+ * to a valid xfs_buf_map. For the multiple map case, this function will
+ * allocate the xfs_buf_map to hold all the maps and replace the caller's single
+ * map pointer with the allocated map.
*/
-STATIC int
-xfs_da_do_buf(
- xfs_trans_t *trans,
- xfs_inode_t *dp,
- xfs_dablk_t bno,
- xfs_daddr_t *mappedbnop,
- xfs_dabuf_t **bpp,
- int whichfork,
- int caller)
+static int
+xfs_buf_map_from_irec(
+ struct xfs_mount *mp,
+ struct xfs_buf_map **mapp,
+ unsigned int *nmaps,
+ struct xfs_bmbt_irec *irecs,
+ unsigned int nirecs)
{
- xfs_buf_t *bp = NULL;
- xfs_buf_t **bplist;
- int error=0;
- int i;
- xfs_bmbt_irec_t map;
- xfs_bmbt_irec_t *mapp;
- xfs_daddr_t mappedbno;
- xfs_mount_t *mp;
- int nbplist=0;
- int nfsb;
- int nmap;
- xfs_dabuf_t *rbp;
+ struct xfs_buf_map *map;
+ int i;
+
+ ASSERT(*nmaps == 1);
+ ASSERT(nirecs >= 1);
+
+ if (nirecs > 1) {
+ map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+ if (!map)
+ return ENOMEM;
+ *mapp = map;
+ }
+
+ *nmaps = nirecs;
+ map = *mapp;
+ for (i = 0; i < *nmaps; i++) {
+ ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
+ irecs[i].br_startblock != HOLESTARTBLOCK);
+ map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
+ map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
+ }
+ return 0;
+}
+
+/*
+ * Map the given block ready for reading. There are three possible return
+ * values:
+ * -1 - will be returned if we land in a hole and mappedbno == -2 so the
+ * caller knows not to execute a subsequent read.
+ * 0 - if we mapped the block successfully
+ * >0 - positive error number if there was an error.
+ */
+static int
+xfs_dabuf_map(
+ struct xfs_trans *trans,
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ xfs_daddr_t mappedbno,
+ int whichfork,
+ struct xfs_buf_map **map,
+ int *nmaps)
+{
+ struct xfs_mount *mp = dp->i_mount;
+ int nfsb;
+ int error = 0;
+ struct xfs_bmbt_irec irec;
+ struct xfs_bmbt_irec *irecs = &irec;
+ int nirecs;
+
+ ASSERT(map && *map);
+ ASSERT(*nmaps == 1);
- mp = dp->i_mount;
nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
- mappedbno = *mappedbnop;
+
/*
* Caller doesn't have a mapping. -2 means don't complain
* if we land in a hole.
@@ -2004,112 +2030,150 @@ xfs_da_do_buf(
/*
* Optimize the one-block case.
*/
- if (nfsb == 1)
- mapp = &map;
- else
- mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
+ if (nfsb != 1)
+ irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
- nmap = nfsb;
- error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
- &nmap, xfs_bmapi_aflag(whichfork));
+ nirecs = nfsb;
+ error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
+ &nirecs, xfs_bmapi_aflag(whichfork));
if (error)
- goto exit0;
+ goto out;
} else {
- map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
- map.br_startoff = (xfs_fileoff_t)bno;
- map.br_blockcount = nfsb;
- mapp = &map;
- nmap = 1;
+ irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
+ irecs->br_startoff = (xfs_fileoff_t)bno;
+ irecs->br_blockcount = nfsb;
+ irecs->br_state = 0;
+ nirecs = 1;
}
- if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
- error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
+
+ if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
+ error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
+ int i;
xfs_alert(mp, "%s: bno %lld dir: inode %lld",
__func__, (long long)bno,
(long long)dp->i_ino);
- for (i = 0; i < nmap; i++) {
+ for (i = 0; i < *nmaps; i++) {
xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
i,
- (long long)mapp[i].br_startoff,
- (long long)mapp[i].br_startblock,
- (long long)mapp[i].br_blockcount,
- mapp[i].br_state);
+ (long long)irecs[i].br_startoff,
+ (long long)irecs[i].br_startblock,
+ (long long)irecs[i].br_blockcount,
+ irecs[i].br_state);
}
}
XFS_ERROR_REPORT("xfs_da_do_buf(1)",
XFS_ERRLEVEL_LOW, mp);
}
- goto exit0;
+ goto out;
}
- if (caller != 3 && nmap > 1) {
- bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
- nbplist = 0;
- } else
- bplist = NULL;
- /*
- * Turn the mapping(s) into buffer(s).
- */
- for (i = 0; i < nmap; i++) {
- int nmapped;
-
- mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
- if (i == 0)
- *mappedbnop = mappedbno;
- nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
- switch (caller) {
- case 0:
- bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
- mappedbno, nmapped, 0);
- error = bp ? bp->b_error : XFS_ERROR(EIO);
- break;
- case 1:
- case 2:
- bp = NULL;
- error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
- mappedbno, nmapped, 0, &bp);
- break;
- case 3:
- xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
+ error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
+out:
+ if (irecs != &irec)
+ kmem_free(irecs);
+ return error;
+}
+
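xfs_buf_map_from_irec() and xfs_dabuf_map() together turn a logical dablk range into a vector of disk mappings that the _map buffer interfaces can consume. The standalone sketch below illustrates the same logical-to-physical splitting with invented types and extent data; it is not the kernel code, just the general technique the two functions above implement.

#include <stdio.h>

/* Invented types: a file extent and a disk I/O vector entry, in blocks. */
struct extent { unsigned int lstart, pstart, len; };
struct io_vec { unsigned int pstart, len; };

/*
 * Split the logical range [lstart, lstart + count) across the (sorted)
 * extent list, emitting one io_vec per extent touched.  Returns the number
 * of entries written, or -1 if the range hits a hole or overflows 'out'.
 */
static int map_range(const struct extent *ext, int next,
		     unsigned int lstart, unsigned int count,
		     struct io_vec *out, int maxout)
{
	unsigned int pos = lstart, remaining = count;
	int n = 0;

	for (int i = 0; i < next && remaining; i++) {
		if (pos < ext[i].lstart)
			return -1;		/* hole before this extent */
		if (pos >= ext[i].lstart + ext[i].len)
			continue;		/* extent ends before pos */

		unsigned int off = pos - ext[i].lstart;
		unsigned int take = ext[i].len - off;

		if (take > remaining)
			take = remaining;
		if (n == maxout)
			return -1;		/* caller's vector too small */
		out[n].pstart = ext[i].pstart + off;
		out[n].len = take;
		n++;
		pos += take;
		remaining -= take;
	}
	return remaining ? -1 : n;
}

int main(void)
{
	/* Two extents: logical 0-3 at disk 100, logical 4-7 at disk 200. */
	struct extent ext[] = { { 0, 100, 4 }, { 4, 200, 4 } };
	struct io_vec iov[4];
	int n = map_range(ext, 2, 2, 4, iov, 4);

	for (int i = 0; i < n; i++)
		printf("read %u blocks at daddr %u\n", iov[i].len, iov[i].pstart);
	return 0;
}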
+/*
+ * Get a buffer for the dir/attr block.
+ */
+int
+xfs_da_get_buf(
+ struct xfs_trans *trans,
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ xfs_daddr_t mappedbno,
+ struct xfs_buf **bpp,
+ int whichfork)
+{
+ struct xfs_buf *bp;
+ struct xfs_buf_map map;
+ struct xfs_buf_map *mapp;
+ int nmap;
+ int error;
+
+ *bpp = NULL;
+ mapp = &map;
+ nmap = 1;
+ error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
+ &mapp, &nmap);
+ if (error) {
+ /* mapping a hole is not an error, but we don't continue */
+ if (error == -1)
error = 0;
- bp = NULL;
- break;
- }
- if (error) {
- if (bp)
- xfs_trans_brelse(trans, bp);
- goto exit1;
- }
- if (!bp)
- continue;
- if (caller == 1) {
- if (whichfork == XFS_ATTR_FORK)
- xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
- else
- xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
- }
- if (bplist) {
- bplist[nbplist++] = bp;
- }
+ goto out_free;
}
- /*
- * Build a dabuf structure.
- */
- if (bplist) {
- rbp = xfs_da_buf_make(nbplist, bplist);
- } else if (bp)
- rbp = xfs_da_buf_make(1, &bp);
+
+ bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
+ mapp, nmap, 0);
+ error = bp ? bp->b_error : XFS_ERROR(EIO);
+ if (error) {
+ xfs_trans_brelse(trans, bp);
+ goto out_free;
+ }
+
+ *bpp = bp;
+
+out_free:
+ if (mapp != &map)
+ kmem_free(mapp);
+
+ return error;
+}
+
+/*
+ * Get a buffer for the dir/attr block, fill in the contents.
+ */
+int
+xfs_da_read_buf(
+ struct xfs_trans *trans,
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ xfs_daddr_t mappedbno,
+ struct xfs_buf **bpp,
+ int whichfork)
+{
+ struct xfs_buf *bp;
+ struct xfs_buf_map map;
+ struct xfs_buf_map *mapp;
+ int nmap;
+ int error;
+
+ *bpp = NULL;
+ mapp = &map;
+ nmap = 1;
+ error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
+ &mapp, &nmap);
+ if (error) {
+ /* mapping a hole is not an error, but we don't continue */
+ if (error == -1)
+ error = 0;
+ goto out_free;
+ }
+
+ error = xfs_trans_read_buf_map(dp->i_mount, trans,
+ dp->i_mount->m_ddev_targp,
+ mapp, nmap, 0, &bp);
+ if (error)
+ goto out_free;
+
+ if (whichfork == XFS_ATTR_FORK)
+ xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
else
- rbp = NULL;
+ xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
+
/*
- * For read_buf, check the magic number.
+ * This verification code will be moved to a CRC verification callback
+ * function so just leave it here unchanged until then.
*/
- if (caller == 1) {
- xfs_dir2_data_hdr_t *hdr = rbp->data;
- xfs_dir2_free_t *free = rbp->data;
- xfs_da_blkinfo_t *info = rbp->data;
+ {
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
+ xfs_dir2_free_t *free = bp->b_addr;
+ xfs_da_blkinfo_t *info = bp->b_addr;
uint magic, magic1;
+ struct xfs_mount *mp = dp->i_mount;
magic = be16_to_cpu(info->magic);
magic1 = be32_to_cpu(hdr->magic);
@@ -2123,66 +2187,20 @@ xfs_da_do_buf(
(free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
- trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
+ trace_xfs_da_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
- xfs_da_brelse(trans, rbp);
- nbplist = 0;
- goto exit1;
+ xfs_trans_brelse(trans, bp);
+ goto out_free;
}
}
- if (bplist) {
- kmem_free(bplist);
- }
- if (mapp != &map) {
- kmem_free(mapp);
- }
- if (bpp)
- *bpp = rbp;
- return 0;
-exit1:
- if (bplist) {
- for (i = 0; i < nbplist; i++)
- xfs_trans_brelse(trans, bplist[i]);
- kmem_free(bplist);
- }
-exit0:
+ *bpp = bp;
+out_free:
if (mapp != &map)
kmem_free(mapp);
- if (bpp)
- *bpp = NULL;
- return error;
-}
-
-/*
- * Get a buffer for the dir/attr block.
- */
-int
-xfs_da_get_buf(
- xfs_trans_t *trans,
- xfs_inode_t *dp,
- xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
- xfs_dabuf_t **bpp,
- int whichfork)
-{
- return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
-}
-/*
- * Get a buffer for the dir/attr block, fill in the contents.
- */
-int
-xfs_da_read_buf(
- xfs_trans_t *trans,
- xfs_inode_t *dp,
- xfs_dablk_t bno,
- xfs_daddr_t mappedbno,
- xfs_dabuf_t **bpp,
- int whichfork)
-{
- return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
+ return error;
}
/*
@@ -2190,22 +2208,41 @@ xfs_da_read_buf(
*/
xfs_daddr_t
xfs_da_reada_buf(
- xfs_trans_t *trans,
- xfs_inode_t *dp,
- xfs_dablk_t bno,
- int whichfork)
+ struct xfs_trans *trans,
+ struct xfs_inode *dp,
+ xfs_dablk_t bno,
+ int whichfork)
{
- xfs_daddr_t rval;
+ xfs_daddr_t mappedbno = -1;
+ struct xfs_buf_map map;
+ struct xfs_buf_map *mapp;
+ int nmap;
+ int error;
+
+ mapp = &map;
+ nmap = 1;
+ error = xfs_dabuf_map(trans, dp, bno, -1, whichfork,
+ &mapp, &nmap);
+ if (error) {
+ /* mapping a hole is not an error, but we don't continue */
+ if (error == -1)
+ error = 0;
+ goto out_free;
+ }
- rval = -1;
- if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
+ mappedbno = mapp[0].bm_bn;
+ xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap);
+
+out_free:
+ if (mapp != &map)
+ kmem_free(mapp);
+
+ if (error)
return -1;
- else
- return rval;
+ return mappedbno;
}
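With the conversion above, xfs_da_reada_buf() keeps its old contract: it returns the disk address of the first mapping it issued readahead for, or -1 if the block maps to a hole or the mapping fails (the error itself is swallowed). A hedged illustration of a caller, not taken from this patch:

	daddr = xfs_da_reada_buf(trans, dp, blkno, XFS_DATA_FORK);
	if (daddr == -1) {
		/* hole or mapping failure: no readahead was queued */
	}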
kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
-kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
/*
* Allocate a dir-state structure.
@@ -2225,13 +2262,8 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
int i;
- for (i = 0; i < state->altpath.active; i++) {
- if (state->altpath.blk[i].bp) {
- if (state->altpath.blk[i].bp != state->path.blk[i].bp)
- xfs_da_buf_done(state->altpath.blk[i].bp);
- state->altpath.blk[i].bp = NULL;
- }
- }
+ for (i = 0; i < state->altpath.active; i++)
+ state->altpath.blk[i].bp = NULL;
state->altpath.active = 0;
}
@@ -2241,204 +2273,9 @@ xfs_da_state_kill_altpath(xfs_da_state_t *state)
void
xfs_da_state_free(xfs_da_state_t *state)
{
- int i;
-
xfs_da_state_kill_altpath(state);
- for (i = 0; i < state->path.active; i++) {
- if (state->path.blk[i].bp)
- xfs_da_buf_done(state->path.blk[i].bp);
- }
- if (state->extravalid && state->extrablk.bp)
- xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
kmem_zone_free(xfs_da_state_zone, state);
}
-
-/*
- * Create a dabuf.
- */
-/* ARGSUSED */
-STATIC xfs_dabuf_t *
-xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
-{
- xfs_buf_t *bp;
- xfs_dabuf_t *dabuf;
- int i;
- int off;
-
- if (nbuf == 1)
- dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
- else
- dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
- dabuf->dirty = 0;
- if (nbuf == 1) {
- dabuf->nbuf = 1;
- bp = bps[0];
- dabuf->bbcount = bp->b_length;
- dabuf->data = bp->b_addr;
- dabuf->bps[0] = bp;
- } else {
- dabuf->nbuf = nbuf;
- for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
- dabuf->bps[i] = bp = bps[i];
- dabuf->bbcount += bp->b_length;
- }
- dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
- for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
- bp = bps[i];
- memcpy((char *)dabuf->data + off, bp->b_addr,
- BBTOB(bp->b_length));
- }
- }
- return dabuf;
-}
-
-/*
- * Un-dirty a dabuf.
- */
-STATIC void
-xfs_da_buf_clean(xfs_dabuf_t *dabuf)
-{
- xfs_buf_t *bp;
- int i;
- int off;
-
- if (dabuf->dirty) {
- ASSERT(dabuf->nbuf > 1);
- dabuf->dirty = 0;
- for (i = off = 0; i < dabuf->nbuf;
- i++, off += BBTOB(bp->b_length)) {
- bp = dabuf->bps[i];
- memcpy(bp->b_addr, dabuf->data + off,
- BBTOB(bp->b_length));
- }
- }
-}
-
-/*
- * Release a dabuf.
- */
-void
-xfs_da_buf_done(xfs_dabuf_t *dabuf)
-{
- ASSERT(dabuf);
- ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
- if (dabuf->dirty)
- xfs_da_buf_clean(dabuf);
- if (dabuf->nbuf > 1) {
- kmem_free(dabuf->data);
- kmem_free(dabuf);
- } else {
- kmem_zone_free(xfs_dabuf_zone, dabuf);
- }
-}
-
-/*
- * Log transaction from a dabuf.
- */
-void
-xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
-{
- xfs_buf_t *bp;
- uint f;
- int i;
- uint l;
- int off;
-
- ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
- if (dabuf->nbuf == 1) {
- ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
- xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
- return;
- }
- dabuf->dirty = 1;
- ASSERT(first <= last);
- for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
- bp = dabuf->bps[i];
- f = off;
- l = f + BBTOB(bp->b_length) - 1;
- if (f < first)
- f = first;
- if (l > last)
- l = last;
- if (f <= l)
- xfs_trans_log_buf(tp, bp, f - off, l - off);
- /*
- * B_DONE is set by xfs_trans_log buf.
- * If we don't set it on a new buffer (get not read)
- * then if we don't put anything in the buffer it won't
- * be set, and at commit it it released into the cache,
- * and then a read will fail.
- */
- else if (!(XFS_BUF_ISDONE(bp)))
- XFS_BUF_DONE(bp);
- }
- ASSERT(last < off);
-}
-
-/*
- * Release dabuf from a transaction.
- * Have to free up the dabuf before the buffers are released,
- * since the synchronization on the dabuf is really the lock on the buffer.
- */
-void
-xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
-{
- xfs_buf_t *bp;
- xfs_buf_t **bplist;
- int i;
- int nbuf;
-
- ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
- if ((nbuf = dabuf->nbuf) == 1) {
- bplist = &bp;
- bp = dabuf->bps[0];
- } else {
- bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
- }
- xfs_da_buf_done(dabuf);
- for (i = 0; i < nbuf; i++)
- xfs_trans_brelse(tp, bplist[i]);
- if (bplist != &bp)
- kmem_free(bplist);
-}
-
-/*
- * Invalidate dabuf from a transaction.
- */
-void
-xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
-{
- xfs_buf_t *bp;
- xfs_buf_t **bplist;
- int i;
- int nbuf;
-
- ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
- if ((nbuf = dabuf->nbuf) == 1) {
- bplist = &bp;
- bp = dabuf->bps[0];
- } else {
- bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
- memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
- }
- xfs_da_buf_done(dabuf);
- for (i = 0; i < nbuf; i++)
- xfs_trans_binval(tp, bplist[i]);
- if (bplist != &bp)
- kmem_free(bplist);
-}
-
-/*
- * Get the first daddr from a dabuf.
- */
-xfs_daddr_t
-xfs_da_blkno(xfs_dabuf_t *dabuf)
-{
- ASSERT(dabuf->nbuf);
- ASSERT(dabuf->data);
- return XFS_BUF_ADDR(dabuf->bps[0]);
-}
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index dbf7c074ae73..132adafb041e 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -32,7 +32,7 @@ struct zone;
/*
* This structure is common to both leaf nodes and non-leaf nodes in the Btree.
*
- * Is is used to manage a doubly linked list of all blocks at the same
+ * It is used to manage a doubly linked list of all blocks at the same
* level in the Btree, and to identify which type of block this is.
*/
#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
@@ -133,24 +133,6 @@ typedef struct xfs_da_args {
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }
/*
- * Structure to describe buffer(s) for a block.
- * This is needed in the directory version 2 format case, when
- * multiple non-contiguous fsblocks might be needed to cover one
- * logical directory block.
- * If the buffer count is 1 then the data pointer points to the
- * same place as the b_addr field for the buffer, else to kmem_alloced memory.
- */
-typedef struct xfs_dabuf {
- int nbuf; /* number of buffer pointers present */
- short dirty; /* data needs to be copied back */
- short bbcount; /* how large is data in bbs */
- void *data; /* pointer for buffers' data */
- struct xfs_buf *bps[1]; /* actually nbuf of these */
-} xfs_dabuf_t;
-#define XFS_DA_BUF_SIZE(n) \
- (sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
-
-/*
* Storage for holding state during Btree searches and split/join ops.
*
* Only need space for 5 intermediate nodes. With a minimum of 62-way
@@ -158,7 +140,7 @@ typedef struct xfs_dabuf {
* which is slightly more than enough.
*/
typedef struct xfs_da_state_blk {
- xfs_dabuf_t *bp; /* buffer containing block */
+ struct xfs_buf *bp; /* buffer containing block */
xfs_dablk_t blkno; /* filesystem blkno of buffer */
xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */
int index; /* relevant index into block */
@@ -211,7 +193,7 @@ struct xfs_nameops {
* Routines used for growing the Btree.
*/
int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
- xfs_dabuf_t **bpp, int whichfork);
+ struct xfs_buf **bpp, int whichfork);
int xfs_da_split(xfs_da_state_t *state);
/*
@@ -241,14 +223,14 @@ int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
int count);
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
- xfs_dabuf_t **bp, int whichfork);
+ struct xfs_buf **bp, int whichfork);
int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
- xfs_dabuf_t **bpp, int whichfork);
+ struct xfs_buf **bpp, int whichfork);
xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, int whichfork);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
- xfs_dabuf_t *dead_buf);
+ struct xfs_buf *dead_buf);
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
@@ -258,15 +240,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
-void xfs_da_buf_done(xfs_dabuf_t *dabuf);
-void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first,
- uint last);
-void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
-void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
-xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
-
extern struct kmem_zone *xfs_da_state_zone;
-extern struct kmem_zone *xfs_dabuf_zone;
extern const struct xfs_nameops xfs_default_nameops;
#endif /* __XFS_DA_BTREE_H__ */
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index a3721633abc8..1d9643b3dce6 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -33,7 +33,7 @@ typedef struct xfs_timestamp {
* variable size the leftover area split into a data and an attribute fork.
* The format of the data and attribute fork depends on the format of the
* inode as indicated by di_format and di_aformat. To access the data and
- * attribute use the XFS_DFORK_PTR, XFS_DFORK_DPTR, and XFS_DFORK_PTR macros
+ * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
* below.
*
* There is a very similar struct icdinode in xfs_inode which matches the
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 67a250c36d41..b26a50f9921d 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -592,7 +592,7 @@ int
xfs_dir2_shrink_inode(
xfs_da_args_t *args,
xfs_dir2_db_t db,
- xfs_dabuf_t *bp)
+ struct xfs_buf *bp)
{
xfs_fileoff_t bno; /* directory file offset */
xfs_dablk_t da; /* directory file offset */
@@ -634,7 +634,7 @@ xfs_dir2_shrink_inode(
/*
* Invalidate the buffer from the transaction.
*/
- xfs_da_binval(tp, bp);
+ xfs_trans_binval(tp, bp);
/*
* If it's not a data block, we're done.
*/
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 586732f2d80d..e93ca8f054f4 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -37,10 +37,10 @@
/*
* Local function prototypes.
*/
-static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, xfs_dabuf_t *bp, int first,
- int last);
-static void xfs_dir2_block_log_tail(xfs_trans_t *tp, xfs_dabuf_t *bp);
-static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **bpp,
+static void xfs_dir2_block_log_leaf(xfs_trans_t *tp, struct xfs_buf *bp,
+ int first, int last);
+static void xfs_dir2_block_log_tail(xfs_trans_t *tp, struct xfs_buf *bp);
+static int xfs_dir2_block_lookup_int(xfs_da_args_t *args, struct xfs_buf **bpp,
int *entno);
static int xfs_dir2_block_sort(const void *a, const void *b);
@@ -66,7 +66,7 @@ xfs_dir2_block_addname(
xfs_dir2_data_free_t *bf; /* bestfree table in block */
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dabuf_t *bp; /* buffer for block */
+ struct xfs_buf *bp; /* buffer for block */
xfs_dir2_block_tail_t *btp; /* block tail */
int compact; /* need to compact leaf ents */
xfs_dir2_data_entry_t *dep; /* block data entry */
@@ -102,14 +102,14 @@ xfs_dir2_block_addname(
return error;
}
ASSERT(bp != NULL);
- hdr = bp->data;
+ hdr = bp->b_addr;
/*
* Check the magic number, corrupted if wrong.
*/
if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
XFS_ERRLEVEL_LOW, mp, hdr);
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
return XFS_ERROR(EFSCORRUPTED);
}
len = xfs_dir2_data_entsize(args->namelen);
@@ -212,7 +212,7 @@ xfs_dir2_block_addname(
* If this isn't a real add, we're done with the buffer.
*/
if (args->op_flags & XFS_DA_OP_JUSTCHECK)
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
/*
* If we don't have space for the new entry & leaf ...
*/
@@ -228,7 +228,6 @@ xfs_dir2_block_addname(
* Then add the new entry in that format.
*/
error = xfs_dir2_block_to_leaf(args, bp);
- xfs_da_buf_done(bp);
if (error)
return error;
return xfs_dir2_leaf_addname(args);
@@ -422,7 +421,6 @@ xfs_dir2_block_addname(
xfs_dir2_block_log_tail(tp, bp);
xfs_dir2_data_log_entry(tp, bp, dep);
xfs_dir2_data_check(dp, bp);
- xfs_da_buf_done(bp);
return 0;
}
@@ -437,7 +435,7 @@ xfs_dir2_block_getdents(
filldir_t filldir)
{
xfs_dir2_data_hdr_t *hdr; /* block header */
- xfs_dabuf_t *bp; /* buffer for block */
+ struct xfs_buf *bp; /* buffer for block */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_dir2_data_unused_t *dup; /* block unused entry */
@@ -469,7 +467,7 @@ xfs_dir2_block_getdents(
* We'll skip entries before this.
*/
wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
- hdr = bp->data;
+ hdr = bp->b_addr;
xfs_dir2_data_check(dp, bp);
/*
* Set up values for the loop.
@@ -514,7 +512,7 @@ xfs_dir2_block_getdents(
cook & 0x7fffffff, be64_to_cpu(dep->inumber),
DT_UNKNOWN)) {
*offset = cook & 0x7fffffff;
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return 0;
}
}
@@ -525,7 +523,7 @@ xfs_dir2_block_getdents(
*/
*offset = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0) &
0x7fffffff;
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return 0;
}
@@ -535,17 +533,17 @@ xfs_dir2_block_getdents(
static void
xfs_dir2_block_log_leaf(
xfs_trans_t *tp, /* transaction structure */
- xfs_dabuf_t *bp, /* block buffer */
+ struct xfs_buf *bp, /* block buffer */
int first, /* index of first logged leaf */
int last) /* index of last logged leaf */
{
- xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
xfs_dir2_leaf_entry_t *blp;
xfs_dir2_block_tail_t *btp;
btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
- xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr),
(uint)((char *)&blp[last + 1] - (char *)hdr - 1));
}
@@ -555,13 +553,13 @@ xfs_dir2_block_log_leaf(
static void
xfs_dir2_block_log_tail(
xfs_trans_t *tp, /* transaction structure */
- xfs_dabuf_t *bp) /* block buffer */
+ struct xfs_buf *bp) /* block buffer */
{
- xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
xfs_dir2_block_tail_t *btp;
btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
- xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr),
(uint)((char *)(btp + 1) - (char *)hdr - 1));
}
@@ -575,7 +573,7 @@ xfs_dir2_block_lookup(
{
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_inode_t *dp; /* incore inode */
@@ -593,7 +591,7 @@ xfs_dir2_block_lookup(
return error;
dp = args->dp;
mp = dp->i_mount;
- hdr = bp->data;
+ hdr = bp->b_addr;
xfs_dir2_data_check(dp, bp);
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
@@ -607,7 +605,7 @@ xfs_dir2_block_lookup(
*/
args->inumber = be64_to_cpu(dep->inumber);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
- xfs_da_brelse(args->trans, bp);
+ xfs_trans_brelse(args->trans, bp);
return XFS_ERROR(error);
}
@@ -617,13 +615,13 @@ xfs_dir2_block_lookup(
static int /* error */
xfs_dir2_block_lookup_int(
xfs_da_args_t *args, /* dir lookup arguments */
- xfs_dabuf_t **bpp, /* returned block buffer */
+ struct xfs_buf **bpp, /* returned block buffer */
int *entno) /* returned entry number */
{
xfs_dir2_dataptr_t addr; /* data entry address */
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_inode_t *dp; /* incore inode */
@@ -647,7 +645,7 @@ xfs_dir2_block_lookup_int(
return error;
}
ASSERT(bp != NULL);
- hdr = bp->data;
+ hdr = bp->b_addr;
xfs_dir2_data_check(dp, bp);
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
@@ -666,7 +664,7 @@ xfs_dir2_block_lookup_int(
high = mid - 1;
if (low > high) {
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
return XFS_ERROR(ENOENT);
}
}
@@ -714,7 +712,7 @@ xfs_dir2_block_lookup_int(
/*
* No match, release the buffer and return ENOENT.
*/
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
return XFS_ERROR(ENOENT);
}
@@ -728,7 +726,7 @@ xfs_dir2_block_removename(
{
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_inode_t *dp; /* incore inode */
@@ -753,7 +751,7 @@ xfs_dir2_block_removename(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- hdr = bp->data;
+ hdr = bp->b_addr;
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
@@ -790,10 +788,9 @@ xfs_dir2_block_removename(
* See if the size as a shortform is good enough.
*/
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
- if (size > XFS_IFORK_DSIZE(dp)) {
- xfs_da_buf_done(bp);
+ if (size > XFS_IFORK_DSIZE(dp))
return 0;
- }
+
/*
* If it works, do the conversion.
*/
@@ -810,7 +807,7 @@ xfs_dir2_block_replace(
{
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
xfs_inode_t *dp; /* incore inode */
@@ -829,7 +826,7 @@ xfs_dir2_block_replace(
}
dp = args->dp;
mp = dp->i_mount;
- hdr = bp->data;
+ hdr = bp->b_addr;
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
@@ -844,7 +841,6 @@ xfs_dir2_block_replace(
dep->inumber = cpu_to_be64(args->inumber);
xfs_dir2_data_log_entry(args->trans, bp, dep);
xfs_dir2_data_check(dp, bp);
- xfs_da_buf_done(bp);
return 0;
}
@@ -871,8 +867,8 @@ xfs_dir2_block_sort(
int /* error */
xfs_dir2_leaf_to_block(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *lbp, /* leaf buffer */
- xfs_dabuf_t *dbp) /* data buffer */
+ struct xfs_buf *lbp, /* leaf buffer */
+ struct xfs_buf *dbp) /* data buffer */
{
__be16 *bestsp; /* leaf bests table */
xfs_dir2_data_hdr_t *hdr; /* block header */
@@ -898,7 +894,7 @@ xfs_dir2_leaf_to_block(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- leaf = lbp->data;
+ leaf = lbp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
/*
@@ -914,11 +910,9 @@ xfs_dir2_leaf_to_block(
if ((error =
xfs_dir2_leaf_trim_data(args, lbp,
(xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
- goto out;
- } else {
- error = 0;
- goto out;
- }
+ return error;
+ } else
+ return 0;
}
/*
* Read the data block if we don't already have it, give up if it fails.
@@ -926,9 +920,9 @@ xfs_dir2_leaf_to_block(
if (dbp == NULL &&
(error = xfs_da_read_buf(tp, dp, mp->m_dirdatablk, -1, &dbp,
XFS_DATA_FORK))) {
- goto out;
+ return error;
}
- hdr = dbp->data;
+ hdr = dbp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
/*
* Size of the "leaf" area in the block.
@@ -944,10 +938,9 @@ xfs_dir2_leaf_to_block(
* If it's not free or is too short we can't do it.
*/
if (be16_to_cpu(dup->freetag) != XFS_DIR2_DATA_FREE_TAG ||
- be16_to_cpu(dup->length) < size) {
- error = 0;
- goto out;
- }
+ be16_to_cpu(dup->length) < size)
+ return 0;
+
/*
* Start converting it to block form.
*/
@@ -989,25 +982,17 @@ xfs_dir2_leaf_to_block(
* Pitch the old leaf block.
*/
error = xfs_da_shrink_inode(args, mp->m_dirleafblk, lbp);
- lbp = NULL;
- if (error) {
- goto out;
- }
+ if (error)
+ return error;
+
/*
* Now see if the resulting block can be shrunken to shortform.
*/
size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
- if (size > XFS_IFORK_DSIZE(dp)) {
- error = 0;
- goto out;
- }
+ if (size > XFS_IFORK_DSIZE(dp))
+ return 0;
+
return xfs_dir2_block_to_sf(args, dbp, size, &sfh);
-out:
- if (lbp)
- xfs_da_buf_done(lbp);
- if (dbp)
- xfs_da_buf_done(dbp);
- return error;
}
/*
@@ -1020,7 +1005,7 @@ xfs_dir2_sf_to_block(
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
@@ -1088,7 +1073,7 @@ xfs_dir2_sf_to_block(
kmem_free(sfp);
return error;
}
- hdr = bp->data;
+ hdr = bp->b_addr;
hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
/*
* Compute size of block "tail" area.
@@ -1217,6 +1202,5 @@ xfs_dir2_sf_to_block(
xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
xfs_dir2_block_log_tail(tp, bp);
xfs_dir2_data_check(dp, bp);
- xfs_da_buf_done(bp);
return 0;
}
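
The xfs_dir2_block.c hunks above all apply one idiom: the xfs_dabuf_t wrapper becomes a plain struct xfs_buf, block memory is reached through b_addr instead of the wrapper's data pointer, and the trailing xfs_da_buf_done() releases disappear because there is no wrapper left to tear down. A minimal sketch of the resulting access pattern (illustrative only, not a hunk from this patch; example_entry_inumber is a hypothetical helper and the code assumes the standard xfs_dir2 headers):

/*
 * Illustrative only: post-conversion access to a directory data block.
 * The buffer is an ordinary struct xfs_buf, so its mapped memory is
 * reached through b_addr and no xfs_da_buf_done() call is needed when
 * the caller is finished with it.
 */
static xfs_ino_t
example_entry_inumber(
	struct xfs_buf		*bp,	/* was xfs_dabuf_t *bp */
	xfs_dir2_data_aoff_t	offset)	/* byte offset of the entry */
{
	char			*base = bp->b_addr;	/* was bp->data */
	xfs_dir2_data_entry_t	*dep;

	dep = (xfs_dir2_data_entry_t *)(base + offset);
	return be64_to_cpu(dep->inumber);
}
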
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 2046988e9eb2..44ffd4d6bc91 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -42,8 +42,8 @@ xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
*/
void
xfs_dir2_data_check(
- xfs_inode_t *dp, /* incore inode pointer */
- xfs_dabuf_t *bp) /* data block's buffer */
+ struct xfs_inode *dp, /* incore inode pointer */
+ struct xfs_buf *bp) /* data block's buffer */
{
xfs_dir2_dataptr_t addr; /* addr for leaf lookup */
xfs_dir2_data_free_t *bf; /* bestfree table */
@@ -65,7 +65,7 @@ xfs_dir2_data_check(
struct xfs_name name;
mp = dp->i_mount;
- hdr = bp->data;
+ hdr = bp->b_addr;
bf = hdr->bestfree;
p = (char *)(hdr + 1);
@@ -389,9 +389,9 @@ int /* error */
xfs_dir2_data_init(
xfs_da_args_t *args, /* directory operation args */
xfs_dir2_db_t blkno, /* logical dir block number */
- xfs_dabuf_t **bpp) /* output block buffer */
+ struct xfs_buf **bpp) /* output block buffer */
{
- xfs_dabuf_t *bp; /* block buffer */
+ struct xfs_buf *bp; /* block buffer */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
@@ -417,7 +417,7 @@ xfs_dir2_data_init(
/*
* Initialize the header.
*/
- hdr = bp->data;
+ hdr = bp->b_addr;
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
@@ -449,16 +449,16 @@ xfs_dir2_data_init(
*/
void
xfs_dir2_data_log_entry(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* block buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp,
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
- xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
- xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
(uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
(char *)hdr - 1));
}
@@ -468,15 +468,15 @@ xfs_dir2_data_log_entry(
*/
void
xfs_dir2_data_log_header(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp) /* block buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
- xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
- xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
+ xfs_trans_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
}
/*
@@ -484,11 +484,11 @@ xfs_dir2_data_log_header(
*/
void
xfs_dir2_data_log_unused(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* block buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp,
xfs_dir2_data_unused_t *dup) /* data unused pointer */
{
- xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
@@ -496,13 +496,13 @@ xfs_dir2_data_log_unused(
/*
* Log the first part of the unused entry.
*/
- xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
(uint)((char *)&dup->length + sizeof(dup->length) -
1 - (char *)hdr));
/*
* Log the end (tag) of the unused entry.
*/
- xfs_da_log_buf(tp, bp,
+ xfs_trans_log_buf(tp, bp,
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
sizeof(xfs_dir2_data_off_t) - 1));
@@ -514,8 +514,8 @@ xfs_dir2_data_log_unused(
*/
void
xfs_dir2_data_make_free(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* block buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp,
xfs_dir2_data_aoff_t offset, /* starting byte offset */
xfs_dir2_data_aoff_t len, /* length in bytes */
int *needlogp, /* out: log header */
@@ -531,7 +531,7 @@ xfs_dir2_data_make_free(
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
mp = tp->t_mountp;
- hdr = bp->data;
+ hdr = bp->b_addr;
/*
* Figure out where the end of the data area is.
@@ -696,8 +696,8 @@ xfs_dir2_data_make_free(
*/
void
xfs_dir2_data_use_free(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* data block buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp,
xfs_dir2_data_unused_t *dup, /* unused entry */
xfs_dir2_data_aoff_t offset, /* starting offset to use */
xfs_dir2_data_aoff_t len, /* length to use */
@@ -713,7 +713,7 @@ xfs_dir2_data_use_free(
xfs_dir2_data_unused_t *newdup2; /* another new unused entry */
int oldlen; /* old unused entry's length */
- hdr = bp->data;
+ hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
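
The xfs_dir2_data.c logging helpers above swap xfs_da_log_buf() for xfs_trans_log_buf() but keep the same range convention: the last two arguments are inclusive byte offsets from the start of the buffer, which is why each call ends in "- 1". A minimal sketch of that arithmetic for a single entry (illustrative only; example_log_one_entry is a hypothetical name, and the range mirrors what xfs_dir2_data_log_entry computes):

/*
 * Illustrative only: dirty exactly the bytes of one data entry.
 * first/last are inclusive byte offsets from the start of bp->b_addr.
 */
static void
example_log_one_entry(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	xfs_dir2_data_entry_t	*dep)
{
	char	*base = bp->b_addr;
	uint	first = (uint)((char *)dep - base);
	uint	last = (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
			      base - 1);

	xfs_trans_log_buf(tp, bp, first, last);	/* was xfs_da_log_buf() */
}
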
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 397ffbcbab1d..0b296253bd01 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -38,15 +38,15 @@
* Local function declarations.
*/
#ifdef DEBUG
-static void xfs_dir2_leaf_check(xfs_inode_t *dp, xfs_dabuf_t *bp);
+static void xfs_dir2_leaf_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir2_leaf_check(dp, bp)
#endif
-static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, xfs_dabuf_t **lbpp,
- int *indexp, xfs_dabuf_t **dbpp);
-static void xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
+static int xfs_dir2_leaf_lookup_int(xfs_da_args_t *args, struct xfs_buf **lbpp,
+ int *indexp, struct xfs_buf **dbpp);
+static void xfs_dir2_leaf_log_bests(struct xfs_trans *tp, struct xfs_buf *bp,
int first, int last);
-static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
+static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_buf *bp);
/*
@@ -55,7 +55,7 @@ static void xfs_dir2_leaf_log_tail(struct xfs_trans *tp, struct xfs_dabuf *bp);
int /* error */
xfs_dir2_block_to_leaf(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *dbp) /* input block's buffer */
+ struct xfs_buf *dbp) /* input block's buffer */
{
__be16 *bestsp; /* leaf's bestsp entries */
xfs_dablk_t blkno; /* leaf block's bno */
@@ -64,7 +64,7 @@ xfs_dir2_block_to_leaf(
xfs_dir2_block_tail_t *btp; /* block's tail */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
- xfs_dabuf_t *lbp; /* leaf block's buffer */
+ struct xfs_buf *lbp; /* leaf block's buffer */
xfs_dir2_db_t ldb; /* leaf block's bno */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf's tail */
@@ -95,8 +95,8 @@ xfs_dir2_block_to_leaf(
return error;
}
ASSERT(lbp != NULL);
- leaf = lbp->data;
- hdr = dbp->data;
+ leaf = lbp->b_addr;
+ hdr = dbp->b_addr;
xfs_dir2_data_check(dp, dbp);
btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
@@ -143,7 +143,6 @@ xfs_dir2_block_to_leaf(
xfs_dir2_leaf_check(dp, lbp);
xfs_dir2_data_check(dp, dbp);
xfs_dir2_leaf_log_bests(tp, lbp, 0, 0);
- xfs_da_buf_done(lbp);
return 0;
}
@@ -282,7 +281,7 @@ xfs_dir2_leaf_addname(
__be16 *bestsp; /* freespace table in leaf */
int compact; /* need to compact leaves */
xfs_dir2_data_hdr_t *hdr; /* data block header */
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* data unused entry */
@@ -291,7 +290,7 @@ xfs_dir2_leaf_addname(
int highstale; /* index of next stale leaf */
int i; /* temporary, index */
int index; /* leaf table position */
- xfs_dabuf_t *lbp; /* leaf's buffer */
+ struct xfs_buf *lbp; /* leaf's buffer */
xfs_dir2_leaf_t *leaf; /* leaf structure */
int length; /* length of new entry */
xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */
@@ -328,7 +327,7 @@ xfs_dir2_leaf_addname(
* But if there are dup hash values the index is of the first of those.
*/
index = xfs_dir2_leaf_search_hash(args, lbp);
- leaf = lbp->data;
+ leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
length = xfs_dir2_data_entsize(args->namelen);
@@ -402,14 +401,13 @@ xfs_dir2_leaf_addname(
*/
if ((args->op_flags & XFS_DA_OP_JUSTCHECK) ||
args->total == 0) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return XFS_ERROR(ENOSPC);
}
/*
* Convert to node form.
*/
error = xfs_dir2_leaf_to_node(args, lbp);
- xfs_da_buf_done(lbp);
if (error)
return error;
/*
@@ -427,7 +425,7 @@ xfs_dir2_leaf_addname(
* a new data block.
*/
if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return use_block == -1 ? XFS_ERROR(ENOSPC) : 0;
}
/*
@@ -435,7 +433,7 @@ xfs_dir2_leaf_addname(
* changed anything.
*/
if (args->total == 0 && use_block == -1) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return XFS_ERROR(ENOSPC);
}
/*
@@ -466,14 +464,14 @@ xfs_dir2_leaf_addname(
*/
if ((error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE,
&use_block))) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return error;
}
/*
* Initialize the block.
*/
if ((error = xfs_dir2_data_init(args, use_block, &dbp))) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return error;
}
/*
@@ -493,7 +491,7 @@ xfs_dir2_leaf_addname(
*/
else
xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
- hdr = dbp->data;
+ hdr = dbp->b_addr;
bestsp[use_block] = hdr->bestfree[0].length;
grown = 1;
}
@@ -505,10 +503,10 @@ xfs_dir2_leaf_addname(
if ((error =
xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, use_block),
-1, &dbp, XFS_DATA_FORK))) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return error;
}
- hdr = dbp->data;
+ hdr = dbp->b_addr;
grown = 0;
}
xfs_dir2_data_check(dp, dbp);
@@ -570,9 +568,7 @@ xfs_dir2_leaf_addname(
xfs_dir2_leaf_log_header(tp, lbp);
xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
xfs_dir2_leaf_check(dp, lbp);
- xfs_da_buf_done(lbp);
xfs_dir2_data_check(dp, dbp);
- xfs_da_buf_done(dbp);
return 0;
}
@@ -583,8 +579,8 @@ xfs_dir2_leaf_addname(
*/
STATIC void
xfs_dir2_leaf_check(
- xfs_inode_t *dp, /* incore directory inode */
- xfs_dabuf_t *bp) /* leaf's buffer */
+ struct xfs_inode *dp, /* incore directory inode */
+ struct xfs_buf *bp) /* leaf's buffer */
{
int i; /* leaf index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -592,7 +588,7 @@ xfs_dir2_leaf_check(
xfs_mount_t *mp; /* filesystem mount point */
int stale; /* count of stale leaves */
- leaf = bp->data;
+ leaf = bp->b_addr;
mp = dp->i_mount;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
/*
@@ -628,14 +624,14 @@ xfs_dir2_leaf_check(
void
xfs_dir2_leaf_compact(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *bp) /* leaf buffer */
+ struct xfs_buf *bp) /* leaf buffer */
{
int from; /* source leaf index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
int loglow; /* first leaf entry to log */
int to; /* target leaf index */
- leaf = bp->data;
+ leaf = bp->b_addr;
if (!leaf->hdr.stale) {
return;
}
@@ -677,7 +673,7 @@ xfs_dir2_leaf_compact(
*/
void
xfs_dir2_leaf_compact_x1(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
int *indexp, /* insertion index */
int *lowstalep, /* out: stale entry before us */
int *highstalep, /* out: stale entry after us */
@@ -693,7 +689,7 @@ xfs_dir2_leaf_compact_x1(
int newindex=0; /* new insertion index */
int to; /* destination copy index */
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(be16_to_cpu(leaf->hdr.stale) > 1);
index = *indexp;
@@ -763,6 +759,218 @@ xfs_dir2_leaf_compact_x1(
*highstalep = highstale;
}
+struct xfs_dir2_leaf_map_info {
+ xfs_extlen_t map_blocks; /* number of fsbs in map */
+ xfs_dablk_t map_off; /* last mapped file offset */
+ int map_size; /* total entries in *map */
+ int map_valid; /* valid entries in *map */
+ int nmap; /* mappings to ask xfs_bmapi */
+ xfs_dir2_db_t curdb; /* db for current block */
+ int ra_current; /* number of read-ahead blks */
+ int ra_index; /* *map index for read-ahead */
+ int ra_offset; /* map entry offset for ra */
+ int ra_want; /* readahead count wanted */
+ struct xfs_bmbt_irec map[]; /* map vector for blocks */
+};
+
+STATIC int
+xfs_dir2_leaf_readbuf(
+ struct xfs_inode *dp,
+ size_t bufsize,
+ struct xfs_dir2_leaf_map_info *mip,
+ xfs_dir2_off_t *curoff,
+ struct xfs_buf **bpp)
+{
+ struct xfs_mount *mp = dp->i_mount;
+ struct xfs_buf *bp = *bpp;
+ struct xfs_bmbt_irec *map = mip->map;
+ int error = 0;
+ int length;
+ int i;
+ int j;
+
+ /*
+ * If we have a buffer, we need to release it and
+ * take it out of the mapping.
+ */
+
+ if (bp) {
+ xfs_trans_brelse(NULL, bp);
+ bp = NULL;
+ mip->map_blocks -= mp->m_dirblkfsbs;
+ /*
+ * Loop to get rid of the extents for the
+ * directory block.
+ */
+ for (i = mp->m_dirblkfsbs; i > 0; ) {
+ j = min_t(int, map->br_blockcount, i);
+ map->br_blockcount -= j;
+ map->br_startblock += j;
+ map->br_startoff += j;
+ /*
+ * If mapping is done, pitch it from
+ * the table.
+ */
+ if (!map->br_blockcount && --mip->map_valid)
+ memmove(&map[0], &map[1],
+ sizeof(map[0]) * mip->map_valid);
+ i -= j;
+ }
+ }
+
+ /*
+ * Recalculate the readahead blocks wanted.
+ */
+ mip->ra_want = howmany(bufsize + mp->m_dirblksize,
+ mp->m_sb.sb_blocksize) - 1;
+ ASSERT(mip->ra_want >= 0);
+
+ /*
+ * If we don't have as many as we want, and we haven't
+ * run out of data blocks, get some more mappings.
+ */
+ if (1 + mip->ra_want > mip->map_blocks &&
+ mip->map_off < xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
+ /*
+ * Get more bmaps, fill in after the ones
+ * we already have in the table.
+ */
+ mip->nmap = mip->map_size - mip->map_valid;
+ error = xfs_bmapi_read(dp, mip->map_off,
+ xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET) -
+ mip->map_off,
+ &map[mip->map_valid], &mip->nmap, 0);
+
+ /*
+ * Don't know if we should ignore this or try to return an
+ * error. The trouble with returning errors is that readdir
+ * will just stop without actually passing the error through.
+ */
+ if (error)
+ goto out; /* XXX */
+
+ /*
+ * If we got all the mappings we asked for, set the final map
+ * offset based on the last bmap value received. Otherwise,
+ * we've reached the end.
+ */
+ if (mip->nmap == mip->map_size - mip->map_valid) {
+ i = mip->map_valid + mip->nmap - 1;
+ mip->map_off = map[i].br_startoff + map[i].br_blockcount;
+ } else
+ mip->map_off = xfs_dir2_byte_to_da(mp,
+ XFS_DIR2_LEAF_OFFSET);
+
+ /*
+ * Look for holes in the mapping, and eliminate them. Count up
+ * the valid blocks.
+ */
+ for (i = mip->map_valid; i < mip->map_valid + mip->nmap; ) {
+ if (map[i].br_startblock == HOLESTARTBLOCK) {
+ mip->nmap--;
+ length = mip->map_valid + mip->nmap - i;
+ if (length)
+ memmove(&map[i], &map[i + 1],
+ sizeof(map[i]) * length);
+ } else {
+ mip->map_blocks += map[i].br_blockcount;
+ i++;
+ }
+ }
+ mip->map_valid += mip->nmap;
+ }
+
+ /*
+ * No valid mappings, so no more data blocks.
+ */
+ if (!mip->map_valid) {
+ *curoff = xfs_dir2_da_to_byte(mp, mip->map_off);
+ goto out;
+ }
+
+ /*
+ * Read the directory block starting at the first mapping.
+ */
+ mip->curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
+ error = xfs_da_read_buf(NULL, dp, map->br_startoff,
+ map->br_blockcount >= mp->m_dirblkfsbs ?
+ XFS_FSB_TO_DADDR(mp, map->br_startblock) : -1,
+ &bp, XFS_DATA_FORK);
+
+ /*
+ * Should just skip over the data block instead of giving up.
+ */
+ if (error)
+ goto out; /* XXX */
+
+ /*
+ * Adjust the current amount of read-ahead: we just read a block that
+ * was previously ra.
+ */
+ if (mip->ra_current)
+ mip->ra_current -= mp->m_dirblkfsbs;
+
+ /*
+ * Do we need more readahead?
+ */
+ for (mip->ra_index = mip->ra_offset = i = 0;
+ mip->ra_want > mip->ra_current && i < mip->map_blocks;
+ i += mp->m_dirblkfsbs) {
+ ASSERT(mip->ra_index < mip->map_valid);
+ /*
+ * Read-ahead a contiguous directory block.
+ */
+ if (i > mip->ra_current &&
+ map[mip->ra_index].br_blockcount >= mp->m_dirblkfsbs) {
+ xfs_buf_readahead(mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp,
+ map[mip->ra_index].br_startblock +
+ mip->ra_offset),
+ (int)BTOBB(mp->m_dirblksize));
+ mip->ra_current = i;
+ }
+
+ /*
+ * Read-ahead a non-contiguous directory block. This doesn't
+ * use our mapping, but this is a very rare case.
+ */
+ else if (i > mip->ra_current) {
+ xfs_da_reada_buf(NULL, dp,
+ map[mip->ra_index].br_startoff +
+ mip->ra_offset,
+ XFS_DATA_FORK);
+ mip->ra_current = i;
+ }
+
+ /*
+ * Advance offset through the mapping table.
+ */
+ for (j = 0; j < mp->m_dirblkfsbs; j++) {
+ /*
+ * The rest of this extent but not more than a dir
+ * block.
+ */
+ length = min_t(int, mp->m_dirblkfsbs,
+ map[mip->ra_index].br_blockcount -
+ mip->ra_offset);
+ j += length;
+ mip->ra_offset += length;
+
+ /*
+ * Advance to the next mapping if this one is used up.
+ */
+ if (mip->ra_offset == map[mip->ra_index].br_blockcount) {
+ mip->ra_offset = 0;
+ mip->ra_index++;
+ }
+ }
+ }
+
+out:
+ *bpp = bp;
+ return error;
+}
+
/*
* Getdents (readdir) for leaf and node directories.
* This reads the data blocks only, so is the same for both forms.
@@ -775,30 +983,18 @@ xfs_dir2_leaf_getdents(
xfs_off_t *offset,
filldir_t filldir)
{
- xfs_dabuf_t *bp; /* data block buffer */
- int byteoff; /* offset in current block */
- xfs_dir2_db_t curdb; /* db for current block */
- xfs_dir2_off_t curoff; /* current overall offset */
+ struct xfs_buf *bp = NULL; /* data block buffer */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_unused_t *dup; /* unused entry */
int error = 0; /* error return value */
- int i; /* temporary loop index */
- int j; /* temporary loop index */
int length; /* temporary length value */
- xfs_bmbt_irec_t *map; /* map vector for blocks */
- xfs_extlen_t map_blocks; /* number of fsbs in map */
- xfs_dablk_t map_off; /* last mapped file offset */
- int map_size; /* total entries in *map */
- int map_valid; /* valid entries in *map */
xfs_mount_t *mp; /* filesystem mount point */
+ int byteoff; /* offset in current block */
+ xfs_dir2_off_t curoff; /* current overall offset */
xfs_dir2_off_t newoff; /* new curoff after new blk */
- int nmap; /* mappings to ask xfs_bmapi */
char *ptr = NULL; /* pointer to current data */
- int ra_current; /* number of read-ahead blks */
- int ra_index; /* *map index for read-ahead */
- int ra_offset; /* map entry offset for ra */
- int ra_want; /* readahead count wanted */
+ struct xfs_dir2_leaf_map_info *map_info;
/*
* If the offset is at or past the largest allowed value,
@@ -814,10 +1010,12 @@ xfs_dir2_leaf_getdents(
* buffer size, the directory block size, and the filesystem
* block size.
*/
- map_size = howmany(bufsize + mp->m_dirblksize, mp->m_sb.sb_blocksize);
- map = kmem_alloc(map_size * sizeof(*map), KM_SLEEP);
- map_valid = ra_index = ra_offset = ra_current = map_blocks = 0;
- bp = NULL;
+ length = howmany(bufsize + mp->m_dirblksize,
+ mp->m_sb.sb_blocksize);
+ map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
+ (length * sizeof(struct xfs_bmbt_irec)),
+ KM_SLEEP);
+ map_info->map_size = length;
/*
* Inside the loop we keep the main offset value as a byte offset
@@ -829,7 +1027,9 @@ xfs_dir2_leaf_getdents(
* Force this conversion through db so we truncate the offset
* down to get the start of the data block.
*/
- map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff));
+ map_info->map_off = xfs_dir2_db_to_da(mp,
+ xfs_dir2_byte_to_db(mp, curoff));
+
/*
* Loop over directory entries until we reach the end offset.
* Get more blocks and readahead as necessary.
@@ -839,191 +1039,17 @@ xfs_dir2_leaf_getdents(
* If we have no buffer, or we're off the end of the
* current buffer, need to get another one.
*/
- if (!bp || ptr >= (char *)bp->data + mp->m_dirblksize) {
- /*
- * If we have a buffer, we need to release it and
- * take it out of the mapping.
- */
- if (bp) {
- xfs_da_brelse(NULL, bp);
- bp = NULL;
- map_blocks -= mp->m_dirblkfsbs;
- /*
- * Loop to get rid of the extents for the
- * directory block.
- */
- for (i = mp->m_dirblkfsbs; i > 0; ) {
- j = MIN((int)map->br_blockcount, i);
- map->br_blockcount -= j;
- map->br_startblock += j;
- map->br_startoff += j;
- /*
- * If mapping is done, pitch it from
- * the table.
- */
- if (!map->br_blockcount && --map_valid)
- memmove(&map[0], &map[1],
- sizeof(map[0]) *
- map_valid);
- i -= j;
- }
- }
- /*
- * Recalculate the readahead blocks wanted.
- */
- ra_want = howmany(bufsize + mp->m_dirblksize,
- mp->m_sb.sb_blocksize) - 1;
- ASSERT(ra_want >= 0);
+ if (!bp || ptr >= (char *)bp->b_addr + mp->m_dirblksize) {
- /*
- * If we don't have as many as we want, and we haven't
- * run out of data blocks, get some more mappings.
- */
- if (1 + ra_want > map_blocks &&
- map_off <
- xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) {
- /*
- * Get more bmaps, fill in after the ones
- * we already have in the table.
- */
- nmap = map_size - map_valid;
- error = xfs_bmapi_read(dp, map_off,
- xfs_dir2_byte_to_da(mp,
- XFS_DIR2_LEAF_OFFSET) - map_off,
- &map[map_valid], &nmap, 0);
- /*
- * Don't know if we should ignore this or
- * try to return an error.
- * The trouble with returning errors
- * is that readdir will just stop without
- * actually passing the error through.
- */
- if (error)
- break; /* XXX */
- /*
- * If we got all the mappings we asked for,
- * set the final map offset based on the
- * last bmap value received.
- * Otherwise, we've reached the end.
- */
- if (nmap == map_size - map_valid)
- map_off =
- map[map_valid + nmap - 1].br_startoff +
- map[map_valid + nmap - 1].br_blockcount;
- else
- map_off =
- xfs_dir2_byte_to_da(mp,
- XFS_DIR2_LEAF_OFFSET);
- /*
- * Look for holes in the mapping, and
- * eliminate them. Count up the valid blocks.
- */
- for (i = map_valid; i < map_valid + nmap; ) {
- if (map[i].br_startblock ==
- HOLESTARTBLOCK) {
- nmap--;
- length = map_valid + nmap - i;
- if (length)
- memmove(&map[i],
- &map[i + 1],
- sizeof(map[i]) *
- length);
- } else {
- map_blocks +=
- map[i].br_blockcount;
- i++;
- }
- }
- map_valid += nmap;
- }
- /*
- * No valid mappings, so no more data blocks.
- */
- if (!map_valid) {
- curoff = xfs_dir2_da_to_byte(mp, map_off);
+ error = xfs_dir2_leaf_readbuf(dp, bufsize, map_info,
+ &curoff, &bp);
+ if (error || !map_info->map_valid)
break;
- }
- /*
- * Read the directory block starting at the first
- * mapping.
- */
- curdb = xfs_dir2_da_to_db(mp, map->br_startoff);
- error = xfs_da_read_buf(NULL, dp, map->br_startoff,
- map->br_blockcount >= mp->m_dirblkfsbs ?
- XFS_FSB_TO_DADDR(mp, map->br_startblock) :
- -1,
- &bp, XFS_DATA_FORK);
- /*
- * Should just skip over the data block instead
- * of giving up.
- */
- if (error)
- break; /* XXX */
- /*
- * Adjust the current amount of read-ahead: we just
- * read a block that was previously ra.
- */
- if (ra_current)
- ra_current -= mp->m_dirblkfsbs;
- /*
- * Do we need more readahead?
- */
- for (ra_index = ra_offset = i = 0;
- ra_want > ra_current && i < map_blocks;
- i += mp->m_dirblkfsbs) {
- ASSERT(ra_index < map_valid);
- /*
- * Read-ahead a contiguous directory block.
- */
- if (i > ra_current &&
- map[ra_index].br_blockcount >=
- mp->m_dirblkfsbs) {
- xfs_buf_readahead(mp->m_ddev_targp,
- XFS_FSB_TO_DADDR(mp,
- map[ra_index].br_startblock +
- ra_offset),
- (int)BTOBB(mp->m_dirblksize));
- ra_current = i;
- }
- /*
- * Read-ahead a non-contiguous directory block.
- * This doesn't use our mapping, but this
- * is a very rare case.
- */
- else if (i > ra_current) {
- (void)xfs_da_reada_buf(NULL, dp,
- map[ra_index].br_startoff +
- ra_offset, XFS_DATA_FORK);
- ra_current = i;
- }
- /*
- * Advance offset through the mapping table.
- */
- for (j = 0; j < mp->m_dirblkfsbs; j++) {
- /*
- * The rest of this extent but not
- * more than a dir block.
- */
- length = MIN(mp->m_dirblkfsbs,
- (int)(map[ra_index].br_blockcount -
- ra_offset));
- j += length;
- ra_offset += length;
- /*
- * Advance to the next mapping if
- * this one is used up.
- */
- if (ra_offset ==
- map[ra_index].br_blockcount) {
- ra_offset = 0;
- ra_index++;
- }
- }
- }
+
/*
* Having done a read, we need to set a new offset.
*/
- newoff = xfs_dir2_db_off_to_byte(mp, curdb, 0);
+ newoff = xfs_dir2_db_off_to_byte(mp, map_info->curdb, 0);
/*
* Start of the current block.
*/
@@ -1034,8 +1060,8 @@ xfs_dir2_leaf_getdents(
*/
else if (curoff > newoff)
ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
- curdb);
- hdr = bp->data;
+ map_info->curdb);
+ hdr = bp->b_addr;
xfs_dir2_data_check(dp, bp);
/*
* Find our position in the block.
@@ -1117,9 +1143,9 @@ xfs_dir2_leaf_getdents(
*offset = XFS_DIR2_MAX_DATAPTR & 0x7fffffff;
else
*offset = xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff;
- kmem_free(map);
+ kmem_free(map_info);
if (bp)
- xfs_da_brelse(NULL, bp);
+ xfs_trans_brelse(NULL, bp);
return error;
}
@@ -1130,10 +1156,10 @@ int
xfs_dir2_leaf_init(
xfs_da_args_t *args, /* operation arguments */
xfs_dir2_db_t bno, /* directory block number */
- xfs_dabuf_t **bpp, /* out: leaf buffer */
+ struct xfs_buf **bpp, /* out: leaf buffer */
int magic) /* magic number for block */
{
- xfs_dabuf_t *bp; /* leaf buffer */
+ struct xfs_buf *bp; /* leaf buffer */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1156,7 +1182,7 @@ xfs_dir2_leaf_init(
return error;
}
ASSERT(bp != NULL);
- leaf = bp->data;
+ leaf = bp->b_addr;
/*
* Initialize the header.
*/
@@ -1186,7 +1212,7 @@ xfs_dir2_leaf_init(
static void
xfs_dir2_leaf_log_bests(
xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
int first, /* first entry to log */
int last) /* last entry to log */
{
@@ -1195,12 +1221,12 @@ xfs_dir2_leaf_log_bests(
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf);
firstb = xfs_dir2_leaf_bests_p(ltp) + first;
lastb = xfs_dir2_leaf_bests_p(ltp) + last;
- xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf),
(uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1));
}
@@ -1210,7 +1236,7 @@ xfs_dir2_leaf_log_bests(
void
xfs_dir2_leaf_log_ents(
xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
int first, /* first entry to log */
int last) /* last entry to log */
{
@@ -1218,12 +1244,12 @@ xfs_dir2_leaf_log_ents(
xfs_dir2_leaf_entry_t *lastlep; /* pointer to last entry */
xfs_dir2_leaf_t *leaf; /* leaf structure */
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
firstlep = &leaf->ents[first];
lastlep = &leaf->ents[last];
- xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
(uint)((char *)lastlep - (char *)leaf + sizeof(*lastlep) - 1));
}
@@ -1232,15 +1258,15 @@ xfs_dir2_leaf_log_ents(
*/
void
xfs_dir2_leaf_log_header(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp) /* leaf buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
xfs_dir2_leaf_t *leaf; /* leaf structure */
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
- xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
(uint)(sizeof(leaf->hdr) - 1));
}
@@ -1249,18 +1275,18 @@ xfs_dir2_leaf_log_header(
*/
STATIC void
xfs_dir2_leaf_log_tail(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp) /* leaf buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
xfs_mount_t *mp; /* filesystem mount point */
mp = tp->t_mountp;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
- xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
(uint)(mp->m_dirblksize - 1));
}
@@ -1273,12 +1299,12 @@ int
xfs_dir2_leaf_lookup(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
int index; /* found entry index */
- xfs_dabuf_t *lbp; /* leaf buffer */
+ struct xfs_buf *lbp; /* leaf buffer */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
@@ -1294,7 +1320,7 @@ xfs_dir2_leaf_lookup(
tp = args->trans;
dp = args->dp;
xfs_dir2_leaf_check(dp, lbp);
- leaf = lbp->data;
+ leaf = lbp->b_addr;
/*
* Get to the leaf entry and contained data entry address.
*/
@@ -1303,15 +1329,15 @@ xfs_dir2_leaf_lookup(
* Point to the data entry.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)dbp->data +
+ ((char *)dbp->b_addr +
xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
/*
* Return the found inode number & CI name if appropriate
*/
args->inumber = be64_to_cpu(dep->inumber);
error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
- xfs_da_brelse(tp, dbp);
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, dbp);
+ xfs_trans_brelse(tp, lbp);
return XFS_ERROR(error);
}
@@ -1324,17 +1350,17 @@ xfs_dir2_leaf_lookup(
static int /* error */
xfs_dir2_leaf_lookup_int(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t **lbpp, /* out: leaf buffer */
+ struct xfs_buf **lbpp, /* out: leaf buffer */
int *indexp, /* out: index in leaf block */
- xfs_dabuf_t **dbpp) /* out: data buffer */
+ struct xfs_buf **dbpp) /* out: data buffer */
{
xfs_dir2_db_t curdb = -1; /* current data block number */
- xfs_dabuf_t *dbp = NULL; /* data buffer */
+ struct xfs_buf *dbp = NULL; /* data buffer */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
int index; /* index in leaf block */
- xfs_dabuf_t *lbp; /* leaf buffer */
+ struct xfs_buf *lbp; /* leaf buffer */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_mount_t *mp; /* filesystem mount point */
@@ -1354,7 +1380,7 @@ xfs_dir2_leaf_lookup_int(
if (error)
return error;
*lbpp = lbp;
- leaf = lbp->data;
+ leaf = lbp->b_addr;
xfs_dir2_leaf_check(dp, lbp);
/*
* Look for the first leaf entry with our hash value.
@@ -1382,12 +1408,12 @@ xfs_dir2_leaf_lookup_int(
*/
if (newdb != curdb) {
if (dbp)
- xfs_da_brelse(tp, dbp);
+ xfs_trans_brelse(tp, dbp);
error = xfs_da_read_buf(tp, dp,
xfs_dir2_db_to_da(mp, newdb),
-1, &dbp, XFS_DATA_FORK);
if (error) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return error;
}
xfs_dir2_data_check(dp, dbp);
@@ -1396,7 +1422,7 @@ xfs_dir2_leaf_lookup_int(
/*
* Point to the data entry.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)dbp->data +
+ dep = (xfs_dir2_data_entry_t *)((char *)dbp->b_addr +
xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
/*
* Compare name and if it's an exact match, return the index
@@ -1424,12 +1450,12 @@ xfs_dir2_leaf_lookup_int(
if (args->cmpresult == XFS_CMP_CASE) {
ASSERT(cidb != -1);
if (cidb != curdb) {
- xfs_da_brelse(tp, dbp);
+ xfs_trans_brelse(tp, dbp);
error = xfs_da_read_buf(tp, dp,
xfs_dir2_db_to_da(mp, cidb),
-1, &dbp, XFS_DATA_FORK);
if (error) {
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return error;
}
}
@@ -1441,8 +1467,8 @@ xfs_dir2_leaf_lookup_int(
*/
ASSERT(cidb == -1);
if (dbp)
- xfs_da_brelse(tp, dbp);
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, dbp);
+ xfs_trans_brelse(tp, lbp);
return XFS_ERROR(ENOENT);
}
@@ -1456,13 +1482,13 @@ xfs_dir2_leaf_removename(
__be16 *bestsp; /* leaf block best freespace */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data entry structure */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
xfs_dir2_db_t i; /* temporary data block # */
int index; /* index into leaf entries */
- xfs_dabuf_t *lbp; /* leaf buffer */
+ struct xfs_buf *lbp; /* leaf buffer */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
@@ -1483,8 +1509,8 @@ xfs_dir2_leaf_removename(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- leaf = lbp->data;
- hdr = dbp->data;
+ leaf = lbp->b_addr;
+ hdr = dbp->b_addr;
xfs_dir2_data_check(dp, dbp);
/*
* Point to the leaf entry, use that to point to the data entry.
@@ -1541,12 +1567,9 @@ xfs_dir2_leaf_removename(
* Just go on, returning success, leaving the
* empty block in place.
*/
- if (error == ENOSPC && args->total == 0) {
- xfs_da_buf_done(dbp);
+ if (error == ENOSPC && args->total == 0)
error = 0;
- }
xfs_dir2_leaf_check(dp, lbp);
- xfs_da_buf_done(lbp);
return error;
}
dbp = NULL;
@@ -1577,10 +1600,9 @@ xfs_dir2_leaf_removename(
/*
* If the data block was not the first one, drop it.
*/
- else if (db != mp->m_dirdatablk && dbp != NULL) {
- xfs_da_buf_done(dbp);
+ else if (db != mp->m_dirdatablk)
dbp = NULL;
- }
+
xfs_dir2_leaf_check(dp, lbp);
/*
* See if we can convert to block form.
@@ -1595,12 +1617,12 @@ int /* error */
xfs_dir2_leaf_replace(
xfs_da_args_t *args) /* operation arguments */
{
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
int index; /* index of leaf entry */
- xfs_dabuf_t *lbp; /* leaf buffer */
+ struct xfs_buf *lbp; /* leaf buffer */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_trans_t *tp; /* transaction pointer */
@@ -1614,7 +1636,7 @@ xfs_dir2_leaf_replace(
return error;
}
dp = args->dp;
- leaf = lbp->data;
+ leaf = lbp->b_addr;
/*
* Point to the leaf entry, get data address from it.
*/
@@ -1623,7 +1645,7 @@ xfs_dir2_leaf_replace(
* Point to the data entry.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)dbp->data +
+ ((char *)dbp->b_addr +
xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
ASSERT(args->inumber != be64_to_cpu(dep->inumber));
/*
@@ -1632,9 +1654,8 @@ xfs_dir2_leaf_replace(
dep->inumber = cpu_to_be64(args->inumber);
tp = args->trans;
xfs_dir2_data_log_entry(tp, dbp, dep);
- xfs_da_buf_done(dbp);
xfs_dir2_leaf_check(dp, lbp);
- xfs_da_brelse(tp, lbp);
+ xfs_trans_brelse(tp, lbp);
return 0;
}
@@ -1646,7 +1667,7 @@ xfs_dir2_leaf_replace(
int /* index value */
xfs_dir2_leaf_search_hash(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *lbp) /* leaf buffer */
+ struct xfs_buf *lbp) /* leaf buffer */
{
xfs_dahash_t hash=0; /* hash from this entry */
xfs_dahash_t hashwant; /* hash value looking for */
@@ -1656,7 +1677,7 @@ xfs_dir2_leaf_search_hash(
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
int mid=0; /* current leaf index */
- leaf = lbp->data;
+ leaf = lbp->b_addr;
#ifndef __KERNEL__
if (!leaf->hdr.count)
return 0;
@@ -1699,11 +1720,11 @@ xfs_dir2_leaf_search_hash(
int /* error */
xfs_dir2_leaf_trim_data(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *lbp, /* leaf buffer */
+ struct xfs_buf *lbp, /* leaf buffer */
xfs_dir2_db_t db) /* data block number */
{
__be16 *bestsp; /* leaf bests table */
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return value */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -1722,12 +1743,12 @@ xfs_dir2_leaf_trim_data(
return error;
}
- leaf = lbp->data;
+ leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
#ifdef DEBUG
{
- struct xfs_dir2_data_hdr *hdr = dbp->data;
+ struct xfs_dir2_data_hdr *hdr = dbp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
ASSERT(be16_to_cpu(hdr->bestfree[0].length) ==
@@ -1741,7 +1762,7 @@ xfs_dir2_leaf_trim_data(
*/
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
ASSERT(error != ENOSPC);
- xfs_da_brelse(tp, dbp);
+ xfs_trans_brelse(tp, dbp);
return error;
}
/*
@@ -1781,10 +1802,10 @@ xfs_dir2_node_to_leaf(
xfs_da_args_t *args; /* operation arguments */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
- xfs_dabuf_t *fbp; /* buffer for freespace block */
+ struct xfs_buf *fbp; /* buffer for freespace block */
xfs_fileoff_t fo; /* freespace file offset */
xfs_dir2_free_t *free; /* freespace structure */
- xfs_dabuf_t *lbp; /* buffer for leaf block */
+ struct xfs_buf *lbp; /* buffer for leaf block */
xfs_dir2_leaf_tail_t *ltp; /* tail of leaf structure */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_mount_t *mp; /* filesystem mount point */
@@ -1838,7 +1859,7 @@ xfs_dir2_node_to_leaf(
if (XFS_FSB_TO_B(mp, fo) > XFS_DIR2_LEAF_OFFSET + mp->m_dirblksize)
return 0;
lbp = state->path.blk[0].bp;
- leaf = lbp->data;
+ leaf = lbp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
* Read the freespace block.
@@ -1847,7 +1868,7 @@ xfs_dir2_node_to_leaf(
XFS_DATA_FORK))) {
return error;
}
- free = fbp->data;
+ free = fbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
ASSERT(!free->hdr.firstdb);
@@ -1857,7 +1878,7 @@ xfs_dir2_node_to_leaf(
*/
if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) >
mp->m_dirblksize) {
- xfs_da_brelse(tp, fbp);
+ xfs_trans_brelse(tp, fbp);
return 0;
}
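
With the dabuf layer gone, the xfs_dir2_leaf.c code above releases a buffer with a single xfs_trans_brelse() call, so the old "goto out" unwinding built around xfs_da_buf_done() collapses into direct returns and explicit releases become xfs_trans_brelse() (compare the new bodies of xfs_dir2_leaf_to_block and xfs_dir2_leaf_removename). A minimal sketch of the resulting early-return style (illustrative only; example_peek_leaf_magic is a hypothetical helper, not part of the patch):

/*
 * Illustrative only: read a leaf block, validate it, and release it.
 * Error paths return directly; the only cleanup needed is
 * xfs_trans_brelse() on the one buffer we hold.
 */
STATIC int
example_peek_leaf_magic(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno)
{
	struct xfs_buf		*bp;
	xfs_dir2_leaf_t		*leaf;
	int			error;

	error = xfs_da_read_buf(tp, dp, bno, -1, &bp, XFS_DATA_FORK);
	if (error)
		return error;

	leaf = bp->b_addr;
	if (leaf->hdr.info.magic != cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)) {
		xfs_trans_brelse(tp, bp);	/* was xfs_da_brelse() */
		return XFS_ERROR(EFSCORRUPTED);
	}

	xfs_trans_brelse(tp, bp);
	return 0;
}
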
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index b0f26780449d..6c7052406605 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -36,20 +36,20 @@
/*
* Function declarations.
*/
-static void xfs_dir2_free_log_header(xfs_trans_t *tp, xfs_dabuf_t *bp);
-static int xfs_dir2_leafn_add(xfs_dabuf_t *bp, xfs_da_args_t *args, int index);
+static int xfs_dir2_leafn_add(struct xfs_buf *bp, xfs_da_args_t *args,
+ int index);
#ifdef DEBUG
-static void xfs_dir2_leafn_check(xfs_inode_t *dp, xfs_dabuf_t *bp);
+static void xfs_dir2_leafn_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir2_leafn_check(dp, bp)
#endif
-static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, xfs_dabuf_t *bp_s,
- int start_s, xfs_dabuf_t *bp_d, int start_d,
- int count);
+static void xfs_dir2_leafn_moveents(xfs_da_args_t *args, struct xfs_buf *bp_s,
+ int start_s, struct xfs_buf *bp_d,
+ int start_d, int count);
static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *blk1,
xfs_da_state_blk_t *blk2);
-static int xfs_dir2_leafn_remove(xfs_da_args_t *args, xfs_dabuf_t *bp,
+static int xfs_dir2_leafn_remove(xfs_da_args_t *args, struct xfs_buf *bp,
int index, xfs_da_state_blk_t *dblk,
int *rval);
static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
@@ -60,16 +60,16 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
*/
STATIC void
xfs_dir2_free_log_bests(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp, /* freespace buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp,
int first, /* first entry to log */
int last) /* last entry to log */
{
xfs_dir2_free_t *free; /* freespace structure */
- free = bp->data;
+ free = bp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
- xfs_da_log_buf(tp, bp,
+ xfs_trans_log_buf(tp, bp,
(uint)((char *)&free->bests[first] - (char *)free),
(uint)((char *)&free->bests[last] - (char *)free +
sizeof(free->bests[0]) - 1));
@@ -80,14 +80,14 @@ xfs_dir2_free_log_bests(
*/
static void
xfs_dir2_free_log_header(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_dabuf_t *bp) /* freespace buffer */
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
xfs_dir2_free_t *free; /* freespace structure */
- free = bp->data;
+ free = bp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
- xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
+ xfs_trans_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
(uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
}
@@ -99,11 +99,11 @@ xfs_dir2_free_log_header(
int /* error */
xfs_dir2_leaf_to_node(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *lbp) /* leaf buffer */
+ struct xfs_buf *lbp) /* leaf buffer */
{
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return value */
- xfs_dabuf_t *fbp; /* freespace buffer */
+ struct xfs_buf *fbp; /* freespace buffer */
xfs_dir2_db_t fdb; /* freespace block number */
xfs_dir2_free_t *free; /* freespace structure */
__be16 *from; /* pointer to freespace entry */
@@ -136,8 +136,8 @@ xfs_dir2_leaf_to_node(
return error;
}
ASSERT(fbp != NULL);
- free = fbp->data;
- leaf = lbp->data;
+ free = fbp->b_addr;
+ leaf = lbp->b_addr;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
/*
* Initialize the freespace block header.
@@ -164,7 +164,6 @@ xfs_dir2_leaf_to_node(
xfs_dir2_leaf_log_header(tp, lbp);
xfs_dir2_free_log_header(tp, fbp);
xfs_dir2_free_log_bests(tp, fbp, 0, be32_to_cpu(free->hdr.nvalid) - 1);
- xfs_da_buf_done(fbp);
xfs_dir2_leafn_check(dp, lbp);
return 0;
}
@@ -175,7 +174,7 @@ xfs_dir2_leaf_to_node(
*/
static int /* error */
xfs_dir2_leafn_add(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
xfs_da_args_t *args, /* operation arguments */
int index) /* insertion pt for new entry */
{
@@ -195,7 +194,7 @@ xfs_dir2_leafn_add(
dp = args->dp;
mp = dp->i_mount;
tp = args->trans;
- leaf = bp->data;
+ leaf = bp->b_addr;
/*
* Quick check just to make sure we are not going to index
@@ -261,15 +260,15 @@ xfs_dir2_leafn_add(
*/
void
xfs_dir2_leafn_check(
- xfs_inode_t *dp, /* incore directory inode */
- xfs_dabuf_t *bp) /* leaf buffer */
+ struct xfs_inode *dp,
+ struct xfs_buf *bp)
{
int i; /* leaf index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
xfs_mount_t *mp; /* filesystem mount point */
int stale; /* count of stale leaves */
- leaf = bp->data;
+ leaf = bp->b_addr;
mp = dp->i_mount;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
@@ -291,12 +290,12 @@ xfs_dir2_leafn_check(
*/
xfs_dahash_t /* hash value */
xfs_dir2_leafn_lasthash(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
int *count) /* count of entries in leaf */
{
xfs_dir2_leaf_t *leaf; /* leaf structure */
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
if (count)
*count = be16_to_cpu(leaf->hdr.count);
@@ -311,12 +310,12 @@ xfs_dir2_leafn_lasthash(
*/
STATIC int
xfs_dir2_leafn_lookup_for_addname(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
xfs_da_args_t *args, /* operation arguments */
int *indexp, /* out: leaf entry index */
xfs_da_state_t *state) /* state to fill in */
{
- xfs_dabuf_t *curbp = NULL; /* current data/free buffer */
+ struct xfs_buf *curbp = NULL; /* current data/free buffer */
xfs_dir2_db_t curdb = -1; /* current data block number */
xfs_dir2_db_t curfdb = -1; /* current free block number */
xfs_inode_t *dp; /* incore directory inode */
@@ -335,7 +334,7 @@ xfs_dir2_leafn_lookup_for_addname(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
#ifdef __KERNEL__
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
@@ -352,7 +351,7 @@ xfs_dir2_leafn_lookup_for_addname(
/* If so, it's a free block buffer, get the block number. */
curbp = state->extrablk.bp;
curfdb = state->extrablk.blkno;
- free = curbp->data;
+ free = curbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
}
length = xfs_dir2_data_entsize(args->namelen);
@@ -394,7 +393,7 @@ xfs_dir2_leafn_lookup_for_addname(
* If we had one before, drop it.
*/
if (curbp)
- xfs_da_brelse(tp, curbp);
+ xfs_trans_brelse(tp, curbp);
/*
* Read the free block.
*/
@@ -403,7 +402,7 @@ xfs_dir2_leafn_lookup_for_addname(
-1, &curbp, XFS_DATA_FORK);
if (error)
return error;
- free = curbp->data;
+ free = curbp->b_addr;
ASSERT(be32_to_cpu(free->hdr.magic) ==
XFS_DIR2_FREE_MAGIC);
ASSERT((be32_to_cpu(free->hdr.firstdb) %
@@ -424,7 +423,7 @@ xfs_dir2_leafn_lookup_for_addname(
XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
XFS_ERRLEVEL_LOW, mp);
if (curfdb != newfdb)
- xfs_da_brelse(tp, curbp);
+ xfs_trans_brelse(tp, curbp);
return XFS_ERROR(EFSCORRUPTED);
}
curfdb = newfdb;
@@ -459,12 +458,12 @@ out:
*/
STATIC int
xfs_dir2_leafn_lookup_for_entry(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
xfs_da_args_t *args, /* operation arguments */
int *indexp, /* out: leaf entry index */
xfs_da_state_t *state) /* state to fill in */
{
- xfs_dabuf_t *curbp = NULL; /* current data/free buffer */
+ struct xfs_buf *curbp = NULL; /* current data/free buffer */
xfs_dir2_db_t curdb = -1; /* current data block number */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
@@ -480,7 +479,7 @@ xfs_dir2_leafn_lookup_for_entry(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
#ifdef __KERNEL__
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
@@ -525,7 +524,7 @@ xfs_dir2_leafn_lookup_for_entry(
*/
if (curbp && (args->cmpresult == XFS_CMP_DIFFERENT ||
curdb != state->extrablk.blkno))
- xfs_da_brelse(tp, curbp);
+ xfs_trans_brelse(tp, curbp);
/*
* If needing the block that is saved with a CI match,
* use it otherwise read in the new data block.
@@ -547,7 +546,7 @@ xfs_dir2_leafn_lookup_for_entry(
/*
* Point to the data entry.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)curbp->data +
+ dep = (xfs_dir2_data_entry_t *)((char *)curbp->b_addr +
xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
/*
* Compare the entry and if it's an exact match, return
@@ -559,7 +558,7 @@ xfs_dir2_leafn_lookup_for_entry(
/* If there is a CI match block, drop it */
if (args->cmpresult != XFS_CMP_DIFFERENT &&
curdb != state->extrablk.blkno)
- xfs_da_brelse(tp, state->extrablk.bp);
+ xfs_trans_brelse(tp, state->extrablk.bp);
args->cmpresult = cmp;
args->inumber = be64_to_cpu(dep->inumber);
*indexp = index;
@@ -567,7 +566,7 @@ xfs_dir2_leafn_lookup_for_entry(
state->extrablk.bp = curbp;
state->extrablk.blkno = curdb;
state->extrablk.index = (int)((char *)dep -
- (char *)curbp->data);
+ (char *)curbp->b_addr);
state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
if (cmp == XFS_CMP_EXACT)
return XFS_ERROR(EEXIST);
@@ -586,7 +585,7 @@ xfs_dir2_leafn_lookup_for_entry(
} else {
/* If the curbp is not the CI match block, drop it */
if (state->extrablk.bp != curbp)
- xfs_da_brelse(tp, curbp);
+ xfs_trans_brelse(tp, curbp);
}
} else {
state->extravalid = 0;
@@ -602,7 +601,7 @@ xfs_dir2_leafn_lookup_for_entry(
*/
int
xfs_dir2_leafn_lookup_int(
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
xfs_da_args_t *args, /* operation arguments */
int *indexp, /* out: leaf entry index */
xfs_da_state_t *state) /* state to fill in */
@@ -620,9 +619,9 @@ xfs_dir2_leafn_lookup_int(
static void
xfs_dir2_leafn_moveents(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *bp_s, /* source leaf buffer */
+ struct xfs_buf *bp_s, /* source leaf buffer */
int start_s, /* source leaf index */
- xfs_dabuf_t *bp_d, /* destination leaf buffer */
+ struct xfs_buf *bp_d, /* destination leaf buffer */
int start_d, /* destination leaf index */
int count) /* count of leaves to copy */
{
@@ -640,8 +639,8 @@ xfs_dir2_leafn_moveents(
return;
}
tp = args->trans;
- leaf_s = bp_s->data;
- leaf_d = bp_d->data;
+ leaf_s = bp_s->b_addr;
+ leaf_d = bp_d->b_addr;
/*
* If the destination index is not the end of the current
* destination leaf entries, open up a hole in the destination
@@ -702,14 +701,14 @@ xfs_dir2_leafn_moveents(
*/
int /* sort order */
xfs_dir2_leafn_order(
- xfs_dabuf_t *leaf1_bp, /* leaf1 buffer */
- xfs_dabuf_t *leaf2_bp) /* leaf2 buffer */
+ struct xfs_buf *leaf1_bp, /* leaf1 buffer */
+ struct xfs_buf *leaf2_bp) /* leaf2 buffer */
{
xfs_dir2_leaf_t *leaf1; /* leaf1 structure */
xfs_dir2_leaf_t *leaf2; /* leaf2 structure */
- leaf1 = leaf1_bp->data;
- leaf2 = leaf2_bp->data;
+ leaf1 = leaf1_bp->b_addr;
+ leaf2 = leaf2_bp->b_addr;
ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
if (be16_to_cpu(leaf1->hdr.count) > 0 &&
@@ -757,8 +756,8 @@ xfs_dir2_leafn_rebalance(
blk1 = blk2;
blk2 = tmp;
}
- leaf1 = blk1->bp->data;
- leaf2 = blk2->bp->data;
+ leaf1 = blk1->bp->b_addr;
+ leaf2 = blk2->bp->b_addr;
oldsum = be16_to_cpu(leaf1->hdr.count) + be16_to_cpu(leaf2->hdr.count);
#ifdef DEBUG
oldstale = be16_to_cpu(leaf1->hdr.stale) + be16_to_cpu(leaf2->hdr.stale);
@@ -834,14 +833,14 @@ xfs_dir2_leafn_rebalance(
static int /* error */
xfs_dir2_leafn_remove(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *bp, /* leaf buffer */
+ struct xfs_buf *bp, /* leaf buffer */
int index, /* leaf entry index */
xfs_da_state_blk_t *dblk, /* data block */
int *rval) /* resulting block needs join */
{
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_leaf_t *leaf; /* leaf structure */
@@ -858,7 +857,7 @@ xfs_dir2_leafn_remove(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
* Point to the entry we're removing.
@@ -884,7 +883,7 @@ xfs_dir2_leafn_remove(
* in the data block in case it changes.
*/
dbp = dblk->bp;
- hdr = dbp->data;
+ hdr = dbp->b_addr;
dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
longest = be16_to_cpu(hdr->bestfree[0].length);
needlog = needscan = 0;
@@ -905,7 +904,7 @@ xfs_dir2_leafn_remove(
*/
if (longest < be16_to_cpu(hdr->bestfree[0].length)) {
int error; /* error return value */
- xfs_dabuf_t *fbp; /* freeblock buffer */
+ struct xfs_buf *fbp; /* freeblock buffer */
xfs_dir2_db_t fdb; /* freeblock block number */
int findex; /* index in freeblock entries */
xfs_dir2_free_t *free; /* freeblock structure */
@@ -920,7 +919,7 @@ xfs_dir2_leafn_remove(
-1, &fbp, XFS_DATA_FORK))) {
return error;
}
- free = fbp->data;
+ free = fbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
ASSERT(be32_to_cpu(free->hdr.firstdb) ==
xfs_dir2_free_max_bests(mp) *
@@ -948,9 +947,7 @@ xfs_dir2_leafn_remove(
* In this case just drop the buffer and some one else
* will eventually get rid of the empty block.
*/
- else if (error == ENOSPC && args->total == 0)
- xfs_da_buf_done(dbp);
- else
+ else if (!(error == ENOSPC && args->total == 0))
return error;
}
/*
@@ -1018,11 +1015,6 @@ xfs_dir2_leafn_remove(
*/
if (logfree)
xfs_dir2_free_log_bests(tp, fbp, findex, findex);
- /*
- * Drop the buffer if we still have it.
- */
- if (fbp)
- xfs_da_buf_done(fbp);
}
xfs_dir2_leafn_check(dp, bp);
/*
@@ -1114,7 +1106,7 @@ xfs_dir2_leafn_toosmall(
{
xfs_da_state_blk_t *blk; /* leaf block */
xfs_dablk_t blkno; /* leaf block number */
- xfs_dabuf_t *bp; /* leaf buffer */
+ struct xfs_buf *bp; /* leaf buffer */
int bytes; /* bytes in use */
int count; /* leaf live entry count */
int error; /* error return value */
@@ -1130,7 +1122,7 @@ xfs_dir2_leafn_toosmall(
* to coalesce with a sibling.
*/
blk = &state->path.blk[state->path.active - 1];
- info = blk->bp->data;
+ info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
leaf = (xfs_dir2_leaf_t *)info;
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
@@ -1189,7 +1181,7 @@ xfs_dir2_leafn_toosmall(
leaf = (xfs_dir2_leaf_t *)info;
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
bytes = state->blocksize - (state->blocksize >> 2);
- leaf = bp->data;
+ leaf = bp->b_addr;
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
bytes -= count * (uint)sizeof(leaf->ents[0]);
@@ -1198,7 +1190,7 @@ xfs_dir2_leafn_toosmall(
*/
if (bytes >= 0)
break;
- xfs_da_brelse(state->args->trans, bp);
+ xfs_trans_brelse(state->args->trans, bp);
}
/*
* Didn't like either block, give up.
@@ -1207,11 +1199,7 @@ xfs_dir2_leafn_toosmall(
*action = 0;
return 0;
}
- /*
- * Done with the sibling leaf block here, drop the dabuf
- * so path_shift can get it.
- */
- xfs_da_buf_done(bp);
+
/*
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
@@ -1247,8 +1235,8 @@ xfs_dir2_leafn_unbalance(
args = state->args;
ASSERT(drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
- drop_leaf = drop_blk->bp->data;
- save_leaf = save_blk->bp->data;
+ drop_leaf = drop_blk->bp->b_addr;
+ save_leaf = save_blk->bp->b_addr;
ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
@@ -1356,13 +1344,13 @@ xfs_dir2_node_addname_int(
{
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t dbno; /* data block number */
- xfs_dabuf_t *dbp; /* data block buffer */
+ struct xfs_buf *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* data unused entry pointer */
int error; /* error return value */
xfs_dir2_db_t fbno; /* freespace block number */
- xfs_dabuf_t *fbp; /* freespace buffer */
+ struct xfs_buf *fbp; /* freespace buffer */
int findex; /* freespace entry index */
xfs_dir2_free_t *free=NULL; /* freespace block structure */
xfs_dir2_db_t ifbno; /* initial freespace block no */
@@ -1390,7 +1378,7 @@ xfs_dir2_node_addname_int(
* Remember initial freespace block number.
*/
ifbno = fblk->blkno;
- free = fbp->data;
+ free = fbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
findex = fblk->index;
/*
@@ -1474,7 +1462,7 @@ xfs_dir2_node_addname_int(
if (unlikely(fbp == NULL)) {
continue;
}
- free = fbp->data;
+ free = fbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
findex = 0;
}
@@ -1492,7 +1480,7 @@ xfs_dir2_node_addname_int(
/*
* Drop the block.
*/
- xfs_da_brelse(tp, fbp);
+ xfs_trans_brelse(tp, fbp);
fbp = NULL;
if (fblk && fblk->bp)
fblk->bp = NULL;
@@ -1507,36 +1495,23 @@ xfs_dir2_node_addname_int(
/*
* Not allowed to allocate, return failure.
*/
- if ((args->op_flags & XFS_DA_OP_JUSTCHECK) ||
- args->total == 0) {
- /*
- * Drop the freespace buffer unless it came from our
- * caller.
- */
- if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
- xfs_da_buf_done(fbp);
+ if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
return XFS_ERROR(ENOSPC);
- }
+
/*
* Allocate and initialize the new data block.
*/
if (unlikely((error = xfs_dir2_grow_inode(args,
XFS_DIR2_DATA_SPACE,
&dbno)) ||
- (error = xfs_dir2_data_init(args, dbno, &dbp)))) {
- /*
- * Drop the freespace buffer unless it came from our
- * caller.
- */
- if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
- xfs_da_buf_done(fbp);
+ (error = xfs_dir2_data_init(args, dbno, &dbp))))
return error;
- }
+
/*
* If (somehow) we have a freespace block, get rid of it.
*/
if (fbp)
- xfs_da_brelse(tp, fbp);
+ xfs_trans_brelse(tp, fbp);
if (fblk && fblk->bp)
fblk->bp = NULL;
@@ -1547,10 +1522,9 @@ xfs_dir2_node_addname_int(
fbno = xfs_dir2_db_to_fdb(mp, dbno);
if (unlikely(error = xfs_da_read_buf(tp, dp,
xfs_dir2_db_to_da(mp, fbno), -2, &fbp,
- XFS_DATA_FORK))) {
- xfs_da_buf_done(dbp);
+ XFS_DATA_FORK)))
return error;
- }
+
/*
* If there wasn't a freespace block, the read will
* return a NULL fbp. Allocate and initialize a new one.
@@ -1598,7 +1572,7 @@ xfs_dir2_node_addname_int(
* Initialize the new block to be empty, and remember
* its first slot as our empty slot.
*/
- free = fbp->data;
+ free = fbp->b_addr;
free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
free->hdr.firstdb = cpu_to_be32(
(fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
@@ -1606,7 +1580,7 @@ xfs_dir2_node_addname_int(
free->hdr.nvalid = 0;
free->hdr.nused = 0;
} else {
- free = fbp->data;
+ free = fbp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
}
@@ -1639,7 +1613,7 @@ xfs_dir2_node_addname_int(
* We haven't allocated the data entry yet so this will
* change again.
*/
- hdr = dbp->data;
+ hdr = dbp->b_addr;
free->bests[findex] = hdr->bestfree[0].length;
logfree = 1;
}
@@ -1650,22 +1624,17 @@ xfs_dir2_node_addname_int(
/*
* If just checking, we succeeded.
*/
- if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
- if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
- xfs_da_buf_done(fbp);
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK)
return 0;
- }
+
/*
* Read the data block in.
*/
- if (unlikely(
- error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno),
- -1, &dbp, XFS_DATA_FORK))) {
- if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
- xfs_da_buf_done(fbp);
+ error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno),
+ -1, &dbp, XFS_DATA_FORK);
+ if (error)
return error;
- }
- hdr = dbp->data;
+ hdr = dbp->b_addr;
logfree = 0;
}
ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length);
@@ -1714,16 +1683,10 @@ xfs_dir2_node_addname_int(
if (logfree)
xfs_dir2_free_log_bests(tp, fbp, findex, findex);
/*
- * If the caller didn't hand us the freespace block, drop it.
- */
- if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
- xfs_da_buf_done(fbp);
- /*
* Return the data block and offset in args, then drop the data block.
*/
args->blkno = (xfs_dablk_t)dbno;
args->index = be16_to_cpu(*tagp);
- xfs_da_buf_done(dbp);
return 0;
}
@@ -1761,22 +1724,23 @@ xfs_dir2_node_lookup(
/* If a CI match, dup the actual name and return EEXIST */
xfs_dir2_data_entry_t *dep;
- dep = (xfs_dir2_data_entry_t *)((char *)state->extrablk.bp->
- data + state->extrablk.index);
+ dep = (xfs_dir2_data_entry_t *)
+ ((char *)state->extrablk.bp->b_addr +
+ state->extrablk.index);
rval = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
}
/*
* Release the btree blocks and leaf block.
*/
for (i = 0; i < state->path.active; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
+ xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
/*
* Release the data block if we have it.
*/
if (state->extravalid && state->extrablk.bp) {
- xfs_da_brelse(args->trans, state->extrablk.bp);
+ xfs_trans_brelse(args->trans, state->extrablk.bp);
state->extrablk.bp = NULL;
}
xfs_da_state_free(state);
@@ -1893,13 +1857,13 @@ xfs_dir2_node_replace(
*/
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
- leaf = blk->bp->data;
+ leaf = blk->bp->b_addr;
lep = &leaf->ents[blk->index];
ASSERT(state->extravalid);
/*
* Point to the data entry.
*/
- hdr = state->extrablk.bp->data;
+ hdr = state->extrablk.bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
dep = (xfs_dir2_data_entry_t *)
((char *)hdr +
@@ -1916,14 +1880,14 @@ xfs_dir2_node_replace(
* Didn't find it, and we're holding a data block. Drop it.
*/
else if (state->extravalid) {
- xfs_da_brelse(args->trans, state->extrablk.bp);
+ xfs_trans_brelse(args->trans, state->extrablk.bp);
state->extrablk.bp = NULL;
}
/*
* Release all the buffers in the cursor.
*/
for (i = 0; i < state->path.active; i++) {
- xfs_da_brelse(args->trans, state->path.blk[i].bp);
+ xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
xfs_da_state_free(state);
@@ -1940,7 +1904,7 @@ xfs_dir2_node_trim_free(
xfs_fileoff_t fo, /* free block number */
int *rvalp) /* out: did something */
{
- xfs_dabuf_t *bp; /* freespace buffer */
+ struct xfs_buf *bp; /* freespace buffer */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return code */
xfs_dir2_free_t *free; /* freespace structure */
@@ -1965,13 +1929,13 @@ xfs_dir2_node_trim_free(
if (bp == NULL) {
return 0;
}
- free = bp->data;
+ free = bp->b_addr;
ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
/*
* If there are used entries, there's nothing to do.
*/
if (be32_to_cpu(free->hdr.nused) > 0) {
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
*rvalp = 0;
return 0;
}
@@ -1987,7 +1951,7 @@ xfs_dir2_node_trim_free(
* pieces. This is the last block of an extent.
*/
ASSERT(error != ENOSPC);
- xfs_da_brelse(tp, bp);
+ xfs_trans_brelse(tp, bp);
return error;
}
/*
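
The hunks above drop the xfs_dabuf wrapper from the directory code: buffers are plain struct xfs_buf, block contents are reached through bp->b_addr instead of bp->data, and a single xfs_trans_brelse() replaces the old xfs_da_brelse()/xfs_da_buf_done() pair. A minimal sketch of the resulting pattern (local names are illustrative only):

	struct xfs_buf		*bp;	/* as returned by xfs_da_read_buf() */
	struct xfs_dir2_leaf	*leaf;

	leaf = bp->b_addr;		/* was bp->data on the old xfs_dabuf */
	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));

	/* ... operate on the leaf block ... */

	xfs_trans_brelse(tp, bp);	/* was xfs_da_brelse(); no xfs_da_buf_done() needed */
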
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h
index 067f403ecf8a..3523d3e15aa8 100644
--- a/fs/xfs/xfs_dir2_priv.h
+++ b/fs/xfs/xfs_dir2_priv.h
@@ -25,7 +25,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
- struct xfs_dabuf *bp);
+ struct xfs_buf *bp);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
const unsigned char *name, int len);
@@ -37,11 +37,11 @@ extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
extern int xfs_dir2_block_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
- struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
+ struct xfs_buf *lbp, struct xfs_buf *dbp);
/* xfs_dir2_data.c */
#ifdef DEBUG
-extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
+extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir2_data_check(dp,bp)
#endif
@@ -51,43 +51,43 @@ xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
- struct xfs_dabuf **bpp);
-extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_buf **bpp);
+extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_entry *dep);
extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
- struct xfs_dabuf *bp);
-extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_buf *bp);
+extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup);
-extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
int *needlogp, int *needscanp);
-extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
/* xfs_dir2_leaf.c */
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
- struct xfs_dabuf *dbp);
+ struct xfs_buf *dbp);
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
- struct xfs_dabuf *bp);
-extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
+ struct xfs_buf *bp);
+extern void xfs_dir2_leaf_compact_x1(struct xfs_buf *bp, int *indexp,
int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
size_t bufsize, xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
- struct xfs_dabuf **bpp, int magic);
-extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_buf **bpp, int magic);
+extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
int first, int last);
extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
- struct xfs_dabuf *bp);
+ struct xfs_buf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
- struct xfs_dabuf *lbp);
+ struct xfs_buf *lbp);
extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
- struct xfs_dabuf *lbp, xfs_dir2_db_t db);
+ struct xfs_buf *lbp, xfs_dir2_db_t db);
extern struct xfs_dir2_leaf_entry *
xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact,
int lowstale, int highstale,
@@ -96,13 +96,13 @@ extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
- struct xfs_dabuf *lbp);
-extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
-extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
+ struct xfs_buf *lbp);
+extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_buf *bp, int *count);
+extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
struct xfs_da_args *args, int *indexp,
struct xfs_da_state *state);
-extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
- struct xfs_dabuf *leaf2_bp);
+extern int xfs_dir2_leafn_order(struct xfs_buf *leaf1_bp,
+ struct xfs_buf *leaf2_bp);
extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
@@ -122,7 +122,7 @@ extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
struct xfs_dir2_sf_entry *sfep);
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
-extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
+extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
int size, xfs_dir2_sf_hdr_t *sfhp);
extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 19bf0c5e38f4..1b9fc3ec7e4b 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -222,7 +222,7 @@ xfs_dir2_block_sfsize(
int /* error */
xfs_dir2_block_to_sf(
xfs_da_args_t *args, /* operation arguments */
- xfs_dabuf_t *bp, /* block buffer */
+ struct xfs_buf *bp,
int size, /* shortform directory size */
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
@@ -249,7 +249,7 @@ xfs_dir2_block_to_sf(
* and add local data.
*/
hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
- memcpy(hdr, bp->data, mp->m_dirblksize);
+ memcpy(hdr, bp->b_addr, mp->m_dirblksize);
logflags = XFS_ILOG_CORE;
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
ASSERT(error != ENOSPC);
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index f9c3fe304a17..69cf4fcde03e 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,12 +179,14 @@ xfs_ioc_trim(
* used by the fstrim application. In the end it really doesn't
* matter as trimming blocks is an advisory interface.
*/
+ if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+ return -XFS_ERROR(EINVAL);
+
start = BTOBB(range.start);
end = start + BTOBBT(range.len) - 1;
minlen = BTOBB(max_t(u64, granularity, range.minlen));
- if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks)
- return -XFS_ERROR(EINVAL);
if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9f7ec15a6522..56afcdb2377d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -236,7 +236,6 @@ xfs_file_aio_read(
ssize_t ret = 0;
int ioflags = 0;
xfs_fsize_t n;
- unsigned long seg;
XFS_STATS_INC(xs_read_calls);
@@ -247,19 +246,9 @@ xfs_file_aio_read(
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
- /* START copy & waste from filemap.c */
- for (seg = 0; seg < nr_segs; seg++) {
- const struct iovec *iv = &iovp[seg];
-
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- size += iv->iov_len;
- if (unlikely((ssize_t)(size|iv->iov_len) < 0))
- return XFS_ERROR(-EINVAL);
- }
- /* END copy & waste from filemap.c */
+ ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
+ if (ret < 0)
+ return ret;
if (unlikely(ioflags & IO_ISDIRECT)) {
xfs_buftarg_t *target =
@@ -273,7 +262,7 @@ xfs_file_aio_read(
}
}
- n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
+ n = mp->m_super->s_maxbytes - iocb->ki_pos;
if (n <= 0 || size == 0)
return 0;
@@ -781,10 +770,12 @@ xfs_file_aio_write(
if (ocount == 0)
return 0;
- xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);
+ sb_start_write(inode->i_sb);
- if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EIO;
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ ret = -EIO;
+ goto out;
+ }
if (unlikely(file->f_flags & O_DIRECT))
ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
@@ -803,6 +794,8 @@ xfs_file_aio_write(
ret = err;
}
+out:
+ sb_end_write(inode->i_sb);
return ret;
}
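
The xfs_file.c changes replace the open-coded iovec validation with the VFS helper generic_segment_checks() and bracket the write path with sb_start_write()/sb_end_write() so writes block while the filesystem is frozen. The shape of that bracket, reduced to a sketch with the actual write calls elided:

	ssize_t ret;

	sb_start_write(inode->i_sb);		/* waits if a freeze is in progress */

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ret = -EIO;
		goto out;			/* still drop the freeze reference */
	}

	ret = 0;	/* ... direct or buffered write path goes here ... */

out:
	sb_end_write(inode->i_sb);		/* paired on every exit path */
	return ret;
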
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 177a21a7ac49..5aceb3f8ecd6 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -442,14 +442,13 @@ xfs_ialloc_next_ag(
* Select an allocation group to look for a free inode in, based on the parent
* inode and then mode. Return the allocation group buffer.
*/
-STATIC xfs_buf_t * /* allocation group buffer */
+STATIC xfs_agnumber_t
xfs_ialloc_ag_select(
xfs_trans_t *tp, /* transaction pointer */
xfs_ino_t parent, /* parent directory inode number */
umode_t mode, /* bits set to indicate file type */
int okalloc) /* ok to allocate more space */
{
- xfs_buf_t *agbp; /* allocation group header buffer */
xfs_agnumber_t agcount; /* number of ag's in the filesystem */
xfs_agnumber_t agno; /* current ag number */
int flags; /* alloc buffer locking flags */
@@ -459,6 +458,7 @@ xfs_ialloc_ag_select(
int needspace; /* file mode implies space allocated */
xfs_perag_t *pag; /* per allocation group data */
xfs_agnumber_t pagno; /* parent (starting) ag number */
+ int error;
/*
* Files of these types need at least one block if length > 0
@@ -474,7 +474,9 @@ xfs_ialloc_ag_select(
if (pagno >= agcount)
pagno = 0;
}
+
ASSERT(pagno < agcount);
+
/*
* Loop through allocation groups, looking for one with a little
* free space in it. Note we don't look for free inodes, exactly.
@@ -486,51 +488,45 @@ xfs_ialloc_ag_select(
flags = XFS_ALLOC_FLAG_TRYLOCK;
for (;;) {
pag = xfs_perag_get(mp, agno);
+ if (!pag->pagi_inodeok) {
+ xfs_ialloc_next_ag(mp);
+ goto nextag;
+ }
+
if (!pag->pagi_init) {
- if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
- agbp = NULL;
+ error = xfs_ialloc_pagi_init(mp, tp, agno);
+ if (error)
goto nextag;
- }
- } else
- agbp = NULL;
+ }
- if (!pag->pagi_inodeok) {
- xfs_ialloc_next_ag(mp);
- goto unlock_nextag;
+ if (pag->pagi_freecount) {
+ xfs_perag_put(pag);
+ return agno;
}
- /*
- * Is there enough free space for the file plus a block
- * of inodes (if we need to allocate some)?
- */
- ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
- if (ineed && !pag->pagf_init) {
- if (agbp == NULL &&
- xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
- agbp = NULL;
+ if (!okalloc)
+ goto nextag;
+
+ if (!pag->pagf_init) {
+ error = xfs_alloc_pagf_init(mp, tp, agno, flags);
+ if (error)
goto nextag;
- }
- (void)xfs_alloc_pagf_init(mp, tp, agno, flags);
}
- if (!ineed || pag->pagf_init) {
- if (ineed && !(longest = pag->pagf_longest))
- longest = pag->pagf_flcount > 0;
- if (!ineed ||
- (pag->pagf_freeblks >= needspace + ineed &&
- longest >= ineed &&
- okalloc)) {
- if (agbp == NULL &&
- xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
- agbp = NULL;
- goto nextag;
- }
- xfs_perag_put(pag);
- return agbp;
- }
+
+ /*
+ * Is there enough free space for the file plus a block of
+	 * inodes (if we need to allocate some)?
+ */
+ ineed = XFS_IALLOC_BLOCKS(mp);
+ longest = pag->pagf_longest;
+ if (!longest)
+ longest = pag->pagf_flcount > 0;
+
+ if (pag->pagf_freeblks >= needspace + ineed &&
+ longest >= ineed) {
+ xfs_perag_put(pag);
+ return agno;
}
-unlock_nextag:
- if (agbp)
- xfs_trans_brelse(tp, agbp);
nextag:
xfs_perag_put(pag);
/*
@@ -538,13 +534,13 @@ nextag:
* down.
*/
if (XFS_FORCED_SHUTDOWN(mp))
- return NULL;
+ return NULLAGNUMBER;
agno++;
if (agno >= agcount)
agno = 0;
if (agno == pagno) {
if (flags == 0)
- return NULL;
+ return NULLAGNUMBER;
flags = 0;
}
}
@@ -607,195 +603,39 @@ xfs_ialloc_get_rec(
}
/*
- * Visible inode allocation functions.
- */
-/*
- * Find a free (set) bit in the inode bitmask.
- */
-static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
-{
- return xfs_lowbit64(*fp);
-}
-
-/*
- * Allocate an inode on disk.
- * Mode is used to tell whether the new inode will need space, and whether
- * it is a directory.
- *
- * The arguments IO_agbp and alloc_done are defined to work within
- * the constraint of one allocation per transaction.
- * xfs_dialloc() is designed to be called twice if it has to do an
- * allocation to make more free inodes. On the first call,
- * IO_agbp should be set to NULL. If an inode is available,
- * i.e., xfs_dialloc() did not need to do an allocation, an inode
- * number is returned. In this case, IO_agbp would be set to the
- * current ag_buf and alloc_done set to false.
- * If an allocation needed to be done, xfs_dialloc would return
- * the current ag_buf in IO_agbp and set alloc_done to true.
- * The caller should then commit the current transaction, allocate a new
- * transaction, and call xfs_dialloc() again, passing in the previous
- * value of IO_agbp. IO_agbp should be held across the transactions.
- * Since the agbp is locked across the two calls, the second call is
- * guaranteed to have a free inode available.
+ * Allocate an inode.
*
- * Once we successfully pick an inode its number is returned and the
- * on-disk data structures are updated. The inode itself is not read
- * in, since doing so would break ordering constraints with xfs_reclaim.
+ * The caller selected an AG for us, and made sure that free inodes are
+ * available.
*/
-int
-xfs_dialloc(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_ino_t parent, /* parent inode (directory) */
- umode_t mode, /* mode bits for new inode */
- int okalloc, /* ok to allocate more space */
- xfs_buf_t **IO_agbp, /* in/out ag header's buffer */
- boolean_t *alloc_done, /* true if we needed to replenish
- inode freelist */
- xfs_ino_t *inop) /* inode number allocated */
+STATIC int
+xfs_dialloc_ag(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_ino_t parent,
+ xfs_ino_t *inop)
{
- xfs_agnumber_t agcount; /* number of allocation groups */
- xfs_buf_t *agbp; /* allocation group header's buffer */
- xfs_agnumber_t agno; /* allocation group number */
- xfs_agi_t *agi; /* allocation group header structure */
- xfs_btree_cur_t *cur; /* inode allocation btree cursor */
- int error; /* error return value */
- int i; /* result code */
- int ialloced; /* inode allocation status */
- int noroom = 0; /* no space for inode blk allocation */
- xfs_ino_t ino; /* fs-relative inode to be returned */
- /* REFERENCED */
- int j; /* result code */
- xfs_mount_t *mp; /* file system mount structure */
- int offset; /* index of inode in chunk */
- xfs_agino_t pagino; /* parent's AG relative inode # */
- xfs_agnumber_t pagno; /* parent's AG number */
- xfs_inobt_rec_incore_t rec; /* inode allocation record */
- xfs_agnumber_t tagno; /* testing allocation group number */
- xfs_btree_cur_t *tcur; /* temp cursor */
- xfs_inobt_rec_incore_t trec; /* temp inode allocation record */
- struct xfs_perag *pag;
-
-
- if (*IO_agbp == NULL) {
- /*
- * We do not have an agbp, so select an initial allocation
- * group for inode allocation.
- */
- agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
- /*
- * Couldn't find an allocation group satisfying the
- * criteria, give up.
- */
- if (!agbp) {
- *inop = NULLFSINO;
- return 0;
- }
- agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
- } else {
- /*
- * Continue where we left off before. In this case, we
- * know that the allocation group has free inodes.
- */
- agbp = *IO_agbp;
- agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
- ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
- }
- mp = tp->t_mountp;
- agcount = mp->m_sb.sb_agcount;
- agno = be32_to_cpu(agi->agi_seqno);
- tagno = agno;
- pagno = XFS_INO_TO_AGNO(mp, parent);
- pagino = XFS_INO_TO_AGINO(mp, parent);
-
- /*
- * If we have already hit the ceiling of inode blocks then clear
- * okalloc so we scan all available agi structures for a free
- * inode.
- */
-
- if (mp->m_maxicount &&
- mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
- noroom = 1;
- okalloc = 0;
- }
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp);
+ xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
+ xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
+ xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
+ struct xfs_perag *pag;
+ struct xfs_btree_cur *cur, *tcur;
+ struct xfs_inobt_rec_incore rec, trec;
+ xfs_ino_t ino;
+ int error;
+ int offset;
+ int i, j;
- /*
- * Loop until we find an allocation group that either has free inodes
- * or in which we can allocate some inodes. Iterate through the
- * allocation groups upward, wrapping at the end.
- */
- *alloc_done = B_FALSE;
- while (!agi->agi_freecount) {
- /*
- * Don't do anything if we're not supposed to allocate
- * any blocks, just go on to the next ag.
- */
- if (okalloc) {
- /*
- * Try to allocate some new inodes in the allocation
- * group.
- */
- if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
- xfs_trans_brelse(tp, agbp);
- if (error == ENOSPC) {
- *inop = NULLFSINO;
- return 0;
- } else
- return error;
- }
- if (ialloced) {
- /*
- * We successfully allocated some inodes, return
- * the current context to the caller so that it
- * can commit the current transaction and call
- * us again where we left off.
- */
- ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
- *alloc_done = B_TRUE;
- *IO_agbp = agbp;
- *inop = NULLFSINO;
- return 0;
- }
- }
- /*
- * If it failed, give up on this ag.
- */
- xfs_trans_brelse(tp, agbp);
- /*
- * Go on to the next ag: get its ag header.
- */
-nextag:
- if (++tagno == agcount)
- tagno = 0;
- if (tagno == agno) {
- *inop = NULLFSINO;
- return noroom ? ENOSPC : 0;
- }
- pag = xfs_perag_get(mp, tagno);
- if (pag->pagi_inodeok == 0) {
- xfs_perag_put(pag);
- goto nextag;
- }
- error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
- xfs_perag_put(pag);
- if (error)
- goto nextag;
- agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
- }
- /*
- * Here with an allocation group that has a free inode.
- * Reset agno since we may have chosen a new ag in the
- * loop above.
- */
- agno = tagno;
- *IO_agbp = NULL;
pag = xfs_perag_get(mp, agno);
+ ASSERT(pag->pagi_init);
+ ASSERT(pag->pagi_inodeok);
+ ASSERT(pag->pagi_freecount > 0);
+
restart_pagno:
- cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
+ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
/*
* If pagino is 0 (this is the root inode allocation) use newino.
* This must work because we've just allocated some.
@@ -995,7 +835,7 @@ newino:
}
alloc_inode:
- offset = xfs_ialloc_find_free(&rec.ir_free);
+ offset = xfs_lowbit64(rec.ir_free);
ASSERT(offset >= 0);
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
@@ -1028,6 +868,165 @@ error0:
}
/*
+ * Allocate an inode on disk.
+ *
+ * Mode is used to tell whether the new inode will need space, and whether it
+ * is a directory.
+ *
+ * This function is designed to be called twice if it has to do an allocation
+ * to make more free inodes. On the first call, *IO_agbp should be set to NULL.
+ * If an inode is available without having to perform an allocation, an inode
+ * number is returned. In this case, *IO_agbp would be NULL. If an allocation
+ * needs to be done, xfs_dialloc would return the current AGI buffer in
+ * *IO_agbp. The caller should then commit the current transaction, allocate a
+ * new transaction, and call xfs_dialloc() again, passing in the previous value
+ * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI
+ * buffer is locked across the two calls, the second call is guaranteed to have
+ * a free inode available.
+ *
+ * Once we successfully pick an inode its number is returned and the on-disk
+ * data structures are updated. The inode itself is not read in, since doing so
+ * would break ordering constraints with xfs_reclaim.
+ */
+int
+xfs_dialloc(
+ struct xfs_trans *tp,
+ xfs_ino_t parent,
+ umode_t mode,
+ int okalloc,
+ struct xfs_buf **IO_agbp,
+ xfs_ino_t *inop)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *agbp;
+ xfs_agnumber_t agno;
+ int error;
+ int ialloced;
+ int noroom = 0;
+ xfs_agnumber_t start_agno;
+ struct xfs_perag *pag;
+
+ if (*IO_agbp) {
+ /*
+ * If the caller passes in a pointer to the AGI buffer,
+ * continue where we left off before. In this case, we
+ * know that the allocation group has free inodes.
+ */
+ agbp = *IO_agbp;
+ goto out_alloc;
+ }
+
+ /*
+ * We do not have an agbp, so select an initial allocation
+ * group for inode allocation.
+ */
+ start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
+ if (start_agno == NULLAGNUMBER) {
+ *inop = NULLFSINO;
+ return 0;
+ }
+
+ /*
+ * If we have already hit the ceiling of inode blocks then clear
+ * okalloc so we scan all available agi structures for a free
+ * inode.
+ */
+ if (mp->m_maxicount &&
+ mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+ noroom = 1;
+ okalloc = 0;
+ }
+
+ /*
+ * Loop until we find an allocation group that either has free inodes
+ * or in which we can allocate some inodes. Iterate through the
+ * allocation groups upward, wrapping at the end.
+ */
+ agno = start_agno;
+ for (;;) {
+ pag = xfs_perag_get(mp, agno);
+ if (!pag->pagi_inodeok) {
+ xfs_ialloc_next_ag(mp);
+ goto nextag;
+ }
+
+ if (!pag->pagi_init) {
+ error = xfs_ialloc_pagi_init(mp, tp, agno);
+ if (error)
+ goto out_error;
+ }
+
+ /*
+ * Do a first racy fast path check if this AG is usable.
+ */
+ if (!pag->pagi_freecount && !okalloc)
+ goto nextag;
+
+ /*
+ * Then read in the AGI buffer and recheck with the AGI buffer
+ * lock held.
+ */
+ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
+ if (error)
+ goto out_error;
+
+ if (pag->pagi_freecount) {
+ xfs_perag_put(pag);
+ goto out_alloc;
+ }
+
+ if (!okalloc)
+ goto nextag_relse_buffer;
+
+
+ error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
+ if (error) {
+ xfs_trans_brelse(tp, agbp);
+
+ if (error != ENOSPC)
+ goto out_error;
+
+ xfs_perag_put(pag);
+ *inop = NULLFSINO;
+ return 0;
+ }
+
+ if (ialloced) {
+ /*
+ * We successfully allocated some inodes, return
+ * the current context to the caller so that it
+ * can commit the current transaction and call
+ * us again where we left off.
+ */
+ ASSERT(pag->pagi_freecount > 0);
+ xfs_perag_put(pag);
+
+ *IO_agbp = agbp;
+ *inop = NULLFSINO;
+ return 0;
+ }
+
+nextag_relse_buffer:
+ xfs_trans_brelse(tp, agbp);
+nextag:
+ xfs_perag_put(pag);
+ if (++agno == mp->m_sb.sb_agcount)
+ agno = 0;
+ if (agno == start_agno) {
+ *inop = NULLFSINO;
+ return noroom ? ENOSPC : 0;
+ }
+ }
+
+out_alloc:
+ *IO_agbp = NULL;
+ return xfs_dialloc_ag(tp, agbp, parent, inop);
+out_error:
+ xfs_perag_put(pag);
+ return XFS_ERROR(error);
+}
+
+/*
* Free disk inode. Carefully avoids touching the incore inode, all
* manipulations incore are the caller's responsibility.
* The on-disk inode is not changed by this operation, only the
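
The comment block added above xfs_dialloc() spells out a two-call protocol: when a new inode chunk has to be allocated, the locked AGI buffer is handed back in *IO_agbp so the caller can commit, open a new transaction, and retry with free inodes guaranteed. A simplified caller sketch based only on that comment (the transaction roll is elided; parent_ino and okalloc are placeholder names):

	struct xfs_buf	*agibp = NULL;	/* must be NULL on the first call */
	xfs_ino_t	ino;
	int		error;

	error = xfs_dialloc(tp, parent_ino, mode, okalloc, &agibp, &ino);
	if (error)
		return error;

	if (agibp) {
		/*
		 * A chunk was allocated but no inode picked yet: commit this
		 * transaction, start a new one, and call again with the same
		 * still-locked AGI buffer; the retry is guaranteed to find a
		 * free inode.
		 */
		/* ... commit tp and allocate a fresh transaction here ... */
		error = xfs_dialloc(tp, parent_ino, mode, okalloc, &agibp, &ino);
		if (error)
			return error;
	}

	if (ino == NULLFSINO)
		return 0;	/* no space; the caller sees a NULL inode */
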
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 65ac57c8063c..1fd6ea4e9c91 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -75,8 +75,6 @@ xfs_dialloc(
umode_t mode, /* mode bits for new inode */
int okalloc, /* ok to allocate more space */
struct xfs_buf **agbp, /* buf for a.g. inode header */
- boolean_t *alloc_done, /* an allocation was done to replenish
- the free inodes */
xfs_ino_t *inop); /* inode number allocated */
/*
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 1bb4365e8c25..784a803383ec 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -41,17 +41,6 @@
/*
- * Define xfs inode iolock lockdep classes. We need to ensure that all active
- * inodes are considered the same for lockdep purposes, including inodes that
- * are recycled through the XFS_IRECLAIMABLE state. This is the the only way to
- * guarantee the locks are considered the same when there are multiple lock
- * initialisation siteѕ. Also, define a reclaimable inode class so it is
- * obvious in lockdep reports which class the report is against.
- */
-static struct lock_class_key xfs_iolock_active;
-struct lock_class_key xfs_iolock_reclaimable;
-
-/*
* Allocate and initialise an xfs_inode.
*/
STATIC struct xfs_inode *
@@ -80,8 +69,6 @@ xfs_inode_alloc(
ASSERT(ip->i_ino == 0);
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
- lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
- &xfs_iolock_active, "xfs_iolock_active");
/* initialise the xfs inode */
ip->i_ino = ino;
@@ -250,8 +237,6 @@ xfs_iget_cache_hit(
ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
- lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
- &xfs_iolock_active, "xfs_iolock_active");
spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a59eea09930a..2778258fcfa2 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -132,23 +132,28 @@ xfs_inobp_check(
#endif
/*
- * Find the buffer associated with the given inode map
- * We do basic validation checks on the buffer once it has been
- * retrieved from disk.
+ * This routine is called to map an inode to the buffer containing the on-disk
+ * version of the inode. It returns a pointer to the buffer containing the
+ * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
+ * pointer to the on-disk inode within that buffer.
+ *
+ * If a non-zero error is returned, then the contents of bpp and dipp are
+ * undefined.
*/
-STATIC int
+int
xfs_imap_to_bp(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- struct xfs_imap *imap,
- xfs_buf_t **bpp,
- uint buf_flags,
- uint iget_flags)
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_imap *imap,
+ struct xfs_dinode **dipp,
+ struct xfs_buf **bpp,
+ uint buf_flags,
+ uint iget_flags)
{
- int error;
- int i;
- int ni;
- xfs_buf_t *bp;
+ struct xfs_buf *bp;
+ int error;
+ int i;
+ int ni;
buf_flags |= XBF_UNMAPPED;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
@@ -189,8 +194,8 @@ xfs_imap_to_bp(
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EINVAL);
}
- XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
- XFS_ERRLEVEL_HIGH, mp, dip);
+ XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
+ mp, dip);
#ifdef DEBUG
xfs_emerg(mp,
"bad inode magic/vsn daddr %lld #%d (magic=%x)",
@@ -204,96 +209,9 @@ xfs_imap_to_bp(
}
xfs_inobp_check(mp, bp);
- *bpp = bp;
- return 0;
-}
-
-/*
- * This routine is called to map an inode number within a file
- * system to the buffer containing the on-disk version of the
- * inode. It returns a pointer to the buffer containing the
- * on-disk inode in the bpp parameter, and in the dip parameter
- * it returns a pointer to the on-disk inode within that buffer.
- *
- * If a non-zero error is returned, then the contents of bpp and
- * dipp are undefined.
- *
- * Use xfs_imap() to determine the size and location of the
- * buffer to read from disk.
- */
-int
-xfs_inotobp(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_ino_t ino,
- xfs_dinode_t **dipp,
- xfs_buf_t **bpp,
- int *offset,
- uint imap_flags)
-{
- struct xfs_imap imap;
- xfs_buf_t *bp;
- int error;
-
- imap.im_blkno = 0;
- error = xfs_imap(mp, tp, ino, &imap, imap_flags);
- if (error)
- return error;
-
- error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
- if (error)
- return error;
-
- *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
- *bpp = bp;
- *offset = imap.im_boffset;
- return 0;
-}
-
-
-/*
- * This routine is called to map an inode to the buffer containing
- * the on-disk version of the inode. It returns a pointer to the
- * buffer containing the on-disk inode in the bpp parameter, and in
- * the dip parameter it returns a pointer to the on-disk inode within
- * that buffer.
- *
- * If a non-zero error is returned, then the contents of bpp and
- * dipp are undefined.
- *
- * The inode is expected to already been mapped to its buffer and read
- * in once, thus we can use the mapping information stored in the inode
- * rather than calling xfs_imap(). This allows us to avoid the overhead
- * of looking at the inode btree for small block file systems
- * (see xfs_imap()).
- */
-int
-xfs_itobp(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_dinode_t **dipp,
- xfs_buf_t **bpp,
- uint buf_flags)
-{
- xfs_buf_t *bp;
- int error;
-
- ASSERT(ip->i_imap.im_blkno != 0);
-
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
- if (error)
- return error;
- if (!bp) {
- ASSERT(buf_flags & XBF_TRYLOCK);
- ASSERT(tp == NULL);
- *bpp = NULL;
- return EAGAIN;
- }
-
- *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
*bpp = bp;
+ *dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
return 0;
}
@@ -796,10 +714,9 @@ xfs_iread(
/*
* Get pointers to the on-disk inode and the buffer containing it.
*/
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
if (error)
return error;
- dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
/*
* If we got something that isn't an inode it means someone
@@ -876,7 +793,7 @@ xfs_iread(
/*
* Use xfs_trans_brelse() to release the buffer containing the
* on-disk inode, because it was acquired with xfs_trans_read_buf()
- * in xfs_itobp() above. If tp is NULL, this is just a normal
+ * in xfs_imap_to_bp() above. If tp is NULL, this is just a normal
* brelse(). If we're within a transaction, then xfs_trans_brelse()
* will only release the buffer if it is not dirty within the
* transaction. It will be OK to release the buffer in this case,
@@ -970,7 +887,6 @@ xfs_ialloc(
prid_t prid,
int okalloc,
xfs_buf_t **ialloc_context,
- boolean_t *call_again,
xfs_inode_t **ipp)
{
xfs_ino_t ino;
@@ -985,10 +901,10 @@ xfs_ialloc(
* the on-disk inode to be allocated.
*/
error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
- ialloc_context, call_again, &ino);
+ ialloc_context, &ino);
if (error)
return error;
- if (*call_again || ino == NULLFSINO) {
+ if (*ialloc_context || ino == NULLFSINO) {
*ipp = NULL;
return 0;
}
@@ -1207,7 +1123,9 @@ xfs_itruncate_extents(
int error = 0;
int done = 0;
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
+ xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(new_size <= XFS_ISIZE(ip));
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(ip->i_itemp != NULL);
@@ -1226,7 +1144,7 @@ xfs_itruncate_extents(
* then there is nothing to do.
*/
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (first_unmap_block == last_block)
return 0;
@@ -1355,7 +1273,8 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
+ 0, 0);
if (error)
return error;
@@ -1429,16 +1348,16 @@ xfs_iunlink_remove(
if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
/*
- * We're at the head of the list. Get the inode's
- * on-disk buffer to see if there is anyone after us
- * on the list. Only modify our next pointer if it
- * is not already NULLAGINO. This saves us the overhead
- * of dealing with the buffer when there is no need to
- * change it.
+ * We're at the head of the list. Get the inode's on-disk
+ * buffer to see if there is anyone after us on the list.
+ * Only modify our next pointer if it is not already NULLAGINO.
+ * This saves us the overhead of dealing with the buffer when
+ * there is no need to change it.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
+ 0, 0);
if (error) {
- xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
+ xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
__func__, error);
return error;
}
@@ -1472,34 +1391,45 @@ xfs_iunlink_remove(
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
last_ibp = NULL;
while (next_agino != agino) {
- /*
- * If the last inode wasn't the one pointing to
- * us, then release its buffer since we're not
- * going to do anything with it.
- */
- if (last_ibp != NULL) {
+ struct xfs_imap imap;
+
+ if (last_ibp)
xfs_trans_brelse(tp, last_ibp);
- }
+
+ imap.im_blkno = 0;
next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
- error = xfs_inotobp(mp, tp, next_ino, &last_dip,
- &last_ibp, &last_offset, 0);
+
+ error = xfs_imap(mp, tp, next_ino, &imap, 0);
+ if (error) {
+ xfs_warn(mp,
+ "%s: xfs_imap returned error %d.",
+ __func__, error);
+ return error;
+ }
+
+ error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
+ &last_ibp, 0, 0);
if (error) {
xfs_warn(mp,
- "%s: xfs_inotobp() returned error %d.",
+ "%s: xfs_imap_to_bp returned error %d.",
__func__, error);
return error;
}
+
+ last_offset = imap.im_boffset;
next_agino = be32_to_cpu(last_dip->di_next_unlinked);
ASSERT(next_agino != NULLAGINO);
ASSERT(next_agino != 0);
}
+
/*
- * Now last_ibp points to the buffer previous to us on
- * the unlinked list. Pull us from the list.
+ * Now last_ibp points to the buffer previous to us on the
+ * unlinked list. Pull us from the list.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
+ 0, 0);
if (error) {
- xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
+ xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
__func__, error);
return error;
}
@@ -1749,7 +1679,8 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0);
+ error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
+ 0, 0);
if (error)
return error;
@@ -2428,7 +2359,7 @@ xfs_iflush(
/*
* For stale inodes we cannot rely on the backing buffer remaining
* stale in cache for the remaining life of the stale inode and so
- * xfs_itobp() below may give us a buffer that no longer contains
+ * xfs_imap_to_bp() below may give us a buffer that no longer contains
* inodes below. We have to check this after ensuring the inode is
* unpinned so that it is safe to reclaim the stale inode after the
* flush call.
@@ -2454,7 +2385,8 @@ xfs_iflush(
/*
* Get the buffer containing the on-disk inode.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK);
+ error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
+ 0);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 1efff36a75b6..94b32f906e79 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -487,8 +487,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
-extern struct lock_class_key xfs_iolock_reclaimable;
-
/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
@@ -517,7 +515,7 @@ void xfs_inode_free(struct xfs_inode *ip);
*/
int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
xfs_nlink_t, xfs_dev_t, prid_t, int,
- struct xfs_buf **, boolean_t *, xfs_inode_t **);
+ struct xfs_buf **, xfs_inode_t **);
uint xfs_ip2xflags(struct xfs_inode *);
uint xfs_dic2xflags(struct xfs_dinode *);
@@ -557,12 +555,9 @@ do { \
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
-int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
- xfs_ino_t, struct xfs_dinode **,
- struct xfs_buf **, int *, uint);
-int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
- struct xfs_inode *, struct xfs_dinode **,
- struct xfs_buf **, uint);
+int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
+ struct xfs_imap *, struct xfs_dinode **,
+ struct xfs_buf **, uint, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, uint);
void xfs_dinode_to_disk(struct xfs_dinode *,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 1f1535d25a9b..0e0232c3b6d9 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -364,9 +364,15 @@ xfs_fssetdm_by_handle(
if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(parfilp);
+ if (error)
+ return error;
+
dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ mnt_drop_write_file(parfilp);
return PTR_ERR(dentry);
+ }
if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
error = -XFS_ERROR(EPERM);
@@ -382,6 +388,7 @@ xfs_fssetdm_by_handle(
fsd.fsd_dmstate);
out:
+ mnt_drop_write_file(parfilp);
dput(dentry);
return error;
}
@@ -634,7 +641,11 @@ xfs_ioc_space(
if (ioflags & IO_INVIS)
attr_flags |= XFS_ATTR_DMI;
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
+ mnt_drop_write_file(filp);
return -error;
}
@@ -1163,6 +1174,7 @@ xfs_ioc_fssetxattr(
{
struct fsxattr fa;
unsigned int mask;
+ int error;
if (copy_from_user(&fa, arg, sizeof(fa)))
return -EFAULT;
@@ -1171,7 +1183,12 @@ xfs_ioc_fssetxattr(
if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
mask |= FSX_NONBLOCK;
- return -xfs_ioctl_setattr(ip, &fa, mask);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
+ error = xfs_ioctl_setattr(ip, &fa, mask);
+ mnt_drop_write_file(filp);
+ return -error;
}
STATIC int
@@ -1196,6 +1213,7 @@ xfs_ioc_setxflags(
struct fsxattr fa;
unsigned int flags;
unsigned int mask;
+ int error;
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
@@ -1210,7 +1228,12 @@ xfs_ioc_setxflags(
mask |= FSX_NONBLOCK;
fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip));
- return -xfs_ioctl_setattr(ip, &fa, mask);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
+ error = xfs_ioctl_setattr(ip, &fa, mask);
+ mnt_drop_write_file(filp);
+ return -error;
}
STATIC int
@@ -1385,8 +1408,13 @@ xfs_file_ioctl(
if (copy_from_user(&dmi, arg, sizeof(dmi)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
+
error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask,
dmi.fsd_dmstate);
+ mnt_drop_write_file(filp);
return -error;
}
@@ -1434,7 +1462,11 @@ xfs_file_ioctl(
if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_swapext(&sxp);
+ mnt_drop_write_file(filp);
return -error;
}
@@ -1463,9 +1495,14 @@ xfs_file_ioctl(
if (copy_from_user(&inout, arg, sizeof(inout)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
+
/* input parameter is passed in resblks field of structure */
in = inout.resblks;
error = xfs_reserve_blocks(mp, &in, &inout);
+ mnt_drop_write_file(filp);
if (error)
return -error;
@@ -1496,7 +1533,11 @@ xfs_file_ioctl(
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_growfs_data(mp, &in);
+ mnt_drop_write_file(filp);
return -error;
}
@@ -1506,7 +1547,11 @@ xfs_file_ioctl(
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_growfs_log(mp, &in);
+ mnt_drop_write_file(filp);
return -error;
}
@@ -1516,7 +1561,11 @@ xfs_file_ioctl(
if (copy_from_user(&in, arg, sizeof(in)))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_growfs_rt(mp, &in);
+ mnt_drop_write_file(filp);
return -error;
}
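
Every modifying ioctl touched above gains the same bracket: take a write reference on the mount with mnt_want_write_file() before doing any work, and drop it on every exit path. Stripped to a sketch (the ioctl body is a placeholder):

	int error;

	error = mnt_want_write_file(filp);	/* fails on a read-only mount */
	if (error)
		return error;

	error = 0;	/* ... xfs_growfs_data(), xfs_swapext(), xfs_ioctl_setattr(), ... */

	mnt_drop_write_file(filp);		/* always paired, even on error */
	return -error;
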
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index c4f2da0d2bf5..1244274a5674 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -600,7 +600,11 @@ xfs_file_compat_ioctl(
if (xfs_compat_growfs_data_copyin(&in, arg))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_growfs_data(mp, &in);
+ mnt_drop_write_file(filp);
return -error;
}
case XFS_IOC_FSGROWFSRT_32: {
@@ -608,7 +612,11 @@ xfs_file_compat_ioctl(
if (xfs_compat_growfs_rt_copyin(&in, arg))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_growfs_rt(mp, &in);
+ mnt_drop_write_file(filp);
return -error;
}
#endif
@@ -627,7 +635,11 @@ xfs_file_compat_ioctl(
offsetof(struct xfs_swapext, sx_stat)) ||
xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
return -XFS_ERROR(EFAULT);
+ error = mnt_want_write_file(filp);
+ if (error)
+ return error;
error = xfs_swapext(&sxp);
+ mnt_drop_write_file(filp);
return -error;
}
case XFS_IOC_FSBULKSTAT_32:
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index aadfce6681ee..973dff6ad935 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -285,7 +285,7 @@ xfs_iomap_eof_want_preallocate(
* do any speculative allocation.
*/
start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
- count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
while (count_fsb > 0) {
imaps = nimaps;
firstblock = NULLFSBLOCK;
@@ -416,8 +416,8 @@ retry:
* Make sure preallocation does not create extents beyond the range we
* actually support in this filesystem.
*/
- if (last_fsb > XFS_B_TO_FSB(mp, mp->m_maxioffset))
- last_fsb = XFS_B_TO_FSB(mp, mp->m_maxioffset);
+ if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
+ last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
ASSERT(last_fsb > offset_fsb);
@@ -680,9 +680,9 @@ xfs_iomap_write_unwritten(
* the same inode that we complete here and might deadlock
* on the iolock.
*/
- xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
+ sb_start_intwrite(mp->m_super);
tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
- tp->t_flags |= XFS_TRANS_RESERVE;
+ tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
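
xfs_iomap_write_unwritten() now takes freeze protection explicitly: sb_start_intwrite() before the transaction is allocated, plus the XFS_TRANS_FREEZE_PROT flag so the transaction code can release that protection when it commits or cancels. Only the acquisition appears in this hunk; the release path is an assumption noted in the sketch below:

	struct xfs_trans	*tp;

	sb_start_intwrite(mp->m_super);		/* internal-write freeze level */
	tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
	tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
	/*
	 * XFS_TRANS_FREEZE_PROT records that this transaction holds the
	 * freeze reference; presumably xfs_trans_commit()/xfs_trans_cancel()
	 * drop it via sb_end_intwrite().
	 */
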
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 9c4340f5c3e0..4e00cf091d2c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -897,6 +897,47 @@ xfs_vn_setattr(
return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
}
+STATIC int
+xfs_vn_update_time(
+ struct inode *inode,
+ struct timespec *now,
+ int flags)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ trace_xfs_update_time(ip);
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ return -error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (flags & S_CTIME) {
+ inode->i_ctime = *now;
+ ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec;
+ ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec;
+ }
+ if (flags & S_MTIME) {
+ inode->i_mtime = *now;
+ ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec;
+ ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec;
+ }
+ if (flags & S_ATIME) {
+ inode->i_atime = *now;
+ ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec;
+ ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec;
+ }
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
+ return -xfs_trans_commit(tp, 0);
+}
+
#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
/*
@@ -991,6 +1032,7 @@ static const struct inode_operations xfs_inode_operations = {
.removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr,
.fiemap = xfs_vn_fiemap,
+ .update_time = xfs_vn_update_time,
};
static const struct inode_operations xfs_dir_inode_operations = {
@@ -1016,6 +1058,7 @@ static const struct inode_operations xfs_dir_inode_operations = {
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr,
+ .update_time = xfs_vn_update_time,
};
static const struct inode_operations xfs_dir_ci_inode_operations = {
@@ -1041,6 +1084,7 @@ static const struct inode_operations xfs_dir_ci_inode_operations = {
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr,
+ .update_time = xfs_vn_update_time,
};
static const struct inode_operations xfs_symlink_inode_operations = {
@@ -1054,6 +1098,7 @@ static const struct inode_operations xfs_symlink_inode_operations = {
.getxattr = generic_getxattr,
.removexattr = generic_removexattr,
.listxattr = xfs_vn_listxattr,
+ .update_time = xfs_vn_update_time,
};
STATIC void
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index eff577a9b67f..01d10a66e302 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -555,7 +555,7 @@ xfs_bulkstat_single(
/*
* note that requesting valid inode numbers which are not allocated
- * to inodes will most likely cause xfs_itobp to generate warning
+ * to inodes will most likely cause xfs_imap_to_bp to generate warning
* messages about bad magic numbers. This is ok. The fact that
* the inode isn't actually an inode is handled by the
* error check below. Done this way to make the usual case faster
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index d90d4a388609..7f4f9370d0e7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -45,51 +45,85 @@ xlog_commit_record(
struct xlog_in_core **iclog,
xfs_lsn_t *commitlsnp);
-STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
- xfs_buftarg_t *log_target,
- xfs_daddr_t blk_offset,
- int num_bblks);
+STATIC struct xlog *
+xlog_alloc_log(
+ struct xfs_mount *mp,
+ struct xfs_buftarg *log_target,
+ xfs_daddr_t blk_offset,
+ int num_bblks);
STATIC int
xlog_space_left(
struct xlog *log,
atomic64_t *head);
-STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
-STATIC void xlog_dealloc_log(xlog_t *log);
+STATIC int
+xlog_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog);
+STATIC void
+xlog_dealloc_log(
+ struct xlog *log);
/* local state machine functions */
STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
-STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
-STATIC int xlog_state_get_iclog_space(xlog_t *log,
- int len,
- xlog_in_core_t **iclog,
- xlog_ticket_t *ticket,
- int *continued_write,
- int *logoffsetp);
-STATIC int xlog_state_release_iclog(xlog_t *log,
- xlog_in_core_t *iclog);
-STATIC void xlog_state_switch_iclogs(xlog_t *log,
- xlog_in_core_t *iclog,
- int eventual_size);
-STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
+STATIC void
+xlog_state_do_callback(
+ struct xlog *log,
+ int aborted,
+ struct xlog_in_core *iclog);
+STATIC int
+xlog_state_get_iclog_space(
+ struct xlog *log,
+ int len,
+ struct xlog_in_core **iclog,
+ struct xlog_ticket *ticket,
+ int *continued_write,
+ int *logoffsetp);
+STATIC int
+xlog_state_release_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog);
+STATIC void
+xlog_state_switch_iclogs(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int eventual_size);
+STATIC void
+xlog_state_want_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog);
STATIC void
xlog_grant_push_ail(
- struct xlog *log,
- int need_bytes);
-STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
- xlog_ticket_t *ticket);
-STATIC void xlog_ungrant_log_space(xlog_t *log,
- xlog_ticket_t *ticket);
+ struct xlog *log,
+ int need_bytes);
+STATIC void
+xlog_regrant_reserve_log_space(
+ struct xlog *log,
+ struct xlog_ticket *ticket);
+STATIC void
+xlog_ungrant_log_space(
+ struct xlog *log,
+ struct xlog_ticket *ticket);
#if defined(DEBUG)
-STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
+STATIC void
+xlog_verify_dest_ptr(
+ struct xlog *log,
+ char *ptr);
STATIC void
xlog_verify_grant_tail(
- struct xlog *log);
-STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
- int count, boolean_t syncing);
-STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
- xfs_lsn_t tail_lsn);
+ struct xlog *log);
+STATIC void
+xlog_verify_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int count,
+ boolean_t syncing);
+STATIC void
+xlog_verify_tail_lsn(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ xfs_lsn_t tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
#define xlog_verify_grant_tail(a)
@@ -97,7 +131,9 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
#define xlog_verify_tail_lsn(a,b,c)
#endif
-STATIC int xlog_iclogs_empty(xlog_t *log);
+STATIC int
+xlog_iclogs_empty(
+ struct xlog *log);
static void
xlog_grant_sub_space(
@@ -684,7 +720,7 @@ xfs_log_mount_finish(xfs_mount_t *mp)
int
xfs_log_unmount_write(xfs_mount_t *mp)
{
- xlog_t *log = mp->m_log;
+ struct xlog *log = mp->m_log;
xlog_in_core_t *iclog;
#ifdef DEBUG
xlog_in_core_t *first_iclog;
@@ -893,7 +929,7 @@ int
xfs_log_need_covered(xfs_mount_t *mp)
{
int needed = 0;
- xlog_t *log = mp->m_log;
+ struct xlog *log = mp->m_log;
if (!xfs_fs_writable(mp))
return 0;
@@ -1024,9 +1060,9 @@ xlog_space_left(
void
xlog_iodone(xfs_buf_t *bp)
{
- xlog_in_core_t *iclog = bp->b_fspriv;
- xlog_t *l = iclog->ic_log;
- int aborted = 0;
+ struct xlog_in_core *iclog = bp->b_fspriv;
+ struct xlog *l = iclog->ic_log;
+ int aborted = 0;
/*
* Race to shutdown the filesystem if we see an error.
@@ -1067,8 +1103,9 @@ xlog_iodone(xfs_buf_t *bp)
*/
STATIC void
-xlog_get_iclog_buffer_size(xfs_mount_t *mp,
- xlog_t *log)
+xlog_get_iclog_buffer_size(
+ struct xfs_mount *mp,
+ struct xlog *log)
{
int size;
int xhdrs;
@@ -1129,13 +1166,14 @@ done:
* Its primary purpose is to fill in enough, so recovery can occur. However,
* some other stuff may be filled in too.
*/
-STATIC xlog_t *
-xlog_alloc_log(xfs_mount_t *mp,
- xfs_buftarg_t *log_target,
- xfs_daddr_t blk_offset,
- int num_bblks)
+STATIC struct xlog *
+xlog_alloc_log(
+ struct xfs_mount *mp,
+ struct xfs_buftarg *log_target,
+ xfs_daddr_t blk_offset,
+ int num_bblks)
{
- xlog_t *log;
+ struct xlog *log;
xlog_rec_header_t *head;
xlog_in_core_t **iclogp;
xlog_in_core_t *iclog, *prev_iclog=NULL;
@@ -1144,7 +1182,7 @@ xlog_alloc_log(xfs_mount_t *mp,
int error = ENOMEM;
uint log2_size = 0;
- log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
+ log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL);
if (!log) {
xfs_warn(mp, "Log allocation failed: No memory!");
goto out;
@@ -1434,8 +1472,9 @@ xlog_bdstrat(
*/
STATIC int
-xlog_sync(xlog_t *log,
- xlog_in_core_t *iclog)
+xlog_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
{
xfs_caddr_t dptr; /* pointer to byte sized element */
xfs_buf_t *bp;
@@ -1584,7 +1623,8 @@ xlog_sync(xlog_t *log,
* Deallocate a log structure
*/
STATIC void
-xlog_dealloc_log(xlog_t *log)
+xlog_dealloc_log(
+ struct xlog *log)
{
xlog_in_core_t *iclog, *next_iclog;
int i;
@@ -1616,10 +1656,11 @@ xlog_dealloc_log(xlog_t *log)
*/
/* ARGSUSED */
static inline void
-xlog_state_finish_copy(xlog_t *log,
- xlog_in_core_t *iclog,
- int record_cnt,
- int copy_bytes)
+xlog_state_finish_copy(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int record_cnt,
+ int copy_bytes)
{
spin_lock(&log->l_icloglock);
@@ -2142,7 +2183,8 @@ xlog_write(
* State Change: DIRTY -> ACTIVE
*/
STATIC void
-xlog_state_clean_log(xlog_t *log)
+xlog_state_clean_log(
+ struct xlog *log)
{
xlog_in_core_t *iclog;
int changed = 0;
@@ -2222,7 +2264,7 @@ xlog_state_clean_log(xlog_t *log)
STATIC xfs_lsn_t
xlog_get_lowest_lsn(
- xlog_t *log)
+ struct xlog *log)
{
xlog_in_core_t *lsn_log;
xfs_lsn_t lowest_lsn, lsn;
@@ -2245,9 +2287,9 @@ xlog_get_lowest_lsn(
STATIC void
xlog_state_do_callback(
- xlog_t *log,
- int aborted,
- xlog_in_core_t *ciclog)
+ struct xlog *log,
+ int aborted,
+ struct xlog_in_core *ciclog)
{
xlog_in_core_t *iclog;
xlog_in_core_t *first_iclog; /* used to know when we've
@@ -2467,7 +2509,7 @@ xlog_state_done_syncing(
xlog_in_core_t *iclog,
int aborted)
{
- xlog_t *log = iclog->ic_log;
+ struct xlog *log = iclog->ic_log;
spin_lock(&log->l_icloglock);
@@ -2521,12 +2563,13 @@ xlog_state_done_syncing(
* is copied.
*/
STATIC int
-xlog_state_get_iclog_space(xlog_t *log,
- int len,
- xlog_in_core_t **iclogp,
- xlog_ticket_t *ticket,
- int *continued_write,
- int *logoffsetp)
+xlog_state_get_iclog_space(
+ struct xlog *log,
+ int len,
+ struct xlog_in_core **iclogp,
+ struct xlog_ticket *ticket,
+ int *continued_write,
+ int *logoffsetp)
{
int log_offset;
xlog_rec_header_t *head;
@@ -2631,8 +2674,9 @@ restart:
* move grant reservation head forward.
*/
STATIC void
-xlog_regrant_reserve_log_space(xlog_t *log,
- xlog_ticket_t *ticket)
+xlog_regrant_reserve_log_space(
+ struct xlog *log,
+ struct xlog_ticket *ticket)
{
trace_xfs_log_regrant_reserve_enter(log, ticket);
@@ -2677,8 +2721,9 @@ xlog_regrant_reserve_log_space(xlog_t *log,
* in the current reservation field.
*/
STATIC void
-xlog_ungrant_log_space(xlog_t *log,
- xlog_ticket_t *ticket)
+xlog_ungrant_log_space(
+ struct xlog *log,
+ struct xlog_ticket *ticket)
{
int bytes;
@@ -2717,8 +2762,8 @@ xlog_ungrant_log_space(xlog_t *log,
*/
STATIC int
xlog_state_release_iclog(
- xlog_t *log,
- xlog_in_core_t *iclog)
+ struct xlog *log,
+ struct xlog_in_core *iclog)
{
int sync = 0; /* do we sync? */
@@ -2768,9 +2813,10 @@ xlog_state_release_iclog(
* that every data block. We have run out of space in this log record.
*/
STATIC void
-xlog_state_switch_iclogs(xlog_t *log,
- xlog_in_core_t *iclog,
- int eventual_size)
+xlog_state_switch_iclogs(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int eventual_size)
{
ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
if (!eventual_size)
@@ -3114,7 +3160,9 @@ xfs_log_force_lsn(
* disk.
*/
STATIC void
-xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
+xlog_state_want_sync(
+ struct xlog *log,
+ struct xlog_in_core *iclog)
{
assert_spin_locked(&log->l_icloglock);
@@ -3158,7 +3206,7 @@ xfs_log_ticket_get(
/*
* Allocate and initialise a new log ticket.
*/
-xlog_ticket_t *
+struct xlog_ticket *
xlog_ticket_alloc(
struct xlog *log,
int unit_bytes,
@@ -3346,9 +3394,10 @@ xlog_verify_grant_tail(
/* check if it will fit */
STATIC void
-xlog_verify_tail_lsn(xlog_t *log,
- xlog_in_core_t *iclog,
- xfs_lsn_t tail_lsn)
+xlog_verify_tail_lsn(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ xfs_lsn_t tail_lsn)
{
int blocks;
@@ -3385,10 +3434,11 @@ xlog_verify_tail_lsn(xlog_t *log,
* the cycle numbers agree with the current cycle number.
*/
STATIC void
-xlog_verify_iclog(xlog_t *log,
- xlog_in_core_t *iclog,
- int count,
- boolean_t syncing)
+xlog_verify_iclog(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int count,
+ boolean_t syncing)
{
xlog_op_header_t *ophead;
xlog_in_core_t *icptr;
@@ -3482,7 +3532,7 @@ xlog_verify_iclog(xlog_t *log,
*/
STATIC int
xlog_state_ioerror(
- xlog_t *log)
+ struct xlog *log)
{
xlog_in_core_t *iclog, *ic;
@@ -3527,7 +3577,7 @@ xfs_log_force_umount(
struct xfs_mount *mp,
int logerror)
{
- xlog_t *log;
+ struct xlog *log;
int retval;
log = mp->m_log;
@@ -3634,7 +3684,8 @@ xfs_log_force_umount(
}
STATIC int
-xlog_iclogs_empty(xlog_t *log)
+xlog_iclogs_empty(
+ struct xlog *log)
{
xlog_in_core_t *iclog;
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 72eba2201b14..18a801d76a42 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -487,7 +487,7 @@ struct xlog_grant_head {
* overflow 31 bits worth of byte offset, so using a byte number will mean
* that round off problems won't occur when releasing partial reservations.
*/
-typedef struct xlog {
+struct xlog {
/* The following fields don't need locking */
struct xfs_mount *l_mp; /* mount point */
struct xfs_ail *l_ailp; /* AIL log is working with */
@@ -540,7 +540,7 @@ typedef struct xlog {
char *l_iclog_bak[XLOG_MAX_ICLOGS];
#endif
-} xlog_t;
+};
#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
@@ -548,9 +548,17 @@ typedef struct xlog {
#define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR)
/* common routines */
-extern int xlog_recover(xlog_t *log);
-extern int xlog_recover_finish(xlog_t *log);
-extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
+extern int
+xlog_recover(
+ struct xlog *log);
+extern int
+xlog_recover_finish(
+ struct xlog *log);
+extern void
+xlog_pack_data(
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int);
extern kmem_zone_t *xfs_log_ticket_zone;
struct xlog_ticket *
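Annotation: the whole xfs_log_priv.h hunk above is the core of the change — the typedef'd xlog_t becomes a plain struct tag so other headers can get by with an opaque forward declaration. A minimal stand-alone sketch of that idiom, with made-up member and function names (not taken from the patch):

#include <stdio.h>

struct xlog;                                     /* forward declaration only */

static int xlog_flags_example(struct xlog *log); /* prototype needs no definition */

struct xlog {                                    /* full definition in one place */
        int     l_flags;
};

static int
xlog_flags_example(
        struct xlog     *log)
{
        return log->l_flags;
}

int main(void)
{
        struct xlog log = { .l_flags = 0x1 };

        printf("flags: %#x\n", xlog_flags_example(&log));
        return 0;
}

The payoff is that consumers of the prototypes never need the structure layout, which is exactly why the many xlog_t parameter conversions earlier in this diff are purely mechanical.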
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a7be98abd6a9..5da3ace352bf 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -43,10 +43,18 @@
#include "xfs_utils.h"
#include "xfs_trace.h"
-STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
-STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
+STATIC int
+xlog_find_zeroed(
+ struct xlog *,
+ xfs_daddr_t *);
+STATIC int
+xlog_clear_stale_blocks(
+ struct xlog *,
+ xfs_lsn_t);
#if defined(DEBUG)
-STATIC void xlog_recover_check_summary(xlog_t *);
+STATIC void
+xlog_recover_check_summary(
+ struct xlog *);
#else
#define xlog_recover_check_summary(log)
#endif
@@ -74,7 +82,7 @@ struct xfs_buf_cancel {
static inline int
xlog_buf_bbcount_valid(
- xlog_t *log,
+ struct xlog *log,
int bbcount)
{
return bbcount > 0 && bbcount <= log->l_logBBsize;
@@ -87,7 +95,7 @@ xlog_buf_bbcount_valid(
*/
STATIC xfs_buf_t *
xlog_get_bp(
- xlog_t *log,
+ struct xlog *log,
int nbblks)
{
struct xfs_buf *bp;
@@ -138,10 +146,10 @@ xlog_put_bp(
*/
STATIC xfs_caddr_t
xlog_align(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t blk_no,
int nbblks,
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
@@ -155,10 +163,10 @@ xlog_align(
*/
STATIC int
xlog_bread_noalign(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t blk_no,
int nbblks,
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
int error;
@@ -189,10 +197,10 @@ xlog_bread_noalign(
STATIC int
xlog_bread(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t blk_no,
int nbblks,
- xfs_buf_t *bp,
+ struct xfs_buf *bp,
xfs_caddr_t *offset)
{
int error;
@@ -211,10 +219,10 @@ xlog_bread(
*/
STATIC int
xlog_bread_offset(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t blk_no, /* block to read from */
int nbblks, /* blocks to read */
- xfs_buf_t *bp,
+ struct xfs_buf *bp,
xfs_caddr_t offset)
{
xfs_caddr_t orig_offset = bp->b_addr;
@@ -241,10 +249,10 @@ xlog_bread_offset(
*/
STATIC int
xlog_bwrite(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t blk_no,
int nbblks,
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
int error;
@@ -378,8 +386,8 @@ xlog_recover_iodone(
*/
STATIC int
xlog_find_cycle_start(
- xlog_t *log,
- xfs_buf_t *bp,
+ struct xlog *log,
+ struct xfs_buf *bp,
xfs_daddr_t first_blk,
xfs_daddr_t *last_blk,
uint cycle)
@@ -421,7 +429,7 @@ xlog_find_cycle_start(
*/
STATIC int
xlog_find_verify_cycle(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t start_blk,
int nbblks,
uint stop_on_cycle_no,
@@ -490,7 +498,7 @@ out:
*/
STATIC int
xlog_find_verify_log_record(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t start_blk,
xfs_daddr_t *last_blk,
int extra_bblks)
@@ -600,7 +608,7 @@ out:
*/
STATIC int
xlog_find_head(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t *return_head_blk)
{
xfs_buf_t *bp;
@@ -871,7 +879,7 @@ validate_head:
*/
STATIC int
xlog_find_tail(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t *head_blk,
xfs_daddr_t *tail_blk)
{
@@ -1080,7 +1088,7 @@ done:
*/
STATIC int
xlog_find_zeroed(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t *blk_no)
{
xfs_buf_t *bp;
@@ -1183,7 +1191,7 @@ bp_err:
*/
STATIC void
xlog_add_record(
- xlog_t *log,
+ struct xlog *log,
xfs_caddr_t buf,
int cycle,
int block,
@@ -1205,7 +1213,7 @@ xlog_add_record(
STATIC int
xlog_write_log_records(
- xlog_t *log,
+ struct xlog *log,
int cycle,
int start_block,
int blocks,
@@ -1305,7 +1313,7 @@ xlog_write_log_records(
*/
STATIC int
xlog_clear_stale_blocks(
- xlog_t *log,
+ struct xlog *log,
xfs_lsn_t tail_lsn)
{
int tail_cycle, head_cycle;
@@ -2050,11 +2058,11 @@ xfs_qm_dqcheck(
*/
STATIC void
xlog_recover_do_dquot_buffer(
- xfs_mount_t *mp,
- xlog_t *log,
- xlog_recover_item_t *item,
- xfs_buf_t *bp,
- xfs_buf_log_format_t *buf_f)
+ struct xfs_mount *mp,
+ struct xlog *log,
+ struct xlog_recover_item *item,
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f)
{
uint type;
@@ -2108,9 +2116,9 @@ xlog_recover_do_dquot_buffer(
*/
STATIC int
xlog_recover_buffer_pass2(
- xlog_t *log,
- struct list_head *buffer_list,
- xlog_recover_item_t *item)
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item)
{
xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
xfs_mount_t *mp = log->l_mp;
@@ -2189,9 +2197,9 @@ xlog_recover_buffer_pass2(
STATIC int
xlog_recover_inode_pass2(
- xlog_t *log,
- struct list_head *buffer_list,
- xlog_recover_item_t *item)
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item)
{
xfs_inode_log_format_t *in_f;
xfs_mount_t *mp = log->l_mp;
@@ -2452,14 +2460,14 @@ error:
}
/*
- * Recover QUOTAOFF records. We simply make a note of it in the xlog_t
+ * Recover QUOTAOFF records. We simply make a note of it in the xlog
* structure, so that we know not to do any dquot item or dquot buffer recovery,
* of that type.
*/
STATIC int
xlog_recover_quotaoff_pass1(
- xlog_t *log,
- xlog_recover_item_t *item)
+ struct xlog *log,
+ struct xlog_recover_item *item)
{
xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
ASSERT(qoff_f);
@@ -2483,9 +2491,9 @@ xlog_recover_quotaoff_pass1(
*/
STATIC int
xlog_recover_dquot_pass2(
- xlog_t *log,
- struct list_head *buffer_list,
- xlog_recover_item_t *item)
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item)
{
xfs_mount_t *mp = log->l_mp;
xfs_buf_t *bp;
@@ -2578,9 +2586,9 @@ xlog_recover_dquot_pass2(
*/
STATIC int
xlog_recover_efi_pass2(
- xlog_t *log,
- xlog_recover_item_t *item,
- xfs_lsn_t lsn)
+ struct xlog *log,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
{
int error;
xfs_mount_t *mp = log->l_mp;
@@ -2616,8 +2624,8 @@ xlog_recover_efi_pass2(
*/
STATIC int
xlog_recover_efd_pass2(
- xlog_t *log,
- xlog_recover_item_t *item)
+ struct xlog *log,
+ struct xlog_recover_item *item)
{
xfs_efd_log_format_t *efd_formatp;
xfs_efi_log_item_t *efip = NULL;
@@ -2812,9 +2820,9 @@ xlog_recover_unmount_trans(
*/
STATIC int
xlog_recover_process_data(
- xlog_t *log,
+ struct xlog *log,
struct hlist_head rhash[],
- xlog_rec_header_t *rhead,
+ struct xlog_rec_header *rhead,
xfs_caddr_t dp,
int pass)
{
@@ -2986,7 +2994,7 @@ abort_error:
*/
STATIC int
xlog_recover_process_efis(
- xlog_t *log)
+ struct xlog *log)
{
xfs_log_item_t *lip;
xfs_efi_log_item_t *efip;
@@ -3098,7 +3106,7 @@ xlog_recover_process_one_iunlink(
/*
* Get the on disk inode to find the next inode in the bucket.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0);
+ error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
if (error)
goto fail_iput;
@@ -3147,7 +3155,7 @@ xlog_recover_process_one_iunlink(
*/
STATIC void
xlog_recover_process_iunlinks(
- xlog_t *log)
+ struct xlog *log)
{
xfs_mount_t *mp;
xfs_agnumber_t agno;
@@ -3209,9 +3217,9 @@ xlog_recover_process_iunlinks(
#ifdef DEBUG
STATIC void
xlog_pack_data_checksum(
- xlog_t *log,
- xlog_in_core_t *iclog,
- int size)
+ struct xlog *log,
+ struct xlog_in_core *iclog,
+ int size)
{
int i;
__be32 *up;
@@ -3234,8 +3242,8 @@ xlog_pack_data_checksum(
*/
void
xlog_pack_data(
- xlog_t *log,
- xlog_in_core_t *iclog,
+ struct xlog *log,
+ struct xlog_in_core *iclog,
int roundoff)
{
int i, j, k;
@@ -3274,9 +3282,9 @@ xlog_pack_data(
STATIC void
xlog_unpack_data(
- xlog_rec_header_t *rhead,
+ struct xlog_rec_header *rhead,
xfs_caddr_t dp,
- xlog_t *log)
+ struct xlog *log)
{
int i, j, k;
@@ -3299,8 +3307,8 @@ xlog_unpack_data(
STATIC int
xlog_valid_rec_header(
- xlog_t *log,
- xlog_rec_header_t *rhead,
+ struct xlog *log,
+ struct xlog_rec_header *rhead,
xfs_daddr_t blkno)
{
int hlen;
@@ -3343,7 +3351,7 @@ xlog_valid_rec_header(
*/
STATIC int
xlog_do_recovery_pass(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk,
int pass)
@@ -3595,7 +3603,7 @@ xlog_do_recovery_pass(
*/
STATIC int
xlog_do_log_recovery(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
@@ -3646,7 +3654,7 @@ xlog_do_log_recovery(
*/
STATIC int
xlog_do_recover(
- xlog_t *log,
+ struct xlog *log,
xfs_daddr_t head_blk,
xfs_daddr_t tail_blk)
{
@@ -3721,7 +3729,7 @@ xlog_do_recover(
*/
int
xlog_recover(
- xlog_t *log)
+ struct xlog *log)
{
xfs_daddr_t head_blk, tail_blk;
int error;
@@ -3767,7 +3775,7 @@ xlog_recover(
*/
int
xlog_recover_finish(
- xlog_t *log)
+ struct xlog *log)
{
/*
* Now we're ready to do the transactions needed for the
@@ -3814,7 +3822,7 @@ xlog_recover_finish(
*/
void
xlog_recover_check_summary(
- xlog_t *log)
+ struct xlog *log)
{
xfs_mount_t *mp;
xfs_agf_t *agfp;
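Annotation: most of the xfs_log_recover.c hunks are the same mechanical typedef removal, but the xfs_itobp() to xfs_imap_to_bp() switch in xlog_recover_process_one_iunlink() also changes the calling convention: the caller now passes the inode mapping it already holds instead of letting the helper re-derive it. A generic, user-space analogue of that shape, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

struct imap { uint64_t im_blkno; int im_len; int im_boffset; };

static int read_inode_buf(const struct imap *imap)
{
        printf("reading %d blocks at daddr %llu, inode at offset %d\n",
               imap->im_len, (unsigned long long)imap->im_blkno,
               imap->im_boffset);
        return 0;
}

int main(void)
{
        struct imap im = { .im_blkno = 8192, .im_len = 16, .im_boffset = 256 };

        return read_inode_buf(&im);     /* mapping handed over explicitly */
}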
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 536021fb3d4e..29c2f83d4147 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1200,8 +1200,6 @@ xfs_mountfs(
xfs_set_maxicount(mp);
- mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);
-
error = xfs_uuid_mount(mp);
if (error)
goto out;
@@ -1531,6 +1529,15 @@ xfs_unmountfs(
xfs_ail_push_all_sync(mp->m_ail);
xfs_wait_buftarg(mp->m_ddev_targp);
+ /*
+ * The superblock buffer is uncached and xfsaild_push() will lock and
+ * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
+ * here but a lock on the superblock buffer will block until iodone()
+ * has completed.
+ */
+ xfs_buf_lock(mp->m_sb_bp);
+ xfs_buf_unlock(mp->m_sb_bp);
+
xfs_log_unmount_write(mp);
xfs_log_unmount(mp);
xfs_uuid_unmount(mp);
@@ -1544,7 +1551,7 @@ xfs_unmountfs(
int
xfs_fs_writable(xfs_mount_t *mp)
{
- return !(xfs_test_for_freeze(mp) || XFS_FORCED_SHUTDOWN(mp) ||
+ return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) ||
(mp->m_flags & XFS_MOUNT_RDONLY));
}
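Annotation: the new xfs_buf_lock()/xfs_buf_unlock() pair in xfs_unmountfs() is a "wait by taking the lock" barrier — the superblock buffer is uncached, so there is no iowait to call, but acquiring and immediately releasing its lock cannot complete until iodone() has run. A minimal user-space analogue of the idiom, using a pthread mutex as an assumed stand-in for the buffer lock (this is not the kernel mechanism, just the same shape):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static void *fake_iodone(void *arg)
{
        pthread_mutex_lock(&buf_lock);
        sleep(1);                       /* simulate I/O completion work */
        puts("iodone finished");
        pthread_mutex_unlock(&buf_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, fake_iodone, NULL);
        usleep(100 * 1000);             /* crude: let the worker take the lock */

        pthread_mutex_lock(&buf_lock);  /* blocks until "iodone" is done */
        pthread_mutex_unlock(&buf_lock);
        puts("safe to write the unmount record now");

        pthread_join(t, NULL);
        return 0;
}

The same pair is added to xfs_quiesce_attr() further down, for the same reason.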
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 90c1fc9eaea4..05a05a7b6119 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -176,7 +176,6 @@ typedef struct xfs_mount {
uint m_qflags; /* quota status flags */
xfs_trans_reservations_t m_reservations;/* precomputed res values */
__uint64_t m_maxicount; /* maximum inode count */
- __uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
__uint64_t m_resblks_save; /* reserved blks @ remount,ro */
@@ -297,8 +296,6 @@ xfs_preferred_iosize(xfs_mount_t *mp)
PAGE_CACHE_SIZE));
}
-#define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset)
-
#define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \
((mp)->m_flags & XFS_MOUNT_WAS_CLEAN)
#define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN)
@@ -314,9 +311,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */
#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */
-#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen)
-#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l))
-
/*
* Flags for xfs_mountfs
*/
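Annotation: with m_maxioffset and XFS_MAXIOFFSET() gone, the byte limit comes from the VFS-maintained sb->s_maxbytes at each use site. A small sketch of the byte-limit to filesystem-block conversion those call sites perform; the helper name and the example limit are illustrative, not the kernel macros:

#include <stdint.h>
#include <stdio.h>

static uint64_t b_to_fsb(uint64_t bytes, unsigned int blocklog)
{
        /* round up to a whole filesystem block, then shift down */
        return (bytes + (1ULL << blocklog) - 1) >> blocklog;
}

int main(void)
{
        uint64_t s_maxbytes = (1ULL << 63) - 1;  /* illustrative 64-bit limit */

        printf("max fsb at 4k blocks: %llu\n",
               (unsigned long long)b_to_fsb(s_maxbytes, 12));
        return 0;
}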
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 249db1987764..2e86fa0cfc0d 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -940,7 +940,7 @@ xfs_qm_dqiterate(
map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
lblkno = 0;
- maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
do {
nmaps = XFS_DQITER_MAP_SIZE;
/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 92d4331cd4f1..ca28a4ba4b54 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -857,7 +857,7 @@ xfs_rtbuf_get(
xfs_buf_t *bp; /* block buffer, result */
xfs_inode_t *ip; /* bitmap or summary inode */
xfs_bmbt_irec_t map;
- int nmap;
+ int nmap = 1;
int error; /* error value */
ip = issum ? mp->m_rsumip : mp->m_rbmip;
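Annotation: the one-character xfs_rtalloc.c change matters because nmap is an in/out parameter — xfs_bmapi_read() reads the requested map count before writing back how many it filled, so leaving it uninitialised is undefined behaviour. A tiny illustrative stand-in for that contract (names are made up):

#include <stdio.h>

static int get_extents(int *nmap /* in: wanted, out: returned */)
{
        int wanted = *nmap;     /* the callee reads the caller's value first */

        *nmap = wanted < 1 ? 0 : 1;
        return 0;
}

int main(void)
{
        int nmap = 1;           /* must be initialised, as in the fix */

        get_extents(&nmap);
        printf("returned mappings: %d\n", nmap);
        return 0;
}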
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 0d9de41a7151..bdaf4cb9f4a2 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -868,67 +868,14 @@ xfs_fs_inode_init_once(
"xfsino", ip->i_ino);
}
-/*
- * This is called by the VFS when dirtying inode metadata. This can happen
- * for a few reasons, but we only care about timestamp updates, given that
- * we handled the rest ourselves. In theory no other calls should happen,
- * but for example generic_write_end() keeps dirtying the inode after
- * updating i_size. Thus we check that the flags are exactly I_DIRTY_SYNC,
- * and skip this call otherwise.
- *
- * We'll hopefull get a different method just for updating timestamps soon,
- * at which point this hack can go away, and maybe we'll also get real
- * error handling here.
- */
-STATIC void
-xfs_fs_dirty_inode(
- struct inode *inode,
- int flags)
-{
- struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_trans *tp;
- int error;
-
- if (flags != I_DIRTY_SYNC)
- return;
-
- trace_xfs_dirty_inode(ip);
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- goto trouble;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- /*
- * Grab all the latest timestamps from the Linux inode.
- */
- ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
- ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
- ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
- ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
- ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
-
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
- error = xfs_trans_commit(tp, 0);
- if (error)
- goto trouble;
- return;
-
-trouble:
- xfs_warn(mp, "failed to update timestamps for inode 0x%llx", ip->i_ino);
-}
-
STATIC void
xfs_fs_evict_inode(
struct inode *inode)
{
xfs_inode_t *ip = XFS_I(inode);
+ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+
trace_xfs_evict_inode(ip);
truncate_inode_pages(&inode->i_data, 0);
@@ -937,22 +884,6 @@ xfs_fs_evict_inode(
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
- /*
- * The iolock is used by the file system to coordinate reads,
- * writes, and block truncates. Up to this point the lock
- * protected concurrent accesses by users of the inode. But
- * from here forward we're doing some final processing of the
- * inode because we're done with it, and although we reuse the
- * iolock for protection it is really a distinct lock class
- * (in the lockdep sense) from before. To keep lockdep happy
- * (and basically indicate what we are doing), we explicitly
- * re-init the iolock here.
- */
- ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
- mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
- lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
- &xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
-
xfs_inactive(ip);
}
@@ -1436,7 +1367,6 @@ xfs_fs_free_cached_objects(
static const struct super_operations xfs_super_operations = {
.alloc_inode = xfs_fs_alloc_inode,
.destroy_inode = xfs_fs_destroy_inode,
- .dirty_inode = xfs_fs_dirty_inode,
.evict_inode = xfs_fs_evict_inode,
.drop_inode = xfs_fs_drop_inode,
.put_super = xfs_fs_put_super,
@@ -1491,13 +1421,9 @@ xfs_init_zones(void)
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
- xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
- if (!xfs_dabuf_zone)
- goto out_destroy_da_state_zone;
-
xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
if (!xfs_ifork_zone)
- goto out_destroy_dabuf_zone;
+ goto out_destroy_da_state_zone;
xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
if (!xfs_trans_zone)
@@ -1514,9 +1440,8 @@ xfs_init_zones(void)
* size possible under XFS. This wastes a little bit of memory,
* but it is much faster.
*/
- xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
- (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
- NBWORD) * sizeof(int))), "xfs_buf_item");
+ xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
+ "xfs_buf_item");
if (!xfs_buf_item_zone)
goto out_destroy_log_item_desc_zone;
@@ -1561,8 +1486,6 @@ xfs_init_zones(void)
kmem_zone_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
kmem_zone_destroy(xfs_ifork_zone);
- out_destroy_dabuf_zone:
- kmem_zone_destroy(xfs_dabuf_zone);
out_destroy_da_state_zone:
kmem_zone_destroy(xfs_da_state_zone);
out_destroy_btree_cur_zone:
@@ -1590,7 +1513,6 @@ xfs_destroy_zones(void)
kmem_zone_destroy(xfs_log_item_desc_zone);
kmem_zone_destroy(xfs_trans_zone);
kmem_zone_destroy(xfs_ifork_zone);
- kmem_zone_destroy(xfs_dabuf_zone);
kmem_zone_destroy(xfs_da_state_zone);
kmem_zone_destroy(xfs_btree_cur_zone);
kmem_zone_destroy(xfs_bmap_free_item_zone);
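Annotation: dropping xfs_dabuf_zone forces the goto-unwind chain in xfs_init_zones() to be re-targeted, which is easy to get wrong. A self-contained sketch of the pattern being maintained here — each failed allocation jumps to the label that frees only what was already set up, in reverse order (resource names are placeholders):

#include <stdlib.h>

struct zones { void *a, *b, *c; };

static int init_zones(struct zones *z)
{
        z->a = malloc(32);
        if (!z->a)
                goto out;
        z->b = malloc(32);
        if (!z->b)
                goto out_free_a;
        z->c = malloc(32);
        if (!z->c)
                goto out_free_b;
        return 0;

 out_free_b:
        free(z->b);
 out_free_a:
        free(z->a);
 out:
        return -1;
}

int main(void)
{
        struct zones z;

        if (init_zones(&z))
                return 1;
        free(z.c);
        free(z.b);
        free(z.a);
        return 0;
}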
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 1e9ee064dbb2..96548176db80 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -359,6 +359,15 @@ xfs_quiesce_attr(
* added an item to the AIL, thus flush it again.
*/
xfs_ail_push_all_sync(mp->m_ail);
+
+ /*
+ * The superblock buffer is uncached and xfsaild_push() will lock and
+ * set the XBF_ASYNC flag on the buffer. We cannot do xfs_buf_iowait()
+ * here but a lock on the superblock buffer will block until iodone()
+ * has completed.
+ */
+ xfs_buf_lock(mp->m_sb_bp);
+ xfs_buf_unlock(mp->m_sb_bp);
}
static void
@@ -394,7 +403,7 @@ xfs_sync_worker(
if (!(mp->m_super->s_flags & MS_ACTIVE) &&
!(mp->m_flags & XFS_MOUNT_RDONLY)) {
/* dgc: errors ignored here */
- if (mp->m_super->s_frozen == SB_UNFROZEN &&
+ if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
xfs_log_need_covered(mp))
error = xfs_fs_log_dummy(mp);
else
@@ -712,8 +721,8 @@ restart:
* Note that xfs_iflush will never block on the inode buffer lock, as
* xfs_ifree_cluster() can lock the inode buffer before it locks the
* ip->i_lock, and we are doing the exact opposite here. As a result,
- * doing a blocking xfs_itobp() to get the cluster buffer would result
- * in an ABBA deadlock with xfs_ifree_cluster().
+ * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
+ * result in an ABBA deadlock with xfs_ifree_cluster().
*
* As xfs_ifree_cluser() must gather all inodes that are active in the
* cache to mark them stale, if we hit this case we don't actually want
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index caf5dabfd553..e5795dd6013a 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -578,8 +578,8 @@ DEFINE_INODE_EVENT(xfs_ioctl_setattr);
DEFINE_INODE_EVENT(xfs_dir_fsync);
DEFINE_INODE_EVENT(xfs_file_fsync);
DEFINE_INODE_EVENT(xfs_destroy_inode);
-DEFINE_INODE_EVENT(xfs_dirty_inode);
DEFINE_INODE_EVENT(xfs_evict_inode);
+DEFINE_INODE_EVENT(xfs_update_time);
DEFINE_INODE_EVENT(xfs_dquot_dqalloc);
DEFINE_INODE_EVENT(xfs_dquot_dqdetach);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index fdf324508c5e..06ed520a767f 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -576,8 +576,12 @@ xfs_trans_alloc(
xfs_mount_t *mp,
uint type)
{
- xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
- return _xfs_trans_alloc(mp, type, KM_SLEEP);
+ xfs_trans_t *tp;
+
+ sb_start_intwrite(mp->m_super);
+ tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
+ tp->t_flags |= XFS_TRANS_FREEZE_PROT;
+ return tp;
}
xfs_trans_t *
@@ -588,6 +592,7 @@ _xfs_trans_alloc(
{
xfs_trans_t *tp;
+ WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
atomic_inc(&mp->m_active_trans);
tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
@@ -611,6 +616,8 @@ xfs_trans_free(
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
atomic_dec(&tp->t_mountp->m_active_trans);
+ if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
+ sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
kmem_zone_free(xfs_trans_zone, tp);
}
@@ -643,7 +650,11 @@ xfs_trans_dup(
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(tp->t_ticket != NULL);
- ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
+ ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
+ (tp->t_flags & XFS_TRANS_RESERVE) |
+ (tp->t_flags & XFS_TRANS_FREEZE_PROT);
+ /* We gave our writer reference to the new transaction */
+ tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
tp->t_blk_res = tp->t_blk_res_used;
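Annotation: the xfs_trans.c hunks establish an ownership rule for the freeze-protection reference: it is taken in xfs_trans_alloc(), recorded with XFS_TRANS_FREEZE_PROT, handed over (not duplicated) in xfs_trans_dup(), and dropped exactly once in xfs_trans_free(). A minimal user-space sketch of that rule, with a plain counter as an assumed stand-in for sb_start_intwrite()/sb_end_intwrite():

#include <assert.h>
#include <stdlib.h>

#define TRANS_FREEZE_PROT 0x40

static int intwrite_count;              /* stand-in for the sb writer count */

struct trans { unsigned flags; };

static struct trans *trans_alloc(void)
{
        struct trans *tp = calloc(1, sizeof(*tp));

        intwrite_count++;               /* "sb_start_intwrite()" */
        tp->flags |= TRANS_FREEZE_PROT;
        return tp;
}

static struct trans *trans_dup(struct trans *tp)
{
        struct trans *ntp = calloc(1, sizeof(*ntp));

        /* the writer reference moves to the new transaction */
        ntp->flags |= tp->flags & TRANS_FREEZE_PROT;
        tp->flags &= ~TRANS_FREEZE_PROT;
        return ntp;
}

static void trans_free(struct trans *tp)
{
        if (tp->flags & TRANS_FREEZE_PROT)
                intwrite_count--;       /* "sb_end_intwrite()" */
        free(tp);
}

int main(void)
{
        struct trans *tp = trans_alloc();
        struct trans *ntp = trans_dup(tp);

        trans_free(tp);                 /* no longer holds the reference */
        trans_free(ntp);                /* drops it exactly once */
        assert(intwrite_count == 0);
        return 0;
}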
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7c37b533aa8e..db056544cbb5 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -179,6 +179,8 @@ struct xfs_log_item_desc {
#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */
#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer
+ count in superblock */
/*
* Values for call flags parameter.
@@ -448,11 +450,51 @@ xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
uint, uint);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
-struct xfs_buf *xfs_trans_get_buf(xfs_trans_t *, struct xfs_buftarg *, xfs_daddr_t,
- int, uint);
-int xfs_trans_read_buf(struct xfs_mount *, xfs_trans_t *,
- struct xfs_buftarg *, xfs_daddr_t, int, uint,
- struct xfs_buf **);
+
+struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ uint flags);
+
+static inline struct xfs_buf *
+xfs_trans_get_buf(
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ int numblks,
+ uint flags)
+{
+ struct xfs_buf_map map = {
+ .bm_bn = blkno,
+ .bm_len = numblks,
+ };
+ return xfs_trans_get_buf_map(tp, target, &map, 1, flags);
+}
+
+int xfs_trans_read_buf_map(struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map, int nmaps,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp);
+
+static inline int
+xfs_trans_read_buf(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t blkno,
+ int numblks,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
+{
+ struct xfs_buf_map map = {
+ .bm_bn = blkno,
+ .bm_len = numblks,
+ };
+ return xfs_trans_read_buf_map(mp, tp, target, &map, 1, flags, bpp);
+}
+
struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
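Annotation: the new static inline xfs_trans_get_buf()/xfs_trans_read_buf() wrappers keep existing single-extent callers unchanged while the real interfaces now take an array of block maps. A self-contained analogue of that wrapper pattern, including a hypothetical two-extent caller of the _map form (struct and function names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct buf_map { uint64_t bm_bn; int bm_len; };

static int read_buf_map(const struct buf_map *map, int nmaps)
{
        int total = 0;

        for (int i = 0; i < nmaps; i++)
                total += map[i].bm_len;
        printf("reading %d blocks across %d extents\n", total, nmaps);
        return 0;
}

static inline int read_buf(uint64_t blkno, int numblks)
{
        struct buf_map map = { .bm_bn = blkno, .bm_len = numblks };

        return read_buf_map(&map, 1);   /* common case: one extent */
}

int main(void)
{
        struct buf_map dir_block[2] = {
                { .bm_bn = 128, .bm_len = 4 },
                { .bm_bn = 512, .bm_len = 4 },
        };

        read_buf(64, 8);                   /* old-style single-extent caller */
        return read_buf_map(dir_block, 2); /* new discontiguous caller */
}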
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 9c514483e599..6011ee661339 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -383,6 +383,12 @@ xfsaild_push(
}
spin_lock(&ailp->xa_lock);
+
+ /* barrier matches the xa_target update in xfs_ail_push() */
+ smp_rmb();
+ target = ailp->xa_target;
+ ailp->xa_target_prev = target;
+
lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
if (!lip) {
/*
@@ -397,7 +403,6 @@ xfsaild_push(
XFS_STATS_INC(xs_push_ail);
lsn = lip->li_lsn;
- target = ailp->xa_target;
while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
int lock_result;
@@ -527,8 +532,32 @@ xfsaild(
__set_current_state(TASK_KILLABLE);
else
__set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(tout ?
- msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+
+ spin_lock(&ailp->xa_lock);
+
+ /*
+ * Idle if the AIL is empty and we are not racing with a target
+ * update. We check the AIL after we set the task to a sleep
+ * state to guarantee that we either catch an xa_target update
+ * or that a wake_up resets the state to TASK_RUNNING.
+ * Otherwise, we run the risk of sleeping indefinitely.
+ *
+ * The barrier matches the xa_target update in xfs_ail_push().
+ */
+ smp_rmb();
+ if (!xfs_ail_min(ailp) &&
+ ailp->xa_target == ailp->xa_target_prev) {
+ spin_unlock(&ailp->xa_lock);
+ schedule();
+ tout = 0;
+ continue;
+ }
+ spin_unlock(&ailp->xa_lock);
+
+ if (tout)
+ schedule_timeout(msecs_to_jiffies(tout));
+
+ __set_current_state(TASK_RUNNING);
try_to_freeze();
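Annotation: the xfsaild() change replaces an unconditional timed sleep with "go fully idle only if the AIL is empty and the push target has not moved since we last looked", re-checked after setting the task state so a racing xfs_ail_push() cannot be lost. A user-space analogue of that idle protocol, using a condition variable in place of the task-state/smp_rmb() machinery (names and the single-shot flow are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ail_wake = PTHREAD_COND_INITIALIZER;
static unsigned long target, target_prev;
static bool ail_empty = true;

static void push_target(unsigned long lsn)      /* xfs_ail_push() analogue */
{
        pthread_mutex_lock(&ail_lock);
        target = lsn;
        ail_empty = false;
        pthread_cond_signal(&ail_wake);
        pthread_mutex_unlock(&ail_lock);
}

static void *aild(void *arg)                    /* xfsaild() analogue */
{
        pthread_mutex_lock(&ail_lock);
        while (ail_empty && target == target_prev)
                pthread_cond_wait(&ail_wake, &ail_lock);
        target_prev = target;                   /* remember what we pushed to */
        ail_empty = true;                       /* pretend everything was pushed */
        pthread_mutex_unlock(&ail_lock);
        printf("pushed up to target %lu\n", target);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, aild, NULL);
        push_target(42);
        pthread_join(t, NULL);
        return 0;
}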
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 21c5a5e3700d..6311b99c267f 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -41,20 +41,26 @@ STATIC struct xfs_buf *
xfs_trans_buf_item_match(
struct xfs_trans *tp,
struct xfs_buftarg *target,
- xfs_daddr_t blkno,
- int len)
+ struct xfs_buf_map *map,
+ int nmaps)
{
struct xfs_log_item_desc *lidp;
struct xfs_buf_log_item *blip;
+ int len = 0;
+ int i;
+
+ for (i = 0; i < nmaps; i++)
+ len += map[i].bm_len;
- len = BBTOB(len);
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
blip = (struct xfs_buf_log_item *)lidp->lid_item;
if (blip->bli_item.li_type == XFS_LI_BUF &&
blip->bli_buf->b_target == target &&
- XFS_BUF_ADDR(blip->bli_buf) == blkno &&
- BBTOB(blip->bli_buf->b_length) == len)
+ XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
+ blip->bli_buf->b_length == len) {
+ ASSERT(blip->bli_buf->b_map_count == nmaps);
return blip->bli_buf;
+ }
}
return NULL;
@@ -128,21 +134,19 @@ xfs_trans_bjoin(
* If the transaction pointer is NULL, make this just a normal
* get_buf() call.
*/
-xfs_buf_t *
-xfs_trans_get_buf(xfs_trans_t *tp,
- xfs_buftarg_t *target_dev,
- xfs_daddr_t blkno,
- int len,
- uint flags)
+struct xfs_buf *
+xfs_trans_get_buf_map(
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
xfs_buf_log_item_t *bip;
- /*
- * Default to a normal get_buf() call if the tp is NULL.
- */
- if (tp == NULL)
- return xfs_buf_get(target_dev, blkno, len, flags);
+ if (!tp)
+ return xfs_buf_get_map(target, map, nmaps, flags);
/*
* If we find the buffer in the cache with this transaction
@@ -150,7 +154,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
* have it locked. In this case we just increment the lock
* recursion count and return the buffer to the caller.
*/
- bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
+ bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
if (bp != NULL) {
ASSERT(xfs_buf_islocked(bp));
if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
@@ -167,7 +171,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
return (bp);
}
- bp = xfs_buf_get(target_dev, blkno, len, flags);
+ bp = xfs_buf_get_map(target, map, nmaps, flags);
if (bp == NULL) {
return NULL;
}
@@ -246,26 +250,22 @@ int xfs_error_mod = 33;
* read_buf() call.
*/
int
-xfs_trans_read_buf(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_buftarg_t *target,
- xfs_daddr_t blkno,
- int len,
- uint flags,
- xfs_buf_t **bpp)
+xfs_trans_read_buf_map(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buftarg *target,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
xfs_buf_t *bp;
xfs_buf_log_item_t *bip;
int error;
*bpp = NULL;
-
- /*
- * Default to a normal get_buf() call if the tp is NULL.
- */
- if (tp == NULL) {
- bp = xfs_buf_read(target, blkno, len, flags);
+ if (!tp) {
+ bp = xfs_buf_read_map(target, map, nmaps, flags);
if (!bp)
return (flags & XBF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
@@ -303,7 +303,7 @@ xfs_trans_read_buf(
* If the buffer is not yet read in, then we read it in, increment
* the lock recursion count, and return it to the caller.
*/
- bp = xfs_trans_buf_item_match(tp, target, blkno, len);
+ bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
if (bp != NULL) {
ASSERT(xfs_buf_islocked(bp));
ASSERT(bp->b_transp == tp);
@@ -349,7 +349,7 @@ xfs_trans_read_buf(
return 0;
}
- bp = xfs_buf_read(target, blkno, len, flags);
+ bp = xfs_buf_read_map(target, map, nmaps, flags);
if (bp == NULL) {
*bpp = NULL;
return (flags & XBF_TRYLOCK) ?
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index fb62377d1cbc..53b7c9b0f8f7 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -67,6 +67,7 @@ struct xfs_ail {
struct task_struct *xa_task;
struct list_head xa_ail;
xfs_lsn_t xa_target;
+ xfs_lsn_t xa_target_prev;
struct list_head xa_cursors;
spinlock_t xa_lock;
xfs_lsn_t xa_last_pushed_lsn;
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index 398cf681d025..7a41874f4c20 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -133,6 +133,20 @@ typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */
#define MAXAEXTNUM ((xfs_aextnum_t)0x7fff) /* signed short */
/*
+ * Minimum and maximum blocksize and sectorsize.
+ * The blocksize upper limit is pretty much arbitrary.
+ * The sectorsize upper limit is due to sizeof(sb_sectsize).
+ */
+#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
+#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
+#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
+#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
+#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
+#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
+#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
+#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
+
+/*
* Min numbers of data/attr fork btree root pointers.
*/
#define MINDBTPTRS 3
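Annotation: the block/sector size limits moved into xfs_types.h are all derived from log2 constants. A quick sanity sketch that reproduces the derivation and the usual "power of two within range" check a validator would apply (the helper is illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define MIN_BLOCKSIZE_LOG 9
#define MAX_BLOCKSIZE_LOG 16
#define MIN_BLOCKSIZE (1 << MIN_BLOCKSIZE_LOG)   /* 512   */
#define MAX_BLOCKSIZE (1 << MAX_BLOCKSIZE_LOG)   /* 65536 */

static bool blocksize_ok(unsigned int bsize)
{
        return bsize >= MIN_BLOCKSIZE && bsize <= MAX_BLOCKSIZE &&
               (bsize & (bsize - 1)) == 0;       /* power of two */
}

int main(void)
{
        printf("range: %u..%u, 4096 ok? %d, 3000 ok? %d\n",
               MIN_BLOCKSIZE, MAX_BLOCKSIZE,
               blocksize_ok(4096), blocksize_ok(3000));
        return 0;
}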
diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c
index 4e5b9ad5cb97..0025c78ac03c 100644
--- a/fs/xfs/xfs_utils.c
+++ b/fs/xfs/xfs_utils.c
@@ -65,7 +65,6 @@ xfs_dir_ialloc(
xfs_trans_t *ntp;
xfs_inode_t *ip;
xfs_buf_t *ialloc_context = NULL;
- boolean_t call_again = B_FALSE;
int code;
uint log_res;
uint log_count;
@@ -91,7 +90,7 @@ xfs_dir_ialloc(
* the inode(s) that we've just allocated.
*/
code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
- &ialloc_context, &call_again, &ip);
+ &ialloc_context, &ip);
/*
* Return an error if we were unable to allocate a new inode.
@@ -102,19 +101,18 @@ xfs_dir_ialloc(
*ipp = NULL;
return code;
}
- if (!call_again && (ip == NULL)) {
+ if (!ialloc_context && !ip) {
*ipp = NULL;
return XFS_ERROR(ENOSPC);
}
/*
- * If call_again is set, then we were unable to get an
+ * If the AGI buffer is non-NULL, then we were unable to get an
* inode in one operation. We need to commit the current
* transaction and call xfs_ialloc() again. It is guaranteed
* to succeed the second time.
*/
- if (call_again) {
-
+ if (ialloc_context) {
/*
* Normally, xfs_trans_commit releases all the locks.
* We call bhold to hang on to the ialloc_context across
@@ -195,7 +193,7 @@ xfs_dir_ialloc(
* this call should always succeed.
*/
code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
- okalloc, &ialloc_context, &call_again, &ip);
+ okalloc, &ialloc_context, &ip);
/*
* If we get an error at this point, return to the caller
@@ -206,12 +204,11 @@ xfs_dir_ialloc(
*ipp = NULL;
return code;
}
- ASSERT ((!call_again) && (ip != NULL));
+ ASSERT(!ialloc_context && ip);
} else {
- if (committed != NULL) {
+ if (committed != NULL)
*committed = 0;
- }
}
*ipp = ip;
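Annotation: the xfs_dir_ialloc() cleanup drops the separate call_again boolean because a non-NULL ialloc_context already carries the same information: "commit what you have and call the allocator again". A stripped-down illustration of that retry contract (every name and the placeholder allocation are invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

struct inode_stub { int ino; };

/* First call hands back a context and no inode; the retry succeeds. */
static int ialloc(void **ctx, struct inode_stub **ipp)
{
        if (*ctx == NULL && *ipp == NULL) {
                *ctx = malloc(1);       /* "AGI buffer" placeholder */
                return 0;               /* caller must commit and retry */
        }
        free(*ctx);
        *ctx = NULL;
        *ipp = malloc(sizeof(**ipp));
        (*ipp)->ino = 1234;
        return 0;
}

int main(void)
{
        void *ctx = NULL;
        struct inode_stub *ip = NULL;

        ialloc(&ctx, &ip);
        if (ctx)                        /* was "call_again" */
                ialloc(&ctx, &ip);      /* guaranteed to succeed second time */
        printf("allocated inode %d\n", ip ? ip->ino : -1);
        free(ip);
        return 0;
}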
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index b6a82d817a82..2a5c637344b4 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -146,11 +146,6 @@ xfs_readlink(
}
/*
- * Flags for xfs_free_eofblocks
- */
-#define XFS_FREE_EOF_TRYLOCK (1<<0)
-
-/*
* This is called by xfs_inactive to free any blocks beyond eof
* when the link count isn't zero and by xfs_dm_punch_hole() when
* punching a hole to EOF.
@@ -159,7 +154,7 @@ STATIC int
xfs_free_eofblocks(
xfs_mount_t *mp,
xfs_inode_t *ip,
- int flags)
+ bool need_iolock)
{
xfs_trans_t *tp;
int error;
@@ -174,7 +169,7 @@ xfs_free_eofblocks(
* of the file. If not, then there is nothing to do.
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
- last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return 0;
map_len = last_fsb - end_fsb;
@@ -201,13 +196,11 @@ xfs_free_eofblocks(
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- if (flags & XFS_FREE_EOF_TRYLOCK) {
+ if (need_iolock) {
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
xfs_trans_cancel(tp, 0);
return 0;
}
- } else {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
}
error = xfs_trans_reserve(tp, 0,
@@ -217,7 +210,8 @@ xfs_free_eofblocks(
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ if (need_iolock)
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
@@ -244,7 +238,10 @@ xfs_free_eofblocks(
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES);
}
- xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (need_iolock)
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}
return error;
}
@@ -282,23 +279,15 @@ xfs_inactive_symlink_rmt(
* free them all in one bunmapi call.
*/
ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
- if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp, 0);
- *tpp = NULL;
- return error;
- }
+
/*
* Lock the inode, fix the size, and join it to the transaction.
* Hold it so in the normal path, we still have it locked for
* the second transaction. In the error paths we need it
* held so the cancel won't rele it, see below.
*/
- xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
size = (int)ip->i_d.di_size;
ip->i_d.di_size = 0;
- xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Find the block(s) so we can inval and unmap them.
@@ -385,114 +374,14 @@ xfs_inactive_symlink_rmt(
ASSERT(XFS_FORCED_SHUTDOWN(mp));
goto error0;
}
- /*
- * Return with the inode locked but not joined to the transaction.
- */
+
+ xfs_trans_ijoin(tp, ip, 0);
*tpp = tp;
return 0;
error1:
xfs_bmap_cancel(&free_list);
error0:
- /*
- * Have to come here with the inode locked and either
- * (held and in the transaction) or (not in the transaction).
- * If the inode isn't held then cancel would iput it, but
- * that's wrong since this is inactive and the vnode ref
- * count is 0 already.
- * Cancel won't do anything to the inode if held, but it still
- * needs to be locked until the cancel is done, if it was
- * joined to the transaction.
- */
- xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- *tpp = NULL;
- return error;
-
-}
-
-STATIC int
-xfs_inactive_symlink_local(
- xfs_inode_t *ip,
- xfs_trans_t **tpp)
-{
- int error;
-
- ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
- /*
- * We're freeing a symlink which fit into
- * the inode. Just free the memory used
- * to hold the old symlink.
- */
- error = xfs_trans_reserve(*tpp, 0,
- XFS_ITRUNCATE_LOG_RES(ip->i_mount),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_ITRUNCATE_LOG_COUNT);
-
- if (error) {
- xfs_trans_cancel(*tpp, 0);
- *tpp = NULL;
- return error;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
-
- /*
- * Zero length symlinks _can_ exist.
- */
- if (ip->i_df.if_bytes > 0) {
- xfs_idata_realloc(ip,
- -(ip->i_df.if_bytes),
- XFS_DATA_FORK);
- ASSERT(ip->i_df.if_bytes == 0);
- }
- return 0;
-}
-
-STATIC int
-xfs_inactive_attrs(
- xfs_inode_t *ip,
- xfs_trans_t **tpp)
-{
- xfs_trans_t *tp;
- int error;
- xfs_mount_t *mp;
-
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
- tp = *tpp;
- mp = ip->i_mount;
- ASSERT(ip->i_d.di_forkoff != 0);
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- if (error)
- goto error_unlock;
-
- error = xfs_attr_inactive(ip);
- if (error)
- goto error_unlock;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- error = xfs_trans_reserve(tp, 0,
- XFS_IFREE_LOG_RES(mp),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_INACTIVE_LOG_COUNT);
- if (error)
- goto error_cancel;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
- ASSERT(ip->i_d.di_anextents == 0);
-
- *tpp = tp;
- return 0;
-
-error_cancel:
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp, 0);
-error_unlock:
- *tpp = NULL;
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
}
@@ -574,8 +463,7 @@ xfs_release(
if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
return 0;
- error = xfs_free_eofblocks(mp, ip,
- XFS_FREE_EOF_TRYLOCK);
+ error = xfs_free_eofblocks(mp, ip, true);
if (error)
return error;
@@ -604,7 +492,7 @@ xfs_inactive(
xfs_trans_t *tp;
xfs_mount_t *mp;
int error;
- int truncate;
+ int truncate = 0;
/*
* If the inode is already free, then there can be nothing
@@ -616,17 +504,6 @@ xfs_inactive(
return VN_INACTIVE_CACHE;
}
- /*
- * Only do a truncate if it's a regular file with
- * some actual space in it. It's OK to look at the
- * inode's fields without the lock because we're the
- * only one with a reference to the inode.
- */
- truncate = ((ip->i_d.di_nlink == 0) &&
- ((ip->i_d.di_size != 0) || XFS_ISIZE(ip) != 0 ||
- (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
- S_ISREG(ip->i_d.di_mode));
-
mp = ip->i_mount;
error = 0;
@@ -643,99 +520,100 @@ xfs_inactive(
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
ip->i_delayed_blks != 0))) {
- error = xfs_free_eofblocks(mp, ip, 0);
+ error = xfs_free_eofblocks(mp, ip, false);
if (error)
return VN_INACTIVE_CACHE;
}
goto out;
}
- ASSERT(ip->i_d.di_nlink == 0);
+ if (S_ISREG(ip->i_d.di_mode) &&
+ (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
+ ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
+ truncate = 1;
error = xfs_qm_dqattach(ip, 0);
if (error)
return VN_INACTIVE_CACHE;
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- if (truncate) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
- error = xfs_trans_reserve(tp, 0,
- XFS_ITRUNCATE_LOG_RES(mp),
- 0, XFS_TRANS_PERM_LOG_RES,
- XFS_ITRUNCATE_LOG_COUNT);
- if (error) {
- /* Don't call itruncate_cleanup */
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp, 0);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return VN_INACTIVE_CACHE;
- }
+ error = xfs_trans_reserve(tp, 0,
+ (truncate || S_ISLNK(ip->i_d.di_mode)) ?
+ XFS_ITRUNCATE_LOG_RES(mp) :
+ XFS_IFREE_LOG_RES(mp),
+ 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_ITRUNCATE_LOG_COUNT);
+ if (error) {
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+ return VN_INACTIVE_CACHE;
+ }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, 0);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
+ if (S_ISLNK(ip->i_d.di_mode)) {
+ /*
+ * Zero length symlinks _can_ exist.
+ */
+ if (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) {
+ error = xfs_inactive_symlink_rmt(ip, &tp);
+ if (error)
+ goto out_cancel;
+ } else if (ip->i_df.if_bytes > 0) {
+ xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
+ XFS_DATA_FORK);
+ ASSERT(ip->i_df.if_bytes == 0);
+ }
+ } else if (truncate) {
ip->i_d.di_size = 0;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
- if (error) {
- xfs_trans_cancel(tp,
- XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- return VN_INACTIVE_CACHE;
- }
+ if (error)
+ goto out_cancel;
ASSERT(ip->i_d.di_nextents == 0);
- } else if (S_ISLNK(ip->i_d.di_mode)) {
+ }
- /*
- * If we get an error while cleaning up a
- * symlink we bail out.
- */
- error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ?
- xfs_inactive_symlink_rmt(ip, &tp) :
- xfs_inactive_symlink_local(ip, &tp);
+ /*
+ * If there are attributes associated with the file then blow them away
+ * now. The code calls a routine that recursively deconstructs the
+ * attribute fork. We need to just commit the current transaction
+ * because we can't use it for xfs_attr_inactive().
+ */
+ if (ip->i_d.di_anextents > 0) {
+ ASSERT(ip->i_d.di_forkoff != 0);
- if (error) {
- ASSERT(tp == NULL);
- return VN_INACTIVE_CACHE;
- }
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ goto out_unlock;
- xfs_trans_ijoin(tp, ip, 0);
- } else {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ error = xfs_attr_inactive(ip);
+ if (error)
+ goto out;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
error = xfs_trans_reserve(tp, 0,
XFS_IFREE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_INACTIVE_LOG_COUNT);
if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
- return VN_INACTIVE_CACHE;
+ goto out;
}
- xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
}
- /*
- * If there are attributes associated with the file
- * then blow them away now. The code calls a routine
- * that recursively deconstructs the attribute fork.
- * We need to just commit the current transaction
- * because we can't use it for xfs_attr_inactive().
- */
- if (ip->i_d.di_anextents > 0) {
- error = xfs_inactive_attrs(ip, &tp);
- /*
- * If we got an error, the transaction is already
- * cancelled, and the inode is unlocked. Just get out.
- */
- if (error)
- return VN_INACTIVE_CACHE;
- } else if (ip->i_afp) {
+ if (ip->i_afp)
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
- }
+
+ ASSERT(ip->i_d.di_anextents == 0);
/*
* Free the inode.
@@ -779,10 +657,13 @@ xfs_inactive(
* Release the dquots held by inode, if any.
*/
xfs_qm_dqdetach(ip);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
-
- out:
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out:
return VN_INACTIVE_CACHE;
+out_cancel:
+ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+ goto out_unlock;
}
/*
@@ -2262,10 +2143,10 @@ xfs_change_file_space(
llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len;
- if ( (bf->l_start < 0)
- || (bf->l_start > XFS_MAXIOFFSET(mp))
- || (bf->l_start + llen < 0)
- || (bf->l_start + llen > XFS_MAXIOFFSET(mp)))
+ if (bf->l_start < 0 ||
+ bf->l_start > mp->m_super->s_maxbytes ||
+ bf->l_start + llen < 0 ||
+ bf->l_start + llen > mp->m_super->s_maxbytes)
return XFS_ERROR(EINVAL);
bf->l_whence = 0;
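Annotation: the reworked range check in xfs_change_file_space() rejects negative offsets, offsets beyond the superblock byte limit, and offset+length pairs that wrap. A compact stand-alone version of the same predicate, with an invented limit for demonstration:

#include <stdbool.h>
#include <stdio.h>

static bool range_ok(long long start, long long len, long long maxbytes)
{
        long long llen = len > 0 ? len - 1 : len;

        if (start < 0 || start > maxbytes)
                return false;
        if (start + llen < 0 || start + llen > maxbytes)
                return false;
        return true;
}

int main(void)
{
        long long maxbytes = 1LL << 40;         /* illustrative limit */

        printf("%d %d\n", range_ok(0, 4096, maxbytes),
                          range_ok(maxbytes, 4096, maxbytes));
        return 0;
}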
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 92d6e1d701ff..19503449814f 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,7 @@
#define AE_CODE_ACPI_TABLES 0x2000
#define AE_CODE_AML 0x3000
#define AE_CODE_CONTROL 0x4000
+#define AE_CODE_MAX 0x4000
#define AE_CODE_MASK 0xF000
#define ACPI_SUCCESS(a) (!(a))
@@ -181,7 +182,7 @@
/* Exception strings for acpi_format_exception */
-#ifdef DEFINE_ACPI_GLOBALS
+#ifdef ACPI_DEFINE_EXCEPTION_TABLE
/*
* String versions of the exception codes above
@@ -295,6 +296,6 @@ char const *acpi_gbl_exception_names_ctrl[] = {
"AE_CTRL_PARSE_PENDING"
};
-#endif /* ACPI GLOBALS */
+#endif /* EXCEPTION_TABLE */
#endif /* __ACEXCEP_H__ */
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index b177f97f53b6..d988ac54f41e 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index d7bd661bfae7..2457ac849655 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -213,6 +213,8 @@
#define ACPI_WARNING(plist) acpi_warning plist
#define ACPI_EXCEPTION(plist) acpi_exception plist
#define ACPI_ERROR(plist) acpi_error plist
+#define ACPI_BIOS_WARNING(plist) acpi_bios_warning plist
+#define ACPI_BIOS_ERROR(plist) acpi_bios_error plist
#define ACPI_DEBUG_OBJECT(obj,l,i) acpi_ex_do_debug_object(obj,l,i)
#else
@@ -223,6 +225,8 @@
#define ACPI_WARNING(plist)
#define ACPI_EXCEPTION(plist)
#define ACPI_ERROR(plist)
+#define ACPI_BIOS_WARNING(plist)
+#define ACPI_BIOS_ERROR(plist)
#define ACPI_DEBUG_OBJECT(obj,l,i)
#endif /* ACPI_NO_ERROR_MESSAGES */
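Annotation: ACPI_BIOS_WARNING/ACPI_BIOS_ERROR follow ACPICA's existing "plist" convention — the caller supplies the whole argument list in an extra set of parentheses so the macro can either paste it after a variadic reporting function or compile it away entirely. A self-contained sketch of that trick with invented names:

#include <stdarg.h>
#include <stdio.h>

static void bios_error(const char *module, int line, const char *fmt, ...)
{
        va_list ap;

        fprintf(stderr, "Firmware Error (%s:%d): ", module, line);
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        fputc('\n', stderr);
}

#ifndef NO_ERROR_MESSAGES
#define BIOS_ERROR(plist)       bios_error plist
#else
#define BIOS_ERROR(plist)                       /* compiled out entirely */
#endif

int main(void)
{
        BIOS_ERROR(("tbfadt", 42, "bad FADT revision %d", 3));
        return 0;
}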
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index de39915f6b7f..c433d5e27679 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 01e2925523ea..bde976ee068d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -50,6 +50,9 @@ acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list *arguments,
struct acpi_handle_list *list);
+acpi_status
+acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
+ u32 status_code, struct acpi_buffer *status_buf);
struct acpi_pld {
unsigned int revision:7; /* 0 */
@@ -174,7 +177,8 @@ struct acpi_device_flags {
u32 suprise_removal_ok:1;
u32 power_manageable:1;
u32 performance_manageable:1;
- u32 reserved:24;
+ u32 eject_pending:1;
+ u32 reserved:23;
};
/* File System */
@@ -326,6 +330,11 @@ struct acpi_bus_event {
u32 data;
};
+struct acpi_eject_event {
+ acpi_handle handle;
+ u32 event;
+};
+
extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
@@ -363,6 +372,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver);
void acpi_bus_unregister_driver(struct acpi_driver *driver);
int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
acpi_handle handle, int type);
+void acpi_bus_hot_remove_device(void *context);
int acpi_bus_trim(struct acpi_device *start, int rmdevice);
int acpi_bus_start(struct acpi_device *device);
acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd);
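Annotation: the new struct acpi_eject_event plus the exported acpi_bus_hot_remove_device(void *context) suggest the usual deferral pattern — package the handle and notify value into a heap context and hand it to an asynchronous executor, which frees it when the removal work runs. A generic, user-space analogue of that shape (the executor, the notify value, and all names are assumptions for illustration, not the ACPI API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct eject_event { void *handle; uint32_t event; };

typedef void (*exec_callback)(void *context);

static void hot_remove_device(void *context)    /* runs later */
{
        struct eject_event *ej = context;

        printf("ejecting handle %p on event 0x%x\n", ej->handle, ej->event);
        free(ej);
}

static int hotplug_execute(exec_callback fn, void *context)
{
        fn(context);            /* a real implementation would defer this */
        return 0;
}

int main(void)
{
        struct eject_event *ej = malloc(sizeof(*ej));

        ej->handle = (void *)0x1234;
        ej->event = 0x03;       /* illustrative eject-request notify value */
        return hotplug_execute(hot_remove_device, ej);
}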
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 21a5548c6686..0650f5fa7ce9 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -205,7 +205,7 @@ acpi_os_execute(acpi_execute_type type,
acpi_status
acpi_os_hotplug_execute(acpi_osd_exec_callback function, void *context);
-void acpi_os_wait_events_complete(void *context);
+void acpi_os_wait_events_complete(void);
void acpi_os_sleep(u64 milliseconds);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 982110134672..26a92fc28a59 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20120320
+#define ACPI_CA_VERSION 0x20120711
#include "acconfig.h"
#include "actypes.h"
@@ -154,15 +154,20 @@ void *acpi_callocate(u32 size);
void acpi_free(void *address);
/*
- * ACPI table manipulation interfaces
+ * ACPI table load/unload interfaces
*/
-acpi_status acpi_reallocate_root_table(void);
+acpi_status acpi_load_table(struct acpi_table_header *table);
-acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
+acpi_status acpi_unload_parent_table(acpi_handle object);
acpi_status acpi_load_tables(void);
-acpi_status acpi_load_table(struct acpi_table_header *table_ptr);
+/*
+ * ACPI table manipulation interfaces
+ */
+acpi_status acpi_reallocate_root_table(void);
+
+acpi_status acpi_find_root_pointer(acpi_size *rsdp_address);
acpi_status acpi_unload_table_id(acpi_owner_id id);
@@ -486,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b);
acpi_status acpi_enter_sleep_state_prep(u8 sleep_state);
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags);
+acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state);
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void))
-acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags);
+acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
acpi_status acpi_leave_sleep_state(u8 sleep_state);
@@ -529,6 +534,14 @@ void ACPI_INTERNAL_VAR_XFACE
acpi_info(const char *module_name,
u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_error(const char *module_name,
+ u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
+void ACPI_INTERNAL_VAR_XFACE
+acpi_bios_warning(const char *module_name,
+ u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
+
/*
* Debug output
*/
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 3506e39a66b1..40349ae65464 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
* Definitions for Resource Attributes
*/
typedef u16 acpi_rs_length; /* Resource Length field is fixed at 16 bits */
-typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64_k-1)+3 */
+typedef u32 acpi_rsdesc_size; /* Max Resource Descriptor size is (Length+3) = (64K-1)+3 */
/*
* Memory Attributes
@@ -332,7 +332,7 @@ struct acpi_resource_address64 {
};
struct acpi_resource_extended_address64 {
- ACPI_RESOURCE_ADDRESS_COMMON u8 revision_iD;
+ ACPI_RESOURCE_ADDRESS_COMMON u8 revision_ID;
u64 granularity;
u64 minimum;
u64 maximum;
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 8dea54665dcf..59a73e1b2845 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -212,7 +212,7 @@ struct acpi_table_fadt {
u32 smi_command; /* 32-bit Port address of SMI command port */
u8 acpi_enable; /* Value to write to smi_cmd to enable ACPI */
u8 acpi_disable; /* Value to write to smi_cmd to disable ACPI */
- u8 S4bios_request; /* Value to write to SMI CMD to enter S4BIOS state */
+ u8 s4_bios_request; /* Value to write to SMI CMD to enter S4BIOS state */
u8 pstate_control; /* Processor performance state control */
u32 pm1a_event_block; /* 32-bit Port address of Power Mgt 1a Event Reg Blk */
u32 pm1b_event_block; /* 32-bit Port address of Power Mgt 1b Event Reg Blk */
@@ -230,8 +230,8 @@ struct acpi_table_fadt {
u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */
u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */
u8 cst_control; /* Support for the _CST object and C States change notification */
- u16 C2latency; /* Worst case HW latency to enter/exit C2 state */
- u16 C3latency; /* Worst case HW latency to enter/exit C3 state */
+ u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */
+ u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */
u16 flush_size; /* Processor's memory cache line width, in bytes */
u16 flush_stride; /* Number of flush strides that need to be read */
u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
@@ -291,7 +291,7 @@ struct acpi_table_fadt {
#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
-#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local xAPICs must use physical dest mode (ACPI 3.0) */
#define ACPI_FADT_HW_REDUCED (1<<20) /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
#define ACPI_FADT_LOW_POWER_S0 (1<<21) /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 71e747beac8f..300d14e7c5d5 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -676,7 +676,7 @@ struct acpi_madt_local_apic {
struct acpi_madt_io_apic {
struct acpi_subtable_header header;
u8 id; /* I/O APIC ID */
- u8 reserved; /* Reserved - must be zero */
+ u8 reserved; /* reserved - must be zero */
u32 address; /* APIC physical address */
u32 global_irq_base; /* Global system interrupt where INTI lines start */
};
@@ -794,11 +794,11 @@ struct acpi_madt_generic_interrupt {
struct acpi_madt_generic_distributor {
struct acpi_subtable_header header;
- u16 reserved; /* Reserved - must be zero */
+ u16 reserved; /* reserved - must be zero */
u32 gic_id;
u64 base_address;
u32 global_irq_base;
- u32 reserved2; /* Reserved - must be zero */
+ u32 reserved2; /* reserved - must be zero */
};
/*
@@ -841,7 +841,7 @@ struct acpi_table_msct {
u64 max_address; /* Max physical address in system */
};
-/* Subtable - Maximum Proximity Domain Information. Version 1 */
+/* subtable - Maximum Proximity Domain Information. Version 1 */
struct acpi_msct_proximity {
u8 revision;
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 58bdd0545c5a..d9ceb3d31629 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,7 +66,7 @@
#define ACPI_SIG_DBGP "DBGP" /* Debug Port table */
#define ACPI_SIG_DMAR "DMAR" /* DMA Remapping table */
#define ACPI_SIG_HPET "HPET" /* High Precision Event Timer table */
-#define ACPI_SIG_IBFT "IBFT" /* i_sCSI Boot Firmware Table */
+#define ACPI_SIG_IBFT "IBFT" /* iSCSI Boot Firmware Table */
#define ACPI_SIG_IVRS "IVRS" /* I/O Virtualization Reporting Structure */
#define ACPI_SIG_MCFG "MCFG" /* PCI Memory Mapped Configuration table */
#define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */
@@ -334,8 +334,8 @@ struct acpi_dmar_reserved_memory {
struct acpi_dmar_header header;
u16 reserved;
u16 segment;
- u64 base_address; /* 4_k aligned base address */
- u64 end_address; /* 4_k aligned limit address */
+ u64 base_address; /* 4K aligned base address */
+ u64 end_address; /* 4K aligned limit address */
};
/* Masks for Flags field above */
@@ -565,7 +565,7 @@ struct acpi_ivrs_hardware {
/* Masks for Info field above */
#define ACPI_IVHD_MSI_NUMBER_MASK 0x001F /* 5 bits, MSI message number */
-#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_iD */
+#define ACPI_IVHD_UNIT_ID_MASK 0x1F00 /* 5 bits, unit_ID */
/*
* Device Entries for IVHD subtable, appear after struct acpi_ivrs_hardware structure.
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index c22ce80e9535..f65a0ed869eb 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index e8bcc4742e0e..3d00bd5bd7e3 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -173,7 +173,7 @@ typedef u64 acpi_physical_address;
* to indicate that special precautions must be taken to avoid alignment faults.
* (IA64 or ia64 is currently used by existing compilers to indicate IPF.)
*
- * Note: Em64_t and other X86-64 processors support misaligned transfers,
+ * Note: EM64T and other X86-64 processors support misaligned transfers,
* so there is no need to define this flag.
*/
#if defined (__IA64__) || defined (__ia64__)
@@ -636,7 +636,7 @@ typedef u32 acpi_event_type;
#define ACPI_NUM_FIXED_EVENTS ACPI_EVENT_MAX + 1
/*
- * Event Status - Per event
+ * Event status - Per event
* -------------
* The encoding of acpi_event_status is illustrated below.
* Note that a set bit (1) indicates the property is TRUE
@@ -706,10 +706,14 @@ typedef u32 acpi_event_status;
#define ACPI_DEVICE_NOTIFY 0x2
#define ACPI_ALL_NOTIFY (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
#define ACPI_MAX_NOTIFY_HANDLER_TYPE 0x3
+#define ACPI_NUM_NOTIFY_TYPES 2
#define ACPI_MAX_SYS_NOTIFY 0x7F
#define ACPI_MAX_DEVICE_SPECIFIC_NOTIFY 0xBF
+#define ACPI_SYSTEM_HANDLER_LIST 0 /* Used as index, must be SYSTEM_NOTIFY -1 */
+#define ACPI_DEVICE_HANDLER_LIST 1 /* Used as index, must be DEVICE_NOTIFY -1 */
+
/* Address Space (Operation Region) Types */
typedef u8 acpi_adr_space_type;
@@ -724,8 +728,9 @@ typedef u8 acpi_adr_space_type;
#define ACPI_ADR_SPACE_IPMI (acpi_adr_space_type) 7
#define ACPI_ADR_SPACE_GPIO (acpi_adr_space_type) 8
#define ACPI_ADR_SPACE_GSBUS (acpi_adr_space_type) 9
+#define ACPI_ADR_SPACE_PLATFORM_COMM (acpi_adr_space_type) 10
-#define ACPI_NUM_PREDEFINED_REGIONS 10
+#define ACPI_NUM_PREDEFINED_REGIONS 11
/*
* Special Address Spaces
@@ -798,7 +803,7 @@ typedef u8 acpi_adr_space_type;
/* Sleep function dispatch */
-typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags);
+typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state);
struct acpi_sleep_functions {
ACPI_SLEEP_FUNCTION legacy_function;
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 5af3ed52ef98..560a9f272f34 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index e228893591a9..72553b0c9f33 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 6fbc4cab5834..7509be30ca01 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index abfb2682de7f..2be8a2dbc868 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif
#endif
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 2e248d8924dc..de8bf89940f8 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -176,4 +176,59 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
#endif
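
Usage sketch (not part of the patch): with dma_mmap_attrs()/dma_mmap_coherent() now in the generic header, a driver can export a coherent allocation through its mmap() file operation. The driver structure and field names below are invented for the illustration; only the dma_mmap_coherent() call itself comes from the new API.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct my_drv {
        struct device *dev;
        void *buf;              /* CPU address from dma_alloc_coherent() */
        dma_addr_t buf_dma;     /* device address from dma_alloc_coherent() */
        size_t buf_size;
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_drv *drv = file->private_data;

        /* Hand the coherent buffer to user space; per the kerneldoc above,
         * the buffer must outlive the user mapping. */
        return dma_mmap_coherent(drv->dev, vma, drv->buf, drv->buf_dma,
                                 drv->buf_size);
}
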
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 9e5b0356e2bb..a48937d4a5ea 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -120,6 +120,10 @@
#define F_GETOWN_EX 16
#endif
+#ifndef F_GETOWNER_UIDS
+#define F_GETOWNER_UIDS 17
+#endif
+
#define F_OWNER_TID 0
#define F_OWNER_PID 1
#define F_OWNER_PGRP 2
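
User-space sketch (not part of the patch) of the new F_GETOWNER_UIDS command, assuming the third argument points at two uid_t values (owner uid, then effective uid), which is how the matching fs/fcntl.c handler fills it in.

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef F_GETOWNER_UIDS
#define F_GETOWNER_UIDS 17
#endif

int main(void)
{
        uid_t uids[2];                  /* [0] = owner uid, [1] = owner euid */
        int fd = open("/dev/null", O_RDONLY);

        if (fd < 0)
                return 1;
        if (fcntl(fd, F_GETOWNER_UIDS, uids) == 0)
                printf("owner uid=%u euid=%u\n",
                       (unsigned)uids[0], (unsigned)uids[1]);
        close(fd);
        return 0;
}
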
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index 0232ccb76f2b..90f99c74dd38 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -2,39 +2,9 @@
#define _ASM_GENERIC_KMAP_TYPES_H
#ifdef __WITH_KM_FENCE
-# define KMAP_D(n) __KM_FENCE_##n ,
+# define KM_TYPE_NR 41
#else
-# define KMAP_D(n)
+# define KM_TYPE_NR 20
#endif
-enum km_type {
-KMAP_D(0) KM_BOUNCE_READ,
-KMAP_D(1) KM_SKB_SUNRPC_DATA,
-KMAP_D(2) KM_SKB_DATA_SOFTIRQ,
-KMAP_D(3) KM_USER0,
-KMAP_D(4) KM_USER1,
-KMAP_D(5) KM_BIO_SRC_IRQ,
-KMAP_D(6) KM_BIO_DST_IRQ,
-KMAP_D(7) KM_PTE0,
-KMAP_D(8) KM_PTE1,
-KMAP_D(9) KM_IRQ0,
-KMAP_D(10) KM_IRQ1,
-KMAP_D(11) KM_SOFTIRQ0,
-KMAP_D(12) KM_SOFTIRQ1,
-KMAP_D(13) KM_SYNC_ICACHE,
-KMAP_D(14) KM_SYNC_DCACHE,
-/* UML specific, for copy_*_user - used in do_op_one_page */
-KMAP_D(15) KM_UML_USERCOPY,
-KMAP_D(16) KM_IRQ_PTE,
-KMAP_D(17) KM_NMI,
-KMAP_D(18) KM_NMI_PTE,
-KMAP_D(19) KM_KDB,
-/*
- * Remember to update debug_kmap_atomic() when adding new kmap types!
- */
-KMAP_D(20) KM_TYPE_NR
-};
-
-#undef KMAP_D
-
#endif
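
Caller-side sketch (not part of the patch): with the km_type slots gone, only the stack depth KM_TYPE_NR remains and kmap_atomic() picks its own slot. A typical conversion looks like this; the helper below is invented for the example.

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page(struct page *page, void *dst, size_t len)
{
        void *src = kmap_atomic(page);  /* was: kmap_atomic(page, KM_USER0) */

        memcpy(dst, src, len);
        kunmap_atomic(src);             /* was: kunmap_atomic(src, KM_USER0) */
}
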
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 580a6d35c700..c04e0db8a2d6 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -26,7 +26,13 @@ static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- fail_fn(count);
+ /*
+ * We failed to acquire the lock, so mark it contended
+ * to ensure that any waiting tasks are woken up by the
+ * unlock slow path.
+ */
+ if (likely(atomic_xchg(count, -1) != 1))
+ fail_fn(count);
}
/**
@@ -43,7 +49,8 @@ static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
- return fail_fn(count);
+ if (likely(atomic_xchg(count, -1) != 1))
+ return fail_fn(count);
return 0;
}
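
Illustration (not part of the patch) of the state encoding the fix relies on: 1 = unlocked, 0 = locked with no waiters, -1 = locked with waiters. The first xchg may overwrite a -1 with 0, so the loser re-marks the lock as contended before taking the slow path. A user-space analogue with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int count = 1;                    /* starts unlocked */

static bool fastpath_trylock(void)
{
        if (atomic_exchange(&count, 0) == 1)
                return true;                    /* uncontended acquire */
        /* Lost the race: make sure a later unlock sees the lock as
         * contended and wakes any waiters. */
        if (atomic_exchange(&count, -1) == 1)
                return true;                    /* owner dropped it meanwhile */
        return false;                           /* caller takes the slow path */
}
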
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
index ea5d4ef81061..1dcfad9629ef 100644
--- a/include/asm-generic/sizes.h
+++ b/include/asm-generic/sizes.h
@@ -1,47 +1,2 @@
-/*
- * linux/include/asm-generic/sizes.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_GENERIC_SIZES_H__
-#define __ASM_GENERIC_SIZES_H__
-
-#define SZ_1 0x00000001
-#define SZ_2 0x00000002
-#define SZ_4 0x00000004
-#define SZ_8 0x00000008
-#define SZ_16 0x00000010
-#define SZ_32 0x00000020
-#define SZ_64 0x00000040
-#define SZ_128 0x00000080
-#define SZ_256 0x00000100
-#define SZ_512 0x00000200
-
-#define SZ_1K 0x00000400
-#define SZ_2K 0x00000800
-#define SZ_4K 0x00001000
-#define SZ_8K 0x00002000
-#define SZ_16K 0x00004000
-#define SZ_32K 0x00008000
-#define SZ_64K 0x00010000
-#define SZ_128K 0x00020000
-#define SZ_256K 0x00040000
-#define SZ_512K 0x00080000
-
-#define SZ_1M 0x00100000
-#define SZ_2M 0x00200000
-#define SZ_4M 0x00400000
-#define SZ_8M 0x00800000
-#define SZ_16M 0x01000000
-#define SZ_32M 0x02000000
-#define SZ_64M 0x04000000
-#define SZ_128M 0x08000000
-#define SZ_256M 0x10000000
-#define SZ_512M 0x20000000
-
-#define SZ_1G 0x40000000
-#define SZ_2G 0x80000000
-
-#endif /* __ASM_GENERIC_SIZES_H__ */
+/* This is a placeholder, to be removed over time */
+#include <linux/sizes.h>
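
Caller-side note (not part of the patch): existing users of the SZ_* constants keep compiling because the stub pulls in <linux/sizes.h>; new code can include that header directly. Trivial example with an invented buffer size:

#include <linux/sizes.h>

#define MY_DMA_BUF_SIZE (SZ_64K + SZ_4K)        /* 68 KiB, made up for the example */
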
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 31ad880ca2ef..d6b67bb9075f 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -348,7 +348,6 @@ struct drm_buf {
struct drm_buf *next; /**< Kernel-only: used for free list */
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
- wait_queue_head_t dma_wait; /**< Processes waiting */
struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
@@ -876,12 +875,6 @@ struct drm_driver {
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
- void (*reclaim_buffers) (struct drm_device *dev,
- struct drm_file * file_priv);
- void (*reclaim_buffers_locked) (struct drm_device *dev,
- struct drm_file *file_priv);
- void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
- struct drm_file *file_priv);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
@@ -1108,12 +1101,8 @@ struct drm_device {
/*@} */
- /** \name DMA queues (contexts) */
+ /** \name DMA support */
/*@{ */
- int queue_count; /**< Number of active DMA queues */
- int queue_reserved; /**< Number of reserved DMA queues */
- int queue_slots; /**< Actual length of queuelist */
- struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
@@ -1540,7 +1529,6 @@ extern int drm_debugfs_cleanup(struct drm_minor *minor);
/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
-extern int drm_queues_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
@@ -1761,6 +1749,11 @@ extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+#define DRM_PCIE_SPEED_25 1
+#define DRM_PCIE_SPEED_50 2
+#define DRM_PCIE_SPEED_80 4
+
+extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
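
Usage sketch (not part of the patch) of the new PCIe speed-cap helper; the gen2 policy check below is invented for the example.

#include <drm/drmP.h>

static bool my_driver_can_use_gen2(struct drm_device *dev)
{
        u32 mask;

        if (drm_pcie_get_speed_cap_mask(dev, &mask))
                return false;                   /* query failed, assume 2.5 GT/s only */
        return (mask & DRM_PCIE_SPEED_50) != 0; /* 5.0 GT/s supported */
}
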
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index bac55c215113..bfacf0d5a225 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -118,7 +118,8 @@ enum drm_mode_status {
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
- .vscan = (vs), .flags = (f), .vrefresh = 0
+ .vscan = (vs), .flags = (f), .vrefresh = 0, \
+ .base.type = DRM_MODE_OBJECT_MODE
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
@@ -166,8 +167,6 @@ struct drm_display_mode {
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
- int crtc_hadjusted;
- int crtc_vadjusted;
/* Driver private mode info */
int private_size;
@@ -676,8 +675,6 @@ struct drm_plane {
* This is used to set modes.
*/
struct drm_mode_set {
- struct list_head head;
-
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 7988e55c98d0..e01cc80c9c30 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -62,7 +62,7 @@ struct drm_crtc_helper_funcs {
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -96,7 +96,7 @@ struct drm_encoder_helper_funcs {
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
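
Driver-side sketch (not part of the patch) of the constified prototype: the requested mode is now read-only and any adjustment goes into adjusted_mode. The dotclock clamp below is invented for the example.

#include <drm/drm_crtc_helper.h>

static bool my_encoder_mode_fixup(struct drm_encoder *encoder,
                                  const struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
{
        if (mode->clock > 165000)               /* read *mode, never write it */
                adjusted_mode->clock = 165000;  /* fixups land here instead */
        return true;
}

static const struct drm_encoder_helper_funcs my_encoder_helper_funcs = {
        .mode_fixup = my_encoder_mode_fixup,
};
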
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index 2f65633d28a7..7dc385233805 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -54,7 +54,7 @@ struct drm_encoder_slave_funcs {
void (*save)(struct drm_encoder *encoder);
void (*restore)(struct drm_encoder *encoder);
bool (*mode_fixup)(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
+ const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
int (*mode_valid)(struct drm_encoder *encoder,
struct drm_display_mode *mode);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 564b14aa7e16..06d7f798a08c 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -50,6 +50,7 @@ struct drm_mm_node {
unsigned scanned_next_free : 1;
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
+ unsigned long color;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
@@ -66,6 +67,7 @@ struct drm_mm {
spinlock_t unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
+ unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
@@ -73,6 +75,9 @@ struct drm_mm {
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
+
+ void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
+ unsigned long *start, unsigned long *end);
};
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
@@ -100,11 +105,13 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end,
int atomic);
@@ -112,13 +119,13 @@ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
- return drm_mm_get_block_generic(parent, size, alignment, 0);
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
- return drm_mm_get_block_generic(parent, size, alignment, 1);
+ return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
@@ -127,8 +134,19 @@ static inline struct drm_mm_node *drm_mm_get_block_range(
unsigned long start,
unsigned long end)
{
- return drm_mm_get_block_range_generic(parent, size, alignment,
- start, end, 0);
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
+ start, end, 0);
+}
+static inline struct drm_mm_node *drm_mm_get_color_block_range(
+ struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end)
+{
+ return drm_mm_get_block_range_generic(parent, size, alignment, color,
+ start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
@@ -137,7 +155,7 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
unsigned long start,
unsigned long end)
{
- return drm_mm_get_block_range_generic(parent, size, alignment,
+ return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
@@ -149,18 +167,59 @@ extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- int best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range(
+extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match);
+extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match);
+static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
- int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
+ start, end, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ bool best_match)
+{
+ return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
+}
+static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
+ const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ bool best_match)
+{
+ return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
+ start, end, best_match);
+}
+extern int drm_mm_init(struct drm_mm *mm,
+ unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
@@ -171,10 +230,14 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
return block->mm;
}
-void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
- unsigned alignment);
-void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+void drm_mm_init_scan(struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color);
+void drm_mm_init_scan_with_range(struct drm_mm *mm,
+ unsigned long size,
unsigned alignment,
+ unsigned long color,
unsigned long start,
unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
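
Usage sketch (not part of the patch) of the color-aware allocator interface. The coloring policy below (pad the hole when the neighbouring node has a different color) is invented for the example; real users such as i915 apply their own rules.

#include <drm/drm_mm.h>

static void my_color_adjust(struct drm_mm_node *node, unsigned long color,
                            unsigned long *start, unsigned long *end)
{
        /* Keep allocations of different colors from touching. */
        if (node->color != color)
                *start += 4096;
}

static struct drm_mm_node *my_alloc(struct drm_mm *mm, unsigned long size,
                                    unsigned long color)
{
        struct drm_mm_node *hole;

        mm->color_adjust = my_color_adjust;     /* normally set once after drm_mm_init() */
        hole = drm_mm_search_free_color(mm, size, 4096, color, false);
        if (!hole)
                return NULL;
        return drm_mm_get_block_generic(hole, size, 4096, color, 0);
}
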
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 5581980b14f6..3d6301b6ec16 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -359,8 +359,9 @@ struct drm_mode_mode_cmd {
struct drm_mode_modeinfo mode;
};
-#define DRM_MODE_CURSOR_BO (1<<0)
-#define DRM_MODE_CURSOR_MOVE (1<<1)
+#define DRM_MODE_CURSOR_BO 0x01
+#define DRM_MODE_CURSOR_MOVE 0x02
+#define DRM_MODE_CURSOR_FLAGS 0x03
/*
* depending on the value in flags different members are used.
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index a7aec391b7b7..c78bb997e2c6 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -213,9 +213,12 @@
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -686,14 +689,6 @@
{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
-#define i830_PCI_IDS \
- {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
#define gamma_PCI_IDS \
{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
@@ -726,37 +721,3 @@
#define ffb_PCI_IDS \
{0, 0, 0}
-
-#define i915_PCI_IDS \
- {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2582, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x258a, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x27a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x27ae, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2972, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2982, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2992, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29a2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29b2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29c2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
- {0, 0, 0}
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 68733587e700..c20b00181530 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -107,11 +107,6 @@ struct drm_exynos_vidi_connection {
uint64_t edid;
};
-struct drm_exynos_plane_set_zpos {
- __u32 plane_id;
- __s32 zpos;
-};
-
/* memory type definitions. */
enum e_drm_exynos_gem_mem_type {
/* Physically Continuous memory and used as default. */
@@ -164,7 +159,6 @@ struct drm_exynos_g2d_exec {
#define DRM_EXYNOS_GEM_MMAP 0x02
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_GEM_GET 0x04
-#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
/* G2D */
@@ -184,9 +178,6 @@ struct drm_exynos_g2d_exec {
#define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info)
-#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \
- DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos)
-
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index f3f82242bf1d..8cc70837f929 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -200,6 +200,9 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
+#define DRM_I915_GEM_WAIT 0x2c
+#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
+#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -243,6 +246,9 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
+#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -298,6 +304,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
+#define I915_PARAM_HAS_WAIT_TIMEOUT 19
typedef struct drm_i915_getparam {
int param;
@@ -656,13 +663,19 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
- __u64 rsvd1;
+ __u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
+#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
+#define i915_execbuffer2_set_context_id(eb2, context) \
+ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
+#define i915_execbuffer2_get_context_id(eb2) \
+ ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
+
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
@@ -886,4 +899,23 @@ struct drm_intel_sprite_colorkey {
__u32 flags;
};
+struct drm_i915_gem_wait {
+ /** Handle of BO we shall wait on */
+ __u32 bo_handle;
+ __u32 flags;
+ /** Number of nanoseconds to wait, Returns time remaining. */
+ __s64 timeout_ns;
+};
+
+struct drm_i915_gem_context_create {
+ /* output: id of new context*/
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
#endif /* _I915_DRM_H_ */
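
User-space sketch (not part of the patch), libdrm style, combining the three new ioctls: create a context, tag an already prepared execbuffer with it through rsvd1, and bound the wait for the batch object. The file descriptor, execbuffer contents, handle names and include path are assumptions.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>       /* header as installed by libdrm; path may differ */

static int run_in_new_context(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
                              uint32_t batch_handle)
{
        struct drm_i915_gem_context_create create;
        struct drm_i915_gem_wait wait;
        int ret;

        memset(&create, 0, sizeof(create));
        ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
        if (ret)
                return ret;

        /* rsvd1 now carries the context id for the execbuffer. */
        i915_execbuffer2_set_context_id(*execbuf, create.ctx_id);
        ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
        if (ret)
                return ret;

        /* Wait up to one second for the batch to retire; timeout_ns is
         * rewritten with the time remaining. */
        memset(&wait, 0, sizeof(wait));
        wait.bo_handle = batch_handle;
        wait.timeout_ns = 1000000000ll;
        return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
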
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 923afb5dcf0c..8e29d551bb3c 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -19,8 +19,16 @@ const struct intel_gtt {
dma_addr_t scratch_page_dma;
/* for ppgtt PDE access */
u32 __iomem *gtt;
+ /* needed for ioremap in drm/i915 */
+ phys_addr_t gma_bus_addr;
} *intel_gtt_get(void);
+int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ struct agp_bridge_data *bridge);
+void intel_gmch_remove(void);
+
+bool intel_enable_gtt(void);
+
void intel_gtt_chipset_flush(void);
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 5edd3a76fffa..2a5769fdf8ba 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -25,70 +25,6 @@
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 16
-
-struct drm_nouveau_channel_alloc {
- uint32_t fb_ctxdma_handle;
- uint32_t tt_ctxdma_handle;
-
- int channel;
- uint32_t pushbuf_domains;
-
- /* Notifier memory */
- uint32_t notifier_handle;
-
- /* DRM-enforced subchannel assignments */
- struct {
- uint32_t handle;
- uint32_t grclass;
- } subchan[8];
- uint32_t nr_subchan;
-};
-
-struct drm_nouveau_channel_free {
- int channel;
-};
-
-struct drm_nouveau_grobj_alloc {
- int channel;
- uint32_t handle;
- int class;
-};
-
-struct drm_nouveau_notifierobj_alloc {
- uint32_t channel;
- uint32_t handle;
- uint32_t size;
- uint32_t offset;
-};
-
-struct drm_nouveau_gpuobj_free {
- int channel;
- uint32_t handle;
-};
-
-/* FIXME : maybe unify {GET,SET}PARAMs */
-#define NOUVEAU_GETPARAM_PCI_VENDOR 3
-#define NOUVEAU_GETPARAM_PCI_DEVICE 4
-#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_SIZE 8
-#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_CHIPSET_ID 11
-#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
-#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
-#define NOUVEAU_GETPARAM_PTIMER_TIME 14
-#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
-#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-struct drm_nouveau_getparam {
- uint64_t param;
- uint64_t value;
-};
-
-struct drm_nouveau_setparam {
- uint64_t param;
- uint64_t value;
-};
-
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
@@ -180,35 +116,19 @@ struct drm_nouveau_gem_cpu_fini {
uint32_t handle;
};
-enum nouveau_bus_type {
- NV_AGP = 0,
- NV_PCI = 1,
- NV_PCIE = 2,
-};
-
-struct drm_nouveau_sarea {
-};
-
-#define DRM_NOUVEAU_GETPARAM 0x00
-#define DRM_NOUVEAU_SETPARAM 0x01
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03
-#define DRM_NOUVEAU_GROBJ_ALLOC 0x04
-#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
-#define DRM_NOUVEAU_GPUOBJ_FREE 0x06
+#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
#define DRM_NOUVEAU_GEM_CPU_FINI 0x43
#define DRM_NOUVEAU_GEM_INFO 0x44
-#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
-#define DRM_IOCTL_NOUVEAU_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
-#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
-#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
-#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
-#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
#define DRM_IOCTL_NOUVEAU_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 58056865b8e9..dc3a8cd7db8a 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -964,6 +964,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
+/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
+#define RADEON_INFO_TIMESTAMP 0x11
struct drm_radeon_info {
uint32_t request;
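
User-space sketch (not part of the patch) of the new timestamp query, assuming the usual radeon info convention where the value field carries a pointer to the result buffer.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>     /* header as installed by libdrm; path may differ */

static int read_gpu_timestamp(int fd, uint64_t *timestamp)
{
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_TIMESTAMP;
        info.value = (uintptr_t)timestamp;      /* kernel writes the GPU clock here */
        return drmIoctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}
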
diff --git a/include/drm/sis_drm.h b/include/drm/sis_drm.h
index 035b804dda6d..df3763222d73 100644
--- a/include/drm/sis_drm.h
+++ b/include/drm/sis_drm.h
@@ -51,17 +51,17 @@
typedef struct {
int context;
- unsigned int offset;
- unsigned int size;
+ unsigned long offset;
+ unsigned long size;
unsigned long free;
} drm_sis_mem_t;
typedef struct {
- unsigned int offset, size;
+ unsigned long offset, size;
} drm_sis_agp_t;
typedef struct {
- unsigned int offset, size;
+ unsigned long offset, size;
} drm_sis_fb_t;
struct sis_file_private {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index a05f1b55714d..084e8989a6e1 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -39,8 +39,6 @@
#include "linux/fs.h"
#include "linux/spinlock.h"
-struct ttm_backend;
-
struct ttm_backend_func {
/**
* struct ttm_backend_func member bind
@@ -119,7 +117,6 @@ struct ttm_tt {
unsigned long num_pages;
struct sg_table *sg; /* for SG objects via dma-buf */
struct ttm_bo_global *glob;
- struct ttm_backend *be;
struct file *swap_storage;
enum ttm_caching_state caching_state;
enum {
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 9547daddf813..fa217607c582 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -386,10 +386,12 @@ header-y += utime.h
header-y += utsname.h
header-y += uuid.h
header-y += uvcvideo.h
+header-y += v4l2-common.h
header-y += v4l2-dv-timings.h
header-y += v4l2-mediabus.h
header-y += v4l2-subdev.h
header-y += veth.h
+header-y += vfio.h
header-y += vhost.h
header-y += videodev2.h
header-y += virtio_9p.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f421dd84f29d..4f2a76224509 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
-void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
@@ -190,6 +190,8 @@ extern bool wmi_has_guid(const char *guid);
extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
extern long acpi_is_video_device(struct acpi_device *device);
+extern void acpi_video_dmi_promote_vendor(void);
+extern void acpi_video_dmi_demote_vendor(void);
extern int acpi_video_backlight_support(void);
extern int acpi_video_display_switch_support(void);
@@ -205,6 +207,14 @@ static inline long acpi_is_video_device(struct acpi_device *device)
return 0;
}
+static inline void acpi_video_dmi_promote_vendor(void)
+{
+}
+
+static inline void acpi_video_dmi_demote_vendor(void)
+{
+}
+
static inline int acpi_video_backlight_support(void)
{
return 0;
@@ -277,7 +287,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_PAD_SUPPORT 1
#define OSC_SB_PPC_OST_SUPPORT 2
#define OSC_SB_PR3_SUPPORT 4
-#define OSC_SB_CPUHP_OST_SUPPORT 8
+#define OSC_SB_HOTPLUG_OST_SUPPORT 8
#define OSC_SB_APEI_SUPPORT 16
extern bool osc_sb_apei_support_acked;
@@ -309,6 +319,44 @@ extern bool osc_sb_apei_support_acked;
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
u32 *mask, u32 req);
+
+/* Enable _OST when all relevant hotplug operations are enabled */
+#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
+ (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
+ defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) && \
+ (defined(CONFIG_ACPI_CONTAINER) || \
+ defined(CONFIG_ACPI_CONTAINER_MODULE))
+#define ACPI_HOTPLUG_OST
+#endif
+
+/* _OST Source Event Code (OSPM Action) */
+#define ACPI_OST_EC_OSPM_SHUTDOWN 0x100
+#define ACPI_OST_EC_OSPM_EJECT 0x103
+#define ACPI_OST_EC_OSPM_INSERTION 0x200
+
+/* _OST General Processing Status Code */
+#define ACPI_OST_SC_SUCCESS 0x0
+#define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1
+#define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2
+
+/* _OST OS Shutdown Processing (0x100) Status Code */
+#define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80
+#define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81
+#define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82
+#define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83
+
+/* _OST Ejection Request (0x3, 0x103) Status Code */
+#define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80
+#define ACPI_OST_SC_DEVICE_IN_USE 0x81
+#define ACPI_OST_SC_DEVICE_BUSY 0x82
+#define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83
+#define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84
+
+/* _OST Insertion Request (0x200) Status Code */
+#define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80
+#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81
+#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
+
extern void acpi_early_init(void);
extern int acpi_nvs_register(__u64 start, __u64 size);
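
Usage sketch (not part of the patch) showing how the new status codes feed an _OST evaluation; the direct acpi_evaluate_object() call below only illustrates the argument layout _OST expects (source event, status code, status information buffer), with an eject-busy report picked as the example.

#include <linux/acpi.h>

static void report_eject_busy(acpi_handle handle)
{
        union acpi_object params[3];
        struct acpi_object_list arg_list = { .count = 3, .pointer = params };

        params[0].type = ACPI_TYPE_INTEGER;
        params[0].integer.value = ACPI_OST_EC_OSPM_EJECT;
        params[1].type = ACPI_TYPE_INTEGER;
        params[1].integer.value = ACPI_OST_SC_DEVICE_BUSY;
        params[2].type = ACPI_TYPE_BUFFER;      /* empty status information buffer */
        params[2].buffer.length = 0;
        params[2].buffer.pointer = NULL;

        acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
}
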
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b1a520ec8b59..31ff6dba4872 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -126,22 +126,20 @@ struct kiocb {
struct eventfd_ctx *ki_eventfd;
};
-#define is_sync_kiocb(iocb) ((iocb)->ki_key == KIOCB_SYNC_KEY)
-#define init_sync_kiocb(x, filp) \
- do { \
- struct task_struct *tsk = current; \
- (x)->ki_flags = 0; \
- (x)->ki_users = 1; \
- (x)->ki_key = KIOCB_SYNC_KEY; \
- (x)->ki_filp = (filp); \
- (x)->ki_ctx = NULL; \
- (x)->ki_cancel = NULL; \
- (x)->ki_retry = NULL; \
- (x)->ki_dtor = NULL; \
- (x)->ki_obj.tsk = tsk; \
- (x)->ki_user_data = 0; \
- (x)->private = NULL; \
- } while (0)
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_key == KIOCB_SYNC_KEY;
+}
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_users = 1,
+ .ki_key = KIOCB_SYNC_KEY,
+ .ki_filp = filp,
+ .ki_obj.tsk = current,
+ };
+}
#define AIO_RING_MAGIC 0xa10a10a1
#define AIO_RING_COMPAT_FEATURES 1
@@ -161,8 +159,6 @@ struct aio_ring {
struct io_event io_events[0];
}; /* 128 bytes + ring size */
-#define aio_ring_avail(info, ring) (((ring)->head + (info)->nr - 1 - (ring)->tail) % (info)->nr)
-
#define AIO_RING_PAGES 8
struct aio_ring_info {
unsigned long mmap_base;
@@ -177,6 +173,12 @@ struct aio_ring_info {
struct page *internal_pages[AIO_RING_PAGES];
};
+static inline unsigned aio_ring_avail(struct aio_ring_info *info,
+ struct aio_ring *ring)
+{
+ return (ring->head + info->nr - 1 - ring->tail) % info->nr;
+}
+
struct kioctx {
atomic_t users;
int dead;
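
Caller-side sketch (not part of the patch): the calling convention of the new inlines is unchanged; a do_sync_read()-style wrapper still sets up the kiocb the same way. Names below are invented for the example.

#include <linux/aio.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t my_sync_read(struct file *filp, char __user *buf, size_t len,
                            loff_t *ppos)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct kiocb kiocb;
        ssize_t ret;

        init_sync_kiocb(&kiocb, filp);          /* now zero-fills the whole kiocb */
        kiocb.ki_pos = *ppos;
        kiocb.ki_left = len;
        kiocb.ki_nbytes = len;

        ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (ret == -EIOCBQUEUED)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
        return ret;
}
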
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 02549017212a..2a5f64a11b77 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -21,8 +21,9 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
-struct pl08x_lli;
struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
/* Bitmasks for selecting AHB ports for DMA transfers */
enum {
@@ -46,170 +47,29 @@ enum {
* devices with static assignments
* @muxval: a number usually used to poke into some mux regiser to
* mux in the signal to this channel
- * @cctl_opt: default options for the channel control register
+ * @cctl_memcpy: options for the channel control register for memcpy
+ * *** not used for slave channels ***
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
* this.
- * @circular_buffer: whether the buffer passed in is circular and
- * shall simply be looped round round (like a record baby round
- * round round round)
* @single: the device connected to this channel will request single DMA
* transfers, not bursts. (Bursts are default.)
* @periph_buses: the device connected to this channel is accessible via
* these buses (use PL08X_AHB1 | PL08X_AHB2).
*/
struct pl08x_channel_data {
- char *bus_id;
+ const char *bus_id;
int min_signal;
int max_signal;
u32 muxval;
- u32 cctl;
+ u32 cctl_memcpy;
dma_addr_t addr;
- bool circular_buffer;
bool single;
u8 periph_buses;
};
/**
- * Struct pl08x_bus_data - information of source or destination
- * busses for a transfer
- * @addr: current address
- * @maxwidth: the maximum width of a transfer on this bus
- * @buswidth: the width of this bus in bytes: 1, 2 or 4
- */
-struct pl08x_bus_data {
- dma_addr_t addr;
- u8 maxwidth;
- u8 buswidth;
-};
-
-/**
- * struct pl08x_phy_chan - holder for the physical channels
- * @id: physical index to this channel
- * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this physical channel
- * right now
- * @serving: the virtual channel currently being served by this physical
- * channel
- * @locked: channel unavailable for the system, e.g. dedicated to secure
- * world
- */
-struct pl08x_phy_chan {
- unsigned int id;
- void __iomem *base;
- spinlock_t lock;
- int signal;
- struct pl08x_dma_chan *serving;
- bool locked;
-};
-
-/**
- * struct pl08x_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct pl08x_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- size_t len;
- struct list_head node;
-};
-
-/**
- * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
- * @tx: async tx descriptor
- * @node: node for txd list for channels
- * @dsg_list: list of children sg's
- * @direction: direction of transfer
- * @llis_bus: DMA memory address (physical) start for the LLIs
- * @llis_va: virtual memory address start for the LLIs
- * @cctl: control reg values for current txd
- * @ccfg: config reg values for current txd
- */
-struct pl08x_txd {
- struct dma_async_tx_descriptor tx;
- struct list_head node;
- struct list_head dsg_list;
- enum dma_transfer_direction direction;
- dma_addr_t llis_bus;
- struct pl08x_lli *llis_va;
- /* Default cctl value for LLIs */
- u32 cctl;
- /*
- * Settings to be put into the physical channel when we
- * trigger this txd. Other registers are in llis_va[0].
- */
- u32 ccfg;
-};
-
-/**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
- * states
- * @PL08X_CHAN_IDLE: the channel is idle
- * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
- * channel, but the transfer is currently paused
- * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum pl08x_dma_chan_state {
- PL08X_CHAN_IDLE,
- PL08X_CHAN_RUNNING,
- PL08X_CHAN_PAUSED,
- PL08X_CHAN_WAITING,
-};
-
-/**
- * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
- * @chan: wrappped abstract channel
- * @phychan: the physical channel utilized by this channel, if there is one
- * @phychan_hold: if non-zero, hold on to the physical channel even if we
- * have no pending entries
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
- * @name: name of channel
- * @cd: channel platform data
- * @runtime_addr: address for RX/TX according to the runtime config
- * @runtime_direction: current direction of this channel according to
- * runtime config
- * @pend_list: queued transactions pending on this channel
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, paused, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
- * channels. Fill with 'true' if peripheral should be flow controller. Direction
- * will be selected at Runtime.
- * @waiting: a TX descriptor on this channel which is waiting for a physical
- * channel to become available
- */
-struct pl08x_dma_chan {
- struct dma_chan chan;
- struct pl08x_phy_chan *phychan;
- int phychan_hold;
- struct tasklet_struct tasklet;
- char *name;
- const struct pl08x_channel_data *cd;
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 src_cctl;
- u32 dst_cctl;
- enum dma_transfer_direction runtime_direction;
- struct list_head pend_list;
- struct pl08x_txd *at;
- spinlock_t lock;
- struct pl08x_driver_data *host;
- enum pl08x_dma_chan_state state;
- bool slave;
- bool device_fc;
- struct pl08x_txd *waiting;
-};
-
-/**
* struct pl08x_platform_data - the platform configuration for the PL08x
* PrimeCells.
* @slave_channels: the channels defined for the different devices on the
@@ -229,8 +89,8 @@ struct pl08x_platform_data {
const struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
- int (*get_signal)(struct pl08x_dma_chan *);
- void (*put_signal)(struct pl08x_dma_chan *);
+ int (*get_signal)(const struct pl08x_channel_data *);
+ void (*put_signal)(const struct pl08x_channel_data *, int);
u8 lli_buses;
u8 mem_buses;
};
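
Board-file sketch (not part of the patch) wiring up the new callback signatures: get_signal() now receives the channel data rather than the driver's (now private) channel struct, and put_signal() gets back the signal it handed out. Channel names and the mux policy are invented for the example.

#include <linux/amba/pl08x.h>
#include <linux/kernel.h>

static int board_get_signal(const struct pl08x_channel_data *cd)
{
        return cd->min_signal;          /* statically muxed: hand out the fixed signal */
}

static void board_put_signal(const struct pl08x_channel_data *cd, int signal)
{
        /* nothing to release for a static mux */
}

static struct pl08x_channel_data board_slave_channels[] = {
        {
                .bus_id = "uart0_tx",
                .min_signal = 0,
                .max_signal = 0,
                .periph_buses = PL08X_AHB1,
        },
};

static struct pl08x_platform_data board_pl08x_data = {
        .slave_channels = board_slave_channels,
        .num_slave_channels = ARRAY_SIZE(board_slave_channels),
        .get_signal = board_get_signal,
        .put_signal = board_put_signal,
        .lli_buses = PL08X_AHB1,
        .mem_buses = PL08X_AHB1,
};
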
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 22f292a917a3..36abf2aa7e68 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -130,6 +130,7 @@
#define AUDIT_LAST_KERN_ANOM_MSG 1799
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
+#define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
@@ -687,6 +688,8 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
+extern void audit_log_link_denied(const char *operation,
+ struct path *link);
extern void audit_log_lost(const char *message);
#ifdef CONFIG_SECURITY
extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
@@ -716,6 +719,7 @@ extern int audit_enabled;
#define audit_log_untrustedstring(a,s) do { ; } while (0)
#define audit_log_d_path(b, p, d) do { ; } while (0)
#define audit_log_key(b, k) do { ; } while (0)
+#define audit_log_link_denied(o, l) do { ; } while (0)
#define audit_log_secctx(b,s) do { ; } while (0)
#define audit_enabled 0
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index b1038bd686ac..2a9a9abc9126 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -10,13 +10,14 @@
#include <linux/percpu_counter.h>
#include <linux/log2.h>
-#include <linux/proportions.h>
+#include <linux/flex_proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>
+#include <linux/sysctl.h>
struct page;
struct device;
@@ -89,7 +90,7 @@ struct backing_dev_info {
unsigned long dirty_ratelimit;
unsigned long balanced_dirty_ratelimit;
- struct prop_local_percpu completions;
+ struct fprop_local_percpu completions;
int dirty_exceeded;
unsigned int min_ratio;
@@ -123,7 +124,6 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
-void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
@@ -304,6 +304,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
+int pdflush_proc_obsolete(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 3c80885fa829..d323a4b4143c 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -89,6 +89,12 @@
#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
+#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
+#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
+#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
+#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
+#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
+#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0edb65dd8edd..7b7ac9ccec7a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -160,6 +160,7 @@ enum rq_flag_bits {
__REQ_FLUSH_SEQ, /* request for flush sequence */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
+ __REQ_KERNEL, /* direct IO to kernel pages */
__REQ_NR_BITS, /* stops here */
};
@@ -201,5 +202,6 @@ enum rq_flag_bits {
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
#define REQ_SECURE (1 << __REQ_SECURE)
+#define REQ_KERNEL (1 << __REQ_KERNEL)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 07954b05b86c..4a2ab7c85393 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,16 +46,23 @@ struct blkcg_gq;
struct request;
typedef void (rq_end_io_fn)(struct request *, int);
+#define BLK_RL_SYNCFULL (1U << 0)
+#define BLK_RL_ASYNCFULL (1U << 1)
+
struct request_list {
+ struct request_queue *q; /* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+ struct blkcg_gq *blkg; /* blkg this request pool belongs to */
+#endif
/*
* count[], starved[], and wait[] are indexed by
* BLK_RW_SYNC/BLK_RW_ASYNC
*/
- int count[2];
- int starved[2];
- int elvpriv;
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
+ int count[2];
+ int starved[2];
+ mempool_t *rq_pool;
+ wait_queue_head_t wait[2];
+ unsigned int flags;
};
/*
@@ -138,6 +145,7 @@ struct request {
struct hd_struct *part;
unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
unsigned long long start_time_ns;
unsigned long long io_start_time_ns; /* when passed to hardware */
#endif
@@ -282,11 +290,16 @@ struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
+ int nr_rqs[2]; /* # allocated [a]sync rqs */
+ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
/*
- * the queue request freelist, one for reads and one for writes
+ * If blkcg is not used, @q->root_rl serves all requests. If blkcg
+ * is used, root blkg allocates from @q->root_rl and all other
+ * blkgs from their own blkg->rl. Which one to use should be
+ * determined using bio_request_list().
*/
- struct request_list rq;
+ struct request_list root_rl;
request_fn_proc *request_fn;
make_request_fn *make_request_fn;
@@ -561,27 +574,25 @@ static inline bool rq_is_sync(struct request *rq)
return rw_is_sync(rq->cmd_flags);
}
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
- return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ return rl->flags & flag;
}
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
- else
- queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags |= flag;
}
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
- if (sync)
- queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
- else
- queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags &= ~flag;
}
@@ -590,7 +601,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
* it already be started by driver.
*/
#define RQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
#define rq_mergeable(rq) \
(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
(((rq)->cmd_flags & REQ_DISCARD) || \
@@ -883,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
@@ -911,11 +924,15 @@ struct blk_plug {
};
#define BLK_MAX_REQUEST_COUNT 16
+struct blk_plug_cb;
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
struct list_head list;
- void (*callback)(struct blk_plug_cb *);
+ blk_plug_cb_fn callback;
+ void *data;
};
-
+extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
+ void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);
@@ -1124,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
& (lim->discard_granularity - 1);
}
+static inline int bdev_discard_alignment(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (bdev != bdev->bd_contains)
+ return bdev->bd_part->discard_alignment;
+
+ return q->limits.discard_alignment;
+}
+
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
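The per-request-list BLK_RL_SYNCFULL/ASYNCFULL bits above replace the old queue-wide QUEUE_FLAG_SYNCFULL/ASYNCFULL flags, so each blkcg request pool tracks its own congestion state. A minimal standalone sketch of the same flag logic (the toy struct and main() are invented for illustration; this is not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

/* toy stand-in for struct request_list; only the flags field matters here */
struct request_list {
	unsigned int flags;
};

static bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static void blk_set_rl_full(struct request_list *rl, bool sync)
{
	rl->flags |= sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
}

static void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	rl->flags &= ~(sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL);
}

int main(void)
{
	struct request_list rl = { .flags = 0 };

	blk_set_rl_full(&rl, true);		/* mark the sync pool congested */
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* 1 */
	printf("async full: %d\n", blk_rl_full(&rl, false));	/* 0 */
	blk_clear_rl_full(&rl, true);
	printf("sync full:  %d\n", blk_rl_full(&rl, true));	/* 0 */
	return 0;
}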
diff --git a/include/linux/blkpg.h b/include/linux/blkpg.h
index faf8a45af210..a8519446c111 100644
--- a/include/linux/blkpg.h
+++ b/include/linux/blkpg.h
@@ -40,6 +40,7 @@ struct blkpg_ioctl_arg {
/* The subfunctions (for the op field) */
#define BLKPG_ADD_PARTITION 1
#define BLKPG_DEL_PARTITION 2
+#define BLKPG_RESIZE_PARTITION 3
/* Sizes of name fields. Unused at present. */
#define BLKPG_DEVNAMELTH 64
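The new BLKPG_RESIZE_PARTITION op lets userspace grow or shrink an in-use partition without re-reading the whole table. A hedged userspace sketch (device path and sizes are made-up example values; the struct layouts are the ones defined elsewhere in this header):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkpg.h>

int main(void)
{
	struct blkpg_partition part = {
		.pno	= 1,			/* partition number to resize */
		.start	= 1048576,		/* byte offset, unchanged */
		.length	= 8ULL << 30,		/* new size: 8 GiB */
	};
	struct blkpg_ioctl_arg arg = {
		.op	 = BLKPG_RESIZE_PARTITION,
		.datalen = sizeof(part),
		.data	 = &part,
	};
	int fd = open("/dev/sdx", O_RDONLY);	/* whole-disk node, example name */

	if (fd < 0 || ioctl(fd, BLKPG, &arg) < 0) {
		perror("BLKPG_RESIZE_PARTITION");
		return 1;
	}
	close(fd);
	return 0;
}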
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index f55ab8cdc106..4d0fb3df2f4a 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -67,7 +67,6 @@ void bsg_job_done(struct bsg_job *job, int result,
int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
bsg_job_fn *job_fn, int dd_job_size);
void bsg_request_fn(struct request_queue *q);
-void bsg_remove_queue(struct request_queue *q);
void bsg_goose_queue(struct request_queue *q);
#endif
diff --git a/include/linux/can.h b/include/linux/can.h
index 018055efc034..e52958d7c2d1 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -74,20 +74,21 @@ struct can_frame {
/*
* defined bits for canfd_frame.flags
*
- * As the default for CAN FD should be to support the high data rate in the
- * payload section of the frame (HDR) and to support up to 64 byte in the
- * data section (EDL) the bits are only set in the non-default case.
- * Btw. as long as there's no real implementation for CAN FD network driver
- * these bits are only preliminary.
+ * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to
+ * be set in the CAN frame bitstream on the wire. The EDL bit switch turns
+ * the CAN controller's bitstream processor into the CAN FD mode which creates
+ * two new options within the CAN FD frame specification:
*
- * RX: NOHDR/NOEDL - info about received CAN FD frame
- * ESI - bit from originating CAN controller
- * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
- * ESI - bit is set by local CAN controller
+ * Bit Rate Switch - to indicate a second bitrate is/was used for the payload
+ * Error State Indicator - represents the error state of the transmitting node
+ *
+ * As the CANFD_ESI bit is internally generated by the transmitting CAN
+ * controller, only the CANFD_BRS bit is relevant for real CAN controllers when
+ * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make
+ * sense for virtual CAN interfaces to test applications with echoed frames.
*/
-#define CANFD_NOHDR 0x01 /* frame without high data rate */
-#define CANFD_NOEDL 0x02 /* frame without extended data length */
-#define CANFD_ESI 0x04 /* error state indicator */
+#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */
+#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */
/**
* struct canfd_frame - CAN flexible data rate frame structure
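To make the new flag semantics concrete, here is a minimal userspace sketch that builds a CAN FD frame with the bit rate switch requested. It assumes the struct canfd_frame layout (can_id, len, flags, data) defined further down in this same header; ID, payload and length are invented example values, and sending over a raw socket is left out:

#include <stdio.h>
#include <string.h>
#include <linux/can.h>		/* struct canfd_frame, CANFD_BRS, CANFD_ESI */

int main(void)
{
	struct canfd_frame txf;

	memset(&txf, 0, sizeof(txf));
	txf.can_id = 0x123;
	txf.len = 12;			/* CAN FD allows payloads up to 64 bytes */
	txf.flags |= CANFD_BRS;		/* request the second (data phase) bitrate */
	memcpy(txf.data, "hello canfd", 12);

	/* On reception, CANFD_ESI reports the error state of the sending node. */
	if (txf.flags & CANFD_ESI)
		printf("transmitting node is error passive\n");

	printf("flags: 0x%02x\n", txf.flags);
	return 0;
}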
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
new file mode 100644
index 000000000000..dad579b0c0e6
--- /dev/null
+++ b/include/linux/ceph/ceph_features.h
@@ -0,0 +1,27 @@
+#ifndef __CEPH_FEATURES
+#define __CEPH_FEATURES
+
+/*
+ * feature bits
+ */
+#define CEPH_FEATURE_UID (1<<0)
+#define CEPH_FEATURE_NOSRCADDR (1<<1)
+#define CEPH_FEATURE_MONCLOCKCHECK (1<<2)
+#define CEPH_FEATURE_FLOCK (1<<3)
+#define CEPH_FEATURE_SUBSCRIBE2 (1<<4)
+#define CEPH_FEATURE_MONNAMES (1<<5)
+#define CEPH_FEATURE_RECONNECT_SEQ (1<<6)
+#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
+/* bits 8-17 defined by user-space; not supported yet here */
+#define CEPH_FEATURE_CRUSH_TUNABLES (1<<18)
+
+/*
+ * Features supported.
+ */
+#define CEPH_FEATURES_SUPPORTED_DEFAULT \
+ (CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_CRUSH_TUNABLES)
+
+#define CEPH_FEATURES_REQUIRED_DEFAULT \
+ (CEPH_FEATURE_NOSRCADDR)
+#endif
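An illustrative userspace sketch of the kind of mask test done against a peer's feature word; only the bit definitions come from the header above, the negotiation flow shown here is a simplification:

#include <stdio.h>

#define CEPH_FEATURE_NOSRCADDR		(1<<1)
#define CEPH_FEATURE_CRUSH_TUNABLES	(1<<18)

#define CEPH_FEATURES_SUPPORTED_DEFAULT \
	(CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_CRUSH_TUNABLES)
#define CEPH_FEATURES_REQUIRED_DEFAULT	(CEPH_FEATURE_NOSRCADDR)

int main(void)
{
	unsigned int peer = CEPH_FEATURE_NOSRCADDR;	/* pretend feature word from the peer */

	/* The connection can proceed only if the peer offers every required bit. */
	if ((peer & CEPH_FEATURES_REQUIRED_DEFAULT) != CEPH_FEATURES_REQUIRED_DEFAULT) {
		printf("peer lacks required features\n");
		return 1;
	}

	/* Optional features are used only when both sides advertise them. */
	if (peer & CEPH_FEATURES_SUPPORTED_DEFAULT & CEPH_FEATURE_CRUSH_TUNABLES)
		printf("using CRUSH tunables\n");
	else
		printf("peer does not support CRUSH tunables\n");
	return 0;
}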
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index e81ab30d4896..d021610efd65 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -35,20 +35,6 @@
/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
#define CEPH_MAX_MON 31
-
-/*
- * feature bits
- */
-#define CEPH_FEATURE_UID (1<<0)
-#define CEPH_FEATURE_NOSRCADDR (1<<1)
-#define CEPH_FEATURE_MONCLOCKCHECK (1<<2)
-#define CEPH_FEATURE_FLOCK (1<<3)
-#define CEPH_FEATURE_SUBSCRIBE2 (1<<4)
-#define CEPH_FEATURE_MONNAMES (1<<5)
-#define CEPH_FEATURE_RECONNECT_SEQ (1<<6)
-#define CEPH_FEATURE_DIRLAYOUTHASH (1<<7)
-
-
/*
* ceph_file_layout - describe data layout for a file/inode
*/
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index d8615dee5808..4bbf2db45f46 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -1,6 +1,7 @@
#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H
+#include <linux/err.h>
#include <linux/bug.h>
#include <linux/time.h>
#include <asm/unaligned.h>
@@ -85,6 +86,52 @@ static inline int ceph_has_room(void **p, void *end, size_t n)
} while (0)
/*
+ * Allocate a buffer big enough to hold the wire-encoded string, and
+ * decode the string into it. The resulting string will always be
+ * terminated with '\0'. If successful, *p will be advanced
+ * past the decoded data. Also, if lenp is not a null pointer, the
+ * length (not including the terminating '\0') will be recorded in
+ * *lenp. Note that a zero-length string is a valid return value.
+ *
+ * Returns a pointer to the newly-allocated string buffer, or a
+ * pointer-coded errno if an error occurs. Neither *p nor *lenp
+ * will have been updated if an error is returned.
+ *
+ * There are two possible failures:
+ * - converting the string would require accessing memory at or
+ * beyond the "end" pointer provided (-ERANGE)
+ * - memory could not be allocated for the result (-ENOMEM)
+ */
+static inline char *ceph_extract_encoded_string(void **p, void *end,
+ size_t *lenp, gfp_t gfp)
+{
+ u32 len;
+ void *sp = *p;
+ char *buf;
+
+ ceph_decode_32_safe(&sp, end, len, bad);
+ if (!ceph_has_room(&sp, end, len))
+ goto bad;
+
+ buf = kmalloc(len + 1, gfp);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ if (len)
+ memcpy(buf, sp, len);
+ buf[len] = '\0';
+
+ *p = (char *) *p + sizeof (u32) + len;
+ if (lenp)
+ *lenp = (size_t) len;
+
+ return buf;
+
+bad:
+ return ERR_PTR(-ERANGE);
+}
+
+/*
* struct ceph_timespec <-> struct timespec
*/
static inline void ceph_decode_timespec(struct timespec *ts,
@@ -151,7 +198,7 @@ static inline void ceph_encode_filepath(void **p, void *end,
u64 ino, const char *path)
{
u32 len = path ? strlen(path) : 0;
- BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end);
+ BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
ceph_encode_8(p, 1);
ceph_encode_64(p, ino);
ceph_encode_32(p, len);
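To show what ceph_extract_encoded_string() does with the buffer, here is a standalone userspace sketch of the same length-prefixed format. The function name and error handling are simplified assumptions: the kernel helper returns ERR_PTR() codes and uses le32 decoding, while this toy reads the length in host byte order.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *extract_encoded_string(const void **p, const void *end, size_t *lenp)
{
	const unsigned char *sp = *p;
	const unsigned char *ep = end;
	uint32_t len;
	char *buf;

	if ((size_t)(ep - sp) < sizeof(len))
		return NULL;		/* would read past "end": -ERANGE in the kernel */
	memcpy(&len, sp, sizeof(len));	/* host order here; the kernel uses le32_to_cpu */
	sp += sizeof(len);
	if ((size_t)(ep - sp) < len)
		return NULL;

	buf = malloc(len + 1);
	if (!buf)
		return NULL;		/* -ENOMEM in the kernel */
	if (len)
		memcpy(buf, sp, len);
	buf[len] = '\0';

	*p = sp + len;			/* advance only on success */
	if (lenp)
		*lenp = len;
	return buf;
}

int main(void)
{
	/* 5-byte string "mon.a", preceded by its 32-bit length */
	unsigned char wire[] = { 5, 0, 0, 0, 'm', 'o', 'n', '.', 'a' };
	const void *p = wire;
	size_t len;
	char *s = extract_encoded_string(&p, wire + sizeof(wire), &len);

	if (s) {
		printf("decoded \"%s\" (%zu bytes)\n", s, len);
		free(s);
	}
	return 0;
}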
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index e71d683982a6..42624789b06f 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -23,12 +23,6 @@
#include "ceph_fs.h"
/*
- * Supported features
- */
-#define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR
-#define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR
-
-/*
* mount options
*/
#define CEPH_OPT_FSID (1<<0)
@@ -132,7 +126,7 @@ struct ceph_client {
u32 supported_features;
u32 required_features;
- struct ceph_messenger *msgr; /* messenger instance */
+ struct ceph_messenger msgr; /* messenger instance */
struct ceph_mon_client monc;
struct ceph_osd_client osdc;
@@ -160,7 +154,7 @@ struct ceph_client {
struct ceph_snap_context {
atomic_t nref;
u64 seq;
- int num_snaps;
+ u32 num_snaps;
u64 snaps[];
};
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 44c87e731e9d..189ae0637634 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -31,9 +31,6 @@ struct ceph_connection_operations {
int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
int (*invalidate_authorizer)(struct ceph_connection *con);
- /* protocol version mismatch */
- void (*bad_proto) (struct ceph_connection *con);
-
/* there was some error on the socket (disconnect, whatever) */
void (*fault) (struct ceph_connection *con);
@@ -53,6 +50,7 @@ struct ceph_messenger {
struct ceph_entity_inst inst; /* my name+address */
struct ceph_entity_addr my_enc_addr;
+ atomic_t stopping;
bool nocrc;
/*
@@ -80,7 +78,10 @@ struct ceph_msg {
unsigned nr_pages; /* size of page array */
unsigned page_alignment; /* io offset in first page */
struct ceph_pagelist *pagelist; /* instead of pages */
+
+ struct ceph_connection *con;
struct list_head list_head;
+
struct kref kref;
struct bio *bio; /* instead of pages/pagelist */
struct bio *bio_iter; /* bio iterator */
@@ -106,23 +107,6 @@ struct ceph_msg_pos {
#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
/*
- * ceph_connection state bit flags
- */
-#define LOSSYTX 0 /* we can close channel or drop messages on errors */
-#define CONNECTING 1
-#define NEGOTIATING 2
-#define KEEPALIVE_PENDING 3
-#define WRITE_PENDING 4 /* we have data ready to send */
-#define STANDBY 8 /* no outgoing messages, socket closed. we keep
- * the ceph_connection around to maintain shared
- * state with the peer. */
-#define CLOSED 10 /* we've closed the connection */
-#define SOCK_CLOSED 11 /* socket state changed to closed */
-#define OPENING 13 /* open connection w/ (possibly new) peer */
-#define DEAD 14 /* dead, about to kfree */
-#define BACKOFF 15
-
-/*
* A single connection with another host.
*
* We maintain a queue of outgoing messages, and some session state to
@@ -131,18 +115,22 @@ struct ceph_msg_pos {
*/
struct ceph_connection {
void *private;
- atomic_t nref;
const struct ceph_connection_operations *ops;
struct ceph_messenger *msgr;
+
+ atomic_t sock_state;
struct socket *sock;
- unsigned long state; /* connection state (see flags above) */
+ struct ceph_entity_addr peer_addr; /* peer address */
+ struct ceph_entity_addr peer_addr_for_me;
+
+ unsigned long flags;
+ unsigned long state;
const char *error_msg; /* error message, if any */
- struct ceph_entity_addr peer_addr; /* peer address */
struct ceph_entity_name peer_name; /* peer name */
- struct ceph_entity_addr peer_addr_for_me;
+
unsigned peer_features;
u32 connect_seq; /* identify the most recent connection
attempt for this connection, client */
@@ -207,24 +195,26 @@ extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
extern void ceph_msgr_flush(void);
-extern struct ceph_messenger *ceph_messenger_create(
- struct ceph_entity_addr *myaddr,
- u32 features, u32 required);
-extern void ceph_messenger_destroy(struct ceph_messenger *);
+extern void ceph_messenger_init(struct ceph_messenger *msgr,
+ struct ceph_entity_addr *myaddr,
+ u32 supported_features,
+ u32 required_features,
+ bool nocrc);
-extern void ceph_con_init(struct ceph_messenger *msgr,
- struct ceph_connection *con);
+extern void ceph_con_init(struct ceph_connection *con, void *private,
+ const struct ceph_connection_operations *ops,
+ struct ceph_messenger *msgr);
extern void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
struct ceph_entity_addr *addr);
extern bool ceph_con_opened(struct ceph_connection *con);
extern void ceph_con_close(struct ceph_connection *con);
extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
-extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
-extern void ceph_con_revoke_message(struct ceph_connection *con,
- struct ceph_msg *msg);
+
+extern void ceph_msg_revoke(struct ceph_msg *msg);
+extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
+
extern void ceph_con_keepalive(struct ceph_connection *con);
-extern struct ceph_connection *ceph_con_get(struct ceph_connection *con);
-extern void ceph_con_put(struct ceph_connection *con);
extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
bool can_fail);
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index 545f85917780..2113e3850a4e 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -70,7 +70,7 @@ struct ceph_mon_client {
bool hunting;
int cur_mon; /* last monitor i contacted */
unsigned long sub_sent, sub_renew_after;
- struct ceph_connection *con;
+ struct ceph_connection con;
bool have_fsid;
/* pending generic requests */
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
index a362605f9368..09fa96b43436 100644
--- a/include/linux/ceph/msgpool.h
+++ b/include/linux/ceph/msgpool.h
@@ -11,10 +11,11 @@
struct ceph_msgpool {
const char *name;
mempool_t *pool;
+ int type; /* preallocated message type */
int front_len; /* preallocated payload size */
};
-extern int ceph_msgpool_init(struct ceph_msgpool *pool,
+extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int size, bool blocking,
const char *name);
extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 0bd390ce98b2..dfae957398c3 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -31,7 +31,7 @@ SUBSYS(cpuacct)
/* */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
SUBSYS(mem_cgroup)
#endif
@@ -72,3 +72,9 @@ SUBSYS(net_prio)
#endif
/* */
+
+#ifdef CONFIG_CGROUP_HUGETLB
+SUBSYS(hugetlb)
+#endif
+
+/* */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 2fd6a4234531..b3ac22d0fc1f 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -85,6 +85,43 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
#endif
/**
+ * clk_prepare - prepare a clock source
+ * @clk: clock source
+ *
+ * This prepares the clock source for use.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+int clk_prepare(struct clk *clk);
+#else
+static inline int clk_prepare(struct clk *clk)
+{
+ might_sleep();
+ return 0;
+}
+#endif
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: clock source
+ *
+ * This undoes a previously prepared clock. The caller must balance
+ * the number of prepare and unprepare calls.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+void clk_unprepare(struct clk *clk);
+#else
+static inline void clk_unprepare(struct clk *clk)
+{
+ might_sleep();
+}
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+/**
* clk_get - lookup and obtain a reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
@@ -122,24 +159,6 @@ struct clk *clk_get(struct device *dev, const char *id);
struct clk *devm_clk_get(struct device *dev, const char *id);
/**
- * clk_prepare - prepare a clock source
- * @clk: clock source
- *
- * This prepares the clock source for use.
- *
- * Must not be called from within atomic context.
- */
-#ifdef CONFIG_HAVE_CLK_PREPARE
-int clk_prepare(struct clk *clk);
-#else
-static inline int clk_prepare(struct clk *clk)
-{
- might_sleep();
- return 0;
-}
-#endif
-
-/**
* clk_enable - inform the system when the clock source should be running.
* @clk: clock source
*
@@ -167,47 +186,6 @@ int clk_enable(struct clk *clk);
*/
void clk_disable(struct clk *clk);
-
-/**
- * clk_unprepare - undo preparation of a clock source
- * @clk: clock source
- *
- * This undoes a previously prepared clock. The caller must balance
- * the number of prepare and unprepare calls.
- *
- * Must not be called from within atomic context.
- */
-#ifdef CONFIG_HAVE_CLK_PREPARE
-void clk_unprepare(struct clk *clk);
-#else
-static inline void clk_unprepare(struct clk *clk)
-{
- might_sleep();
-}
-#endif
-
-/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
-static inline int clk_prepare_enable(struct clk *clk)
-{
- int ret;
-
- ret = clk_prepare(clk);
- if (ret)
- return ret;
- ret = clk_enable(clk);
- if (ret)
- clk_unprepare(clk);
-
- return ret;
-}
-
-/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
-static inline void clk_disable_unprepare(struct clk *clk)
-{
- clk_disable(clk);
- clk_unprepare(clk);
-}
-
/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
* This is only valid once the clock source has been enabled.
@@ -298,6 +276,78 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+#else /* !CONFIG_HAVE_CLK */
+
+static inline struct clk *clk_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline void clk_put(struct clk *clk) {}
+
+static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+
+static inline int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_disable(struct clk *clk) {}
+
+static inline unsigned long clk_get_rate(struct clk *clk)
+{
+ return 0;
+}
+
+static inline int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+
+static inline struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+
+#endif
+
+/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
+static inline int clk_prepare_enable(struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare(clk);
+ if (ret)
+ return ret;
+ ret = clk_enable(clk);
+ if (ret)
+ clk_unprepare(clk);
+
+ return ret;
+}
+
+/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
+static inline void clk_disable_unprepare(struct clk *clk)
+{
+ clk_disable(clk);
+ clk_unprepare(clk);
+}
+
/**
* clk_add_alias - add a new clock alias
* @alias: name for clock alias
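With clk_prepare_enable()/clk_disable_unprepare() now visible even when CONFIG_HAVE_CLK is off, consumers can pair them unconditionally: the stubs turn the whole sequence into a no-op. A hedged sketch of a hypothetical driver fragment (the foo_* names and the "bus" clock id are invented for illustration):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *foo_clk;	/* example state; a real driver keeps this in its private data */

static int foo_clock_on(struct device *dev)
{
	int ret;

	foo_clk = devm_clk_get(dev, "bus");
	if (IS_ERR(foo_clk))
		return PTR_ERR(foo_clk);

	/* May sleep: call from process context, never from an IRQ handler. */
	ret = clk_prepare_enable(foo_clk);
	if (ret)
		return ret;

	return 0;
}

static void foo_clock_off(void)
{
	clk_disable_unprepare(foo_clk);
}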
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d60..ef658147e4e8 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- bool sync);
+ bool sync, bool *contended);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -58,13 +58,13 @@ static inline bool compaction_deferred(struct zone *zone, int order)
if (++zone->compact_considered > defer_limit)
zone->compact_considered = defer_limit;
- return zone->compact_considered < (1UL << zone->compact_defer_shift);
+ return zone->compact_considered < defer_limit;
}
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync)
+ bool sync, bool *contended)
{
return COMPACT_CONTINUE;
}
@@ -85,7 +85,7 @@ static inline void defer_compaction(struct zone *zone, int order)
static inline bool compaction_deferred(struct zone *zone, int order)
{
- return 1;
+ return true;
}
#endif /* CONFIG_COMPACTION */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 4e890394ef99..09b28b7369d7 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -265,9 +265,9 @@ long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
#else
long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
- size_t msgsz, int msgflg);
+ compat_ssize_t msgsz, int msgflg);
long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
- size_t msgsz, long msgtyp, int msgflg);
+ compat_ssize_t msgsz, long msgtyp, int msgflg);
long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
#endif
long compat_sys_msgctl(int first, int second, void __user *uptr);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 89dcd30ac8ea..279b1eaa8b73 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -58,6 +58,7 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
+#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -101,6 +102,12 @@ struct cpuidle_device {
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ int safe_state_index;
+ cpumask_t coupled_cpus;
+ struct cpuidle_coupled *coupled;
+#endif
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -185,6 +192,14 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; }
#endif
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#else
+static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+}
+#endif
+
/******************************
* CPUIDLE GOVERNOR INTERFACE *
******************************/
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index a2c819d3c96e..032560295fcb 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -272,6 +272,8 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
+ * Returns 1 if @cpu is set in @cpumask, else returns 0
+ *
* No static inline type checking - see Subtlety (1) above.
*/
#define cpumask_test_cpu(cpu, cpumask) \
@@ -282,6 +284,8 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
+ * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ *
* test_and_set_bit wrapper for cpumasks.
*/
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
@@ -294,6 +298,8 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
+ * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ *
* test_and_clear_bit wrapper for cpumasks.
*/
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
@@ -324,6 +330,8 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
+ *
+ * If *@dstp is empty, returns 0, else returns 1
*/
static inline int cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -365,6 +373,8 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @dstp: the cpumask result
* @src1p: the first input
* @src2p: the second input
+ *
+ * If *@dstp is empty, returns 0, else returns 1
*/
static inline int cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -414,6 +424,8 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* cpumask_subset - (*src1p & ~*src2p) == 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
*/
static inline int cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -579,9 +591,8 @@ static inline int cpulist_scnprintf(char *buf, int len,
}
/**
- * cpulist_parse_user - extract a cpumask from a user string of ranges
+ * cpulist_parse - extract a cpumask from a user string of ranges
* @buf: the buffer to extract from
- * @len: the length of the buffer
* @dstp: the cpumask to set.
*
* Returns -errno, or 0 for success.
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index 7c4750811b96..25baa287cff7 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -154,6 +154,14 @@ struct crush_map {
__s32 max_buckets;
__u32 max_rules;
__s32 max_devices;
+
+ /* choose local retries before re-descent */
+ __u32 choose_local_tries;
+ /* choose local attempts using a fallback permutation before
+ * re-descent */
+ __u32 choose_local_fallback_tries;
+ /* choose attempts before giving up */
+ __u32 choose_total_tries;
};
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 98f34b886f95..38d27a10aa5d 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -66,14 +66,13 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti,
struct request *clone, int error,
union map_info *map_context);
-typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- char *result, unsigned int maxlen);
+ unsigned status_flags, char *result, unsigned maxlen);
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
@@ -139,7 +138,6 @@ struct target_type {
dm_map_request_fn map_rq;
dm_endio_fn end_io;
dm_request_endio_fn rq_end_io;
- dm_flush_fn flush;
dm_presuspend_fn presuspend;
dm_postsuspend_fn postsuspend;
dm_preresume_fn preresume;
@@ -188,8 +186,8 @@ struct dm_target {
sector_t begin;
sector_t len;
- /* Always a power of 2 */
- sector_t split_io;
+ /* If non-zero, maximum size of I/O submitted to a target. */
+ uint32_t max_io_len;
/*
* A number of zero-length barrier requests that will be submitted
@@ -214,15 +212,27 @@ struct dm_target {
char *error;
/*
+ * Set if this target needs to receive flushes regardless of
+ * whether or not its underlying devices have support.
+ */
+ bool flush_supported:1;
+
+ /*
* Set if this target needs to receive discards regardless of
* whether or not its underlying devices have support.
*/
- unsigned discards_supported:1;
+ bool discards_supported:1;
+
+ /*
+ * Set if the target requires discard requests to be split
+ * on max_io_len boundary.
+ */
+ bool split_discard_requests:1;
/*
* Set if this target does not return zeroes on discarded blocks.
*/
- unsigned discard_zeroes_data_unsupported:1;
+ bool discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
@@ -360,6 +370,11 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
int dm_table_complete(struct dm_table *t);
/*
+ * Target may require that it is never sent I/O larger than len.
+ */
+int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
+
+/*
* Table reference counting.
*/
struct dm_table *dm_get_live_table(struct mapped_device *md);
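A hedged sketch of how a target constructor would use the new per-target limits: max_io_len replaces split_io and must be set through the checked helper, while the bool:1 flags request flush/discard pass-through. The foo_ctr name and the 16384-sector (8 MiB) limit are invented for illustration:

static int foo_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;

	/* Never send this target more than 8 MiB (16384 sectors) per bio. */
	r = dm_set_target_max_io_len(ti, 16384);
	if (r)
		return r;

	ti->flush_supported = true;		/* receive flushes even if the
						 * underlying devices lack them */
	ti->discards_supported = true;
	ti->split_discard_requests = true;	/* split discards on max_io_len */

	return 0;
}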
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 75fd5573516e..91e3a360f611 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 22
+#define DM_VERSION_MINOR 23
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2011-10-19)"
+#define DM_VERSION_EXTRA "-ioctl (2012-07-25)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -307,6 +307,8 @@ enum {
/*
* Set this to suspend without flushing queued ios.
+ * Also disables flushing uncommitted changes in the thin target before
+ * generating statistics for DM_TABLE_STATUS and DM_DEV_WAIT.
*/
#define DM_NOFLUSH_FLAG (1 << 11) /* In */
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 547ab568d3ae..f83f793223ff 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -15,6 +15,8 @@ enum dma_attr {
DMA_ATTR_WEAK_ORDERING,
DMA_ATTR_WRITE_COMBINE,
DMA_ATTR_NON_CONSISTENT,
+ DMA_ATTR_NO_KERNEL_MAPPING,
+ DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_MAX,
};
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dfc099e56a66..94af41858513 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -18,6 +18,9 @@ struct dma_map_ops {
int (*mmap)(struct device *, struct vm_area_struct *,
void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
+ dma_addr_t, size_t, struct dma_attrs *attrs);
+
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 91ba3bae42ee..bab9f8473dc1 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -13,9 +13,11 @@
#define _LINUX_EDAC_H_
#include <linux/atomic.h>
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
struct device;
@@ -49,7 +51,19 @@ static inline void opstate_init(void)
#define EDAC_MC_LABEL_LEN 31
#define MC_PROC_NAME_MAX_LEN 7
-/* memory devices */
+/**
+ * enum dev_type - describe the type of memory DRAM chips used at the stick
+ * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detecting it
+ * @DEV_X1: 1 bit for data
+ * @DEV_X2: 2 bits for data
+ * @DEV_X4: 4 bits for data
+ * @DEV_X8: 8 bits for data
+ * @DEV_X16: 16 bits for data
+ * @DEV_X32: 32 bits for data
+ * @DEV_X64: 64 bits for data
+ *
+ * Typical values are x4 and x8.
+ */
enum dev_type {
DEV_UNKNOWN = 0,
DEV_X1,
@@ -167,18 +181,30 @@ enum mem_type {
#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
-/* chipset Error Detection and Correction capabilities and mode */
+/**
+ * enum edac_type - Error Detection and Correction capabilities and mode
+ * @EDAC_UNKNOWN: Unknown if ECC is available
+ * @EDAC_NONE: Doesn't support ECC
+ * @EDAC_RESERVED: Reserved ECC type
+ * @EDAC_PARITY: Detects parity errors
+ * @EDAC_EC: Error Checking - no correction
+ * @EDAC_SECDED: Single bit error correction, Double detection
+ * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist?
+ * @EDAC_S4ECD4ED: Chipkill x4 devices
+ * @EDAC_S8ECD8ED: Chipkill x8 devices
+ * @EDAC_S16ECD16ED: Chipkill x16 devices
+ */
enum edac_type {
- EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
- EDAC_NONE, /* Doesn't support ECC */
- EDAC_RESERVED, /* Reserved ECC type */
- EDAC_PARITY, /* Detects parity errors */
- EDAC_EC, /* Error Checking - no correction */
- EDAC_SECDED, /* Single bit error correction, Double detection */
- EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */
- EDAC_S4ECD4ED, /* Chipkill x4 devices */
- EDAC_S8ECD8ED, /* Chipkill x8 devices */
- EDAC_S16ECD16ED, /* Chipkill x16 devices */
+ EDAC_UNKNOWN = 0,
+ EDAC_NONE,
+ EDAC_RESERVED,
+ EDAC_PARITY,
+ EDAC_EC,
+ EDAC_SECDED,
+ EDAC_S2ECD2ED,
+ EDAC_S4ECD4ED,
+ EDAC_S8ECD8ED,
+ EDAC_S16ECD16ED,
};
#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
@@ -191,18 +217,30 @@ enum edac_type {
#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
-/* scrubbing capabilities */
+/**
+ * enum scrub_type - scrubbing capabilities
+ * @SCRUB_UNKNOWN: Unknown if scrubber is available
+ * @SCRUB_NONE: No scrubber
+ * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing
+ * @SCRUB_SW_SRC: Software scrub only errors
+ * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error
+ * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable
+ * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing
+ * @SCRUB_HW_SRC: Hardware scrub only errors
+ * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error
+ * @SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable
+ */
enum scrub_type {
- SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
- SCRUB_NONE, /* No scrubber */
- SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */
- SCRUB_SW_SRC, /* Software scrub only errors */
- SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */
- SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */
- SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */
- SCRUB_HW_SRC, /* Hardware scrub only errors */
- SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */
- SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */
+ SCRUB_UNKNOWN = 0,
+ SCRUB_NONE,
+ SCRUB_SW_PROG,
+ SCRUB_SW_SRC,
+ SCRUB_SW_PROG_SRC,
+ SCRUB_SW_TUNABLE,
+ SCRUB_HW_PROG,
+ SCRUB_HW_SRC,
+ SCRUB_HW_PROG_SRC,
+ SCRUB_HW_TUNABLE
};
#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
@@ -374,23 +412,21 @@ struct edac_mc_layer {
#define EDAC_MAX_LAYERS 3
/**
- * EDAC_DIMM_PTR - Macro responsible to find a pointer inside a pointer array
+ * EDAC_DIMM_OFF - Macro responsible for getting an element offset inside a pointer array
* for the element given by [layer0,layer1,layer2] position
*
* @layers: a struct edac_mc_layer array, describing how many elements
* were allocated for each layer
- * @var: name of the var where we want to get the pointer
- * (like mci->dimms)
* @n_layers: Number of layers at the @layers array
* @layer0: layer0 position
* @layer1: layer1 position. Unused if n_layers < 2
* @layer2: layer2 position. Unused if n_layers < 3
*
- * For 1 layer, this macro returns &var[layer0]
+ * For 1 layer, this macro returns &var[layer0] - &var
* For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "&var[layer0][layer1]"
+ * and to return "&var[layer0][layer1] - &var"
* For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "&var[layer0][layer1][layer2]"
+ * and to return "&var[layer0][layer1][layer2] - &var"
*
* A loop could be used here to make it more generic, but, as we only have
* 3 layers, this is a little faster.
@@ -398,23 +434,52 @@ struct edac_mc_layer {
* a NULL is returned, causing an OOPS during the memory allocation routine,
* which would point the developer to the fact that something is wrong.
*/
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
- typeof(var) __p; \
+#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
+ int __i; \
if ((nlayers) == 1) \
- __p = &var[layer0]; \
+ __i = layer0; \
else if ((nlayers) == 2) \
- __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \
+ __i = (layer1) + ((layers[1]).size * (layer0)); \
else if ((nlayers) == 3) \
- __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \
- ((layers[1]).size * (layer0))))]; \
+ __i = (layer2) + ((layers[2]).size * ((layer1) + \
+ ((layers[1]).size * (layer0)))); \
else \
+ __i = -EINVAL; \
+ __i; \
+})
+
+/**
+ * EDAC_DIMM_PTR - Macro responsible for getting a pointer inside a pointer array
+ * for the element given by [layer0,layer1,layer2] position
+ *
+ * @layers: a struct edac_mc_layer array, describing how many elements
+ * were allocated for each layer
+ * @var: name of the var where we want to get the pointer
+ * (like mci->dimms)
+ * @n_layers: Number of layers at the @layers array
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro is similar to allocate a bi-dimensional array
+ * and to return "&var[layer0][layer1]"
+ * For 3 layers, this macro is similar to allocate a tri-dimensional array
+ * and to return "&var[layer0][layer1][layer2]"
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+ typeof(*var) __p; \
+ int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
+ if (___i < 0) \
__p = NULL; \
+ else \
+ __p = (var)[___i]; \
__p; \
})
-
-/* FIXME: add the proper per-location error counts */
struct dimm_info {
+ struct device dev;
+
char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
/* Memory location data */
@@ -456,6 +521,8 @@ struct rank_info {
};
struct csrow_info {
+ struct device dev;
+
/* Used only by edac_mc_find_csrow_by_page() */
unsigned long first_page; /* first page number in csrow */
unsigned long last_page; /* last page number in csrow */
@@ -469,44 +536,26 @@ struct csrow_info {
struct mem_ctl_info *mci; /* the parent */
- struct kobject kobj; /* sysfs kobject for this csrow */
-
/* channel information for this csrow */
u32 nr_channels;
- struct rank_info *channels;
+ struct rank_info **channels;
};
-struct mcidev_sysfs_group {
- const char *name; /* group name */
- const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
-};
-
-struct mcidev_sysfs_group_kobj {
- struct list_head list; /* list for all instances within a mc */
-
- struct kobject kobj; /* kobj for the group */
-
- const struct mcidev_sysfs_group *grp; /* group description table */
- struct mem_ctl_info *mci; /* the parent */
-};
-
-/* mcidev_sysfs_attribute structure
- * used for driver sysfs attributes and in mem_ctl_info
- * sysfs top level entries
+/*
+ * struct errcount_attribute - used to store the several error counts
*/
-struct mcidev_sysfs_attribute {
- /* It should use either attr or grp */
- struct attribute attr;
- const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
-
- /* Ops for show/store values at the attribute - not used on group */
- ssize_t (*show)(struct mem_ctl_info *,char *);
- ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
+struct errcount_attribute_data {
+ int n_layers;
+ int pos[EDAC_MAX_LAYERS];
+ int layer0, layer1, layer2;
};
/* MEMORY controller information structure
*/
struct mem_ctl_info {
+ struct device dev;
+ struct bus_type bus;
+
struct list_head link; /* for global list of mem_ctl_info structs */
struct module *owner; /* Module owner of this control struct */
@@ -548,10 +597,18 @@ struct mem_ctl_info {
unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
unsigned long page);
int mc_idx;
- struct csrow_info *csrows;
+ struct csrow_info **csrows;
unsigned nr_csrows, num_cschannel;
- /* Memory Controller hierarchy */
+ /*
+ * Memory Controller hierarchy
+ *
+ * There are basically two types of memory controller: the ones that
+ * see memory sticks ("dimms"), and the ones that see memory ranks.
+ * All old memory controllers enumerate memories per rank, but most
+ * of the recent drivers enumerate memories per DIMM, instead.
+ * When the memory controller is per rank, mem_is_per_rank is true.
+ */
unsigned n_layers;
struct edac_mc_layer *layers;
bool mem_is_per_rank;
@@ -560,14 +617,14 @@ struct mem_ctl_info {
* DIMM info. Will eventually remove the entire csrows_info some day
*/
unsigned tot_dimms;
- struct dimm_info *dimms;
+ struct dimm_info **dimms;
/*
* FIXME - what about controllers on other busses? - IDs must be
* unique. dev pointer should be sufficiently unique, but
* BUS:SLOT.FUNC numbers may not be unique.
*/
- struct device *dev;
+ struct device *pdev;
const char *mod_name;
const char *mod_ver;
const char *ctl_name;
@@ -586,12 +643,6 @@ struct mem_ctl_info {
struct completion complete;
- /* edac sysfs device control */
- struct kobject edac_mci_kobj;
-
- /* list for all grp instances within a mc */
- struct list_head grp_kobj_list;
-
/* Additional top controller level attributes, but specified
* by the low level driver.
*
@@ -609,6 +660,13 @@ struct mem_ctl_info {
/* the internal state of this controller instance */
int op_state;
+
+#ifdef CONFIG_EDAC_DEBUG
+ struct dentry *debugfs;
+ u8 fake_inject_layer[EDAC_MAX_LAYERS];
+ u32 fake_inject_ue;
+ u16 fake_inject_count;
+#endif
};
#endif
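EDAC_DIMM_OFF() linearises a [layer0][layer1][layer2] coordinate into a single index into the flat dimms[] pointer array, and EDAC_DIMM_PTR() then indirects through it. A small standalone sketch of the same arithmetic (the toy struct and the 2x3x2 geometry are invented for illustration):

#include <stdio.h>

/* toy stand-in for struct edac_mc_layer: only .size matters for the math */
struct layer { int size; };

/* same linearisation the macro performs for 1, 2 or 3 layers */
static int dimm_off(const struct layer *layers, int nlayers,
		    int l0, int l1, int l2)
{
	if (nlayers == 1)
		return l0;
	if (nlayers == 2)
		return l1 + layers[1].size * l0;
	if (nlayers == 3)
		return l2 + layers[2].size * (l1 + layers[1].size * l0);
	return -1;			/* -EINVAL in the macro */
}

int main(void)
{
	/* e.g. 2 channels x 3 slots x 2 ranks */
	struct layer layers[3] = { { 2 }, { 3 }, { 2 } };

	/* channel 1, slot 2, rank 0  ->  0 + 2 * (2 + 3 * 1) = 10 */
	printf("offset = %d\n", dimm_off(layers, 3, 1, 2, 0));
	return 0;
}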
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 103adc6d7e3a..ec45ccd8708a 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -503,6 +503,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
+extern unsigned long efi_get_time(void);
+extern int efi_set_rtc_mmss(unsigned long nowtime);
extern void efi_reserve_boot_services(void);
extern struct efi_memory_map memmap;
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 7edcf1031718..db04ec5121cb 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -152,7 +152,7 @@ static inline void fw_card_put(struct fw_card *card)
struct fw_attribute_group {
struct attribute_group *groups[2];
struct attribute_group group;
- struct attribute *attrs[12];
+ struct attribute *attrs[13];
};
enum fw_device_state {
@@ -321,7 +321,7 @@ struct fw_transaction {
struct fw_address_handler {
u64 offset;
- size_t length;
+ u64 length;
fw_address_callback_t address_callback;
void *callback_data;
struct list_head link;
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
new file mode 100644
index 000000000000..4ebc49fae391
--- /dev/null
+++ b/include/linux/flex_proportions.h
@@ -0,0 +1,101 @@
+/*
+ * Floating proportions with flexible aging period
+ *
+ * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
+ */
+
+#ifndef _LINUX_FLEX_PROPORTIONS_H
+#define _LINUX_FLEX_PROPORTIONS_H
+
+#include <linux/percpu_counter.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+
+/*
+ * When maximum proportion of some event type is specified, this is the
+ * precision with which we allow limitting. Note that this creates an upper
+ * bound on the number of events per period like
+ * ULLONG_MAX >> FPROP_FRAC_SHIFT.
+ */
+#define FPROP_FRAC_SHIFT 10
+#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
+
+/*
+ * ---- Global proportion definitions ----
+ */
+struct fprop_global {
+ /* Number of events in the current period */
+ struct percpu_counter events;
+ /* Current period */
+ unsigned int period;
+ /* Synchronization with period transitions */
+ seqcount_t sequence;
+};
+
+int fprop_global_init(struct fprop_global *p);
+void fprop_global_destroy(struct fprop_global *p);
+bool fprop_new_period(struct fprop_global *p, int periods);
+
+/*
+ * ---- SINGLE ----
+ */
+struct fprop_local_single {
+ /* the local events counter */
+ unsigned long events;
+ /* Period in which we last updated events */
+ unsigned int period;
+ raw_spinlock_t lock; /* Protect period and numerator */
+};
+
+#define INIT_FPROP_LOCAL_SINGLE(name) \
+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+}
+
+int fprop_local_init_single(struct fprop_local_single *pl);
+void fprop_local_destroy_single(struct fprop_local_single *pl);
+void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
+void fprop_fraction_single(struct fprop_global *p,
+ struct fprop_local_single *pl, unsigned long *numerator,
+ unsigned long *denominator);
+
+static inline
+void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __fprop_inc_single(p, pl);
+ local_irq_restore(flags);
+}
+
+/*
+ * ---- PERCPU ----
+ */
+struct fprop_local_percpu {
+ /* the local events counter */
+ struct percpu_counter events;
+ /* Period in which we last updated events */
+ unsigned int period;
+ raw_spinlock_t lock; /* Protect period and numerator */
+};
+
+int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
+void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
+void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
+ int max_frac);
+void fprop_fraction_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl, unsigned long *numerator,
+ unsigned long *denominator);
+
+static inline
+void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __fprop_inc_percpu(p, pl);
+ local_irq_restore(flags);
+}
+
+#endif
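A hedged sketch of the intended calling pattern for the flexible-proportions API, using only the functions declared above (the foo_* names are invented; the per-bdi writeback completion accounting is the real in-tree user):

static struct fprop_global foo_completions;
static struct fprop_local_percpu foo_dev_completions;

static int foo_init(void)
{
	int err;

	err = fprop_global_init(&foo_completions);
	if (err)
		return err;
	err = fprop_local_init_percpu(&foo_dev_completions);
	if (err) {
		fprop_global_destroy(&foo_completions);
		return err;
	}
	return 0;
}

static void foo_event(void)
{
	/* count one event against this device in the current period */
	fprop_inc_percpu(&foo_completions, &foo_dev_completions);
}

static void foo_timer(void)
{
	unsigned long num, den;

	/* start a new aging period: past events now carry half the weight */
	fprop_new_period(&foo_completions, 1);

	/* this device's share of all recent events, returned as num/den */
	fprop_fraction_percpu(&foo_completions, &foo_dev_completions,
			      &num, &den);
}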
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8fabb037a48d..aa110476a95b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -165,6 +165,8 @@ struct inodes_stat_t {
#define READ 0
#define WRITE RW_MASK
#define READA RWA_MASK
+#define KERNEL_READ (READ|REQ_KERNEL)
+#define KERNEL_WRITE (WRITE|REQ_KERNEL)
#define READ_SYNC (READ | REQ_SYNC)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -412,6 +414,7 @@ struct inodes_stat_t {
#include <linux/shrinker.h>
#include <linux/migrate_mode.h>
#include <linux/uidgid.h>
+#include <linux/lockdep.h>
#include <asm/byteorder.h>
@@ -427,6 +430,7 @@ struct kstatfs;
struct vm_area_struct;
struct vfsmount;
struct cred;
+struct swap_info_struct;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -437,6 +441,8 @@ extern unsigned long get_max_files(void);
extern int sysctl_nr_open;
extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
+extern int sysctl_protected_symlinks;
+extern int sysctl_protected_hardlinks;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
@@ -636,6 +642,11 @@ struct address_space_operations {
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
+
+ /* swapfile support */
+ int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
+ sector_t *span);
+ void (*swap_deactivate)(struct file *file);
};
extern const struct address_space_operations empty_aops;
@@ -1154,7 +1165,6 @@ struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
- void (*lm_release_private)(struct file_lock *);
void (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock **, int);
};
@@ -1163,9 +1173,10 @@ struct lock_manager {
struct list_head list;
};
-void locks_start_grace(struct lock_manager *);
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
void locks_end_grace(struct lock_manager *);
-int locks_in_grace(void);
+int locks_in_grace(struct net *);
/* that will die - we need it for nfs_lock_info */
#include <linux/nfs_fs_i.h>
@@ -1437,6 +1448,8 @@ extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
+struct mm_struct;
+
/*
* Umount options
*/
@@ -1450,10 +1463,34 @@ extern int send_sigurg(struct fown_struct *fown);
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
+ * internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ /* Counters for counting writers at each level */
+ struct percpu_counter counter[SB_FREEZE_LEVELS];
+ wait_queue_head_t wait; /* queue for waiting for
+ writers / faults to finish */
+ int frozen; /* Is sb frozen? */
+ wait_queue_head_t wait_unfrozen; /* queue for waiting for
+ sb to be thawed */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map lock_map[SB_FREEZE_LEVELS];
+#endif
+};
+
struct super_block {
struct list_head s_list; /* Keep this first */
dev_t s_dev; /* search index; _not_ kdev_t */
- unsigned char s_dirt;
unsigned char s_blocksize_bits;
unsigned long s_blocksize;
loff_t s_maxbytes; /* Max file size */
@@ -1497,8 +1534,7 @@ struct super_block {
struct hlist_node s_instances;
struct quota_info s_dquot; /* Diskquota specific options */
- int s_frozen;
- wait_queue_head_t s_wait_unfrozen;
+ struct sb_writers s_writers;
char s_id[32]; /* Informational name */
u8 s_uuid[16]; /* UUID */
@@ -1553,14 +1589,117 @@ extern struct timespec current_fs_time(struct super_block *sb);
/*
* Snapshotting support.
*/
-enum {
- SB_UNFROZEN = 0,
- SB_FREEZE_WRITE = 1,
- SB_FREEZE_TRANS = 2,
-};
-#define vfs_check_frozen(sb, level) \
- wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
+void __sb_end_write(struct super_block *sb, int level);
+int __sb_start_write(struct super_block *sb, int level, bool wait);
+
+/**
+ * sb_end_write - drop write access to a superblock
+ * @sb: the super we wrote to
+ *
+ * Decrement number of writers to the filesystem. Wake up possible waiters
+ * wanting to freeze the filesystem.
+ */
+static inline void sb_end_write(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_end_pagefault - drop write access to a superblock from a page fault
+ * @sb: the super we wrote to
+ *
+ * Decrement number of processes handling write page fault to the filesystem.
+ * Wake up possible waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_pagefault(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_end_intwrite - drop write access to a superblock for internal fs purposes
+ * @sb: the super we wrote to
+ *
+ * Decrement fs-internal number of writers to the filesystem. Wake up possible
+ * waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_intwrite(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_FS);
+}
+
+/**
+ * sb_start_write - get write access to a superblock
+ * @sb: the super we write to
+ *
+ * When a process wants to write data or metadata to a file system (i.e. dirty
+ * a page or an inode), it should embed the operation in a sb_start_write() -
+ * sb_end_write() pair to get exclusion against file system freezing. This
+ * function increments number of writers preventing freezing. If the file
+ * system is already frozen, the function waits until the file system is
+ * thawed.
+ *
+ * Since freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. Generally,
+ * freeze protection should be the outermost lock. In particular, we have:
+ *
+ * sb_start_write
+ * -> i_mutex (write path, truncate, directory ops, ...)
+ * -> s_umount (freeze_super, thaw_super)
+ */
+static inline void sb_start_write(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_WRITE, true);
+}
+
+static inline int sb_start_write_trylock(struct super_block *sb)
+{
+ return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+}
+
+/**
+ * sb_start_pagefault - get write access to a superblock from a page fault
+ * @sb: the super we write to
+ *
+ * When a process starts handling write page fault, it should embed the
+ * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
+ * exclusion against file system freezing. This is needed since the page fault
+ * is going to dirty a page. This function increments number of running page
+ * faults preventing freezing. If the file system is already frozen, the
+ * function waits until the file system is thawed.
+ *
+ * Since page fault freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. It is advised to
+ * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * handling code implies lock dependency:
+ *
+ * mmap_sem
+ * -> sb_start_pagefault
+ */
+static inline void sb_start_pagefault(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+}
+
+/*
+ * sb_start_intwrite - get write access to a superblock for internal fs purposes
+ * @sb: the super we write to
+ *
+ * This is the third level of protection against filesystem freezing. It is
+ * free for use by a filesystem. The only requirement is that it must rank
+ * below sb_start_pagefault.
+ *
+ * For example filesystem can call sb_start_intwrite() when starting a
+ * transaction which somewhat eases handling of freezing for internal sources
+ * of filesystem changes (internal fs threads, discarding preallocation on file
+ * close, etc.).
+ */
+static inline void sb_start_intwrite(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_FS, true);
+}
+
extern bool inode_owner_or_capable(const struct inode *inode);
@@ -1721,7 +1860,6 @@ struct super_operations {
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
- void (*write_super) (struct super_block *);
int (*sync_fs)(struct super_block *sb, int wait);
int (*freeze_fs) (struct super_block *);
int (*unfreeze_fs) (struct super_block *);
@@ -1884,6 +2022,7 @@ struct file_system_type {
struct lock_class_key s_lock_key;
struct lock_class_key s_umount_key;
struct lock_class_key s_vfs_rename_key;
+ struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
@@ -2256,7 +2395,6 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
-extern void sync_supers(void);
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
@@ -2326,9 +2464,6 @@ static inline void i_readcount_inc(struct inode *inode)
}
#endif
extern int do_pipe_flags(int *, int);
-extern struct file *create_read_pipe(struct file *f, int flags);
-extern struct file *create_write_pipe(int flags);
-extern void free_write_pipe(struct file *);
extern int kernel_read(struct file *, loff_t, char *, unsigned long);
extern struct file * open_exec(const char *);
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index af961d6f7ab1..642928cf57b4 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -306,9 +306,10 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type,
static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
- u64 count, struct pt_regs *regs, void *head)
+ u64 count, struct pt_regs *regs, void *head,
+ struct task_struct *task)
{
- perf_tp_event(addr, count, raw_data, size, regs, head, rctx);
+ perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
}
#endif
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9303348965fb..d8c713e148e3 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -57,6 +57,9 @@
*
* 7.19
* - add FUSE_FALLOCATE
+ *
+ * 7.20
+ * - add FUSE_AUTO_INVAL_DATA
*/
#ifndef _LINUX_FUSE_H
@@ -88,7 +91,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 19
+#define FUSE_KERNEL_MINOR_VERSION 20
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -163,10 +166,19 @@ struct fuse_file_lock {
/**
* INIT request/reply flags
*
+ * FUSE_ASYNC_READ: asynchronous read requests
* FUSE_POSIX_LOCKS: remote locking for POSIX file locks
+ * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported)
+ * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem
* FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".."
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB
* FUSE_DONT_MASK: don't apply umask to file mode on create operations
+ * FUSE_SPLICE_WRITE: kernel supports splice write on the device
+ * FUSE_SPLICE_MOVE: kernel supports splice move on the device
+ * FUSE_SPLICE_READ: kernel supports splice read on the device
* FUSE_FLOCK_LOCKS: remote locking for BSD style file locks
+ * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories
+ * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -175,7 +187,12 @@ struct fuse_file_lock {
#define FUSE_EXPORT_SUPPORT (1 << 4)
#define FUSE_BIG_WRITES (1 << 5)
#define FUSE_DONT_MASK (1 << 6)
+#define FUSE_SPLICE_WRITE (1 << 7)
+#define FUSE_SPLICE_MOVE (1 << 8)
+#define FUSE_SPLICE_READ (1 << 9)
#define FUSE_FLOCK_LOCKS (1 << 10)
+#define FUSE_HAS_IOCTL_DIR (1 << 11)
+#define FUSE_AUTO_INVAL_DATA (1 << 12)
/**
* CUSE INIT request/reply flags
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ae0aaa9d42fa..4f440b3e89fe 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -97,7 +97,13 @@ struct partition_meta_info {
struct hd_struct {
sector_t start_sect;
+ /*
+ * nr_sects is protected by a sequence counter. A partition may be
+ * extended while I/O is in flight, and on 32-bit machines with a
+ * 64-bit sector_t the update of nr_sects is not atomic.
+ */
sector_t nr_sects;
+ seqcount_t nr_sects_seq;
sector_t alignment_offset;
unsigned int discard_alignment;
struct device __dev;
@@ -647,6 +653,57 @@ static inline void hd_struct_put(struct hd_struct *part)
__delete_partition(part);
}
+/*
+ * Any access to part->nr_sects that is not protected by the partition's
+ * bd_mutex or the gendisk's bdev bd_mutex should be done through this
+ * accessor function.
+ *
+ * The code is written along the lines of i_size_read() and i_size_write().
+ * The CONFIG_PREEMPT case optimizes for a UP kernel with preemption enabled.
+ */
+static inline sector_t part_nr_sects_read(struct hd_struct *part)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+ sector_t nr_sects;
+ unsigned seq;
+ do {
+ seq = read_seqcount_begin(&part->nr_sects_seq);
+ nr_sects = part->nr_sects;
+ } while (read_seqcount_retry(&part->nr_sects_seq, seq));
+ return nr_sects;
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+ sector_t nr_sects;
+
+ preempt_disable();
+ nr_sects = part->nr_sects;
+ preempt_enable();
+ return nr_sects;
+#else
+ return part->nr_sects;
+#endif
+}
+
+/*
+ * Should be called with the partition's mutex (typically bd_mutex) held to
+ * provide mutual exclusion among writers; otherwise the seqcount might be
+ * left in an inconsistent state, leaving readers spinning indefinitely.
+ */
+static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+ write_seqcount_begin(&part->nr_sects_seq);
+ part->nr_sects = size;
+ write_seqcount_end(&part->nr_sects_seq);
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ part->nr_sects = size;
+ preempt_enable();
+#else
+ part->nr_sects = size;
+#endif
+}
+
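A sketch of the accessor pair, assuming a valid block_device/hd_struct pair
(my_resize_part() is an invented name):

	static void my_resize_part(struct block_device *bdev, struct hd_struct *part,
				   sector_t new_size)
	{
		sector_t cur;

		mutex_lock(&bdev->bd_mutex);		/* writers must serialize */
		part_nr_sects_write(part, new_size);
		mutex_unlock(&bdev->bd_mutex);

		cur = part_nr_sects_read(part);		/* lockless, seqcount-protected */
		(void)cur;
	}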
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void) { }
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 1e49be49d324..4883f393f50a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -23,6 +23,7 @@ struct vm_area_struct;
#define ___GFP_REPEAT 0x400u
#define ___GFP_NOFAIL 0x800u
#define ___GFP_NORETRY 0x1000u
+#define ___GFP_MEMALLOC 0x2000u
#define ___GFP_COMP 0x4000u
#define ___GFP_ZERO 0x8000u
#define ___GFP_NOMEMALLOC 0x10000u
@@ -76,9 +77,14 @@ struct vm_area_struct;
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
-#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
+ * This takes precedence over the
+ * __GFP_MEMALLOC flag if both are
+ * set
+ */
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
@@ -129,7 +135,7 @@ struct vm_area_struct;
/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
- __GFP_NORETRY|__GFP_NOMEMALLOC)
+ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
@@ -379,6 +385,9 @@ void drain_local_pages(void *dummy);
*/
extern gfp_t gfp_allowed_mask;
+/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
+bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+
extern void pm_restrict_gfp_mask(void);
extern void pm_restore_gfp_mask(void);
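A sketch of how a reclaim-critical allocation might use the new flag
(illustrative only; the surrounding context, e.g. swap over network, is
assumed):

	static struct page *my_reserve_page(void)
	{
		/*
		 * GFP_ATOMIC | __GFP_MEMALLOC may dip into the emergency reserves;
		 * adding __GFP_NOMEMALLOC would take precedence and forbid that.
		 */
		return alloc_page(GFP_ATOMIC | __GFP_MEMALLOC);
	}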
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index bb7f30971858..305f23cd7cff 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -22,7 +22,7 @@
*
* - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
* - bit 26 is the NMI_MASK
- * - bit 28 is the PREEMPT_ACTIVE flag
+ * - bit 27 is the PREEMPT_ACTIVE flag
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index d3999b4e26cc..ef788b5b4a35 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,10 +39,17 @@ extern unsigned long totalhigh_pages;
void kmap_flush_unused(void);
+struct page *kmap_to_page(void *addr);
+
#else /* CONFIG_HIGHMEM */
static inline unsigned int nr_free_highpages(void) { return 0; }
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
@@ -110,54 +117,15 @@ static inline void kmap_atomic_idx_pop(void)
#endif
/*
- * NOTE:
- * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
- * We only keep them for backward compatibility, any usage of them
- * are now warned.
- */
-
-#define PASTE(a, b) a ## b
-#define PASTE2(a, b) PASTE(a, b)
-
-#define NARG_(_2, _1, n, ...) n
-#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)
-
-static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
- enum km_type km)
-{
- return kmap_atomic(page);
-}
-
-#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
-#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
-#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
-
-static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
- enum km_type km)
-{
- __kunmap_atomic(addr);
-}
-
-/*
* Prevent people trying to call kunmap_atomic() as if it were kunmap()
* kunmap_atomic() should get the return value of kmap_atomic, not the page.
*/
-#define kunmap_atomic_deprecated(addr, km) \
-do { \
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- __kunmap_atomic_deprecated(addr, km); \
-} while (0)
-
-#define kunmap_atomic_withcheck(addr) \
+#define kunmap_atomic(addr) \
do { \
BUILD_BUG_ON(__same_type((addr), struct page *)); \
__kunmap_atomic(addr); \
} while (0)
-#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
-#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
-#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
-/**** End of C pre-processor tricks for deprecated macros ****/
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index d5d6bbe2259e..225164842ab6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -4,9 +4,11 @@
#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
+#include <linux/cgroup.h>
struct ctl_table;
struct user_struct;
+struct mmu_gather;
#ifdef CONFIG_HUGETLB_PAGE
@@ -20,6 +22,11 @@ struct hugepage_subpool {
long max_hpages, used_hpages;
};
+extern spinlock_t hugetlb_lock;
+extern int hugetlb_max_hstate __read_mostly;
+#define for_each_hstate(h) \
+ for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
+
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);
@@ -40,9 +47,14 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
struct page **, struct vm_area_struct **,
unsigned long *, int *, int, unsigned int flags);
void unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
-void __unmap_hugepage_range(struct vm_area_struct *,
- unsigned long, unsigned long, struct page *);
+ unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page);
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
@@ -98,7 +110,6 @@ static inline unsigned long hugetlb_total_pages(void)
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
-#define unmap_hugepage_range(vma, start, end, page) BUG()
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
@@ -112,13 +123,31 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define huge_pte_offset(mm, address) 0
-#define dequeue_hwpoisoned_huge_page(page) 0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+ return 0;
+}
+
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}
#define hugetlb_change_protection(vma, address, end, newprot)
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+{
+ BUG();
+}
+
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+{
+ BUG();
+}
+
#endif /* !CONFIG_HUGETLB_PAGE */
#define HUGETLB_ANON_FILE "anon_hugepage"
@@ -199,10 +228,15 @@ struct hstate {
unsigned long resv_huge_pages;
unsigned long surplus_huge_pages;
unsigned long nr_overcommit_huge_pages;
+ struct list_head hugepage_activelist;
struct list_head hugepage_freelists[MAX_NUMNODES];
unsigned int nr_huge_pages_node[MAX_NUMNODES];
unsigned int free_huge_pages_node[MAX_NUMNODES];
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_CGROUP_HUGETLB
+ /* cgroup control files */
+ struct cftype cgroup_files[5];
+#endif
char name[HSTATE_NAME_LEN];
};
@@ -302,6 +336,11 @@ static inline unsigned hstate_index_to_shift(unsigned index)
return hstates[index].order + PAGE_SHIFT;
}
+static inline int hstate_index(struct hstate *h)
+{
+ return h - hstates;
+}
+
#else
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
@@ -320,6 +359,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
return 1;
}
#define hstate_index_to_shift(index) 0
+#define hstate_index(h) 0
#endif
#endif /* _LINUX_HUGETLB_H */
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
new file mode 100644
index 000000000000..d73878c694b3
--- /dev/null
+++ b/include/linux/hugetlb_cgroup.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright IBM Corporation, 2012
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#ifndef _LINUX_HUGETLB_CGROUP_H
+#define _LINUX_HUGETLB_CGROUP_H
+
+#include <linux/res_counter.h>
+
+struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER 2
+
+#ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ VM_BUG_ON(!PageHuge(page));
+
+ if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+ return NULL;
+ return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+ VM_BUG_ON(!PageHuge(page));
+
+ if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+ return -1;
+ page[2].lru.next = (void *)h_cg;
+ return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+ if (hugetlb_subsys.disabled)
+ return true;
+ return false;
+}
+
+extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
+extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page);
+extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page);
+extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern int hugetlb_cgroup_file_init(int idx) __init;
+extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+ struct page *newhpage);
+
+#else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+ return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+ return true;
+}
+
+static inline int
+hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void
+hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+{
+ return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ return;
+}
+
+static inline int __init hugetlb_cgroup_file_init(int idx)
+{
+ return 0;
+}
+
+static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
+ struct page *newhpage)
+{
+ return;
+}
+
+#endif /* CONFIG_CGROUP_HUGETLB */
+#endif
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h
index 4d5e57ff6614..1c06b5c7c308 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/i2c-ocores.h
@@ -12,7 +12,8 @@
#define _LINUX_I2C_OCORES_H
struct ocores_i2c_platform_data {
- u32 regstep; /* distance between registers */
+ u32 reg_shift; /* register offset shift value */
+ u32 reg_io_width; /* register io read/write width */
u32 clock_khz; /* input clock in kHz */
u8 num_devices; /* number of devices in the devices list */
struct i2c_board_info const *devices; /* devices connected to the bus */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 1d0fe4877b1f..5970266930a2 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -68,6 +68,9 @@ extern int i2c_master_recv(const struct i2c_client *client, char *buf,
*/
extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num);
+/* Unlocked flavor */
+extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
/* This is the very generalized SMBus access routine. You probably do not
want to use this, though; one of the functions below may be much easier,
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
index 139ba52667c8..3c98dd4f901f 100644
--- a/include/linux/i2c/pca953x.h
+++ b/include/linux/i2c/pca953x.h
@@ -11,7 +11,7 @@ struct pca953x_platform_data {
unsigned gpio_base;
/* initial polarity inversion setting */
- uint16_t invert;
+ u32 invert;
/* interrupt base */
int irq_base;
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 555382660bc4..7ea898c55a60 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -555,6 +555,8 @@ struct twl4030_clock_init_data {
struct twl4030_bci_platform_data {
int *battery_tmp_tbl;
unsigned int tblsize;
+ int bb_uvolt; /* voltage to charge backup battery */
+ int bb_uamp; /* current for backup battery charging */
};
/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 6960fc1841a7..aa2e167e1ef4 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port,
}
#endif
-static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
- struct sk_buff *skb)
-{
- BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
- sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
- skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
-
- skb->dev = port->dev;
- if (unlikely(netpoll_tx_running(port->dev))) {
- team_netpoll_send_skb(port, skb);
- return 0;
- }
- return dev_queue_xmit(skb);
-}
-
struct team_mode_ops {
int (*init)(struct team *team);
void (*exit)(struct team *team);
@@ -200,6 +185,21 @@ struct team {
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
+static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+ skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ skb->dev = port->dev;
+ if (unlikely(netpoll_tx_running(team->dev))) {
+ team_netpoll_send_skb(port, skb);
+ return 0;
+ }
+ return dev_queue_xmit(skb);
+}
+
static inline struct hlist_head *team_port_index_hash(struct team *team,
int port_index)
{
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index b76b4a87065e..be91f344d5fc 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -87,6 +87,8 @@
#define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */
#define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */
#define ADF4350_MAX_MODULUS 4095
+#define ADF4350_MAX_R_CNT 1023
+
/**
* struct adf4350_platform_data - platform specific information
diff --git a/include/linux/init.h b/include/linux/init.h
index 6b951095a42f..5e664f671615 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -191,6 +191,7 @@ extern bool initcall_debug;
* initializes variables that couldn't be statically initialized.
*
* This only exists for built-in code, not for modules.
+ * Keep main.c:initcall_level_names[] in sync.
*/
#define pure_initcall(fn) __define_initcall("0",fn,0)
@@ -280,7 +281,7 @@ void __init parse_early_options(char *cmdline);
#else /* MODULE */
-/* Don't use these in modules, but some people do... */
+/* Don't use these in loadable modules, but some people do... */
#define early_initcall(fn) module_init(fn)
#define core_initcall(fn) module_init(fn)
#define postcore_initcall(fn) module_init(fn)
diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h
new file mode 100644
index 000000000000..8a1e0d1a0124
--- /dev/null
+++ b/include/linux/input/edt-ft5x06.h
@@ -0,0 +1,24 @@
+#ifndef _EDT_FT5X06_H
+#define _EDT_FT5X06_H
+
+/*
+ * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct edt_ft5x06_platform_data {
+ int irq_pin;
+ int reset_pin;
+
+ /* startup defaults for operational parameters */
+ bool use_parameters;
+ u8 gain;
+ u8 threshold;
+ u8 offset;
+ u8 report_rate;
+};
+
+#endif /* _EDT_FT5X06_H */
diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
index f875b316249d..16625d799b6f 100644
--- a/include/linux/input/eeti_ts.h
+++ b/include/linux/input/eeti_ts.h
@@ -2,6 +2,7 @@
#define LINUX_INPUT_EETI_TS_H
struct eeti_ts_platform_data {
+ int irq_gpio;
unsigned int irq_active_high;
};
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index e68a8e53bb59..c5f856a040b9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -42,7 +42,6 @@
*
* IRQF_DISABLED - keep irqs disabled when calling the action handler.
* DEPRECATED. This flag is a NOOP and scheduled to be removed
- * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
* IRQF_SHARED - allow sharing the irq among several devices
* IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
@@ -61,7 +60,6 @@
* resume time.
*/
#define IRQF_DISABLED 0x00000020
-#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 54d6d690073c..7e83370e6fd2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -20,6 +20,7 @@
#define __LINUX_IOMMU_H
#include <linux/errno.h>
+#include <linux/types.h>
#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
@@ -30,6 +31,7 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
+struct notifier_block;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 379e433e15e0..879db26ec401 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -369,6 +369,7 @@ struct ipv6_pinfo {
__u8 rcv_tclass;
__u32 dst_cookie;
+ __u32 rx_dst_cookie;
struct ipv6_mc_socklist __rcu *ipv6_mc_list;
struct ipv6_ac_socklist *ipv6_ac_list;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 553fb66da130..216b0ba109d7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -349,6 +349,7 @@ enum {
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
};
/* This include will go away once we isolated irq_desc usage to core code */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index f1e2527006bd..9a323d12de1c 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -39,7 +39,6 @@ struct module;
*/
struct irq_desc {
struct irq_data irq_data;
- struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 5abb533eb8eb..0d5b17bf5e51 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -112,6 +112,11 @@ struct irq_domain {
};
#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
@@ -144,16 +149,31 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
extern void irq_domain_remove(struct irq_domain *host);
+extern int irq_domain_associate_many(struct irq_domain *domain,
+ unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
+static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ return irq_domain_associate_many(domain, irq, hwirq, 1);
+}
+
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern void irq_dispose_mapping(unsigned int virq);
extern unsigned int irq_find_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
- irq_hw_number_t hwirq);
-extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
- irq_hw_number_t hwirq);
+extern int irq_create_strict_mappings(struct irq_domain *domain,
+ unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
+
+static inline int irq_create_identity_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+}
+
extern unsigned int irq_linear_revmap(struct irq_domain *host,
irq_hw_number_t hwirq);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f334c7fab967..3efc43f3f162 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1125,6 +1125,7 @@ extern int jbd2_journal_destroy (journal_t *);
extern int jbd2_journal_recover (journal_t *journal);
extern int jbd2_journal_wipe (journal_t *, int);
extern int jbd2_journal_skip_recovery (journal_t *);
+extern void jbd2_journal_update_sb_errno(journal_t *);
extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
extern void __jbd2_journal_abort_hard (journal_t *);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 265e2c3cbd1c..82680541576d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -39,9 +39,6 @@
# error Invalid value of HZ.
#endif
-/* LATCH is used in the interval timer and ftape setup. */
-#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
-
/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
* improve accuracy by shifting LSH bits, hence calculating:
* (NOM << LSH) / DEN
@@ -54,18 +51,30 @@
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
-/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
-#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+#ifdef CLOCK_TICK_RATE
+/* LATCH is used in the interval timer and ftape setup. */
+# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+/*
+ * HZ is the requested value. However, CLOCK_TICK_RATE may not allow for
+ * exactly HZ, so SHIFTED_HZ is a high-resolution HZ ("<< 8" is for accuracy)
+ */
+# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
+#else
+# define SHIFTED_HZ (HZ << 8)
+#endif
-/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
-#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
-/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */
-#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+/*
+ * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
+ * a value TUSEC for TICK_USEC (can be set by adjtimex)
+ */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
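A worked example of the new definitions, assuming HZ == 100 on an
architecture that does not define CLOCK_TICK_RATE:

	/*
	 *   SHIFTED_HZ = 100 << 8 = 25600
	 *   TICK_NSEC  = SH_DIV(1000000000, 25600, 8)
	 *              = (39062 << 8) + ((12800 << 8) + 12800) / 25600
	 *              = 9999872 + 128 = 10000000 ns, i.e. 10 ms per tick
	 */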
/* some arch's have a small-data section that can be accessed register-relative
* but that can only take up to, say, 4-byte variables. jiffies being part of
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 064725854db8..42d9e863a313 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -75,8 +75,6 @@ extern const char *kdb_diemsg;
#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
-#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
- * kdb is off */
#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
* kdb is disabled */
#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
new file mode 100644
index 000000000000..866caaa9e2bb
--- /dev/null
+++ b/include/linux/kern_levels.h
@@ -0,0 +1,25 @@
+#ifndef __KERN_LEVELS_H__
+#define __KERN_LEVELS_H__
+
+#define KERN_SOH "\001" /* ASCII Start Of Header */
+#define KERN_SOH_ASCII '\001'
+
+#define KERN_EMERG KERN_SOH "0" /* system is unusable */
+#define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */
+#define KERN_CRIT KERN_SOH "2" /* critical conditions */
+#define KERN_ERR KERN_SOH "3" /* error conditions */
+#define KERN_WARNING KERN_SOH "4" /* warning conditions */
+#define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */
+#define KERN_INFO KERN_SOH "6" /* informational */
+#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
+
+#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */
+
+/*
+ * Annotation for a "continued" line of log printout (only done after a
+ * line that had no enclosing \n). Only to be used by core/arch code
+ * during early bootup (a continued line is not SMP-safe otherwise).
+ */
+#define KERN_CONT ""
+
+#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 604382143bcf..594b419b7d20 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -82,10 +82,18 @@
__x - (__x % (y)); \
} \
)
+
+/*
+ * Divide positive or negative dividend by positive divisor and round
+ * to closest integer. Result is undefined for negative divisors.
+ */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
- typeof(divisor) __divisor = divisor; \
- (((x) + ((__divisor) / 2)) / (__divisor)); \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) >= 0 || (__x) >= 0) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
} \
)
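A few illustrative evaluations of the reworked macro (assuming ordinary int
arguments):

	/*
	 *   DIV_ROUND_CLOSEST(17, 5)  == 3    ((17 + 2) / 5)
	 *   DIV_ROUND_CLOSEST(18, 5)  == 4    ((18 + 2) / 5)
	 *   DIV_ROUND_CLOSEST(-18, 5) == -4   ((-18 - 2) / 5, now handled correctly)
	 */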
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 39e3c082c49d..f0c651cda7b0 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -13,6 +13,7 @@
#define _LINUX_KEY_TYPE_H
#include <linux/key.h>
+#include <linux/errno.h>
#ifdef CONFIG_KEYS
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 9c07dcebded7..65af6887872f 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -18,6 +18,7 @@
#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
+#include <linux/mutex.h>
struct kref {
atomic_t refcount;
@@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
{
return kref_sub(kref, 1, release);
}
+
+static inline int kref_put_mutex(struct kref *kref,
+ void (*release)(struct kref *kref),
+ struct mutex *lock)
+{
+ WARN_ON(release == NULL);
+ if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+ mutex_lock(lock);
+ if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
+ mutex_unlock(lock);
+ return 0;
+ }
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
#endif /* _KREF_H_ */
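A usage sketch for kref_put_mutex(), dropping the last reference to an object
on a mutex-protected list (my_obj, my_obj_release() and my_list_lock are
hypothetical; note that the release callback runs with the mutex held and is
expected to drop it):

	static DEFINE_MUTEX(my_list_lock);

	struct my_obj {
		struct kref kref;
		struct list_head node;
	};

	static void my_obj_release(struct kref *kref)
	{
		struct my_obj *obj = container_of(kref, struct my_obj, kref);

		/* kref_put_mutex() acquired my_list_lock for us */
		list_del(&obj->node);
		mutex_unlock(&my_list_lock);
		kfree(obj);
	}

	/* drop a reference: kref_put_mutex(&obj->kref, my_obj_release, &my_list_lock); */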
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 603bec2913b0..06177ba10a16 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -58,13 +58,6 @@ union ktime {
typedef union ktime ktime_t; /* Kill this */
-#define KTIME_MAX ((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX LONG_MAX
-#endif
-
/*
* ktime_t definitions when using the 64-bit scalar representation:
*/
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 39eee41d8c6f..3aade1d8f410 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -38,6 +38,9 @@ struct led_classdev {
#define LED_SUSPENDED (1 << 0)
/* Upper 16 bits reflect control information */
#define LED_CORE_SUSPENDRESUME (1 << 16)
+#define LED_BLINK_ONESHOT (1 << 17)
+#define LED_BLINK_ONESHOT_STOP (1 << 18)
+#define LED_BLINK_INVERT (1 << 19)
/* Set LED brightness level */
/* Must not sleep, use a workqueue if needed */
@@ -103,7 +106,25 @@ extern void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off);
/**
- * led_brightness_set - set LED brightness
+ * led_blink_set_oneshot - do a oneshot software blink
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ * @invert: blink off, then on, leaving the led on
+ *
+ * This function makes the LED blink one time for delay_on +
+ * delay_off time, ignoring the request if another one-shot
+ * blink is already in progress.
+ *
+ * If invert is set, the led blinks for delay_off first, then for
+ * delay_on, and leaves the led on after the on-off cycle.
+ */
+extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
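A usage sketch for the one-shot blink helper (my_signal_activity() is a
hypothetical caller):

	static void my_signal_activity(struct led_classdev *led)
	{
		unsigned long delay_on = 50, delay_off = 50;	/* ms */

		/* one 50ms-on/50ms-off blink; ignored if a one-shot blink is running */
		led_blink_set_oneshot(led, &delay_on, &delay_off, 0);
	}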
+/**
+ * led_set_brightness - set LED brightness
* @led_cdev: the LED to set
* @brightness: the brightness to set it to
*
@@ -111,7 +132,7 @@ extern void led_blink_set(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't.
*/
-extern void led_brightness_set(struct led_classdev *led_cdev,
+extern void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness);
/*
@@ -150,6 +171,10 @@ extern void led_trigger_event(struct led_trigger *trigger,
extern void led_trigger_blink(struct led_trigger *trigger,
unsigned long *delay_on,
unsigned long *delay_off);
+extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
#else
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
new file mode 100644
index 000000000000..4c0306c69b4e
--- /dev/null
+++ b/include/linux/libfdt.h
@@ -0,0 +1,8 @@
+#ifndef _INCLUDE_LIBFDT_H_
+#define _INCLUDE_LIBFDT_H_
+
+#include <linux/libfdt_env.h>
+#include "../../scripts/dtc/libfdt/fdt.h"
+#include "../../scripts/dtc/libfdt/libfdt.h"
+
+#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
new file mode 100644
index 000000000000..01508c7b8c81
--- /dev/null
+++ b/include/linux/libfdt_env.h
@@ -0,0 +1,13 @@
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+
+#define fdt32_to_cpu(x) be32_to_cpu(x)
+#define cpu_to_fdt32(x) cpu_to_be32(x)
+#define fdt64_to_cpu(x) be64_to_cpu(x)
+#define cpu_to_fdt64(x) cpu_to_be64(x)
+
+#endif /* _LIBFDT_ENV_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index f04ce6ac6d04..f5a051a79273 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -262,11 +262,11 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *, int,
struct nlm_cookie *, int);
-__be32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
+__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *);
__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
-__be32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
+__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
unsigned long nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
@@ -279,7 +279,7 @@ void nlmsvc_release_call(struct nlm_rqst *);
__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
struct nfs_fh *);
void nlm_release_file(struct nlm_file *);
-void nlmsvc_mark_resources(void);
+void nlmsvc_mark_resources(struct net *);
void nlmsvc_free_host_resources(struct nlm_host *);
void nlmsvc_invalidate_all(void);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 83e7ba90d6e5..8d9489fdab2e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -38,7 +38,7 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
* (gfp_mask & GFP_RECLAIM_MASK). In current implementatin, memcg doesn't
@@ -72,8 +72,6 @@ extern void mem_cgroup_uncharge_end(void);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
- int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
@@ -100,9 +98,9 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
-extern int
-mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
+extern void
+mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
+ struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
@@ -124,7 +122,7 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif
@@ -182,7 +180,6 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -193,7 +190,7 @@ void mem_cgroup_split_huge_fixup(struct page *head);
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
-#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+#else /* CONFIG_MEMCG */
struct mem_cgroup;
static inline int mem_cgroup_newpage_charge(struct page *page,
@@ -279,11 +276,10 @@ static inline struct cgroup_subsys_state
return NULL;
}
-static inline int
+static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
- struct mem_cgroup **memcgp, gfp_t gfp_mask)
+ struct mem_cgroup **memcgp)
{
- return 0;
}
static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
@@ -366,12 +362,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
return 0;
}
-static inline
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
-{
- return 0;
-}
-
static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}
@@ -384,9 +374,9 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
}
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
+#endif /* CONFIG_MEMCG */
-#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
+#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
@@ -406,7 +396,7 @@ enum {
};
struct sock;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
@@ -416,6 +406,6 @@ static inline void sock_update_memcg(struct sock *sk)
static inline void sock_release_memcg(struct sock *sk)
{
}
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4aa42732e47f..95b738c7abff 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -215,7 +215,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask);
-extern unsigned slab_node(struct mempolicy *policy);
+extern unsigned slab_node(void);
extern enum zone_type policy_zone;
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index 7c08052e3321..39ed62ab5b8a 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -26,7 +26,8 @@ typedef struct mempool_s {
extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data, int nid);
+ mempool_free_t *free_fn, void *pool_data,
+ gfp_t gfp_mask, int nid);
extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
extern void mempool_destroy(mempool_t *pool);
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
new file mode 100644
index 000000000000..a0ca0dca1244
--- /dev/null
+++ b/include/linux/mfd/88pm80x.h
@@ -0,0 +1,369 @@
+/*
+ * Marvell 88PM80x Interface
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM80X_H
+#define __LINUX_MFD_88PM80X_H
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/atomic.h>
+
+#define PM80X_VERSION_MASK (0xFF) /* 80X chip ID mask */
+enum {
+ CHIP_INVALID = 0,
+ CHIP_PM800,
+ CHIP_PM805,
+ CHIP_MAX,
+};
+
+enum {
+ PM800_ID_BUCK1 = 0,
+ PM800_ID_BUCK2,
+ PM800_ID_BUCK3,
+ PM800_ID_BUCK4,
+ PM800_ID_BUCK5,
+
+ PM800_ID_LDO1,
+ PM800_ID_LDO2,
+ PM800_ID_LDO3,
+ PM800_ID_LDO4,
+ PM800_ID_LDO5,
+ PM800_ID_LDO6,
+ PM800_ID_LDO7,
+ PM800_ID_LDO8,
+ PM800_ID_LDO9,
+ PM800_ID_LDO10,
+ PM800_ID_LDO11,
+ PM800_ID_LDO12,
+ PM800_ID_LDO13,
+ PM800_ID_LDO14,
+ PM800_ID_LDO15,
+ PM800_ID_LDO16,
+ PM800_ID_LDO17,
+ PM800_ID_LDO18,
+ PM800_ID_LDO19,
+
+ PM800_ID_RG_MAX,
+};
+#define PM800_MAX_REGULATOR PM800_ID_RG_MAX /* 5 Bucks, 19 LDOs */
+#define PM800_NUM_BUCK (5) /* 5 Bucks */
+#define PM800_NUM_LDO (19) /* 19 LDOs */
+
+/* page 0 basic: slave addr 0x60 */
+
+#define PM800_STATUS_1 (0x01)
+#define PM800_ONKEY_STS1 (1 << 0)
+#define PM800_EXTON_STS1 (1 << 1)
+#define PM800_CHG_STS1 (1 << 2)
+#define PM800_BAT_STS1 (1 << 3)
+#define PM800_VBUS_STS1 (1 << 4)
+#define PM800_LDO_PGOOD_STS1 (1 << 5)
+#define PM800_BUCK_PGOOD_STS1 (1 << 6)
+
+#define PM800_STATUS_2 (0x02)
+#define PM800_RTC_ALARM_STS2 (1 << 0)
+
+/* Wakeup Registers */
+#define PM800_WAKEUP1 (0x0D)
+
+#define PM800_WAKEUP2 (0x0E)
+#define PM800_WAKEUP2_INV_INT (1 << 0)
+#define PM800_WAKEUP2_INT_CLEAR (1 << 1)
+#define PM800_WAKEUP2_INT_MASK (1 << 2)
+
+#define PM800_POWER_UP_LOG (0x10)
+
+/* Reference and low power registers */
+#define PM800_LOW_POWER1 (0x20)
+#define PM800_LOW_POWER2 (0x21)
+#define PM800_LOW_POWER_CONFIG3 (0x22)
+#define PM800_LOW_POWER_CONFIG4 (0x23)
+
+/* GPIO register */
+#define PM800_GPIO_0_1_CNTRL (0x30)
+#define PM800_GPIO0_VAL (1 << 0)
+#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
+#define PM800_GPIO1_VAL (1 << 4)
+#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
+
+#define PM800_GPIO_2_3_CNTRL (0x31)
+#define PM800_GPIO2_VAL (1 << 0)
+#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
+#define PM800_GPIO3_VAL (1 << 4)
+#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
+#define PM800_GPIO3_MODE_MASK 0x1F
+#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
+
+#define PM800_GPIO_4_CNTRL (0x32)
+#define PM800_GPIO4_VAL (1 << 0)
+#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
+
+#define PM800_HEADSET_CNTRL (0x38)
+#define PM800_HEADSET_DET_EN (1 << 7)
+#define PM800_HSDET_SLP (1 << 1)
+/* PWM register */
+#define PM800_PWM1 (0x40)
+#define PM800_PWM2 (0x41)
+#define PM800_PWM3 (0x42)
+#define PM800_PWM4 (0x43)
+
+/* RTC Registers */
+#define PM800_RTC_CONTROL (0xD0)
+#define PM800_RTC_MISC1 (0xE1)
+#define PM800_RTC_MISC2 (0xE2)
+#define PM800_RTC_MISC3 (0xE3)
+#define PM800_RTC_MISC4 (0xE4)
+#define PM800_RTC_MISC5 (0xE7)
+/* bit definitions of RTC Register 1 (0xD0) */
+#define PM800_ALARM1_EN (1 << 0)
+#define PM800_ALARM_WAKEUP (1 << 4)
+#define PM800_ALARM (1 << 5)
+#define PM800_RTC1_USE_XO (1 << 7)
+
+/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
+
+/* buck registers */
+#define PM800_SLEEP_BUCK1 (0x30)
+
+/* BUCK Sleep Mode Register 1: BUCK[1..4] */
+#define PM800_BUCK_SLP1 (0x5A)
+#define PM800_BUCK1_SLP1_SHIFT 0
+#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
+
+/* page 2 GPADC: slave addr 0x02 */
+#define PM800_GPADC_MEAS_EN1 (0x01)
+#define PM800_MEAS_EN1_VBAT (1 << 2)
+#define PM800_GPADC_MEAS_EN2 (0x02)
+#define PM800_MEAS_EN2_RFTMP (1 << 0)
+#define PM800_MEAS_GP0_EN (1 << 2)
+#define PM800_MEAS_GP1_EN (1 << 3)
+#define PM800_MEAS_GP2_EN (1 << 4)
+#define PM800_MEAS_GP3_EN (1 << 5)
+#define PM800_MEAS_GP4_EN (1 << 6)
+
+#define PM800_GPADC_MISC_CONFIG1 (0x05)
+#define PM800_GPADC_MISC_CONFIG2 (0x06)
+#define PM800_GPADC_MISC_GPFSM_EN (1 << 0)
+#define PM800_GPADC_SLOW_MODE(x) (x << 3)
+
+#define PM800_GPADC_MISC_CONFIG3 (0x09)
+#define PM800_GPADC_MISC_CONFIG4 (0x0A)
+
+#define PM800_GPADC_PREBIAS1 (0x0F)
+#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
+#define PM800_GPADC_PREBIAS2 (0x10)
+
+#define PM800_GP_BIAS_ENA1 (0x14)
+#define PM800_GPADC_GP_BIAS_EN0 (1 << 0)
+#define PM800_GPADC_GP_BIAS_EN1 (1 << 1)
+#define PM800_GPADC_GP_BIAS_EN2 (1 << 2)
+#define PM800_GPADC_GP_BIAS_EN3 (1 << 3)
+
+#define PM800_GP_BIAS_OUT1 (0x15)
+#define PM800_BIAS_OUT_GP0 (1 << 0)
+#define PM800_BIAS_OUT_GP1 (1 << 1)
+#define PM800_BIAS_OUT_GP2 (1 << 2)
+#define PM800_BIAS_OUT_GP3 (1 << 3)
+
+#define PM800_GPADC0_LOW_TH 0x20
+#define PM800_GPADC1_LOW_TH 0x21
+#define PM800_GPADC2_LOW_TH 0x22
+#define PM800_GPADC3_LOW_TH 0x23
+#define PM800_GPADC4_LOW_TH 0x24
+
+#define PM800_GPADC0_UPP_TH 0x30
+#define PM800_GPADC1_UPP_TH 0x31
+#define PM800_GPADC2_UPP_TH 0x32
+#define PM800_GPADC3_UPP_TH 0x33
+#define PM800_GPADC4_UPP_TH 0x34
+
+#define PM800_VBBAT_MEAS1 0x40
+#define PM800_VBBAT_MEAS2 0x41
+#define PM800_VBAT_MEAS1 0x42
+#define PM800_VBAT_MEAS2 0x43
+#define PM800_VSYS_MEAS1 0x44
+#define PM800_VSYS_MEAS2 0x45
+#define PM800_VCHG_MEAS1 0x46
+#define PM800_VCHG_MEAS2 0x47
+#define PM800_TINT_MEAS1 0x50
+#define PM800_TINT_MEAS2 0x51
+#define PM800_PMOD_MEAS1 0x52
+#define PM800_PMOD_MEAS2 0x53
+
+#define PM800_GPADC0_MEAS1 0x54
+#define PM800_GPADC0_MEAS2 0x55
+#define PM800_GPADC1_MEAS1 0x56
+#define PM800_GPADC1_MEAS2 0x57
+#define PM800_GPADC2_MEAS1 0x58
+#define PM800_GPADC2_MEAS2 0x59
+#define PM800_GPADC3_MEAS1 0x5A
+#define PM800_GPADC3_MEAS2 0x5B
+#define PM800_GPADC4_MEAS1 0x5C
+#define PM800_GPADC4_MEAS2 0x5D
+
+#define PM800_GPADC4_AVG1 0xA8
+#define PM800_GPADC4_AVG2 0xA9
+
+/* 88PM805 Registers */
+#define PM805_MAIN_POWERUP (0x01)
+#define PM805_INT_STATUS0 (0x02) /* for ena/dis all interrupts */
+
+#define PM805_STATUS0_INT_CLEAR (1 << 0)
+#define PM805_STATUS0_INV_INT (1 << 1)
+#define PM800_STATUS0_INT_MASK (1 << 2)
+
+#define PM805_INT_STATUS1 (0x03)
+
+#define PM805_INT1_HP1_SHRT (1 << 0)
+#define PM805_INT1_HP2_SHRT (1 << 1)
+#define PM805_INT1_MIC_CONFLICT (1 << 2)
+#define PM805_INT1_CLIP_FAULT (1 << 3)
+#define PM805_INT1_LDO_OFF (1 << 4)
+#define PM805_INT1_SRC_DPLL_LOCK (1 << 5)
+
+#define PM805_INT_STATUS2 (0x04)
+
+#define PM805_INT2_MIC_DET (1 << 0)
+#define PM805_INT2_SHRT_BTN_DET (1 << 1)
+#define PM805_INT2_VOLM_BTN_DET (1 << 2)
+#define PM805_INT2_VOLP_BTN_DET (1 << 3)
+#define PM805_INT2_RAW_PLL_FAULT (1 << 4)
+#define PM805_INT2_FINE_PLL_FAULT (1 << 5)
+
+#define PM805_INT_MASK1 (0x05)
+#define PM805_INT_MASK2 (0x06)
+#define PM805_SHRT_BTN_DET (1 << 1)
+
+/* number of status and int reg in a row */
+#define PM805_INT_REG_NUM (2)
+
+#define PM805_MIC_DET1 (0x07)
+#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET2 (0x08)
+#define PM805_MIC_DET_STATUS1 (0x09)
+
+#define PM805_MIC_DET_STATUS3 (0x0A)
+#define PM805_AUTO_SEQ_STATUS1 (0x0B)
+#define PM805_AUTO_SEQ_STATUS2 (0x0C)
+
+#define PM805_ADC_SETTING1 (0x10)
+#define PM805_ADC_SETTING2 (0x11)
+#define PM805_ADC_SETTING3 (0x11)
+#define PM805_ADC_GAIN1 (0x12)
+#define PM805_ADC_GAIN2 (0x13)
+#define PM805_DMIC_SETTING (0x15)
+#define PM805_DWS_SETTING (0x16)
+#define PM805_MIC_CONFLICT_STS (0x17)
+
+#define PM805_PDM_SETTING1 (0x20)
+#define PM805_PDM_SETTING2 (0x21)
+#define PM805_PDM_SETTING3 (0x22)
+#define PM805_PDM_CONTROL1 (0x23)
+#define PM805_PDM_CONTROL2 (0x24)
+#define PM805_PDM_CONTROL3 (0x25)
+
+#define PM805_HEADPHONE_SETTING (0x26)
+#define PM805_HEADPHONE_GAIN_A2A (0x27)
+#define PM805_HEADPHONE_SHORT_STATE (0x28)
+#define PM805_EARPHONE_SETTING (0x29)
+#define PM805_AUTO_SEQ_SETTING (0x2A)
+
+struct pm80x_rtc_pdata {
+ int vrtc;
+ int rtc_wakeup;
+};
+
+struct pm80x_subchip {
+ struct i2c_client *power_page; /* chip client for power page */
+ struct i2c_client *gpadc_page; /* chip client for gpadc page */
+ struct regmap *regmap_power;
+ struct regmap *regmap_gpadc;
+ unsigned short power_page_addr; /* power page I2C address */
+ unsigned short gpadc_page_addr; /* gpadc page I2C address */
+};
+
+struct pm80x_chip {
+ struct pm80x_subchip *subchip;
+ struct device *dev;
+ struct i2c_client *client;
+ struct i2c_client *companion;
+ struct regmap *regmap;
+ struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+ unsigned char version;
+ int id;
+ int irq;
+ int irq_mode;
+ unsigned long wu_flag;
+ spinlock_t lock;
+};
+
+struct pm80x_platform_data {
+ struct pm80x_rtc_pdata *rtc;
+ unsigned short power_page_addr; /* power page I2C address */
+ unsigned short gpadc_page_addr; /* gpadc page I2C address */
+ int irq_mode; /* Clear interrupt by read/write(0/1) */
+ int batt_det; /* enable/disable */
+ int (*plat_config)(struct pm80x_chip *chip,
+ struct pm80x_platform_data *pdata);
+};
+
+extern const struct dev_pm_ops pm80x_pm_ops;
+extern const struct regmap_config pm80x_regmap_config;
+
+static inline int pm80x_request_irq(struct pm80x_chip *pm80x, int irq,
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data)
+{
+ if (!pm80x->irq_data)
+ return -EINVAL;
+ return request_threaded_irq(regmap_irq_get_virq(pm80x->irq_data, irq),
+ NULL, handler, flags, name, data);
+}
+
+static inline void pm80x_free_irq(struct pm80x_chip *pm80x, int irq, void *data)
+{
+ if (!pm80x->irq_data)
+ return;
+ free_irq(regmap_irq_get_virq(pm80x->irq_data, irq), data);
+}
+
+#ifdef CONFIG_PM
+static inline int pm80x_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ set_bit((1 << irq), &chip->wu_flag);
+
+ return 0;
+}
+
+static inline int pm80x_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ clear_bit((1 << irq), &chip->wu_flag);
+
+ return 0;
+}
+#endif
+
+extern int pm80x_init(struct i2c_client *client,
+ const struct i2c_device_id *id) __devinit;
+extern int pm80x_deinit(struct i2c_client *client);
+#endif /* __LINUX_MFD_88PM80X_H */
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 84d071ade1d8..7b24943779fa 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -136,6 +136,7 @@ enum {
PM8607_ID_LDO13,
PM8607_ID_LDO14,
PM8607_ID_LDO15,
+ PM8606_ID_PREG,
PM8607_ID_RG_MAX,
};
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index bc9b84b60ec6..3764cb6759e3 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -9,6 +9,7 @@
#include <linux/atomic.h>
#include <linux/mutex.h>
+#include <linux/irqdomain.h>
struct device;
@@ -227,6 +228,7 @@ enum ab8500_version {
* @irq_lock: genirq bus lock
* @transfer_ongoing: 0 if no transfer ongoing
* @irq: irq line
+ * @irq_domain: irq domain
* @version: chip version id (e.g. ab8500 or ab9540)
* @chip_id: chip revision id
* @write: register write
@@ -247,6 +249,7 @@ struct ab8500 {
atomic_t transfer_ongoing;
int irq_base;
int irq;
+ struct irq_domain *domain;
enum ab8500_version version;
u8 chip_id;
@@ -338,4 +341,6 @@ static inline int is_ab8500_2p0(struct ab8500 *ab)
return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0));
}
+int ab8500_irq_get_virq(struct ab8500 *ab8500, int irq);
+
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
new file mode 100644
index 000000000000..dd231ac0bb1f
--- /dev/null
+++ b/include/linux/mfd/arizona/core.h
@@ -0,0 +1,114 @@
+/*
+ * Arizona MFD internals
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM_ARIZONA_CORE_H
+#define _WM_ARIZONA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/arizona/pdata.h>
+
+#define ARIZONA_MAX_CORE_SUPPLIES 3
+
+enum arizona_type {
+ WM5102 = 1,
+ WM5110 = 2,
+};
+
+#define ARIZONA_IRQ_GP1 0
+#define ARIZONA_IRQ_GP2 1
+#define ARIZONA_IRQ_GP3 2
+#define ARIZONA_IRQ_GP4 3
+#define ARIZONA_IRQ_GP5_FALL 4
+#define ARIZONA_IRQ_GP5_RISE 5
+#define ARIZONA_IRQ_JD_FALL 6
+#define ARIZONA_IRQ_JD_RISE 7
+#define ARIZONA_IRQ_DSP1_RAM_RDY 8
+#define ARIZONA_IRQ_DSP2_RAM_RDY 9
+#define ARIZONA_IRQ_DSP3_RAM_RDY 10
+#define ARIZONA_IRQ_DSP4_RAM_RDY 11
+#define ARIZONA_IRQ_DSP_IRQ1 12
+#define ARIZONA_IRQ_DSP_IRQ2 13
+#define ARIZONA_IRQ_DSP_IRQ3 14
+#define ARIZONA_IRQ_DSP_IRQ4 15
+#define ARIZONA_IRQ_DSP_IRQ5 16
+#define ARIZONA_IRQ_DSP_IRQ6 17
+#define ARIZONA_IRQ_DSP_IRQ7 18
+#define ARIZONA_IRQ_DSP_IRQ8 19
+#define ARIZONA_IRQ_SPK_SHUTDOWN_WARN 20
+#define ARIZONA_IRQ_SPK_SHUTDOWN 21
+#define ARIZONA_IRQ_MICDET 22
+#define ARIZONA_IRQ_HPDET 23
+#define ARIZONA_IRQ_WSEQ_DONE 24
+#define ARIZONA_IRQ_DRC2_SIG_DET 25
+#define ARIZONA_IRQ_DRC1_SIG_DET 26
+#define ARIZONA_IRQ_ASRC2_LOCK 27
+#define ARIZONA_IRQ_ASRC1_LOCK 28
+#define ARIZONA_IRQ_UNDERCLOCKED 29
+#define ARIZONA_IRQ_OVERCLOCKED 30
+#define ARIZONA_IRQ_FLL2_LOCK 31
+#define ARIZONA_IRQ_FLL1_LOCK 32
+#define ARIZONA_IRQ_CLKGEN_ERR 33
+#define ARIZONA_IRQ_CLKGEN_ERR_ASYNC 34
+#define ARIZONA_IRQ_ASRC_CFG_ERR 35
+#define ARIZONA_IRQ_AIF3_ERR 36
+#define ARIZONA_IRQ_AIF2_ERR 37
+#define ARIZONA_IRQ_AIF1_ERR 38
+#define ARIZONA_IRQ_CTRLIF_ERR 39
+#define ARIZONA_IRQ_MIXER_DROPPED_SAMPLES 40
+#define ARIZONA_IRQ_ASYNC_CLK_ENA_LOW 41
+#define ARIZONA_IRQ_SYSCLK_ENA_LOW 42
+#define ARIZONA_IRQ_ISRC1_CFG_ERR 43
+#define ARIZONA_IRQ_ISRC2_CFG_ERR 44
+#define ARIZONA_IRQ_BOOT_DONE 45
+#define ARIZONA_IRQ_DCS_DAC_DONE 46
+#define ARIZONA_IRQ_DCS_HP_DONE 47
+#define ARIZONA_IRQ_FLL2_CLOCK_OK 48
+#define ARIZONA_IRQ_FLL1_CLOCK_OK 49
+
+#define ARIZONA_NUM_IRQ 50
+
+struct arizona {
+ struct regmap *regmap;
+ struct device *dev;
+
+ enum arizona_type type;
+ unsigned int rev;
+
+ int num_core_supplies;
+ struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
+ struct regulator *dcvdd;
+
+ struct arizona_pdata pdata;
+
+ int irq;
+ struct irq_domain *virq;
+ struct regmap_irq_chip_data *aod_irq_chip;
+ struct regmap_irq_chip_data *irq_chip;
+
+ struct mutex clk_lock;
+ int clk32k_ref;
+};
+
+int arizona_clk32k_enable(struct arizona *arizona);
+int arizona_clk32k_disable(struct arizona *arizona);
+
+int arizona_request_irq(struct arizona *arizona, int irq, char *name,
+ irq_handler_t handler, void *data);
+void arizona_free_irq(struct arizona *arizona, int irq, void *data);
+int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+
+int wm5102_patch(struct arizona *arizona);
+int wm5110_patch(struct arizona *arizona);
+
+#endif
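The struct above carries the shared regmap, supply and interrupt state that the Arizona function drivers build on: arizona_request_irq()/arizona_free_irq() take ARIZONA_IRQ_* indices, and arizona_clk32k_enable()/arizona_clk32k_disable() reference-count the internal 32kHz clock. A minimal caller sketch (not part of the patch; the handler and init function are illustrative assumptions):

#include <linux/interrupt.h>
#include <linux/mfd/arizona/core.h>

static irqreturn_t example_boot_done(int irq, void *data)
{
	/* data is the struct arizona passed to arizona_request_irq() below */
	return IRQ_HANDLED;
}

static int example_init(struct arizona *arizona)
{
	int ret;

	ret = arizona_request_irq(arizona, ARIZONA_IRQ_BOOT_DONE, "Boot done",
				  example_boot_done, arizona);
	if (ret != 0)
		return ret;

	/* Hold a reference on the 32kHz clock for as long as it is needed */
	ret = arizona_clk32k_enable(arizona);
	if (ret != 0)
		arizona_free_irq(arizona, ARIZONA_IRQ_BOOT_DONE, arizona);

	return ret;
}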
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
new file mode 100644
index 000000000000..7ab442905a57
--- /dev/null
+++ b/include/linux/mfd/arizona/pdata.h
@@ -0,0 +1,119 @@
+/*
+ * Platform data for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_PDATA_H
+#define _ARIZONA_PDATA_H
+
+#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
+#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
+#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
+#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
+#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
+#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
+#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
+#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
+#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
+#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
+#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
+#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
+#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
+#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
+#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
+#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
+#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
+#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
+#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
+#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_FN - [6:0] */
+#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_FN - [6:0] */
+#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_FN - [6:0] */
+
+#define ARIZONA_MAX_GPIO 5
+
+#define ARIZONA_32KZ_MCLK1 1
+#define ARIZONA_32KZ_MCLK2 2
+#define ARIZONA_32KZ_NONE 3
+
+#define ARIZONA_MAX_INPUT 4
+
+#define ARIZONA_DMIC_MICVDD 0
+#define ARIZONA_DMIC_MICBIAS1 1
+#define ARIZONA_DMIC_MICBIAS2 2
+#define ARIZONA_DMIC_MICBIAS3 3
+
+#define ARIZONA_INMODE_DIFF 0
+#define ARIZONA_INMODE_SE 1
+#define ARIZONA_INMODE_DMIC 2
+
+#define ARIZONA_MAX_OUTPUT 6
+
+#define ARIZONA_MAX_PDM_SPK 2
+
+struct regulator_init_data;
+
+struct arizona_micd_config {
+ unsigned int src;
+ unsigned int bias;
+ bool gpio;
+};
+
+struct arizona_pdata {
+ int reset; /** GPIO controlling /RESET, if any */
+ int ldoena; /** GPIO controlling LDOENA, if any */
+
+ /** Regulator configuration for MICVDD */
+ struct regulator_init_data *micvdd;
+
+ /** Regulator configuration for LDO1 */
+ struct regulator_init_data *ldo1;
+
+ /** If a direct 32kHz clock is provided on an MCLK specify it here */
+ int clk32k_src;
+
+ bool irq_active_high; /** IRQ polarity */
+
+ /* Base GPIO */
+ int gpio_base;
+
+ /** Pin state for GPIO pins */
+ int gpio_defaults[ARIZONA_MAX_GPIO];
+
+ /** GPIO for mic detection polarity */
+ int micd_pol_gpio;
+
+ /** Headset polarity configurations */
+ struct arizona_micd_config *micd_configs;
+ int num_micd_configs;
+
+ /** Reference voltage for DMIC inputs */
+ int dmic_ref[ARIZONA_MAX_INPUT];
+
+ /** Mode of input structures */
+ int inmode[ARIZONA_MAX_INPUT];
+
+ /** Mode for outputs */
+ bool out_mono[ARIZONA_MAX_OUTPUT];
+
+ /** PDM speaker mute setting */
+ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK];
+
+ /** PDM speaker format */
+ unsigned int spk_fmt[ARIZONA_MAX_PDM_SPK];
+};
+
+#endif
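A minimal board-file sketch (not part of the patch) showing how struct arizona_pdata might be populated; every GPIO number and regulator limit below is a placeholder assumption, and the structure would normally be handed to the device through platform data (for example via i2c_board_info).

#include <linux/mfd/arizona/pdata.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data example_micvdd = {
	.constraints = {
		.min_uV = 1800000,		/* placeholder limits */
		.max_uV = 3300000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct arizona_pdata example_arizona_pdata = {
	.reset = 10,				/* placeholder /RESET GPIO */
	.ldoena = 11,				/* placeholder LDOENA GPIO */
	.micvdd = &example_micvdd,
	.clk32k_src = ARIZONA_32KZ_MCLK2,
	.gpio_base = 200,			/* placeholder GPIO range base */
	.micd_pol_gpio = 12,			/* placeholder */
	.inmode = { ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC },
	.dmic_ref = { 0, ARIZONA_DMIC_MICBIAS1 },
};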
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
new file mode 100644
index 000000000000..7671a287dfee
--- /dev/null
+++ b/include/linux/mfd/arizona/registers.h
@@ -0,0 +1,6594 @@
+/*
+ * ARIZONA register definitions
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_REGISTERS_H
+#define _ARIZONA_REGISTERS_H
+
+/*
+ * Register values.
+ */
+#define ARIZONA_SOFTWARE_RESET 0x00
+#define ARIZONA_DEVICE_REVISION 0x01
+#define ARIZONA_CTRL_IF_SPI_CFG_1 0x08
+#define ARIZONA_CTRL_IF_I2C1_CFG_1 0x09
+#define ARIZONA_CTRL_IF_I2C2_CFG_1 0x0A
+#define ARIZONA_CTRL_IF_I2C1_CFG_2 0x0B
+#define ARIZONA_CTRL_IF_I2C2_CFG_2 0x0C
+#define ARIZONA_CTRL_IF_STATUS_1 0x0D
+#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16
+#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17
+#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18
+#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A
+#define ARIZONA_TONE_GENERATOR_1 0x20
+#define ARIZONA_TONE_GENERATOR_2 0x21
+#define ARIZONA_TONE_GENERATOR_3 0x22
+#define ARIZONA_TONE_GENERATOR_4 0x23
+#define ARIZONA_TONE_GENERATOR_5 0x24
+#define ARIZONA_PWM_DRIVE_1 0x30
+#define ARIZONA_PWM_DRIVE_2 0x31
+#define ARIZONA_PWM_DRIVE_3 0x32
+#define ARIZONA_WAKE_CONTROL 0x40
+#define ARIZONA_SEQUENCE_CONTROL 0x41
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x68
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x69
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x6A
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x6B
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6C
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6D
+#define ARIZONA_COMFORT_NOISE_GENERATOR 0x70
+#define ARIZONA_HAPTICS_CONTROL_1 0x90
+#define ARIZONA_HAPTICS_CONTROL_2 0x91
+#define ARIZONA_HAPTICS_PHASE_1_INTENSITY 0x92
+#define ARIZONA_HAPTICS_PHASE_1_DURATION 0x93
+#define ARIZONA_HAPTICS_PHASE_2_INTENSITY 0x94
+#define ARIZONA_HAPTICS_PHASE_2_DURATION 0x95
+#define ARIZONA_HAPTICS_PHASE_3_INTENSITY 0x96
+#define ARIZONA_HAPTICS_PHASE_3_DURATION 0x97
+#define ARIZONA_HAPTICS_STATUS 0x98
+#define ARIZONA_CLOCK_32K_1 0x100
+#define ARIZONA_SYSTEM_CLOCK_1 0x101
+#define ARIZONA_SAMPLE_RATE_1 0x102
+#define ARIZONA_SAMPLE_RATE_2 0x103
+#define ARIZONA_SAMPLE_RATE_3 0x104
+#define ARIZONA_SAMPLE_RATE_1_STATUS 0x10A
+#define ARIZONA_SAMPLE_RATE_2_STATUS 0x10B
+#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C
+#define ARIZONA_ASYNC_CLOCK_1 0x112
+#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149
+#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A
+#define ARIZONA_RATE_ESTIMATOR_1 0x152
+#define ARIZONA_RATE_ESTIMATOR_2 0x153
+#define ARIZONA_RATE_ESTIMATOR_3 0x154
+#define ARIZONA_RATE_ESTIMATOR_4 0x155
+#define ARIZONA_RATE_ESTIMATOR_5 0x156
+#define ARIZONA_FLL1_CONTROL_1 0x171
+#define ARIZONA_FLL1_CONTROL_2 0x172
+#define ARIZONA_FLL1_CONTROL_3 0x173
+#define ARIZONA_FLL1_CONTROL_4 0x174
+#define ARIZONA_FLL1_CONTROL_5 0x175
+#define ARIZONA_FLL1_CONTROL_6 0x176
+#define ARIZONA_FLL1_LOOP_FILTER_TEST_1 0x177
+#define ARIZONA_FLL1_NCO_TEST_0 0x178
+#define ARIZONA_FLL1_SYNCHRONISER_1 0x181
+#define ARIZONA_FLL1_SYNCHRONISER_2 0x182
+#define ARIZONA_FLL1_SYNCHRONISER_3 0x183
+#define ARIZONA_FLL1_SYNCHRONISER_4 0x184
+#define ARIZONA_FLL1_SYNCHRONISER_5 0x185
+#define ARIZONA_FLL1_SYNCHRONISER_6 0x186
+#define ARIZONA_FLL1_SPREAD_SPECTRUM 0x189
+#define ARIZONA_FLL1_GPIO_CLOCK 0x18A
+#define ARIZONA_FLL2_CONTROL_1 0x191
+#define ARIZONA_FLL2_CONTROL_2 0x192
+#define ARIZONA_FLL2_CONTROL_3 0x193
+#define ARIZONA_FLL2_CONTROL_4 0x194
+#define ARIZONA_FLL2_CONTROL_5 0x195
+#define ARIZONA_FLL2_CONTROL_6 0x196
+#define ARIZONA_FLL2_LOOP_FILTER_TEST_1 0x197
+#define ARIZONA_FLL2_NCO_TEST_0 0x198
+#define ARIZONA_FLL2_SYNCHRONISER_1 0x1A1
+#define ARIZONA_FLL2_SYNCHRONISER_2 0x1A2
+#define ARIZONA_FLL2_SYNCHRONISER_3 0x1A3
+#define ARIZONA_FLL2_SYNCHRONISER_4 0x1A4
+#define ARIZONA_FLL2_SYNCHRONISER_5 0x1A5
+#define ARIZONA_FLL2_SYNCHRONISER_6 0x1A6
+#define ARIZONA_FLL2_SPREAD_SPECTRUM 0x1A9
+#define ARIZONA_FLL2_GPIO_CLOCK 0x1AA
+#define ARIZONA_MIC_CHARGE_PUMP_1 0x200
+#define ARIZONA_LDO1_CONTROL_1 0x210
+#define ARIZONA_LDO2_CONTROL_1 0x213
+#define ARIZONA_MIC_BIAS_CTRL_1 0x218
+#define ARIZONA_MIC_BIAS_CTRL_2 0x219
+#define ARIZONA_MIC_BIAS_CTRL_3 0x21A
+#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
+#define ARIZONA_HEADPHONE_DETECT_1 0x29B
+#define ARIZONA_HEADPHONE_DETECT_2 0x29C
+#define ARIZONA_MIC_DETECT_1 0x2A3
+#define ARIZONA_MIC_DETECT_2 0x2A4
+#define ARIZONA_MIC_DETECT_3 0x2A5
+#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
+#define ARIZONA_ISOLATION_CONTROL 0x2CB
+#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
+#define ARIZONA_INPUT_ENABLES 0x300
+#define ARIZONA_INPUT_ENABLES_STATUS 0x301
+#define ARIZONA_INPUT_RATE 0x308
+#define ARIZONA_INPUT_VOLUME_RAMP 0x309
+#define ARIZONA_IN1L_CONTROL 0x310
+#define ARIZONA_ADC_DIGITAL_VOLUME_1L 0x311
+#define ARIZONA_DMIC1L_CONTROL 0x312
+#define ARIZONA_IN1R_CONTROL 0x314
+#define ARIZONA_ADC_DIGITAL_VOLUME_1R 0x315
+#define ARIZONA_DMIC1R_CONTROL 0x316
+#define ARIZONA_IN2L_CONTROL 0x318
+#define ARIZONA_ADC_DIGITAL_VOLUME_2L 0x319
+#define ARIZONA_DMIC2L_CONTROL 0x31A
+#define ARIZONA_IN2R_CONTROL 0x31C
+#define ARIZONA_ADC_DIGITAL_VOLUME_2R 0x31D
+#define ARIZONA_DMIC2R_CONTROL 0x31E
+#define ARIZONA_IN3L_CONTROL 0x320
+#define ARIZONA_ADC_DIGITAL_VOLUME_3L 0x321
+#define ARIZONA_DMIC3L_CONTROL 0x322
+#define ARIZONA_IN3R_CONTROL 0x324
+#define ARIZONA_ADC_DIGITAL_VOLUME_3R 0x325
+#define ARIZONA_DMIC3R_CONTROL 0x326
+#define ARIZONA_IN4L_CONTROL 0x328
+#define ARIZONA_ADC_DIGITAL_VOLUME_4L 0x329
+#define ARIZONA_DMIC4L_CONTROL 0x32A
+#define ARIZONA_ADC_DIGITAL_VOLUME_4R 0x32D
+#define ARIZONA_DMIC4R_CONTROL 0x32E
+#define ARIZONA_OUTPUT_ENABLES_1 0x400
+#define ARIZONA_OUTPUT_STATUS_1 0x401
+#define ARIZONA_RAW_OUTPUT_STATUS_1 0x406
+#define ARIZONA_OUTPUT_RATE_1 0x408
+#define ARIZONA_OUTPUT_VOLUME_RAMP 0x409
+#define ARIZONA_OUTPUT_PATH_CONFIG_1L 0x410
+#define ARIZONA_DAC_DIGITAL_VOLUME_1L 0x411
+#define ARIZONA_DAC_VOLUME_LIMIT_1L 0x412
+#define ARIZONA_NOISE_GATE_SELECT_1L 0x413
+#define ARIZONA_OUTPUT_PATH_CONFIG_1R 0x414
+#define ARIZONA_DAC_DIGITAL_VOLUME_1R 0x415
+#define ARIZONA_DAC_VOLUME_LIMIT_1R 0x416
+#define ARIZONA_NOISE_GATE_SELECT_1R 0x417
+#define ARIZONA_OUTPUT_PATH_CONFIG_2L 0x418
+#define ARIZONA_DAC_DIGITAL_VOLUME_2L 0x419
+#define ARIZONA_DAC_VOLUME_LIMIT_2L 0x41A
+#define ARIZONA_NOISE_GATE_SELECT_2L 0x41B
+#define ARIZONA_OUTPUT_PATH_CONFIG_2R 0x41C
+#define ARIZONA_DAC_DIGITAL_VOLUME_2R 0x41D
+#define ARIZONA_DAC_VOLUME_LIMIT_2R 0x41E
+#define ARIZONA_NOISE_GATE_SELECT_2R 0x41F
+#define ARIZONA_OUTPUT_PATH_CONFIG_3L 0x420
+#define ARIZONA_DAC_DIGITAL_VOLUME_3L 0x421
+#define ARIZONA_DAC_VOLUME_LIMIT_3L 0x422
+#define ARIZONA_NOISE_GATE_SELECT_3L 0x423
+#define ARIZONA_OUTPUT_PATH_CONFIG_3R 0x424
+#define ARIZONA_DAC_DIGITAL_VOLUME_3R 0x425
+#define ARIZONA_DAC_VOLUME_LIMIT_3R 0x426
+#define ARIZONA_NOISE_GATE_SELECT_3R 0x427
+#define ARIZONA_OUTPUT_PATH_CONFIG_4L 0x428
+#define ARIZONA_DAC_DIGITAL_VOLUME_4L 0x429
+#define ARIZONA_OUT_VOLUME_4L 0x42A
+#define ARIZONA_NOISE_GATE_SELECT_4L 0x42B
+#define ARIZONA_OUTPUT_PATH_CONFIG_4R 0x42C
+#define ARIZONA_DAC_DIGITAL_VOLUME_4R 0x42D
+#define ARIZONA_OUT_VOLUME_4R 0x42E
+#define ARIZONA_NOISE_GATE_SELECT_4R 0x42F
+#define ARIZONA_OUTPUT_PATH_CONFIG_5L 0x430
+#define ARIZONA_DAC_DIGITAL_VOLUME_5L 0x431
+#define ARIZONA_DAC_VOLUME_LIMIT_5L 0x432
+#define ARIZONA_NOISE_GATE_SELECT_5L 0x433
+#define ARIZONA_OUTPUT_PATH_CONFIG_5R 0x434
+#define ARIZONA_DAC_DIGITAL_VOLUME_5R 0x435
+#define ARIZONA_DAC_VOLUME_LIMIT_5R 0x436
+#define ARIZONA_NOISE_GATE_SELECT_5R 0x437
+#define ARIZONA_OUTPUT_PATH_CONFIG_6L 0x438
+#define ARIZONA_DAC_DIGITAL_VOLUME_6L 0x439
+#define ARIZONA_DAC_VOLUME_LIMIT_6L 0x43A
+#define ARIZONA_NOISE_GATE_SELECT_6L 0x43B
+#define ARIZONA_OUTPUT_PATH_CONFIG_6R 0x43C
+#define ARIZONA_DAC_DIGITAL_VOLUME_6R 0x43D
+#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
+#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
+#define ARIZONA_DAC_AEC_CONTROL_1 0x450
+#define ARIZONA_NOISE_GATE_CONTROL 0x458
+#define ARIZONA_PDM_SPK1_CTRL_1 0x490
+#define ARIZONA_PDM_SPK1_CTRL_2 0x491
+#define ARIZONA_PDM_SPK2_CTRL_1 0x492
+#define ARIZONA_PDM_SPK2_CTRL_2 0x493
+#define ARIZONA_DAC_COMP_1 0x4DC
+#define ARIZONA_DAC_COMP_2 0x4DD
+#define ARIZONA_DAC_COMP_3 0x4DE
+#define ARIZONA_DAC_COMP_4 0x4DF
+#define ARIZONA_AIF1_BCLK_CTRL 0x500
+#define ARIZONA_AIF1_TX_PIN_CTRL 0x501
+#define ARIZONA_AIF1_RX_PIN_CTRL 0x502
+#define ARIZONA_AIF1_RATE_CTRL 0x503
+#define ARIZONA_AIF1_FORMAT 0x504
+#define ARIZONA_AIF1_TX_BCLK_RATE 0x505
+#define ARIZONA_AIF1_RX_BCLK_RATE 0x506
+#define ARIZONA_AIF1_FRAME_CTRL_1 0x507
+#define ARIZONA_AIF1_FRAME_CTRL_2 0x508
+#define ARIZONA_AIF1_FRAME_CTRL_3 0x509
+#define ARIZONA_AIF1_FRAME_CTRL_4 0x50A
+#define ARIZONA_AIF1_FRAME_CTRL_5 0x50B
+#define ARIZONA_AIF1_FRAME_CTRL_6 0x50C
+#define ARIZONA_AIF1_FRAME_CTRL_7 0x50D
+#define ARIZONA_AIF1_FRAME_CTRL_8 0x50E
+#define ARIZONA_AIF1_FRAME_CTRL_9 0x50F
+#define ARIZONA_AIF1_FRAME_CTRL_10 0x510
+#define ARIZONA_AIF1_FRAME_CTRL_11 0x511
+#define ARIZONA_AIF1_FRAME_CTRL_12 0x512
+#define ARIZONA_AIF1_FRAME_CTRL_13 0x513
+#define ARIZONA_AIF1_FRAME_CTRL_14 0x514
+#define ARIZONA_AIF1_FRAME_CTRL_15 0x515
+#define ARIZONA_AIF1_FRAME_CTRL_16 0x516
+#define ARIZONA_AIF1_FRAME_CTRL_17 0x517
+#define ARIZONA_AIF1_FRAME_CTRL_18 0x518
+#define ARIZONA_AIF1_TX_ENABLES 0x519
+#define ARIZONA_AIF1_RX_ENABLES 0x51A
+#define ARIZONA_AIF1_FORCE_WRITE 0x51B
+#define ARIZONA_AIF2_BCLK_CTRL 0x540
+#define ARIZONA_AIF2_TX_PIN_CTRL 0x541
+#define ARIZONA_AIF2_RX_PIN_CTRL 0x542
+#define ARIZONA_AIF2_RATE_CTRL 0x543
+#define ARIZONA_AIF2_FORMAT 0x544
+#define ARIZONA_AIF2_TX_BCLK_RATE 0x545
+#define ARIZONA_AIF2_RX_BCLK_RATE 0x546
+#define ARIZONA_AIF2_FRAME_CTRL_1 0x547
+#define ARIZONA_AIF2_FRAME_CTRL_2 0x548
+#define ARIZONA_AIF2_FRAME_CTRL_3 0x549
+#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A
+#define ARIZONA_AIF2_FRAME_CTRL_11 0x551
+#define ARIZONA_AIF2_FRAME_CTRL_12 0x552
+#define ARIZONA_AIF2_TX_ENABLES 0x559
+#define ARIZONA_AIF2_RX_ENABLES 0x55A
+#define ARIZONA_AIF2_FORCE_WRITE 0x55B
+#define ARIZONA_AIF3_BCLK_CTRL 0x580
+#define ARIZONA_AIF3_TX_PIN_CTRL 0x581
+#define ARIZONA_AIF3_RX_PIN_CTRL 0x582
+#define ARIZONA_AIF3_RATE_CTRL 0x583
+#define ARIZONA_AIF3_FORMAT 0x584
+#define ARIZONA_AIF3_TX_BCLK_RATE 0x585
+#define ARIZONA_AIF3_RX_BCLK_RATE 0x586
+#define ARIZONA_AIF3_FRAME_CTRL_1 0x587
+#define ARIZONA_AIF3_FRAME_CTRL_2 0x588
+#define ARIZONA_AIF3_FRAME_CTRL_3 0x589
+#define ARIZONA_AIF3_FRAME_CTRL_4 0x58A
+#define ARIZONA_AIF3_FRAME_CTRL_11 0x591
+#define ARIZONA_AIF3_FRAME_CTRL_12 0x592
+#define ARIZONA_AIF3_TX_ENABLES 0x599
+#define ARIZONA_AIF3_RX_ENABLES 0x59A
+#define ARIZONA_AIF3_FORCE_WRITE 0x59B
+#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
+#define ARIZONA_SLIMBUS_RATES_1 0x5E5
+#define ARIZONA_SLIMBUS_RATES_2 0x5E6
+#define ARIZONA_SLIMBUS_RATES_3 0x5E7
+#define ARIZONA_SLIMBUS_RATES_4 0x5E8
+#define ARIZONA_SLIMBUS_RATES_5 0x5E9
+#define ARIZONA_SLIMBUS_RATES_6 0x5EA
+#define ARIZONA_SLIMBUS_RATES_7 0x5EB
+#define ARIZONA_SLIMBUS_RATES_8 0x5EC
+#define ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5
+#define ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6
+#define ARIZONA_SLIMBUS_RX_PORT_STATUS 0x5F7
+#define ARIZONA_SLIMBUS_TX_PORT_STATUS 0x5F8
+#define ARIZONA_PWM1MIX_INPUT_1_SOURCE 0x640
+#define ARIZONA_PWM1MIX_INPUT_1_VOLUME 0x641
+#define ARIZONA_PWM1MIX_INPUT_2_SOURCE 0x642
+#define ARIZONA_PWM1MIX_INPUT_2_VOLUME 0x643
+#define ARIZONA_PWM1MIX_INPUT_3_SOURCE 0x644
+#define ARIZONA_PWM1MIX_INPUT_3_VOLUME 0x645
+#define ARIZONA_PWM1MIX_INPUT_4_SOURCE 0x646
+#define ARIZONA_PWM1MIX_INPUT_4_VOLUME 0x647
+#define ARIZONA_PWM2MIX_INPUT_1_SOURCE 0x648
+#define ARIZONA_PWM2MIX_INPUT_1_VOLUME 0x649
+#define ARIZONA_PWM2MIX_INPUT_2_SOURCE 0x64A
+#define ARIZONA_PWM2MIX_INPUT_2_VOLUME 0x64B
+#define ARIZONA_PWM2MIX_INPUT_3_SOURCE 0x64C
+#define ARIZONA_PWM2MIX_INPUT_3_VOLUME 0x64D
+#define ARIZONA_PWM2MIX_INPUT_4_SOURCE 0x64E
+#define ARIZONA_PWM2MIX_INPUT_4_VOLUME 0x64F
+#define ARIZONA_MICMIX_INPUT_1_SOURCE 0x660
+#define ARIZONA_MICMIX_INPUT_1_VOLUME 0x661
+#define ARIZONA_MICMIX_INPUT_2_SOURCE 0x662
+#define ARIZONA_MICMIX_INPUT_2_VOLUME 0x663
+#define ARIZONA_MICMIX_INPUT_3_SOURCE 0x664
+#define ARIZONA_MICMIX_INPUT_3_VOLUME 0x665
+#define ARIZONA_MICMIX_INPUT_4_SOURCE 0x666
+#define ARIZONA_MICMIX_INPUT_4_VOLUME 0x667
+#define ARIZONA_NOISEMIX_INPUT_1_SOURCE 0x668
+#define ARIZONA_NOISEMIX_INPUT_1_VOLUME 0x669
+#define ARIZONA_NOISEMIX_INPUT_2_SOURCE 0x66A
+#define ARIZONA_NOISEMIX_INPUT_2_VOLUME 0x66B
+#define ARIZONA_NOISEMIX_INPUT_3_SOURCE 0x66C
+#define ARIZONA_NOISEMIX_INPUT_3_VOLUME 0x66D
+#define ARIZONA_NOISEMIX_INPUT_4_SOURCE 0x66E
+#define ARIZONA_NOISEMIX_INPUT_4_VOLUME 0x66F
+#define ARIZONA_OUT1LMIX_INPUT_1_SOURCE 0x680
+#define ARIZONA_OUT1LMIX_INPUT_1_VOLUME 0x681
+#define ARIZONA_OUT1LMIX_INPUT_2_SOURCE 0x682
+#define ARIZONA_OUT1LMIX_INPUT_2_VOLUME 0x683
+#define ARIZONA_OUT1LMIX_INPUT_3_SOURCE 0x684
+#define ARIZONA_OUT1LMIX_INPUT_3_VOLUME 0x685
+#define ARIZONA_OUT1LMIX_INPUT_4_SOURCE 0x686
+#define ARIZONA_OUT1LMIX_INPUT_4_VOLUME 0x687
+#define ARIZONA_OUT1RMIX_INPUT_1_SOURCE 0x688
+#define ARIZONA_OUT1RMIX_INPUT_1_VOLUME 0x689
+#define ARIZONA_OUT1RMIX_INPUT_2_SOURCE 0x68A
+#define ARIZONA_OUT1RMIX_INPUT_2_VOLUME 0x68B
+#define ARIZONA_OUT1RMIX_INPUT_3_SOURCE 0x68C
+#define ARIZONA_OUT1RMIX_INPUT_3_VOLUME 0x68D
+#define ARIZONA_OUT1RMIX_INPUT_4_SOURCE 0x68E
+#define ARIZONA_OUT1RMIX_INPUT_4_VOLUME 0x68F
+#define ARIZONA_OUT2LMIX_INPUT_1_SOURCE 0x690
+#define ARIZONA_OUT2LMIX_INPUT_1_VOLUME 0x691
+#define ARIZONA_OUT2LMIX_INPUT_2_SOURCE 0x692
+#define ARIZONA_OUT2LMIX_INPUT_2_VOLUME 0x693
+#define ARIZONA_OUT2LMIX_INPUT_3_SOURCE 0x694
+#define ARIZONA_OUT2LMIX_INPUT_3_VOLUME 0x695
+#define ARIZONA_OUT2LMIX_INPUT_4_SOURCE 0x696
+#define ARIZONA_OUT2LMIX_INPUT_4_VOLUME 0x697
+#define ARIZONA_OUT2RMIX_INPUT_1_SOURCE 0x698
+#define ARIZONA_OUT2RMIX_INPUT_1_VOLUME 0x699
+#define ARIZONA_OUT2RMIX_INPUT_2_SOURCE 0x69A
+#define ARIZONA_OUT2RMIX_INPUT_2_VOLUME 0x69B
+#define ARIZONA_OUT2RMIX_INPUT_3_SOURCE 0x69C
+#define ARIZONA_OUT2RMIX_INPUT_3_VOLUME 0x69D
+#define ARIZONA_OUT2RMIX_INPUT_4_SOURCE 0x69E
+#define ARIZONA_OUT2RMIX_INPUT_4_VOLUME 0x69F
+#define ARIZONA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
+#define ARIZONA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
+#define ARIZONA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
+#define ARIZONA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
+#define ARIZONA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
+#define ARIZONA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
+#define ARIZONA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
+#define ARIZONA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
+#define ARIZONA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
+#define ARIZONA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
+#define ARIZONA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
+#define ARIZONA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
+#define ARIZONA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
+#define ARIZONA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
+#define ARIZONA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
+#define ARIZONA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
+#define ARIZONA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
+#define ARIZONA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
+#define ARIZONA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
+#define ARIZONA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
+#define ARIZONA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
+#define ARIZONA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
+#define ARIZONA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
+#define ARIZONA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
+#define ARIZONA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
+#define ARIZONA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
+#define ARIZONA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
+#define ARIZONA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
+#define ARIZONA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
+#define ARIZONA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
+#define ARIZONA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
+#define ARIZONA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
+#define ARIZONA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
+#define ARIZONA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
+#define ARIZONA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
+#define ARIZONA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
+#define ARIZONA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
+#define ARIZONA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
+#define ARIZONA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
+#define ARIZONA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
+#define ARIZONA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
+#define ARIZONA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
+#define ARIZONA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
+#define ARIZONA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
+#define ARIZONA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
+#define ARIZONA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
+#define ARIZONA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
+#define ARIZONA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
+#define ARIZONA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
+#define ARIZONA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
+#define ARIZONA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
+#define ARIZONA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
+#define ARIZONA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
+#define ARIZONA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
+#define ARIZONA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
+#define ARIZONA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
+#define ARIZONA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
+#define ARIZONA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
+#define ARIZONA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
+#define ARIZONA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
+#define ARIZONA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
+#define ARIZONA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
+#define ARIZONA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
+#define ARIZONA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
+#define ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
+#define ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
+#define ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
+#define ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
+#define ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
+#define ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
+#define ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
+#define ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
+#define ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
+#define ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
+#define ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
+#define ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
+#define ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
+#define ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
+#define ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
+#define ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
+#define ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
+#define ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
+#define ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
+#define ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
+#define ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
+#define ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
+#define ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
+#define ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
+#define ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
+#define ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
+#define ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
+#define ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
+#define ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
+#define ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
+#define ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
+#define ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
+#define ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
+#define ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
+#define ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
+#define ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
+#define ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
+#define ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
+#define ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
+#define ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
+#define ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
+#define ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
+#define ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
+#define ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
+#define ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
+#define ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
+#define ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
+#define ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
+#define ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
+#define ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
+#define ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
+#define ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
+#define ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
+#define ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
+#define ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
+#define ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
+#define ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
+#define ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
+#define ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
+#define ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
+#define ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
+#define ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
+#define ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
+#define ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
+#define ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
+#define ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
+#define ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
+#define ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
+#define ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
+#define ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
+#define ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
+#define ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
+#define ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
+#define ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
+#define ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
+#define ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
+#define ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
+#define ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
+#define ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
+#define ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
+#define ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
+#define ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
+#define ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
+#define ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
+#define ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
+#define ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
+#define ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
+#define ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
+#define ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
+#define ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
+#define ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
+#define ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
+#define ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
+#define ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
+#define ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
+#define ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
+#define ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
+#define ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
+#define ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
+#define ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
+#define ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
+#define ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
+#define ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
+#define ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
+#define ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
+#define ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
+#define ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
+#define ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
+#define ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
+#define ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
+#define ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
+#define ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
+#define ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
+#define ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
+#define ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
+#define ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
+#define ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
+#define ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
+#define ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
+#define ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
+#define ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
+#define ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
+#define ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
+#define ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
+#define ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
+#define ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
+#define ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
+#define ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
+#define ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
+#define ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
+#define ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
+#define ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
+#define ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
+#define ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
+#define ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
+#define ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
+#define ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
+#define ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
+#define ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
+#define ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
+#define ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
+#define ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
+#define ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
+#define ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
+#define ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
+#define ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
+#define ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
+#define ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
+#define ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
+#define ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
+#define ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
+#define ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
+#define ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
+#define ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
+#define ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
+#define ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
+#define ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
+#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
+#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
+#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
+#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
+#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
+#define ARIZONA_EQ1MIX_INPUT_2_VOLUME 0x883
+#define ARIZONA_EQ1MIX_INPUT_3_SOURCE 0x884
+#define ARIZONA_EQ1MIX_INPUT_3_VOLUME 0x885
+#define ARIZONA_EQ1MIX_INPUT_4_SOURCE 0x886
+#define ARIZONA_EQ1MIX_INPUT_4_VOLUME 0x887
+#define ARIZONA_EQ2MIX_INPUT_1_SOURCE 0x888
+#define ARIZONA_EQ2MIX_INPUT_1_VOLUME 0x889
+#define ARIZONA_EQ2MIX_INPUT_2_SOURCE 0x88A
+#define ARIZONA_EQ2MIX_INPUT_2_VOLUME 0x88B
+#define ARIZONA_EQ2MIX_INPUT_3_SOURCE 0x88C
+#define ARIZONA_EQ2MIX_INPUT_3_VOLUME 0x88D
+#define ARIZONA_EQ2MIX_INPUT_4_SOURCE 0x88E
+#define ARIZONA_EQ2MIX_INPUT_4_VOLUME 0x88F
+#define ARIZONA_EQ3MIX_INPUT_1_SOURCE 0x890
+#define ARIZONA_EQ3MIX_INPUT_1_VOLUME 0x891
+#define ARIZONA_EQ3MIX_INPUT_2_SOURCE 0x892
+#define ARIZONA_EQ3MIX_INPUT_2_VOLUME 0x893
+#define ARIZONA_EQ3MIX_INPUT_3_SOURCE 0x894
+#define ARIZONA_EQ3MIX_INPUT_3_VOLUME 0x895
+#define ARIZONA_EQ3MIX_INPUT_4_SOURCE 0x896
+#define ARIZONA_EQ3MIX_INPUT_4_VOLUME 0x897
+#define ARIZONA_EQ4MIX_INPUT_1_SOURCE 0x898
+#define ARIZONA_EQ4MIX_INPUT_1_VOLUME 0x899
+#define ARIZONA_EQ4MIX_INPUT_2_SOURCE 0x89A
+#define ARIZONA_EQ4MIX_INPUT_2_VOLUME 0x89B
+#define ARIZONA_EQ4MIX_INPUT_3_SOURCE 0x89C
+#define ARIZONA_EQ4MIX_INPUT_3_VOLUME 0x89D
+#define ARIZONA_EQ4MIX_INPUT_4_SOURCE 0x89E
+#define ARIZONA_EQ4MIX_INPUT_4_VOLUME 0x89F
+#define ARIZONA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
+#define ARIZONA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
+#define ARIZONA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
+#define ARIZONA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
+#define ARIZONA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
+#define ARIZONA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
+#define ARIZONA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
+#define ARIZONA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
+#define ARIZONA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
+#define ARIZONA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
+#define ARIZONA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
+#define ARIZONA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
+#define ARIZONA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
+#define ARIZONA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
+#define ARIZONA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
+#define ARIZONA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
+#define ARIZONA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
+#define ARIZONA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
+#define ARIZONA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
+#define ARIZONA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
+#define ARIZONA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
+#define ARIZONA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
+#define ARIZONA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
+#define ARIZONA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
+#define ARIZONA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
+#define ARIZONA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
+#define ARIZONA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
+#define ARIZONA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
+#define ARIZONA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
+#define ARIZONA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
+#define ARIZONA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
+#define ARIZONA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
+#define ARIZONA_HPLP1MIX_INPUT_1_SOURCE 0x900
+#define ARIZONA_HPLP1MIX_INPUT_1_VOLUME 0x901
+#define ARIZONA_HPLP1MIX_INPUT_2_SOURCE 0x902
+#define ARIZONA_HPLP1MIX_INPUT_2_VOLUME 0x903
+#define ARIZONA_HPLP1MIX_INPUT_3_SOURCE 0x904
+#define ARIZONA_HPLP1MIX_INPUT_3_VOLUME 0x905
+#define ARIZONA_HPLP1MIX_INPUT_4_SOURCE 0x906
+#define ARIZONA_HPLP1MIX_INPUT_4_VOLUME 0x907
+#define ARIZONA_HPLP2MIX_INPUT_1_SOURCE 0x908
+#define ARIZONA_HPLP2MIX_INPUT_1_VOLUME 0x909
+#define ARIZONA_HPLP2MIX_INPUT_2_SOURCE 0x90A
+#define ARIZONA_HPLP2MIX_INPUT_2_VOLUME 0x90B
+#define ARIZONA_HPLP2MIX_INPUT_3_SOURCE 0x90C
+#define ARIZONA_HPLP2MIX_INPUT_3_VOLUME 0x90D
+#define ARIZONA_HPLP2MIX_INPUT_4_SOURCE 0x90E
+#define ARIZONA_HPLP2MIX_INPUT_4_VOLUME 0x90F
+#define ARIZONA_HPLP3MIX_INPUT_1_SOURCE 0x910
+#define ARIZONA_HPLP3MIX_INPUT_1_VOLUME 0x911
+#define ARIZONA_HPLP3MIX_INPUT_2_SOURCE 0x912
+#define ARIZONA_HPLP3MIX_INPUT_2_VOLUME 0x913
+#define ARIZONA_HPLP3MIX_INPUT_3_SOURCE 0x914
+#define ARIZONA_HPLP3MIX_INPUT_3_VOLUME 0x915
+#define ARIZONA_HPLP3MIX_INPUT_4_SOURCE 0x916
+#define ARIZONA_HPLP3MIX_INPUT_4_VOLUME 0x917
+#define ARIZONA_HPLP4MIX_INPUT_1_SOURCE 0x918
+#define ARIZONA_HPLP4MIX_INPUT_1_VOLUME 0x919
+#define ARIZONA_HPLP4MIX_INPUT_2_SOURCE 0x91A
+#define ARIZONA_HPLP4MIX_INPUT_2_VOLUME 0x91B
+#define ARIZONA_HPLP4MIX_INPUT_3_SOURCE 0x91C
+#define ARIZONA_HPLP4MIX_INPUT_3_VOLUME 0x91D
+#define ARIZONA_HPLP4MIX_INPUT_4_SOURCE 0x91E
+#define ARIZONA_HPLP4MIX_INPUT_4_VOLUME 0x91F
+#define ARIZONA_DSP1LMIX_INPUT_1_SOURCE 0x940
+#define ARIZONA_DSP1LMIX_INPUT_1_VOLUME 0x941
+#define ARIZONA_DSP1LMIX_INPUT_2_SOURCE 0x942
+#define ARIZONA_DSP1LMIX_INPUT_2_VOLUME 0x943
+#define ARIZONA_DSP1LMIX_INPUT_3_SOURCE 0x944
+#define ARIZONA_DSP1LMIX_INPUT_3_VOLUME 0x945
+#define ARIZONA_DSP1LMIX_INPUT_4_SOURCE 0x946
+#define ARIZONA_DSP1LMIX_INPUT_4_VOLUME 0x947
+#define ARIZONA_DSP1RMIX_INPUT_1_SOURCE 0x948
+#define ARIZONA_DSP1RMIX_INPUT_1_VOLUME 0x949
+#define ARIZONA_DSP1RMIX_INPUT_2_SOURCE 0x94A
+#define ARIZONA_DSP1RMIX_INPUT_2_VOLUME 0x94B
+#define ARIZONA_DSP1RMIX_INPUT_3_SOURCE 0x94C
+#define ARIZONA_DSP1RMIX_INPUT_3_VOLUME 0x94D
+#define ARIZONA_DSP1RMIX_INPUT_4_SOURCE 0x94E
+#define ARIZONA_DSP1RMIX_INPUT_4_VOLUME 0x94F
+#define ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
+#define ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
+#define ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
+#define ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
+#define ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
+#define ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
+#define ARIZONA_DSP2LMIX_INPUT_1_SOURCE 0x980
+#define ARIZONA_DSP2LMIX_INPUT_1_VOLUME 0x981
+#define ARIZONA_DSP2LMIX_INPUT_2_SOURCE 0x982
+#define ARIZONA_DSP2LMIX_INPUT_2_VOLUME 0x983
+#define ARIZONA_DSP2LMIX_INPUT_3_SOURCE 0x984
+#define ARIZONA_DSP2LMIX_INPUT_3_VOLUME 0x985
+#define ARIZONA_DSP2LMIX_INPUT_4_SOURCE 0x986
+#define ARIZONA_DSP2LMIX_INPUT_4_VOLUME 0x987
+#define ARIZONA_DSP2RMIX_INPUT_1_SOURCE 0x988
+#define ARIZONA_DSP2RMIX_INPUT_1_VOLUME 0x989
+#define ARIZONA_DSP2RMIX_INPUT_2_SOURCE 0x98A
+#define ARIZONA_DSP2RMIX_INPUT_2_VOLUME 0x98B
+#define ARIZONA_DSP2RMIX_INPUT_3_SOURCE 0x98C
+#define ARIZONA_DSP2RMIX_INPUT_3_VOLUME 0x98D
+#define ARIZONA_DSP2RMIX_INPUT_4_SOURCE 0x98E
+#define ARIZONA_DSP2RMIX_INPUT_4_VOLUME 0x98F
+#define ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
+#define ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
+#define ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
+#define ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
+#define ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
+#define ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
+#define ARIZONA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
+#define ARIZONA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
+#define ARIZONA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
+#define ARIZONA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
+#define ARIZONA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
+#define ARIZONA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
+#define ARIZONA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
+#define ARIZONA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
+#define ARIZONA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
+#define ARIZONA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
+#define ARIZONA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
+#define ARIZONA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
+#define ARIZONA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
+#define ARIZONA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
+#define ARIZONA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
+#define ARIZONA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
+#define ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
+#define ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
+#define ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
+#define ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
+#define ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
+#define ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
+#define ARIZONA_DSP4LMIX_INPUT_1_SOURCE 0xA00
+#define ARIZONA_DSP4LMIX_INPUT_1_VOLUME 0xA01
+#define ARIZONA_DSP4LMIX_INPUT_2_SOURCE 0xA02
+#define ARIZONA_DSP4LMIX_INPUT_2_VOLUME 0xA03
+#define ARIZONA_DSP4LMIX_INPUT_3_SOURCE 0xA04
+#define ARIZONA_DSP4LMIX_INPUT_3_VOLUME 0xA05
+#define ARIZONA_DSP4LMIX_INPUT_4_SOURCE 0xA06
+#define ARIZONA_DSP4LMIX_INPUT_4_VOLUME 0xA07
+#define ARIZONA_DSP4RMIX_INPUT_1_SOURCE 0xA08
+#define ARIZONA_DSP4RMIX_INPUT_1_VOLUME 0xA09
+#define ARIZONA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
+#define ARIZONA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
+#define ARIZONA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
+#define ARIZONA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
+#define ARIZONA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
+#define ARIZONA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
+#define ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
+#define ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
+#define ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
+#define ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
+#define ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
+#define ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
+#define ARIZONA_ASRC1LMIX_INPUT_1_SOURCE 0xA80
+#define ARIZONA_ASRC1RMIX_INPUT_1_SOURCE 0xA88
+#define ARIZONA_ASRC2LMIX_INPUT_1_SOURCE 0xA90
+#define ARIZONA_ASRC2RMIX_INPUT_1_SOURCE 0xA98
+#define ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
+#define ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
+#define ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
+#define ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
+#define ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
+#define ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
+#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
+#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
+#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
+#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
+#define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
+#define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
+#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
+#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
+#define ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
+#define ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
+#define ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
+#define ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
+#define ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
+#define ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
+#define ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
+#define ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
+#define ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
+#define ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
+#define ARIZONA_GPIO1_CTRL 0xC00
+#define ARIZONA_GPIO2_CTRL 0xC01
+#define ARIZONA_GPIO3_CTRL 0xC02
+#define ARIZONA_GPIO4_CTRL 0xC03
+#define ARIZONA_GPIO5_CTRL 0xC04
+#define ARIZONA_IRQ_CTRL_1 0xC0F
+#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
+#define ARIZONA_MISC_PAD_CTRL_1 0xC20
+#define ARIZONA_MISC_PAD_CTRL_2 0xC21
+#define ARIZONA_MISC_PAD_CTRL_3 0xC22
+#define ARIZONA_MISC_PAD_CTRL_4 0xC23
+#define ARIZONA_MISC_PAD_CTRL_5 0xC24
+#define ARIZONA_MISC_PAD_CTRL_6 0xC25
+#define ARIZONA_MISC_PAD_CTRL_7 0xC30
+#define ARIZONA_MISC_PAD_CTRL_8 0xC31
+#define ARIZONA_MISC_PAD_CTRL_9 0xC32
+#define ARIZONA_MISC_PAD_CTRL_10 0xC33
+#define ARIZONA_MISC_PAD_CTRL_11 0xC34
+#define ARIZONA_MISC_PAD_CTRL_12 0xC35
+#define ARIZONA_MISC_PAD_CTRL_13 0xC36
+#define ARIZONA_MISC_PAD_CTRL_14 0xC37
+#define ARIZONA_MISC_PAD_CTRL_15 0xC38
+#define ARIZONA_MISC_PAD_CTRL_16 0xC39
+#define ARIZONA_MISC_PAD_CTRL_17 0xC3A
+#define ARIZONA_MISC_PAD_CTRL_18 0xC3B
+#define ARIZONA_INTERRUPT_STATUS_1 0xD00
+#define ARIZONA_INTERRUPT_STATUS_2 0xD01
+#define ARIZONA_INTERRUPT_STATUS_3 0xD02
+#define ARIZONA_INTERRUPT_STATUS_4 0xD03
+#define ARIZONA_INTERRUPT_STATUS_5 0xD04
+#define ARIZONA_INTERRUPT_STATUS_1_MASK 0xD08
+#define ARIZONA_INTERRUPT_STATUS_2_MASK 0xD09
+#define ARIZONA_INTERRUPT_STATUS_3_MASK 0xD0A
+#define ARIZONA_INTERRUPT_STATUS_4_MASK 0xD0B
+#define ARIZONA_INTERRUPT_STATUS_5_MASK 0xD0C
+#define ARIZONA_INTERRUPT_CONTROL 0xD0F
+#define ARIZONA_IRQ2_STATUS_1 0xD10
+#define ARIZONA_IRQ2_STATUS_2 0xD11
+#define ARIZONA_IRQ2_STATUS_3 0xD12
+#define ARIZONA_IRQ2_STATUS_4 0xD13
+#define ARIZONA_IRQ2_STATUS_5 0xD14
+#define ARIZONA_IRQ2_STATUS_1_MASK 0xD18
+#define ARIZONA_IRQ2_STATUS_2_MASK 0xD19
+#define ARIZONA_IRQ2_STATUS_3_MASK 0xD1A
+#define ARIZONA_IRQ2_STATUS_4_MASK 0xD1B
+#define ARIZONA_IRQ2_STATUS_5_MASK 0xD1C
+#define ARIZONA_IRQ2_CONTROL 0xD1F
+#define ARIZONA_INTERRUPT_RAW_STATUS_2 0xD20
+#define ARIZONA_INTERRUPT_RAW_STATUS_3 0xD21
+#define ARIZONA_INTERRUPT_RAW_STATUS_4 0xD22
+#define ARIZONA_INTERRUPT_RAW_STATUS_5 0xD23
+#define ARIZONA_INTERRUPT_RAW_STATUS_6 0xD24
+#define ARIZONA_INTERRUPT_RAW_STATUS_7 0xD25
+#define ARIZONA_INTERRUPT_RAW_STATUS_8 0xD26
+#define ARIZONA_IRQ_PIN_STATUS 0xD40
+#define ARIZONA_ADSP2_IRQ0 0xD41
+#define ARIZONA_AOD_WKUP_AND_TRIG 0xD50
+#define ARIZONA_AOD_IRQ1 0xD51
+#define ARIZONA_AOD_IRQ2 0xD52
+#define ARIZONA_AOD_IRQ_MASK_IRQ1 0xD53
+#define ARIZONA_AOD_IRQ_MASK_IRQ2 0xD54
+#define ARIZONA_AOD_IRQ_RAW_STATUS 0xD55
+#define ARIZONA_JACK_DETECT_DEBOUNCE 0xD56
+#define ARIZONA_FX_CTRL1 0xE00
+#define ARIZONA_FX_CTRL2 0xE01
+#define ARIZONA_EQ1_1 0xE10
+#define ARIZONA_EQ1_2 0xE11
+#define ARIZONA_EQ1_3 0xE12
+#define ARIZONA_EQ1_4 0xE13
+#define ARIZONA_EQ1_5 0xE14
+#define ARIZONA_EQ1_6 0xE15
+#define ARIZONA_EQ1_7 0xE16
+#define ARIZONA_EQ1_8 0xE17
+#define ARIZONA_EQ1_9 0xE18
+#define ARIZONA_EQ1_10 0xE19
+#define ARIZONA_EQ1_11 0xE1A
+#define ARIZONA_EQ1_12 0xE1B
+#define ARIZONA_EQ1_13 0xE1C
+#define ARIZONA_EQ1_14 0xE1D
+#define ARIZONA_EQ1_15 0xE1E
+#define ARIZONA_EQ1_16 0xE1F
+#define ARIZONA_EQ1_17 0xE20
+#define ARIZONA_EQ1_18 0xE21
+#define ARIZONA_EQ1_19 0xE22
+#define ARIZONA_EQ1_20 0xE23
+#define ARIZONA_EQ1_21 0xE24
+#define ARIZONA_EQ2_1 0xE26
+#define ARIZONA_EQ2_2 0xE27
+#define ARIZONA_EQ2_3 0xE28
+#define ARIZONA_EQ2_4 0xE29
+#define ARIZONA_EQ2_5 0xE2A
+#define ARIZONA_EQ2_6 0xE2B
+#define ARIZONA_EQ2_7 0xE2C
+#define ARIZONA_EQ2_8 0xE2D
+#define ARIZONA_EQ2_9 0xE2E
+#define ARIZONA_EQ2_10 0xE2F
+#define ARIZONA_EQ2_11 0xE30
+#define ARIZONA_EQ2_12 0xE31
+#define ARIZONA_EQ2_13 0xE32
+#define ARIZONA_EQ2_14 0xE33
+#define ARIZONA_EQ2_15 0xE34
+#define ARIZONA_EQ2_16 0xE35
+#define ARIZONA_EQ2_17 0xE36
+#define ARIZONA_EQ2_18 0xE37
+#define ARIZONA_EQ2_19 0xE38
+#define ARIZONA_EQ2_20 0xE39
+#define ARIZONA_EQ2_21 0xE3A
+#define ARIZONA_EQ3_1 0xE3C
+#define ARIZONA_EQ3_2 0xE3D
+#define ARIZONA_EQ3_3 0xE3E
+#define ARIZONA_EQ3_4 0xE3F
+#define ARIZONA_EQ3_5 0xE40
+#define ARIZONA_EQ3_6 0xE41
+#define ARIZONA_EQ3_7 0xE42
+#define ARIZONA_EQ3_8 0xE43
+#define ARIZONA_EQ3_9 0xE44
+#define ARIZONA_EQ3_10 0xE45
+#define ARIZONA_EQ3_11 0xE46
+#define ARIZONA_EQ3_12 0xE47
+#define ARIZONA_EQ3_13 0xE48
+#define ARIZONA_EQ3_14 0xE49
+#define ARIZONA_EQ3_15 0xE4A
+#define ARIZONA_EQ3_16 0xE4B
+#define ARIZONA_EQ3_17 0xE4C
+#define ARIZONA_EQ3_18 0xE4D
+#define ARIZONA_EQ3_19 0xE4E
+#define ARIZONA_EQ3_20 0xE4F
+#define ARIZONA_EQ3_21 0xE50
+#define ARIZONA_EQ4_1 0xE52
+#define ARIZONA_EQ4_2 0xE53
+#define ARIZONA_EQ4_3 0xE54
+#define ARIZONA_EQ4_4 0xE55
+#define ARIZONA_EQ4_5 0xE56
+#define ARIZONA_EQ4_6 0xE57
+#define ARIZONA_EQ4_7 0xE58
+#define ARIZONA_EQ4_8 0xE59
+#define ARIZONA_EQ4_9 0xE5A
+#define ARIZONA_EQ4_10 0xE5B
+#define ARIZONA_EQ4_11 0xE5C
+#define ARIZONA_EQ4_12 0xE5D
+#define ARIZONA_EQ4_13 0xE5E
+#define ARIZONA_EQ4_14 0xE5F
+#define ARIZONA_EQ4_15 0xE60
+#define ARIZONA_EQ4_16 0xE61
+#define ARIZONA_EQ4_17 0xE62
+#define ARIZONA_EQ4_18 0xE63
+#define ARIZONA_EQ4_19 0xE64
+#define ARIZONA_EQ4_20 0xE65
+#define ARIZONA_EQ4_21 0xE66
+#define ARIZONA_DRC1_CTRL1 0xE80
+#define ARIZONA_DRC1_CTRL2 0xE81
+#define ARIZONA_DRC1_CTRL3 0xE82
+#define ARIZONA_DRC1_CTRL4 0xE83
+#define ARIZONA_DRC1_CTRL5 0xE84
+#define ARIZONA_DRC2_CTRL1 0xE89
+#define ARIZONA_DRC2_CTRL2 0xE8A
+#define ARIZONA_DRC2_CTRL3 0xE8B
+#define ARIZONA_DRC2_CTRL4 0xE8C
+#define ARIZONA_DRC2_CTRL5 0xE8D
+#define ARIZONA_HPLPF1_1 0xEC0
+#define ARIZONA_HPLPF1_2 0xEC1
+#define ARIZONA_HPLPF2_1 0xEC4
+#define ARIZONA_HPLPF2_2 0xEC5
+#define ARIZONA_HPLPF3_1 0xEC8
+#define ARIZONA_HPLPF3_2 0xEC9
+#define ARIZONA_HPLPF4_1 0xECC
+#define ARIZONA_HPLPF4_2 0xECD
+#define ARIZONA_ASRC_ENABLE 0xEE0
+#define ARIZONA_ASRC_STATUS 0xEE1
+#define ARIZONA_ASRC_RATE1 0xEE2
+#define ARIZONA_ASRC_RATE2 0xEE3
+#define ARIZONA_ISRC_1_CTRL_1 0xEF0
+#define ARIZONA_ISRC_1_CTRL_2 0xEF1
+#define ARIZONA_ISRC_1_CTRL_3 0xEF2
+#define ARIZONA_ISRC_2_CTRL_1 0xEF3
+#define ARIZONA_ISRC_2_CTRL_2 0xEF4
+#define ARIZONA_ISRC_2_CTRL_3 0xEF5
+#define ARIZONA_ISRC_3_CTRL_1 0xEF6
+#define ARIZONA_ISRC_3_CTRL_2 0xEF7
+#define ARIZONA_ISRC_3_CTRL_3 0xEF8
+#define ARIZONA_CLOCK_CONTROL 0xF00
+#define ARIZONA_ANC_SRC 0xF01
+#define ARIZONA_DSP_STATUS 0xF02
+#define ARIZONA_DSP1_CONTROL_1 0x1100
+#define ARIZONA_DSP1_CLOCKING_1 0x1101
+#define ARIZONA_DSP1_STATUS_1 0x1104
+#define ARIZONA_DSP1_STATUS_2 0x1105
+#define ARIZONA_DSP2_CONTROL_1 0x1200
+#define ARIZONA_DSP2_CLOCKING_1 0x1201
+#define ARIZONA_DSP2_STATUS_1 0x1204
+#define ARIZONA_DSP2_STATUS_2 0x1205
+#define ARIZONA_DSP3_CONTROL_1 0x1300
+#define ARIZONA_DSP3_CLOCKING_1 0x1301
+#define ARIZONA_DSP3_STATUS_1 0x1304
+#define ARIZONA_DSP3_STATUS_2 0x1305
+#define ARIZONA_DSP4_CONTROL_1 0x1400
+#define ARIZONA_DSP4_CLOCKING_1 0x1401
+#define ARIZONA_DSP4_STATUS_1 0x1404
+#define ARIZONA_DSP4_STATUS_2 0x1405
+
+/*
+ * Field Definitions.
+ */
+
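/*
 * Convention used for every field below: single-bit flags get a value
 * define plus _MASK, _SHIFT and _WIDTH; multi-bit fields get _MASK,
 * _SHIFT and _WIDTH. The calls below are an illustrative sketch only
 * (not part of this header); they assume a caller holding the device's
 * regmap and use the Tone Generator 1 fields defined further down:
 *
 *	regmap_update_bits(regmap, ARIZONA_TONE_GENERATOR_1,
 *			   ARIZONA_TONE1_ENA_MASK, ARIZONA_TONE1_ENA);
 *
 *	regmap_update_bits(regmap, ARIZONA_TONE_GENERATOR_1,
 *			   ARIZONA_TONE_RATE_MASK,
 *			   rate << ARIZONA_TONE_RATE_SHIFT);
 */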
+/*
+ * R0 (0x00) - software reset
+ */
+#define ARIZONA_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
+#define ARIZONA_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
+#define ARIZONA_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
+
+/*
+ * R1 (0x01) - Device Revision
+ */
+#define ARIZONA_DEVICE_REVISION_MASK 0x00FF /* DEVICE_REVISION - [7:0] */
+#define ARIZONA_DEVICE_REVISION_SHIFT 0 /* DEVICE_REVISION - [7:0] */
+#define ARIZONA_DEVICE_REVISION_WIDTH 8 /* DEVICE_REVISION - [7:0] */
+
+/*
+ * R8 (0x08) - Ctrl IF SPI CFG 1
+ */
+#define ARIZONA_SPI_CFG 0x0010 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_MASK 0x0010 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_SHIFT 4 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_WIDTH 1 /* SPI_CFG */
+#define ARIZONA_SPI_4WIRE 0x0008 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_MASK 0x0008 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_SHIFT 3 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */
+#define ARIZONA_SPI_AUTO_INC_MASK 0x0003 /* SPI_AUTO_INC - [1:0] */
+#define ARIZONA_SPI_AUTO_INC_SHIFT 0 /* SPI_AUTO_INC - [1:0] */
+#define ARIZONA_SPI_AUTO_INC_WIDTH 2 /* SPI_AUTO_INC - [1:0] */
+
+/*
+ * R9 (0x09) - Ctrl IF I2C1 CFG 1
+ */
+#define ARIZONA_I2C1_AUTO_INC_MASK 0x0003 /* I2C1_AUTO_INC - [1:0] */
+#define ARIZONA_I2C1_AUTO_INC_SHIFT 0 /* I2C1_AUTO_INC - [1:0] */
+#define ARIZONA_I2C1_AUTO_INC_WIDTH 2 /* I2C1_AUTO_INC - [1:0] */
+
+/*
+ * R13 (0x0D) - Ctrl IF Status 1
+ */
+#define ARIZONA_I2C1_BUSY 0x0020 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_MASK 0x0020 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_SHIFT 5 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_WIDTH 1 /* I2C1_BUSY */
+#define ARIZONA_SPI_BUSY 0x0010 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_MASK 0x0010 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_SHIFT 4 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_WIDTH 1 /* SPI_BUSY */
+
+/*
+ * R22 (0x16) - Write Sequencer Ctrl 0
+ */
+#define ARIZONA_WSEQ_ABORT 0x0800 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_MASK 0x0800 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_SHIFT 11 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_START 0x0400 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_MASK 0x0400 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_SHIFT 10 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define ARIZONA_WSEQ_ENA 0x0200 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_MASK 0x0200 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_SHIFT 9 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_START_INDEX_MASK 0x01FF /* WSEQ_START_INDEX - [8:0] */
+#define ARIZONA_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [8:0] */
+#define ARIZONA_WSEQ_START_INDEX_WIDTH 9 /* WSEQ_START_INDEX - [8:0] */
+
+/*
+ * R23 (0x17) - Write Sequencer Ctrl 1
+ */
+#define ARIZONA_WSEQ_BUSY 0x0200 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_MASK 0x0200 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_SHIFT 9 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_CURRENT_INDEX_MASK 0x01FF /* WSEQ_CURRENT_INDEX - [8:0] */
+#define ARIZONA_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [8:0] */
+#define ARIZONA_WSEQ_CURRENT_INDEX_WIDTH 9 /* WSEQ_CURRENT_INDEX - [8:0] */
+
+/*
+ * R24 (0x18) - Write Sequencer Ctrl 2
+ */
+#define ARIZONA_LOAD_DEFAULTS 0x0002 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_MASK 0x0002 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_SHIFT 1 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_WIDTH 1 /* LOAD_DEFAULTS */
+#define ARIZONA_WSEQ_LOAD_MEM 0x0001 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_MASK 0x0001 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_SHIFT 0 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_WIDTH 1 /* WSEQ_LOAD_MEM */
+
+/*
+ * R26 (0x1A) - Write Sequencer PROM
+ */
+#define ARIZONA_WSEQ_OTP_WRITE 0x0001 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_MASK 0x0001 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_SHIFT 0 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_WIDTH 1 /* WSEQ_OTP_WRITE */
+
+/*
+ * R32 (0x20) - Tone Generator 1
+ */
+#define ARIZONA_TONE_RATE_MASK 0x7800 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_RATE_SHIFT 11 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_RATE_WIDTH 4 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_OFFSET_MASK 0x0300 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE_OFFSET_SHIFT 8 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE_OFFSET_WIDTH 2 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE2_OVD 0x0020 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_MASK 0x0020 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_SHIFT 5 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_WIDTH 1 /* TONE2_OVD */
+#define ARIZONA_TONE1_OVD 0x0010 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_MASK 0x0010 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_SHIFT 4 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_WIDTH 1 /* TONE1_OVD */
+#define ARIZONA_TONE2_ENA 0x0002 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_MASK 0x0002 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_SHIFT 1 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_WIDTH 1 /* TONE2_ENA */
+#define ARIZONA_TONE1_ENA 0x0001 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_MASK 0x0001 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_SHIFT 0 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_WIDTH 1 /* TONE1_ENA */
+
+/*
+ * R33 (0x21) - Tone Generator 2
+ */
+#define ARIZONA_TONE1_LVL_0_MASK 0xFFFF /* TONE1_LVL - [15:0] */
+#define ARIZONA_TONE1_LVL_0_SHIFT 0 /* TONE1_LVL - [15:0] */
+#define ARIZONA_TONE1_LVL_0_WIDTH 16 /* TONE1_LVL - [15:0] */
+
+/*
+ * R34 (0x22) - Tone Generator 3
+ */
+#define ARIZONA_TONE1_LVL_MASK 0x00FF /* TONE1_LVL - [7:0] */
+#define ARIZONA_TONE1_LVL_SHIFT 0 /* TONE1_LVL - [7:0] */
+#define ARIZONA_TONE1_LVL_WIDTH 8 /* TONE1_LVL - [7:0] */
+
+/*
+ * R35 (0x23) - Tone Generator 4
+ */
+#define ARIZONA_TONE2_LVL_0_MASK 0xFFFF /* TONE2_LVL - [15:0] */
+#define ARIZONA_TONE2_LVL_0_SHIFT 0 /* TONE2_LVL - [15:0] */
+#define ARIZONA_TONE2_LVL_0_WIDTH 16 /* TONE2_LVL - [15:0] */
+
+/*
+ * R36 (0x24) - Tone Generator 5
+ */
+#define ARIZONA_TONE2_LVL_MASK 0x00FF /* TONE2_LVL - [7:0] */
+#define ARIZONA_TONE2_LVL_SHIFT 0 /* TONE2_LVL - [7:0] */
+#define ARIZONA_TONE2_LVL_WIDTH 8 /* TONE2_LVL - [7:0] */
+
+/*
+ * R48 (0x30) - PWM Drive 1
+ */
+#define ARIZONA_PWM_RATE_MASK 0x7800 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_RATE_SHIFT 11 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_RATE_WIDTH 4 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_CLK_SEL_MASK 0x0700 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM_CLK_SEL_SHIFT 8 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM_CLK_SEL_WIDTH 3 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM2_OVD 0x0020 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_MASK 0x0020 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_SHIFT 5 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_WIDTH 1 /* PWM2_OVD */
+#define ARIZONA_PWM1_OVD 0x0010 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_MASK 0x0010 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_SHIFT 4 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_WIDTH 1 /* PWM1_OVD */
+#define ARIZONA_PWM2_ENA 0x0002 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_MASK 0x0002 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_SHIFT 1 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_WIDTH 1 /* PWM2_ENA */
+#define ARIZONA_PWM1_ENA 0x0001 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_MASK 0x0001 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_SHIFT 0 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_WIDTH 1 /* PWM1_ENA */
+
+/*
+ * R49 (0x31) - PWM Drive 2
+ */
+#define ARIZONA_PWM1_LVL_MASK 0x03FF /* PWM1_LVL - [9:0] */
+#define ARIZONA_PWM1_LVL_SHIFT 0 /* PWM1_LVL - [9:0] */
+#define ARIZONA_PWM1_LVL_WIDTH 10 /* PWM1_LVL - [9:0] */
+
+/*
+ * R50 (0x32) - PWM Drive 3
+ */
+#define ARIZONA_PWM2_LVL_MASK 0x03FF /* PWM2_LVL - [9:0] */
+#define ARIZONA_PWM2_LVL_SHIFT 0 /* PWM2_LVL - [9:0] */
+#define ARIZONA_PWM2_LVL_WIDTH 10 /* PWM2_LVL - [9:0] */
+
+/*
+ * R64 (0x40) - Wake control
+ */
+#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_WIDTH 1 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_RISE 0x0010 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_MASK 0x0010 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_SHIFT 4 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_WIDTH 1 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_JD1_FALL 0x0008 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_MASK 0x0008 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_SHIFT 3 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_WIDTH 1 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_RISE 0x0004 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_MASK 0x0004 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_SHIFT 2 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_WIDTH 1 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD2_FALL 0x0002 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_MASK 0x0002 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_SHIFT 1 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_WIDTH 1 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_RISE 0x0001 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_MASK 0x0001 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_SHIFT 0 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_WIDTH 1 /* WKUP_JD2_RISE */
+
+/*
+ * R65 (0x41) - Sequence control
+ */
+#define ARIZONA_WSEQ_ENA_GP5_FALL 0x0020 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_MASK 0x0020 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_SHIFT 5 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_WIDTH 1 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_RISE 0x0010 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_MASK 0x0010 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_SHIFT 4 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_WIDTH 1 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_FALL 0x0008 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_MASK 0x0008 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_SHIFT 3 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_WIDTH 1 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_RISE 0x0004 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_MASK 0x0004 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_SHIFT 2 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_WIDTH 1 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_FALL 0x0002 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_MASK 0x0002 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_SHIFT 1 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_WIDTH 1 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_RISE 0x0001 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_MASK 0x0001 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_SHIFT 0 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
+
+/*
+ * R97 (0x61) - Sample Rate Sequence Select 1
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+
+/*
+ * R98 (0x62) - Sample Rate Sequence Select 2
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+
+/*
+ * R99 (0x63) - Sample Rate Sequence Select 3
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+
+/*
+ * R100 (0x64) - Sample Rate Sequence Select 4
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+
+/*
+ * R104 (0x68) - Always On Triggers Sequence Select 1
+ */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R105 (0x69) - Always On Triggers Sequence Select 2
+ */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R106 (0x6A) - Always On Triggers Sequence Select 3
+ */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R107 (0x6B) - Always On Triggers Sequence Select 4
+ */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R108 (0x6C) - Always On Triggers Sequence Select 5
+ */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R109 (0x6D) - Always On Triggers Sequence Select 6
+ */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R112 (0x70) - Comfort Noise Generator
+ */
+#define ARIZONA_NOISE_GEN_RATE_MASK 0x7800 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_RATE_SHIFT 11 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_RATE_WIDTH 4 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_ENA 0x0020 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_MASK 0x0020 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_SHIFT 5 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_WIDTH 1 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_GAIN_MASK 0x001F /* NOISE_GEN_GAIN - [4:0] */
+#define ARIZONA_NOISE_GEN_GAIN_SHIFT 0 /* NOISE_GEN_GAIN - [4:0] */
+#define ARIZONA_NOISE_GEN_GAIN_WIDTH 5 /* NOISE_GEN_GAIN - [4:0] */
+
+/*
+ * R144 (0x90) - Haptics Control 1
+ */
+#define ARIZONA_HAP_RATE_MASK 0x7800 /* HAP_RATE - [14:11] */
+#define ARIZONA_HAP_RATE_SHIFT 11 /* HAP_RATE - [14:11] */
+#define ARIZONA_HAP_RATE_WIDTH 4 /* HAP_RATE - [14:11] */
+#define ARIZONA_ONESHOT_TRIG 0x0010 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_MASK 0x0010 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_SHIFT 4 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_WIDTH 1 /* ONESHOT_TRIG */
+#define ARIZONA_HAP_CTRL_MASK 0x000C /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_CTRL_SHIFT 2 /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_CTRL_WIDTH 2 /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_ACT 0x0002 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_MASK 0x0002 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_SHIFT 1 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_WIDTH 1 /* HAP_ACT */
+
+/*
+ * R145 (0x91) - Haptics Control 2
+ */
+#define ARIZONA_LRA_FREQ_MASK 0x7FFF /* LRA_FREQ - [14:0] */
+#define ARIZONA_LRA_FREQ_SHIFT 0 /* LRA_FREQ - [14:0] */
+#define ARIZONA_LRA_FREQ_WIDTH 15 /* LRA_FREQ - [14:0] */
+
+/*
+ * R146 (0x92) - Haptics phase 1 intensity
+ */
+#define ARIZONA_PHASE1_INTENSITY_MASK 0x00FF /* PHASE1_INTENSITY - [7:0] */
+#define ARIZONA_PHASE1_INTENSITY_SHIFT 0 /* PHASE1_INTENSITY - [7:0] */
+#define ARIZONA_PHASE1_INTENSITY_WIDTH 8 /* PHASE1_INTENSITY - [7:0] */
+
+/*
+ * R147 (0x93) - Haptics phase 1 duration
+ */
+#define ARIZONA_PHASE1_DURATION_MASK 0x01FF /* PHASE1_DURATION - [8:0] */
+#define ARIZONA_PHASE1_DURATION_SHIFT 0 /* PHASE1_DURATION - [8:0] */
+#define ARIZONA_PHASE1_DURATION_WIDTH 9 /* PHASE1_DURATION - [8:0] */
+
+/*
+ * R148 (0x94) - Haptics phase 2 intensity
+ */
+#define ARIZONA_PHASE2_INTENSITY_MASK 0x00FF /* PHASE2_INTENSITY - [7:0] */
+#define ARIZONA_PHASE2_INTENSITY_SHIFT 0 /* PHASE2_INTENSITY - [7:0] */
+#define ARIZONA_PHASE2_INTENSITY_WIDTH 8 /* PHASE2_INTENSITY - [7:0] */
+
+/*
+ * R149 (0x95) - Haptics phase 2 duration
+ */
+#define ARIZONA_PHASE2_DURATION_MASK 0x07FF /* PHASE2_DURATION - [10:0] */
+#define ARIZONA_PHASE2_DURATION_SHIFT 0 /* PHASE2_DURATION - [10:0] */
+#define ARIZONA_PHASE2_DURATION_WIDTH 11 /* PHASE2_DURATION - [10:0] */
+
+/*
+ * R150 (0x96) - Haptics phase 3 intensity
+ */
+#define ARIZONA_PHASE3_INTENSITY_MASK 0x00FF /* PHASE3_INTENSITY - [7:0] */
+#define ARIZONA_PHASE3_INTENSITY_SHIFT 0 /* PHASE3_INTENSITY - [7:0] */
+#define ARIZONA_PHASE3_INTENSITY_WIDTH 8 /* PHASE3_INTENSITY - [7:0] */
+
+/*
+ * R151 (0x97) - Haptics phase 3 duration
+ */
+#define ARIZONA_PHASE3_DURATION_MASK 0x01FF /* PHASE3_DURATION - [8:0] */
+#define ARIZONA_PHASE3_DURATION_SHIFT 0 /* PHASE3_DURATION - [8:0] */
+#define ARIZONA_PHASE3_DURATION_WIDTH 9 /* PHASE3_DURATION - [8:0] */
+
+/*
+ * R152 (0x98) - Haptics Status
+ */
+#define ARIZONA_ONESHOT_STS 0x0001 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_MASK 0x0001 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_SHIFT 0 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_WIDTH 1 /* ONESHOT_STS */
+
+/*
+ * R256 (0x100) - Clock 32k 1
+ */
+#define ARIZONA_CLK_32K_ENA 0x0040 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_MASK 0x0040 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_SHIFT 6 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_WIDTH 1 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_SRC_MASK 0x0003 /* CLK_32K_SRC - [1:0] */
+#define ARIZONA_CLK_32K_SRC_SHIFT 0 /* CLK_32K_SRC - [1:0] */
+#define ARIZONA_CLK_32K_SRC_WIDTH 2 /* CLK_32K_SRC - [1:0] */
+
+/*
+ * R257 (0x101) - System Clock 1
+ */
+#define ARIZONA_SYSCLK_FRAC 0x8000 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_MASK 0x8000 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_SHIFT 15 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_WIDTH 1 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FREQ_MASK 0x0700 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_FREQ_SHIFT 8 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_FREQ_WIDTH 3 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_ENA 0x0040 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_MASK 0x0040 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_SHIFT 6 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_SRC_MASK 0x000F /* SYSCLK_SRC - [3:0] */
+#define ARIZONA_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC - [3:0] */
+#define ARIZONA_SYSCLK_SRC_WIDTH 4 /* SYSCLK_SRC - [3:0] */
+
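The *_MASK/*_SHIFT/*_WIDTH triplets above follow the usual Wolfson convention: a field is programmed by shifting the value into place and masking the write. Below is a minimal sketch (not part of this patch) of how a driver might set the SYSCLK_FREQ field through regmap; the register address 0x101 is taken from the R257 block comment, and the example_set_sysclk_freq() helper name and the regmap handle are illustrative assumptions.

#include <linux/regmap.h>

/* Illustrative only: program the SYSCLK_FREQ field of R257 (0x101). */
static int example_set_sysclk_freq(struct regmap *regmap, unsigned int freq_sel)
{
	return regmap_update_bits(regmap, 0x101,
				  ARIZONA_SYSCLK_FREQ_MASK,
				  freq_sel << ARIZONA_SYSCLK_FREQ_SHIFT);
}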
+/*
+ * R258 (0x102) - Sample rate 1
+ */
+#define ARIZONA_SAMPLE_RATE_1_MASK 0x001F /* SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_SHIFT 0 /* SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_WIDTH 5 /* SAMPLE_RATE_1 - [4:0] */
+
+/*
+ * R259 (0x103) - Sample rate 2
+ */
+#define ARIZONA_SAMPLE_RATE_2_MASK 0x001F /* SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_SHIFT 0 /* SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_WIDTH 5 /* SAMPLE_RATE_2 - [4:0] */
+
+/*
+ * R260 (0x104) - Sample rate 3
+ */
+#define ARIZONA_SAMPLE_RATE_3_MASK 0x001F /* SAMPLE_RATE_3 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_SHIFT 0 /* SAMPLE_RATE_3 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_WIDTH 5 /* SAMPLE_RATE_3 - [4:0] */
+
+/*
+ * R266 (0x10A) - Sample rate 1 status
+ */
+#define ARIZONA_SAMPLE_RATE_1_STS_MASK 0x001F /* SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_STS_SHIFT 0 /* SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_STS_WIDTH 5 /* SAMPLE_RATE_1_STS - [4:0] */
+
+/*
+ * R267 (0x10B) - Sample rate 2 status
+ */
+#define ARIZONA_SAMPLE_RATE_2_STS_MASK 0x001F /* SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_STS_SHIFT 0 /* SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_STS_WIDTH 5 /* SAMPLE_RATE_2_STS - [4:0] */
+
+/*
+ * R268 (0x10C) - Sample rate 3 status
+ */
+#define ARIZONA_SAMPLE_RATE_3_STS_MASK 0x001F /* SAMPLE_RATE_3_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_STS_SHIFT 0 /* SAMPLE_RATE_3_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_STS_WIDTH 5 /* SAMPLE_RATE_3_STS - [4:0] */
+
+/*
+ * R274 (0x112) - Async clock 1
+ */
+#define ARIZONA_ASYNC_CLK_FREQ_MASK 0x0700 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_FREQ_SHIFT 8 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_FREQ_WIDTH 3 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_ENA 0x0040 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_MASK 0x0040 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_SHIFT 6 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_WIDTH 1 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_SRC_MASK 0x000F /* ASYNC_CLK_SRC - [3:0] */
+#define ARIZONA_ASYNC_CLK_SRC_SHIFT 0 /* ASYNC_CLK_SRC - [3:0] */
+#define ARIZONA_ASYNC_CLK_SRC_WIDTH 4 /* ASYNC_CLK_SRC - [3:0] */
+
+/*
+ * R275 (0x113) - Async sample rate 1
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_MASK 0x001F /* ASYNC_SAMPLE_RATE - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_SHIFT 0 /* ASYNC_SAMPLE_RATE - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_WIDTH 5 /* ASYNC_SAMPLE_RATE - [4:0] */
+
+/*
+ * R283 (0x11B) - Async sample rate 1 status
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_STS - [4:0] */
+
+/*
+ * R329 (0x149) - Output system clock
+ */
+#define ARIZONA_OPCLK_ENA 0x8000 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_MASK 0x8000 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_SHIFT 15 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_DIV_MASK 0x00F8 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_DIV_SHIFT 3 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_DIV_WIDTH 5 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_SEL_MASK 0x0007 /* OPCLK_SEL - [2:0] */
+#define ARIZONA_OPCLK_SEL_SHIFT 0 /* OPCLK_SEL - [2:0] */
+#define ARIZONA_OPCLK_SEL_WIDTH 3 /* OPCLK_SEL - [2:0] */
+
+/*
+ * R330 (0x14A) - Output async clock
+ */
+#define ARIZONA_OPCLK_ASYNC_ENA 0x8000 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_MASK 0x8000 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_SHIFT 15 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_WIDTH 1 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_DIV_MASK 0x00F8 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_DIV_SHIFT 3 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_DIV_WIDTH 5 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_SEL_MASK 0x0007 /* OPCLK_ASYNC_SEL - [2:0] */
+#define ARIZONA_OPCLK_ASYNC_SEL_SHIFT 0 /* OPCLK_ASYNC_SEL - [2:0] */
+#define ARIZONA_OPCLK_ASYNC_SEL_WIDTH 3 /* OPCLK_ASYNC_SEL - [2:0] */
+
+/*
+ * R338 (0x152) - Rate Estimator 1
+ */
+#define ARIZONA_TRIG_ON_STARTUP 0x0010 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_MASK 0x0010 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_SHIFT 4 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_WIDTH 1 /* TRIG_ON_STARTUP */
+#define ARIZONA_LRCLK_SRC_MASK 0x000E /* LRCLK_SRC - [3:1] */
+#define ARIZONA_LRCLK_SRC_SHIFT 1 /* LRCLK_SRC - [3:1] */
+#define ARIZONA_LRCLK_SRC_WIDTH 3 /* LRCLK_SRC - [3:1] */
+#define ARIZONA_RATE_EST_ENA 0x0001 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_MASK 0x0001 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_SHIFT 0 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_WIDTH 1 /* RATE_EST_ENA */
+
+/*
+ * R339 (0x153) - Rate Estimator 2
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_MASK 0x001F /* SAMPLE_RATE_DETECT_A - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_SHIFT 0 /* SAMPLE_RATE_DETECT_A - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_WIDTH 5 /* SAMPLE_RATE_DETECT_A - [4:0] */
+
+/*
+ * R340 (0x154) - Rate Estimator 3
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_MASK 0x001F /* SAMPLE_RATE_DETECT_B - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_SHIFT 0 /* SAMPLE_RATE_DETECT_B - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_WIDTH 5 /* SAMPLE_RATE_DETECT_B - [4:0] */
+
+/*
+ * R341 (0x155) - Rate Estimator 4
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_MASK 0x001F /* SAMPLE_RATE_DETECT_C - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_SHIFT 0 /* SAMPLE_RATE_DETECT_C - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_WIDTH 5 /* SAMPLE_RATE_DETECT_C - [4:0] */
+
+/*
+ * R342 (0x156) - Rate Estimator 5
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_MASK 0x001F /* SAMPLE_RATE_DETECT_D - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_SHIFT 0 /* SAMPLE_RATE_DETECT_D - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_WIDTH 5 /* SAMPLE_RATE_DETECT_D - [4:0] */
+
+/*
+ * R369 (0x171) - FLL1 Control 1
+ */
+#define ARIZONA_FLL1_FREERUN 0x0002 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_MASK 0x0002 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_SHIFT 1 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_WIDTH 1 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_ENA 0x0001 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_SHIFT 0 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_WIDTH 1 /* FLL1_ENA */
+
+/*
+ * R370 (0x172) - FLL1 Control 2
+ */
+#define ARIZONA_FLL1_CTRL_UPD 0x8000 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_MASK 0x8000 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_SHIFT 15 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_WIDTH 1 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_N_MASK 0x03FF /* FLL1_N - [9:0] */
+#define ARIZONA_FLL1_N_SHIFT 0 /* FLL1_N - [9:0] */
+#define ARIZONA_FLL1_N_WIDTH 10 /* FLL1_N - [9:0] */
+
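A hedged sketch of writing an FLL coefficient follows, assuming (per the usual Wolfson FLL convention, to be confirmed against the datasheet) that FLL1_CTRL_UPD must be set after updating FLL1_N for the new value to be latched. The helper name is illustrative only; 0x172 is the R370 address from the block comment.

#include <linux/regmap.h>

/* Illustrative only: update FLL1_N in R370 (0x172), then latch via CTRL_UPD. */
static int example_write_fll1_n(struct regmap *regmap, unsigned int n)
{
	int ret;

	ret = regmap_update_bits(regmap, 0x172, ARIZONA_FLL1_N_MASK,
				 n << ARIZONA_FLL1_N_SHIFT);
	if (ret)
		return ret;

	return regmap_update_bits(regmap, 0x172, ARIZONA_FLL1_CTRL_UPD_MASK,
				  ARIZONA_FLL1_CTRL_UPD);
}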
+/*
+ * R371 (0x173) - FLL1 Control 3
+ */
+#define ARIZONA_FLL1_THETA_MASK 0xFFFF /* FLL1_THETA - [15:0] */
+#define ARIZONA_FLL1_THETA_SHIFT 0 /* FLL1_THETA - [15:0] */
+#define ARIZONA_FLL1_THETA_WIDTH 16 /* FLL1_THETA - [15:0] */
+
+/*
+ * R372 (0x174) - FLL1 Control 4
+ */
+#define ARIZONA_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */
+
+/*
+ * R373 (0x175) - FLL1 Control 5
+ */
+#define ARIZONA_FLL1_FRATIO_MASK 0x0700 /* FLL1_FRATIO - [10:8] */
+#define ARIZONA_FLL1_FRATIO_SHIFT 8 /* FLL1_FRATIO - [10:8] */
+#define ARIZONA_FLL1_FRATIO_WIDTH 3 /* FLL1_FRATIO - [10:8] */
+#define ARIZONA_FLL1_OUTDIV_MASK 0x000E /* FLL1_OUTDIV - [3:1] */
+#define ARIZONA_FLL1_OUTDIV_SHIFT 1 /* FLL1_OUTDIV - [3:1] */
+#define ARIZONA_FLL1_OUTDIV_WIDTH 3 /* FLL1_OUTDIV - [3:1] */
+
+/*
+ * R374 (0x176) - FLL1 Control 6
+ */
+#define ARIZONA_FLL1_CLK_REF_DIV_MASK 0x00C0 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_DIV_SHIFT 6 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_DIV_WIDTH 2 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_SRC_MASK 0x000F /* FLL1_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_REF_SRC_SHIFT 0 /* FLL1_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_REF_SRC_WIDTH 4 /* FLL1_CLK_REF_SRC - [3:0] */
+
+/*
+ * R375 (0x177) - FLL1 Loop Filter Test 1
+ */
+#define ARIZONA_FLL1_FRC_INTEG_UPD 0x8000 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_MASK 0x8000 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_SHIFT 15 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_WIDTH 1 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF /* FLL1_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_SHIFT 0 /* FLL1_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_WIDTH 12 /* FLL1_FRC_INTEG_VAL - [11:0] */
+
+/*
+ * R385 (0x181) - FLL1 Synchroniser 1
+ */
+#define ARIZONA_FLL1_SYNC_ENA 0x0001 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_MASK 0x0001 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_SHIFT 0 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_WIDTH 1 /* FLL1_SYNC_ENA */
+
+/*
+ * R386 (0x182) - FLL1 Synchroniser 2
+ */
+#define ARIZONA_FLL1_SYNC_N_MASK 0x03FF /* FLL1_SYNC_N - [9:0] */
+#define ARIZONA_FLL1_SYNC_N_SHIFT 0 /* FLL1_SYNC_N - [9:0] */
+#define ARIZONA_FLL1_SYNC_N_WIDTH 10 /* FLL1_SYNC_N - [9:0] */
+
+/*
+ * R387 (0x183) - FLL1 Synchroniser 3
+ */
+#define ARIZONA_FLL1_SYNC_THETA_MASK 0xFFFF /* FLL1_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL1_SYNC_THETA_SHIFT 0 /* FLL1_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL1_SYNC_THETA_WIDTH 16 /* FLL1_SYNC_THETA - [15:0] */
+
+/*
+ * R388 (0x184) - FLL1 Synchroniser 4
+ */
+#define ARIZONA_FLL1_SYNC_LAMBDA_MASK 0xFFFF /* FLL1_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_SYNC_LAMBDA_SHIFT 0 /* FLL1_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_SYNC_LAMBDA_WIDTH 16 /* FLL1_SYNC_LAMBDA - [15:0] */
+
+/*
+ * R389 (0x185) - FLL1 Synchroniser 5
+ */
+#define ARIZONA_FLL1_SYNC_FRATIO_MASK 0x0700 /* FLL1_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL1_SYNC_FRATIO_SHIFT 8 /* FLL1_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL1_SYNC_FRATIO_WIDTH 3 /* FLL1_SYNC_FRATIO - [10:8] */
+
+/*
+ * R390 (0x186) - FLL1 Synchroniser 6
+ */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_MASK 0x00C0 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_SHIFT 6 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_WIDTH 2 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_MASK 0x000F /* FLL1_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_SHIFT 0 /* FLL1_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_WIDTH 4 /* FLL1_CLK_SYNC_SRC - [3:0] */
+
+/*
+ * R393 (0x189) - FLL1 Spread Spectrum
+ */
+#define ARIZONA_FLL1_SS_AMPL_MASK 0x0030 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_AMPL_SHIFT 4 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_AMPL_WIDTH 2 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_FREQ_MASK 0x000C /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_FREQ_SHIFT 2 /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_FREQ_WIDTH 2 /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_SEL_MASK 0x0003 /* FLL1_SS_SEL - [1:0] */
+#define ARIZONA_FLL1_SS_SEL_SHIFT 0 /* FLL1_SS_SEL - [1:0] */
+#define ARIZONA_FLL1_SS_SEL_WIDTH 2 /* FLL1_SS_SEL - [1:0] */
+
+/*
+ * R394 (0x18A) - FLL1 GPIO Clock
+ */
+#define ARIZONA_FLL1_GPDIV_MASK 0x00FE /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_SHIFT 1 /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_WIDTH 7 /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_ENA 0x0001 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_MASK 0x0001 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_SHIFT 0 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_WIDTH 1 /* FLL1_GPDIV_ENA */
+
+/*
+ * R401 (0x191) - FLL2 Control 1
+ */
+#define ARIZONA_FLL2_FREERUN 0x0002 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_MASK 0x0002 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_SHIFT 1 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_WIDTH 1 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_ENA 0x0001 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_SHIFT 0 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_WIDTH 1 /* FLL2_ENA */
+
+/*
+ * R402 (0x192) - FLL2 Control 2
+ */
+#define ARIZONA_FLL2_CTRL_UPD 0x8000 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_MASK 0x8000 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_SHIFT 15 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_WIDTH 1 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_N_MASK 0x03FF /* FLL2_N - [9:0] */
+#define ARIZONA_FLL2_N_SHIFT 0 /* FLL2_N - [9:0] */
+#define ARIZONA_FLL2_N_WIDTH 10 /* FLL2_N - [9:0] */
+
+/*
+ * R403 (0x193) - FLL2 Control 3
+ */
+#define ARIZONA_FLL2_THETA_MASK 0xFFFF /* FLL2_THETA - [15:0] */
+#define ARIZONA_FLL2_THETA_SHIFT 0 /* FLL2_THETA - [15:0] */
+#define ARIZONA_FLL2_THETA_WIDTH 16 /* FLL2_THETA - [15:0] */
+
+/*
+ * R404 (0x194) - FLL2 Control 4
+ */
+#define ARIZONA_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */
+
+/*
+ * R405 (0x195) - FLL2 Control 5
+ */
+#define ARIZONA_FLL2_FRATIO_MASK 0x0700 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_FRATIO_SHIFT 8 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_OUTDIV_MASK 0x000E /* FLL2_OUTDIV - [3:1] */
+#define ARIZONA_FLL2_OUTDIV_SHIFT 1 /* FLL2_OUTDIV - [3:1] */
+#define ARIZONA_FLL2_OUTDIV_WIDTH 3 /* FLL2_OUTDIV - [3:1] */
+
+/*
+ * R406 (0x196) - FLL2 Control 6
+ */
+#define ARIZONA_FLL2_CLK_REF_DIV_MASK 0x00C0 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_DIV_SHIFT 6 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_DIV_WIDTH 2 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_SRC_MASK 0x000F /* FLL2_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_REF_SRC_SHIFT 0 /* FLL2_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_REF_SRC_WIDTH 4 /* FLL2_CLK_REF_SRC - [3:0] */
+
+/*
+ * R407 (0x197) - FLL2 Loop Filter Test 1
+ */
+#define ARIZONA_FLL2_FRC_INTEG_UPD 0x8000 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_MASK 0x8000 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_SHIFT 15 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_WIDTH 1 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_MASK 0x0FFF /* FLL2_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_SHIFT 0 /* FLL2_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_WIDTH 12 /* FLL2_FRC_INTEG_VAL - [11:0] */
+
+/*
+ * R417 (0x1A1) - FLL2 Synchroniser 1
+ */
+#define ARIZONA_FLL2_SYNC_ENA 0x0001 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_MASK 0x0001 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_SHIFT 0 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_WIDTH 1 /* FLL2_SYNC_ENA */
+
+/*
+ * R418 (0x1A2) - FLL2 Synchroniser 2
+ */
+#define ARIZONA_FLL2_SYNC_N_MASK 0x03FF /* FLL2_SYNC_N - [9:0] */
+#define ARIZONA_FLL2_SYNC_N_SHIFT 0 /* FLL2_SYNC_N - [9:0] */
+#define ARIZONA_FLL2_SYNC_N_WIDTH 10 /* FLL2_SYNC_N - [9:0] */
+
+/*
+ * R419 (0x1A3) - FLL2 Synchroniser 3
+ */
+#define ARIZONA_FLL2_SYNC_THETA_MASK 0xFFFF /* FLL2_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL2_SYNC_THETA_SHIFT 0 /* FLL2_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL2_SYNC_THETA_WIDTH 16 /* FLL2_SYNC_THETA - [15:0] */
+
+/*
+ * R420 (0x1A4) - FLL2 Synchroniser 4
+ */
+#define ARIZONA_FLL2_SYNC_LAMBDA_MASK 0xFFFF /* FLL2_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_SYNC_LAMBDA_SHIFT 0 /* FLL2_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_SYNC_LAMBDA_WIDTH 16 /* FLL2_SYNC_LAMBDA - [15:0] */
+
+/*
+ * R421 (0x1A5) - FLL2 Synchroniser 5
+ */
+#define ARIZONA_FLL2_SYNC_FRATIO_MASK 0x0700 /* FLL2_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL2_SYNC_FRATIO_SHIFT 8 /* FLL2_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL2_SYNC_FRATIO_WIDTH 3 /* FLL2_SYNC_FRATIO - [10:8] */
+
+/*
+ * R422 (0x1A6) - FLL2 Synchroniser 6
+ */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_MASK 0x00C0 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_SHIFT 6 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_WIDTH 2 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_MASK 0x000F /* FLL2_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_SHIFT 0 /* FLL2_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_WIDTH 4 /* FLL2_CLK_SYNC_SRC - [3:0] */
+
+/*
+ * R425 (0x1A9) - FLL2 Spread Spectrum
+ */
+#define ARIZONA_FLL2_SS_AMPL_MASK 0x0030 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_AMPL_SHIFT 4 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_AMPL_WIDTH 2 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_FREQ_MASK 0x000C /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_FREQ_SHIFT 2 /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_FREQ_WIDTH 2 /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_SEL_MASK 0x0003 /* FLL2_SS_SEL - [1:0] */
+#define ARIZONA_FLL2_SS_SEL_SHIFT 0 /* FLL2_SS_SEL - [1:0] */
+#define ARIZONA_FLL2_SS_SEL_WIDTH 2 /* FLL2_SS_SEL - [1:0] */
+
+/*
+ * R426 (0x1AA) - FLL2 GPIO Clock
+ */
+#define ARIZONA_FLL2_GPDIV_MASK 0x00FE /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_SHIFT 1 /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_WIDTH 7 /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_ENA 0x0001 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_MASK 0x0001 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_SHIFT 0 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_WIDTH 1 /* FLL2_GPDIV_ENA */
+
+/*
+ * R512 (0x200) - Mic Charge Pump 1
+ */
+#define ARIZONA_CPMIC_DISCH 0x0004 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_MASK 0x0004 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_SHIFT 2 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_WIDTH 1 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_BYPASS 0x0002 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_MASK 0x0002 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_SHIFT 1 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_WIDTH 1 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_ENA 0x0001 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_MASK 0x0001 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_SHIFT 0 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_WIDTH 1 /* CPMIC_ENA */
+
+/*
+ * R528 (0x210) - LDO1 Control 1
+ */
+#define ARIZONA_LDO1_VSEL_MASK 0x07E0 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_VSEL_SHIFT 5 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_VSEL_WIDTH 6 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_FAST 0x0010 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_MASK 0x0010 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_SHIFT 4 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_WIDTH 1 /* LDO1_FAST */
+#define ARIZONA_LDO1_DISCH 0x0004 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_MASK 0x0004 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_SHIFT 2 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */
+#define ARIZONA_LDO1_BYPASS 0x0002 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_MASK 0x0002 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_SHIFT 1 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_WIDTH 1 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_ENA 0x0001 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_SHIFT 0 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_WIDTH 1 /* LDO1_ENA */
+
+/*
+ * R531 (0x213) - LDO2 Control 1
+ */
+#define ARIZONA_LDO2_VSEL_MASK 0x07E0 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_VSEL_SHIFT 5 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_VSEL_WIDTH 6 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_FAST 0x0010 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_MASK 0x0010 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_SHIFT 4 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_WIDTH 1 /* LDO2_FAST */
+#define ARIZONA_LDO2_DISCH 0x0004 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_MASK 0x0004 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_SHIFT 2 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */
+#define ARIZONA_LDO2_BYPASS 0x0002 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_MASK 0x0002 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_SHIFT 1 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_WIDTH 1 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_ENA 0x0001 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_MASK 0x0001 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_SHIFT 0 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_WIDTH 1 /* LDO2_ENA */
+
+/*
+ * R536 (0x218) - Mic Bias Ctrl 1
+ */
+#define ARIZONA_MICB1_EXT_CAP 0x8000 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_MASK 0x8000 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_SHIFT 15 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_WIDTH 1 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_LVL_MASK 0x01E0 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_LVL_SHIFT 5 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_LVL_WIDTH 4 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_FAST 0x0010 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_MASK 0x0010 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_SHIFT 4 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_WIDTH 1 /* MICB1_FAST */
+#define ARIZONA_MICB1_RATE 0x0008 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_MASK 0x0008 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_SHIFT 3 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_WIDTH 1 /* MICB1_RATE */
+#define ARIZONA_MICB1_DISCH 0x0004 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_MASK 0x0004 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_SHIFT 2 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
+#define ARIZONA_MICB1_BYPASS 0x0002 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_MASK 0x0002 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_SHIFT 1 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_WIDTH 1 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_ENA 0x0001 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_MASK 0x0001 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_SHIFT 0 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
+
+/*
+ * R537 (0x219) - Mic Bias Ctrl 2
+ */
+#define ARIZONA_MICB2_EXT_CAP 0x8000 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_MASK 0x8000 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_SHIFT 15 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_WIDTH 1 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_LVL_MASK 0x01E0 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_LVL_SHIFT 5 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_LVL_WIDTH 4 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_FAST 0x0010 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_MASK 0x0010 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_SHIFT 4 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_WIDTH 1 /* MICB2_FAST */
+#define ARIZONA_MICB2_RATE 0x0008 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_MASK 0x0008 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_SHIFT 3 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_WIDTH 1 /* MICB2_RATE */
+#define ARIZONA_MICB2_DISCH 0x0004 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_MASK 0x0004 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_SHIFT 2 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
+#define ARIZONA_MICB2_BYPASS 0x0002 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_MASK 0x0002 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_SHIFT 1 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_WIDTH 1 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_ENA 0x0001 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_MASK 0x0001 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_SHIFT 0 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
+
+/*
+ * R538 (0x21A) - Mic Bias Ctrl 3
+ */
+#define ARIZONA_MICB3_EXT_CAP 0x8000 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_MASK 0x8000 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_SHIFT 15 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_WIDTH 1 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_LVL_MASK 0x01E0 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_LVL_SHIFT 5 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_LVL_WIDTH 4 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_FAST 0x0010 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_MASK 0x0010 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_SHIFT 4 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_WIDTH 1 /* MICB3_FAST */
+#define ARIZONA_MICB3_RATE 0x0008 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_MASK 0x0008 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_SHIFT 3 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_WIDTH 1 /* MICB3_RATE */
+#define ARIZONA_MICB3_DISCH 0x0004 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_MASK 0x0004 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_SHIFT 2 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_WIDTH 1 /* MICB3_DISCH */
+#define ARIZONA_MICB3_BYPASS 0x0002 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_MASK 0x0002 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_SHIFT 1 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_WIDTH 1 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_ENA 0x0001 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_MASK 0x0001 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_SHIFT 0 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */
+
+/*
+ * R659 (0x293) - Accessory Detect Mode 1
+ */
+#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */
+#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */
+#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */
+
+/*
+ * R667 (0x29B) - Headphone Detect 1
+ */
+#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_WIDTH 1 /* HP_STEP_SIZE */
+#define ARIZONA_HP_HOLDTIME_MASK 0x00E0 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_HOLDTIME_SHIFT 5 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_HOLDTIME_WIDTH 3 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_CLK_DIV_MASK 0x0018 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_CLK_DIV_SHIFT 3 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_CLK_DIV_WIDTH 2 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_IDAC_STEER 0x0004 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
+#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
+#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
+#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
+#define ARIZONA_HP_RATE_WIDTH 1 /* HP_RATE */
+#define ARIZONA_HP_POLL 0x0001 /* HP_POLL */
+#define ARIZONA_HP_POLL_MASK 0x0001 /* HP_POLL */
+#define ARIZONA_HP_POLL_SHIFT 0 /* HP_POLL */
+#define ARIZONA_HP_POLL_WIDTH 1 /* HP_POLL */
+
+/*
+ * R668 (0x29C) - Headphone Detect 2
+ */
+#define ARIZONA_HP_DONE 0x0080 /* HP_DONE */
+#define ARIZONA_HP_DONE_MASK 0x0080 /* HP_DONE */
+#define ARIZONA_HP_DONE_SHIFT 7 /* HP_DONE */
+#define ARIZONA_HP_DONE_WIDTH 1 /* HP_DONE */
+#define ARIZONA_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */
+#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
+#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
+
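Status-type fields such as HP_LVL are read back and extracted with the same macros. Below is a minimal read sketch, assuming HP_DONE indicates a completed measurement (an assumption for illustration only) and using the R668 address 0x29C from the block comment; the helper name is not part of this patch.

#include <linux/errno.h>
#include <linux/regmap.h>

/* Illustrative only: read back HP_LVL from R668 (0x29C) once HP_DONE is set. */
static int example_read_hp_level(struct regmap *regmap, unsigned int *lvl)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, 0x29C, &val);
	if (ret)
		return ret;
	if (!(val & ARIZONA_HP_DONE))
		return -EBUSY;

	*lvl = (val & ARIZONA_HP_LVL_MASK) >> ARIZONA_HP_LVL_SHIFT;
	return 0;
}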
+/*
+ * R675 (0x2A3) - Mic Detect 1
+ */
+#define ARIZONA_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_BIAS_SRC_MASK 0x0030 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_BIAS_SRC_SHIFT 4 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_BIAS_SRC_WIDTH 2 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_DBTIME 0x0002 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */
+#define ARIZONA_MICD_ENA 0x0001 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_MASK 0x0001 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_SHIFT 0 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_WIDTH 1 /* MICD_ENA */
+
+/*
+ * R676 (0x2A4) - Mic Detect 2
+ */
+#define ARIZONA_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */
+#define ARIZONA_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */
+#define ARIZONA_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R677 (0x2A5) - Mic Detect 3
+ */
+#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_VALID 0x0002 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_MASK 0x0002 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_SHIFT 1 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_WIDTH 1 /* MICD_VALID */
+#define ARIZONA_MICD_STS 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_MASK 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_SHIFT 0 /* MICD_STS */
+#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
+
+/*
+ * R707 (0x2C3) - Mic noise mix control 1
+ */
+#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_SHIFT 11 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_WIDTH 4 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_MIX_ENA 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_MASK 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_SHIFT 6 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_WIDTH 1 /* MICMUTE_MIX_ENA */
+
+/*
+ * R715 (0x2CB) - Isolation control
+ */
+#define ARIZONA_ISOLATE_DCVDD1 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_MASK 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_SHIFT 0 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_WIDTH 1 /* ISOLATE_DCVDD1 */
+
+/*
+ * R723 (0x2D3) - Jack detect analogue
+ */
+#define ARIZONA_JD2_ENA 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_MASK 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_SHIFT 1 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_WIDTH 1 /* JD2_ENA */
+#define ARIZONA_JD1_ENA 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_MASK 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_SHIFT 0 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_WIDTH 1 /* JD1_ENA */
+
+/*
+ * R768 (0x300) - Input Enables
+ */
+#define ARIZONA_IN4L_ENA 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_MASK 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_SHIFT 7 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_WIDTH 1 /* IN4L_ENA */
+#define ARIZONA_IN4R_ENA 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_MASK 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_SHIFT 6 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_WIDTH 1 /* IN4R_ENA */
+#define ARIZONA_IN3L_ENA 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_MASK 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_SHIFT 5 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_WIDTH 1 /* IN3L_ENA */
+#define ARIZONA_IN3R_ENA 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_MASK 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_SHIFT 4 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_WIDTH 1 /* IN3R_ENA */
+#define ARIZONA_IN2L_ENA 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_MASK 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_SHIFT 3 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
+#define ARIZONA_IN2R_ENA 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_MASK 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_SHIFT 2 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
+#define ARIZONA_IN1L_ENA 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_MASK 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_SHIFT 1 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
+#define ARIZONA_IN1R_ENA 0x0001 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_MASK 0x0001 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_SHIFT 0 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_WIDTH 1 /* IN1R_ENA */
+
+/*
+ * R776 (0x308) - Input Rate
+ */
+#define ARIZONA_IN_RATE_MASK 0x7800 /* IN_RATE - [14:11] */
+#define ARIZONA_IN_RATE_SHIFT 11 /* IN_RATE - [14:11] */
+#define ARIZONA_IN_RATE_WIDTH 4 /* IN_RATE - [14:11] */
+
+/*
+ * R777 (0x309) - Input Volume Ramp
+ */
+#define ARIZONA_IN_VD_RAMP_MASK 0x0070 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VD_RAMP_SHIFT 4 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VD_RAMP_WIDTH 3 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VI_RAMP_MASK 0x0007 /* IN_VI_RAMP - [2:0] */
+#define ARIZONA_IN_VI_RAMP_SHIFT 0 /* IN_VI_RAMP - [2:0] */
+#define ARIZONA_IN_VI_RAMP_WIDTH 3 /* IN_VI_RAMP - [2:0] */
+
+/*
+ * R784 (0x310) - IN1L Control
+ */
+#define ARIZONA_IN1_OSR_MASK 0x6000 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_OSR_SHIFT 13 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_OSR_WIDTH 2 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
+#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
+#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
+
+/*
+ * R785 (0x311) - ADC Digital Volume 1L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN1L_MUTE 0x0100 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_MASK 0x0100 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_SHIFT 8 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */
+#define ARIZONA_IN1L_DIG_VOL_MASK 0x00FF /* IN1L_DIG_VOL - [7:0] */
+#define ARIZONA_IN1L_DIG_VOL_SHIFT 0 /* IN1L_DIG_VOL - [7:0] */
+#define ARIZONA_IN1L_DIG_VOL_WIDTH 8 /* IN1L_DIG_VOL - [7:0] */
+
+/*
+ * R786 (0x312) - DMIC1L Control
+ */
+#define ARIZONA_IN1_DMICL_DLY_MASK 0x003F /* IN1_DMICL_DLY - [5:0] */
+#define ARIZONA_IN1_DMICL_DLY_SHIFT 0 /* IN1_DMICL_DLY - [5:0] */
+#define ARIZONA_IN1_DMICL_DLY_WIDTH 6 /* IN1_DMICL_DLY - [5:0] */
+
+/*
+ * R788 (0x314) - IN1R Control
+ */
+#define ARIZONA_IN1R_PGA_VOL_MASK 0x00FE /* IN1R_PGA_VOL - [7:1] */
+#define ARIZONA_IN1R_PGA_VOL_SHIFT 1 /* IN1R_PGA_VOL - [7:1] */
+#define ARIZONA_IN1R_PGA_VOL_WIDTH 7 /* IN1R_PGA_VOL - [7:1] */
+
+/*
+ * R789 (0x315) - ADC Digital Volume 1R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN1R_MUTE 0x0100 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_MASK 0x0100 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_SHIFT 8 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */
+#define ARIZONA_IN1R_DIG_VOL_MASK 0x00FF /* IN1R_DIG_VOL - [7:0] */
+#define ARIZONA_IN1R_DIG_VOL_SHIFT 0 /* IN1R_DIG_VOL - [7:0] */
+#define ARIZONA_IN1R_DIG_VOL_WIDTH 8 /* IN1R_DIG_VOL - [7:0] */
+
+/*
+ * R790 (0x316) - DMIC1R Control
+ */
+#define ARIZONA_IN1_DMICR_DLY_MASK 0x003F /* IN1_DMICR_DLY - [5:0] */
+#define ARIZONA_IN1_DMICR_DLY_SHIFT 0 /* IN1_DMICR_DLY - [5:0] */
+#define ARIZONA_IN1_DMICR_DLY_WIDTH 6 /* IN1_DMICR_DLY - [5:0] */
+
+/*
+ * R792 (0x318) - IN2L Control
+ */
+#define ARIZONA_IN2_OSR_MASK 0x6000 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_OSR_SHIFT 13 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_OSR_WIDTH 2 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
+#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
+#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
+
+/*
+ * R793 (0x319) - ADC Digital Volume 2L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN2L_MUTE 0x0100 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_MASK 0x0100 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_SHIFT 8 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */
+#define ARIZONA_IN2L_DIG_VOL_MASK 0x00FF /* IN2L_DIG_VOL - [7:0] */
+#define ARIZONA_IN2L_DIG_VOL_SHIFT 0 /* IN2L_DIG_VOL - [7:0] */
+#define ARIZONA_IN2L_DIG_VOL_WIDTH 8 /* IN2L_DIG_VOL - [7:0] */
+
+/*
+ * R794 (0x31A) - DMIC2L Control
+ */
+#define ARIZONA_IN2_DMICL_DLY_MASK 0x003F /* IN2_DMICL_DLY - [5:0] */
+#define ARIZONA_IN2_DMICL_DLY_SHIFT 0 /* IN2_DMICL_DLY - [5:0] */
+#define ARIZONA_IN2_DMICL_DLY_WIDTH 6 /* IN2_DMICL_DLY - [5:0] */
+
+/*
+ * R796 (0x31C) - IN2R Control
+ */
+#define ARIZONA_IN2R_PGA_VOL_MASK 0x00FE /* IN2R_PGA_VOL - [7:1] */
+#define ARIZONA_IN2R_PGA_VOL_SHIFT 1 /* IN2R_PGA_VOL - [7:1] */
+#define ARIZONA_IN2R_PGA_VOL_WIDTH 7 /* IN2R_PGA_VOL - [7:1] */
+
+/*
+ * R797 (0x31D) - ADC Digital Volume 2R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN2R_MUTE 0x0100 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_MASK 0x0100 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_SHIFT 8 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */
+#define ARIZONA_IN2R_DIG_VOL_MASK 0x00FF /* IN2R_DIG_VOL - [7:0] */
+#define ARIZONA_IN2R_DIG_VOL_SHIFT 0 /* IN2R_DIG_VOL - [7:0] */
+#define ARIZONA_IN2R_DIG_VOL_WIDTH 8 /* IN2R_DIG_VOL - [7:0] */
+
+/*
+ * R798 (0x31E) - DMIC2R Control
+ */
+#define ARIZONA_IN2_DMICR_DLY_MASK 0x003F /* IN2_DMICR_DLY - [5:0] */
+#define ARIZONA_IN2_DMICR_DLY_SHIFT 0 /* IN2_DMICR_DLY - [5:0] */
+#define ARIZONA_IN2_DMICR_DLY_WIDTH 6 /* IN2_DMICR_DLY - [5:0] */
+
+/*
+ * R800 (0x320) - IN3L Control
+ */
+#define ARIZONA_IN3_OSR_MASK 0x6000 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_OSR_SHIFT 13 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_OSR_WIDTH 2 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
+#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
+#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
+
+/*
+ * R801 (0x321) - ADC Digital Volume 3L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN3L_MUTE 0x0100 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_MASK 0x0100 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_SHIFT 8 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_WIDTH 1 /* IN3L_MUTE */
+#define ARIZONA_IN3L_DIG_VOL_MASK 0x00FF /* IN3L_DIG_VOL - [7:0] */
+#define ARIZONA_IN3L_DIG_VOL_SHIFT 0 /* IN3L_DIG_VOL - [7:0] */
+#define ARIZONA_IN3L_DIG_VOL_WIDTH 8 /* IN3L_DIG_VOL - [7:0] */
+
+/*
+ * R802 (0x322) - DMIC3L Control
+ */
+#define ARIZONA_IN3_DMICL_DLY_MASK 0x003F /* IN3_DMICL_DLY - [5:0] */
+#define ARIZONA_IN3_DMICL_DLY_SHIFT 0 /* IN3_DMICL_DLY - [5:0] */
+#define ARIZONA_IN3_DMICL_DLY_WIDTH 6 /* IN3_DMICL_DLY - [5:0] */
+
+/*
+ * R804 (0x324) - IN3R Control
+ */
+#define ARIZONA_IN3R_PGA_VOL_MASK 0x00FE /* IN3R_PGA_VOL - [7:1] */
+#define ARIZONA_IN3R_PGA_VOL_SHIFT 1 /* IN3R_PGA_VOL - [7:1] */
+#define ARIZONA_IN3R_PGA_VOL_WIDTH 7 /* IN3R_PGA_VOL - [7:1] */
+
+/*
+ * R805 (0x325) - ADC Digital Volume 3R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN3R_MUTE 0x0100 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_MASK 0x0100 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_SHIFT 8 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_WIDTH 1 /* IN3R_MUTE */
+#define ARIZONA_IN3R_DIG_VOL_MASK 0x00FF /* IN3R_DIG_VOL - [7:0] */
+#define ARIZONA_IN3R_DIG_VOL_SHIFT 0 /* IN3R_DIG_VOL - [7:0] */
+#define ARIZONA_IN3R_DIG_VOL_WIDTH 8 /* IN3R_DIG_VOL - [7:0] */
+
+/*
+ * R806 (0x326) - DMIC3R Control
+ */
+#define ARIZONA_IN3_DMICR_DLY_MASK 0x003F /* IN3_DMICR_DLY - [5:0] */
+#define ARIZONA_IN3_DMICR_DLY_SHIFT 0 /* IN3_DMICR_DLY - [5:0] */
+#define ARIZONA_IN3_DMICR_DLY_WIDTH 6 /* IN3_DMICR_DLY - [5:0] */
+
+/*
+ * R808 (0x328) - IN4 Control
+ */
+#define ARIZONA_IN4_OSR_MASK 0x6000 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_OSR_SHIFT 13 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_OSR_WIDTH 2 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_DMIC_SUP_MASK 0x1800 /* IN4_DMIC_SUP - [12:11] */
+#define ARIZONA_IN4_DMIC_SUP_SHIFT 11 /* IN4_DMIC_SUP - [12:11] */
+#define ARIZONA_IN4_DMIC_SUP_WIDTH 2 /* IN4_DMIC_SUP - [12:11] */
+
+/*
+ * R809 (0x329) - ADC Digital Volume 4L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN4L_MUTE 0x0100 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_MASK 0x0100 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_SHIFT 8 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_WIDTH 1 /* IN4L_MUTE */
+#define ARIZONA_IN4L_DIG_VOL_MASK 0x00FF /* IN4L_DIG_VOL - [7:0] */
+#define ARIZONA_IN4L_DIG_VOL_SHIFT 0 /* IN4L_DIG_VOL - [7:0] */
+#define ARIZONA_IN4L_DIG_VOL_WIDTH 8 /* IN4L_DIG_VOL - [7:0] */
+
+/*
+ * R810 (0x32A) - DMIC4L Control
+ */
+#define ARIZONA_IN4L_DMIC_DLY_MASK 0x003F /* IN4L_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4L_DMIC_DLY_SHIFT 0 /* IN4L_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4L_DMIC_DLY_WIDTH 6 /* IN4L_DMIC_DLY - [5:0] */
+
+/*
+ * R813 (0x32D) - ADC Digital Volume 4R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN4R_MUTE 0x0100 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_MASK 0x0100 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_SHIFT 8 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_WIDTH 1 /* IN4R_MUTE */
+#define ARIZONA_IN4R_DIG_VOL_MASK 0x00FF /* IN4R_DIG_VOL - [7:0] */
+#define ARIZONA_IN4R_DIG_VOL_SHIFT 0 /* IN4R_DIG_VOL - [7:0] */
+#define ARIZONA_IN4R_DIG_VOL_WIDTH 8 /* IN4R_DIG_VOL - [7:0] */
+
+/*
+ * R814 (0x32E) - DMIC4R Control
+ */
+#define ARIZONA_IN4R_DMIC_DLY_MASK 0x003F /* IN4R_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4R_DMIC_DLY_SHIFT 0 /* IN4R_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4R_DMIC_DLY_WIDTH 6 /* IN4R_DMIC_DLY - [5:0] */
+
+/*
+ * R1024 (0x400) - Output Enables 1
+ */
+#define ARIZONA_OUT6L_ENA 0x0800 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_MASK 0x0800 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_SHIFT 11 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_WIDTH 1 /* OUT6L_ENA */
+#define ARIZONA_OUT6R_ENA 0x0400 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_MASK 0x0400 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_SHIFT 10 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_WIDTH 1 /* OUT6R_ENA */
+#define ARIZONA_OUT5L_ENA 0x0200 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_MASK 0x0200 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_SHIFT 9 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_WIDTH 1 /* OUT5L_ENA */
+#define ARIZONA_OUT5R_ENA 0x0100 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_MASK 0x0100 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_SHIFT 8 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_WIDTH 1 /* OUT5R_ENA */
+#define ARIZONA_OUT4L_ENA 0x0080 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_MASK 0x0080 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_SHIFT 7 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_WIDTH 1 /* OUT4L_ENA */
+#define ARIZONA_OUT4R_ENA 0x0040 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_MASK 0x0040 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_SHIFT 6 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_WIDTH 1 /* OUT4R_ENA */
+#define ARIZONA_OUT3L_ENA 0x0020 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_MASK 0x0020 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_SHIFT 5 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_WIDTH 1 /* OUT3L_ENA */
+#define ARIZONA_OUT3R_ENA 0x0010 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_MASK 0x0010 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_SHIFT 4 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_WIDTH 1 /* OUT3R_ENA */
+#define ARIZONA_OUT2L_ENA 0x0008 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_MASK 0x0008 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_SHIFT 3 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_WIDTH 1 /* OUT2L_ENA */
+#define ARIZONA_OUT2R_ENA 0x0004 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_MASK 0x0004 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_SHIFT 2 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_WIDTH 1 /* OUT2R_ENA */
+#define ARIZONA_OUT1L_ENA 0x0002 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_MASK 0x0002 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_SHIFT 1 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_WIDTH 1 /* OUT1L_ENA */
+#define ARIZONA_OUT1R_ENA 0x0001 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_MASK 0x0001 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_SHIFT 0 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_WIDTH 1 /* OUT1R_ENA */
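
Each field above follows the value/_MASK/_SHIFT/_WIDTH pattern used throughout this header. As an illustrative sketch only (not taken from the patch itself), a single-bit enable such as OUT1L_ENA would typically be driven through the kernel regmap API that the Arizona drivers are built on; the address 0x400 comes from the R1024 comment above, while the function name and regmap handle are hypothetical:

#include <linux/regmap.h>

/* Enable or disable the OUT1L output path (R1024, 0x400 - Output Enables 1). */
static int example_out1l_enable(struct regmap *regmap, bool enable)
{
	return regmap_update_bits(regmap, 0x400,
				  ARIZONA_OUT1L_ENA_MASK,
				  enable ? ARIZONA_OUT1L_ENA : 0);
}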
+
+/*
+ * R1025 (0x401) - Output Status 1
+ */
+#define ARIZONA_OUT6L_ENA_STS 0x0800 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_MASK 0x0800 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_SHIFT 11 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_WIDTH 1 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS 0x0400 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_MASK 0x0400 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_SHIFT 10 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_WIDTH 1 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS 0x0200 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_MASK 0x0200 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_SHIFT 9 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_WIDTH 1 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS 0x0100 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_MASK 0x0100 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_SHIFT 8 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_WIDTH 1 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS 0x0080 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_MASK 0x0080 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_SHIFT 7 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_WIDTH 1 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS 0x0040 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_MASK 0x0040 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_SHIFT 6 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_WIDTH 1 /* OUT4R_ENA_STS */
+
+/*
+ * R1032 (0x408) - Output Rate 1
+ */
+#define ARIZONA_OUT_RATE_MASK 0x7800 /* OUT_RATE - [14:11] */
+#define ARIZONA_OUT_RATE_SHIFT 11 /* OUT_RATE - [14:11] */
+#define ARIZONA_OUT_RATE_WIDTH 4 /* OUT_RATE - [14:11] */
+
+/*
+ * R1033 (0x409) - Output Volume Ramp
+ */
+#define ARIZONA_OUT_VD_RAMP_MASK 0x0070 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VD_RAMP_SHIFT 4 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VD_RAMP_WIDTH 3 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VI_RAMP_MASK 0x0007 /* OUT_VI_RAMP - [2:0] */
+#define ARIZONA_OUT_VI_RAMP_SHIFT 0 /* OUT_VI_RAMP - [2:0] */
+#define ARIZONA_OUT_VI_RAMP_WIDTH 3 /* OUT_VI_RAMP - [2:0] */
+
+/*
+ * R1040 (0x410) - Output Path Config 1L
+ */
+#define ARIZONA_OUT1_LP_MODE 0x8000 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_MASK 0x8000 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_SHIFT 15 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_WIDTH 1 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_OSR 0x2000 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_MASK 0x2000 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_SHIFT 13 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_WIDTH 1 /* OUT1_OSR */
+#define ARIZONA_OUT1_MONO 0x1000 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_MASK 0x1000 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_SHIFT 12 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_WIDTH 1 /* OUT1_MONO */
+#define ARIZONA_OUT1L_ANC_SRC_MASK 0x0C00 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_ANC_SRC_SHIFT 10 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_ANC_SRC_WIDTH 2 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_PGA_VOL_MASK 0x00FE /* OUT1L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1L_PGA_VOL_SHIFT 1 /* OUT1L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1L_PGA_VOL_WIDTH 7 /* OUT1L_PGA_VOL - [7:1] */
+
+/*
+ * R1041 (0x411) - DAC Digital Volume 1L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT1L_MUTE 0x0100 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_MASK 0x0100 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_SHIFT 8 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_WIDTH 1 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_VOL_MASK 0x00FF /* OUT1L_VOL - [7:0] */
+#define ARIZONA_OUT1L_VOL_SHIFT 0 /* OUT1L_VOL - [7:0] */
+#define ARIZONA_OUT1L_VOL_WIDTH 8 /* OUT1L_VOL - [7:0] */
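
The OUT_VU bit is defined identically under each DAC digital volume register and appears to act as a shared volume-update latch, so a new _VOL value is normally written together with OUT_VU set. A hedged sketch under that assumption (address 0x411 from the R1041 comment above; the helper name and regmap handle are hypothetical):

#include <linux/regmap.h>

/* Write the OUT1L digital volume and latch it with OUT_VU (R1041, 0x411). */
static int example_set_out1l_volume(struct regmap *regmap, unsigned int vol)
{
	return regmap_update_bits(regmap, 0x411,
				  ARIZONA_OUT_VU_MASK | ARIZONA_OUT1L_VOL_MASK,
				  ARIZONA_OUT_VU |
				  ((vol << ARIZONA_OUT1L_VOL_SHIFT) &
				   ARIZONA_OUT1L_VOL_MASK));
}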
+
+/*
+ * R1042 (0x412) - DAC Volume Limit 1L
+ */
+#define ARIZONA_OUT1L_VOL_LIM_MASK 0x00FF /* OUT1L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1L_VOL_LIM_SHIFT 0 /* OUT1L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1L_VOL_LIM_WIDTH 8 /* OUT1L_VOL_LIM - [7:0] */
+
+/*
+ * R1043 (0x413) - Noise Gate Select 1L
+ */
+#define ARIZONA_OUT1L_NGATE_SRC_MASK 0x0FFF /* OUT1L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1L_NGATE_SRC_SHIFT 0 /* OUT1L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1L_NGATE_SRC_WIDTH 12 /* OUT1L_NGATE_SRC - [11:0] */
+
+/*
+ * R1044 (0x414) - Output Path Config 1R
+ */
+#define ARIZONA_OUT1R_ANC_SRC_MASK 0x0C00 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_ANC_SRC_SHIFT 10 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_ANC_SRC_WIDTH 2 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_PGA_VOL_MASK 0x00FE /* OUT1R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1R_PGA_VOL_SHIFT 1 /* OUT1R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1R_PGA_VOL_WIDTH 7 /* OUT1R_PGA_VOL - [7:1] */
+
+/*
+ * R1045 (0x415) - DAC Digital Volume 1R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT1R_MUTE 0x0100 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_MASK 0x0100 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_SHIFT 8 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_WIDTH 1 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_VOL_MASK 0x00FF /* OUT1R_VOL - [7:0] */
+#define ARIZONA_OUT1R_VOL_SHIFT 0 /* OUT1R_VOL - [7:0] */
+#define ARIZONA_OUT1R_VOL_WIDTH 8 /* OUT1R_VOL - [7:0] */
+
+/*
+ * R1046 (0x416) - DAC Volume Limit 1R
+ */
+#define ARIZONA_OUT1R_VOL_LIM_MASK 0x00FF /* OUT1R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1R_VOL_LIM_SHIFT 0 /* OUT1R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1R_VOL_LIM_WIDTH 8 /* OUT1R_VOL_LIM - [7:0] */
+
+/*
+ * R1047 (0x417) - Noise Gate Select 1R
+ */
+#define ARIZONA_OUT1R_NGATE_SRC_MASK 0x0FFF /* OUT1R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1R_NGATE_SRC_SHIFT 0 /* OUT1R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1R_NGATE_SRC_WIDTH 12 /* OUT1R_NGATE_SRC - [11:0] */
+
+/*
+ * R1048 (0x418) - Output Path Config 2L
+ */
+#define ARIZONA_OUT2_LP_MODE 0x8000 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_MASK 0x8000 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_SHIFT 15 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_WIDTH 1 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_OSR 0x2000 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_MASK 0x2000 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_SHIFT 13 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_WIDTH 1 /* OUT2_OSR */
+#define ARIZONA_OUT2_MONO 0x1000 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_MASK 0x1000 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_SHIFT 12 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_WIDTH 1 /* OUT2_MONO */
+#define ARIZONA_OUT2L_ANC_SRC_MASK 0x0C00 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_ANC_SRC_SHIFT 10 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_ANC_SRC_WIDTH 2 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_PGA_VOL_MASK 0x00FE /* OUT2L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2L_PGA_VOL_SHIFT 1 /* OUT2L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2L_PGA_VOL_WIDTH 7 /* OUT2L_PGA_VOL - [7:1] */
+
+/*
+ * R1049 (0x419) - DAC Digital Volume 2L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT2L_MUTE 0x0100 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_MASK 0x0100 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_SHIFT 8 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_WIDTH 1 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_VOL_MASK 0x00FF /* OUT2L_VOL - [7:0] */
+#define ARIZONA_OUT2L_VOL_SHIFT 0 /* OUT2L_VOL - [7:0] */
+#define ARIZONA_OUT2L_VOL_WIDTH 8 /* OUT2L_VOL - [7:0] */
+
+/*
+ * R1050 (0x41A) - DAC Volume Limit 2L
+ */
+#define ARIZONA_OUT2L_VOL_LIM_MASK 0x00FF /* OUT2L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2L_VOL_LIM_SHIFT 0 /* OUT2L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2L_VOL_LIM_WIDTH 8 /* OUT2L_VOL_LIM - [7:0] */
+
+/*
+ * R1051 (0x41B) - Noise Gate Select 2L
+ */
+#define ARIZONA_OUT2L_NGATE_SRC_MASK 0x0FFF /* OUT2L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2L_NGATE_SRC_SHIFT 0 /* OUT2L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2L_NGATE_SRC_WIDTH 12 /* OUT2L_NGATE_SRC - [11:0] */
+
+/*
+ * R1052 (0x41C) - Output Path Config 2R
+ */
+#define ARIZONA_OUT2R_ANC_SRC_MASK 0x0C00 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_ANC_SRC_SHIFT 10 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_ANC_SRC_WIDTH 2 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_PGA_VOL_MASK 0x00FE /* OUT2R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2R_PGA_VOL_SHIFT 1 /* OUT2R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2R_PGA_VOL_WIDTH 7 /* OUT2R_PGA_VOL - [7:1] */
+
+/*
+ * R1053 (0x41D) - DAC Digital Volume 2R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT2R_MUTE 0x0100 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_MASK 0x0100 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_SHIFT 8 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_WIDTH 1 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_VOL_MASK 0x00FF /* OUT2R_VOL - [7:0] */
+#define ARIZONA_OUT2R_VOL_SHIFT 0 /* OUT2R_VOL - [7:0] */
+#define ARIZONA_OUT2R_VOL_WIDTH 8 /* OUT2R_VOL - [7:0] */
+
+/*
+ * R1054 (0x41E) - DAC Volume Limit 2R
+ */
+#define ARIZONA_OUT2R_VOL_LIM_MASK 0x00FF /* OUT2R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2R_VOL_LIM_SHIFT 0 /* OUT2R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2R_VOL_LIM_WIDTH 8 /* OUT2R_VOL_LIM - [7:0] */
+
+/*
+ * R1055 (0x41F) - Noise Gate Select 2R
+ */
+#define ARIZONA_OUT2R_NGATE_SRC_MASK 0x0FFF /* OUT2R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2R_NGATE_SRC_SHIFT 0 /* OUT2R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2R_NGATE_SRC_WIDTH 12 /* OUT2R_NGATE_SRC - [11:0] */
+
+/*
+ * R1056 (0x420) - Output Path Config 3L
+ */
+#define ARIZONA_OUT3_LP_MODE 0x8000 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_MASK 0x8000 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_SHIFT 15 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_WIDTH 1 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_OSR 0x2000 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_MASK 0x2000 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_SHIFT 13 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_WIDTH 1 /* OUT3_OSR */
+#define ARIZONA_OUT3_MONO 0x1000 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_MASK 0x1000 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_SHIFT 12 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_WIDTH 1 /* OUT3_MONO */
+#define ARIZONA_OUT3L_ANC_SRC_MASK 0x0C00 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_ANC_SRC_SHIFT 10 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_ANC_SRC_WIDTH 2 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_PGA_VOL_MASK 0x00FE /* OUT3L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3L_PGA_VOL_SHIFT 1 /* OUT3L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3L_PGA_VOL_WIDTH 7 /* OUT3L_PGA_VOL - [7:1] */
+
+/*
+ * R1057 (0x421) - DAC Digital Volume 3L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT3L_MUTE 0x0100 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_MASK 0x0100 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_SHIFT 8 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_WIDTH 1 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_VOL_MASK 0x00FF /* OUT3L_VOL - [7:0] */
+#define ARIZONA_OUT3L_VOL_SHIFT 0 /* OUT3L_VOL - [7:0] */
+#define ARIZONA_OUT3L_VOL_WIDTH 8 /* OUT3L_VOL - [7:0] */
+
+/*
+ * R1058 (0x422) - DAC Volume Limit 3L
+ */
+#define ARIZONA_OUT3L_VOL_LIM_MASK 0x00FF /* OUT3L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3L_VOL_LIM_SHIFT 0 /* OUT3L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3L_VOL_LIM_WIDTH 8 /* OUT3L_VOL_LIM - [7:0] */
+
+/*
+ * R1059 (0x423) - Noise Gate Select 3L
+ */
+#define ARIZONA_OUT3_NGATE_SRC_MASK 0x0FFF /* OUT3_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT3_NGATE_SRC_SHIFT 0 /* OUT3_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT3_NGATE_SRC_WIDTH 12 /* OUT3_NGATE_SRC - [11:0] */
+
+/*
+ * R1060 (0x424) - Output Path Config 3R
+ */
+#define ARIZONA_OUT3R_PGA_VOL_MASK 0x00FE /* OUT3R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3R_PGA_VOL_SHIFT 1 /* OUT3R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3R_PGA_VOL_WIDTH 7 /* OUT3R_PGA_VOL - [7:1] */
+
+/*
+ * R1061 (0x425) - DAC Digital Volume 3R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT3R_MUTE 0x0100 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_MASK 0x0100 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_SHIFT 8 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_WIDTH 1 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_VOL_MASK 0x00FF /* OUT3R_VOL - [7:0] */
+#define ARIZONA_OUT3R_VOL_SHIFT 0 /* OUT3R_VOL - [7:0] */
+#define ARIZONA_OUT3R_VOL_WIDTH 8 /* OUT3R_VOL - [7:0] */
+
+/*
+ * R1062 (0x426) - DAC Volume Limit 3R
+ */
+#define ARIZONA_OUT3R_ANC_SRC_MASK 0x0C00 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_ANC_SRC_SHIFT 10 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_ANC_SRC_WIDTH 2 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_VOL_LIM_MASK 0x00FF /* OUT3R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3R_VOL_LIM_SHIFT 0 /* OUT3R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3R_VOL_LIM_WIDTH 8 /* OUT3R_VOL_LIM - [7:0] */
+
+/*
+ * R1064 (0x428) - Output Path Config 4L
+ */
+#define ARIZONA_OUT4_OSR 0x2000 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_MASK 0x2000 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_SHIFT 13 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_WIDTH 1 /* OUT4_OSR */
+#define ARIZONA_OUT4L_ANC_SRC_MASK 0x0C00 /* OUT4L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4L_ANC_SRC_SHIFT 10 /* OUT4L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4L_ANC_SRC_WIDTH 2 /* OUT4L_ANC_SRC - [11:10] */
+
+/*
+ * R1065 (0x429) - DAC Digital Volume 4L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT4L_MUTE 0x0100 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_MASK 0x0100 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_SHIFT 8 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_WIDTH 1 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_VOL_MASK 0x00FF /* OUT4L_VOL - [7:0] */
+#define ARIZONA_OUT4L_VOL_SHIFT 0 /* OUT4L_VOL - [7:0] */
+#define ARIZONA_OUT4L_VOL_WIDTH 8 /* OUT4L_VOL - [7:0] */
+
+/*
+ * R1066 (0x42A) - Out Volume 4L
+ */
+#define ARIZONA_OUT4L_VOL_LIM_MASK 0x00FF /* OUT4L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4L_VOL_LIM_SHIFT 0 /* OUT4L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4L_VOL_LIM_WIDTH 8 /* OUT4L_VOL_LIM - [7:0] */
+
+/*
+ * R1067 (0x42B) - Noise Gate Select 4L
+ */
+#define ARIZONA_OUT4L_NGATE_SRC_MASK 0x0FFF /* OUT4L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4L_NGATE_SRC_SHIFT 0 /* OUT4L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4L_NGATE_SRC_WIDTH 12 /* OUT4L_NGATE_SRC - [11:0] */
+
+/*
+ * R1068 (0x42C) - Output Path Config 4R
+ */
+#define ARIZONA_OUT4R_ANC_SRC_MASK 0x0C00 /* OUT4R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4R_ANC_SRC_SHIFT 10 /* OUT4R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4R_ANC_SRC_WIDTH 2 /* OUT4R_ANC_SRC - [11:10] */
+
+/*
+ * R1069 (0x42D) - DAC Digital Volume 4R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT4R_MUTE 0x0100 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_MASK 0x0100 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_SHIFT 8 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_WIDTH 1 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_VOL_MASK 0x00FF /* OUT4R_VOL - [7:0] */
+#define ARIZONA_OUT4R_VOL_SHIFT 0 /* OUT4R_VOL - [7:0] */
+#define ARIZONA_OUT4R_VOL_WIDTH 8 /* OUT4R_VOL - [7:0] */
+
+/*
+ * R1070 (0x42E) - Out Volume 4R
+ */
+#define ARIZONA_OUT4R_VOL_LIM_MASK 0x00FF /* OUT4R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4R_VOL_LIM_SHIFT 0 /* OUT4R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4R_VOL_LIM_WIDTH 8 /* OUT4R_VOL_LIM - [7:0] */
+
+/*
+ * R1071 (0x42F) - Noise Gate Select 4R
+ */
+#define ARIZONA_OUT4R_NGATE_SRC_MASK 0x0FFF /* OUT4R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4R_NGATE_SRC_SHIFT 0 /* OUT4R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4R_NGATE_SRC_WIDTH 12 /* OUT4R_NGATE_SRC - [11:0] */
+
+/*
+ * R1072 (0x430) - Output Path Config 5L
+ */
+#define ARIZONA_OUT5_OSR 0x2000 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_MASK 0x2000 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_SHIFT 13 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_WIDTH 1 /* OUT5_OSR */
+#define ARIZONA_OUT5L_ANC_SRC_MASK 0x0C00 /* OUT5L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5L_ANC_SRC_SHIFT 10 /* OUT5L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5L_ANC_SRC_WIDTH 2 /* OUT5L_ANC_SRC - [11:10] */
+
+/*
+ * R1073 (0x431) - DAC Digital Volume 5L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT5L_MUTE 0x0100 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_MASK 0x0100 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_SHIFT 8 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_WIDTH 1 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_VOL_MASK 0x00FF /* OUT5L_VOL - [7:0] */
+#define ARIZONA_OUT5L_VOL_SHIFT 0 /* OUT5L_VOL - [7:0] */
+#define ARIZONA_OUT5L_VOL_WIDTH 8 /* OUT5L_VOL - [7:0] */
+
+/*
+ * R1074 (0x432) - DAC Volume Limit 5L
+ */
+#define ARIZONA_OUT5L_VOL_LIM_MASK 0x00FF /* OUT5L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5L_VOL_LIM_SHIFT 0 /* OUT5L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5L_VOL_LIM_WIDTH 8 /* OUT5L_VOL_LIM - [7:0] */
+
+/*
+ * R1075 (0x433) - Noise Gate Select 5L
+ */
+#define ARIZONA_OUT5L_NGATE_SRC_MASK 0x0FFF /* OUT5L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5L_NGATE_SRC_SHIFT 0 /* OUT5L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5L_NGATE_SRC_WIDTH 12 /* OUT5L_NGATE_SRC - [11:0] */
+
+/*
+ * R1076 (0x434) - Output Path Config 5R
+ */
+#define ARIZONA_OUT5R_ANC_SRC_MASK 0x0C00 /* OUT5R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5R_ANC_SRC_SHIFT 10 /* OUT5R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5R_ANC_SRC_WIDTH 2 /* OUT5R_ANC_SRC - [11:10] */
+
+/*
+ * R1077 (0x435) - DAC Digital Volume 5R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT5R_MUTE 0x0100 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_MASK 0x0100 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_SHIFT 8 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_WIDTH 1 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_VOL_MASK 0x00FF /* OUT5R_VOL - [7:0] */
+#define ARIZONA_OUT5R_VOL_SHIFT 0 /* OUT5R_VOL - [7:0] */
+#define ARIZONA_OUT5R_VOL_WIDTH 8 /* OUT5R_VOL - [7:0] */
+
+/*
+ * R1078 (0x436) - DAC Volume Limit 5R
+ */
+#define ARIZONA_OUT5R_VOL_LIM_MASK 0x00FF /* OUT5R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5R_VOL_LIM_SHIFT 0 /* OUT5R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5R_VOL_LIM_WIDTH 8 /* OUT5R_VOL_LIM - [7:0] */
+
+/*
+ * R1079 (0x437) - Noise Gate Select 5R
+ */
+#define ARIZONA_OUT5R_NGATE_SRC_MASK 0x0FFF /* OUT5R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5R_NGATE_SRC_SHIFT 0 /* OUT5R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5R_NGATE_SRC_WIDTH 12 /* OUT5R_NGATE_SRC - [11:0] */
+
+/*
+ * R1080 (0x438) - Output Path Config 6L
+ */
+#define ARIZONA_OUT6_OSR 0x2000 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_MASK 0x2000 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_SHIFT 13 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_WIDTH 1 /* OUT6_OSR */
+#define ARIZONA_OUT6L_ANC_SRC_MASK 0x0C00 /* OUT6L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6L_ANC_SRC_SHIFT 10 /* OUT6L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6L_ANC_SRC_WIDTH 2 /* OUT6L_ANC_SRC - [11:10] */
+
+/*
+ * R1081 (0x439) - DAC Digital Volume 6L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT6L_MUTE 0x0100 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_MASK 0x0100 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_SHIFT 8 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_WIDTH 1 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_VOL_MASK 0x00FF /* OUT6L_VOL - [7:0] */
+#define ARIZONA_OUT6L_VOL_SHIFT 0 /* OUT6L_VOL - [7:0] */
+#define ARIZONA_OUT6L_VOL_WIDTH 8 /* OUT6L_VOL - [7:0] */
+
+/*
+ * R1082 (0x43A) - DAC Volume Limit 6L
+ */
+#define ARIZONA_OUT6L_VOL_LIM_MASK 0x00FF /* OUT6L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6L_VOL_LIM_SHIFT 0 /* OUT6L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6L_VOL_LIM_WIDTH 8 /* OUT6L_VOL_LIM - [7:0] */
+
+/*
+ * R1083 (0x43B) - Noise Gate Select 6L
+ */
+#define ARIZONA_OUT6L_NGATE_SRC_MASK 0x0FFF /* OUT6L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6L_NGATE_SRC_SHIFT 0 /* OUT6L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6L_NGATE_SRC_WIDTH 12 /* OUT6L_NGATE_SRC - [11:0] */
+
+/*
+ * R1084 (0x43C) - Output Path Config 6R
+ */
+#define ARIZONA_OUT6R_ANC_SRC_MASK 0x0C00 /* OUT6R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6R_ANC_SRC_SHIFT 10 /* OUT6R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6R_ANC_SRC_WIDTH 2 /* OUT6R_ANC_SRC - [11:10] */
+
+/*
+ * R1085 (0x43D) - DAC Digital Volume 6R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT6R_MUTE 0x0100 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_MASK 0x0100 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_SHIFT 8 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_WIDTH 1 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_VOL_MASK 0x00FF /* OUT6R_VOL - [7:0] */
+#define ARIZONA_OUT6R_VOL_SHIFT 0 /* OUT6R_VOL - [7:0] */
+#define ARIZONA_OUT6R_VOL_WIDTH 8 /* OUT6R_VOL - [7:0] */
+
+/*
+ * R1086 (0x43E) - DAC Volume Limit 6R
+ */
+#define ARIZONA_OUT6R_VOL_LIM_MASK 0x00FF /* OUT6R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6R_VOL_LIM_SHIFT 0 /* OUT6R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6R_VOL_LIM_WIDTH 8 /* OUT6R_VOL_LIM - [7:0] */
+
+/*
+ * R1087 (0x43F) - Noise Gate Select 6R
+ */
+#define ARIZONA_OUT6R_NGATE_SRC_MASK 0x0FFF /* OUT6R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6R_NGATE_SRC_SHIFT 0 /* OUT6R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6R_NGATE_SRC_WIDTH 12 /* OUT6R_NGATE_SRC - [11:0] */
+
+/*
+ * R1104 (0x450) - DAC AEC Control 1
+ */
+#define ARIZONA_AEC_LOOPBACK_SRC_MASK 0x003C /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_LOOPBACK_SRC_SHIFT 2 /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_LOOPBACK_SRC_WIDTH 4 /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_ENA_STS 0x0002 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_MASK 0x0002 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_SHIFT 1 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_WIDTH 1 /* AEC_ENA_STS */
+#define ARIZONA_AEC_LOOPBACK_ENA 0x0001 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_MASK 0x0001 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_SHIFT 0 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_WIDTH 1 /* AEC_LOOPBACK_ENA */
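
Note that each _WIDTH value is redundant with its mask (mask == ((1 << width) - 1) << shift), which makes range checking straightforward. A minimal read-side sketch, assuming regmap access and using the 0x450 address from the R1104 comment above (helper name hypothetical):

#include <linux/regmap.h>

/* Read back the AEC loopback source field from R1104 (0x450). */
static int example_get_aec_loopback_src(struct regmap *regmap,
					unsigned int *src)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, 0x450, &val);
	if (ret)
		return ret;

	*src = (val & ARIZONA_AEC_LOOPBACK_SRC_MASK) >>
	       ARIZONA_AEC_LOOPBACK_SRC_SHIFT;
	return 0;
}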
+
+/*
+ * R1112 (0x458) - Noise Gate Control
+ */
+#define ARIZONA_NGATE_HOLD_MASK 0x0030 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_HOLD_SHIFT 4 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_HOLD_WIDTH 2 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_THR_MASK 0x000E /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_THR_SHIFT 1 /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_THR_WIDTH 3 /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_ENA 0x0001 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_MASK 0x0001 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_SHIFT 0 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_WIDTH 1 /* NGATE_ENA */
+
+/*
+ * R1168 (0x490) - PDM SPK1 CTRL 1
+ */
+#define ARIZONA_SPK1R_MUTE 0x2000 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_MASK 0x2000 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_SHIFT 13 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_WIDTH 1 /* SPK1R_MUTE */
+#define ARIZONA_SPK1L_MUTE 0x1000 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_MASK 0x1000 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_SHIFT 12 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_WIDTH 1 /* SPK1L_MUTE */
+#define ARIZONA_SPK1_MUTE_ENDIAN 0x0100 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_MASK 0x0100 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_SHIFT 8 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_WIDTH 1 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_SEQ1_MASK 0x00FF /* SPK1_MUTE_SEQ1 - [7:0] */
+#define ARIZONA_SPK1_MUTE_SEQ1_SHIFT 0 /* SPK1_MUTE_SEQ1 - [7:0] */
+#define ARIZONA_SPK1_MUTE_SEQ1_WIDTH 8 /* SPK1_MUTE_SEQ1 - [7:0] */
+
+/*
+ * R1169 (0x491) - PDM SPK1 CTRL 2
+ */
+#define ARIZONA_SPK1_FMT 0x0001 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_MASK 0x0001 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_SHIFT 0 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_WIDTH 1 /* SPK1_FMT */
+
+/*
+ * R1170 (0x492) - PDM SPK2 CTRL 1
+ */
+#define ARIZONA_SPK2R_MUTE 0x2000 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_MASK 0x2000 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_SHIFT 13 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_WIDTH 1 /* SPK2R_MUTE */
+#define ARIZONA_SPK2L_MUTE 0x1000 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_MASK 0x1000 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_SHIFT 12 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_WIDTH 1 /* SPK2L_MUTE */
+#define ARIZONA_SPK2_MUTE_ENDIAN 0x0100 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_MASK 0x0100 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_SHIFT 8 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_WIDTH 1 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_SEQ_MASK 0x00FF /* SPK2_MUTE_SEQ - [7:0] */
+#define ARIZONA_SPK2_MUTE_SEQ_SHIFT 0 /* SPK2_MUTE_SEQ - [7:0] */
+#define ARIZONA_SPK2_MUTE_SEQ_WIDTH 8 /* SPK2_MUTE_SEQ - [7:0] */
+
+/*
+ * R1171 (0x493) - PDM SPK2 CTRL 2
+ */
+#define ARIZONA_SPK2_FMT 0x0001 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_MASK 0x0001 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_SHIFT 0 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_WIDTH 1 /* SPK2_FMT */
+
+/*
+ * R1244 (0x4DC) - DAC comp 1
+ */
+#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */
+#define ARIZONA_OUT_COMP_COEFF_SHIFT 0 /* OUT_COMP_COEFF - [15:0] */
+#define ARIZONA_OUT_COMP_COEFF_WIDTH 16 /* OUT_COMP_COEFF - [15:0] */
+
+/*
+ * R1245 (0x4DD) - DAC comp 2
+ */
+#define ARIZONA_OUT_COMP_COEFF_1 0x0002 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_MASK 0x0002 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_SHIFT 1 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_WIDTH 1 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_SEL 0x0001 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_MASK 0x0001 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_SHIFT 0 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_WIDTH 1 /* OUT_COMP_COEFF_SEL */
+
+/*
+ * R1246 (0x4DE) - DAC comp 3
+ */
+#define ARIZONA_AEC_COMP_COEFF_MASK 0xFFFF /* AEC_COMP_COEFF - [15:0] */
+#define ARIZONA_AEC_COMP_COEFF_SHIFT 0 /* AEC_COMP_COEFF - [15:0] */
+#define ARIZONA_AEC_COMP_COEFF_WIDTH 16 /* AEC_COMP_COEFF - [15:0] */
+
+/*
+ * R1247 (0x4DF) - DAC comp 4
+ */
+#define ARIZONA_AEC_COMP_COEFF_1 0x0002 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_MASK 0x0002 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_SHIFT 1 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_WIDTH 1 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_SEL 0x0001 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_MASK 0x0001 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_SHIFT 0 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_WIDTH 1 /* AEC_COMP_COEFF_SEL */
+
+/*
+ * R1280 (0x500) - AIF1 BCLK Ctrl
+ */
+#define ARIZONA_AIF1_BCLK_INV 0x0080 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_MASK 0x0080 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_SHIFT 7 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_FRC 0x0040 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_MASK 0x0040 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_SHIFT 6 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_WIDTH 1 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_MSTR 0x0020 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_MASK 0x0020 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_SHIFT 5 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_WIDTH 1 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_FREQ_MASK 0x001F /* AIF1_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF1_BCLK_FREQ_SHIFT 0 /* AIF1_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF1_BCLK_FREQ_WIDTH 5 /* AIF1_BCLK_FREQ - [4:0] */
+
+/*
+ * R1281 (0x501) - AIF1 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF1TX_DAT_TRI 0x0020 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_MASK 0x0020 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_SHIFT 5 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_WIDTH 1 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_LRCLK_SRC 0x0008 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_MASK 0x0008 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_SHIFT 3 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_WIDTH 1 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_INV 0x0004 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_MASK 0x0004 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_SHIFT 2 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_WIDTH 1 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_FRC 0x0002 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_MASK 0x0002 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_SHIFT 1 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_WIDTH 1 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_MSTR 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_MASK 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_SHIFT 0 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_WIDTH 1 /* AIF1TX_LRCLK_MSTR */
+
+/*
+ * R1282 (0x502) - AIF1 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF1RX_LRCLK_INV 0x0004 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_MASK 0x0004 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_SHIFT 2 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_WIDTH 1 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_FRC 0x0002 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_MASK 0x0002 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_SHIFT 1 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_WIDTH 1 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_MSTR 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_MASK 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_SHIFT 0 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_WIDTH 1 /* AIF1RX_LRCLK_MSTR */
+
+/*
+ * R1283 (0x503) - AIF1 Rate Ctrl
+ */
+#define ARIZONA_AIF1_RATE_MASK 0x7800 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_RATE_SHIFT 11 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_RATE_WIDTH 4 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_TRI 0x0040 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_MASK 0x0040 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_SHIFT 6 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_WIDTH 1 /* AIF1_TRI */
+
+/*
+ * R1284 (0x504) - AIF1 Format
+ */
+#define ARIZONA_AIF1_FMT_MASK 0x0007 /* AIF1_FMT - [2:0] */
+#define ARIZONA_AIF1_FMT_SHIFT 0 /* AIF1_FMT - [2:0] */
+#define ARIZONA_AIF1_FMT_WIDTH 3 /* AIF1_FMT - [2:0] */
+
+/*
+ * R1285 (0x505) - AIF1 Tx BCLK Rate
+ */
+#define ARIZONA_AIF1TX_BCPF_MASK 0x1FFF /* AIF1TX_BCPF - [12:0] */
+#define ARIZONA_AIF1TX_BCPF_SHIFT 0 /* AIF1TX_BCPF - [12:0] */
+#define ARIZONA_AIF1TX_BCPF_WIDTH 13 /* AIF1TX_BCPF - [12:0] */
+
+/*
+ * R1286 (0x506) - AIF1 Rx BCLK Rate
+ */
+#define ARIZONA_AIF1RX_BCPF_MASK 0x1FFF /* AIF1RX_BCPF - [12:0] */
+#define ARIZONA_AIF1RX_BCPF_SHIFT 0 /* AIF1RX_BCPF - [12:0] */
+#define ARIZONA_AIF1RX_BCPF_WIDTH 13 /* AIF1RX_BCPF - [12:0] */
+
+/*
+ * R1287 (0x507) - AIF1 Frame Ctrl 1
+ */
+#define ARIZONA_AIF1TX_WL_MASK 0x3F00 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_WL_SHIFT 8 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_WL_WIDTH 6 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_SLOT_LEN_MASK 0x00FF /* AIF1TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1TX_SLOT_LEN_SHIFT 0 /* AIF1TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1TX_SLOT_LEN_WIDTH 8 /* AIF1TX_SLOT_LEN - [7:0] */
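
Where a register packs several fields, such as the word length and slot length here, one masked update can program both at once. A hedged sketch (address 0x507 from the R1287 comment above; the function name and parameter encoding are illustrative only):

#include <linux/regmap.h>

/* Program AIF1 TX word length and slot length in R1287 (0x507). */
static int example_aif1_tx_frame(struct regmap *regmap,
				 unsigned int wl, unsigned int slot_len)
{
	unsigned int val;

	val = (wl << ARIZONA_AIF1TX_WL_SHIFT) & ARIZONA_AIF1TX_WL_MASK;
	val |= (slot_len << ARIZONA_AIF1TX_SLOT_LEN_SHIFT) &
	       ARIZONA_AIF1TX_SLOT_LEN_MASK;

	return regmap_update_bits(regmap, 0x507,
				  ARIZONA_AIF1TX_WL_MASK |
				  ARIZONA_AIF1TX_SLOT_LEN_MASK, val);
}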
+
+/*
+ * R1288 (0x508) - AIF1 Frame Ctrl 2
+ */
+#define ARIZONA_AIF1RX_WL_MASK 0x3F00 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_WL_SHIFT 8 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_WL_WIDTH 6 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_SLOT_LEN_MASK 0x00FF /* AIF1RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1RX_SLOT_LEN_SHIFT 0 /* AIF1RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1RX_SLOT_LEN_WIDTH 8 /* AIF1RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1289 (0x509) - AIF1 Frame Ctrl 3
+ */
+#define ARIZONA_AIF1TX1_SLOT_MASK 0x003F /* AIF1TX1_SLOT - [5:0] */
+#define ARIZONA_AIF1TX1_SLOT_SHIFT 0 /* AIF1TX1_SLOT - [5:0] */
+#define ARIZONA_AIF1TX1_SLOT_WIDTH 6 /* AIF1TX1_SLOT - [5:0] */
+
+/*
+ * R1290 (0x50A) - AIF1 Frame Ctrl 4
+ */
+#define ARIZONA_AIF1TX2_SLOT_MASK 0x003F /* AIF1TX2_SLOT - [5:0] */
+#define ARIZONA_AIF1TX2_SLOT_SHIFT 0 /* AIF1TX2_SLOT - [5:0] */
+#define ARIZONA_AIF1TX2_SLOT_WIDTH 6 /* AIF1TX2_SLOT - [5:0] */
+
+/*
+ * R1291 (0x50B) - AIF1 Frame Ctrl 5
+ */
+#define ARIZONA_AIF1TX3_SLOT_MASK 0x003F /* AIF1TX3_SLOT - [5:0] */
+#define ARIZONA_AIF1TX3_SLOT_SHIFT 0 /* AIF1TX3_SLOT - [5:0] */
+#define ARIZONA_AIF1TX3_SLOT_WIDTH 6 /* AIF1TX3_SLOT - [5:0] */
+
+/*
+ * R1292 (0x50C) - AIF1 Frame Ctrl 6
+ */
+#define ARIZONA_AIF1TX4_SLOT_MASK 0x003F /* AIF1TX4_SLOT - [5:0] */
+#define ARIZONA_AIF1TX4_SLOT_SHIFT 0 /* AIF1TX4_SLOT - [5:0] */
+#define ARIZONA_AIF1TX4_SLOT_WIDTH 6 /* AIF1TX4_SLOT - [5:0] */
+
+/*
+ * R1293 (0x50D) - AIF1 Frame Ctrl 7
+ */
+#define ARIZONA_AIF1TX5_SLOT_MASK 0x003F /* AIF1TX5_SLOT - [5:0] */
+#define ARIZONA_AIF1TX5_SLOT_SHIFT 0 /* AIF1TX5_SLOT - [5:0] */
+#define ARIZONA_AIF1TX5_SLOT_WIDTH 6 /* AIF1TX5_SLOT - [5:0] */
+
+/*
+ * R1294 (0x50E) - AIF1 Frame Ctrl 8
+ */
+#define ARIZONA_AIF1TX6_SLOT_MASK 0x003F /* AIF1TX6_SLOT - [5:0] */
+#define ARIZONA_AIF1TX6_SLOT_SHIFT 0 /* AIF1TX6_SLOT - [5:0] */
+#define ARIZONA_AIF1TX6_SLOT_WIDTH 6 /* AIF1TX6_SLOT - [5:0] */
+
+/*
+ * R1295 (0x50F) - AIF1 Frame Ctrl 9
+ */
+#define ARIZONA_AIF1TX7_SLOT_MASK 0x003F /* AIF1TX7_SLOT - [5:0] */
+#define ARIZONA_AIF1TX7_SLOT_SHIFT 0 /* AIF1TX7_SLOT - [5:0] */
+#define ARIZONA_AIF1TX7_SLOT_WIDTH 6 /* AIF1TX7_SLOT - [5:0] */
+
+/*
+ * R1296 (0x510) - AIF1 Frame Ctrl 10
+ */
+#define ARIZONA_AIF1TX8_SLOT_MASK 0x003F /* AIF1TX8_SLOT - [5:0] */
+#define ARIZONA_AIF1TX8_SLOT_SHIFT 0 /* AIF1TX8_SLOT - [5:0] */
+#define ARIZONA_AIF1TX8_SLOT_WIDTH 6 /* AIF1TX8_SLOT - [5:0] */
+
+/*
+ * R1297 (0x511) - AIF1 Frame Ctrl 11
+ */
+#define ARIZONA_AIF1RX1_SLOT_MASK 0x003F /* AIF1RX1_SLOT - [5:0] */
+#define ARIZONA_AIF1RX1_SLOT_SHIFT 0 /* AIF1RX1_SLOT - [5:0] */
+#define ARIZONA_AIF1RX1_SLOT_WIDTH 6 /* AIF1RX1_SLOT - [5:0] */
+
+/*
+ * R1298 (0x512) - AIF1 Frame Ctrl 12
+ */
+#define ARIZONA_AIF1RX2_SLOT_MASK 0x003F /* AIF1RX2_SLOT - [5:0] */
+#define ARIZONA_AIF1RX2_SLOT_SHIFT 0 /* AIF1RX2_SLOT - [5:0] */
+#define ARIZONA_AIF1RX2_SLOT_WIDTH 6 /* AIF1RX2_SLOT - [5:0] */
+
+/*
+ * R1299 (0x513) - AIF1 Frame Ctrl 13
+ */
+#define ARIZONA_AIF1RX3_SLOT_MASK 0x003F /* AIF1RX3_SLOT - [5:0] */
+#define ARIZONA_AIF1RX3_SLOT_SHIFT 0 /* AIF1RX3_SLOT - [5:0] */
+#define ARIZONA_AIF1RX3_SLOT_WIDTH 6 /* AIF1RX3_SLOT - [5:0] */
+
+/*
+ * R1300 (0x514) - AIF1 Frame Ctrl 14
+ */
+#define ARIZONA_AIF1RX4_SLOT_MASK 0x003F /* AIF1RX4_SLOT - [5:0] */
+#define ARIZONA_AIF1RX4_SLOT_SHIFT 0 /* AIF1RX4_SLOT - [5:0] */
+#define ARIZONA_AIF1RX4_SLOT_WIDTH 6 /* AIF1RX4_SLOT - [5:0] */
+
+/*
+ * R1301 (0x515) - AIF1 Frame Ctrl 15
+ */
+#define ARIZONA_AIF1RX5_SLOT_MASK 0x003F /* AIF1RX5_SLOT - [5:0] */
+#define ARIZONA_AIF1RX5_SLOT_SHIFT 0 /* AIF1RX5_SLOT - [5:0] */
+#define ARIZONA_AIF1RX5_SLOT_WIDTH 6 /* AIF1RX5_SLOT - [5:0] */
+
+/*
+ * R1302 (0x516) - AIF1 Frame Ctrl 16
+ */
+#define ARIZONA_AIF1RX6_SLOT_MASK 0x003F /* AIF1RX6_SLOT - [5:0] */
+#define ARIZONA_AIF1RX6_SLOT_SHIFT 0 /* AIF1RX6_SLOT - [5:0] */
+#define ARIZONA_AIF1RX6_SLOT_WIDTH 6 /* AIF1RX6_SLOT - [5:0] */
+
+/*
+ * R1303 (0x517) - AIF1 Frame Ctrl 17
+ */
+#define ARIZONA_AIF1RX7_SLOT_MASK 0x003F /* AIF1RX7_SLOT - [5:0] */
+#define ARIZONA_AIF1RX7_SLOT_SHIFT 0 /* AIF1RX7_SLOT - [5:0] */
+#define ARIZONA_AIF1RX7_SLOT_WIDTH 6 /* AIF1RX7_SLOT - [5:0] */
+
+/*
+ * R1304 (0x518) - AIF1 Frame Ctrl 18
+ */
+#define ARIZONA_AIF1RX8_SLOT_MASK 0x003F /* AIF1RX8_SLOT - [5:0] */
+#define ARIZONA_AIF1RX8_SLOT_SHIFT 0 /* AIF1RX8_SLOT - [5:0] */
+#define ARIZONA_AIF1RX8_SLOT_WIDTH 6 /* AIF1RX8_SLOT - [5:0] */
+
+/*
+ * R1305 (0x519) - AIF1 Tx Enables
+ */
+#define ARIZONA_AIF1TX8_ENA 0x0080 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_MASK 0x0080 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_SHIFT 7 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_WIDTH 1 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX7_ENA 0x0040 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_MASK 0x0040 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_SHIFT 6 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_WIDTH 1 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX6_ENA 0x0020 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_MASK 0x0020 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_SHIFT 5 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_WIDTH 1 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX5_ENA 0x0010 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_MASK 0x0010 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_SHIFT 4 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_WIDTH 1 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX4_ENA 0x0008 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_MASK 0x0008 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_SHIFT 3 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_WIDTH 1 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX3_ENA 0x0004 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_MASK 0x0004 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_SHIFT 2 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_WIDTH 1 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX2_ENA 0x0002 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_MASK 0x0002 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_SHIFT 1 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_WIDTH 1 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX1_ENA 0x0001 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_MASK 0x0001 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_SHIFT 0 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_WIDTH 1 /* AIF1TX1_ENA */
+
+/*
+ * R1306 (0x51A) - AIF1 Rx Enables
+ */
+#define ARIZONA_AIF1RX8_ENA 0x0080 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_MASK 0x0080 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_SHIFT 7 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_WIDTH 1 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX7_ENA 0x0040 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_MASK 0x0040 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_SHIFT 6 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_WIDTH 1 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX6_ENA 0x0020 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_MASK 0x0020 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_SHIFT 5 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_WIDTH 1 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX5_ENA 0x0010 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_MASK 0x0010 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_SHIFT 4 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_WIDTH 1 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX4_ENA 0x0008 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_MASK 0x0008 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_SHIFT 3 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_WIDTH 1 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX3_ENA 0x0004 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_MASK 0x0004 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_SHIFT 2 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_WIDTH 1 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX2_ENA 0x0002 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_MASK 0x0002 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_SHIFT 1 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_WIDTH 1 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX1_ENA 0x0001 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_MASK 0x0001 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_SHIFT 0 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_WIDTH 1 /* AIF1RX1_ENA */
+
+/*
+ * R1307 (0x51B) - AIF1 Force Write
+ */
+#define ARIZONA_AIF1_FRC_WR 0x0001 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_MASK 0x0001 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_SHIFT 0 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_WIDTH 1 /* AIF1_FRC_WR */
+
+/*
+ * R1344 (0x540) - AIF2 BCLK Ctrl
+ */
+#define ARIZONA_AIF2_BCLK_INV 0x0080 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_MASK 0x0080 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_SHIFT 7 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_FRC 0x0040 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_MASK 0x0040 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_SHIFT 6 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_WIDTH 1 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_MSTR 0x0020 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_MASK 0x0020 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_SHIFT 5 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_WIDTH 1 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_FREQ_MASK 0x001F /* AIF2_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF2_BCLK_FREQ_SHIFT 0 /* AIF2_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF2_BCLK_FREQ_WIDTH 5 /* AIF2_BCLK_FREQ - [4:0] */
+
+/*
+ * R1345 (0x541) - AIF2 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF2TX_DAT_TRI 0x0020 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_MASK 0x0020 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_SHIFT 5 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_WIDTH 1 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_LRCLK_SRC 0x0008 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_MASK 0x0008 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_SHIFT 3 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_WIDTH 1 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_INV 0x0004 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_MASK 0x0004 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_SHIFT 2 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_WIDTH 1 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_FRC 0x0002 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_MASK 0x0002 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_SHIFT 1 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_WIDTH 1 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_MSTR 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_MASK 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_SHIFT 0 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_WIDTH 1 /* AIF2TX_LRCLK_MSTR */
+
+/*
+ * R1346 (0x542) - AIF2 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF2RX_LRCLK_INV 0x0004 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_MASK 0x0004 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_SHIFT 2 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_WIDTH 1 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_FRC 0x0002 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_MASK 0x0002 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_SHIFT 1 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_WIDTH 1 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_MSTR 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_MASK 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_SHIFT 0 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_WIDTH 1 /* AIF2RX_LRCLK_MSTR */
+
+/*
+ * R1347 (0x543) - AIF2 Rate Ctrl
+ */
+#define ARIZONA_AIF2_RATE_MASK 0x7800 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_RATE_SHIFT 11 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_RATE_WIDTH 4 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_TRI 0x0040 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_MASK 0x0040 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_SHIFT 6 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_WIDTH 1 /* AIF2_TRI */
+
+/*
+ * R1348 (0x544) - AIF2 Format
+ */
+#define ARIZONA_AIF2_FMT_MASK 0x0007 /* AIF2_FMT - [2:0] */
+#define ARIZONA_AIF2_FMT_SHIFT 0 /* AIF2_FMT - [2:0] */
+#define ARIZONA_AIF2_FMT_WIDTH 3 /* AIF2_FMT - [2:0] */
+
+/*
+ * R1349 (0x545) - AIF2 Tx BCLK Rate
+ */
+#define ARIZONA_AIF2TX_BCPF_MASK 0x1FFF /* AIF2TX_BCPF - [12:0] */
+#define ARIZONA_AIF2TX_BCPF_SHIFT 0 /* AIF2TX_BCPF - [12:0] */
+#define ARIZONA_AIF2TX_BCPF_WIDTH 13 /* AIF2TX_BCPF - [12:0] */
+
+/*
+ * R1350 (0x546) - AIF2 Rx BCLK Rate
+ */
+#define ARIZONA_AIF2RX_BCPF_MASK 0x1FFF /* AIF2RX_BCPF - [12:0] */
+#define ARIZONA_AIF2RX_BCPF_SHIFT 0 /* AIF2RX_BCPF - [12:0] */
+#define ARIZONA_AIF2RX_BCPF_WIDTH 13 /* AIF2RX_BCPF - [12:0] */
+
+/*
+ * R1351 (0x547) - AIF2 Frame Ctrl 1
+ */
+#define ARIZONA_AIF2TX_WL_MASK 0x3F00 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_WL_SHIFT 8 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_WL_WIDTH 6 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_SLOT_LEN_MASK 0x00FF /* AIF2TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2TX_SLOT_LEN_SHIFT 0 /* AIF2TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2TX_SLOT_LEN_WIDTH 8 /* AIF2TX_SLOT_LEN - [7:0] */
+
+/*
+ * R1352 (0x548) - AIF2 Frame Ctrl 2
+ */
+#define ARIZONA_AIF2RX_WL_MASK 0x3F00 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_WL_SHIFT 8 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_WL_WIDTH 6 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_SLOT_LEN_MASK 0x00FF /* AIF2RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2RX_SLOT_LEN_SHIFT 0 /* AIF2RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2RX_SLOT_LEN_WIDTH 8 /* AIF2RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1353 (0x549) - AIF2 Frame Ctrl 3
+ */
+#define ARIZONA_AIF2TX1_SLOT_MASK 0x003F /* AIF2TX1_SLOT - [5:0] */
+#define ARIZONA_AIF2TX1_SLOT_SHIFT 0 /* AIF2TX1_SLOT - [5:0] */
+#define ARIZONA_AIF2TX1_SLOT_WIDTH 6 /* AIF2TX1_SLOT - [5:0] */
+
+/*
+ * R1354 (0x54A) - AIF2 Frame Ctrl 4
+ */
+#define ARIZONA_AIF2TX2_SLOT_MASK 0x003F /* AIF2TX2_SLOT - [5:0] */
+#define ARIZONA_AIF2TX2_SLOT_SHIFT 0 /* AIF2TX2_SLOT - [5:0] */
+#define ARIZONA_AIF2TX2_SLOT_WIDTH 6 /* AIF2TX2_SLOT - [5:0] */
+
+/*
+ * R1361 (0x551) - AIF2 Frame Ctrl 11
+ */
+#define ARIZONA_AIF2RX1_SLOT_MASK 0x003F /* AIF2RX1_SLOT - [5:0] */
+#define ARIZONA_AIF2RX1_SLOT_SHIFT 0 /* AIF2RX1_SLOT - [5:0] */
+#define ARIZONA_AIF2RX1_SLOT_WIDTH 6 /* AIF2RX1_SLOT - [5:0] */
+
+/*
+ * R1362 (0x552) - AIF2 Frame Ctrl 12
+ */
+#define ARIZONA_AIF2RX2_SLOT_MASK 0x003F /* AIF2RX2_SLOT - [5:0] */
+#define ARIZONA_AIF2RX2_SLOT_SHIFT 0 /* AIF2RX2_SLOT - [5:0] */
+#define ARIZONA_AIF2RX2_SLOT_WIDTH 6 /* AIF2RX2_SLOT - [5:0] */
+
+/*
+ * R1369 (0x559) - AIF2 Tx Enables
+ */
+#define ARIZONA_AIF2TX2_ENA 0x0002 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_MASK 0x0002 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_SHIFT 1 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_WIDTH 1 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX1_ENA 0x0001 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_MASK 0x0001 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_SHIFT 0 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_WIDTH 1 /* AIF2TX1_ENA */
+
+/*
+ * R1370 (0x55A) - AIF2 Rx Enables
+ */
+#define ARIZONA_AIF2RX2_ENA 0x0002 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_MASK 0x0002 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_SHIFT 1 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_WIDTH 1 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX1_ENA 0x0001 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_MASK 0x0001 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_SHIFT 0 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_WIDTH 1 /* AIF2RX1_ENA */
+
+/*
+ * R1371 (0x55B) - AIF2 Force Write
+ */
+#define ARIZONA_AIF2_FRC_WR 0x0001 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_MASK 0x0001 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_SHIFT 0 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_WIDTH 1 /* AIF2_FRC_WR */
+
+/*
+ * R1408 (0x580) - AIF3 BCLK Ctrl
+ */
+#define ARIZONA_AIF3_BCLK_INV 0x0080 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_MASK 0x0080 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_SHIFT 7 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_WIDTH 1 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_FRC 0x0040 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_MASK 0x0040 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_SHIFT 6 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_WIDTH 1 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_MSTR 0x0020 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_MASK 0x0020 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_SHIFT 5 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_WIDTH 1 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_FREQ_MASK 0x001F /* AIF3_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF3_BCLK_FREQ_SHIFT 0 /* AIF3_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF3_BCLK_FREQ_WIDTH 5 /* AIF3_BCLK_FREQ - [4:0] */
+
+/*
+ * R1409 (0x581) - AIF3 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF3TX_DAT_TRI 0x0020 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_MASK 0x0020 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_SHIFT 5 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_WIDTH 1 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_LRCLK_SRC 0x0008 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_MASK 0x0008 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_SHIFT 3 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_WIDTH 1 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_INV 0x0004 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_MASK 0x0004 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_SHIFT 2 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_WIDTH 1 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_FRC 0x0002 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_MASK 0x0002 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_SHIFT 1 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_WIDTH 1 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_MSTR 0x0001 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_MASK 0x0001 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_SHIFT 0 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_WIDTH 1 /* AIF3TX_LRCLK_MSTR */
+
+/*
+ * R1410 (0x582) - AIF3 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF3RX_LRCLK_INV 0x0004 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_MASK 0x0004 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_SHIFT 2 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_WIDTH 1 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_FRC 0x0002 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_MASK 0x0002 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_SHIFT 1 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_WIDTH 1 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_MSTR 0x0001 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_MASK 0x0001 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_SHIFT 0 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_WIDTH 1 /* AIF3RX_LRCLK_MSTR */
+
+/*
+ * R1411 (0x583) - AIF3 Rate Ctrl
+ */
+#define ARIZONA_AIF3_RATE_MASK 0x7800 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_RATE_SHIFT 11 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_RATE_WIDTH 4 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_TRI 0x0040 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_MASK 0x0040 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_SHIFT 6 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_WIDTH 1 /* AIF3_TRI */
+
+/*
+ * R1412 (0x584) - AIF3 Format
+ */
+#define ARIZONA_AIF3_FMT_MASK 0x0007 /* AIF3_FMT - [2:0] */
+#define ARIZONA_AIF3_FMT_SHIFT 0 /* AIF3_FMT - [2:0] */
+#define ARIZONA_AIF3_FMT_WIDTH 3 /* AIF3_FMT - [2:0] */
+
+/*
+ * R1413 (0x585) - AIF3 Tx BCLK Rate
+ */
+#define ARIZONA_AIF3TX_BCPF_MASK 0x1FFF /* AIF3TX_BCPF - [12:0] */
+#define ARIZONA_AIF3TX_BCPF_SHIFT 0 /* AIF3TX_BCPF - [12:0] */
+#define ARIZONA_AIF3TX_BCPF_WIDTH 13 /* AIF3TX_BCPF - [12:0] */
+
+/*
+ * R1414 (0x586) - AIF3 Rx BCLK Rate
+ */
+#define ARIZONA_AIF3RX_BCPF_MASK 0x1FFF /* AIF3RX_BCPF - [12:0] */
+#define ARIZONA_AIF3RX_BCPF_SHIFT 0 /* AIF3RX_BCPF - [12:0] */
+#define ARIZONA_AIF3RX_BCPF_WIDTH 13 /* AIF3RX_BCPF - [12:0] */
+
+/*
+ * R1415 (0x587) - AIF3 Frame Ctrl 1
+ */
+#define ARIZONA_AIF3TX_WL_MASK 0x3F00 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_WL_SHIFT 8 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_WL_WIDTH 6 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_SLOT_LEN_MASK 0x00FF /* AIF3TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3TX_SLOT_LEN_SHIFT 0 /* AIF3TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3TX_SLOT_LEN_WIDTH 8 /* AIF3TX_SLOT_LEN - [7:0] */
+
+/*
+ * R1416 (0x588) - AIF3 Frame Ctrl 2
+ */
+#define ARIZONA_AIF3RX_WL_MASK 0x3F00 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_WL_SHIFT 8 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_WL_WIDTH 6 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_SLOT_LEN_MASK 0x00FF /* AIF3RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3RX_SLOT_LEN_SHIFT 0 /* AIF3RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3RX_SLOT_LEN_WIDTH 8 /* AIF3RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1417 (0x589) - AIF3 Frame Ctrl 3
+ */
+#define ARIZONA_AIF3TX1_SLOT_MASK 0x003F /* AIF3TX1_SLOT - [5:0] */
+#define ARIZONA_AIF3TX1_SLOT_SHIFT 0 /* AIF3TX1_SLOT - [5:0] */
+#define ARIZONA_AIF3TX1_SLOT_WIDTH 6 /* AIF3TX1_SLOT - [5:0] */
+
+/*
+ * R1418 (0x58A) - AIF3 Frame Ctrl 4
+ */
+#define ARIZONA_AIF3TX2_SLOT_MASK 0x003F /* AIF3TX2_SLOT - [5:0] */
+#define ARIZONA_AIF3TX2_SLOT_SHIFT 0 /* AIF3TX2_SLOT - [5:0] */
+#define ARIZONA_AIF3TX2_SLOT_WIDTH 6 /* AIF3TX2_SLOT - [5:0] */
+
+/*
+ * R1425 (0x591) - AIF3 Frame Ctrl 11
+ */
+#define ARIZONA_AIF3RX1_SLOT_MASK 0x003F /* AIF3RX1_SLOT - [5:0] */
+#define ARIZONA_AIF3RX1_SLOT_SHIFT 0 /* AIF3RX1_SLOT - [5:0] */
+#define ARIZONA_AIF3RX1_SLOT_WIDTH 6 /* AIF3RX1_SLOT - [5:0] */
+
+/*
+ * R1426 (0x592) - AIF3 Frame Ctrl 12
+ */
+#define ARIZONA_AIF3RX2_SLOT_MASK 0x003F /* AIF3RX2_SLOT - [5:0] */
+#define ARIZONA_AIF3RX2_SLOT_SHIFT 0 /* AIF3RX2_SLOT - [5:0] */
+#define ARIZONA_AIF3RX2_SLOT_WIDTH 6 /* AIF3RX2_SLOT - [5:0] */
+
+/*
+ * R1433 (0x599) - AIF3 Tx Enables
+ */
+#define ARIZONA_AIF3TX2_ENA 0x0002 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_MASK 0x0002 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_SHIFT 1 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_WIDTH 1 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX1_ENA 0x0001 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_MASK 0x0001 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_SHIFT 0 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_WIDTH 1 /* AIF3TX1_ENA */
+
+/*
+ * R1434 (0x59A) - AIF3 Rx Enables
+ */
+#define ARIZONA_AIF3RX2_ENA 0x0002 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_MASK 0x0002 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_SHIFT 1 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_WIDTH 1 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX1_ENA 0x0001 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_MASK 0x0001 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_SHIFT 0 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_WIDTH 1 /* AIF3RX1_ENA */
+
+/*
+ * R1435 (0x59B) - AIF3 Force Write
+ */
+#define ARIZONA_AIF3_FRC_WR 0x0001 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_MASK 0x0001 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_SHIFT 0 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
+
+/*
+ * R1507 (0x5E3) - SLIMbus Framer Ref Gear
+ */
+#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_MASK 0x0010 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_SHIFT 4 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_WIDTH 1 /* SLIMCLK_SRC */
+#define ARIZONA_FRAMER_REF_GEAR_MASK 0x000F /* FRAMER_REF_GEAR - [3:0] */
+#define ARIZONA_FRAMER_REF_GEAR_SHIFT 0 /* FRAMER_REF_GEAR - [3:0] */
+#define ARIZONA_FRAMER_REF_GEAR_WIDTH 4 /* FRAMER_REF_GEAR - [3:0] */
+
+/*
+ * R1509 (0x5E5) - SLIMbus Rates 1
+ */
+#define ARIZONA_SLIMRX2_RATE_MASK 0x7800 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX2_RATE_SHIFT 11 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX2_RATE_WIDTH 4 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX1_RATE_MASK 0x0078 /* SLIMRX1_RATE - [6:3] */
+#define ARIZONA_SLIMRX1_RATE_SHIFT 3 /* SLIMRX1_RATE - [6:3] */
+#define ARIZONA_SLIMRX1_RATE_WIDTH 4 /* SLIMRX1_RATE - [6:3] */
+
+/*
+ * R1510 (0x5E6) - SLIMbus Rates 2
+ */
+#define ARIZONA_SLIMRX4_RATE_MASK 0x7800 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX4_RATE_SHIFT 11 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX4_RATE_WIDTH 4 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX3_RATE_MASK 0x0078 /* SLIMRX3_RATE - [6:3] */
+#define ARIZONA_SLIMRX3_RATE_SHIFT 3 /* SLIMRX3_RATE - [6:3] */
+#define ARIZONA_SLIMRX3_RATE_WIDTH 4 /* SLIMRX3_RATE - [6:3] */
+
+/*
+ * R1511 (0x5E7) - SLIMbus Rates 3
+ */
+#define ARIZONA_SLIMRX6_RATE_MASK 0x7800 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX6_RATE_SHIFT 11 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX6_RATE_WIDTH 4 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX5_RATE_MASK 0x0078 /* SLIMRX5_RATE - [6:3] */
+#define ARIZONA_SLIMRX5_RATE_SHIFT 3 /* SLIMRX5_RATE - [6:3] */
+#define ARIZONA_SLIMRX5_RATE_WIDTH 4 /* SLIMRX5_RATE - [6:3] */
+
+/*
+ * R1512 (0x5E8) - SLIMbus Rates 4
+ */
+#define ARIZONA_SLIMRX8_RATE_MASK 0x7800 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX8_RATE_SHIFT 11 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX8_RATE_WIDTH 4 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX7_RATE_MASK 0x0078 /* SLIMRX7_RATE - [6:3] */
+#define ARIZONA_SLIMRX7_RATE_SHIFT 3 /* SLIMRX7_RATE - [6:3] */
+#define ARIZONA_SLIMRX7_RATE_WIDTH 4 /* SLIMRX7_RATE - [6:3] */
+
+/*
+ * R1513 (0x5E9) - SLIMbus Rates 5
+ */
+#define ARIZONA_SLIMTX2_RATE_MASK 0x7800 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX2_RATE_SHIFT 11 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX2_RATE_WIDTH 4 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX1_RATE_MASK 0x0078 /* SLIMTX1_RATE - [6:3] */
+#define ARIZONA_SLIMTX1_RATE_SHIFT 3 /* SLIMTX1_RATE - [6:3] */
+#define ARIZONA_SLIMTX1_RATE_WIDTH 4 /* SLIMTX1_RATE - [6:3] */
+
+/*
+ * R1514 (0x5EA) - SLIMbus Rates 6
+ */
+#define ARIZONA_SLIMTX4_RATE_MASK 0x7800 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX4_RATE_SHIFT 11 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX4_RATE_WIDTH 4 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX3_RATE_MASK 0x0078 /* SLIMTX3_RATE - [6:3] */
+#define ARIZONA_SLIMTX3_RATE_SHIFT 3 /* SLIMTX3_RATE - [6:3] */
+#define ARIZONA_SLIMTX3_RATE_WIDTH 4 /* SLIMTX3_RATE - [6:3] */
+
+/*
+ * R1515 (0x5EB) - SLIMbus Rates 7
+ */
+#define ARIZONA_SLIMTX6_RATE_MASK 0x7800 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX6_RATE_SHIFT 11 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX6_RATE_WIDTH 4 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX5_RATE_MASK 0x0078 /* SLIMTX5_RATE - [6:3] */
+#define ARIZONA_SLIMTX5_RATE_SHIFT 3 /* SLIMTX5_RATE - [6:3] */
+#define ARIZONA_SLIMTX5_RATE_WIDTH 4 /* SLIMTX5_RATE - [6:3] */
+
+/*
+ * R1516 (0x5EC) - SLIMbus Rates 8
+ */
+#define ARIZONA_SLIMTX8_RATE_MASK 0x7800 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX8_RATE_SHIFT 11 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX8_RATE_WIDTH 4 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX7_RATE_MASK 0x0078 /* SLIMTX7_RATE - [6:3] */
+#define ARIZONA_SLIMTX7_RATE_SHIFT 3 /* SLIMTX7_RATE - [6:3] */
+#define ARIZONA_SLIMTX7_RATE_WIDTH 4 /* SLIMTX7_RATE - [6:3] */
+
+/*
+ * R1525 (0x5F5) - SLIMbus RX Channel Enable
+ */
+#define ARIZONA_SLIMRX8_ENA 0x0080 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_MASK 0x0080 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_SHIFT 7 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_WIDTH 1 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX7_ENA 0x0040 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_MASK 0x0040 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_SHIFT 6 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_WIDTH 1 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX6_ENA 0x0020 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_MASK 0x0020 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_SHIFT 5 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_WIDTH 1 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX5_ENA 0x0010 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_MASK 0x0010 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_SHIFT 4 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_WIDTH 1 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX4_ENA 0x0008 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_MASK 0x0008 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_SHIFT 3 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_WIDTH 1 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX3_ENA 0x0004 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_MASK 0x0004 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_SHIFT 2 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_WIDTH 1 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX2_ENA 0x0002 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_MASK 0x0002 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_SHIFT 1 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_WIDTH 1 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX1_ENA 0x0001 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_MASK 0x0001 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_SHIFT 0 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_WIDTH 1 /* SLIMRX1_ENA */
+
+/*
+ * R1526 (0x5F6) - SLIMbus TX Channel Enable
+ */
+#define ARIZONA_SLIMTX8_ENA 0x0080 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_MASK 0x0080 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_SHIFT 7 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_WIDTH 1 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX7_ENA 0x0040 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_MASK 0x0040 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_SHIFT 6 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_WIDTH 1 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX6_ENA 0x0020 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_MASK 0x0020 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_SHIFT 5 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_WIDTH 1 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX5_ENA 0x0010 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_MASK 0x0010 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_SHIFT 4 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_WIDTH 1 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX4_ENA 0x0008 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_MASK 0x0008 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_SHIFT 3 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_WIDTH 1 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX3_ENA 0x0004 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_MASK 0x0004 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_SHIFT 2 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_WIDTH 1 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX2_ENA 0x0002 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_MASK 0x0002 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_SHIFT 1 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_WIDTH 1 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX1_ENA 0x0001 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_MASK 0x0001 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_SHIFT 0 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_WIDTH 1 /* SLIMTX1_ENA */
+
+/*
+ * R1527 (0x5F7) - SLIMbus RX Port Status
+ */
+#define ARIZONA_SLIMRX8_PORT_STS 0x0080 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_MASK 0x0080 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_SHIFT 7 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_WIDTH 1 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS 0x0040 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_MASK 0x0040 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_SHIFT 6 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_WIDTH 1 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS 0x0020 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_MASK 0x0020 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_SHIFT 5 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_WIDTH 1 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS 0x0010 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_MASK 0x0010 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_SHIFT 4 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_WIDTH 1 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS 0x0008 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_MASK 0x0008 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_SHIFT 3 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_WIDTH 1 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS 0x0004 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_MASK 0x0004 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_SHIFT 2 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_WIDTH 1 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS 0x0002 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_MASK 0x0002 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_SHIFT 1 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_WIDTH 1 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS 0x0001 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_MASK 0x0001 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_SHIFT 0 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_WIDTH 1 /* SLIMRX1_PORT_STS */
+
+/*
+ * R1528 (0x5F8) - SLIMbus TX Port Status
+ */
+#define ARIZONA_SLIMTX8_PORT_STS 0x0080 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_MASK 0x0080 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_SHIFT 7 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_WIDTH 1 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS 0x0040 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_MASK 0x0040 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_SHIFT 6 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_WIDTH 1 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS 0x0020 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_MASK 0x0020 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_SHIFT 5 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_WIDTH 1 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS 0x0010 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_MASK 0x0010 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_SHIFT 4 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_WIDTH 1 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS 0x0008 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_MASK 0x0008 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_SHIFT 3 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_WIDTH 1 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS 0x0004 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_MASK 0x0004 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_SHIFT 2 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_WIDTH 1 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS 0x0002 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_MASK 0x0002 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_SHIFT 1 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_WIDTH 1 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS 0x0001 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_MASK 0x0001 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_SHIFT 0 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_WIDTH 1 /* SLIMTX1_PORT_STS */
+
+/*
+ * R3087 (0xC0F) - IRQ CTRL 1
+ */
+#define ARIZONA_IRQ_POL 0x0400 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_MASK 0x0400 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_SHIFT 10 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_WIDTH 1 /* IRQ_POL */
+#define ARIZONA_IRQ_OP_CFG 0x0200 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_MASK 0x0200 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_SHIFT 9 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_WIDTH 1 /* IRQ_OP_CFG */
+
+/*
+ * R3088 (0xC10) - GPIO Debounce Config
+ */
+#define ARIZONA_GP_DBTIME_MASK 0xF000 /* GP_DBTIME - [15:12] */
+#define ARIZONA_GP_DBTIME_SHIFT 12 /* GP_DBTIME - [15:12] */
+#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
+
+/*
+ * R3104 (0xC20) - Misc Pad Ctrl 1
+ */
+#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_MASK 0x8000 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_SHIFT 15 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */
+#define ARIZONA_MCLK2_PD 0x2000 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_MASK 0x2000 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_SHIFT 13 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_WIDTH 1 /* MCLK2_PD */
+#define ARIZONA_RSTB_PU 0x0002 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_MASK 0x0002 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_SHIFT 1 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_WIDTH 1 /* RSTB_PU */
+
+/*
+ * R3105 (0xC21) - Misc Pad Ctrl 2
+ */
+#define ARIZONA_MCLK1_PD 0x1000 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_MASK 0x1000 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_SHIFT 12 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_WIDTH 1 /* MCLK1_PD */
+#define ARIZONA_MICD_PD 0x0100 /* MICD_PD */
+#define ARIZONA_MICD_PD_MASK 0x0100 /* MICD_PD */
+#define ARIZONA_MICD_PD_SHIFT 8 /* MICD_PD */
+#define ARIZONA_MICD_PD_WIDTH 1 /* MICD_PD */
+#define ARIZONA_ADDR_PD 0x0001 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_MASK 0x0001 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_SHIFT 0 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_WIDTH 1 /* ADDR_PD */
+
+/*
+ * R3106 (0xC22) - Misc Pad Ctrl 3
+ */
+#define ARIZONA_DMICDAT4_PD 0x0008 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_MASK 0x0008 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_SHIFT 3 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_WIDTH 1 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT3_PD 0x0004 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_MASK 0x0004 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_SHIFT 2 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_WIDTH 1 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT2_PD 0x0002 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_MASK 0x0002 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_SHIFT 1 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT1_PD 0x0001 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_MASK 0x0001 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_SHIFT 0 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */
+
+/*
+ * R3107 (0xC23) - Misc Pad Ctrl 4
+ */
+#define ARIZONA_AIF1RXLRCLK_PU 0x0020 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_MASK 0x0020 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_SHIFT 5 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_WIDTH 1 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PD 0x0010 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_MASK 0x0010 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_SHIFT 4 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_WIDTH 1 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1BCLK_PU 0x0008 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_MASK 0x0008 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_SHIFT 3 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_WIDTH 1 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PD 0x0004 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_MASK 0x0004 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_SHIFT 2 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_WIDTH 1 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1RXDAT_PU 0x0002 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_MASK 0x0002 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_SHIFT 1 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_WIDTH 1 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PD 0x0001 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_MASK 0x0001 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_SHIFT 0 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_WIDTH 1 /* AIF1RXDAT_PD */
+
+/*
+ * R3108 (0xC24) - Misc Pad Ctrl 5
+ */
+#define ARIZONA_AIF2RXLRCLK_PU 0x0020 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_MASK 0x0020 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_SHIFT 5 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_WIDTH 1 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PD 0x0010 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_MASK 0x0010 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_SHIFT 4 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_WIDTH 1 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2BCLK_PU 0x0008 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_MASK 0x0008 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_SHIFT 3 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_WIDTH 1 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PD 0x0004 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_MASK 0x0004 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_SHIFT 2 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_WIDTH 1 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2RXDAT_PU 0x0002 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_MASK 0x0002 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_SHIFT 1 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_WIDTH 1 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PD 0x0001 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_MASK 0x0001 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_SHIFT 0 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_WIDTH 1 /* AIF2RXDAT_PD */
+
+/*
+ * R3109 (0xC25) - Misc Pad Ctrl 6
+ */
+#define ARIZONA_AIF3RXLRCLK_PU 0x0020 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_MASK 0x0020 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_SHIFT 5 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_WIDTH 1 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PD 0x0010 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_MASK 0x0010 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_SHIFT 4 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_WIDTH 1 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3BCLK_PU 0x0008 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_MASK 0x0008 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_SHIFT 3 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_WIDTH 1 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PD 0x0004 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_MASK 0x0004 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_SHIFT 2 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_WIDTH 1 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3RXDAT_PU 0x0002 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_MASK 0x0002 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_SHIFT 1 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_WIDTH 1 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PD 0x0001 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_MASK 0x0001 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_SHIFT 0 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_WIDTH 1 /* AIF3RXDAT_PD */
+
+/*
+ * R3328 (0xD00) - Interrupt Status 1
+ */
+#define ARIZONA_GP4_EINT1 0x0008 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_MASK 0x0008 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_SHIFT 3 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_WIDTH 1 /* GP4_EINT1 */
+#define ARIZONA_GP3_EINT1 0x0004 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_MASK 0x0004 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_SHIFT 2 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_WIDTH 1 /* GP3_EINT1 */
+#define ARIZONA_GP2_EINT1 0x0002 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_MASK 0x0002 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_SHIFT 1 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_WIDTH 1 /* GP2_EINT1 */
+#define ARIZONA_GP1_EINT1 0x0001 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_MASK 0x0001 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_SHIFT 0 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_WIDTH 1 /* GP1_EINT1 */
+
+/*
+ * R3329 (0xD01) - Interrupt Status 2
+ */
+#define ARIZONA_DSP4_RAM_RDY_EINT1 0x0800 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_MASK 0x0800 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_SHIFT 11 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_WIDTH 1 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1 0x0400 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_MASK 0x0400 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_SHIFT 10 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_WIDTH 1 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1 0x0200 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_MASK 0x0200 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_SHIFT 9 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_WIDTH 1 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1 0x0100 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_SHIFT 8 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_WIDTH 1 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1 0x0080 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_MASK 0x0080 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_SHIFT 7 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_WIDTH 1 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1 0x0040 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_MASK 0x0040 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_SHIFT 6 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_WIDTH 1 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1 0x0020 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_MASK 0x0020 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_SHIFT 5 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_WIDTH 1 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1 0x0010 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_MASK 0x0010 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_SHIFT 4 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_WIDTH 1 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1 0x0008 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_MASK 0x0008 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_SHIFT 3 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_WIDTH 1 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1 0x0004 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_MASK 0x0004 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_SHIFT 2 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_WIDTH 1 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1 0x0002 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_MASK 0x0002 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_SHIFT 1 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_WIDTH 1 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1 0x0001 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_MASK 0x0001 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_SHIFT 0 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_WIDTH 1 /* DSP_IRQ1_EINT1 */
+
+/*
+ * R3330 (0xD02) - Interrupt Status 3
+ */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT1 0x8000 /* SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT1_MASK 0x8000 /* SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT1_SHIFT 15 /* SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT1_WIDTH 1 /* SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1 0x4000 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_SHIFT 14 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_WIDTH 1 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_HPDET_EINT1 0x2000 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_MASK 0x2000 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_SHIFT 13 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_WIDTH 1 /* HPDET_EINT1 */
+#define ARIZONA_MICDET_EINT1 0x1000 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_MASK 0x1000 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_SHIFT 12 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_WIDTH 1 /* MICDET_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1 0x0800 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_MASK 0x0800 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_SHIFT 11 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_WIDTH 1 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1 0x0400 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_MASK 0x0400 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_SHIFT 10 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_WIDTH 1 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1 0x0200 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_MASK 0x0200 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_SHIFT 9 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_WIDTH 1 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1 0x0100 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_MASK 0x0100 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_SHIFT 8 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_WIDTH 1 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1 0x0080 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_MASK 0x0080 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_SHIFT 7 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_WIDTH 1 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1 0x0040 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_MASK 0x0040 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_SHIFT 6 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_WIDTH 1 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1 0x0020 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_MASK 0x0020 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_SHIFT 5 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_WIDTH 1 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1 0x0008 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_MASK 0x0008 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_SHIFT 3 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_WIDTH 1 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1 0x0004 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_MASK 0x0004 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_SHIFT 2 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_WIDTH 1 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1 0x0002 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_MASK 0x0002 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_SHIFT 1 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_WIDTH 1 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT1 */
+
+/*
+ * R3331 (0xD03) - Interrupt Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_EINT1 0x8000 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_SHIFT 15 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_WIDTH 1 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1 0x4000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_MASK 0x4000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_SHIFT 14 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_WIDTH 1 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1 0x2000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_MASK 0x2000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_SHIFT 13 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_WIDTH 1 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1 0x1000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_MASK 0x1000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_SHIFT 12 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_WIDTH 1 /* AIF1_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1 0x0800 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_MASK 0x0800 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_SHIFT 11 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_WIDTH 1 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1 0x0100 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1 0x0080 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1 0x0040 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* ISRC2_CFG_ERR_EINT1 */
+
+/*
+ * R3332 (0xD04) - Interrupt Status 5
+ */
+#define ARIZONA_BOOT_DONE_EINT1 0x0100 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_MASK 0x0100 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_SHIFT 8 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_WIDTH 1 /* BOOT_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1 0x0080 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_MASK 0x0080 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_SHIFT 7 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_WIDTH 1 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1 0x0040 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_MASK 0x0040 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_SHIFT 6 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_WIDTH 1 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1 0x0002 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1 0x0001 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* FLL1_CLOCK_OK_EINT1 */
+
+/*
+ * R3336 (0xD08) - Interrupt Status 1 Mask
+ */
+#define ARIZONA_IM_GP4_EINT1 0x0008 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_MASK 0x0008 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_SHIFT 3 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_WIDTH 1 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP3_EINT1 0x0004 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_MASK 0x0004 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_SHIFT 2 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_WIDTH 1 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP2_EINT1 0x0002 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_MASK 0x0002 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_SHIFT 1 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_WIDTH 1 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP1_EINT1 0x0001 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_MASK 0x0001 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_SHIFT 0 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_WIDTH 1 /* IM_GP1_EINT1 */
+
+/*
+ * R3337 (0xD09) - Interrupt Status 2 Mask
+ */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1 0x0002 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_MASK 0x0002 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_SHIFT 1 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_WIDTH 1 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1 0x0001 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_MASK 0x0001 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_SHIFT 0 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_WIDTH 1 /* IM_DSP_IRQ1_EINT1 */
+
+/*
+ * R3338 (0xD0A) - Interrupt Status 3 Mask
+ */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT1 0x8000 /* IM_SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT1_MASK 0x8000 /* IM_SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT1_SHIFT 15 /* IM_SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT1_WIDTH 1 /* IM_SPK_SHUTDOWN_WARN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1 0x2000 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_MASK 0x2000 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_SHIFT 13 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_WIDTH 1 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1 0x1000 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_MASK 0x1000 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_SHIFT 12 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_WIDTH 1 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1 0x0800 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_MASK 0x0800 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_SHIFT 11 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_WIDTH 1 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1 0x0400 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_SHIFT 10 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_WIDTH 1 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1 0x0200 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_SHIFT 9 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_WIDTH 1 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1 0x0100 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_MASK 0x0100 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_SHIFT 8 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_WIDTH 1 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1 0x0080 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_MASK 0x0080 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_SHIFT 7 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_WIDTH 1 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1 0x0040 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_MASK 0x0040 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_SHIFT 6 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_WIDTH 1 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1 0x0020 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_MASK 0x0020 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_SHIFT 5 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_WIDTH 1 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1 0x0008 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_MASK 0x0008 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_SHIFT 3 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_WIDTH 1 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1 0x0004 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_MASK 0x0004 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_SHIFT 2 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_WIDTH 1 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1 0x0002 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_MASK 0x0002 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_SHIFT 1 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+
+/*
+ * R3339 (0xD0B) - Interrupt Status 4 Mask
+ */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1 0x4000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_MASK 0x4000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_SHIFT 14 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_WIDTH 1 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1 0x2000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_MASK 0x2000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_SHIFT 13 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_WIDTH 1 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1 0x1000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_MASK 0x1000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_SHIFT 12 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_WIDTH 1 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1 0x0800 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_MASK 0x0800 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_SHIFT 11 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_WIDTH 1 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT1 */
+
+/*
+ * R3340 (0xD0C) - Interrupt Status 5 Mask
+ */
+#define ARIZONA_IM_BOOT_DONE_EINT1 0x0100 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_MASK 0x0100 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_SHIFT 8 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_WIDTH 1 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1 0x0080 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_SHIFT 7 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_WIDTH 1 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1 0x0040 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_MASK 0x0040 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_SHIFT 6 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_WIDTH 1 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT1 */
+
+/*
+ * R3343 (0xD0F) - Interrupt Control
+ */
+#define ARIZONA_IM_IRQ1 0x0001 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_MASK 0x0001 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_SHIFT 0 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_WIDTH 1 /* IM_IRQ1 */
+
+/*
+ * R3344 (0xD10) - IRQ2 Status 1
+ */
+#define ARIZONA_GP4_EINT2 0x0008 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_MASK 0x0008 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_SHIFT 3 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_WIDTH 1 /* GP4_EINT2 */
+#define ARIZONA_GP3_EINT2 0x0004 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_MASK 0x0004 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_SHIFT 2 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_WIDTH 1 /* GP3_EINT2 */
+#define ARIZONA_GP2_EINT2 0x0002 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_MASK 0x0002 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_SHIFT 1 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_WIDTH 1 /* GP2_EINT2 */
+#define ARIZONA_GP1_EINT2 0x0001 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_MASK 0x0001 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_SHIFT 0 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_WIDTH 1 /* GP1_EINT2 */
+
+/*
+ * R3345 (0xD11) - IRQ2 Status 2
+ */
+#define ARIZONA_DSP1_RAM_RDY_EINT2 0x0100 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_SHIFT 8 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_WIDTH 1 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2 0x0002 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_MASK 0x0002 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_SHIFT 1 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_WIDTH 1 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2 0x0001 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_MASK 0x0001 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_SHIFT 0 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_WIDTH 1 /* DSP_IRQ1_EINT2 */
+
+/*
+ * R3346 (0xD12) - IRQ2 Status 3
+ */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT2 0x8000 /* SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT2_MASK 0x8000 /* SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT2_SHIFT 15 /* SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_WARN_EINT2_WIDTH 1 /* SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2 0x4000 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_SHIFT 14 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_WIDTH 1 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_HPDET_EINT2 0x2000 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_MASK 0x2000 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_SHIFT 13 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_WIDTH 1 /* HPDET_EINT2 */
+#define ARIZONA_MICDET_EINT2 0x1000 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_MASK 0x1000 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_SHIFT 12 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_WIDTH 1 /* MICDET_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2 0x0800 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_MASK 0x0800 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_SHIFT 11 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_WIDTH 1 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2 0x0400 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_MASK 0x0400 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_SHIFT 10 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_WIDTH 1 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2 0x0200 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_MASK 0x0200 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_SHIFT 9 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_WIDTH 1 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2 0x0100 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_MASK 0x0100 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_SHIFT 8 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_WIDTH 1 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2 0x0080 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_MASK 0x0080 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_SHIFT 7 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_WIDTH 1 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2 0x0040 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_MASK 0x0040 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_SHIFT 6 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_WIDTH 1 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2 0x0020 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_MASK 0x0020 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_SHIFT 5 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_WIDTH 1 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2 0x0008 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_MASK 0x0008 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_SHIFT 3 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_WIDTH 1 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2 0x0004 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_MASK 0x0004 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_SHIFT 2 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_WIDTH 1 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2 0x0002 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_MASK 0x0002 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_SHIFT 1 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_WIDTH 1 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT2 */
+
+/*
+ * R3347 (0xD13) - IRQ2 Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_EINT2 0x8000 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_SHIFT 15 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_WIDTH 1 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2 0x4000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_MASK 0x4000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_SHIFT 14 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_WIDTH 1 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2 0x2000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_MASK 0x2000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_SHIFT 13 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_WIDTH 1 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2 0x1000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_MASK 0x1000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_SHIFT 12 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_WIDTH 1 /* AIF1_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2 0x0800 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_MASK 0x0800 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_SHIFT 11 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_WIDTH 1 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2 0x0100 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2 0x0080 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2 0x0040 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* ISRC2_CFG_ERR_EINT2 */
+
+/*
+ * R3348 (0xD14) - IRQ2 Status 5
+ */
+#define ARIZONA_BOOT_DONE_EINT2 0x0100 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_MASK 0x0100 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_SHIFT 8 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_WIDTH 1 /* BOOT_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2 0x0080 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_MASK 0x0080 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_SHIFT 7 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_WIDTH 1 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2 0x0040 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_MASK 0x0040 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_SHIFT 6 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_WIDTH 1 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2 0x0002 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2 0x0001 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* FLL1_CLOCK_OK_EINT2 */
+
+/*
+ * R3352 (0xD18) - IRQ2 Status 1 Mask
+ */
+#define ARIZONA_IM_GP4_EINT2 0x0008 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_MASK 0x0008 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_SHIFT 3 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_WIDTH 1 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP3_EINT2 0x0004 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_MASK 0x0004 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_SHIFT 2 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_WIDTH 1 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP2_EINT2 0x0002 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_MASK 0x0002 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_SHIFT 1 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_WIDTH 1 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP1_EINT2 0x0001 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_MASK 0x0001 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_SHIFT 0 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_WIDTH 1 /* IM_GP1_EINT2 */
+
+/*
+ * R3353 (0xD19) - IRQ2 Status 2 Mask
+ */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2 0x0002 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_MASK 0x0002 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_SHIFT 1 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_WIDTH 1 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2 0x0001 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_MASK 0x0001 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_SHIFT 0 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_WIDTH 1 /* IM_DSP_IRQ1_EINT2 */
+
+/*
+ * R3354 (0xD1A) - IRQ2 Status 3 Mask
+ */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT2 0x8000 /* IM_SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT2_MASK 0x8000 /* IM_SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT2_SHIFT 15 /* IM_SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_WARN_EINT2_WIDTH 1 /* IM_SPK_SHUTDOWN_WARN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2 0x2000 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_MASK 0x2000 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_SHIFT 13 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_WIDTH 1 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2 0x1000 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_MASK 0x1000 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_SHIFT 12 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_WIDTH 1 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2 0x0800 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_MASK 0x0800 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_SHIFT 11 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_WIDTH 1 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2 0x0400 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_SHIFT 10 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_WIDTH 1 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2 0x0200 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_SHIFT 9 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_WIDTH 1 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2 0x0100 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_MASK 0x0100 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_SHIFT 8 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_WIDTH 1 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2 0x0080 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_MASK 0x0080 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_SHIFT 7 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_WIDTH 1 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2 0x0040 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_MASK 0x0040 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_SHIFT 6 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_WIDTH 1 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2 0x0020 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_MASK 0x0020 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_SHIFT 5 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_WIDTH 1 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2 0x0008 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_MASK 0x0008 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_SHIFT 3 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_WIDTH 1 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2 0x0004 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_MASK 0x0004 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_SHIFT 2 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_WIDTH 1 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2 0x0002 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_MASK 0x0002 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_SHIFT 1 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_WIDTH 1 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+
+/*
+ * R3355 (0xD1B) - IRQ2 Status 4 Mask
+ */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2 0x4000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_MASK 0x4000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_SHIFT 14 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_WIDTH 1 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2 0x2000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_MASK 0x2000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_SHIFT 13 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_WIDTH 1 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2 0x1000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_MASK 0x1000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_SHIFT 12 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_WIDTH 1 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2 0x0800 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_MASK 0x0800 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_SHIFT 11 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_WIDTH 1 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT2 */
+
+/*
+ * R3356 (0xD1C) - IRQ2 Status 5 Mask
+ */
+#define ARIZONA_IM_BOOT_DONE_EINT2 0x0100 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_MASK 0x0100 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_SHIFT 8 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_WIDTH 1 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2 0x0080 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_SHIFT 7 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_WIDTH 1 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2 0x0040 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_MASK 0x0040 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_SHIFT 6 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_WIDTH 1 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT2 */
+
+/*
+ * R3359 (0xD1F) - IRQ2 Control
+ */
+#define ARIZONA_IM_IRQ2 0x0001 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_MASK 0x0001 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_SHIFT 0 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_WIDTH 1 /* IM_IRQ2 */
+
+/*
+ * R3360 (0xD20) - Interrupt Raw Status 2
+ */
+#define ARIZONA_DSP1_RAM_RDY_STS 0x0100 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_MASK 0x0100 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_SHIFT 8 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_WIDTH 1 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP_IRQ2_STS 0x0002 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_MASK 0x0002 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_SHIFT 1 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_WIDTH 1 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ1_STS 0x0001 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_MASK 0x0001 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_SHIFT 0 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_WIDTH 1 /* DSP_IRQ1_STS */
+
+/*
+ * R3361 (0xD21) - Interrupt Raw Status 3
+ */
+#define ARIZONA_SPK_SHUTDOWN_WARN_STS 0x8000 /* SPK_SHUTDOWN_WARN_STS */
+#define ARIZONA_SPK_SHUTDOWN_WARN_STS_MASK 0x8000 /* SPK_SHUTDOWN_WARN_STS */
+#define ARIZONA_SPK_SHUTDOWN_WARN_STS_SHIFT 15 /* SPK_SHUTDOWN_WARN_STS */
+#define ARIZONA_SPK_SHUTDOWN_WARN_STS_WIDTH 1 /* SPK_SHUTDOWN_WARN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS 0x4000 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_MASK 0x4000 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_SHIFT 14 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_WIDTH 1 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_HPDET_STS 0x2000 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_MASK 0x2000 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_SHIFT 13 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_WIDTH 1 /* HPDET_STS */
+#define ARIZONA_MICDET_STS 0x1000 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_MASK 0x1000 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_SHIFT 12 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_WIDTH 1 /* MICDET_STS */
+#define ARIZONA_WSEQ_DONE_STS 0x0800 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_MASK 0x0800 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_SHIFT 11 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */
+#define ARIZONA_DRC2_SIG_DET_STS 0x0400 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_MASK 0x0400 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_SHIFT 10 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_WIDTH 1 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS 0x0200 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_MASK 0x0200 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_SHIFT 9 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_WIDTH 1 /* DRC1_SIG_DET_STS */
+#define ARIZONA_ASRC2_LOCK_STS 0x0100 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_MASK 0x0100 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_SHIFT 8 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_WIDTH 1 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS 0x0080 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_MASK 0x0080 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_SHIFT 7 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_WIDTH 1 /* ASRC1_LOCK_STS */
+#define ARIZONA_UNDERCLOCKED_STS 0x0040 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_MASK 0x0040 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_SHIFT 6 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_WIDTH 1 /* UNDERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS 0x0020 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_MASK 0x0020 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_SHIFT 5 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_WIDTH 1 /* OVERCLOCKED_STS */
+#define ARIZONA_FLL2_LOCK_STS 0x0008 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_MASK 0x0008 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_SHIFT 3 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS 0x0004 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_MASK 0x0004 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_SHIFT 2 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */
+#define ARIZONA_CLKGEN_ERR_STS 0x0002 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_MASK 0x0002 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_SHIFT 1 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_WIDTH 1 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS 0x0001 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_MASK 0x0001 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_SHIFT 0 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_WIDTH 1 /* CLKGEN_ERR_ASYNC_STS */
+
+/*
+ * R3362 (0xD22) - Interrupt Raw Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_STS 0x8000 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_MASK 0x8000 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_SHIFT 15 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_WIDTH 1 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS 0x4000 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_MASK 0x4000 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_SHIFT 14 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_WIDTH 1 /* AIF3_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS 0x2000 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_MASK 0x2000 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_SHIFT 13 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_WIDTH 1 /* AIF2_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS 0x1000 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_MASK 0x1000 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_SHIFT 12 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_WIDTH 1 /* AIF1_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS 0x0800 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_MASK 0x0800 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_SHIFT 11 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_WIDTH 1 /* CTRLIF_ERR_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS 0x0400 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_SHIFT 10 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_WIDTH 1 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS 0x0200 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_SHIFT 9 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_WIDTH 1 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS 0x0100 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_MASK 0x0100 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_SHIFT 8 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_WIDTH 1 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS 0x0080 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_MASK 0x0080 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_SHIFT 7 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_WIDTH 1 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS 0x0040 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_MASK 0x0040 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_SHIFT 6 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_WIDTH 1 /* ISRC2_CFG_ERR_STS */
+
+/*
+ * R3363 (0xD23) - Interrupt Raw Status 5
+ */
+#define ARIZONA_BOOT_DONE_STS 0x0100 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_MASK 0x0100 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_SHIFT 8 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_WIDTH 1 /* BOOT_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS 0x0080 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_MASK 0x0080 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_SHIFT 7 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_WIDTH 1 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS 0x0040 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_MASK 0x0040 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_SHIFT 6 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_WIDTH 1 /* DCS_HP_DONE_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS 0x0002 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_MASK 0x0002 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_SHIFT 1 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_WIDTH 1 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS 0x0001 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_MASK 0x0001 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_SHIFT 0 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_WIDTH 1 /* FLL1_CLOCK_OK_STS */
+
+/*
+ * R3364 (0xD24) - Interrupt Raw Status 6
+ */
+#define ARIZONA_PWM_OVERCLOCKED_STS 0x2000 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_MASK 0x2000 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_SHIFT 13 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_WIDTH 1 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS 0x1000 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_MASK 0x1000 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_SHIFT 12 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_WIDTH 1 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS 0x0400 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_SHIFT 10 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_WIDTH 1 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS 0x0200 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_SHIFT 9 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_WIDTH 1 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS 0x0100 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_MASK 0x0100 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_SHIFT 8 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_WIDTH 1 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS 0x0080 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_MASK 0x0080 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_SHIFT 7 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_WIDTH 1 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_MASK 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_SHIFT 6 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_MASK 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_SHIFT 5 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_MASK 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_SHIFT 4 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_MASK 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_SHIFT 3 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_MASK 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_SHIFT 2 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_MASK 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_SHIFT 1 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_MASK 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_SHIFT 0 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_WIDTH 1 /* PAD_CTRL_OVERCLOCKED_STS */
+
+/*
+ * R3365 (0xD25) - Interrupt Raw Status 7
+ */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_MASK 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_SHIFT 15 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_MASK 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_SHIFT 14 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_MASK 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_SHIFT 13 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_MASK 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_SHIFT 12 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_MASK 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_SHIFT 11 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_SHIFT 10 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_SHIFT 9 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS 0x0008 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_MASK 0x0008 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_SHIFT 3 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_WIDTH 1 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS 0x0002 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_MASK 0x0002 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_SHIFT 1 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_WIDTH 1 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS 0x0001 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_MASK 0x0001 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_SHIFT 0 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_WIDTH 1 /* ISRC1_OVERCLOCKED_STS */
+
+/*
+ * R3366 (0xD26) - Interrupt Raw Status 8
+ */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_WIDTH 1 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS 0x0200 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_MASK 0x0200 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_SHIFT 9 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_WIDTH 1 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS 0x0100 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_MASK 0x0100 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_SHIFT 8 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_WIDTH 1 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS 0x0040 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_MASK 0x0040 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_SHIFT 6 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_WIDTH 1 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS 0x0020 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_MASK 0x0020 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_SHIFT 5 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_WIDTH 1 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS 0x0010 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_MASK 0x0010 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_SHIFT 4 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_WIDTH 1 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS 0x0008 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_MASK 0x0008 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_SHIFT 3 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_WIDTH 1 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS 0x0004 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_MASK 0x0004 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_SHIFT 2 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_WIDTH 1 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS 0x0002 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_MASK 0x0002 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_SHIFT 1 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_WIDTH 1 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS 0x0001 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_MASK 0x0001 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_SHIFT 0 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_WIDTH 1 /* MIXER_UNDERCLOCKED_STS */
+
+/*
+ * R3392 (0xD40) - IRQ Pin Status
+ */
+#define ARIZONA_IRQ2_STS 0x0002 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_MASK 0x0002 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_SHIFT 1 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_WIDTH 1 /* IRQ2_STS */
+#define ARIZONA_IRQ1_STS 0x0001 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_MASK 0x0001 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_SHIFT 0 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_WIDTH 1 /* IRQ1_STS */
+
+/*
+ * R3393 (0xD41) - ADSP2 IRQ0
+ */
+#define ARIZONA_DSP_IRQ2 0x0002 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_MASK 0x0002 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_SHIFT 1 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_WIDTH 1 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ1 0x0001 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_MASK 0x0001 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_SHIFT 0 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_WIDTH 1 /* DSP_IRQ1 */
+
+/*
+ * R3408 (0xD50) - AOD wkup and trig
+ */
+#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_WIDTH 1 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS 0x0010 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_MASK 0x0010 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_SHIFT 4 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_WIDTH 1 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS 0x0008 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_MASK 0x0008 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_SHIFT 3 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_WIDTH 1 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS 0x0004 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_MASK 0x0004 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_SHIFT 2 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_WIDTH 1 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS 0x0002 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_MASK 0x0002 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_SHIFT 1 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_WIDTH 1 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS 0x0001 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_MASK 0x0001 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_SHIFT 0 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_WIDTH 1 /* JD2_RISE_TRIG_STS */
+
+/*
+ * R3409 (0xD51) - AOD IRQ1
+ */
+#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_WIDTH 1 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1 0x0010 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_MASK 0x0010 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_SHIFT 4 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_WIDTH 1 /* GP5_RISE_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1 0x0008 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_MASK 0x0008 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_SHIFT 3 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_WIDTH 1 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1 0x0004 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_MASK 0x0004 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_SHIFT 2 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_WIDTH 1 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1 0x0002 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_MASK 0x0002 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_SHIFT 1 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_WIDTH 1 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1 0x0001 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_MASK 0x0001 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_SHIFT 0 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_WIDTH 1 /* JD2_RISE_EINT1 */
+
+/*
+ * R3410 (0xD52) - AOD IRQ2
+ */
+#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_WIDTH 1 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2 0x0010 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_MASK 0x0010 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_SHIFT 4 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_WIDTH 1 /* GP5_RISE_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2 0x0008 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_MASK 0x0008 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_SHIFT 3 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_WIDTH 1 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2 0x0004 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_MASK 0x0004 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_SHIFT 2 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_WIDTH 1 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2 0x0002 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_MASK 0x0002 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_SHIFT 1 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_WIDTH 1 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2 0x0001 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_MASK 0x0001 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_SHIFT 0 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_WIDTH 1 /* JD2_RISE_EINT2 */
+
+/*
+ * R3411 (0xD53) - AOD IRQ Mask IRQ1
+ */
+#define ARIZONA_IM_GP5_FALL_EINT1 0x0020 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_MASK 0x0020 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_SHIFT 5 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_WIDTH 1 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1 0x0010 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_MASK 0x0010 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_SHIFT 4 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_WIDTH 1 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1 0x0008 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_MASK 0x0008 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_SHIFT 3 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_WIDTH 1 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1 0x0004 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_MASK 0x0004 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_SHIFT 2 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_WIDTH 1 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1 0x0002 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_MASK 0x0002 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_SHIFT 1 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_WIDTH 1 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1 0x0001 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_MASK 0x0001 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_SHIFT 0 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_WIDTH 1 /* IM_JD2_RISE_EINT1 */
+
+/*
+ * R3412 (0xD54) - AOD IRQ Mask IRQ2
+ */
+#define ARIZONA_IM_GP5_FALL_EINT2 0x0020 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_MASK 0x0020 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_SHIFT 5 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_WIDTH 1 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2 0x0010 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_MASK 0x0010 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_SHIFT 4 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_WIDTH 1 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2 0x0008 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_MASK 0x0008 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_SHIFT 3 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_WIDTH 1 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2 0x0004 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_MASK 0x0004 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_SHIFT 2 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_WIDTH 1 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2 0x0002 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_MASK 0x0002 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_SHIFT 1 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_WIDTH 1 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2 0x0001 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_MASK 0x0001 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_SHIFT 0 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_WIDTH 1 /* IM_JD2_RISE_EINT2 */
+
+/*
+ * R3413 (0xD55) - AOD IRQ Raw Status
+ */
+#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */
+#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */
+#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */
+#define ARIZONA_GP5_STS_WIDTH 1 /* GP5_STS */
+#define ARIZONA_JD2_STS 0x0002 /* JD2_STS */
+#define ARIZONA_JD2_STS_MASK 0x0002 /* JD2_STS */
+#define ARIZONA_JD2_STS_SHIFT 1 /* JD2_STS */
+#define ARIZONA_JD2_STS_WIDTH 1 /* JD2_STS */
+#define ARIZONA_JD1_STS 0x0001 /* JD1_STS */
+#define ARIZONA_JD1_STS_MASK 0x0001 /* JD1_STS */
+#define ARIZONA_JD1_STS_SHIFT 0 /* JD1_STS */
+#define ARIZONA_JD1_STS_WIDTH 1 /* JD1_STS */
+
+/*
+ * R3414 (0xD56) - Jack detect debounce
+ */
+#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */
+#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */
+#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */
+#define ARIZONA_JD2_DB_WIDTH 1 /* JD2_DB */
+#define ARIZONA_JD1_DB 0x0001 /* JD1_DB */
+#define ARIZONA_JD1_DB_MASK 0x0001 /* JD1_DB */
+#define ARIZONA_JD1_DB_SHIFT 0 /* JD1_DB */
+#define ARIZONA_JD1_DB_WIDTH 1 /* JD1_DB */
+
+/*
+ * R3584 (0xE00) - FX_Ctrl1
+ */
+#define ARIZONA_FX_RATE_MASK 0x7800 /* FX_RATE - [14:11] */
+#define ARIZONA_FX_RATE_SHIFT 11 /* FX_RATE - [14:11] */
+#define ARIZONA_FX_RATE_WIDTH 4 /* FX_RATE - [14:11] */
+
+/*
+ * R3585 (0xE01) - FX_Ctrl2
+ */
+#define ARIZONA_FX_STS_MASK 0xFFF0 /* FX_STS - [15:4] */
+#define ARIZONA_FX_STS_SHIFT 4 /* FX_STS - [15:4] */
+#define ARIZONA_FX_STS_WIDTH 12 /* FX_STS - [15:4] */
+
+/*
+ * R3600 (0xE10) - EQ1_1
+ */
+#define ARIZONA_EQ1_B1_GAIN_MASK 0xF800 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B1_GAIN_SHIFT 11 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B1_GAIN_WIDTH 5 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B2_GAIN_MASK 0x07C0 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B2_GAIN_SHIFT 6 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B2_GAIN_WIDTH 5 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B3_GAIN_MASK 0x003E /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_B3_GAIN_SHIFT 1 /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_B3_GAIN_WIDTH 5 /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_ENA 0x0001 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_MASK 0x0001 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_SHIFT 0 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_WIDTH 1 /* EQ1_ENA */
+
+/*
+ * R3601 (0xE11) - EQ1_2
+ */
+#define ARIZONA_EQ1_B4_GAIN_MASK 0xF800 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B4_GAIN_SHIFT 11 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B4_GAIN_WIDTH 5 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B5_GAIN_MASK 0x07C0 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B5_GAIN_SHIFT 6 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B5_GAIN_WIDTH 5 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B1_MODE 0x0001 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_MASK 0x0001 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_SHIFT 0 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_WIDTH 1 /* EQ1_B1_MODE */
+
+/*
+ * R3602 (0xE12) - EQ1_3
+ */
+#define ARIZONA_EQ1_B1_A_MASK 0xFFFF /* EQ1_B1_A - [15:0] */
+#define ARIZONA_EQ1_B1_A_SHIFT 0 /* EQ1_B1_A - [15:0] */
+#define ARIZONA_EQ1_B1_A_WIDTH 16 /* EQ1_B1_A - [15:0] */
+
+/*
+ * R3603 (0xE13) - EQ1_4
+ */
+#define ARIZONA_EQ1_B1_B_MASK 0xFFFF /* EQ1_B1_B - [15:0] */
+#define ARIZONA_EQ1_B1_B_SHIFT 0 /* EQ1_B1_B - [15:0] */
+#define ARIZONA_EQ1_B1_B_WIDTH 16 /* EQ1_B1_B - [15:0] */
+
+/*
+ * R3604 (0xE14) - EQ1_5
+ */
+#define ARIZONA_EQ1_B1_PG_MASK 0xFFFF /* EQ1_B1_PG - [15:0] */
+#define ARIZONA_EQ1_B1_PG_SHIFT 0 /* EQ1_B1_PG - [15:0] */
+#define ARIZONA_EQ1_B1_PG_WIDTH 16 /* EQ1_B1_PG - [15:0] */
+
+/*
+ * R3605 (0xE15) - EQ1_6
+ */
+#define ARIZONA_EQ1_B2_A_MASK 0xFFFF /* EQ1_B2_A - [15:0] */
+#define ARIZONA_EQ1_B2_A_SHIFT 0 /* EQ1_B2_A - [15:0] */
+#define ARIZONA_EQ1_B2_A_WIDTH 16 /* EQ1_B2_A - [15:0] */
+
+/*
+ * R3606 (0xE16) - EQ1_7
+ */
+#define ARIZONA_EQ1_B2_B_MASK 0xFFFF /* EQ1_B2_B - [15:0] */
+#define ARIZONA_EQ1_B2_B_SHIFT 0 /* EQ1_B2_B - [15:0] */
+#define ARIZONA_EQ1_B2_B_WIDTH 16 /* EQ1_B2_B - [15:0] */
+
+/*
+ * R3607 (0xE17) - EQ1_8
+ */
+#define ARIZONA_EQ1_B2_C_MASK 0xFFFF /* EQ1_B2_C - [15:0] */
+#define ARIZONA_EQ1_B2_C_SHIFT 0 /* EQ1_B2_C - [15:0] */
+#define ARIZONA_EQ1_B2_C_WIDTH 16 /* EQ1_B2_C - [15:0] */
+
+/*
+ * R3608 (0xE18) - EQ1_9
+ */
+#define ARIZONA_EQ1_B2_PG_MASK 0xFFFF /* EQ1_B2_PG - [15:0] */
+#define ARIZONA_EQ1_B2_PG_SHIFT 0 /* EQ1_B2_PG - [15:0] */
+#define ARIZONA_EQ1_B2_PG_WIDTH 16 /* EQ1_B2_PG - [15:0] */
+
+/*
+ * R3609 (0xE19) - EQ1_10
+ */
+#define ARIZONA_EQ1_B3_A_MASK 0xFFFF /* EQ1_B3_A - [15:0] */
+#define ARIZONA_EQ1_B3_A_SHIFT 0 /* EQ1_B3_A - [15:0] */
+#define ARIZONA_EQ1_B3_A_WIDTH 16 /* EQ1_B3_A - [15:0] */
+
+/*
+ * R3610 (0xE1A) - EQ1_11
+ */
+#define ARIZONA_EQ1_B3_B_MASK 0xFFFF /* EQ1_B3_B - [15:0] */
+#define ARIZONA_EQ1_B3_B_SHIFT 0 /* EQ1_B3_B - [15:0] */
+#define ARIZONA_EQ1_B3_B_WIDTH 16 /* EQ1_B3_B - [15:0] */
+
+/*
+ * R3611 (0xE1B) - EQ1_12
+ */
+#define ARIZONA_EQ1_B3_C_MASK 0xFFFF /* EQ1_B3_C - [15:0] */
+#define ARIZONA_EQ1_B3_C_SHIFT 0 /* EQ1_B3_C - [15:0] */
+#define ARIZONA_EQ1_B3_C_WIDTH 16 /* EQ1_B3_C - [15:0] */
+
+/*
+ * R3612 (0xE1C) - EQ1_13
+ */
+#define ARIZONA_EQ1_B3_PG_MASK 0xFFFF /* EQ1_B3_PG - [15:0] */
+#define ARIZONA_EQ1_B3_PG_SHIFT 0 /* EQ1_B3_PG - [15:0] */
+#define ARIZONA_EQ1_B3_PG_WIDTH 16 /* EQ1_B3_PG - [15:0] */
+
+/*
+ * R3613 (0xE1D) - EQ1_14
+ */
+#define ARIZONA_EQ1_B4_A_MASK 0xFFFF /* EQ1_B4_A - [15:0] */
+#define ARIZONA_EQ1_B4_A_SHIFT 0 /* EQ1_B4_A - [15:0] */
+#define ARIZONA_EQ1_B4_A_WIDTH 16 /* EQ1_B4_A - [15:0] */
+
+/*
+ * R3614 (0xE1E) - EQ1_15
+ */
+#define ARIZONA_EQ1_B4_B_MASK 0xFFFF /* EQ1_B4_B - [15:0] */
+#define ARIZONA_EQ1_B4_B_SHIFT 0 /* EQ1_B4_B - [15:0] */
+#define ARIZONA_EQ1_B4_B_WIDTH 16 /* EQ1_B4_B - [15:0] */
+
+/*
+ * R3615 (0xE1F) - EQ1_16
+ */
+#define ARIZONA_EQ1_B4_C_MASK 0xFFFF /* EQ1_B4_C - [15:0] */
+#define ARIZONA_EQ1_B4_C_SHIFT 0 /* EQ1_B4_C - [15:0] */
+#define ARIZONA_EQ1_B4_C_WIDTH 16 /* EQ1_B4_C - [15:0] */
+
+/*
+ * R3616 (0xE20) - EQ1_17
+ */
+#define ARIZONA_EQ1_B4_PG_MASK 0xFFFF /* EQ1_B4_PG - [15:0] */
+#define ARIZONA_EQ1_B4_PG_SHIFT 0 /* EQ1_B4_PG - [15:0] */
+#define ARIZONA_EQ1_B4_PG_WIDTH 16 /* EQ1_B4_PG - [15:0] */
+
+/*
+ * R3617 (0xE21) - EQ1_18
+ */
+#define ARIZONA_EQ1_B5_A_MASK 0xFFFF /* EQ1_B5_A - [15:0] */
+#define ARIZONA_EQ1_B5_A_SHIFT 0 /* EQ1_B5_A - [15:0] */
+#define ARIZONA_EQ1_B5_A_WIDTH 16 /* EQ1_B5_A - [15:0] */
+
+/*
+ * R3618 (0xE22) - EQ1_19
+ */
+#define ARIZONA_EQ1_B5_B_MASK 0xFFFF /* EQ1_B5_B - [15:0] */
+#define ARIZONA_EQ1_B5_B_SHIFT 0 /* EQ1_B5_B - [15:0] */
+#define ARIZONA_EQ1_B5_B_WIDTH 16 /* EQ1_B5_B - [15:0] */
+
+/*
+ * R3619 (0xE23) - EQ1_20
+ */
+#define ARIZONA_EQ1_B5_PG_MASK 0xFFFF /* EQ1_B5_PG - [15:0] */
+#define ARIZONA_EQ1_B5_PG_SHIFT 0 /* EQ1_B5_PG - [15:0] */
+#define ARIZONA_EQ1_B5_PG_WIDTH 16 /* EQ1_B5_PG - [15:0] */
+
+/*
+ * R3620 (0xE24) - EQ1_21
+ */
+#define ARIZONA_EQ1_B1_C_MASK 0xFFFF /* EQ1_B1_C - [15:0] */
+#define ARIZONA_EQ1_B1_C_SHIFT 0 /* EQ1_B1_C - [15:0] */
+#define ARIZONA_EQ1_B1_C_WIDTH 16 /* EQ1_B1_C - [15:0] */
+
+/*
+ * R3622 (0xE26) - EQ2_1
+ */
+#define ARIZONA_EQ2_B1_GAIN_MASK 0xF800 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B1_GAIN_SHIFT 11 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B1_GAIN_WIDTH 5 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B2_GAIN_MASK 0x07C0 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B2_GAIN_SHIFT 6 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B2_GAIN_WIDTH 5 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B3_GAIN_MASK 0x003E /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_B3_GAIN_SHIFT 1 /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_B3_GAIN_WIDTH 5 /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_ENA 0x0001 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_MASK 0x0001 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_SHIFT 0 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_WIDTH 1 /* EQ2_ENA */
+
+/*
+ * R3623 (0xE27) - EQ2_2
+ */
+#define ARIZONA_EQ2_B4_GAIN_MASK 0xF800 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B4_GAIN_SHIFT 11 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B4_GAIN_WIDTH 5 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B5_GAIN_MASK 0x07C0 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B5_GAIN_SHIFT 6 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B5_GAIN_WIDTH 5 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B1_MODE 0x0001 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_MASK 0x0001 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_SHIFT 0 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_WIDTH 1 /* EQ2_B1_MODE */
+
+/*
+ * R3624 (0xE28) - EQ2_3
+ */
+#define ARIZONA_EQ2_B1_A_MASK 0xFFFF /* EQ2_B1_A - [15:0] */
+#define ARIZONA_EQ2_B1_A_SHIFT 0 /* EQ2_B1_A - [15:0] */
+#define ARIZONA_EQ2_B1_A_WIDTH 16 /* EQ2_B1_A - [15:0] */
+
+/*
+ * R3625 (0xE29) - EQ2_4
+ */
+#define ARIZONA_EQ2_B1_B_MASK 0xFFFF /* EQ2_B1_B - [15:0] */
+#define ARIZONA_EQ2_B1_B_SHIFT 0 /* EQ2_B1_B - [15:0] */
+#define ARIZONA_EQ2_B1_B_WIDTH 16 /* EQ2_B1_B - [15:0] */
+
+/*
+ * R3626 (0xE2A) - EQ2_5
+ */
+#define ARIZONA_EQ2_B1_PG_MASK 0xFFFF /* EQ2_B1_PG - [15:0] */
+#define ARIZONA_EQ2_B1_PG_SHIFT 0 /* EQ2_B1_PG - [15:0] */
+#define ARIZONA_EQ2_B1_PG_WIDTH 16 /* EQ2_B1_PG - [15:0] */
+
+/*
+ * R3627 (0xE2B) - EQ2_6
+ */
+#define ARIZONA_EQ2_B2_A_MASK 0xFFFF /* EQ2_B2_A - [15:0] */
+#define ARIZONA_EQ2_B2_A_SHIFT 0 /* EQ2_B2_A - [15:0] */
+#define ARIZONA_EQ2_B2_A_WIDTH 16 /* EQ2_B2_A - [15:0] */
+
+/*
+ * R3628 (0xE2C) - EQ2_7
+ */
+#define ARIZONA_EQ2_B2_B_MASK 0xFFFF /* EQ2_B2_B - [15:0] */
+#define ARIZONA_EQ2_B2_B_SHIFT 0 /* EQ2_B2_B - [15:0] */
+#define ARIZONA_EQ2_B2_B_WIDTH 16 /* EQ2_B2_B - [15:0] */
+
+/*
+ * R3629 (0xE2D) - EQ2_8
+ */
+#define ARIZONA_EQ2_B2_C_MASK 0xFFFF /* EQ2_B2_C - [15:0] */
+#define ARIZONA_EQ2_B2_C_SHIFT 0 /* EQ2_B2_C - [15:0] */
+#define ARIZONA_EQ2_B2_C_WIDTH 16 /* EQ2_B2_C - [15:0] */
+
+/*
+ * R3630 (0xE2E) - EQ2_9
+ */
+#define ARIZONA_EQ2_B2_PG_MASK 0xFFFF /* EQ2_B2_PG - [15:0] */
+#define ARIZONA_EQ2_B2_PG_SHIFT 0 /* EQ2_B2_PG - [15:0] */
+#define ARIZONA_EQ2_B2_PG_WIDTH 16 /* EQ2_B2_PG - [15:0] */
+
+/*
+ * R3631 (0xE2F) - EQ2_10
+ */
+#define ARIZONA_EQ2_B3_A_MASK 0xFFFF /* EQ2_B3_A - [15:0] */
+#define ARIZONA_EQ2_B3_A_SHIFT 0 /* EQ2_B3_A - [15:0] */
+#define ARIZONA_EQ2_B3_A_WIDTH 16 /* EQ2_B3_A - [15:0] */
+
+/*
+ * R3632 (0xE30) - EQ2_11
+ */
+#define ARIZONA_EQ2_B3_B_MASK 0xFFFF /* EQ2_B3_B - [15:0] */
+#define ARIZONA_EQ2_B3_B_SHIFT 0 /* EQ2_B3_B - [15:0] */
+#define ARIZONA_EQ2_B3_B_WIDTH 16 /* EQ2_B3_B - [15:0] */
+
+/*
+ * R3633 (0xE31) - EQ2_12
+ */
+#define ARIZONA_EQ2_B3_C_MASK 0xFFFF /* EQ2_B3_C - [15:0] */
+#define ARIZONA_EQ2_B3_C_SHIFT 0 /* EQ2_B3_C - [15:0] */
+#define ARIZONA_EQ2_B3_C_WIDTH 16 /* EQ2_B3_C - [15:0] */
+
+/*
+ * R3634 (0xE32) - EQ2_13
+ */
+#define ARIZONA_EQ2_B3_PG_MASK 0xFFFF /* EQ2_B3_PG - [15:0] */
+#define ARIZONA_EQ2_B3_PG_SHIFT 0 /* EQ2_B3_PG - [15:0] */
+#define ARIZONA_EQ2_B3_PG_WIDTH 16 /* EQ2_B3_PG - [15:0] */
+
+/*
+ * R3635 (0xE33) - EQ2_14
+ */
+#define ARIZONA_EQ2_B4_A_MASK 0xFFFF /* EQ2_B4_A - [15:0] */
+#define ARIZONA_EQ2_B4_A_SHIFT 0 /* EQ2_B4_A - [15:0] */
+#define ARIZONA_EQ2_B4_A_WIDTH 16 /* EQ2_B4_A - [15:0] */
+
+/*
+ * R3636 (0xE34) - EQ2_15
+ */
+#define ARIZONA_EQ2_B4_B_MASK 0xFFFF /* EQ2_B4_B - [15:0] */
+#define ARIZONA_EQ2_B4_B_SHIFT 0 /* EQ2_B4_B - [15:0] */
+#define ARIZONA_EQ2_B4_B_WIDTH 16 /* EQ2_B4_B - [15:0] */
+
+/*
+ * R3637 (0xE35) - EQ2_16
+ */
+#define ARIZONA_EQ2_B4_C_MASK 0xFFFF /* EQ2_B4_C - [15:0] */
+#define ARIZONA_EQ2_B4_C_SHIFT 0 /* EQ2_B4_C - [15:0] */
+#define ARIZONA_EQ2_B4_C_WIDTH 16 /* EQ2_B4_C - [15:0] */
+
+/*
+ * R3638 (0xE36) - EQ2_17
+ */
+#define ARIZONA_EQ2_B4_PG_MASK 0xFFFF /* EQ2_B4_PG - [15:0] */
+#define ARIZONA_EQ2_B4_PG_SHIFT 0 /* EQ2_B4_PG - [15:0] */
+#define ARIZONA_EQ2_B4_PG_WIDTH 16 /* EQ2_B4_PG - [15:0] */
+
+/*
+ * R3639 (0xE37) - EQ2_18
+ */
+#define ARIZONA_EQ2_B5_A_MASK 0xFFFF /* EQ2_B5_A - [15:0] */
+#define ARIZONA_EQ2_B5_A_SHIFT 0 /* EQ2_B5_A - [15:0] */
+#define ARIZONA_EQ2_B5_A_WIDTH 16 /* EQ2_B5_A - [15:0] */
+
+/*
+ * R3640 (0xE38) - EQ2_19
+ */
+#define ARIZONA_EQ2_B5_B_MASK 0xFFFF /* EQ2_B5_B - [15:0] */
+#define ARIZONA_EQ2_B5_B_SHIFT 0 /* EQ2_B5_B - [15:0] */
+#define ARIZONA_EQ2_B5_B_WIDTH 16 /* EQ2_B5_B - [15:0] */
+
+/*
+ * R3641 (0xE39) - EQ2_20
+ */
+#define ARIZONA_EQ2_B5_PG_MASK 0xFFFF /* EQ2_B5_PG - [15:0] */
+#define ARIZONA_EQ2_B5_PG_SHIFT 0 /* EQ2_B5_PG - [15:0] */
+#define ARIZONA_EQ2_B5_PG_WIDTH 16 /* EQ2_B5_PG - [15:0] */
+
+/*
+ * R3642 (0xE3A) - EQ2_21
+ */
+#define ARIZONA_EQ2_B1_C_MASK 0xFFFF /* EQ2_B1_C - [15:0] */
+#define ARIZONA_EQ2_B1_C_SHIFT 0 /* EQ2_B1_C - [15:0] */
+#define ARIZONA_EQ2_B1_C_WIDTH 16 /* EQ2_B1_C - [15:0] */
+
+/*
+ * R3644 (0xE3C) - EQ3_1
+ */
+#define ARIZONA_EQ3_B1_GAIN_MASK 0xF800 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B1_GAIN_SHIFT 11 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B1_GAIN_WIDTH 5 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B2_GAIN_MASK 0x07C0 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B2_GAIN_SHIFT 6 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B2_GAIN_WIDTH 5 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B3_GAIN_MASK 0x003E /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_B3_GAIN_SHIFT 1 /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_B3_GAIN_WIDTH 5 /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_ENA 0x0001 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_MASK 0x0001 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_SHIFT 0 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_WIDTH 1 /* EQ3_ENA */
+
+/*
+ * R3645 (0xE3D) - EQ3_2
+ */
+#define ARIZONA_EQ3_B4_GAIN_MASK 0xF800 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B4_GAIN_SHIFT 11 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B4_GAIN_WIDTH 5 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B5_GAIN_MASK 0x07C0 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B5_GAIN_SHIFT 6 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B5_GAIN_WIDTH 5 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B1_MODE 0x0001 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_MASK 0x0001 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_SHIFT 0 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_WIDTH 1 /* EQ3_B1_MODE */
+
+/*
+ * R3646 (0xE3E) - EQ3_3
+ */
+#define ARIZONA_EQ3_B1_A_MASK 0xFFFF /* EQ3_B1_A - [15:0] */
+#define ARIZONA_EQ3_B1_A_SHIFT 0 /* EQ3_B1_A - [15:0] */
+#define ARIZONA_EQ3_B1_A_WIDTH 16 /* EQ3_B1_A - [15:0] */
+
+/*
+ * R3647 (0xE3F) - EQ3_4
+ */
+#define ARIZONA_EQ3_B1_B_MASK 0xFFFF /* EQ3_B1_B - [15:0] */
+#define ARIZONA_EQ3_B1_B_SHIFT 0 /* EQ3_B1_B - [15:0] */
+#define ARIZONA_EQ3_B1_B_WIDTH 16 /* EQ3_B1_B - [15:0] */
+
+/*
+ * R3648 (0xE40) - EQ3_5
+ */
+#define ARIZONA_EQ3_B1_PG_MASK 0xFFFF /* EQ3_B1_PG - [15:0] */
+#define ARIZONA_EQ3_B1_PG_SHIFT 0 /* EQ3_B1_PG - [15:0] */
+#define ARIZONA_EQ3_B1_PG_WIDTH 16 /* EQ3_B1_PG - [15:0] */
+
+/*
+ * R3649 (0xE41) - EQ3_6
+ */
+#define ARIZONA_EQ3_B2_A_MASK 0xFFFF /* EQ3_B2_A - [15:0] */
+#define ARIZONA_EQ3_B2_A_SHIFT 0 /* EQ3_B2_A - [15:0] */
+#define ARIZONA_EQ3_B2_A_WIDTH 16 /* EQ3_B2_A - [15:0] */
+
+/*
+ * R3650 (0xE42) - EQ3_7
+ */
+#define ARIZONA_EQ3_B2_B_MASK 0xFFFF /* EQ3_B2_B - [15:0] */
+#define ARIZONA_EQ3_B2_B_SHIFT 0 /* EQ3_B2_B - [15:0] */
+#define ARIZONA_EQ3_B2_B_WIDTH 16 /* EQ3_B2_B - [15:0] */
+
+/*
+ * R3651 (0xE43) - EQ3_8
+ */
+#define ARIZONA_EQ3_B2_C_MASK 0xFFFF /* EQ3_B2_C - [15:0] */
+#define ARIZONA_EQ3_B2_C_SHIFT 0 /* EQ3_B2_C - [15:0] */
+#define ARIZONA_EQ3_B2_C_WIDTH 16 /* EQ3_B2_C - [15:0] */
+
+/*
+ * R3652 (0xE44) - EQ3_9
+ */
+#define ARIZONA_EQ3_B2_PG_MASK 0xFFFF /* EQ3_B2_PG - [15:0] */
+#define ARIZONA_EQ3_B2_PG_SHIFT 0 /* EQ3_B2_PG - [15:0] */
+#define ARIZONA_EQ3_B2_PG_WIDTH 16 /* EQ3_B2_PG - [15:0] */
+
+/*
+ * R3653 (0xE45) - EQ3_10
+ */
+#define ARIZONA_EQ3_B3_A_MASK 0xFFFF /* EQ3_B3_A - [15:0] */
+#define ARIZONA_EQ3_B3_A_SHIFT 0 /* EQ3_B3_A - [15:0] */
+#define ARIZONA_EQ3_B3_A_WIDTH 16 /* EQ3_B3_A - [15:0] */
+
+/*
+ * R3654 (0xE46) - EQ3_11
+ */
+#define ARIZONA_EQ3_B3_B_MASK 0xFFFF /* EQ3_B3_B - [15:0] */
+#define ARIZONA_EQ3_B3_B_SHIFT 0 /* EQ3_B3_B - [15:0] */
+#define ARIZONA_EQ3_B3_B_WIDTH 16 /* EQ3_B3_B - [15:0] */
+
+/*
+ * R3655 (0xE47) - EQ3_12
+ */
+#define ARIZONA_EQ3_B3_C_MASK 0xFFFF /* EQ3_B3_C - [15:0] */
+#define ARIZONA_EQ3_B3_C_SHIFT 0 /* EQ3_B3_C - [15:0] */
+#define ARIZONA_EQ3_B3_C_WIDTH 16 /* EQ3_B3_C - [15:0] */
+
+/*
+ * R3656 (0xE48) - EQ3_13
+ */
+#define ARIZONA_EQ3_B3_PG_MASK 0xFFFF /* EQ3_B3_PG - [15:0] */
+#define ARIZONA_EQ3_B3_PG_SHIFT 0 /* EQ3_B3_PG - [15:0] */
+#define ARIZONA_EQ3_B3_PG_WIDTH 16 /* EQ3_B3_PG - [15:0] */
+
+/*
+ * R3657 (0xE49) - EQ3_14
+ */
+#define ARIZONA_EQ3_B4_A_MASK 0xFFFF /* EQ3_B4_A - [15:0] */
+#define ARIZONA_EQ3_B4_A_SHIFT 0 /* EQ3_B4_A - [15:0] */
+#define ARIZONA_EQ3_B4_A_WIDTH 16 /* EQ3_B4_A - [15:0] */
+
+/*
+ * R3658 (0xE4A) - EQ3_15
+ */
+#define ARIZONA_EQ3_B4_B_MASK 0xFFFF /* EQ3_B4_B - [15:0] */
+#define ARIZONA_EQ3_B4_B_SHIFT 0 /* EQ3_B4_B - [15:0] */
+#define ARIZONA_EQ3_B4_B_WIDTH 16 /* EQ3_B4_B - [15:0] */
+
+/*
+ * R3659 (0xE4B) - EQ3_16
+ */
+#define ARIZONA_EQ3_B4_C_MASK 0xFFFF /* EQ3_B4_C - [15:0] */
+#define ARIZONA_EQ3_B4_C_SHIFT 0 /* EQ3_B4_C - [15:0] */
+#define ARIZONA_EQ3_B4_C_WIDTH 16 /* EQ3_B4_C - [15:0] */
+
+/*
+ * R3660 (0xE4C) - EQ3_17
+ */
+#define ARIZONA_EQ3_B4_PG_MASK 0xFFFF /* EQ3_B4_PG - [15:0] */
+#define ARIZONA_EQ3_B4_PG_SHIFT 0 /* EQ3_B4_PG - [15:0] */
+#define ARIZONA_EQ3_B4_PG_WIDTH 16 /* EQ3_B4_PG - [15:0] */
+
+/*
+ * R3661 (0xE4D) - EQ3_18
+ */
+#define ARIZONA_EQ3_B5_A_MASK 0xFFFF /* EQ3_B5_A - [15:0] */
+#define ARIZONA_EQ3_B5_A_SHIFT 0 /* EQ3_B5_A - [15:0] */
+#define ARIZONA_EQ3_B5_A_WIDTH 16 /* EQ3_B5_A - [15:0] */
+
+/*
+ * R3662 (0xE4E) - EQ3_19
+ */
+#define ARIZONA_EQ3_B5_B_MASK 0xFFFF /* EQ3_B5_B - [15:0] */
+#define ARIZONA_EQ3_B5_B_SHIFT 0 /* EQ3_B5_B - [15:0] */
+#define ARIZONA_EQ3_B5_B_WIDTH 16 /* EQ3_B5_B - [15:0] */
+
+/*
+ * R3663 (0xE4F) - EQ3_20
+ */
+#define ARIZONA_EQ3_B5_PG_MASK 0xFFFF /* EQ3_B5_PG - [15:0] */
+#define ARIZONA_EQ3_B5_PG_SHIFT 0 /* EQ3_B5_PG - [15:0] */
+#define ARIZONA_EQ3_B5_PG_WIDTH 16 /* EQ3_B5_PG - [15:0] */
+
+/*
+ * R3664 (0xE50) - EQ3_21
+ */
+#define ARIZONA_EQ3_B1_C_MASK 0xFFFF /* EQ3_B1_C - [15:0] */
+#define ARIZONA_EQ3_B1_C_SHIFT 0 /* EQ3_B1_C - [15:0] */
+#define ARIZONA_EQ3_B1_C_WIDTH 16 /* EQ3_B1_C - [15:0] */
+
+/*
+ * R3666 (0xE52) - EQ4_1
+ */
+#define ARIZONA_EQ4_B1_GAIN_MASK 0xF800 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B1_GAIN_SHIFT 11 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B1_GAIN_WIDTH 5 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B2_GAIN_MASK 0x07C0 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B2_GAIN_SHIFT 6 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B2_GAIN_WIDTH 5 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B3_GAIN_MASK 0x003E /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_B3_GAIN_SHIFT 1 /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_B3_GAIN_WIDTH 5 /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_ENA 0x0001 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_MASK 0x0001 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_SHIFT 0 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_WIDTH 1 /* EQ4_ENA */
+
+/*
+ * R3667 (0xE53) - EQ4_2
+ */
+#define ARIZONA_EQ4_B4_GAIN_MASK 0xF800 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B4_GAIN_SHIFT 11 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B4_GAIN_WIDTH 5 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B5_GAIN_MASK 0x07C0 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B5_GAIN_SHIFT 6 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B5_GAIN_WIDTH 5 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B1_MODE 0x0001 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_MASK 0x0001 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_SHIFT 0 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_WIDTH 1 /* EQ4_B1_MODE */
+
+/*
+ * R3668 (0xE54) - EQ4_3
+ */
+#define ARIZONA_EQ4_B1_A_MASK 0xFFFF /* EQ4_B1_A - [15:0] */
+#define ARIZONA_EQ4_B1_A_SHIFT 0 /* EQ4_B1_A - [15:0] */
+#define ARIZONA_EQ4_B1_A_WIDTH 16 /* EQ4_B1_A - [15:0] */
+
+/*
+ * R3669 (0xE55) - EQ4_4
+ */
+#define ARIZONA_EQ4_B1_B_MASK 0xFFFF /* EQ4_B1_B - [15:0] */
+#define ARIZONA_EQ4_B1_B_SHIFT 0 /* EQ4_B1_B - [15:0] */
+#define ARIZONA_EQ4_B1_B_WIDTH 16 /* EQ4_B1_B - [15:0] */
+
+/*
+ * R3670 (0xE56) - EQ4_5
+ */
+#define ARIZONA_EQ4_B1_PG_MASK 0xFFFF /* EQ4_B1_PG - [15:0] */
+#define ARIZONA_EQ4_B1_PG_SHIFT 0 /* EQ4_B1_PG - [15:0] */
+#define ARIZONA_EQ4_B1_PG_WIDTH 16 /* EQ4_B1_PG - [15:0] */
+
+/*
+ * R3671 (0xE57) - EQ4_6
+ */
+#define ARIZONA_EQ4_B2_A_MASK 0xFFFF /* EQ4_B2_A - [15:0] */
+#define ARIZONA_EQ4_B2_A_SHIFT 0 /* EQ4_B2_A - [15:0] */
+#define ARIZONA_EQ4_B2_A_WIDTH 16 /* EQ4_B2_A - [15:0] */
+
+/*
+ * R3672 (0xE58) - EQ4_7
+ */
+#define ARIZONA_EQ4_B2_B_MASK 0xFFFF /* EQ4_B2_B - [15:0] */
+#define ARIZONA_EQ4_B2_B_SHIFT 0 /* EQ4_B2_B - [15:0] */
+#define ARIZONA_EQ4_B2_B_WIDTH 16 /* EQ4_B2_B - [15:0] */
+
+/*
+ * R3673 (0xE59) - EQ4_8
+ */
+#define ARIZONA_EQ4_B2_C_MASK 0xFFFF /* EQ4_B2_C - [15:0] */
+#define ARIZONA_EQ4_B2_C_SHIFT 0 /* EQ4_B2_C - [15:0] */
+#define ARIZONA_EQ4_B2_C_WIDTH 16 /* EQ4_B2_C - [15:0] */
+
+/*
+ * R3674 (0xE5A) - EQ4_9
+ */
+#define ARIZONA_EQ4_B2_PG_MASK 0xFFFF /* EQ4_B2_PG - [15:0] */
+#define ARIZONA_EQ4_B2_PG_SHIFT 0 /* EQ4_B2_PG - [15:0] */
+#define ARIZONA_EQ4_B2_PG_WIDTH 16 /* EQ4_B2_PG - [15:0] */
+
+/*
+ * R3675 (0xE5B) - EQ4_10
+ */
+#define ARIZONA_EQ4_B3_A_MASK 0xFFFF /* EQ4_B3_A - [15:0] */
+#define ARIZONA_EQ4_B3_A_SHIFT 0 /* EQ4_B3_A - [15:0] */
+#define ARIZONA_EQ4_B3_A_WIDTH 16 /* EQ4_B3_A - [15:0] */
+
+/*
+ * R3676 (0xE5C) - EQ4_11
+ */
+#define ARIZONA_EQ4_B3_B_MASK 0xFFFF /* EQ4_B3_B - [15:0] */
+#define ARIZONA_EQ4_B3_B_SHIFT 0 /* EQ4_B3_B - [15:0] */
+#define ARIZONA_EQ4_B3_B_WIDTH 16 /* EQ4_B3_B - [15:0] */
+
+/*
+ * R3677 (0xE5D) - EQ4_12
+ */
+#define ARIZONA_EQ4_B3_C_MASK 0xFFFF /* EQ4_B3_C - [15:0] */
+#define ARIZONA_EQ4_B3_C_SHIFT 0 /* EQ4_B3_C - [15:0] */
+#define ARIZONA_EQ4_B3_C_WIDTH 16 /* EQ4_B3_C - [15:0] */
+
+/*
+ * R3678 (0xE5E) - EQ4_13
+ */
+#define ARIZONA_EQ4_B3_PG_MASK 0xFFFF /* EQ4_B3_PG - [15:0] */
+#define ARIZONA_EQ4_B3_PG_SHIFT 0 /* EQ4_B3_PG - [15:0] */
+#define ARIZONA_EQ4_B3_PG_WIDTH 16 /* EQ4_B3_PG - [15:0] */
+
+/*
+ * R3679 (0xE5F) - EQ4_14
+ */
+#define ARIZONA_EQ4_B4_A_MASK 0xFFFF /* EQ4_B4_A - [15:0] */
+#define ARIZONA_EQ4_B4_A_SHIFT 0 /* EQ4_B4_A - [15:0] */
+#define ARIZONA_EQ4_B4_A_WIDTH 16 /* EQ4_B4_A - [15:0] */
+
+/*
+ * R3680 (0xE60) - EQ4_15
+ */
+#define ARIZONA_EQ4_B4_B_MASK 0xFFFF /* EQ4_B4_B - [15:0] */
+#define ARIZONA_EQ4_B4_B_SHIFT 0 /* EQ4_B4_B - [15:0] */
+#define ARIZONA_EQ4_B4_B_WIDTH 16 /* EQ4_B4_B - [15:0] */
+
+/*
+ * R3681 (0xE61) - EQ4_16
+ */
+#define ARIZONA_EQ4_B4_C_MASK 0xFFFF /* EQ4_B4_C - [15:0] */
+#define ARIZONA_EQ4_B4_C_SHIFT 0 /* EQ4_B4_C - [15:0] */
+#define ARIZONA_EQ4_B4_C_WIDTH 16 /* EQ4_B4_C - [15:0] */
+
+/*
+ * R3682 (0xE62) - EQ4_17
+ */
+#define ARIZONA_EQ4_B4_PG_MASK 0xFFFF /* EQ4_B4_PG - [15:0] */
+#define ARIZONA_EQ4_B4_PG_SHIFT 0 /* EQ4_B4_PG - [15:0] */
+#define ARIZONA_EQ4_B4_PG_WIDTH 16 /* EQ4_B4_PG - [15:0] */
+
+/*
+ * R3683 (0xE63) - EQ4_18
+ */
+#define ARIZONA_EQ4_B5_A_MASK 0xFFFF /* EQ4_B5_A - [15:0] */
+#define ARIZONA_EQ4_B5_A_SHIFT 0 /* EQ4_B5_A - [15:0] */
+#define ARIZONA_EQ4_B5_A_WIDTH 16 /* EQ4_B5_A - [15:0] */
+
+/*
+ * R3684 (0xE64) - EQ4_19
+ */
+#define ARIZONA_EQ4_B5_B_MASK 0xFFFF /* EQ4_B5_B - [15:0] */
+#define ARIZONA_EQ4_B5_B_SHIFT 0 /* EQ4_B5_B - [15:0] */
+#define ARIZONA_EQ4_B5_B_WIDTH 16 /* EQ4_B5_B - [15:0] */
+
+/*
+ * R3685 (0xE65) - EQ4_20
+ */
+#define ARIZONA_EQ4_B5_PG_MASK 0xFFFF /* EQ4_B5_PG - [15:0] */
+#define ARIZONA_EQ4_B5_PG_SHIFT 0 /* EQ4_B5_PG - [15:0] */
+#define ARIZONA_EQ4_B5_PG_WIDTH 16 /* EQ4_B5_PG - [15:0] */
+
+/*
+ * R3686 (0xE66) - EQ4_21
+ */
+#define ARIZONA_EQ4_B1_C_MASK 0xFFFF /* EQ4_B1_C - [15:0] */
+#define ARIZONA_EQ4_B1_C_SHIFT 0 /* EQ4_B1_C - [15:0] */
+#define ARIZONA_EQ4_B1_C_WIDTH 16 /* EQ4_B1_C - [15:0] */
+
+/*
+ * R3712 (0xE80) - DRC1 ctrl1
+ */
+#define ARIZONA_DRC1_SIG_DET_RMS_MASK 0xF800 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_RMS_SHIFT 11 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_RMS_WIDTH 5 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_PK_MASK 0x0600 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_SIG_DET_PK_SHIFT 9 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_SIG_DET_PK_WIDTH 2 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_NG_ENA 0x0100 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_MASK 0x0100 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_SHIFT 8 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_WIDTH 1 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_SIG_DET_MODE 0x0080 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_MASK 0x0080 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_SHIFT 7 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_WIDTH 1 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET 0x0040 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_MASK 0x0040 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_SHIFT 6 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_WIDTH 1 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_KNEE2_OP_ENA 0x0020 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_MASK 0x0020 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_SHIFT 5 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_WIDTH 1 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_QR 0x0010 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_MASK 0x0010 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_SHIFT 4 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_WIDTH 1 /* DRC1_QR */
+#define ARIZONA_DRC1_ANTICLIP 0x0008 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_MASK 0x0008 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_SHIFT 3 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_WIDTH 1 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1L_ENA 0x0002 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_MASK 0x0002 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_SHIFT 1 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_WIDTH 1 /* DRC1L_ENA */
+#define ARIZONA_DRC1R_ENA 0x0001 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_MASK 0x0001 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_SHIFT 0 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_WIDTH 1 /* DRC1R_ENA */
+
+/*
+ * R3713 (0xE81) - DRC1 ctrl2
+ */
+#define ARIZONA_DRC1_ATK_MASK 0x1E00 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_ATK_SHIFT 9 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_ATK_WIDTH 4 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_DCY_MASK 0x01E0 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_DCY_SHIFT 5 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_DCY_WIDTH 4 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_MINGAIN_MASK 0x001C /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MINGAIN_SHIFT 2 /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MINGAIN_WIDTH 3 /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MAXGAIN_MASK 0x0003 /* DRC1_MAXGAIN - [1:0] */
+#define ARIZONA_DRC1_MAXGAIN_SHIFT 0 /* DRC1_MAXGAIN - [1:0] */
+#define ARIZONA_DRC1_MAXGAIN_WIDTH 2 /* DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R3714 (0xE82) - DRC1 ctrl3
+ */
+#define ARIZONA_DRC1_NG_MINGAIN_MASK 0xF000 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_MINGAIN_SHIFT 12 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_MINGAIN_WIDTH 4 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_EXP_MASK 0x0C00 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_NG_EXP_SHIFT 10 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_NG_EXP_WIDTH 2 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_QR_THR_MASK 0x0300 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_THR_SHIFT 8 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_THR_WIDTH 2 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_DCY_MASK 0x00C0 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_QR_DCY_SHIFT 6 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_QR_DCY_WIDTH 2 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_HI_COMP_MASK 0x0038 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_HI_COMP_SHIFT 3 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_HI_COMP_WIDTH 3 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_LO_COMP_MASK 0x0007 /* DRC1_LO_COMP - [2:0] */
+#define ARIZONA_DRC1_LO_COMP_SHIFT 0 /* DRC1_LO_COMP - [2:0] */
+#define ARIZONA_DRC1_LO_COMP_WIDTH 3 /* DRC1_LO_COMP - [2:0] */
+
+/*
+ * R3715 (0xE83) - DRC1 ctrl4
+ */
+#define ARIZONA_DRC1_KNEE_IP_MASK 0x07E0 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_IP_SHIFT 5 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_IP_WIDTH 6 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_OP_MASK 0x001F /* DRC1_KNEE_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE_OP_SHIFT 0 /* DRC1_KNEE_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE_OP_WIDTH 5 /* DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R3716 (0xE84) - DRC1 ctrl5
+ */
+#define ARIZONA_DRC1_KNEE2_IP_MASK 0x03E0 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_IP_SHIFT 5 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_IP_WIDTH 5 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_OP_MASK 0x001F /* DRC1_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE2_OP_SHIFT 0 /* DRC1_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE2_OP_WIDTH 5 /* DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R3721 (0xE89) - DRC2 ctrl1
+ */
+#define ARIZONA_DRC2_SIG_DET_RMS_MASK 0xF800 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_RMS_SHIFT 11 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_RMS_WIDTH 5 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_PK_MASK 0x0600 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_SIG_DET_PK_SHIFT 9 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_SIG_DET_PK_WIDTH 2 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_NG_ENA 0x0100 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_MASK 0x0100 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_SHIFT 8 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_WIDTH 1 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_SIG_DET_MODE 0x0080 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_MASK 0x0080 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_SHIFT 7 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_WIDTH 1 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET 0x0040 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_MASK 0x0040 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_SHIFT 6 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_WIDTH 1 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_KNEE2_OP_ENA 0x0020 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_MASK 0x0020 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_SHIFT 5 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_WIDTH 1 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_QR 0x0010 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_MASK 0x0010 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_SHIFT 4 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_WIDTH 1 /* DRC2_QR */
+#define ARIZONA_DRC2_ANTICLIP 0x0008 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_MASK 0x0008 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_SHIFT 3 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_WIDTH 1 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2L_ENA 0x0002 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_MASK 0x0002 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_SHIFT 1 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_WIDTH 1 /* DRC2L_ENA */
+#define ARIZONA_DRC2R_ENA 0x0001 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_MASK 0x0001 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_SHIFT 0 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_WIDTH 1 /* DRC2R_ENA */
+
+/*
+ * R3722 (0xE8A) - DRC2 ctrl2
+ */
+#define ARIZONA_DRC2_ATK_MASK 0x1E00 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_ATK_SHIFT 9 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_ATK_WIDTH 4 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_DCY_MASK 0x01E0 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_DCY_SHIFT 5 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_DCY_WIDTH 4 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_MINGAIN_MASK 0x001C /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MINGAIN_SHIFT 2 /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MINGAIN_WIDTH 3 /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MAXGAIN_MASK 0x0003 /* DRC2_MAXGAIN - [1:0] */
+#define ARIZONA_DRC2_MAXGAIN_SHIFT 0 /* DRC2_MAXGAIN - [1:0] */
+#define ARIZONA_DRC2_MAXGAIN_WIDTH 2 /* DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R3723 (0xE8B) - DRC2 ctrl3
+ */
+#define ARIZONA_DRC2_NG_MINGAIN_MASK 0xF000 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_MINGAIN_SHIFT 12 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_MINGAIN_WIDTH 4 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_EXP_MASK 0x0C00 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_NG_EXP_SHIFT 10 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_NG_EXP_WIDTH 2 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_QR_THR_MASK 0x0300 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_THR_SHIFT 8 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_THR_WIDTH 2 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_DCY_MASK 0x00C0 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_QR_DCY_SHIFT 6 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_QR_DCY_WIDTH 2 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_HI_COMP_MASK 0x0038 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_HI_COMP_SHIFT 3 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_HI_COMP_WIDTH 3 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_LO_COMP_MASK 0x0007 /* DRC2_LO_COMP - [2:0] */
+#define ARIZONA_DRC2_LO_COMP_SHIFT 0 /* DRC2_LO_COMP - [2:0] */
+#define ARIZONA_DRC2_LO_COMP_WIDTH 3 /* DRC2_LO_COMP - [2:0] */
+
+/*
+ * R3724 (0xE8C) - DRC2 ctrl4
+ */
+#define ARIZONA_DRC2_KNEE_IP_MASK 0x07E0 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_IP_SHIFT 5 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_IP_WIDTH 6 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_OP_MASK 0x001F /* DRC2_KNEE_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE_OP_SHIFT 0 /* DRC2_KNEE_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE_OP_WIDTH 5 /* DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R3725 (0xE8D) - DRC2 ctrl5
+ */
+#define ARIZONA_DRC2_KNEE2_IP_MASK 0x03E0 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_IP_SHIFT 5 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_IP_WIDTH 5 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_OP_MASK 0x001F /* DRC2_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE2_OP_SHIFT 0 /* DRC2_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE2_OP_WIDTH 5 /* DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R3776 (0xEC0) - HPLPF1_1
+ */
+#define ARIZONA_LHPF1_MODE 0x0002 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_MASK 0x0002 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_SHIFT 1 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_WIDTH 1 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_ENA 0x0001 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_MASK 0x0001 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_SHIFT 0 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_WIDTH 1 /* LHPF1_ENA */
+
+/*
+ * R3777 (0xEC1) - HPLPF1_2
+ */
+#define ARIZONA_LHPF1_COEFF_MASK 0xFFFF /* LHPF1_COEFF - [15:0] */
+#define ARIZONA_LHPF1_COEFF_SHIFT 0 /* LHPF1_COEFF - [15:0] */
+#define ARIZONA_LHPF1_COEFF_WIDTH 16 /* LHPF1_COEFF - [15:0] */
+
+/*
+ * R3780 (0xEC4) - HPLPF2_1
+ */
+#define ARIZONA_LHPF2_MODE 0x0002 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_MASK 0x0002 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_SHIFT 1 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_WIDTH 1 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_ENA 0x0001 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_MASK 0x0001 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_SHIFT 0 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_WIDTH 1 /* LHPF2_ENA */
+
+/*
+ * R3781 (0xEC5) - HPLPF2_2
+ */
+#define ARIZONA_LHPF2_COEFF_MASK 0xFFFF /* LHPF2_COEFF - [15:0] */
+#define ARIZONA_LHPF2_COEFF_SHIFT 0 /* LHPF2_COEFF - [15:0] */
+#define ARIZONA_LHPF2_COEFF_WIDTH 16 /* LHPF2_COEFF - [15:0] */
+
+/*
+ * R3784 (0xEC8) - HPLPF3_1
+ */
+#define ARIZONA_LHPF3_MODE 0x0002 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_MASK 0x0002 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_SHIFT 1 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_WIDTH 1 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_ENA 0x0001 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_MASK 0x0001 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_SHIFT 0 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_WIDTH 1 /* LHPF3_ENA */
+
+/*
+ * R3785 (0xEC9) - HPLPF3_2
+ */
+#define ARIZONA_LHPF3_COEFF_MASK 0xFFFF /* LHPF3_COEFF - [15:0] */
+#define ARIZONA_LHPF3_COEFF_SHIFT 0 /* LHPF3_COEFF - [15:0] */
+#define ARIZONA_LHPF3_COEFF_WIDTH 16 /* LHPF3_COEFF - [15:0] */
+
+/*
+ * R3788 (0xECC) - HPLPF4_1
+ */
+#define ARIZONA_LHPF4_MODE 0x0002 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_MASK 0x0002 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_SHIFT 1 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_WIDTH 1 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_ENA 0x0001 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_MASK 0x0001 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_SHIFT 0 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_WIDTH 1 /* LHPF4_ENA */
+
+/*
+ * R3789 (0xECD) - HPLPF4_2
+ */
+#define ARIZONA_LHPF4_COEFF_MASK 0xFFFF /* LHPF4_COEFF - [15:0] */
+#define ARIZONA_LHPF4_COEFF_SHIFT 0 /* LHPF4_COEFF - [15:0] */
+#define ARIZONA_LHPF4_COEFF_WIDTH 16 /* LHPF4_COEFF - [15:0] */
+
+/*
+ * R3808 (0xEE0) - ASRC_ENABLE
+ */
+#define ARIZONA_ASRC2L_ENA 0x0008 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_MASK 0x0008 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_SHIFT 3 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_WIDTH 1 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2R_ENA 0x0004 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_MASK 0x0004 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_SHIFT 2 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_WIDTH 1 /* ASRC2R_ENA */
+#define ARIZONA_ASRC1L_ENA 0x0002 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_MASK 0x0002 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_SHIFT 1 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_WIDTH 1 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1R_ENA 0x0001 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_MASK 0x0001 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_SHIFT 0 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_WIDTH 1 /* ASRC1R_ENA */
+
+/*
+ * R3810 (0xEE2) - ASRC_RATE1
+ */
+#define ARIZONA_ASRC_RATE1_MASK 0x7800 /* ASRC_RATE1 - [14:11] */
+#define ARIZONA_ASRC_RATE1_SHIFT 11 /* ASRC_RATE1 - [14:11] */
+#define ARIZONA_ASRC_RATE1_WIDTH 4 /* ASRC_RATE1 - [14:11] */
+
+/*
+ * R3811 (0xEE3) - ASRC_RATE2
+ */
+#define ARIZONA_ASRC_RATE2_MASK 0x7800 /* ASRC_RATE2 - [14:11] */
+#define ARIZONA_ASRC_RATE2_SHIFT 11 /* ASRC_RATE2 - [14:11] */
+#define ARIZONA_ASRC_RATE2_WIDTH 4 /* ASRC_RATE2 - [14:11] */
+
+/*
+ * R3824 (0xEF0) - ISRC 1 CTRL 1
+ */
+#define ARIZONA_ISRC1_FSH_MASK 0x7800 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_FSH_SHIFT 11 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_FSH_WIDTH 4 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_CLK_SEL_MASK 0x0700 /* ISRC1_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC1_CLK_SEL_SHIFT 8 /* ISRC1_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC1_CLK_SEL_WIDTH 3 /* ISRC1_CLK_SEL - [10:8] */
+
+/*
+ * R3825 (0xEF1) - ISRC 1 CTRL 2
+ */
+#define ARIZONA_ISRC1_FSL_MASK 0x7800 /* ISRC1_FSL - [14:11] */
+#define ARIZONA_ISRC1_FSL_SHIFT 11 /* ISRC1_FSL - [14:11] */
+#define ARIZONA_ISRC1_FSL_WIDTH 4 /* ISRC1_FSL - [14:11] */
+
+/*
+ * R3826 (0xEF2) - ISRC 1 CTRL 3
+ */
+#define ARIZONA_ISRC1_INT0_ENA 0x8000 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_MASK 0x8000 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_SHIFT 15 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_WIDTH 1 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT1_ENA 0x4000 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_MASK 0x4000 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_SHIFT 14 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_WIDTH 1 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT2_ENA 0x2000 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_MASK 0x2000 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_SHIFT 13 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_WIDTH 1 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT3_ENA 0x1000 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_MASK 0x1000 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_SHIFT 12 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_WIDTH 1 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA 0x0200 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_MASK 0x0200 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_SHIFT 9 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_WIDTH 1 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA 0x0100 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_MASK 0x0100 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_SHIFT 8 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_WIDTH 1 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA 0x0080 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_MASK 0x0080 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_SHIFT 7 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_WIDTH 1 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA 0x0040 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_MASK 0x0040 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_SHIFT 6 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_WIDTH 1 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA 0x0001 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_MASK 0x0001 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_SHIFT 0 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_WIDTH 1 /* ISRC1_NOTCH_ENA */
+
+/*
+ * R3827 (0xEF3) - ISRC 2 CTRL 1
+ */
+#define ARIZONA_ISRC2_FSH_MASK 0x7800 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_FSH_SHIFT 11 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_FSH_WIDTH 4 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_CLK_SEL_MASK 0x0700 /* ISRC2_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC2_CLK_SEL_SHIFT 8 /* ISRC2_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC2_CLK_SEL_WIDTH 3 /* ISRC2_CLK_SEL - [10:8] */
+
+/*
+ * R3828 (0xEF4) - ISRC 2 CTRL 2
+ */
+#define ARIZONA_ISRC2_FSL_MASK 0x7800 /* ISRC2_FSL - [14:11] */
+#define ARIZONA_ISRC2_FSL_SHIFT 11 /* ISRC2_FSL - [14:11] */
+#define ARIZONA_ISRC2_FSL_WIDTH 4 /* ISRC2_FSL - [14:11] */
+
+/*
+ * R3829 (0xEF5) - ISRC 2 CTRL 3
+ */
+#define ARIZONA_ISRC2_INT0_ENA 0x8000 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_MASK 0x8000 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_SHIFT 15 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_WIDTH 1 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT1_ENA 0x4000 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_MASK 0x4000 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_SHIFT 14 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_WIDTH 1 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT2_ENA 0x2000 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_MASK 0x2000 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_SHIFT 13 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_WIDTH 1 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT3_ENA 0x1000 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_MASK 0x1000 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_SHIFT 12 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_WIDTH 1 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA 0x0200 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_MASK 0x0200 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_SHIFT 9 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_WIDTH 1 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA 0x0100 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_MASK 0x0100 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_SHIFT 8 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_WIDTH 1 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA 0x0080 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_MASK 0x0080 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_SHIFT 7 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_WIDTH 1 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA 0x0040 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_MASK 0x0040 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_SHIFT 6 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_WIDTH 1 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA 0x0001 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_MASK 0x0001 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_SHIFT 0 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_WIDTH 1 /* ISRC2_NOTCH_ENA */
+
+/*
+ * R3830 (0xEF6) - ISRC 3 CTRL 1
+ */
+#define ARIZONA_ISRC3_FSH_MASK 0x7800 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_FSH_SHIFT 11 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_FSH_WIDTH 4 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_CLK_SEL_MASK 0x0700 /* ISRC3_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC3_CLK_SEL_SHIFT 8 /* ISRC3_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC3_CLK_SEL_WIDTH 3 /* ISRC3_CLK_SEL - [10:8] */
+
+/*
+ * R3831 (0xEF7) - ISRC 3 CTRL 2
+ */
+#define ARIZONA_ISRC3_FSL_MASK 0x7800 /* ISRC3_FSL - [14:11] */
+#define ARIZONA_ISRC3_FSL_SHIFT 11 /* ISRC3_FSL - [14:11] */
+#define ARIZONA_ISRC3_FSL_WIDTH 4 /* ISRC3_FSL - [14:11] */
+
+/*
+ * R3832 (0xEF8) - ISRC 3 CTRL 3
+ */
+#define ARIZONA_ISRC3_INT0_ENA 0x8000 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_MASK 0x8000 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_SHIFT 15 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_WIDTH 1 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT1_ENA 0x4000 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_MASK 0x4000 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_SHIFT 14 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_WIDTH 1 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT2_ENA 0x2000 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_MASK 0x2000 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_SHIFT 13 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_WIDTH 1 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT3_ENA 0x1000 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_MASK 0x1000 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_SHIFT 12 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_WIDTH 1 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA 0x0200 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_MASK 0x0200 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_SHIFT 9 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_WIDTH 1 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA 0x0100 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_MASK 0x0100 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_SHIFT 8 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_WIDTH 1 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA 0x0080 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_MASK 0x0080 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_SHIFT 7 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_WIDTH 1 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA 0x0040 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_MASK 0x0040 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_SHIFT 6 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_WIDTH 1 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA 0x0001 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_MASK 0x0001 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_SHIFT 0 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */
+
+/*
+ * R4352 (0x1100) - DSP1 Control 1
+ */
+#define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_RATE_SHIFT 11 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_RATE_WIDTH 4 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_START 0x0001 /* DSP1_START */
+#define ARIZONA_DSP1_START_MASK 0x0001 /* DSP1_START */
+#define ARIZONA_DSP1_START_SHIFT 0 /* DSP1_START */
+#define ARIZONA_DSP1_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * R4353 (0x1101) - DSP1 Clocking 1
+ */
+#define ARIZONA_DSP1_CLK_SEL_MASK 0x0007 /* DSP1_CLK_SEL - [2:0] */
+#define ARIZONA_DSP1_CLK_SEL_SHIFT 0 /* DSP1_CLK_SEL - [2:0] */
+#define ARIZONA_DSP1_CLK_SEL_WIDTH 3 /* DSP1_CLK_SEL - [2:0] */
+
+/*
+ * R4356 (0x1104) - DSP1 Status 1
+ */
+#define ARIZONA_DSP1_RAM_RDY 0x0001 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_MASK 0x0001 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_SHIFT 0 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_WIDTH 1 /* DSP1_RAM_RDY */
+
+/*
+ * R4357 (0x1105) - DSP1 Status 2
+ */
+#define ARIZONA_DSP1_PING_FULL 0x8000 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_MASK 0x8000 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_SHIFT 15 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_WIDTH 1 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PONG_FULL 0x4000 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_MASK 0x4000 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_SHIFT 14 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_WIDTH 1 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_MASK 0x00FF /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_SHIFT 0 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_WIDTH 8 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+
+#endif
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 4e76163dd862..3a8435a8058f 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -36,6 +36,11 @@ struct mfd_cell {
/* platform data passed to the sub devices drivers */
void *platform_data;
size_t pdata_size;
+ /*
+ * Device Tree compatible string
+ * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ */
+ const char *of_compatible;
/*
* These resources can be specified relative to the parent device.
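
Illustrative aside, not part of the patch: with the new of_compatible field, a parent MFD driver can let the core match its sub-devices against child nodes in the Device Tree. A minimal sketch, with hypothetical cell and compatible names:

static struct mfd_cell example_cells[] = {
	{
		.name          = "example-codec",
		.of_compatible = "vendor,example-codec", /* matched against a child DT node */
	},
	{
		.name          = "example-adc",
		.of_compatible = "vendor,example-adc",
	},
};

The array is registered from the parent's probe routine with mfd_add_devices() exactly as before; only the compatible strings are new.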
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index b3a43b1263fe..b82f6ee66a0b 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -530,7 +530,7 @@ int db8500_prcmu_stop_temp_sense(void);
int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
-void prcmu_ac_wake_req(void);
+int prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
void db8500_prcmu_modem_reset(void);
@@ -680,7 +680,10 @@ static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
return -ENOSYS;
}
-static inline void prcmu_ac_wake_req(void) {}
+static inline int prcmu_ac_wake_req(void)
+{
+ return 0;
+}
static inline void prcmu_ac_sleep_req(void) {}
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 5a13f93d8f1c..5b90e94399e1 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -345,7 +345,7 @@ static inline u16 prcmu_get_reset_code(void)
return db8500_prcmu_get_reset_code();
}
-void prcmu_ac_wake_req(void);
+int prcmu_ac_wake_req(void);
void prcmu_ac_sleep_req(void);
static inline void prcmu_modem_reset(void)
{
@@ -533,7 +533,10 @@ static inline u16 prcmu_get_reset_code(void)
return 0;
}
-static inline void prcmu_ac_wake_req(void) {}
+static inline int prcmu_ac_wake_req(void)
+{
+ return 0;
+}
static inline void prcmu_ac_sleep_req(void) {}
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 40c372165f3e..32a1b5cfeba1 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -16,6 +16,7 @@ struct pcap_subdev {
struct pcap_platform_data {
unsigned int irq_base;
unsigned int config;
+ int gpio;
void (*init) (void *); /* board specific init */
int num_subdevs;
struct pcap_subdev *subdevs;
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
new file mode 100644
index 000000000000..d327d4971e4f
--- /dev/null
+++ b/include/linux/mfd/max77686-private.h
@@ -0,0 +1,246 @@
+/*
+ * max77686.h - Voltage regulator driver for the Maxim 77686
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX77686_PRIV_H
+#define __LINUX_MFD_MAX77686_PRIV_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+
+#define MAX77686_REG_INVALID (0xff)
+
+enum max77686_pmic_reg {
+ MAX77686_REG_DEVICE_ID = 0x00,
+ MAX77686_REG_INTSRC = 0x01,
+ MAX77686_REG_INT1 = 0x02,
+ MAX77686_REG_INT2 = 0x03,
+
+ MAX77686_REG_INT1MSK = 0x04,
+ MAX77686_REG_INT2MSK = 0x05,
+
+ MAX77686_REG_STATUS1 = 0x06,
+ MAX77686_REG_STATUS2 = 0x07,
+
+ MAX77686_REG_PWRON = 0x08,
+ MAX77686_REG_ONOFF_DELAY = 0x09,
+ MAX77686_REG_MRSTB = 0x0A,
+ /* Reserved: 0x0B-0x0F */
+
+ MAX77686_REG_BUCK1CTRL = 0x10,
+ MAX77686_REG_BUCK1OUT = 0x11,
+ MAX77686_REG_BUCK2CTRL1 = 0x12,
+ MAX77686_REG_BUCK234FREQ = 0x13,
+ MAX77686_REG_BUCK2DVS1 = 0x14,
+ MAX77686_REG_BUCK2DVS2 = 0x15,
+ MAX77686_REG_BUCK2DVS3 = 0x16,
+ MAX77686_REG_BUCK2DVS4 = 0x17,
+ MAX77686_REG_BUCK2DVS5 = 0x18,
+ MAX77686_REG_BUCK2DVS6 = 0x19,
+ MAX77686_REG_BUCK2DVS7 = 0x1A,
+ MAX77686_REG_BUCK2DVS8 = 0x1B,
+ MAX77686_REG_BUCK3CTRL1 = 0x1C,
+ /* Reserved: 0x1D */
+ MAX77686_REG_BUCK3DVS1 = 0x1E,
+ MAX77686_REG_BUCK3DVS2 = 0x1F,
+ MAX77686_REG_BUCK3DVS3 = 0x20,
+ MAX77686_REG_BUCK3DVS4 = 0x21,
+ MAX77686_REG_BUCK3DVS5 = 0x22,
+ MAX77686_REG_BUCK3DVS6 = 0x23,
+ MAX77686_REG_BUCK3DVS7 = 0x24,
+ MAX77686_REG_BUCK3DVS8 = 0x25,
+ MAX77686_REG_BUCK4CTRL1 = 0x26,
+ /* Reserved: 0x27 */
+ MAX77686_REG_BUCK4DVS1 = 0x28,
+ MAX77686_REG_BUCK4DVS2 = 0x29,
+ MAX77686_REG_BUCK4DVS3 = 0x2A,
+ MAX77686_REG_BUCK4DVS4 = 0x2B,
+ MAX77686_REG_BUCK4DVS5 = 0x2C,
+ MAX77686_REG_BUCK4DVS6 = 0x2D,
+ MAX77686_REG_BUCK4DVS7 = 0x2E,
+ MAX77686_REG_BUCK4DVS8 = 0x2F,
+ MAX77686_REG_BUCK5CTRL = 0x30,
+ MAX77686_REG_BUCK5OUT = 0x31,
+ MAX77686_REG_BUCK6CTRL = 0x32,
+ MAX77686_REG_BUCK6OUT = 0x33,
+ MAX77686_REG_BUCK7CTRL = 0x34,
+ MAX77686_REG_BUCK7OUT = 0x35,
+ MAX77686_REG_BUCK8CTRL = 0x36,
+ MAX77686_REG_BUCK8OUT = 0x37,
+ MAX77686_REG_BUCK9CTRL = 0x38,
+ MAX77686_REG_BUCK9OUT = 0x39,
+ /* Reserved: 0x3A-0x3F */
+
+ MAX77686_REG_LDO1CTRL1 = 0x40,
+ MAX77686_REG_LDO2CTRL1 = 0x41,
+ MAX77686_REG_LDO3CTRL1 = 0x42,
+ MAX77686_REG_LDO4CTRL1 = 0x43,
+ MAX77686_REG_LDO5CTRL1 = 0x44,
+ MAX77686_REG_LDO6CTRL1 = 0x45,
+ MAX77686_REG_LDO7CTRL1 = 0x46,
+ MAX77686_REG_LDO8CTRL1 = 0x47,
+ MAX77686_REG_LDO9CTRL1 = 0x48,
+ MAX77686_REG_LDO10CTRL1 = 0x49,
+ MAX77686_REG_LDO11CTRL1 = 0x4A,
+ MAX77686_REG_LDO12CTRL1 = 0x4B,
+ MAX77686_REG_LDO13CTRL1 = 0x4C,
+ MAX77686_REG_LDO14CTRL1 = 0x4D,
+ MAX77686_REG_LDO15CTRL1 = 0x4E,
+ MAX77686_REG_LDO16CTRL1 = 0x4F,
+ MAX77686_REG_LDO17CTRL1 = 0x50,
+ MAX77686_REG_LDO18CTRL1 = 0x51,
+ MAX77686_REG_LDO19CTRL1 = 0x52,
+ MAX77686_REG_LDO20CTRL1 = 0x53,
+ MAX77686_REG_LDO21CTRL1 = 0x54,
+ MAX77686_REG_LDO22CTRL1 = 0x55,
+ MAX77686_REG_LDO23CTRL1 = 0x56,
+ MAX77686_REG_LDO24CTRL1 = 0x57,
+ MAX77686_REG_LDO25CTRL1 = 0x58,
+ MAX77686_REG_LDO26CTRL1 = 0x59,
+ /* Reserved: 0x5A-0x5F */
+ MAX77686_REG_LDO1CTRL2 = 0x60,
+ MAX77686_REG_LDO2CTRL2 = 0x61,
+ MAX77686_REG_LDO3CTRL2 = 0x62,
+ MAX77686_REG_LDO4CTRL2 = 0x63,
+ MAX77686_REG_LDO5CTRL2 = 0x64,
+ MAX77686_REG_LDO6CTRL2 = 0x65,
+ MAX77686_REG_LDO7CTRL2 = 0x66,
+ MAX77686_REG_LDO8CTRL2 = 0x67,
+ MAX77686_REG_LDO9CTRL2 = 0x68,
+ MAX77686_REG_LDO10CTRL2 = 0x69,
+ MAX77686_REG_LDO11CTRL2 = 0x6A,
+ MAX77686_REG_LDO12CTRL2 = 0x6B,
+ MAX77686_REG_LDO13CTRL2 = 0x6C,
+ MAX77686_REG_LDO14CTRL2 = 0x6D,
+ MAX77686_REG_LDO15CTRL2 = 0x6E,
+ MAX77686_REG_LDO16CTRL2 = 0x6F,
+ MAX77686_REG_LDO17CTRL2 = 0x70,
+ MAX77686_REG_LDO18CTRL2 = 0x71,
+ MAX77686_REG_LDO19CTRL2 = 0x72,
+ MAX77686_REG_LDO20CTRL2 = 0x73,
+ MAX77686_REG_LDO21CTRL2 = 0x74,
+ MAX77686_REG_LDO22CTRL2 = 0x75,
+ MAX77686_REG_LDO23CTRL2 = 0x76,
+ MAX77686_REG_LDO24CTRL2 = 0x77,
+ MAX77686_REG_LDO25CTRL2 = 0x78,
+ MAX77686_REG_LDO26CTRL2 = 0x79,
+ /* Reserved: 0x7A-0x7D */
+
+ MAX77686_REG_BBAT_CHG = 0x7E,
+ MAX77686_REG_32KHZ = 0x7F,
+
+ MAX77686_REG_PMIC_END = 0x80,
+};
+
+enum max77686_rtc_reg {
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_CONTROLM = 0x02,
+ MAX77686_RTC_CONTROL = 0x03,
+ MAX77686_RTC_UPDATE0 = 0x04,
+ /* Reserved: 0x5 */
+ MAX77686_WTSR_SMPL_CNTL = 0x06,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_WEEKDAY = 0x0A,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_DATE = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_ALARM1_HOUR = 0x10,
+ MAX77686_ALARM1_WEEKDAY = 0x11,
+ MAX77686_ALARM1_MONTH = 0x12,
+ MAX77686_ALARM1_YEAR = 0x13,
+ MAX77686_ALARM1_DATE = 0x14,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_HOUR = 0x17,
+ MAX77686_ALARM2_WEEKDAY = 0x18,
+ MAX77686_ALARM2_MONTH = 0x19,
+ MAX77686_ALARM2_YEAR = 0x1A,
+ MAX77686_ALARM2_DATE = 0x1B,
+};
+
+#define MAX77686_IRQSRC_PMIC (0)
+#define MAX77686_IRQSRC_RTC (1 << 0)
+
+enum max77686_irq_source {
+ PMIC_INT1 = 0,
+ PMIC_INT2,
+ RTC_INT,
+
+ MAX77686_IRQ_GROUP_NR,
+};
+
+enum max77686_irq {
+ MAX77686_PMICIRQ_PWRONF,
+ MAX77686_PMICIRQ_PWRONR,
+ MAX77686_PMICIRQ_JIGONBF,
+ MAX77686_PMICIRQ_JIGONBR,
+ MAX77686_PMICIRQ_ACOKBF,
+ MAX77686_PMICIRQ_ACOKBR,
+ MAX77686_PMICIRQ_ONKEY1S,
+ MAX77686_PMICIRQ_MRSTB,
+
+ MAX77686_PMICIRQ_140C,
+ MAX77686_PMICIRQ_120C,
+
+ MAX77686_RTCIRQ_RTC60S,
+ MAX77686_RTCIRQ_RTCA1,
+ MAX77686_RTCIRQ_RTCA2,
+ MAX77686_RTCIRQ_SMPL,
+ MAX77686_RTCIRQ_RTC1S,
+ MAX77686_RTCIRQ_WTSR,
+
+ MAX77686_IRQ_NR,
+};
+
+struct max77686_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
+ struct i2c_client *rtc; /* slave addr 0x0c */
+
+ int type;
+
+ struct regmap *regmap; /* regmap for mfd */
+ struct regmap *rtc_regmap; /* regmap for rtc */
+
+ struct irq_domain *irq_domain;
+
+ int irq;
+ int irq_gpio;
+ bool wakeup;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX77686_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX77686_IRQ_GROUP_NR];
+};
+
+enum max77686_types {
+ TYPE_MAX77686,
+};
+
+extern int max77686_irq_init(struct max77686_dev *max77686);
+extern void max77686_irq_exit(struct max77686_dev *max77686);
+extern int max77686_irq_resume(struct max77686_dev *max77686);
+
+#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
new file mode 100644
index 000000000000..3d7ae4d7fd36
--- /dev/null
+++ b/include/linux/mfd/max77686.h
@@ -0,0 +1,114 @@
+/*
+ * max77686.h - Driver for the Maxim 77686
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77686 has PMIC and RTC devices.
+ * The devices share the same I2C bus and are included in
+ * this mfd driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77686_H
+#define __LINUX_MFD_MAX77686_H
+
+#include <linux/regulator/consumer.h>
+
+/* MAX77686 regulator IDs */
+enum max77686_regulators {
+ MAX77686_LDO1 = 0,
+ MAX77686_LDO2,
+ MAX77686_LDO3,
+ MAX77686_LDO4,
+ MAX77686_LDO5,
+ MAX77686_LDO6,
+ MAX77686_LDO7,
+ MAX77686_LDO8,
+ MAX77686_LDO9,
+ MAX77686_LDO10,
+ MAX77686_LDO11,
+ MAX77686_LDO12,
+ MAX77686_LDO13,
+ MAX77686_LDO14,
+ MAX77686_LDO15,
+ MAX77686_LDO16,
+ MAX77686_LDO17,
+ MAX77686_LDO18,
+ MAX77686_LDO19,
+ MAX77686_LDO20,
+ MAX77686_LDO21,
+ MAX77686_LDO22,
+ MAX77686_LDO23,
+ MAX77686_LDO24,
+ MAX77686_LDO25,
+ MAX77686_LDO26,
+ MAX77686_BUCK1,
+ MAX77686_BUCK2,
+ MAX77686_BUCK3,
+ MAX77686_BUCK4,
+ MAX77686_BUCK5,
+ MAX77686_BUCK6,
+ MAX77686_BUCK7,
+ MAX77686_BUCK8,
+ MAX77686_BUCK9,
+
+ MAX77686_REG_MAX,
+};
+
+struct max77686_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+enum max77686_opmode {
+ MAX77686_OPMODE_NORMAL,
+ MAX77686_OPMODE_LP,
+ MAX77686_OPMODE_STANDBY,
+};
+
+struct max77686_opmode_data {
+ int id;
+ int mode;
+};
+
+struct max77686_platform_data {
+ /* IRQ */
+ int irq_gpio;
+ int ono;
+ int wakeup;
+
+ /* ---- PMIC ---- */
+ struct max77686_regulator_data *regulators;
+ int num_regulators;
+
+ struct max77686_opmode_data *opmode_data;
+
+ /*
+ * GPIO-DVS feature is not enabled with the current version of
+ * MAX77686 driver. Buck2/3/4_voltages[0] is used as the default
+ * voltage at probe. DVS/SELB gpios are set as OUTPUT-LOW.
+ */
+ int buck234_gpio_dvs[3]; /* GPIO of [0]DVS1, [1]DVS2, [2]DVS3 */
+ int buck234_gpio_selb[3]; /* [0]SELB2, [1]SELB3, [2]SELB4 */
+ unsigned int buck2_voltage[8]; /* buckx_voltage in uV */
+ unsigned int buck3_voltage[8];
+ unsigned int buck4_voltage[8];
+};
+
+#endif /* __LINUX_MFD_MAX77686_H */
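
Illustrative aside, not part of the patch: with GPIO-DVS left disabled, a board file would typically populate only the first buckX_voltage entry and a short regulator list. A rough sketch, with hypothetical names and values:

static struct regulator_init_data example_buck2_initdata = {
	.constraints = {
		.name           = "vdd_arm",
		.min_uV         = 850000,
		.max_uV         = 1300000,
		.always_on      = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct max77686_regulator_data example_regulators[] = {
	{ .id = MAX77686_BUCK2, .initdata = &example_buck2_initdata },
};

static struct max77686_platform_data example_max77686_pdata = {
	.regulators     = example_regulators,
	.num_regulators = ARRAY_SIZE(example_regulators),
	/* only index 0 matters while GPIO-DVS is disabled */
	.buck2_voltage  = { 1100000 },
};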
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 68263c5fa53c..1eeae5c07915 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -190,7 +190,6 @@ struct max77693_dev {
struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
struct i2c_client *muic; /* 0x4A , MUIC */
struct i2c_client *haptic; /* 0x90 , Haptic */
- struct mutex iolock;
int type;
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 3f4deb62d6b0..830152cfae33 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -23,6 +23,8 @@
#define __LINUX_MFD_MAX8997_PRIV_H
#include <linux/i2c.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
#define MAX8997_REG_INVALID (0xff)
@@ -325,7 +327,7 @@ struct max8997_dev {
int irq;
int ono;
- int irq_base;
+ struct irq_domain *irq_domain;
struct mutex irqlock;
int irq_masks_cur[MAX8997_IRQ_GROUP_NR];
int irq_masks_cache[MAX8997_IRQ_GROUP_NR];
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index b40c08cd30bc..328d8e24b533 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -181,7 +181,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
- int irq_base;
int ono;
int wakeup;
diff --git a/include/linux/mfd/s5m87xx/s5m-core.h b/include/linux/mfd/s5m87xx/s5m-core.h
deleted file mode 100644
index 0b2e0ed309f5..000000000000
--- a/include/linux/mfd/s5m87xx/s5m-core.h
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * s5m-core.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef __LINUX_MFD_S5M_CORE_H
-#define __LINUX_MFD_S5M_CORE_H
-
-#define NUM_IRQ_REGS 4
-
-enum s5m_device_type {
- S5M8751X,
- S5M8763X,
- S5M8767X,
-};
-
-/* S5M8767 registers */
-enum s5m8767_reg {
- S5M8767_REG_ID,
- S5M8767_REG_INT1,
- S5M8767_REG_INT2,
- S5M8767_REG_INT3,
- S5M8767_REG_INT1M,
- S5M8767_REG_INT2M,
- S5M8767_REG_INT3M,
- S5M8767_REG_STATUS1,
- S5M8767_REG_STATUS2,
- S5M8767_REG_STATUS3,
- S5M8767_REG_CTRL1,
- S5M8767_REG_CTRL2,
- S5M8767_REG_LOWBAT1,
- S5M8767_REG_LOWBAT2,
- S5M8767_REG_BUCHG,
- S5M8767_REG_DVSRAMP,
- S5M8767_REG_DVSTIMER2 = 0x10,
- S5M8767_REG_DVSTIMER3,
- S5M8767_REG_DVSTIMER4,
- S5M8767_REG_LDO1,
- S5M8767_REG_LDO2,
- S5M8767_REG_LDO3,
- S5M8767_REG_LDO4,
- S5M8767_REG_LDO5,
- S5M8767_REG_LDO6,
- S5M8767_REG_LDO7,
- S5M8767_REG_LDO8,
- S5M8767_REG_LDO9,
- S5M8767_REG_LDO10,
- S5M8767_REG_LDO11,
- S5M8767_REG_LDO12,
- S5M8767_REG_LDO13,
- S5M8767_REG_LDO14 = 0x20,
- S5M8767_REG_LDO15,
- S5M8767_REG_LDO16,
- S5M8767_REG_LDO17,
- S5M8767_REG_LDO18,
- S5M8767_REG_LDO19,
- S5M8767_REG_LDO20,
- S5M8767_REG_LDO21,
- S5M8767_REG_LDO22,
- S5M8767_REG_LDO23,
- S5M8767_REG_LDO24,
- S5M8767_REG_LDO25,
- S5M8767_REG_LDO26,
- S5M8767_REG_LDO27,
- S5M8767_REG_LDO28,
- S5M8767_REG_UVLO = 0x31,
- S5M8767_REG_BUCK1CTRL1,
- S5M8767_REG_BUCK1CTRL2,
- S5M8767_REG_BUCK2CTRL,
- S5M8767_REG_BUCK2DVS1,
- S5M8767_REG_BUCK2DVS2,
- S5M8767_REG_BUCK2DVS3,
- S5M8767_REG_BUCK2DVS4,
- S5M8767_REG_BUCK2DVS5,
- S5M8767_REG_BUCK2DVS6,
- S5M8767_REG_BUCK2DVS7,
- S5M8767_REG_BUCK2DVS8,
- S5M8767_REG_BUCK3CTRL,
- S5M8767_REG_BUCK3DVS1,
- S5M8767_REG_BUCK3DVS2,
- S5M8767_REG_BUCK3DVS3,
- S5M8767_REG_BUCK3DVS4,
- S5M8767_REG_BUCK3DVS5,
- S5M8767_REG_BUCK3DVS6,
- S5M8767_REG_BUCK3DVS7,
- S5M8767_REG_BUCK3DVS8,
- S5M8767_REG_BUCK4CTRL,
- S5M8767_REG_BUCK4DVS1,
- S5M8767_REG_BUCK4DVS2,
- S5M8767_REG_BUCK4DVS3,
- S5M8767_REG_BUCK4DVS4,
- S5M8767_REG_BUCK4DVS5,
- S5M8767_REG_BUCK4DVS6,
- S5M8767_REG_BUCK4DVS7,
- S5M8767_REG_BUCK4DVS8,
- S5M8767_REG_BUCK5CTRL1,
- S5M8767_REG_BUCK5CTRL2,
- S5M8767_REG_BUCK5CTRL3,
- S5M8767_REG_BUCK5CTRL4,
- S5M8767_REG_BUCK5CTRL5,
- S5M8767_REG_BUCK6CTRL1,
- S5M8767_REG_BUCK6CTRL2,
- S5M8767_REG_BUCK7CTRL1,
- S5M8767_REG_BUCK7CTRL2,
- S5M8767_REG_BUCK8CTRL1,
- S5M8767_REG_BUCK8CTRL2,
- S5M8767_REG_BUCK9CTRL1,
- S5M8767_REG_BUCK9CTRL2,
- S5M8767_REG_LDO1CTRL,
- S5M8767_REG_LDO2_1CTRL,
- S5M8767_REG_LDO2_2CTRL,
- S5M8767_REG_LDO2_3CTRL,
- S5M8767_REG_LDO2_4CTRL,
- S5M8767_REG_LDO3CTRL,
- S5M8767_REG_LDO4CTRL,
- S5M8767_REG_LDO5CTRL,
- S5M8767_REG_LDO6CTRL,
- S5M8767_REG_LDO7CTRL,
- S5M8767_REG_LDO8CTRL,
- S5M8767_REG_LDO9CTRL,
- S5M8767_REG_LDO10CTRL,
- S5M8767_REG_LDO11CTRL,
- S5M8767_REG_LDO12CTRL,
- S5M8767_REG_LDO13CTRL,
- S5M8767_REG_LDO14CTRL,
- S5M8767_REG_LDO15CTRL,
- S5M8767_REG_LDO16CTRL,
- S5M8767_REG_LDO17CTRL,
- S5M8767_REG_LDO18CTRL,
- S5M8767_REG_LDO19CTRL,
- S5M8767_REG_LDO20CTRL,
- S5M8767_REG_LDO21CTRL,
- S5M8767_REG_LDO22CTRL,
- S5M8767_REG_LDO23CTRL,
- S5M8767_REG_LDO24CTRL,
- S5M8767_REG_LDO25CTRL,
- S5M8767_REG_LDO26CTRL,
- S5M8767_REG_LDO27CTRL,
- S5M8767_REG_LDO28CTRL,
-};
-
-/* S5M8763 registers */
-enum s5m8763_reg {
- S5M8763_REG_IRQ1,
- S5M8763_REG_IRQ2,
- S5M8763_REG_IRQ3,
- S5M8763_REG_IRQ4,
- S5M8763_REG_IRQM1,
- S5M8763_REG_IRQM2,
- S5M8763_REG_IRQM3,
- S5M8763_REG_IRQM4,
- S5M8763_REG_STATUS1,
- S5M8763_REG_STATUS2,
- S5M8763_REG_STATUSM1,
- S5M8763_REG_STATUSM2,
- S5M8763_REG_CHGR1,
- S5M8763_REG_CHGR2,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
- S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
- S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
- S5M8763_REG_ONOFF1,
- S5M8763_REG_ONOFF2,
- S5M8763_REG_ONOFF3,
- S5M8763_REG_ONOFF4,
- S5M8763_REG_BUCK1_VOLTAGE1,
- S5M8763_REG_BUCK1_VOLTAGE2,
- S5M8763_REG_BUCK1_VOLTAGE3,
- S5M8763_REG_BUCK1_VOLTAGE4,
- S5M8763_REG_BUCK2_VOLTAGE1,
- S5M8763_REG_BUCK2_VOLTAGE2,
- S5M8763_REG_BUCK3,
- S5M8763_REG_BUCK4,
- S5M8763_REG_LDO1_LDO2,
- S5M8763_REG_LDO3,
- S5M8763_REG_LDO4,
- S5M8763_REG_LDO5,
- S5M8763_REG_LDO6,
- S5M8763_REG_LDO7,
- S5M8763_REG_LDO7_LDO8,
- S5M8763_REG_LDO9_LDO10,
- S5M8763_REG_LDO11,
- S5M8763_REG_LDO12,
- S5M8763_REG_LDO13,
- S5M8763_REG_LDO14,
- S5M8763_REG_LDO15,
- S5M8763_REG_LDO16,
- S5M8763_REG_BKCHR,
- S5M8763_REG_LBCNFG1,
- S5M8763_REG_LBCNFG2,
-};
-
-enum s5m8767_irq {
- S5M8767_IRQ_PWRR,
- S5M8767_IRQ_PWRF,
- S5M8767_IRQ_PWR1S,
- S5M8767_IRQ_JIGR,
- S5M8767_IRQ_JIGF,
- S5M8767_IRQ_LOWBAT2,
- S5M8767_IRQ_LOWBAT1,
-
- S5M8767_IRQ_MRB,
- S5M8767_IRQ_DVSOK2,
- S5M8767_IRQ_DVSOK3,
- S5M8767_IRQ_DVSOK4,
-
- S5M8767_IRQ_RTC60S,
- S5M8767_IRQ_RTCA1,
- S5M8767_IRQ_RTCA2,
- S5M8767_IRQ_SMPL,
- S5M8767_IRQ_RTC1S,
- S5M8767_IRQ_WTSR,
-
- S5M8767_IRQ_NR,
-};
-
-#define S5M8767_IRQ_PWRR_MASK (1 << 0)
-#define S5M8767_IRQ_PWRF_MASK (1 << 1)
-#define S5M8767_IRQ_PWR1S_MASK (1 << 3)
-#define S5M8767_IRQ_JIGR_MASK (1 << 4)
-#define S5M8767_IRQ_JIGF_MASK (1 << 5)
-#define S5M8767_IRQ_LOWBAT2_MASK (1 << 6)
-#define S5M8767_IRQ_LOWBAT1_MASK (1 << 7)
-
-#define S5M8767_IRQ_MRB_MASK (1 << 2)
-#define S5M8767_IRQ_DVSOK2_MASK (1 << 3)
-#define S5M8767_IRQ_DVSOK3_MASK (1 << 4)
-#define S5M8767_IRQ_DVSOK4_MASK (1 << 5)
-
-#define S5M8767_IRQ_RTC60S_MASK (1 << 0)
-#define S5M8767_IRQ_RTCA1_MASK (1 << 1)
-#define S5M8767_IRQ_RTCA2_MASK (1 << 2)
-#define S5M8767_IRQ_SMPL_MASK (1 << 3)
-#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
-#define S5M8767_IRQ_WTSR_MASK (1 << 5)
-
-enum s5m8763_irq {
- S5M8763_IRQ_DCINF,
- S5M8763_IRQ_DCINR,
- S5M8763_IRQ_JIGF,
- S5M8763_IRQ_JIGR,
- S5M8763_IRQ_PWRONF,
- S5M8763_IRQ_PWRONR,
-
- S5M8763_IRQ_WTSREVNT,
- S5M8763_IRQ_SMPLEVNT,
- S5M8763_IRQ_ALARM1,
- S5M8763_IRQ_ALARM0,
-
- S5M8763_IRQ_ONKEY1S,
- S5M8763_IRQ_TOPOFFR,
- S5M8763_IRQ_DCINOVPR,
- S5M8763_IRQ_CHGRSTF,
- S5M8763_IRQ_DONER,
- S5M8763_IRQ_CHGFAULT,
-
- S5M8763_IRQ_LOBAT1,
- S5M8763_IRQ_LOBAT2,
-
- S5M8763_IRQ_NR,
-};
-
-#define S5M8763_IRQ_DCINF_MASK (1 << 2)
-#define S5M8763_IRQ_DCINR_MASK (1 << 3)
-#define S5M8763_IRQ_JIGF_MASK (1 << 4)
-#define S5M8763_IRQ_JIGR_MASK (1 << 5)
-#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
-#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
-
-#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
-#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
-#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
-#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
-
-#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
-#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
-#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
-#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
-#define S5M8763_IRQ_DONER_MASK (1 << 5)
-#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
-
-#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
-#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
-
-#define S5M8763_ENRAMP (1 << 4)
-
-/**
- * struct s5m87xx_dev - s5m87xx master device for sub-drivers
- * @dev: master device of the chip (can be used to access platform data)
- * @i2c: i2c client private data for regulator
- * @rtc: i2c client private data for rtc
- * @iolock: mutex for serializing io access
- * @irqlock: mutex for buslock
- * @irq_base: base IRQ number for s5m87xx, required for IRQs
- * @irq: generic IRQ number for s5m87xx
- * @ono: power onoff IRQ number for s5m87xx
- * @irq_masks_cur: currently active value
- * @irq_masks_cache: cached hardware value
- * @type: indicate which s5m87xx "variant" is used
- */
-struct s5m87xx_dev {
- struct device *dev;
- struct regmap *regmap;
- struct i2c_client *i2c;
- struct i2c_client *rtc;
- struct mutex iolock;
- struct mutex irqlock;
-
- int device_type;
- int irq_base;
- int irq;
- int ono;
- u8 irq_masks_cur[NUM_IRQ_REGS];
- u8 irq_masks_cache[NUM_IRQ_REGS];
- int type;
- bool wakeup;
-};
-
-int s5m_irq_init(struct s5m87xx_dev *s5m87xx);
-void s5m_irq_exit(struct s5m87xx_dev *s5m87xx);
-int s5m_irq_resume(struct s5m87xx_dev *s5m87xx);
-
-extern int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest);
-extern int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf);
-extern int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value);
-extern int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf);
-extern int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask);
-
-struct s5m_platform_data {
- struct s5m_regulator_data *regulators;
- struct s5m_opmode_data *opmode;
- int device_type;
- int num_regulators;
-
- int irq_base;
- int (*cfg_pmic_irq)(void);
-
- int ono;
- bool wakeup;
- bool buck_voltage_lock;
-
- int buck_gpios[3];
- int buck_ds[3];
- int buck2_voltage[8];
- bool buck2_gpiodvs;
- int buck3_voltage[8];
- bool buck3_gpiodvs;
- int buck4_voltage[8];
- bool buck4_gpiodvs;
-
- int buck_set1;
- int buck_set2;
- int buck_set3;
- int buck2_enable;
- int buck3_enable;
- int buck4_enable;
- int buck_default_idx;
- int buck2_default_idx;
- int buck3_default_idx;
- int buck4_default_idx;
-
- int buck_ramp_delay;
- bool buck2_ramp_enable;
- bool buck3_ramp_enable;
- bool buck4_ramp_enable;
-
- int buck2_init;
- int buck3_init;
- int buck4_init;
-};
-
-#endif /* __LINUX_MFD_S5M_CORE_H */
diff --git a/include/linux/mfd/s5m87xx/s5m-pmic.h b/include/linux/mfd/s5m87xx/s5m-pmic.h
deleted file mode 100644
index 7c719f20f58a..000000000000
--- a/include/linux/mfd/s5m87xx/s5m-pmic.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* s5m87xx.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __LINUX_MFD_S5M_PMIC_H
-#define __LINUX_MFD_S5M_PMIC_H
-
-#include <linux/regulator/machine.h>
-
-/* S5M8767 regulator ids */
-enum s5m8767_regulators {
- S5M8767_LDO1,
- S5M8767_LDO2,
- S5M8767_LDO3,
- S5M8767_LDO4,
- S5M8767_LDO5,
- S5M8767_LDO6,
- S5M8767_LDO7,
- S5M8767_LDO8,
- S5M8767_LDO9,
- S5M8767_LDO10,
- S5M8767_LDO11,
- S5M8767_LDO12,
- S5M8767_LDO13,
- S5M8767_LDO14,
- S5M8767_LDO15,
- S5M8767_LDO16,
- S5M8767_LDO17,
- S5M8767_LDO18,
- S5M8767_LDO19,
- S5M8767_LDO20,
- S5M8767_LDO21,
- S5M8767_LDO22,
- S5M8767_LDO23,
- S5M8767_LDO24,
- S5M8767_LDO25,
- S5M8767_LDO26,
- S5M8767_LDO27,
- S5M8767_LDO28,
- S5M8767_BUCK1,
- S5M8767_BUCK2,
- S5M8767_BUCK3,
- S5M8767_BUCK4,
- S5M8767_BUCK5,
- S5M8767_BUCK6,
- S5M8767_BUCK7,
- S5M8767_BUCK8,
- S5M8767_BUCK9,
- S5M8767_AP_EN32KHZ,
- S5M8767_CP_EN32KHZ,
-
- S5M8767_REG_MAX,
-};
-
-#define S5M8767_ENCTRL_SHIFT 6
-
-/* S5M8763 regulator ids */
-enum s5m8763_regulators {
- S5M8763_LDO1,
- S5M8763_LDO2,
- S5M8763_LDO3,
- S5M8763_LDO4,
- S5M8763_LDO5,
- S5M8763_LDO6,
- S5M8763_LDO7,
- S5M8763_LDO8,
- S5M8763_LDO9,
- S5M8763_LDO10,
- S5M8763_LDO11,
- S5M8763_LDO12,
- S5M8763_LDO13,
- S5M8763_LDO14,
- S5M8763_LDO15,
- S5M8763_LDO16,
- S5M8763_BUCK1,
- S5M8763_BUCK2,
- S5M8763_BUCK3,
- S5M8763_BUCK4,
- S5M8763_AP_EN32KHZ,
- S5M8763_CP_EN32KHZ,
- S5M8763_ENCHGVI,
- S5M8763_ESAFEUSB1,
- S5M8763_ESAFEUSB2,
-};
-
-/**
- * s5m87xx_regulator_data - regulator data
- * @id: regulator id
- * @initdata: regulator init data (contraints, supplies, ...)
- */
-struct s5m_regulator_data {
- int id;
- struct regulator_init_data *initdata;
-};
-
-/*
- * s5m_opmode_data - regulator operation mode data
- * @id: regulator id
- * @mode: regulator operation mode
- */
-struct s5m_opmode_data {
- int id;
- int mode;
-};
-
-/*
- * s5m regulator operation mode
- * S5M_OPMODE_OFF Regulator always OFF
- * S5M_OPMODE_ON Regulator always ON
- * S5M_OPMODE_LOWPOWER Regulator is on in low-power mode
- * S5M_OPMODE_SUSPEND Regulator is changed by PWREN pin
- * If PWREN is high, regulator is on
- * If PWREN is low, regulator is off
- */
-
-enum s5m_opmode {
- S5M_OPMODE_OFF,
- S5M_OPMODE_ON,
- S5M_OPMODE_LOWPOWER,
- S5M_OPMODE_SUSPEND,
-};
-
-#endif /* __LINUX_MFD_S5M_PMIC_H */
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
new file mode 100644
index 000000000000..b50c38f8bc48
--- /dev/null
+++ b/include/linux/mfd/samsung/core.h
@@ -0,0 +1,159 @@
+/*
+ * core.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_SEC_CORE_H
+#define __LINUX_MFD_SEC_CORE_H
+
+#define NUM_IRQ_REGS 4
+
+enum sec_device_type {
+ S5M8751X,
+ S5M8763X,
+ S5M8767X,
+ S2MPS11X,
+};
+
+/**
+ * struct sec_pmic_dev - s5m87xx master device for sub-drivers
+ * @dev: master device of the chip (can be used to access platform data)
+ * @i2c: i2c client private data for regulator
+ * @rtc: i2c client private data for rtc
+ * @iolock: mutex for serializing io access
+ * @irqlock: mutex for buslock
+ * @irq_base: base IRQ number for sec-pmic, required for IRQs
+ * @irq: generic IRQ number for s5m87xx
+ * @ono: power onoff IRQ number for s5m87xx
+ * @irq_masks_cur: currently active value
+ * @irq_masks_cache: cached hardware value
+ * @type: indicate which s5m87xx "variant" is used
+ */
+struct sec_pmic_dev {
+ struct device *dev;
+ struct regmap *regmap;
+ struct i2c_client *i2c;
+ struct i2c_client *rtc;
+ struct mutex iolock;
+ struct mutex irqlock;
+
+ int device_type;
+ int irq_base;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+
+ int ono;
+ u8 irq_masks_cur[NUM_IRQ_REGS];
+ u8 irq_masks_cache[NUM_IRQ_REGS];
+ int type;
+ bool wakeup;
+};
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic);
+void sec_irq_exit(struct sec_pmic_dev *sec_pmic);
+int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
+
+extern int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest);
+extern int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf);
+extern int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value);
+extern int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf);
+extern int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask);
+
+struct sec_platform_data {
+ struct sec_regulator_data *regulators;
+ struct sec_opmode_data *opmode;
+ int device_type;
+ int num_regulators;
+
+ int irq_base;
+ int (*cfg_pmic_irq)(void);
+
+ int ono;
+ bool wakeup;
+ bool buck_voltage_lock;
+
+ int buck_gpios[3];
+ int buck_ds[3];
+ int buck2_voltage[8];
+ bool buck2_gpiodvs;
+ int buck3_voltage[8];
+ bool buck3_gpiodvs;
+ int buck4_voltage[8];
+ bool buck4_gpiodvs;
+
+ int buck_set1;
+ int buck_set2;
+ int buck_set3;
+ int buck2_enable;
+ int buck3_enable;
+ int buck4_enable;
+ int buck_default_idx;
+ int buck2_default_idx;
+ int buck3_default_idx;
+ int buck4_default_idx;
+
+ int buck_ramp_delay;
+
+ int buck2_ramp_delay;
+ int buck34_ramp_delay;
+ int buck5_ramp_delay;
+ int buck16_ramp_delay;
+ int buck7810_ramp_delay;
+ int buck9_ramp_delay;
+
+ bool buck2_ramp_enable;
+ bool buck3_ramp_enable;
+ bool buck4_ramp_enable;
+ bool buck6_ramp_enable;
+
+ int buck2_init;
+ int buck3_init;
+ int buck4_init;
+};
+
+/**
+ * sec_regulator_data - regulator data
+ * @id: regulator id
+ * @initdata: regulator init data (constraints, supplies, ...)
+ */
+struct sec_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+/*
+ * sec_opmode_data - regulator operation mode data
+ * @id: regulator id
+ * @mode: regulator operation mode
+ */
+struct sec_opmode_data {
+ int id;
+ int mode;
+};
+
+/*
+ * samsung regulator operation mode
+ * SEC_OPMODE_OFF Regulator always OFF
+ * SEC_OPMODE_ON Regulator always ON
+ * SEC_OPMODE_LOWPOWER Regulator is on in low-power mode
+ * SEC_OPMODE_SUSPEND Regulator is changed by PWREN pin
+ * If PWREN is high, regulator is on
+ * If PWREN is low, regulator is off
+ */
+
+enum sec_opmode {
+ SEC_OPMODE_OFF,
+ SEC_OPMODE_ON,
+ SEC_OPMODE_LOWPOWER,
+ SEC_OPMODE_SUSPEND,
+};
+
+#endif /* __LINUX_MFD_SEC_CORE_H */
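
As a rough sketch of how the sec_reg_* accessors declared above are meant to be consumed by the sub-drivers, a read of an enable field might look like the following; the control-register argument and the two-bit on/off field at S5M8767_ENCTRL_SHIFT are illustrative assumptions borrowed from the s5m8767 definitions later in this patch.

	static int example_ldo_is_enabled(struct sec_pmic_dev *sec_pmic, u8 ctrl_reg)
	{
		unsigned int val;
		int ret;

		/* Reads one register through the shared regmap-backed accessor. */
		ret = sec_reg_read(sec_pmic, ctrl_reg, &val);
		if (ret)
			return ret;

		/* Assumed layout: a two-bit on/off control field at bit 6. */
		return (val >> S5M8767_ENCTRL_SHIFT) & 0x3 ? 1 : 0;
	}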
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
new file mode 100644
index 000000000000..d43b4f9e7fb2
--- /dev/null
+++ b/include/linux/mfd/samsung/irq.h
@@ -0,0 +1,152 @@
+/* irq.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_SEC_IRQ_H
+#define __LINUX_MFD_SEC_IRQ_H
+
+enum s2mps11_irq {
+ S2MPS11_IRQ_PWRONF,
+ S2MPS11_IRQ_PWRONR,
+ S2MPS11_IRQ_JIGONBF,
+ S2MPS11_IRQ_JIGONBR,
+ S2MPS11_IRQ_ACOKBF,
+ S2MPS11_IRQ_ACOKBR,
+ S2MPS11_IRQ_PWRON1S,
+ S2MPS11_IRQ_MRB,
+
+ S2MPS11_IRQ_RTC60S,
+ S2MPS11_IRQ_RTCA1,
+ S2MPS11_IRQ_RTCA2,
+ S2MPS11_IRQ_SMPL,
+ S2MPS11_IRQ_RTC1S,
+ S2MPS11_IRQ_WTSR,
+
+ S2MPS11_IRQ_INT120C,
+ S2MPS11_IRQ_INT140C,
+
+ S2MPS11_IRQ_NR,
+};
+
+#define S2MPS11_IRQ_PWRONF_MASK (1 << 0)
+#define S2MPS11_IRQ_PWRONR_MASK (1 << 1)
+#define S2MPS11_IRQ_JIGONBF_MASK (1 << 2)
+#define S2MPS11_IRQ_JIGONBR_MASK (1 << 3)
+#define S2MPS11_IRQ_ACOKBF_MASK (1 << 4)
+#define S2MPS11_IRQ_ACOKBR_MASK (1 << 5)
+#define S2MPS11_IRQ_PWRON1S_MASK (1 << 6)
+#define S2MPS11_IRQ_MRB_MASK (1 << 7)
+
+#define S2MPS11_IRQ_RTC60S_MASK (1 << 0)
+#define S2MPS11_IRQ_RTCA1_MASK (1 << 1)
+#define S2MPS11_IRQ_RTCA2_MASK (1 << 2)
+#define S2MPS11_IRQ_SMPL_MASK (1 << 3)
+#define S2MPS11_IRQ_RTC1S_MASK (1 << 4)
+#define S2MPS11_IRQ_WTSR_MASK (1 << 5)
+
+#define S2MPS11_IRQ_INT120C_MASK (1 << 0)
+#define S2MPS11_IRQ_INT140C_MASK (1 << 1)
+
+enum s5m8767_irq {
+ S5M8767_IRQ_PWRR,
+ S5M8767_IRQ_PWRF,
+ S5M8767_IRQ_PWR1S,
+ S5M8767_IRQ_JIGR,
+ S5M8767_IRQ_JIGF,
+ S5M8767_IRQ_LOWBAT2,
+ S5M8767_IRQ_LOWBAT1,
+
+ S5M8767_IRQ_MRB,
+ S5M8767_IRQ_DVSOK2,
+ S5M8767_IRQ_DVSOK3,
+ S5M8767_IRQ_DVSOK4,
+
+ S5M8767_IRQ_RTC60S,
+ S5M8767_IRQ_RTCA1,
+ S5M8767_IRQ_RTCA2,
+ S5M8767_IRQ_SMPL,
+ S5M8767_IRQ_RTC1S,
+ S5M8767_IRQ_WTSR,
+
+ S5M8767_IRQ_NR,
+};
+
+#define S5M8767_IRQ_PWRR_MASK (1 << 0)
+#define S5M8767_IRQ_PWRF_MASK (1 << 1)
+#define S5M8767_IRQ_PWR1S_MASK (1 << 3)
+#define S5M8767_IRQ_JIGR_MASK (1 << 4)
+#define S5M8767_IRQ_JIGF_MASK (1 << 5)
+#define S5M8767_IRQ_LOWBAT2_MASK (1 << 6)
+#define S5M8767_IRQ_LOWBAT1_MASK (1 << 7)
+
+#define S5M8767_IRQ_MRB_MASK (1 << 2)
+#define S5M8767_IRQ_DVSOK2_MASK (1 << 3)
+#define S5M8767_IRQ_DVSOK3_MASK (1 << 4)
+#define S5M8767_IRQ_DVSOK4_MASK (1 << 5)
+
+#define S5M8767_IRQ_RTC60S_MASK (1 << 0)
+#define S5M8767_IRQ_RTCA1_MASK (1 << 1)
+#define S5M8767_IRQ_RTCA2_MASK (1 << 2)
+#define S5M8767_IRQ_SMPL_MASK (1 << 3)
+#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
+#define S5M8767_IRQ_WTSR_MASK (1 << 5)
+
+enum s5m8763_irq {
+ S5M8763_IRQ_DCINF,
+ S5M8763_IRQ_DCINR,
+ S5M8763_IRQ_JIGF,
+ S5M8763_IRQ_JIGR,
+ S5M8763_IRQ_PWRONF,
+ S5M8763_IRQ_PWRONR,
+
+ S5M8763_IRQ_WTSREVNT,
+ S5M8763_IRQ_SMPLEVNT,
+ S5M8763_IRQ_ALARM1,
+ S5M8763_IRQ_ALARM0,
+
+ S5M8763_IRQ_ONKEY1S,
+ S5M8763_IRQ_TOPOFFR,
+ S5M8763_IRQ_DCINOVPR,
+ S5M8763_IRQ_CHGRSTF,
+ S5M8763_IRQ_DONER,
+ S5M8763_IRQ_CHGFAULT,
+
+ S5M8763_IRQ_LOBAT1,
+ S5M8763_IRQ_LOBAT2,
+
+ S5M8763_IRQ_NR,
+};
+
+#define S5M8763_IRQ_DCINF_MASK (1 << 2)
+#define S5M8763_IRQ_DCINR_MASK (1 << 3)
+#define S5M8763_IRQ_JIGF_MASK (1 << 4)
+#define S5M8763_IRQ_JIGR_MASK (1 << 5)
+#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
+#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
+
+#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
+#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
+#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
+#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
+
+#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
+#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
+#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
+#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
+#define S5M8763_IRQ_DONER_MASK (1 << 5)
+#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
+
+#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
+#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
+
+#define S5M8763_ENRAMP (1 << 4)
+
+#endif /* __LINUX_MFD_SEC_IRQ_H */
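
To show how the per-bank mask macros line up with the status registers, a hypothetical decoder for the first S2MPS11 interrupt register could read as below; the bank assignment (power-key and jig events in INT1) follows the bit positions implied by the masks above.

	static void example_decode_s2mps11_int1(u8 int1)
	{
		if (int1 & S2MPS11_IRQ_PWRONF_MASK)
			pr_debug("PWRON falling edge\n");
		if (int1 & S2MPS11_IRQ_PWRONR_MASK)
			pr_debug("PWRON rising edge\n");
		if (int1 & S2MPS11_IRQ_JIGONBF_MASK)
			pr_debug("JIGONB falling edge\n");
	}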
diff --git a/include/linux/mfd/s5m87xx/s5m-rtc.h b/include/linux/mfd/samsung/rtc.h
index 6ce8da264cec..71597e20cddb 100644
--- a/include/linux/mfd/s5m87xx/s5m-rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,5 +1,4 @@
-/*
- * s5m-rtc.h
+/* rtc.h
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
@@ -11,39 +10,39 @@
*
*/
-#ifndef __LINUX_MFD_S5M_RTC_H
-#define __LINUX_MFD_S5M_RTC_H
+#ifndef __LINUX_MFD_SEC_RTC_H
+#define __LINUX_MFD_SEC_RTC_H
-enum s5m87xx_rtc_reg {
- S5M87XX_RTC_SEC,
- S5M87XX_RTC_MIN,
- S5M87XX_RTC_HOUR,
- S5M87XX_RTC_WEEKDAY,
- S5M87XX_RTC_DATE,
- S5M87XX_RTC_MONTH,
- S5M87XX_RTC_YEAR1,
- S5M87XX_RTC_YEAR2,
- S5M87XX_ALARM0_SEC,
- S5M87XX_ALARM0_MIN,
- S5M87XX_ALARM0_HOUR,
- S5M87XX_ALARM0_WEEKDAY,
- S5M87XX_ALARM0_DATE,
- S5M87XX_ALARM0_MONTH,
- S5M87XX_ALARM0_YEAR1,
- S5M87XX_ALARM0_YEAR2,
- S5M87XX_ALARM1_SEC,
- S5M87XX_ALARM1_MIN,
- S5M87XX_ALARM1_HOUR,
- S5M87XX_ALARM1_WEEKDAY,
- S5M87XX_ALARM1_DATE,
- S5M87XX_ALARM1_MONTH,
- S5M87XX_ALARM1_YEAR1,
- S5M87XX_ALARM1_YEAR2,
- S5M87XX_ALARM0_CONF,
- S5M87XX_ALARM1_CONF,
- S5M87XX_RTC_STATUS,
- S5M87XX_WTSR_SMPL_CNTL,
- S5M87XX_RTC_UDR_CON,
+enum sec_rtc_reg {
+ SEC_RTC_SEC,
+ SEC_RTC_MIN,
+ SEC_RTC_HOUR,
+ SEC_RTC_WEEKDAY,
+ SEC_RTC_DATE,
+ SEC_RTC_MONTH,
+ SEC_RTC_YEAR1,
+ SEC_RTC_YEAR2,
+ SEC_ALARM0_SEC,
+ SEC_ALARM0_MIN,
+ SEC_ALARM0_HOUR,
+ SEC_ALARM0_WEEKDAY,
+ SEC_ALARM0_DATE,
+ SEC_ALARM0_MONTH,
+ SEC_ALARM0_YEAR1,
+ SEC_ALARM0_YEAR2,
+ SEC_ALARM1_SEC,
+ SEC_ALARM1_MIN,
+ SEC_ALARM1_HOUR,
+ SEC_ALARM1_WEEKDAY,
+ SEC_ALARM1_DATE,
+ SEC_ALARM1_MONTH,
+ SEC_ALARM1_YEAR1,
+ SEC_ALARM1_YEAR2,
+ SEC_ALARM0_CONF,
+ SEC_ALARM1_CONF,
+ SEC_RTC_STATUS,
+ SEC_WTSR_SMPL_CNTL,
+ SEC_RTC_UDR_CON,
};
#define RTC_I2C_ADDR (0x0C >> 1)
@@ -81,4 +80,4 @@ enum {
RTC_YEAR2,
};
-#endif /* __LINUX_MFD_S5M_RTC_H */
+#endif /* __LINUX_MFD_SEC_RTC_H */
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
new file mode 100644
index 000000000000..ad2252f239d7
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -0,0 +1,196 @@
+/*
+ * s2mps11.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS11_H
+#define __LINUX_MFD_S2MPS11_H
+
+/* S2MPS11 registers */
+enum s2mps11_reg {
+ S2MPS11_REG_ID,
+ S2MPS11_REG_INT1,
+ S2MPS11_REG_INT2,
+ S2MPS11_REG_INT3,
+ S2MPS11_REG_INT1M,
+ S2MPS11_REG_INT2M,
+ S2MPS11_REG_INT3M,
+ S2MPS11_REG_ST1,
+ S2MPS11_REG_ST2,
+ S2MPS11_REG_OFFSRC,
+ S2MPS11_REG_PWRONSRC,
+ S2MPS11_REG_RTC_CTRL,
+ S2MPS11_REG_CTRL1,
+ S2MPS11_REG_ETC_TEST,
+ S2MPS11_REG_RSVD3,
+ S2MPS11_REG_BU_CHG,
+ S2MPS11_REG_RAMP,
+ S2MPS11_REG_RAMP_BUCK,
+ S2MPS11_REG_LDO1_8,
+ S2MPS11_REG_LDO9_16,
+ S2MPS11_REG_LDO17_24,
+ S2MPS11_REG_LDO25_32,
+ S2MPS11_REG_LDO33_38,
+ S2MPS11_REG_LDO1_8_1,
+ S2MPS11_REG_LDO9_16_1,
+ S2MPS11_REG_LDO17_24_1,
+ S2MPS11_REG_LDO25_32_1,
+ S2MPS11_REG_LDO33_38_1,
+ S2MPS11_REG_OTP_ADRL,
+ S2MPS11_REG_OTP_ADRH,
+ S2MPS11_REG_OTP_DATA,
+ S2MPS11_REG_MON1SEL,
+ S2MPS11_REG_MON2SEL,
+ S2MPS11_REG_LEE,
+ S2MPS11_REG_RSVD_NO,
+ S2MPS11_REG_UVLO,
+ S2MPS11_REG_LEE_NO,
+ S2MPS11_REG_B1CTRL1,
+ S2MPS11_REG_B1CTRL2,
+ S2MPS11_REG_B2CTRL1,
+ S2MPS11_REG_B2CTRL2,
+ S2MPS11_REG_B3CTRL1,
+ S2MPS11_REG_B3CTRL2,
+ S2MPS11_REG_B4CTRL1,
+ S2MPS11_REG_B4CTRL2,
+ S2MPS11_REG_B5CTRL1,
+ S2MPS11_REG_BUCK5_SW,
+ S2MPS11_REG_B5CTRL2,
+ S2MPS11_REG_B5CTRL3,
+ S2MPS11_REG_B5CTRL4,
+ S2MPS11_REG_B5CTRL5,
+ S2MPS11_REG_B6CTRL1,
+ S2MPS11_REG_B6CTRL2,
+ S2MPS11_REG_B7CTRL1,
+ S2MPS11_REG_B7CTRL2,
+ S2MPS11_REG_B8CTRL1,
+ S2MPS11_REG_B8CTRL2,
+ S2MPS11_REG_B9CTRL1,
+ S2MPS11_REG_B9CTRL2,
+ S2MPS11_REG_B10CTRL1,
+ S2MPS11_REG_B10CTRL2,
+ S2MPS11_REG_L1CTRL,
+ S2MPS11_REG_L2CTRL,
+ S2MPS11_REG_L3CTRL,
+ S2MPS11_REG_L4CTRL,
+ S2MPS11_REG_L5CTRL,
+ S2MPS11_REG_L6CTRL,
+ S2MPS11_REG_L7CTRL,
+ S2MPS11_REG_L8CTRL,
+ S2MPS11_REG_L9CTRL,
+ S2MPS11_REG_L10CTRL,
+ S2MPS11_REG_L11CTRL,
+ S2MPS11_REG_L12CTRL,
+ S2MPS11_REG_L13CTRL,
+ S2MPS11_REG_L14CTRL,
+ S2MPS11_REG_L15CTRL,
+ S2MPS11_REG_L16CTRL,
+ S2MPS11_REG_L17CTRL,
+ S2MPS11_REG_L18CTRL,
+ S2MPS11_REG_L19CTRL,
+ S2MPS11_REG_L20CTRL,
+ S2MPS11_REG_L21CTRL,
+ S2MPS11_REG_L22CTRL,
+ S2MPS11_REG_L23CTRL,
+ S2MPS11_REG_L24CTRL,
+ S2MPS11_REG_L25CTRL,
+ S2MPS11_REG_L26CTRL,
+ S2MPS11_REG_L27CTRL,
+ S2MPS11_REG_L28CTRL,
+ S2MPS11_REG_L29CTRL,
+ S2MPS11_REG_L30CTRL,
+ S2MPS11_REG_L31CTRL,
+ S2MPS11_REG_L32CTRL,
+ S2MPS11_REG_L33CTRL,
+ S2MPS11_REG_L34CTRL,
+ S2MPS11_REG_L35CTRL,
+ S2MPS11_REG_L36CTRL,
+ S2MPS11_REG_L37CTRL,
+ S2MPS11_REG_L38CTRL,
+};
+
+/* S2MPS11 regulator ids */
+enum s2mps11_regulators {
+ S2MPS11_LDO1,
+ S2MPS11_LDO2,
+ S2MPS11_LDO3,
+ S2MPS11_LDO4,
+ S2MPS11_LDO5,
+ S2MPS11_LDO6,
+ S2MPS11_LDO7,
+ S2MPS11_LDO8,
+ S2MPS11_LDO9,
+ S2MPS11_LDO10,
+ S2MPS11_LDO11,
+ S2MPS11_LDO12,
+ S2MPS11_LDO13,
+ S2MPS11_LDO14,
+ S2MPS11_LDO15,
+ S2MPS11_LDO16,
+ S2MPS11_LDO17,
+ S2MPS11_LDO18,
+ S2MPS11_LDO19,
+ S2MPS11_LDO20,
+ S2MPS11_LDO21,
+ S2MPS11_LDO22,
+ S2MPS11_LDO23,
+ S2MPS11_LDO24,
+ S2MPS11_LDO25,
+ S2MPS11_LDO26,
+ S2MPS11_LDO27,
+ S2MPS11_LDO28,
+ S2MPS11_LDO29,
+ S2MPS11_LDO30,
+ S2MPS11_LDO31,
+ S2MPS11_LDO32,
+ S2MPS11_LDO33,
+ S2MPS11_LDO34,
+ S2MPS11_LDO35,
+ S2MPS11_LDO36,
+ S2MPS11_LDO37,
+ S2MPS11_LDO38,
+ S2MPS11_BUCK1,
+ S2MPS11_BUCK2,
+ S2MPS11_BUCK3,
+ S2MPS11_BUCK4,
+ S2MPS11_BUCK5,
+ S2MPS11_BUCK6,
+ S2MPS11_BUCK7,
+ S2MPS11_BUCK8,
+ S2MPS11_BUCK9,
+ S2MPS11_BUCK10,
+ S2MPS11_AP_EN32KHZ,
+ S2MPS11_CP_EN32KHZ,
+ S2MPS11_BT_EN32KHZ,
+
+ S2MPS11_REG_MAX,
+};
+
+#define S2MPS11_BUCK_MIN1 600000
+#define S2MPS11_BUCK_MIN2 750000
+#define S2MPS11_BUCK_MIN3 3000000
+#define S2MPS11_LDO_MIN 800000
+#define S2MPS11_BUCK_STEP1 6250
+#define S2MPS11_BUCK_STEP2 12500
+#define S2MPS11_BUCK_STEP3 25000
+#define S2MPS11_LDO_STEP1 50000
+#define S2MPS11_LDO_STEP2 25000
+#define S2MPS11_LDO_VSEL_MASK 0x3F
+#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
+#define S2MPS11_ENABLE_SHIFT 0x06
+#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
+#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+
+#define S2MPS11_PMIC_EN_SHIFT 6
+#define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3)
+
+#endif /* __LINUX_MFD_S2MPS11_H */
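
The MIN/STEP/VSEL constants above suggest the usual linear selector-to-voltage mapping used by the regulator core; a hypothetical helper for the first buck range, under that assumption, would be:

	static int example_s2mps11_buck1_sel_to_uV(unsigned int sel)
	{
		/* Assumes linear encoding: voltage = MIN1 + selector * STEP1. */
		sel &= S2MPS11_BUCK_VSEL_MASK;
		return S2MPS11_BUCK_MIN1 + sel * S2MPS11_BUCK_STEP1;
	}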
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
new file mode 100644
index 000000000000..e025418e5589
--- /dev/null
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -0,0 +1,96 @@
+/* s5m8763.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S5M8763_H
+#define __LINUX_MFD_S5M8763_H
+
+/* S5M8763 registers */
+enum s5m8763_reg {
+ S5M8763_REG_IRQ1,
+ S5M8763_REG_IRQ2,
+ S5M8763_REG_IRQ3,
+ S5M8763_REG_IRQ4,
+ S5M8763_REG_IRQM1,
+ S5M8763_REG_IRQM2,
+ S5M8763_REG_IRQM3,
+ S5M8763_REG_IRQM4,
+ S5M8763_REG_STATUS1,
+ S5M8763_REG_STATUS2,
+ S5M8763_REG_STATUSM1,
+ S5M8763_REG_STATUSM2,
+ S5M8763_REG_CHGR1,
+ S5M8763_REG_CHGR2,
+ S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
+ S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
+ S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
+ S5M8763_REG_ONOFF1,
+ S5M8763_REG_ONOFF2,
+ S5M8763_REG_ONOFF3,
+ S5M8763_REG_ONOFF4,
+ S5M8763_REG_BUCK1_VOLTAGE1,
+ S5M8763_REG_BUCK1_VOLTAGE2,
+ S5M8763_REG_BUCK1_VOLTAGE3,
+ S5M8763_REG_BUCK1_VOLTAGE4,
+ S5M8763_REG_BUCK2_VOLTAGE1,
+ S5M8763_REG_BUCK2_VOLTAGE2,
+ S5M8763_REG_BUCK3,
+ S5M8763_REG_BUCK4,
+ S5M8763_REG_LDO1_LDO2,
+ S5M8763_REG_LDO3,
+ S5M8763_REG_LDO4,
+ S5M8763_REG_LDO5,
+ S5M8763_REG_LDO6,
+ S5M8763_REG_LDO7,
+ S5M8763_REG_LDO7_LDO8,
+ S5M8763_REG_LDO9_LDO10,
+ S5M8763_REG_LDO11,
+ S5M8763_REG_LDO12,
+ S5M8763_REG_LDO13,
+ S5M8763_REG_LDO14,
+ S5M8763_REG_LDO15,
+ S5M8763_REG_LDO16,
+ S5M8763_REG_BKCHR,
+ S5M8763_REG_LBCNFG1,
+ S5M8763_REG_LBCNFG2,
+};
+
+/* S5M8763 regulator ids */
+enum s5m8763_regulators {
+ S5M8763_LDO1,
+ S5M8763_LDO2,
+ S5M8763_LDO3,
+ S5M8763_LDO4,
+ S5M8763_LDO5,
+ S5M8763_LDO6,
+ S5M8763_LDO7,
+ S5M8763_LDO8,
+ S5M8763_LDO9,
+ S5M8763_LDO10,
+ S5M8763_LDO11,
+ S5M8763_LDO12,
+ S5M8763_LDO13,
+ S5M8763_LDO14,
+ S5M8763_LDO15,
+ S5M8763_LDO16,
+ S5M8763_BUCK1,
+ S5M8763_BUCK2,
+ S5M8763_BUCK3,
+ S5M8763_BUCK4,
+ S5M8763_AP_EN32KHZ,
+ S5M8763_CP_EN32KHZ,
+ S5M8763_ENCHGVI,
+ S5M8763_ESAFEUSB1,
+ S5M8763_ESAFEUSB2,
+};
+
+#define S5M8763_ENRAMP (1 << 4)
+#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
new file mode 100644
index 000000000000..306a95fc558c
--- /dev/null
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -0,0 +1,188 @@
+/* s5m8767.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S5M8767_H
+#define __LINUX_MFD_S5M8767_H
+
+/* S5M8767 registers */
+enum s5m8767_reg {
+ S5M8767_REG_ID,
+ S5M8767_REG_INT1,
+ S5M8767_REG_INT2,
+ S5M8767_REG_INT3,
+ S5M8767_REG_INT1M,
+ S5M8767_REG_INT2M,
+ S5M8767_REG_INT3M,
+ S5M8767_REG_STATUS1,
+ S5M8767_REG_STATUS2,
+ S5M8767_REG_STATUS3,
+ S5M8767_REG_CTRL1,
+ S5M8767_REG_CTRL2,
+ S5M8767_REG_LOWBAT1,
+ S5M8767_REG_LOWBAT2,
+ S5M8767_REG_BUCHG,
+ S5M8767_REG_DVSRAMP,
+ S5M8767_REG_DVSTIMER2 = 0x10,
+ S5M8767_REG_DVSTIMER3,
+ S5M8767_REG_DVSTIMER4,
+ S5M8767_REG_LDO1,
+ S5M8767_REG_LDO2,
+ S5M8767_REG_LDO3,
+ S5M8767_REG_LDO4,
+ S5M8767_REG_LDO5,
+ S5M8767_REG_LDO6,
+ S5M8767_REG_LDO7,
+ S5M8767_REG_LDO8,
+ S5M8767_REG_LDO9,
+ S5M8767_REG_LDO10,
+ S5M8767_REG_LDO11,
+ S5M8767_REG_LDO12,
+ S5M8767_REG_LDO13,
+ S5M8767_REG_LDO14 = 0x20,
+ S5M8767_REG_LDO15,
+ S5M8767_REG_LDO16,
+ S5M8767_REG_LDO17,
+ S5M8767_REG_LDO18,
+ S5M8767_REG_LDO19,
+ S5M8767_REG_LDO20,
+ S5M8767_REG_LDO21,
+ S5M8767_REG_LDO22,
+ S5M8767_REG_LDO23,
+ S5M8767_REG_LDO24,
+ S5M8767_REG_LDO25,
+ S5M8767_REG_LDO26,
+ S5M8767_REG_LDO27,
+ S5M8767_REG_LDO28,
+ S5M8767_REG_UVLO = 0x31,
+ S5M8767_REG_BUCK1CTRL1,
+ S5M8767_REG_BUCK1CTRL2,
+ S5M8767_REG_BUCK2CTRL,
+ S5M8767_REG_BUCK2DVS1,
+ S5M8767_REG_BUCK2DVS2,
+ S5M8767_REG_BUCK2DVS3,
+ S5M8767_REG_BUCK2DVS4,
+ S5M8767_REG_BUCK2DVS5,
+ S5M8767_REG_BUCK2DVS6,
+ S5M8767_REG_BUCK2DVS7,
+ S5M8767_REG_BUCK2DVS8,
+ S5M8767_REG_BUCK3CTRL,
+ S5M8767_REG_BUCK3DVS1,
+ S5M8767_REG_BUCK3DVS2,
+ S5M8767_REG_BUCK3DVS3,
+ S5M8767_REG_BUCK3DVS4,
+ S5M8767_REG_BUCK3DVS5,
+ S5M8767_REG_BUCK3DVS6,
+ S5M8767_REG_BUCK3DVS7,
+ S5M8767_REG_BUCK3DVS8,
+ S5M8767_REG_BUCK4CTRL,
+ S5M8767_REG_BUCK4DVS1,
+ S5M8767_REG_BUCK4DVS2,
+ S5M8767_REG_BUCK4DVS3,
+ S5M8767_REG_BUCK4DVS4,
+ S5M8767_REG_BUCK4DVS5,
+ S5M8767_REG_BUCK4DVS6,
+ S5M8767_REG_BUCK4DVS7,
+ S5M8767_REG_BUCK4DVS8,
+ S5M8767_REG_BUCK5CTRL1,
+ S5M8767_REG_BUCK5CTRL2,
+ S5M8767_REG_BUCK5CTRL3,
+ S5M8767_REG_BUCK5CTRL4,
+ S5M8767_REG_BUCK5CTRL5,
+ S5M8767_REG_BUCK6CTRL1,
+ S5M8767_REG_BUCK6CTRL2,
+ S5M8767_REG_BUCK7CTRL1,
+ S5M8767_REG_BUCK7CTRL2,
+ S5M8767_REG_BUCK8CTRL1,
+ S5M8767_REG_BUCK8CTRL2,
+ S5M8767_REG_BUCK9CTRL1,
+ S5M8767_REG_BUCK9CTRL2,
+ S5M8767_REG_LDO1CTRL,
+ S5M8767_REG_LDO2_1CTRL,
+ S5M8767_REG_LDO2_2CTRL,
+ S5M8767_REG_LDO2_3CTRL,
+ S5M8767_REG_LDO2_4CTRL,
+ S5M8767_REG_LDO3CTRL,
+ S5M8767_REG_LDO4CTRL,
+ S5M8767_REG_LDO5CTRL,
+ S5M8767_REG_LDO6CTRL,
+ S5M8767_REG_LDO7CTRL,
+ S5M8767_REG_LDO8CTRL,
+ S5M8767_REG_LDO9CTRL,
+ S5M8767_REG_LDO10CTRL,
+ S5M8767_REG_LDO11CTRL,
+ S5M8767_REG_LDO12CTRL,
+ S5M8767_REG_LDO13CTRL,
+ S5M8767_REG_LDO14CTRL,
+ S5M8767_REG_LDO15CTRL,
+ S5M8767_REG_LDO16CTRL,
+ S5M8767_REG_LDO17CTRL,
+ S5M8767_REG_LDO18CTRL,
+ S5M8767_REG_LDO19CTRL,
+ S5M8767_REG_LDO20CTRL,
+ S5M8767_REG_LDO21CTRL,
+ S5M8767_REG_LDO22CTRL,
+ S5M8767_REG_LDO23CTRL,
+ S5M8767_REG_LDO24CTRL,
+ S5M8767_REG_LDO25CTRL,
+ S5M8767_REG_LDO26CTRL,
+ S5M8767_REG_LDO27CTRL,
+ S5M8767_REG_LDO28CTRL,
+};
+
+/* S5M8767 regulator ids */
+enum s5m8767_regulators {
+ S5M8767_LDO1,
+ S5M8767_LDO2,
+ S5M8767_LDO3,
+ S5M8767_LDO4,
+ S5M8767_LDO5,
+ S5M8767_LDO6,
+ S5M8767_LDO7,
+ S5M8767_LDO8,
+ S5M8767_LDO9,
+ S5M8767_LDO10,
+ S5M8767_LDO11,
+ S5M8767_LDO12,
+ S5M8767_LDO13,
+ S5M8767_LDO14,
+ S5M8767_LDO15,
+ S5M8767_LDO16,
+ S5M8767_LDO17,
+ S5M8767_LDO18,
+ S5M8767_LDO19,
+ S5M8767_LDO20,
+ S5M8767_LDO21,
+ S5M8767_LDO22,
+ S5M8767_LDO23,
+ S5M8767_LDO24,
+ S5M8767_LDO25,
+ S5M8767_LDO26,
+ S5M8767_LDO27,
+ S5M8767_LDO28,
+ S5M8767_BUCK1,
+ S5M8767_BUCK2,
+ S5M8767_BUCK3,
+ S5M8767_BUCK4,
+ S5M8767_BUCK5,
+ S5M8767_BUCK6,
+ S5M8767_BUCK7,
+ S5M8767_BUCK8,
+ S5M8767_BUCK9,
+ S5M8767_AP_EN32KHZ,
+ S5M8767_CP_EN32KHZ,
+
+ S5M8767_REG_MAX,
+};
+
+#define S5M8767_ENCTRL_SHIFT 6
+
+#endif /* __LINUX_MFD_S5M8767_H */
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 6c4c478e21a4..9bf8767818b4 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -807,6 +807,7 @@ struct tps65910_board {
int irq_base;
int vmbch_threshold;
int vmbch2_threshold;
+ bool en_ck32k_xtal;
bool en_dev_slp;
struct tps65910_sleep_keepon_data *slp_keepon;
bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 6659487c31e7..eaad49f7c130 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -161,8 +161,9 @@
#define TWL6040_CELLS 2
#define TWL6040_REV_ES1_0 0x00
-#define TWL6040_REV_ES1_1 0x01
-#define TWL6040_REV_ES1_2 0x02
+#define TWL6040_REV_ES1_1 0x01 /* Rev ES1.1 and ES1.2 */
+#define TWL6040_REV_ES1_3 0x02
+#define TWL6041_REV_ES2_0 0x10
#define TWL6040_IRQ_TH 0
#define TWL6040_IRQ_PLUG 1
@@ -206,7 +207,6 @@ struct twl6040 {
struct regmap *regmap;
struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct mutex mutex;
- struct mutex io_mutex;
struct mutex irq_mutex;
struct mfd_cell cells[TWL6040_CELLS];
struct completion ready;
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 9192b6404a73..509481d9cf19 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -17,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/regmap.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -66,6 +67,9 @@
#define WM8350_MAX_REGISTER 0xFF
+#define WM8350_UNLOCK_KEY 0x0013
+#define WM8350_LOCK_KEY 0x0000
+
/*
* Field Definitions.
*/
@@ -582,27 +586,9 @@
#define WM8350_NUM_IRQ_REGS 7
-struct wm8350_reg_access {
- u16 readable; /* Mask of readable bits */
- u16 writable; /* Mask of writable bits */
- u16 vol; /* Mask of volatile bits */
-};
-extern const struct wm8350_reg_access wm8350_reg_io_map[];
-extern const u16 wm8350_mode0_defaults[];
-extern const u16 wm8350_mode1_defaults[];
-extern const u16 wm8350_mode2_defaults[];
-extern const u16 wm8350_mode3_defaults[];
-extern const u16 wm8351_mode0_defaults[];
-extern const u16 wm8351_mode1_defaults[];
-extern const u16 wm8351_mode2_defaults[];
-extern const u16 wm8351_mode3_defaults[];
-extern const u16 wm8352_mode0_defaults[];
-extern const u16 wm8352_mode1_defaults[];
-extern const u16 wm8352_mode2_defaults[];
-extern const u16 wm8352_mode3_defaults[];
+extern const struct regmap_config wm8350_regmap;
struct wm8350;
-struct regmap;
struct wm8350_hwmon {
struct platform_device *pdev;
@@ -614,7 +600,7 @@ struct wm8350 {
/* device IO */
struct regmap *regmap;
- u16 *reg_cache;
+ bool unlocked;
struct mutex auxadc_mutex;
struct completion auxadc_done;
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 893267bb6229..f0361c031927 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -141,6 +141,7 @@ struct wm8994_pdata {
struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO];
int irq_base; /** Base IRQ number for WM8994, required for IRQs */
+ unsigned long irq_flags; /** user irq flags */
int num_drc_cfgs;
struct wm8994_drc_cfg *drc_cfgs;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 855c337b20c3..ce7e6671968b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -15,7 +15,7 @@ extern int migrate_page(struct address_space *,
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode);
-extern int migrate_huge_pages(struct list_head *l, new_page_t x,
+extern int migrate_huge_page(struct page *, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode);
@@ -36,7 +36,7 @@ static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode) { return -ENOSYS; }
-static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
+static inline int migrate_huge_page(struct page *page, new_page_t x,
unsigned long private, bool offlining,
enum migrate_mode mode) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f9f279cf5b1b..311be906b57d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -805,6 +805,17 @@ static inline void *page_rmapping(struct page *page)
return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}
+extern struct address_space *__page_file_mapping(struct page *);
+
+static inline
+struct address_space *page_file_mapping(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_mapping(page);
+
+ return page->mapping;
+}
+
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -821,6 +832,20 @@ static inline pgoff_t page_index(struct page *page)
return page->index;
}
+extern pgoff_t __page_file_index(struct page *page);
+
+/*
+ * Return the file index of the page. Regular pagecache pages use ->index
+ * whereas swapcache pages use swp_offset(->private)
+ */
+static inline pgoff_t page_file_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_index(page);
+
+ return page->index;
+}
+
/*
* Return true if this page is mapped into pagetables.
*/
@@ -994,6 +1019,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
+struct kvec;
+int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
+ struct page **pages);
+int get_kernel_page(unsigned long start, int write, struct page **pages);
struct page *get_dump_page(unsigned long addr);
extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
@@ -1331,6 +1360,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
extern void zone_pcp_update(struct zone *zone);
+extern void zone_pcp_reset(struct zone *zone);
/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
@@ -1411,6 +1441,7 @@ extern void truncate_inode_pages_range(struct address_space *,
/* generic vm_area_ops exported for stackable file systems */
extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
@@ -1528,6 +1559,7 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
static inline void vm_stat_account(struct mm_struct *mm,
unsigned long flags, struct file *file, long pages)
{
+ mm->total_vm += pages;
}
#endif /* CONFIG_PROC_FS */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 704a626d94a0..bf7867200b95 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -53,7 +53,16 @@ struct page {
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub first free object */
+ void *freelist; /* slub/slob first free object */
+ bool pfmemalloc; /* If set by the page allocator,
+ * ALLOC_NO_WATERMARKS was set
+ * and the low watermark was not
+ * met implying that the system
+ * is under some pressure. The
+ * caller should try to ensure
+ * this page is only used to
+ * free other pages.
+ */
};
union {
@@ -91,11 +100,12 @@ struct page {
*/
atomic_t _mapcount;
- struct {
+ struct { /* SLUB */
unsigned inuse:16;
unsigned objects:15;
unsigned frozen:1;
};
+ int units; /* SLOB */
};
atomic_t _count; /* Usage count, see below. */
};
@@ -117,6 +127,12 @@ struct page {
short int pobjects;
#endif
};
+
+ struct list_head list; /* slobs list of pages */
+ struct { /* slab fields */
+ struct kmem_cache *slab_cache;
+ struct slab *slab_page;
+ };
};
/* Remainder is not double word aligned */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 111aca5e97f3..4b27f9f503e4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -239,6 +239,7 @@ struct mmc_card {
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
+#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 458988bd55a1..2daa54f55db7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -201,7 +201,7 @@ struct zone_reclaim_stat {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
struct zone *zone;
#endif
};
@@ -209,7 +209,6 @@ struct lruvec {
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
/* Isolate clean file */
@@ -369,6 +368,10 @@ struct zone {
*/
spinlock_t lock;
int all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* pfn where the last incremental compaction isolated free pages */
+ unsigned long compact_cached_free_pfn;
+#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
@@ -475,6 +478,14 @@ struct zone {
* rarely used fields:
*/
const char *name;
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * the number of MIGRATE_ISOLATE *pageblock*.
+ * We need this for free page counting. Look at zone_watermark_ok_safe.
+ * It's protected by zone->lock
+ */
+ int nr_pageblock_isolate;
+#endif
} ____cacheline_internodealigned_in_smp;
typedef enum {
@@ -671,7 +682,7 @@ typedef struct pglist_data {
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
struct page_cgroup *node_page_cgroup;
#endif
#endif
@@ -694,6 +705,7 @@ typedef struct pglist_data {
range, including holes */
int node_id;
wait_queue_head_t kswapd_wait;
+ wait_queue_head_t pfmemalloc_wait;
struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
int kswapd_max_order;
enum zone_type classzone_idx;
@@ -718,7 +730,7 @@ typedef struct pglist_data {
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
-void build_all_zonelists(void *data);
+void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
@@ -736,7 +748,7 @@ extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
return lruvec->zone;
#else
return container_of(lruvec, struct zone, lruvec);
@@ -773,7 +785,7 @@ extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
return movable_zone == ZONE_HIGHMEM;
#else
return 0;
@@ -1052,7 +1064,7 @@ struct mem_section {
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
/*
* If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
* section. (see memcontrol.h/page_cgroup.h about this.)
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 51bf8ada6dc0..49258e0ed1c6 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -15,6 +15,8 @@
#define MV643XX_ETH_SIZE_REG_4 0x2224
#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
+#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0
+
struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
struct platform_device *shared_smi;
diff --git a/include/linux/namei.h b/include/linux/namei.h
index d2ef8b34b967..4bf19d8174ed 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -67,6 +67,7 @@ extern int kern_path(const char *, unsigned, struct path *);
extern struct dentry *kern_path_create(int, const char *, struct path *, int);
extern struct dentry *user_path_create(int, const char __user *, struct path *, int);
+extern void done_path_create(struct path *, struct dentry *);
extern struct dentry *kern_path_locked(const char *, struct path *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct path *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eb06e58bed0b..59dc05f38247 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -953,7 +953,8 @@ struct net_device_ops {
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
int (*ndo_netpoll_setup)(struct net_device *dev,
- struct netpoll_info *info);
+ struct netpoll_info *info,
+ gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1300,6 +1301,8 @@ struct net_device {
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
+#define GSO_MAX_SEGS 65535
+ u16 gso_max_segs;
#ifdef CONFIG_DCB
/* Data Center Bridging netlink ops */
@@ -1519,6 +1522,8 @@ struct packet_type {
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
+ bool (*id_match)(struct packet_type *ptype,
+ struct sock *sk);
void *af_packet_priv;
struct list_head list;
};
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0dfc8b7210a3..89f2a627f3f0 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -164,7 +164,7 @@ extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr);
+ union nf_inet_addr *addr, bool delim);
extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
unsigned int off, unsigned int datalen,
const char *name,
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 28f5389c924b..66d5379c305e 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -23,6 +23,7 @@ struct netpoll {
u8 remote_mac[ETH_ALEN];
struct list_head rx; /* rx_np list element */
+ struct rcu_head rcu;
};
struct netpoll_info {
@@ -38,28 +39,40 @@ struct netpoll_info {
struct delayed_work tx_work;
struct netpoll *netpoll;
+ struct rcu_head rcu;
};
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
+void __netpoll_free_rcu(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
-int __netpoll_rx(struct sk_buff *skb);
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev);
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
+ unsigned long flags;
+ local_irq_save(flags);
netpoll_send_skb_on_dev(np, skb, np->dev);
+ local_irq_restore(flags);
}
#ifdef CONFIG_NETPOLL
+static inline bool netpoll_rx_on(struct sk_buff *skb)
+{
+ struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
+
+ return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
+}
+
static inline bool netpoll_rx(struct sk_buff *skb)
{
struct netpoll_info *npinfo;
@@ -67,14 +80,14 @@ static inline bool netpoll_rx(struct sk_buff *skb)
bool ret = false;
local_irq_save(flags);
- npinfo = rcu_dereference_bh(skb->dev->npinfo);
- if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
+ if (!netpoll_rx_on(skb))
goto out;
+ npinfo = rcu_dereference_bh(skb->dev->npinfo);
spin_lock(&npinfo->rx_lock);
/* check rx_flags again with the lock held */
- if (npinfo->rx_flags && __netpoll_rx(skb))
+ if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
ret = true;
spin_unlock(&npinfo->rx_lock);
@@ -83,13 +96,6 @@ out:
return ret;
}
-static inline int netpoll_rx_on(struct sk_buff *skb)
-{
- struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-
- return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
-}
-
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
if (!list_empty(&skb->dev->napi_list))
@@ -119,7 +125,7 @@ static inline void netpoll_poll_unlock(void *have)
}
}
-static inline int netpoll_tx_running(struct net_device *dev)
+static inline bool netpoll_tx_running(struct net_device *dev)
{
return irqs_disabled();
}
@@ -127,11 +133,11 @@ static inline int netpoll_tx_running(struct net_device *dev)
#else
static inline bool netpoll_rx(struct sk_buff *skb)
{
- return 0;
+ return false;
}
-static inline int netpoll_rx_on(struct sk_buff *skb)
+static inline bool netpoll_rx_on(struct sk_buff *skb)
{
- return 0;
+ return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
@@ -147,9 +153,9 @@ static inline void netpoll_poll_unlock(void *have)
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
-static inline int netpoll_tx_running(struct net_device *dev)
+static inline bool netpoll_tx_running(struct net_device *dev)
{
- return 0;
+ return false;
}
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b23cfc120edb..1f8fc7f9bcd8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -191,7 +191,7 @@ struct nfs_inode {
struct hlist_head silly_list;
wait_queue_head_t waitqueue;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
struct list_head open_states;
@@ -427,12 +427,8 @@ extern __be32 root_nfs_parse_addr(char *name); /*__init*/
/*
* linux/fs/nfs/file.c
*/
-extern const struct inode_operations nfs_file_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_file_inode_operations;
-#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_file_operations;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
extern const struct file_operations nfs4_file_operations;
#endif /* CONFIG_NFS_V4 */
extern const struct address_space_operations nfs_file_aops;
@@ -477,18 +473,14 @@ extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
unsigned long);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs,
- loff_t pos);
+ loff_t pos, bool uio);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs,
- loff_t pos);
+ loff_t pos, bool uio);
/*
* linux/fs/nfs/dir.c
*/
-extern const struct inode_operations nfs_dir_inode_operations;
-#ifdef CONFIG_NFS_V3
-extern const struct inode_operations nfs3_dir_inode_operations;
-#endif /* CONFIG_NFS_V3 */
extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
@@ -546,7 +538,7 @@ extern void nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
extern int nfs_wb_all(struct inode *inode);
extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index f58325a1d8fb..310c63c8ab2c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,11 +48,12 @@ struct nfs_client {
struct rpc_clnt * cl_rpcclient;
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
+ struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm; /* Clientid verifier */
unsigned long cl_state;
@@ -69,10 +70,9 @@ struct nfs_client {
struct idmap * cl_idmap;
/* Our own IP address, as a null-terminated string.
- * This is used to generate the clientid, and the callback address.
+ * This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
- unsigned char cl_id_uniquifier;
u32 cl_cb_ident; /* v4.0 callback identifier */
const struct nfs4_minor_version_ops *cl_mvops;
@@ -138,7 +138,7 @@ struct nfs_server {
#endif
u32 pnfs_blksize; /* layout_blksize attr */
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
u32 attr_bitmask[3];/* V4 bitmask representing the set
of attributes supported on this
filesystem */
@@ -201,7 +201,7 @@ struct nfs_server {
#define NFS4_MAX_SLOT_TABLE (256U)
#define NFS4_NO_SLOT ((u32)-1)
-#if defined(CONFIG_NFS_V4)
+#if IS_ENABLED(CONFIG_NFS_V4)
/* Sessions */
#define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index 7eed2012d288..ece91c57ad79 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -69,7 +69,7 @@ struct nfs_server;
struct nfs_fattr;
struct nfs4_string;
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
int nfs_idmap_init(void);
void nfs_idmap_quit(void);
#else
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 88d166b555e8..92ce5783b707 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -42,7 +42,7 @@ struct nfs_page {
wb_bytes; /* Length of request */
struct kref wb_kref; /* reference count */
unsigned long wb_flags;
- struct nfs_writeverf wb_verf; /* Commit cookie */
+ struct nfs_write_verifier wb_verf; /* Commit cookie */
};
struct nfs_pageio_descriptor;
@@ -69,6 +69,7 @@ struct nfs_pageio_descriptor {
const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
struct nfs_direct_req *pg_dreq;
+ void *pg_layout_private;
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d3b7c18b18f4..ac7c8ae254f2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -514,9 +514,13 @@ struct nfs_writeargs {
struct nfs4_sequence_args seq_args;
};
+struct nfs_write_verifier {
+ char data[8];
+};
+
struct nfs_writeverf {
+ struct nfs_write_verifier verifier;
enum nfs3_stable_how committed;
- __be32 verifier[2];
};
struct nfs_writeres {
@@ -820,7 +824,7 @@ struct nfs3_getaclres {
struct posix_acl * acl_default;
};
-#ifdef CONFIG_NFS_V4
+#if IS_ENABLED(CONFIG_NFS_V4)
typedef u64 clientid4;
@@ -1244,6 +1248,7 @@ struct nfs_pgio_header {
void (*release) (struct nfs_pgio_header *hdr);
const struct nfs_pgio_completion_ops *completion_ops;
struct nfs_direct_req *dreq;
+ void *layout_private;
spinlock_t lock;
/* fields protected by lock */
int pnfs_error;
@@ -1349,6 +1354,10 @@ struct nfs_renamedata {
struct nfs_access_entry;
struct nfs_client;
struct rpc_timeout;
+struct nfs_subversion;
+struct nfs_mount_info;
+struct nfs_client_initdata;
+struct nfs_pageio_descriptor;
/*
* RPC procedure vector for NFSv2/NFSv3 demuxing
@@ -1364,6 +1373,8 @@ struct nfs_rpc_ops {
struct nfs_fsinfo *);
struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
+ struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
+ struct nfs_subversion *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
@@ -1402,9 +1413,13 @@ struct nfs_rpc_ops {
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
+ void (*read_pageio_init)(struct nfs_pageio_descriptor *, struct inode *,
+ const struct nfs_pgio_completion_ops *);
void (*read_rpc_prepare)(struct rpc_task *, struct nfs_read_data *);
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
+ void (*write_pageio_init)(struct nfs_pageio_descriptor *, struct inode *, int,
+ const struct nfs_pgio_completion_ops *);
void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
@@ -1418,9 +1433,16 @@ struct nfs_rpc_ops {
struct nfs_open_context *ctx,
int open_flags,
struct iattr *iattr);
+ int (*have_delegation)(struct inode *, fmode_t);
+ int (*return_delegation)(struct inode *);
+ struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *
(*init_client) (struct nfs_client *, const struct rpc_timeout *,
const char *, rpc_authflavor_t);
+ void (*free_client) (struct nfs_client *);
+ struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
+ struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, rpc_authflavor_t);
};
/*
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index ce4743a26015..fa63048fecff 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -143,6 +143,7 @@ typedef struct svc_fh {
int fh_maxsize; /* max size for fh_handle */
unsigned char fh_locked; /* inode locked by us */
+ unsigned char fh_want_write; /* remount protection taken */
#ifdef CONFIG_NFSD_V3
unsigned char fh_post_saved; /* post-op attrs saved */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index 89bd4a4dcfb4..98755767c7b0 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -293,7 +293,7 @@ struct nilfs_dir_entry {
__le64 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
- __u8 file_type;
+ __u8 file_type; /* Dir entry type (file, dir, etc) */
char name[NILFS_NAME_LEN]; /* File name */
char pad;
};
@@ -395,7 +395,7 @@ union nilfs_binfo {
};
/**
- * struct nilfs_segment_summary - segment summary
+ * struct nilfs_segment_summary - segment summary header
* @ss_datasum: checksum of data
* @ss_sumsum: checksum of segment summary
* @ss_magic: magic number
@@ -683,9 +683,9 @@ struct nilfs_sufile_header {
/**
* nilfs_suinfo - segment usage information
- * @sui_lastmod:
- * @sui_nblocks:
- * @sui_flags:
+ * @sui_lastmod: timestamp of last modification
+ * @sui_nblocks: number of written blocks in segment
+ * @sui_flags: segment usage flags
*/
struct nilfs_suinfo {
__u64 sui_lastmod;
@@ -716,9 +716,10 @@ enum {
};
/**
- * struct nilfs_cpmode -
- * @cc_cno:
- * @cc_mode:
+ * struct nilfs_cpmode - change checkpoint mode structure
+ * @cm_cno: checkpoint number
+ * @cm_mode: mode of checkpoint
+ * @cm_pad: padding
*/
struct nilfs_cpmode {
__u64 cm_cno;
@@ -728,11 +729,11 @@ struct nilfs_cpmode {
/**
* struct nilfs_argv - argument vector
- * @v_base:
- * @v_nmembs:
- * @v_size:
- * @v_flags:
- * @v_index:
+ * @v_base: pointer on data array from userspace
+ * @v_nmembs: number of members in data array
+ * @v_size: size of data array in bytes
+ * @v_flags: flags
+ * @v_index: start number of target data items
*/
struct nilfs_argv {
__u64 v_base;
@@ -743,9 +744,9 @@ struct nilfs_argv {
};
/**
- * struct nilfs_period -
- * @p_start:
- * @p_end:
+ * struct nilfs_period - period of checkpoint numbers
+ * @p_start: start checkpoint number (inclusive)
+ * @p_end: end checkpoint number (exclusive)
*/
struct nilfs_period {
__u64 p_start;
@@ -753,7 +754,7 @@ struct nilfs_period {
};
/**
- * struct nilfs_cpstat -
+ * struct nilfs_cpstat - checkpoint statistics
* @cs_cno: checkpoint number
* @cs_ncps: number of checkpoints
* @cs_nsss: number of snapshots
@@ -765,7 +766,7 @@ struct nilfs_cpstat {
};
/**
- * struct nilfs_sustat -
+ * struct nilfs_sustat - segment usage statistics
* @ss_nsegs: number of segments
* @ss_ncleansegs: number of clean segments
* @ss_ndirtysegs: number of dirty segments
@@ -784,10 +785,10 @@ struct nilfs_sustat {
/**
* struct nilfs_vinfo - virtual block number information
- * @vi_vblocknr:
- * @vi_start:
- * @vi_end:
- * @vi_blocknr:
+ * @vi_vblocknr: virtual block number
+ * @vi_start: start checkpoint number (inclusive)
+ * @vi_end: end checkpoint number (exclusive)
+ * @vi_blocknr: disk block number
*/
struct nilfs_vinfo {
__u64 vi_vblocknr;
@@ -797,7 +798,15 @@ struct nilfs_vinfo {
};
/**
- * struct nilfs_vdesc -
+ * struct nilfs_vdesc - descriptor of virtual block number
+ * @vd_ino: inode number
+ * @vd_cno: checkpoint number
+ * @vd_vblocknr: virtual block number
+ * @vd_period: period of checkpoint numbers
+ * @vd_blocknr: disk block number
+ * @vd_offset: logical block offset inside a file
+ * @vd_flags: flags (data or node block)
+ * @vd_pad: padding
*/
struct nilfs_vdesc {
__u64 vd_ino;
@@ -811,7 +820,13 @@ struct nilfs_vdesc {
};
/**
- * struct nilfs_bdesc -
+ * struct nilfs_bdesc - descriptor of disk block number
+ * @bd_ino: inode number
+ * @bd_oblocknr: disk block address (for skipping dead blocks)
+ * @bd_blocknr: disk block address
+ * @bd_offset: logical block offset inside a file
+ * @bd_level: level in the b-tree organization
+ * @bd_pad: padding
*/
struct nilfs_bdesc {
__u64 bd_ino;
diff --git a/include/linux/of.h b/include/linux/of.h
index 0e9cf9eec085..1b1163225f3b 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -21,6 +21,7 @@
#include <linux/kref.h>
#include <linux/mod_devicetable.h>
#include <linux/spinlock.h>
+#include <linux/topology.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#define OF_BAD_ADDR ((u64)-1)
-#ifndef of_node_to_nid
-static inline int of_node_to_nid(struct device_node *np) { return -1; }
-#define of_node_to_nid of_node_to_nid
-#endif
-
static inline const char* of_node_full_name(struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -194,10 +190,17 @@ extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_parent(struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
+extern struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev);
+
#define for_each_child_of_node(parent, child) \
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
static inline int of_get_child_count(const struct device_node *np)
{
struct device_node *child;
@@ -386,6 +389,13 @@ static inline int of_property_read_u64(const struct device_node *np,
return -ENOSYS;
}
+static inline int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string)
+{
+ return -ENOSYS;
+}
+
static inline struct device_node *of_parse_phandle(struct device_node *np,
const char *phandle_name,
int index)
@@ -393,6 +403,15 @@ static inline struct device_node *of_parse_phandle(struct device_node *np,
return NULL;
}
+static inline int of_parse_phandle_with_args(struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
static inline int of_alias_get_id(struct device_node *np, const char *stem)
{
return -ENOSYS;
@@ -411,6 +430,15 @@ static inline int of_machine_is_compatible(const char *compat)
while (0)
#endif /* CONFIG_OF */
+#ifndef of_node_to_nid
+static inline int of_node_to_nid(struct device_node *np)
+{
+ return numa_node_id();
+}
+
+#define of_node_to_nid of_node_to_nid
+#endif
+
/**
 * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
new file mode 100644
index 000000000000..5bb6e760aa61
--- /dev/null
+++ b/include/linux/olpc-ec.h
@@ -0,0 +1,41 @@
+#ifndef _LINUX_OLPC_EC_H
+#define _LINUX_OLPC_EC_H
+
+/* XO-1 EC commands */
+#define EC_FIRMWARE_REV 0x08
+#define EC_WRITE_SCI_MASK 0x1b
+#define EC_WAKE_UP_WLAN 0x24
+#define EC_WLAN_LEAVE_RESET 0x25
+#define EC_READ_EB_MODE 0x2a
+#define EC_SET_SCI_INHIBIT 0x32
+#define EC_SET_SCI_INHIBIT_RELEASE 0x34
+#define EC_WLAN_ENTER_RESET 0x35
+#define EC_WRITE_EXT_SCI_MASK 0x38
+#define EC_SCI_QUERY 0x84
+#define EC_EXT_SCI_QUERY 0x85
+
+struct platform_device;
+
+struct olpc_ec_driver {
+ int (*probe)(struct platform_device *);
+ int (*suspend)(struct platform_device *);
+ int (*resume)(struct platform_device *);
+
+ int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *);
+};
+
+#ifdef CONFIG_OLPC
+
+extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg);
+
+extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen);
+
+#else
+
+static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen) { return -ENODEV; }
+
+#endif /* CONFIG_OLPC */
+
+#endif /* _LINUX_OLPC_EC_H */
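
The split between olpc_ec_driver (implemented by the platform side) and olpc_ec_cmd() (called by consumers) means a typical caller would look roughly like the sketch below; the single-byte reply length for EC_FIRMWARE_REV is an assumption here.

	static int example_query_ec_rev(void)
	{
		u8 rev;
		int ret;

		ret = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &rev, sizeof(rev));
		if (ret)
			return ret;

		pr_info("OLPC EC firmware revision: %u\n", rev);
		return 0;
	}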
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 000000000000..eb475a8ea25b
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,22 @@
+/*
+ * OMAP DMA Engine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OMAP_DMA_H
+#define __LINUX_OMAP_DMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
+bool omap_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
+{
+ return false;
+}
+#endif
+
+#endif
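
omap_dma_filter_fn() has the standard dmaengine filter signature, so a client driver would presumably request a channel along these lines; the request-line value passed through the filter parameter is a placeholder.

	#include <linux/dmaengine.h>

	static struct dma_chan *example_request_omap_chan(void)
	{
		dma_cap_mask_t mask;
		unsigned int req = 0;	/* placeholder DMA request line */

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* The filter lets the OMAP DMA driver claim a matching channel. */
		return dma_request_channel(mask, omap_dma_filter_fn, &req);
	}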
diff --git a/include/linux/oom.h b/include/linux/oom.h
index e4c29bc72e70..49a3031fda50 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -40,15 +40,36 @@ enum oom_constraint {
CONSTRAINT_MEMCG,
};
+enum oom_scan_t {
+ OOM_SCAN_OK, /* scan thread and find its badness */
+ OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */
+ OOM_SCAN_ABORT, /* abort the iteration and return */
+ OOM_SCAN_SELECT, /* always select this thread first */
+};
+
extern void compare_swap_oom_score_adj(int old_val, int new_val);
extern int test_set_oom_score_adj(int new_val);
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages);
+extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+ const char *message);
+
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
+ int order, const nodemask_t *nodemask);
+
+extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+ unsigned long totalpages, const nodemask_t *nodemask,
+ bool force_kill);
+extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ int order);
+
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
extern int register_oom_notifier(struct notifier_block *nb);
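
A hedged sketch of how a victim-selection loop might act on the new enum oom_scan_t values; the function name is hypothetical and task-list locking is simplified:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/oom.h>

static struct task_struct *example_pick_victim(unsigned long totalpages,
                                               const nodemask_t *nodemask,
                                               bool force_kill)
{
        struct task_struct *task, *chosen = NULL;
        unsigned long points, chosen_points = 0;

        rcu_read_lock();
        for_each_process(task) {
                switch (oom_scan_process_thread(task, totalpages, nodemask,
                                                force_kill)) {
                case OOM_SCAN_SELECT:
                        chosen = task;          /* always preferred */
                        chosen_points = ULONG_MAX;
                        break;
                case OOM_SCAN_CONTINUE:
                        continue;               /* never a candidate */
                case OOM_SCAN_ABORT:
                        rcu_read_unlock();
                        return NULL;            /* a victim is already exiting */
                case OOM_SCAN_OK:
                        points = oom_badness(task, NULL, nodemask, totalpages);
                        if (points > chosen_points) {
                                chosen = task;
                                chosen_points = points;
                        }
                        break;
                }
        }
        rcu_read_unlock();

        return chosen;
}
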
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c88d2a9451af..b5d13841604e 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/bug.h>
+#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
@@ -453,6 +454,34 @@ static inline int PageTransTail(struct page *page)
}
#endif
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline int PageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON(!PageSlab(page));
+ return PageActive(page);
+}
+
+static inline void SetPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON(!PageSlab(page));
+ SetPageActive(page);
+}
+
+static inline void __ClearPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON(!PageSlab(page));
+ __ClearPageActive(page);
+}
+
+static inline void ClearPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON(!PageSlab(page));
+ ClearPageActive(page);
+}
+
#ifdef CONFIG_MMU
#define __PG_MLOCKED (1 << PG_mlocked)
#else
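
A minimal sketch of how a slab allocator might record the pfmemalloc state of a freshly allocated slab page with the helpers above; the function name is hypothetical:

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>

static struct page *example_alloc_slab_page(gfp_t flags, unsigned int order)
{
        struct page *page = alloc_pages(flags, order);

        if (!page)
                return NULL;

        __SetPageSlab(page);
        /* Remember whether this slab came out of the pfmemalloc reserves. */
        if (page->pfmemalloc)
                SetPageSlabPfmemalloc(page);

        return page;
}
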
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3bdcab30ca41..105077aa7685 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -1,6 +1,11 @@
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
+
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
+void set_pageblock_migratetype(struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype);
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
* If specified range includes migrate types other than MOVABLE or CMA,
@@ -10,7 +15,7 @@
* free all pages in the range. test_pages_isolated() can be used to
* test it.
*/
-extern int
+int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype);
@@ -18,7 +23,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
-extern int
+int
undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned migratetype);
@@ -30,8 +35,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
/*
* Internal functions. Changes pageblock's migrate type.
*/
-extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+int set_migratetype_isolate(struct page *page);
+void unset_migratetype_isolate(struct page *page, unsigned migratetype);
#endif
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index a88cdba27809..777a524716db 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -12,7 +12,7 @@ enum {
#ifndef __GENERATING_BOUNDS_H
#include <generated/bounds.h>
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
#include <linux/bit_spinlock.h>
/*
@@ -82,7 +82,7 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
bit_spin_unlock(PCG_LOCK, &pc->flags);
}
-#else /* CONFIG_CGROUP_MEM_RES_CTLR */
+#else /* CONFIG_MEMCG */
struct page_cgroup;
static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
@@ -102,11 +102,11 @@ static inline void __init page_cgroup_init_flatmem(void)
{
}
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
+#endif /* CONFIG_MEMCG */
#include <linux/swap.h>
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
@@ -138,7 +138,7 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+#endif /* CONFIG_MEMCG_SWAP */
#endif /* !__GENERATING_BOUNDS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7cfad3bbb0cc..e42c762f0dc7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -286,6 +286,11 @@ static inline loff_t page_offset(struct page *page)
return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
+static inline loff_t page_file_offset(struct page *page)
+{
+ return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+}
+
extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fc3526077348..6b4565c440c8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2149,7 +2149,7 @@
#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
#define PCI_DEVICE_ID_NX2_5706S 0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
#define PCI_DEVICE_ID_NX2_5708S 0x16ac
#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 53274bff5773..7fb75b143755 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -543,6 +543,11 @@
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
+#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 76c5c8b724a7..7602ccb3f40e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1272,7 +1272,8 @@ static inline bool perf_paranoid_kernel(void)
extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
int entry_size, struct pt_regs *regs,
- struct hlist_head *head, int rctx);
+ struct hlist_head *head, int rctx,
+ struct task_struct *task);
extern void perf_bp_event(struct perf_event *event, void *data);
#ifndef perf_misc_flags
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 6dd96fb45482..e9b7f4350844 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -20,6 +20,7 @@
/* This struct is private to the core and should be regarded as a cookie */
struct pinctrl;
struct pinctrl_state;
+struct device;
#ifdef CONFIG_PINCTRL
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index e1ac1ce16fb0..ad1a427b5267 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -86,11 +86,9 @@ struct pipe_buf_operations {
* mapping or not. The atomic map is faster, however you can't take
* page faults before calling ->unmap() again. So if you need to eg
* access user data through copy_to/from_user(), then you must get
- * a non-atomic map. ->map() uses the KM_USER0 atomic slot for
- * atomic maps, so you can't map more than one pipe_buffer at once
- * and you have to be careful if mapping another page as source
- * or destination for a copy (IOW, it has to use something else
- * than KM_USER0).
+ * a non-atomic map. ->map() uses the kmap_atomic slot for
+ * atomic maps, so you have to be careful if mapping another page as
+ * source or destination for a copy.
*/
void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
@@ -162,4 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);
+int create_pipe_files(struct file **, int);
+
#endif
diff --git a/arch/arm/plat-nomadik/include/plat/i2c.h b/include/linux/platform_data/i2c-nomadik.h
index 8ba70ffc31ec..c2303c3e4803 100644
--- a/arch/arm/plat-nomadik/include/plat/i2c.h
+++ b/include/linux/platform_data/i2c-nomadik.h
@@ -5,8 +5,8 @@
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*/
-#ifndef __PLAT_I2C_H
-#define __PLAT_I2C_H
+#ifndef __PDATA_I2C_NOMADIK_H
+#define __PDATA_I2C_NOMADIK_H
enum i2c_freq_mode {
I2C_FREQ_MODE_STANDARD, /* up to 100 Kb/s */
@@ -36,4 +36,4 @@ struct nmk_i2c_controller {
enum i2c_freq_mode sm;
};
-#endif /* __PLAT_I2C_H */
+#endif /* __PDATA_I2C_NOMADIK_H */
diff --git a/include/linux/platform_data/leds-lm3556.h b/include/linux/platform_data/leds-lm3556.h
new file mode 100644
index 000000000000..4b4e7d6b0527
--- /dev/null
+++ b/include/linux/platform_data/leds-lm3556.h
@@ -0,0 +1,50 @@
+/*
+ * Simple driver for Texas Instruments LM3556 LED Flash driver chip (Rev0x03)
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LM3556_H
+#define __LINUX_LM3556_H
+
+#define LM3556_NAME "leds-lm3556"
+
+enum lm3556_pin_polarity {
+ PIN_LOW_ACTIVE = 0,
+ PIN_HIGH_ACTIVE,
+};
+
+enum lm3556_pin_enable {
+ PIN_DISABLED = 0,
+ PIN_ENABLED,
+};
+
+enum lm3556_strobe_usuage {
+ STROBE_EDGE_DETECT = 0,
+ STROBE_LEVEL_DETECT,
+};
+
+enum lm3556_indic_mode {
+ INDIC_MODE_INTERNAL = 0,
+ INDIC_MODE_EXTERNAL,
+};
+
+struct lm3556_platform_data {
+ enum lm3556_pin_enable torch_pin_en;
+ enum lm3556_pin_polarity torch_pin_polarity;
+
+ enum lm3556_strobe_usuage strobe_usuage;
+ enum lm3556_pin_enable strobe_pin_en;
+ enum lm3556_pin_polarity strobe_pin_polarity;
+
+ enum lm3556_pin_enable tx_pin_en;
+ enum lm3556_pin_polarity tx_pin_polarity;
+
+ enum lm3556_indic_mode indicator_mode;
+};
+
+#endif /* __LINUX_LM3556_H */
diff --git a/include/linux/lp855x.h b/include/linux/platform_data/lp855x.h
index 781a490a451b..cc76f1f18f18 100644
--- a/include/linux/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -47,12 +47,6 @@
(LP8556_I2C_ONLY << BRT_MODE_SHFT))
#define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT)
-/* ROM area boundary */
-#define EEPROM_START (0xA0)
-#define EEPROM_END (0xA7)
-#define EPROM_START (0xA0)
-#define EPROM_END (0xAF)
-
enum lp855x_chip_id {
LP8550,
LP8551,
diff --git a/include/linux/lp8727.h b/include/linux/platform_data/lp8727.h
index ea98c6133d32..ea98c6133d32 100644
--- a/include/linux/lp8727.h
+++ b/include/linux/platform_data/lp8727.h
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index d94804aca764..944b01dd103e 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -52,13 +52,4 @@ struct mv_usb_platform_data {
int (*set_vbus)(unsigned int vbus);
int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
-
-#ifndef CONFIG_HAVE_CLK
-/* Dummy stub for clk framework */
-#define clk_get(dev, id) NULL
-#define clk_put(clock) do {} while (0)
-#define clk_enable(clock) do {} while (0)
-#define clk_disable(clock) do {} while (0)
-#endif
-
#endif
diff --git a/include/linux/platform_data/spear_thermal.h b/include/linux/platform_data/spear_thermal.h
deleted file mode 100644
index 724f2e1cbbcb..000000000000
--- a/include/linux/platform_data/spear_thermal.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPEAr thermal driver platform data.
- *
- * Copyright (C) 2011-2012 ST Microelectronics
- * Author: Vincenzo Frascino <vincenzo.frascino@st.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-#ifndef SPEAR_THERMAL_H
-#define SPEAR_THERMAL_H
-
-/* SPEAr Thermal Sensor Platform Data */
-struct spear_thermal_pdata {
- /* flags used to enable thermal sensor */
- unsigned int thermal_flags;
-};
-
-#endif /* SPEAR_THERMAL_H */
diff --git a/include/linux/posix_types.h b/include/linux/posix_types.h
index f04c98cf44f3..988f76e636e3 100644
--- a/include/linux/posix_types.h
+++ b/include/linux/posix_types.h
@@ -15,26 +15,14 @@
*/
/*
- * Those macros may have been defined in <gnu/types.h>. But we always
- * use the ones here.
+ * This macro may have been defined in <gnu/types.h>. But we always
+ * use the one here.
*/
-#undef __NFDBITS
-#define __NFDBITS (8 * sizeof(unsigned long))
-
#undef __FD_SETSIZE
#define __FD_SETSIZE 1024
-#undef __FDSET_LONGS
-#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
-
-#undef __FDELT
-#define __FDELT(d) ((d) / __NFDBITS)
-
-#undef __FDMASK
-#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
-
typedef struct {
- unsigned long fds_bits [__FDSET_LONGS];
+ unsigned long fds_bits[__FD_SETSIZE / (8 * sizeof(long))];
} __kernel_fd_set;
/* Type of a signal handler. */
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 241065c9ce51..cd22029e32aa 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -16,6 +16,7 @@
#define _CHARGER_MANAGER_H
#include <linux/power_supply.h>
+#include <linux/extcon.h>
enum data_source {
CM_BATTERY_PRESENT,
@@ -65,6 +66,70 @@ struct charger_global_desc {
};
/**
+ * struct charger_cable
+ * @extcon_name: the name of extcon device.
+ * @name: the name of charger cable(external connector).
+ * @extcon_dev: the extcon device.
+ * @wq: the workqueue controlling the charger according to the state of
+ * the charger cable: the charger is enabled when the cable is attached
+ * and disabled when it is detached.
+ * @nb: the notifier block to receive changed state from EXTCON
+ * (External Connector) when charger cable is attached/detached.
+ * @attached: the state of charger cable.
+ * true: the charger cable is attached
+ * false: the charger cable is detached
+ * @charger: the instance of struct charger_regulator.
+ * @cm: the Charger Manager representing the battery.
+ */
+struct charger_cable {
+ const char *extcon_name;
+ const char *name;
+
+ /* The charger-manager uses the Extcon framework */
+ struct extcon_specific_cable_nb extcon_dev;
+ struct work_struct wq;
+ struct notifier_block nb;
+
+ /* The state of charger cable */
+ bool attached;
+
+ struct charger_regulator *charger;
+
+ /*
+ * Set min/max current of regulator to protect over-current issue
+ * according to a kind of charger cable when cable is attached.
+ */
+ int min_uA;
+ int max_uA;
+
+ struct charger_manager *cm;
+};
+
+/**
+ * struct charger_regulator
+ * @regulator_name: the name of regulator for using charger.
+ * @consumer: the regulator consumer for the charger.
+ * @cables:
+ * the array of charger cables used to enable/disable the charger
+ * and to set the current limit according to the constraint data of
+ * struct charger_cable when a cable included in the array is
+ * attached or detached.
+ * @num_cables: the number of charger cables.
+ */
+struct charger_regulator {
+ /* The name of regulator for charging */
+ const char *regulator_name;
+ struct regulator *consumer;
+
+ /*
+ * Store constraint information related to the current limit;
+ * each cable has a different condition for charging.
+ */
+ struct charger_cable *cables;
+ int num_cables;
+};
+
+/**
* struct charger_desc
* @psy_name: the name of power-supply-class for charger manager
* @polling_mode:
@@ -109,7 +174,7 @@ struct charger_desc {
char **psy_charger_stat;
int num_charger_regulators;
- struct regulator_bulk_data *charger_regulators;
+ struct charger_regulator *charger_regulators;
char *psy_fuel_gauge;
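
A hedged sketch of platform data tying two cables to one charger regulator, following the structures documented above; the extcon device and regulator names are invented for illustration:

#include <linux/kernel.h>
#include <linux/power/charger-manager.h>

static struct charger_cable example_cables[] = {
        {
                .extcon_name    = "example-muic",       /* hypothetical extcon device */
                .name           = "USB",
                .min_uA         = 450000,
                .max_uA         = 500000,
        }, {
                .extcon_name    = "example-muic",
                .name           = "TA",                 /* travel adapter */
                .min_uA         = 650000,
                .max_uA         = 750000,
        },
};

static struct charger_regulator example_regulators[] = {
        {
                .regulator_name = "example-charger",    /* hypothetical supply */
                .cables         = example_cables,
                .num_cables     = ARRAY_SIZE(example_cables),
        },
};
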
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3b912bee28d1..0bafbb15f29c 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -109,6 +109,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_AVG,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
POWER_SUPPLY_PROP_ENERGY_FULL,
@@ -116,9 +118,15 @@ enum power_supply_property {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_AVG,
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
@@ -173,6 +181,9 @@ struct power_supply {
/* private */
struct device *dev;
struct work_struct changed_work;
+#ifdef CONFIG_THERMAL
+ struct thermal_zone_device *tzd;
+#endif
#ifdef CONFIG_LEDS_TRIGGERS
struct led_trigger *charging_full_trig;
@@ -236,6 +247,7 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_CHARGE_AVG:
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
@@ -263,6 +275,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_POWER_NOW:
return 1;
default:
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 1bec2f7a2d42..9afc01e5a0a6 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -2,27 +2,34 @@
#define __KERNEL_PRINTK__
#include <linux/init.h>
+#include <linux/kern_levels.h>
extern const char linux_banner[];
extern const char linux_proc_banner[];
-#define KERN_EMERG "<0>" /* system is unusable */
-#define KERN_ALERT "<1>" /* action must be taken immediately */
-#define KERN_CRIT "<2>" /* critical conditions */
-#define KERN_ERR "<3>" /* error conditions */
-#define KERN_WARNING "<4>" /* warning conditions */
-#define KERN_NOTICE "<5>" /* normal but significant condition */
-#define KERN_INFO "<6>" /* informational */
-#define KERN_DEBUG "<7>" /* debug-level messages */
-
-/* Use the default kernel loglevel */
-#define KERN_DEFAULT "<d>"
-/*
- * Annotation for a "continued" line of log printout (only done after a
- * line that had no enclosing \n). Only to be used by core/arch code
- * during early bootup (a continued line is not SMP-safe otherwise).
- */
-#define KERN_CONT "<c>"
+static inline int printk_get_level(const char *buffer)
+{
+ if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
+ switch (buffer[1]) {
+ case '0' ... '7':
+ case 'd': /* KERN_DEFAULT */
+ return buffer[1];
+ }
+ }
+ return 0;
+}
+
+static inline const char *printk_skip_level(const char *buffer)
+{
+ if (printk_get_level(buffer)) {
+ switch (buffer[1]) {
+ case '0' ... '7':
+ case 'd': /* KERN_DEFAULT */
+ return buffer + 2;
+ }
+ }
+ return buffer;
+}
extern int console_printk[];
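
A minimal sketch of splitting a message into its level prefix and body with the new helpers; the function name is hypothetical:

#include <linux/kern_levels.h>
#include <linux/printk.h>

static void example_split_level(const char *msg)
{
        int level = printk_get_level(msg);              /* '0'..'7', 'd' or 0 */
        const char *text = printk_skip_level(msg);      /* body after the prefix */

        if (level)
                pr_info("level '%c': \"%s\"\n", level, text);
        else
                pr_info("no level prefix: \"%s\"\n", text);
}
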
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 7c775751392c..21d076c5089e 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -1,7 +1,10 @@
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
+#include <linux/of.h>
+
struct pwm_device;
+struct seq_file;
/*
* pwm_request - request a PWM device
@@ -28,4 +31,118 @@ int pwm_enable(struct pwm_device *pwm);
*/
void pwm_disable(struct pwm_device *pwm);
+#ifdef CONFIG_PWM
+struct pwm_chip;
+
+enum {
+ PWMF_REQUESTED = 1 << 0,
+ PWMF_ENABLED = 1 << 1,
+};
+
+struct pwm_device {
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period; /* in nanoseconds */
+};
+
+static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+{
+ if (pwm)
+ pwm->period = period;
+}
+
+static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+{
+ return pwm ? pwm->period : 0;
+}
+
+/**
+ * struct pwm_ops - PWM controller operations
+ * @request: optional hook for requesting a PWM
+ * @free: optional hook for freeing a PWM
+ * @config: configure duty cycles and period length for this PWM
+ * @enable: enable PWM output toggling
+ * @disable: disable PWM output toggling
+ * @dbg_show: optional routine to show contents in debugfs
+ * @owner: helps prevent removal of modules exporting active PWMs
+ */
+struct pwm_ops {
+ int (*request)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*enable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+#ifdef CONFIG_DEBUG_FS
+ void (*dbg_show)(struct pwm_chip *chip,
+ struct seq_file *s);
+#endif
+ struct module *owner;
+};
+
+/**
+ * struct pwm_chip - abstract a PWM controller
+ * @dev: device providing the PWMs
+ * @list: list node for internal use
+ * @ops: callbacks for this PWM controller
+ * @base: number of first PWM controlled by this chip
+ * @npwm: number of PWMs controlled by this chip
+ * @pwms: array of PWM devices allocated by the framework
+ */
+struct pwm_chip {
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+};
+
+int pwm_set_chip_data(struct pwm_device *pwm, void *data);
+void *pwm_get_chip_data(struct pwm_device *pwm);
+
+int pwmchip_add(struct pwm_chip *chip);
+int pwmchip_remove(struct pwm_chip *chip);
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label);
+
+struct pwm_device *pwm_get(struct device *dev, const char *consumer);
+void pwm_put(struct pwm_device *pwm);
+
+struct pwm_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+};
+
+#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ }
+
+void pwm_add_table(struct pwm_lookup *table, size_t num);
+
+#endif
+
#endif /* __LINUX_PWM_H */
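
A hedged sketch of the consumer side of the new framework: board code maps a provider channel to a consumer device via PWM_LOOKUP()/pwm_add_table(), and a driver then obtains and runs the PWM. The provider and consumer names are invented, pwm_config() is the pre-existing configuration call, and the lookup assumes the consumer device is named "example-bl":

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pwm.h>

/* Board code: channel 0 of a hypothetical "example-pwm" chip backs "example-bl". */
static struct pwm_lookup example_pwm_lookup[] = {
        PWM_LOOKUP("example-pwm", 0, "example-bl", NULL),
};

static void example_board_init(void)
{
        pwm_add_table(example_pwm_lookup, ARRAY_SIZE(example_pwm_lookup));
}

/* Consumer driver: 50% duty cycle over a 1 ms period. */
static int example_start_backlight(struct device *dev)
{
        struct pwm_device *pwm = pwm_get(dev, NULL);
        int ret;

        if (IS_ERR(pwm))
                return PTR_ERR(pwm);

        ret = pwm_config(pwm, 500000, 1000000);
        if (!ret)
                ret = pwm_enable(pwm);
        if (ret)
                pwm_put(pwm);

        return ret;
}
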
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 63d2df43e61a..56f4a866539a 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -12,6 +12,7 @@ struct platform_pwm_backlight_data {
unsigned int dft_brightness;
unsigned int lth_brightness;
unsigned int pwm_period_ns;
+ unsigned int *levels;
int (*init)(struct device *dev);
int (*notify)(struct device *dev, int brightness);
void (*notify_after)(struct device *dev, int brightness);
diff --git a/include/linux/random.h b/include/linux/random.h
index 8f74538c96db..ac621ce886ca 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -48,13 +48,13 @@ struct rnd_state {
#ifdef __KERNEL__
-extern void rand_initialize_irq(int irq);
-
+extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
#ifndef MODULE
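
A minimal sketch of a driver feeding device-specific data (here a MAC address) into the input pool via the new hook; the wrapper is hypothetical and no entropy credit is assumed:

#include <linux/types.h>
#include <linux/random.h>

static void example_seed_from_mac(const u8 *mac, unsigned int len)
{
        /*
         * The data is not credited as entropy; it merely helps tell
         * otherwise identical devices apart at early boot.
         */
        add_device_randomness(mac, len);
}
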
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index f1ffabb978d3..131b53957b9f 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -36,7 +36,6 @@
#define REMOTEPROC_H
#include <linux/types.h>
-#include <linux/kref.h>
#include <linux/klist.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
@@ -369,8 +368,8 @@ enum rproc_state {
* @firmware: name of firmware file to be loaded
* @priv: private data which belongs to the platform-specific rproc module
* @ops: platform-specific start/stop rproc handlers
- * @dev: underlying device
- * @refcount: refcount of users that have a valid pointer to this rproc
+ * @dev: virtual device for refcounting and common remoteproc behavior
+ * @fw_ops: firmware-specific handlers
* @power: refcount of users who need this rproc powered up
* @state: state of the device
* @lock: lock which protects concurrent manipulations of the rproc
@@ -383,6 +382,7 @@ enum rproc_state {
* @bootaddr: address of first instruction to boot rproc with (optional)
* @rvdevs: list of remote virtio devices
* @notifyids: idr for dynamically assigning rproc-wide unique notify ids
+ * @index: index of this rproc device
*/
struct rproc {
struct klist_node node;
@@ -391,8 +391,8 @@ struct rproc {
const char *firmware;
void *priv;
const struct rproc_ops *ops;
- struct device *dev;
- struct kref refcount;
+ struct device dev;
+ const struct rproc_fw_ops *fw_ops;
atomic_t power;
unsigned int state;
struct mutex lock;
@@ -405,6 +405,7 @@ struct rproc {
u32 bootaddr;
struct list_head rvdevs;
struct idr notifyids;
+ int index;
};
/* we currently support only two vrings per rvdev */
@@ -450,15 +451,12 @@ struct rproc_vdev {
unsigned long gfeatures;
};
-struct rproc *rproc_get_by_name(const char *name);
-void rproc_put(struct rproc *rproc);
-
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len);
-void rproc_free(struct rproc *rproc);
-int rproc_register(struct rproc *rproc);
-int rproc_unregister(struct rproc *rproc);
+void rproc_put(struct rproc *rproc);
+int rproc_add(struct rproc *rproc);
+int rproc_del(struct rproc *rproc);
int rproc_boot(struct rproc *rproc);
void rproc_shutdown(struct rproc *rproc);
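
A hedged sketch of a platform driver moving to the renamed registration API; the ops, firmware name and private struct are placeholders:

#include <linux/platform_device.h>
#include <linux/remoteproc.h>

struct example_rproc_priv {
        void __iomem *base;                     /* hypothetical MMIO window */
};

static int example_rproc_start(struct rproc *rproc) { return 0; }
static int example_rproc_stop(struct rproc *rproc) { return 0; }

static struct rproc_ops example_rproc_ops = {
        .start  = example_rproc_start,
        .stop   = example_rproc_stop,
};

static int example_rproc_probe(struct platform_device *pdev)
{
        struct rproc *rproc;
        int ret;

        rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
                            &example_rproc_ops, "example-fw.elf",
                            sizeof(struct example_rproc_priv));
        if (!rproc)
                return -ENOMEM;

        ret = rproc_add(rproc);         /* formerly rproc_register() */
        if (ret)
                rproc_put(rproc);       /* releases the rproc_alloc() reference */

        return ret;
}
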
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index ac9586dadfa5..7b600da9a635 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -214,6 +214,10 @@ void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned long offset, unsigned long size,
+ gfp_t gfp_mask);
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
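
A minimal sketch of the new helper: build a single sg_table covering an array of pages, letting physically contiguous pages coalesce into fewer entries; the wrapper name is hypothetical:

#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int example_pages_to_sgt(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned long nbytes)
{
        /* Offset 0 into the first page, nbytes total; may sleep. */
        return sg_alloc_table_from_pages(sgt, pages, n_pages, 0, nbytes,
                                         GFP_KERNEL);
}
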
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a721cef7e2d4..b8c86648a2f9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -406,6 +406,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);
+/* get/set_dumpable() values */
+#define SUID_DUMPABLE_DISABLED 0
+#define SUID_DUMPABLE_ENABLED 1
+#define SUID_DUMPABLE_SAFE 2
+
/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE 0 /* core dump is permitted */
@@ -1571,7 +1576,7 @@ struct task_struct {
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
+#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
struct memcg_batch_info {
int do_batch; /* incremented when batch uncharge started */
struct mem_cgroup *memcg; /* target memcg of uncharge */
@@ -1881,6 +1886,13 @@ static inline void rcu_copy_process(struct task_struct *p)
#endif
+static inline void tsk_restore_flags(struct task_struct *task,
+ unsigned long orig_flags, unsigned long flags)
+{
+ task->flags &= ~flags;
+ task->flags |= orig_flags & flags;
+}
+
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
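
A minimal sketch of the intended save/restore pattern for tsk_restore_flags(), here toggling PF_MEMALLOC around a callback; the wrapper is hypothetical:

#include <linux/sched.h>

static void example_with_memalloc(void (*fn)(void *), void *arg)
{
        unsigned long pflags = current->flags;

        current->flags |= PF_MEMALLOC;
        fn(arg);
        /* Put PF_MEMALLOC back exactly as it was on entry. */
        tsk_restore_flags(current, pflags, PF_MEMALLOC);
}
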
diff --git a/include/linux/security.h b/include/linux/security.h
index 4e5a73cdbbef..3dea6a9d568f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1242,8 +1242,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check that the @parent process has sufficient permission to trace the
* current process before allowing the current process to present itself
* to the @parent process for tracing.
- * The parent process will still have to undergo the ptrace_access_check
- * checks before it is allowed to trace this one.
* @parent contains the task_struct structure for debugger process.
* Return 0 if permission is granted.
* @capget:
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 65db9928e15f..0253c2022e53 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -47,7 +47,8 @@
#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
#define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
#define PORT_XR17D15X 21 /* Exar XR17D15x UART */
-#define PORT_MAX_8250 21 /* max port ID */
+#define PORT_LPC3220 22 /* NXP LPC32xx SoC "Standard" UART */
+#define PORT_MAX_8250 22 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 93f9821554b6..a3728bf66f0e 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
struct list_head node;
struct dma_async_tx_descriptor async_tx;
enum dma_transfer_direction direction;
+ size_t partial;
dma_cookie_t cookie;
int chunks;
int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
+ size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
};
struct shdma_dev {
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 92808b86703b..edd086883ccb 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -107,12 +107,14 @@ struct shmid_kernel /* private to the kernel */
#define SHM_NORESERVE 010000 /* don't check for reservations */
#ifdef CONFIG_SYSVIPC
-long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr);
+long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
+ unsigned long shmlba);
extern int is_file_shm_hugepages(struct file *file);
extern void exit_shm(struct task_struct *task);
#else
static inline long do_shmat(int shmid, char __user *shmaddr,
- int shmflg, unsigned long *addr)
+ int shmflg, unsigned long *addr,
+ unsigned long shmlba)
{
return -ENOSYS;
}
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 07ceb97d53fa..ac6b8ee07825 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -20,7 +20,6 @@ struct shrink_control {
* 'nr_to_scan' entries and attempt to free them up. It should return
* the number of objects which remain in the cache. If it returns -1, it means
* it cannot do any scanning at this time (eg. there is a risk of deadlock).
- * The callback must not return -1 if nr_to_scan is zero.
*
* The 'gfpmask' refers to the allocation we are currently trying to
* fulfil.
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
new file mode 100644
index 000000000000..ce3e8150c174
--- /dev/null
+++ b/include/linux/sizes.h
@@ -0,0 +1,47 @@
+/*
+ * include/linux/sizes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_SIZES_H__
+#define __LINUX_SIZES_H__
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d205c4be7f5b..7632c87da2c9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -462,6 +462,7 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
+ __u8 pfmemalloc:1;
__u8 ooo_okay:1;
__u8 l4_rxhash:1;
__u8 wifi_acked_valid:1;
@@ -502,6 +503,15 @@ struct sk_buff {
#include <linux/slab.h>
+#define SKB_ALLOC_FCLONE 0x01
+#define SKB_ALLOC_RX 0x02
+
+/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+static inline bool skb_pfmemalloc(const struct sk_buff *skb)
+{
+ return unlikely(skb->pfmemalloc);
+}
+
/*
* skb might have a dst pointer attached, refcounted or not.
* _skb_refdst low order bit is set if refcount was _not_ taken
@@ -565,7 +575,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
bool *fragstolen, int *delta_truesize);
extern struct sk_buff *__alloc_skb(unsigned int size,
- gfp_t priority, int fclone, int node);
+ gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
@@ -576,7 +586,7 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
- return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
+ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
extern void skb_recycle(struct sk_buff *skb);
@@ -1237,6 +1247,17 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ /*
+ * Propagate page->pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page. If
+ * pfmemalloc is set, we check the mapping as a mapping implies
+ * page->index is set (index and pfmemalloc share space).
+ * If it's a valid mapping, we cannot use page->pfmemalloc but we
+ * do not lose pfmemalloc information as the pages would not be
+ * allocated using __GFP_MEMALLOC.
+ */
+ if (page->pfmemalloc && !page->mapping)
+ skb->pfmemalloc = true;
frag->page.p = page;
frag->page_offset = off;
skb_frag_size_set(frag, size);
@@ -1753,6 +1774,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
+/**
+ * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
+ * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
+ * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ * @order: size of the allocation
+ *
+ * Allocate a new page.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
+ struct sk_buff *skb,
+ unsigned int order)
+{
+ struct page *page;
+
+ gfp_mask |= __GFP_COLD;
+
+ if (!(gfp_mask & __GFP_NOMEMALLOC))
+ gfp_mask |= __GFP_MEMALLOC;
+
+ page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+ if (skb && page && page->pfmemalloc)
+ skb->pfmemalloc = true;
+
+ return page;
+}
+
+/**
+ * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
+ * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
+ * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
+ *
+ * Allocate a new page.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
+ struct sk_buff *skb)
+{
+ return __skb_alloc_pages(gfp_mask, skb, 0);
+}
+
+/**
+ * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
+ * @page: The page that was allocated from skb_alloc_page
+ * @skb: The skb that may need pfmemalloc set
+ */
+static inline void skb_propagate_pfmemalloc(struct page *page,
+ struct sk_buff *skb)
+{
+ if (page && page->pfmemalloc)
+ skb->pfmemalloc = true;
+}
+
/**
* skb_frag_page - retrieve the page refered to by a paged fragment
* @frag: the paged fragment
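
A hedged sketch of a driver RX refill path using __skb_alloc_page(); leaving __GFP_NOMEMALLOC clear is what allows the pfmemalloc reserves to be used, and the helper already marks the skb when that happens. The function name is hypothetical:

#include <linux/gfp.h>
#include <linux/skbuff.h>

static int example_attach_rx_page(struct sk_buff *skb)
{
        struct page *page;

        /* RX path: do not set __GFP_NOMEMALLOC, so reserves may be tapped. */
        page = __skb_alloc_page(GFP_ATOMIC, skb);
        if (!page)
                return -ENOMEM;

        /* __skb_alloc_page() has already set skb->pfmemalloc if required. */
        skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
        return 0;
}
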
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67d5d94b783a..0dd2dfa7beca 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -93,6 +93,30 @@
(unsigned long)ZERO_SIZE_PTR)
/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+#ifdef CONFIG_SLOB
+struct kmem_cache {
+ unsigned int object_size;/* The original size of the object */
+ unsigned int size; /* The aligned/padded/added on size */
+ unsigned int align; /* Alignment as calculated */
+ unsigned long flags; /* Active flags on the slab */
+ const char *name; /* Slab name for sysfs */
+ int refcount; /* Use counter */
+ void (*ctor)(void *); /* Called on object slot creation */
+ struct list_head list; /* List of all slab caches on the system */
+};
+#endif
+
+/*
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fbd1117fdfde..0c634fa376c9 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,7 +27,7 @@ struct kmem_cache {
unsigned int limit;
unsigned int shared;
- unsigned int buffer_size;
+ unsigned int size;
u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
@@ -39,7 +39,7 @@ struct kmem_cache {
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- gfp_t gfpflags;
+ gfp_t allocflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
@@ -52,7 +52,10 @@ struct kmem_cache {
/* 4) cache creation/removal */
const char *name;
- struct list_head next;
+ struct list_head list;
+ int refcount;
+ int object_size;
+ int align;
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
@@ -73,12 +76,11 @@ struct kmem_cache {
/*
* If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. buffer_size contains the total
+ * fields and/or padding to every object. size contains the total
* object size including these internal fields, the following two
* variables contain the offset to the user object and its size.
*/
int obj_offset;
- int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
/* 6) per-cpu/per-node data, touched during every alloc/free */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c2f8c8bc56ed..df448adb7283 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
- int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
@@ -83,7 +82,7 @@ struct kmem_cache {
unsigned long flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
- int objsize; /* The size of an object without meta data */
+ int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
int cpu_partial; /* Number of per cpu partial objects to keep around */
struct kmem_cache_order_objects oo;
diff --git a/include/linux/string.h b/include/linux/string.h
index e033564f10ba..b9178812d9df 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -144,5 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
{
return strncmp(str, prefix, strlen(prefix)) == 0;
}
-#endif
+
+extern size_t memweight(const void *ptr, size_t bytes);
+
+#endif /* __KERNEL__ */
#endif /* _LINUX_STRING_H_ */
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 492a36d72829..f25ba922baaf 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -101,6 +101,7 @@ struct rpc_authops {
struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int);
int (*pipes_create)(struct rpc_auth *);
void (*pipes_destroy)(struct rpc_auth *);
+ int (*list_pseudoflavors)(rpc_authflavor_t *, int);
};
struct rpc_credops {
@@ -135,6 +136,7 @@ int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
void rpcauth_release(struct rpc_auth *);
+int rpcauth_list_flavors(rpc_authflavor_t *, int);
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index f5fd6160dbca..f792794f6634 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -217,14 +217,32 @@ extern int qword_get(char **bpp, char *dest, int bufsize);
static inline int get_int(char **bpp, int *anint)
{
char buf[50];
- char *ep;
- int rv;
- int len = qword_get(bpp, buf, 50);
- if (len < 0) return -EINVAL;
- if (len ==0) return -ENOENT;
- rv = simple_strtol(buf, &ep, 0);
- if (*ep) return -EINVAL;
- *anint = rv;
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtoint(buf, 0, anint))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int get_uint(char **bpp, unsigned int *anint)
+{
+ char buf[50];
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtouint(buf, 0, anint))
+ return -EINVAL;
+
return 0;
}
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 332da61cf8b7..a19e2547ae6a 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -14,6 +14,7 @@
#ifdef __KERNEL__
#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
#include <linux/uio.h>
/* The mechanism-independent gss-api context: */
@@ -127,7 +128,7 @@ struct gss_api_mech *gss_mech_get_by_name(const char *);
struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
/* Fill in an array with a list of supported pseudoflavors */
-int gss_mech_list_pseudoflavors(u32 *);
+int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int);
/* Just increments the mechanism's reference count and returns its input: */
struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 40e0a273faea..d83db800fe02 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -278,6 +278,8 @@ struct svc_rqst {
struct task_struct *rq_task; /* service thread */
};
+#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
+
/*
* Rigorous type checking on sockaddr type conversions
*/
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index af70af333546..63988990bd36 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -104,8 +104,6 @@ __be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp,
__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *);
__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
-void xdr_encode_pages(struct xdr_buf *, struct page **, unsigned int,
- unsigned int);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
void xdr_terminate_string(struct xdr_buf *, const u32);
@@ -205,6 +203,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ unsigned int nwords; /* Remaining decode buffer length */
};
/*
@@ -217,12 +216,13 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32
extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
+extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
-extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
+extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 77d278defa70..cff40aa7db62 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -174,6 +174,8 @@ struct rpc_xprt {
unsigned long state; /* transport state */
unsigned char shutdown : 1, /* being shut down */
resvport : 1; /* use a reserved port */
+ unsigned int swapper; /* we're swapping over this
+ transport */
unsigned int bind_index; /* bind function index */
/*
@@ -316,6 +318,7 @@ void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
+int xs_swapper(struct rpc_xprt *xprt, int enable);
/*
* Reserved bit positions in xprt->state
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c84ec68eaec9..388e70601413 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -151,6 +151,7 @@ enum {
SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
SWP_BLKDEV = (1 << 6), /* its a block device */
+ SWP_FILE = (1 << 7), /* set after swap_activate success */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
};
@@ -301,7 +302,7 @@ static inline void scan_unevictable_unregister_node(struct node *node)
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
@@ -309,7 +310,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
return vm_swappiness;
}
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_uncharge_swap(swp_entry_t ent);
#else
static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
@@ -320,8 +321,14 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
+extern int swap_set_page_dirty(struct page *page);
extern void end_swap_bio_read(struct bio *bio, int err);
+int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block);
+int generic_swapfile_activate(struct swap_info_struct *, struct file *,
+ sector_t *);
+
/* linux/mm/swap_state.c */
extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
@@ -356,11 +363,12 @@ extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
extern int page_swapcount(struct page *);
+extern struct swap_info_struct *page_swap_info(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
#else
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 796f1ff0388c..4b94a61955df 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -58,6 +58,12 @@ struct thermal_zone_device_ops {
enum thermal_trip_type *);
int (*get_trip_temp) (struct thermal_zone_device *, int,
unsigned long *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int,
+ unsigned long);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int,
+ unsigned long *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int,
+ unsigned long);
int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
int (*notify) (struct thermal_zone_device *, int,
enum thermal_trip_type);
@@ -85,10 +91,18 @@ struct thermal_cooling_device {
((long)t-2732+5)/10 : ((long)t-2732-5)/10)
#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
+struct thermal_attr {
+ struct device_attribute attr;
+ char name[THERMAL_NAME_LENGTH];
+};
+
struct thermal_zone_device {
int id;
char type[THERMAL_NAME_LENGTH];
struct device device;
+ struct thermal_attr *trip_temp_attrs;
+ struct thermal_attr *trip_type_attrs;
+ struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
int tc1;
@@ -137,9 +151,9 @@ enum {
};
#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
-struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
- const struct thermal_zone_device_ops *, int tc1, int tc2,
- int passive_freq, int polling_freq);
+struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
+ void *, const struct thermal_zone_device_ops *, int tc1,
+ int tc2, int passive_freq, int polling_freq);
void thermal_zone_device_unregister(struct thermal_zone_device *);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
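
A hedged sketch of the updated registration call; the extra int argument is assumed here to be a mask of writable trip points, the delays are assumed to be in milliseconds, and all names are placeholders:

#include <linux/thermal.h>

static struct thermal_zone_device *
example_register_zone(void *devdata, const struct thermal_zone_device_ops *ops)
{
        /*
         * type, 2 trip points, trip 1 writable (mask 0x2, assumed meaning),
         * private data, ops, tc1, tc2, passive_freq 0 and polling_freq 1000.
         */
        return thermal_zone_device_register("example_thermal", 2, 0x2,
                                            devdata, ops, 0, 0, 0, 1000);
}
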
diff --git a/include/linux/time.h b/include/linux/time.h
index 179f4d6755fc..b51e664c83e7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs,
return ts_delta;
}
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#if (BITS_PER_LONG == 64)
+# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+#else
+# define KTIME_SEC_MAX LONG_MAX
+#endif
+
/*
* Returns true if the timespec is norm, false if denorm:
*/
-#define timespec_valid(ts) \
- (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
+static inline bool timespec_valid(const struct timespec *ts)
+{
+ /* Dates before 1970 are bogus */
+ if (ts->tv_sec < 0)
+ return false;
+ /* Can't have more nanoseconds than a second */
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+ if (!timespec_valid(ts))
+ return false;
+ /* Disallow values that could overflow ktime_t */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return false;
+ return true;
+}
extern void read_persistent_clock(struct timespec *ts);
extern void read_boot_clock(struct timespec *ts);
@@ -257,14 +282,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
#endif /* __KERNEL__ */
-#define NFDBITS __NFDBITS
-
-#define FD_SETSIZE __FD_SETSIZE
-#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
-#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
-#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
-#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
-
/*
* Names of the interval timers, and structure
* defining a timer setting:
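
A minimal sketch of where the stricter check matters: validating a user-supplied time before it reaches code that converts to ktime_t; the function name is hypothetical:

#include <linux/errno.h>
#include <linux/time.h>

static int example_check_settime(const struct timespec *ts)
{
        /*
         * timespec_valid() would still accept a tv_sec large enough to
         * overflow ktime_t; the strict variant rejects it.
         */
        if (!timespec_valid_strict(ts))
                return -EINVAL;

        return 0;
}
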
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 99bc88b1fc02..7c5ceb20e03a 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -232,7 +232,7 @@ struct timex {
* estimated error = NTP dispersion.
*/
extern unsigned long tick_usec; /* USER_HZ period (usec) */
-extern unsigned long tick_nsec; /* ACTHZ period (nsec) */
+extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
extern void ntp_init(void);
extern void ntp_clear(void);
diff --git a/include/linux/topology.h b/include/linux/topology.h
index e91cd43394df..fec12d667211 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -164,6 +164,7 @@ int arch_update_cpu_topology(void);
| 0*SD_SHARE_CPUPOWER \
| 0*SD_SHARE_PKG_RESOURCES \
| 0*SD_SERIALIZE \
+ | 1*SD_PREFER_SIBLING \
, \
.last_balance = jiffies, \
.balance_interval = 1, \
diff --git a/include/linux/uvcvideo.h b/include/linux/uvcvideo.h
index f46a53f060d7..3b081862b9e8 100644
--- a/include/linux/uvcvideo.h
+++ b/include/linux/uvcvideo.h
@@ -58,7 +58,8 @@ struct uvc_xu_control_mapping {
struct uvc_xu_control_query {
__u8 unit;
__u8 selector;
- __u8 query;
+ __u8 query; /* Video Class-Specific Request Code, */
+ /* defined in linux/usb/video.h A.8. */
__u16 size;
__u8 __user *data;
};
diff --git a/include/linux/v4l2-common.h b/include/linux/v4l2-common.h
new file mode 100644
index 000000000000..0fa8b64c3cdb
--- /dev/null
+++ b/include/linux/v4l2-common.h
@@ -0,0 +1,71 @@
+/*
+ * include/linux/v4l2-common.h
+ *
+ * Common V4L2 and V4L2 subdev definitions.
+ *
+ * Users are advised to #include this file either through videodev2.h
+ * (V4L2) or through v4l2-subdev.h (V4L2 subdev) rather than to refer
+ * to this file directly.
+ *
+ * Copyright (C) 2012 Nokia Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __V4L2_COMMON__
+#define __V4L2_COMMON__
+
+/*
+ *
+ * Selection interface definitions
+ *
+ */
+
+/* Current cropping area */
+#define V4L2_SEL_TGT_CROP 0x0000
+/* Default cropping area */
+#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
+/* Cropping bounds */
+#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
+/* Current composing area */
+#define V4L2_SEL_TGT_COMPOSE 0x0100
+/* Default composing area */
+#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
+/* Composing bounds */
+#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
+/* Current composing area plus all padding pixels */
+#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
+
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL \
+ V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL \
+ V4L2_SEL_TGT_COMPOSE
+
+/* Selection flags */
+#define V4L2_SEL_FLAG_GE (1 << 0)
+#define V4L2_SEL_FLAG_LE (1 << 1)
+#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+
+#endif /* __V4L2_COMMON__ */
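
As an illustration of how the targets and flags above are consumed, a userspace sketch (assuming a capture node at /dev/video0) that queries the cropping bounds through VIDIOC_G_SELECTION might read:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>	/* pulls in linux/v4l2-common.h */

int main(void)
{
	struct v4l2_selection sel;
	int fd = open("/dev/video0", O_RDWR);	/* assumed capture node */

	if (fd < 0)
		return 1;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_CROP_BOUNDS;

	if (ioctl(fd, VIDIOC_G_SELECTION, &sel) == 0)
		printf("crop bounds: %dx%d at (%d,%d)\n",
		       sel.r.width, sel.r.height, sel.r.left, sel.r.top);
	return 0;
}
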
diff --git a/include/linux/v4l2-subdev.h b/include/linux/v4l2-subdev.h
index 812019ee1e06..8c57ee9872bb 100644
--- a/include/linux/v4l2-subdev.h
+++ b/include/linux/v4l2-subdev.h
@@ -25,6 +25,7 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/v4l2-common.h>
#include <linux/v4l2-mediabus.h>
/**
@@ -123,27 +124,14 @@ struct v4l2_subdev_frame_interval_enum {
__u32 reserved[9];
};
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE (1 << 0)
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE (1 << 1)
-#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG (1 << 2)
-
-/* active cropping area */
-#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL 0x0000
-/* cropping bounds */
-#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS 0x0002
-/* current composing area */
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL 0x0100
-/* composing bounds */
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS 0x0102
-
-
/**
* struct v4l2_subdev_selection - selection info
*
* @which: either V4L2_SUBDEV_FORMAT_ACTIVE or V4L2_SUBDEV_FORMAT_TRY
* @pad: pad number, as reported by the media API
- * @target: selection target, used to choose one of possible rectangles
- * @flags: constraint flags
+ * @target: Selection target, used to choose one of the possible rectangles,
+ * defined in v4l2-common.h; V4L2_SEL_TGT_*.
+ * @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of the selection window
* @reserved: for future use, set to zero for now
*
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
new file mode 100644
index 000000000000..0a4f180a11d8
--- /dev/null
+++ b/include/linux/vfio.h
@@ -0,0 +1,445 @@
+/*
+ * VFIO API definition
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VFIO_H
+#define VFIO_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define VFIO_API_VERSION 0
+
+#ifdef __KERNEL__ /* Internal VFIO-core/bus driver API */
+
+#include <linux/iommu.h>
+#include <linux/mm.h>
+
+/**
+ * struct vfio_device_ops - VFIO bus driver device callbacks
+ *
+ * @open: Called when userspace creates new file descriptor for device
+ * @release: Called when userspace releases file descriptor for device
+ * @read: Perform read(2) on device file descriptor
+ * @write: Perform write(2) on device file descriptor
+ * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
+ * operations documented below
+ * @mmap: Perform mmap(2) on a region of the device file descriptor
+ */
+struct vfio_device_ops {
+ char *name;
+ int (*open)(void *device_data);
+ void (*release)(void *device_data);
+ ssize_t (*read)(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *device_data, const char __user *buf,
+ size_t count, loff_t *size);
+ long (*ioctl)(void *device_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *device_data, struct vm_area_struct *vma);
+};
+
+extern int vfio_add_group_dev(struct device *dev,
+ const struct vfio_device_ops *ops,
+ void *device_data);
+
+extern void *vfio_del_group_dev(struct device *dev);
+
+/**
+ * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
+ */
+struct vfio_iommu_driver_ops {
+ char *name;
+ struct module *owner;
+ void *(*open)(unsigned long arg);
+ void (*release)(void *iommu_data);
+ ssize_t (*read)(void *iommu_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *iommu_data, const char __user *buf,
+ size_t count, loff_t *size);
+ long (*ioctl)(void *iommu_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
+ int (*attach_group)(void *iommu_data,
+ struct iommu_group *group);
+ void (*detach_group)(void *iommu_data,
+ struct iommu_group *group);
+
+};
+
+extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+
+extern void vfio_unregister_iommu_driver(
+ const struct vfio_iommu_driver_ops *ops);
+
+/**
+ * offsetofend(TYPE, MEMBER)
+ *
+ * @TYPE: The type of the structure
+ * @MEMBER: The member within the structure to get the end offset of
+ *
+ * Simple helper macro for dealing with variable sized structures passed
+ * from user space. This allows us to easily determine if the provided
+ * structure is sized to include various fields.
+ */
+#define offsetofend(TYPE, MEMBER) ({ \
+ TYPE tmp; \
+ offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \
+
+#endif /* __KERNEL__ */
+
+/* Kernel & User level defines for VFIO IOCTLs. */
+
+/* Extensions */
+
+#define VFIO_TYPE1_IOMMU 1
+
+/*
+ * The IOCTL interface is designed for extensibility by embedding the
+ * structure length (argsz) and flags into structures passed between
+ * kernel and userspace. We therefore use the _IO() macro for these
+ * defines to avoid implicitly embedding a size into the ioctl request.
+ * As structure fields are added, argsz will increase to match and flag
+ * bits will be defined to indicate additional fields with valid data.
+ * It's *always* the caller's responsibility to indicate the size of
+ * the structure passed by setting argsz appropriately.
+ */
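
As an illustration of this argsz convention (not part of the patch), a kernel-side handler would typically copy only the fixed prefix it understands and reject callers whose argsz is smaller than that, using the offsetofend() helper defined earlier:

/* Illustrative only; assumes kernel context (linux/uaccess.h). */
static long example_get_info(void __user *arg)
{
	struct vfio_device_info info;
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)		/* caller's structure is too small */
		return -EINVAL;

	/* ... fill in flags, num_regions and num_irqs here ... */

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}
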
+
+#define VFIO_TYPE (';')
+#define VFIO_BASE 100
+
+/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
+
+/**
+ * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
+ *
+ * Report the version of the VFIO API. This allows us to bump the entire
+ * API version should we later need to add or change features in incompatible
+ * ways.
+ * Return: VFIO_API_VERSION
+ * Availability: Always
+ */
+#define VFIO_GET_API_VERSION _IO(VFIO_TYPE, VFIO_BASE + 0)
+
+/**
+ * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
+ *
+ * Check whether an extension is supported.
+ * Return: 0 if not supported, 1 (or some other positive integer) if supported.
+ * Availability: Always
+ */
+#define VFIO_CHECK_EXTENSION _IO(VFIO_TYPE, VFIO_BASE + 1)
+
+/**
+ * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
+ *
+ * Set the iommu to the given type. The type must be supported by an
+ * iommu driver as verified by calling CHECK_EXTENSION using the same
+ * type. A group must be set to this file descriptor before this
+ * ioctl is available. The IOMMU interfaces enabled by this call are
+ * specific to the value set.
+ * Return: 0 on success, -errno on failure
+ * Availability: When VFIO group attached
+ */
+#define VFIO_SET_IOMMU _IO(VFIO_TYPE, VFIO_BASE + 2)
+
+/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */
+
+/**
+ * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
+ * struct vfio_group_status)
+ *
+ * Retrieve information about the group. Fills in provided
+ * struct vfio_group_status. Caller sets argsz.
+ * Return: 0 on success, -errno on failure.
+ * Availability: Always
+ */
+struct vfio_group_status {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_GROUP_FLAGS_VIABLE (1 << 0)
+#define VFIO_GROUP_FLAGS_CONTAINER_SET (1 << 1)
+};
+#define VFIO_GROUP_GET_STATUS _IO(VFIO_TYPE, VFIO_BASE + 3)
+
+/**
+ * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
+ *
+ * Set the container for the VFIO group to the open VFIO file
+ * descriptor provided. Groups may only belong to a single
+ * container. Containers may, at their discretion, support multiple
+ * groups. Only when a container is set are all of the interfaces
+ * of the VFIO file descriptor and the VFIO group file descriptor
+ * available to the user.
+ * Return: 0 on success, -errno on failure.
+ * Availability: Always
+ */
+#define VFIO_GROUP_SET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 4)
+
+/**
+ * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
+ *
+ * Remove the group from the attached container. This is the
+ * opposite of the SET_CONTAINER call and returns the group to
+ * an initial state. All device file descriptors must be released
+ * prior to calling this interface. When removing the last group
+ * from a container, the IOMMU will be disabled and all state lost,
+ * effectively also returning the VFIO file descriptor to an initial
+ * state.
+ * Return: 0 on success, -errno on failure.
+ * Availability: When attached to container
+ */
+#define VFIO_GROUP_UNSET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 5)
+
+/**
+ * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
+ *
+ * Return a new file descriptor for the device object described by
+ * the provided string. The string should match a device listed in
+ * the devices subdirectory of the IOMMU group sysfs entry. The
+ * group containing the device must already be added to this context.
+ * Return: new file descriptor on success, -errno on failure.
+ * Availability: When attached to container
+ */
+#define VFIO_GROUP_GET_DEVICE_FD _IO(VFIO_TYPE, VFIO_BASE + 6)
+
+/* --------------- IOCTLs for DEVICE file descriptors --------------- */
+
+/**
+ * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
+ * struct vfio_device_info)
+ *
+ * Retrieve information about the device. Fills in provided
+ * struct vfio_device_info. Caller sets argsz.
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */
+#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
+ __u32 num_regions; /* Max region index + 1 */
+ __u32 num_irqs; /* Max IRQ index + 1 */
+};
+#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
+
+/**
+ * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
+ * struct vfio_region_info)
+ *
+ * Retrieve information about a device region. Caller provides
+ * struct vfio_region_info with index value set. Caller sets argsz.
+ * Implementation of region mapping is bus driver specific. This is
+ * intended to describe MMIO, I/O port, as well as bus specific
+ * regions (ex. PCI config space). Zero sized regions may be used
+ * to describe unimplemented regions (ex. unimplemented PCI BARs).
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_region_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */
+#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */
+#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */
+ __u32 index; /* Region index */
+ __u32 resv; /* Reserved for alignment */
+ __u64 size; /* Region size (bytes) */
+ __u64 offset; /* Region offset from start of device fd */
+};
+#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
+
+/**
+ * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
+ * struct vfio_irq_info)
+ *
+ * Retrieve information about a device IRQ. Caller provides
+ * struct vfio_irq_info with index value set. Caller sets argsz.
+ * Implementation of IRQ mapping is bus driver specific. Indexes
+ * using multiple IRQs are primarily intended to support MSI-like
+ * interrupt blocks. Zero count irq blocks may be used to describe
+ * unimplemented interrupt types.
+ *
+ * The EVENTFD flag indicates the interrupt index supports eventfd based
+ * signaling.
+ *
+ * The MASKABLE flag indicates the index supports MASK and UNMASK
+ * actions described below.
+ *
+ * AUTOMASKED indicates that after signaling, the interrupt line is
+ * automatically masked by VFIO and the user needs to unmask the line
+ * to receive new interrupts. This is primarily intended to distinguish
+ * level triggered interrupts.
+ *
+ * The NORESIZE flag indicates that the interrupt lines within the index
+ * are setup as a set and new subindexes cannot be enabled without first
+ * disabling the entire index. This is used for interrupts like PCI MSI
+ * and MSI-X where the driver may only use a subset of the available
+ * indexes, but VFIO needs to enable a specific number of vectors
+ * upfront. In the case of MSI-X, where the user can enable MSI-X and
+ * then add and unmask vectors, it's up to userspace to decide whether to
+ * allocate the maximum supported number of vectors or to tear down the
+ * setup and incrementally increase the vectors as each is enabled.
+ */
+struct vfio_irq_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IRQ_INFO_EVENTFD (1 << 0)
+#define VFIO_IRQ_INFO_MASKABLE (1 << 1)
+#define VFIO_IRQ_INFO_AUTOMASKED (1 << 2)
+#define VFIO_IRQ_INFO_NORESIZE (1 << 3)
+ __u32 index; /* IRQ index */
+ __u32 count; /* Number of IRQs within this index */
+};
+#define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9)
+
+/**
+ * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
+ *
+ * Set signaling, masking, and unmasking of interrupts. Caller provides
+ * struct vfio_irq_set with all fields set. 'start' and 'count' indicate
+ * the range of subindexes being specified.
+ *
+ * The DATA flags specify the type of data provided. If DATA_NONE, the
+ * operation performs the specified action immediately on the specified
+ * interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]:
+ * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
+ *
+ * DATA_BOOL allows sparse support for the same on arrays of interrupts.
+ * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
+ * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
+ * data = {1,0,1}
+ *
+ * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
+ * A value of -1 can be used to either de-assign interrupts if already
+ * assigned or skip unassigned interrupts. For example, to set an eventfd
+ * to be triggered for interrupts [0,0] and [0,2]:
+ * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
+ * data = {fd1, -1, fd2}
+ * If index [0,1] is previously set, two count = 1 ioctl calls would be
+ * required to set [0,0] and [0,2] without changing [0,1].
+ *
+ * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
+ * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
+ * from userspace (i.e. simulate hardware triggering).
+ *
+ * Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
+ * enables the interrupt index for the device. Individual subindex interrupts
+ * can be disabled using the -1 value for DATA_EVENTFD or the index can be
+ * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
+ *
+ * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
+ * ACTION_TRIGGER specifies kernel->user signaling.
+ */
+struct vfio_irq_set {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IRQ_SET_DATA_NONE (1 << 0) /* Data not present */
+#define VFIO_IRQ_SET_DATA_BOOL (1 << 1) /* Data is bool (u8) */
+#define VFIO_IRQ_SET_DATA_EVENTFD (1 << 2) /* Data is eventfd (s32) */
+#define VFIO_IRQ_SET_ACTION_MASK (1 << 3) /* Mask interrupt */
+#define VFIO_IRQ_SET_ACTION_UNMASK (1 << 4) /* Unmask interrupt */
+#define VFIO_IRQ_SET_ACTION_TRIGGER (1 << 5) /* Trigger interrupt */
+ __u32 index;
+ __u32 start;
+ __u32 count;
+ __u8 data[];
+};
+#define VFIO_DEVICE_SET_IRQS _IO(VFIO_TYPE, VFIO_BASE + 10)
+
+#define VFIO_IRQ_SET_DATA_TYPE_MASK (VFIO_IRQ_SET_DATA_NONE | \
+ VFIO_IRQ_SET_DATA_BOOL | \
+ VFIO_IRQ_SET_DATA_EVENTFD)
+#define VFIO_IRQ_SET_ACTION_TYPE_MASK (VFIO_IRQ_SET_ACTION_MASK | \
+ VFIO_IRQ_SET_ACTION_UNMASK | \
+ VFIO_IRQ_SET_ACTION_TRIGGER)
+/**
+ * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
+ *
+ * Reset a device.
+ */
+#define VFIO_DEVICE_RESET _IO(VFIO_TYPE, VFIO_BASE + 11)
+
+/*
+ * The VFIO-PCI bus driver makes use of the following fixed region and
+ * IRQ index mapping. Unimplemented regions return a size of zero.
+ * Unimplemented IRQ types return a count of zero.
+ */
+
+enum {
+ VFIO_PCI_BAR0_REGION_INDEX,
+ VFIO_PCI_BAR1_REGION_INDEX,
+ VFIO_PCI_BAR2_REGION_INDEX,
+ VFIO_PCI_BAR3_REGION_INDEX,
+ VFIO_PCI_BAR4_REGION_INDEX,
+ VFIO_PCI_BAR5_REGION_INDEX,
+ VFIO_PCI_ROM_REGION_INDEX,
+ VFIO_PCI_CONFIG_REGION_INDEX,
+ VFIO_PCI_NUM_REGIONS
+};
+
+enum {
+ VFIO_PCI_INTX_IRQ_INDEX,
+ VFIO_PCI_MSI_IRQ_INDEX,
+ VFIO_PCI_MSIX_IRQ_INDEX,
+ VFIO_PCI_NUM_IRQS
+};
+
+/* -------- API for Type1 VFIO IOMMU -------- */
+
+/**
+ * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_info)
+ *
+ * Retrieve information about the IOMMU object. Fills in provided
+ * struct vfio_iommu_info. Caller sets argsz.
+ *
+ * XXX Should we do these by CHECK_EXTENSION too?
+ */
+struct vfio_iommu_type1_info {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
+ __u64 iova_pgsizes; /* Bitmap of supported page sizes */
+};
+
+#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
+
+/**
+ * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_dma_map)
+ *
+ * Map process virtual addresses to IO virtual addresses using the
+ * provided struct vfio_dma_map. Caller sets argsz. READ and/or WRITE required.
+ */
+struct vfio_iommu_type1_dma_map {
+ __u32 argsz;
+ __u32 flags;
+#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
+#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
+ __u64 vaddr; /* Process virtual address */
+ __u64 iova; /* IO virtual address */
+ __u64 size; /* Size of mapping (bytes) */
+};
+
+#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
+
+/**
+ * VFIO_IOMMU_UNMAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 14, struct vfio_dma_unmap)
+ *
+ * Unmap IO virtual addresses using the provided struct vfio_dma_unmap.
+ * Caller sets argsz.
+ */
+struct vfio_iommu_type1_dma_unmap {
+ __u32 argsz;
+ __u32 flags;
+ __u64 iova; /* IO virtual address */
+ __u64 size; /* Size of mapping (bytes) */
+};
+
+#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
+
+#endif /* VFIO_H */
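
Taken together, the availability notes above imply a fixed ordering: open the container, attach a group, select an IOMMU type, then request device file descriptors. A condensed userspace sketch of that sequence (the group number and PCI address below are placeholders, and error handling is minimal) could be:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* The group number "26" and the PCI address are placeholders. */
int vfio_open_device(void)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	struct vfio_device_info info = { .argsz = sizeof(info) };
	int container, group, device;

	container = open("/dev/vfio/vfio", O_RDWR);
	if (container < 0)
		return -1;
	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
		return -1;
	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
		return -1;

	group = open("/dev/vfio/26", O_RDWR);
	if (group < 0)
		return -1;
	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;	/* not all devices in the group are bound to vfio */

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
	ioctl(device, VFIO_DEVICE_GET_INFO, &info);

	return device;
}
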
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 2039c5d3292e..7a147c8299ab 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -64,6 +64,7 @@
#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+#include <linux/v4l2-common.h>
/*
* Common stuff for both V4L1 and V4L2
@@ -273,6 +274,10 @@ struct v4l2_capability {
#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000
/* Is a video output device that supports multiplanar formats */
#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000
+/* Is a video mem-to-mem device that supports multiplanar formats */
+#define V4L2_CAP_VIDEO_M2M_MPLANE 0x00004000
+/* Is a video mem-to-mem device */
+#define V4L2_CAP_VIDEO_M2M 0x00008000
#define V4L2_CAP_TUNER 0x00010000 /* has a tuner */
#define V4L2_CAP_AUDIO 0x00020000 /* has audio support */
@@ -657,7 +662,7 @@ struct v4l2_buffer {
struct v4l2_plane *planes;
} m;
__u32 length;
- __u32 input;
+ __u32 reserved2;
__u32 reserved;
};
@@ -671,7 +676,6 @@ struct v4l2_buffer {
/* Buffer is ready, but the data contained within is corrupted. */
#define V4L2_BUF_FLAG_ERROR 0x0040
#define V4L2_BUF_FLAG_TIMECODE 0x0100 /* timecode field is valid */
-#define V4L2_BUF_FLAG_INPUT 0x0200 /* input field is valid */
#define V4L2_BUF_FLAG_PREPARED 0x0400 /* Buffer is prepared for queuing */
/* Cache handling flags */
#define V4L2_BUF_FLAG_NO_CACHE_INVALIDATE 0x0800
@@ -761,32 +765,12 @@ struct v4l2_crop {
struct v4l2_rect c;
};
-/* Hints for adjustments of selection rectangle */
-#define V4L2_SEL_FLAG_GE 0x00000001
-#define V4L2_SEL_FLAG_LE 0x00000002
-
-/* Selection targets */
-
-/* Current cropping area */
-#define V4L2_SEL_TGT_CROP_ACTIVE 0x0000
-/* Default cropping area */
-#define V4L2_SEL_TGT_CROP_DEFAULT 0x0001
-/* Cropping bounds */
-#define V4L2_SEL_TGT_CROP_BOUNDS 0x0002
-/* Current composing area */
-#define V4L2_SEL_TGT_COMPOSE_ACTIVE 0x0100
-/* Default composing area */
-#define V4L2_SEL_TGT_COMPOSE_DEFAULT 0x0101
-/* Composing bounds */
-#define V4L2_SEL_TGT_COMPOSE_BOUNDS 0x0102
-/* Current composing area plus all padding pixels */
-#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
-
/**
* struct v4l2_selection - selection info
* @type: buffer type (do not use *_MPLANE types)
- * @target: selection target, used to choose one of possible rectangles
- * @flags: constraints flags
+ * @target: Selection target, used to choose one of the possible rectangles;
+ * defined in v4l2-common.h; V4L2_SEL_TGT_*.
+ * @flags: constraint flags, defined in v4l2-common.h; V4L2_SEL_FLAG_*.
* @r: coordinates of selection window
* @reserved: for future use, rounds structure size to 64 bytes, set to zero
*
@@ -2039,6 +2023,8 @@ struct v4l2_modulator {
/* Flags for the 'capability' field */
#define V4L2_TUNER_CAP_LOW 0x0001
#define V4L2_TUNER_CAP_NORM 0x0002
+#define V4L2_TUNER_CAP_HWSEEK_BOUNDED 0x0004
+#define V4L2_TUNER_CAP_HWSEEK_WRAP 0x0008
#define V4L2_TUNER_CAP_STEREO 0x0010
#define V4L2_TUNER_CAP_LANG2 0x0020
#define V4L2_TUNER_CAP_SAP 0x0020
@@ -2046,6 +2032,8 @@ struct v4l2_modulator {
#define V4L2_TUNER_CAP_RDS 0x0080
#define V4L2_TUNER_CAP_RDS_BLOCK_IO 0x0100
#define V4L2_TUNER_CAP_RDS_CONTROLS 0x0200
+#define V4L2_TUNER_CAP_FREQ_BANDS 0x0400
+#define V4L2_TUNER_CAP_HWSEEK_PROG_LIM 0x0800
/* Flags for the 'rxsubchans' field */
#define V4L2_TUNER_SUB_MONO 0x0001
@@ -2064,19 +2052,36 @@ struct v4l2_modulator {
#define V4L2_TUNER_MODE_LANG1_LANG2 0x0004
struct v4l2_frequency {
- __u32 tuner;
- __u32 type; /* enum v4l2_tuner_type */
- __u32 frequency;
- __u32 reserved[8];
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 frequency;
+ __u32 reserved[8];
+};
+
+#define V4L2_BAND_MODULATION_VSB (1 << 1)
+#define V4L2_BAND_MODULATION_FM (1 << 2)
+#define V4L2_BAND_MODULATION_AM (1 << 3)
+
+struct v4l2_frequency_band {
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 index;
+ __u32 capability;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 modulation;
+ __u32 reserved[9];
};
struct v4l2_hw_freq_seek {
- __u32 tuner;
- __u32 type; /* enum v4l2_tuner_type */
- __u32 seek_upward;
- __u32 wrap_around;
- __u32 spacing;
- __u32 reserved[7];
+ __u32 tuner;
+ __u32 type; /* enum v4l2_tuner_type */
+ __u32 seek_upward;
+ __u32 wrap_around;
+ __u32 spacing;
+ __u32 rangelow;
+ __u32 rangehigh;
+ __u32 reserved[5];
};
/*
@@ -2644,6 +2649,10 @@ struct v4l2_create_buffers {
#define VIDIOC_QUERY_DV_TIMINGS _IOR('V', 99, struct v4l2_dv_timings)
#define VIDIOC_DV_TIMINGS_CAP _IOWR('V', 100, struct v4l2_dv_timings_cap)
+/* Experimental, this ioctl may change over the next couple of kernel
+ versions. */
+#define VIDIOC_ENUM_FREQ_BANDS _IOWR('V', 101, struct v4l2_frequency_band)
+
/* Reminder: when adding new ioctls please add support for them to
drivers/media/video/v4l2-compat-ioctl32.c as well! */
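
The new VIDIOC_ENUM_FREQ_BANDS ioctl is enumerated like the other VIDIOC_ENUM_* calls: increment index until the driver returns EINVAL. A minimal sketch (assuming a radio tuner node at /dev/radio0 and tuner 0) might be:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency_band band;
	int fd = open("/dev/radio0", O_RDWR);	/* assumed radio node */
	__u32 i;

	if (fd < 0)
		return 1;

	for (i = 0; ; i++) {
		memset(&band, 0, sizeof(band));
		band.tuner = 0;
		band.type = V4L2_TUNER_RADIO;
		band.index = i;

		if (ioctl(fd, VIDIOC_ENUM_FREQ_BANDS, &band) < 0)
			break;	/* EINVAL once the last band has been returned */

		/* Units are 62.5 Hz if V4L2_TUNER_CAP_LOW is set, else 62.5 kHz. */
		printf("band %u: %u..%u, capability 0x%x\n",
		       band.index, band.rangelow, band.rangehigh,
		       band.capability);
	}
	return 0;
}
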
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index e0edb40ca7aa..6d8e61c48563 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -37,8 +37,14 @@
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
-#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */
+#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */
#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
+#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
+
+#ifndef __KERNEL__
+/* Old (deprecated) name for VIRTIO_BLK_F_WCE. */
+#define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE
+#endif
#define VIRTIO_BLK_ID_BYTES 20 /* ID string length */
@@ -69,6 +75,8 @@ struct virtio_blk_config {
/* optimal sustained I/O size in logical blocks. */
__u32 opt_io_size;
+ /* writeback mode (if VIRTIO_BLK_F_CONFIG_WCE) */
+ __u8 wce;
} __attribute__((packed));
/*
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 7529b854b7fd..270fb22c5811 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -32,7 +32,7 @@
#define VIRTIO_ID_NET 1 /* virtio net */
#define VIRTIO_ID_BLOCK 2 /* virtio block */
#define VIRTIO_ID_CONSOLE 3 /* virtio console */
-#define VIRTIO_ID_RNG 4 /* virtio ring */
+#define VIRTIO_ID_RNG 4 /* virtio rng */
#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
#define VIRTIO_ID_RPMSG 7 /* virtio remote processor messaging */
#define VIRTIO_ID_SCSI 8 /* virtio scsi */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 06f8e3858251..57f7b1091511 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGSTEAL_DIRECT),
FOR_ALL_ZONES(PGSCAN_KSWAPD),
FOR_ALL_ZONES(PGSCAN_DIRECT),
+ PGSCAN_DIRECT_THROTTLE,
#ifdef CONFIG_NUMA
PGSCAN_ZONE_RECLAIM_FAILED,
#endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dcdfc2bda922..6071e911c7f4 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -32,7 +32,7 @@ struct vm_struct {
struct page **pages;
unsigned int nr_pages;
phys_addr_t phys_addr;
- void *caller;
+ const void *caller;
};
/*
@@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller);
+ pgprot_t prot, int node, const void *caller);
extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
@@ -85,14 +85,15 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
- unsigned long flags, void *caller);
+ unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
- void *caller);
+ const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
+extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 65efb92da996..ad2cfd53dadc 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -179,11 +179,6 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
-static inline void zap_zone_vm_stats(struct zone *zone)
-{
- memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
-}
-
extern void inc_zone_state(struct zone *, enum zone_stat_item);
#ifdef CONFIG_SMP
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 6d0a0fcd80e7..50c3e8fa06a8 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -104,7 +104,6 @@ static inline void wait_on_inode(struct inode *inode)
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
-
/*
* mm/page-writeback.c
*/
@@ -189,9 +188,4 @@ void tag_pages_for_writeback(struct address_space *mapping,
void account_page_redirty(struct page *page);
-/* pdflush.c */
-extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
- read-only. */
-
-
#endif /* WRITEBACK_H */
diff --git a/include/media/adv7393.h b/include/media/adv7393.h
new file mode 100644
index 000000000000..b28edf351842
--- /dev/null
+++ b/include/media/adv7393.h
@@ -0,0 +1,28 @@
+/*
+ * ADV7393 header file
+ *
+ * Copyright (C) 2010-2012 ADVANSEE - http://www.advansee.com/
+ * Benoît Thébaudeau <benoit.thebaudeau@advansee.com>
+ *
+ * Based on ADV7343 driver,
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ADV7393_H
+#define ADV7393_H
+
+#define ADV7393_COMPOSITE_ID (0)
+#define ADV7393_COMPONENT_ID (1)
+#define ADV7393_SVIDEO_ID (2)
+
+#endif /* End of #ifndef ADV7393_H */
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index bd8217c2577c..d8f6ab1943e4 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -50,6 +50,8 @@ struct vpif_display_config {
const char **output;
int output_count;
const char *card_name;
+ bool ch2_clip_en;
+ bool ch3_clip_en;
};
struct vpif_input {
diff --git a/include/media/gpio-ir-recv.h b/include/media/gpio-ir-recv.h
index 67797bf5d432..0142736a59db 100644
--- a/include/media/gpio-ir-recv.h
+++ b/include/media/gpio-ir-recv.h
@@ -14,8 +14,10 @@
#define __GPIO_IR_RECV_H__
struct gpio_ir_recv_platform_data {
- int gpio_nr;
- bool active_low;
+ int gpio_nr;
+ bool active_low;
+ u64 allowed_protos;
+ const char *map_name;
};
#endif /* __GPIO_IR_RECV_H__ */
diff --git a/include/media/mt9t001.h b/include/media/mt9t001.h
index e839a78bb9c5..03fd63edd133 100644
--- a/include/media/mt9t001.h
+++ b/include/media/mt9t001.h
@@ -3,6 +3,7 @@
struct mt9t001_platform_data {
unsigned int clk_pol:1;
+ unsigned int ext_clk;
};
#endif
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 7395c815939d..58f914a40b20 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -180,6 +180,9 @@ enum {
/* module adv7343: just ident 7343 */
V4L2_IDENT_ADV7343 = 7343,
+ /* module adv7393: just ident 7393 */
+ V4L2_IDENT_ADV7393 = 7393,
+
/* module saa7706h: just ident 7706 */
V4L2_IDENT_SAA7706H = 7706,
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index a056e6ee1b68..5c416cdc88d5 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -100,6 +100,9 @@ struct video_device
/* Control handler associated with this device node. May be NULL. */
struct v4l2_ctrl_handler *ctrl_handler;
+ /* vb2_queue associated with this device node. May be NULL. */
+ struct vb2_queue *queue;
+
/* Priority state. If NULL, then v4l2_dev->prio will be used. */
struct v4l2_prio_state *prio;
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index d8b76f7392f8..e614c9c15e56 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -230,6 +230,8 @@ struct v4l2_ioctl_ops {
struct v4l2_frequency *a);
int (*vidioc_s_frequency) (struct file *file, void *fh,
struct v4l2_frequency *a);
+ int (*vidioc_enum_freq_bands) (struct file *file, void *fh,
+ struct v4l2_frequency_band *band);
/* Sliced VBI cap */
int (*vidioc_g_sliced_vbi_cap) (struct file *file, void *fh,
@@ -295,28 +297,19 @@ struct v4l2_ioctl_ops {
#define V4L2_DEBUG_IOCTL 0x01
#define V4L2_DEBUG_IOCTL_ARG 0x02
-/* Use this macro for non-I2C drivers. Pass the driver name as the first arg. */
-#define v4l_print_ioctl(name, cmd) \
- do { \
- printk(KERN_DEBUG "%s: ", name); \
- v4l_printk_ioctl(cmd); \
- } while (0)
-
-/* Use this macro in I2C drivers where 'client' is the struct i2c_client
- pointer */
-#define v4l_i2c_print_ioctl(client, cmd) \
- do { \
- v4l_client_printk(KERN_DEBUG, client, ""); \
- v4l_printk_ioctl(cmd); \
- } while (0)
-
/* Video standard functions */
extern const char *v4l2_norm_to_name(v4l2_std_id id);
extern void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
extern int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name);
-/* Prints the ioctl in a human-readable format */
-extern void v4l_printk_ioctl(unsigned int cmd);
+/* Prints the ioctl in a human-readable format. If prefix != NULL,
+ then do printk(KERN_DEBUG "%s: ", prefix) first. */
+extern void v4l_printk_ioctl(const char *prefix, unsigned int cmd);
+
+/* Internal use only: get the mutex (if any) that we need to lock for the
+ given command. */
+struct video_device;
+extern struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev, unsigned cmd);
/* names for fancy debug output */
extern const char *v4l2_field_names[];
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 90ed895e217d..8c6e825940e5 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -72,7 +72,6 @@ struct videobuf_buffer {
unsigned int height;
unsigned int bytesperline; /* use only if != 0 */
unsigned long size;
- unsigned int input;
enum v4l2_field field;
enum videobuf_state state;
struct list_head stream; /* QBUF/DQBUF list */
@@ -142,7 +141,6 @@ struct videobuf_queue {
wait_queue_head_t wait; /* wait if queue is empty */
enum v4l2_buf_type type;
- unsigned int inputs; /* for V4L2_BUF_FLAG_INPUT */
unsigned int msize;
enum v4l2_field field;
enum v4l2_field last; /* for field=V4L2_FIELD_ALTERNATE */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index a15d1f1b319e..8dd9b6cc296b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -244,12 +244,23 @@ struct vb2_ops {
void (*buf_queue)(struct vb2_buffer *vb);
};
+struct v4l2_fh;
+
/**
* struct vb2_queue - a videobuf queue
*
* @type: queue type (see V4L2_BUF_TYPE_* in linux/videodev2.h
* @io_modes: supported io methods (see vb2_io_modes enum)
* @io_flags: additional io flags (see vb2_fileio_flags enum)
+ * @lock: pointer to a mutex that protects the vb2_queue struct. The
+ * driver can set this to a mutex to let the v4l2 core serialize
+ * the queuing ioctls. If the driver wants to handle locking
+ * itself, then this should be set to NULL. This lock is not used
+ * by the videobuf2 core API.
+ * @owner: The filehandle that 'owns' the buffers, i.e. the filehandle
+ * that called reqbufs, create_buffers or started fileio.
+ * This field is not used by the videobuf2 core API, but it allows
+ * drivers to easily associate an owner filehandle with the queue.
* @ops: driver-specific callbacks
* @mem_ops: memory allocator specific callbacks
* @drv_priv: driver private data
@@ -273,6 +284,8 @@ struct vb2_queue {
enum v4l2_buf_type type;
unsigned int io_modes;
unsigned int io_flags;
+ struct mutex *lock;
+ struct v4l2_fh *owner;
const struct vb2_ops *ops;
const struct vb2_mem_ops *mem_ops;
@@ -404,4 +417,45 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
return 0;
}
+/*
+ * The following functions are not part of the vb2 core API, but are simple
+ * helper functions that you can use in your struct v4l2_file_operations,
+ * struct v4l2_ioctl_ops and struct vb2_ops. They will serialize if vb2_queue->lock
+ * or video_device->lock is set, and they will set and test vb2_queue->owner
+ * to check if the calling filehandle is permitted to do the queuing operation.
+ */
+
+/* struct v4l2_ioctl_ops helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p);
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p);
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p);
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p);
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i);
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i);
+
+/* struct v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
+int vb2_fop_release(struct file *file);
+ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos);
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos);
+unsigned int vb2_fop_poll(struct file *file, poll_table *wait);
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags);
+#endif
+
+/* struct vb2_ops helpers, only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq);
+void vb2_ops_wait_finish(struct vb2_queue *vq);
+
#endif /* _MEDIA_VIDEOBUF2_CORE_H */
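
The helpers declared above are meant to be plugged directly into a driver's ops tables once vb2_queue->lock and video_device->queue are set. A sketch of that wiring, with hypothetical foo_* driver names, could look like:

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>

struct foo_dev {			/* hypothetical driver state */
	struct video_device vdev;
	struct vb2_queue queue;
	struct mutex lock;
};

static const struct v4l2_ioctl_ops foo_ioctl_ops = {
	/* ... format/query ioctls elided ... */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
};

static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

static int foo_setup(struct foo_dev *dev)
{
	mutex_init(&dev->lock);
	dev->queue.lock = &dev->lock;	/* let the v4l2 core serialize queuing */
	dev->vdev.lock = &dev->lock;
	dev->vdev.queue = &dev->queue;	/* used by the vb2_fop_* helpers */
	dev->vdev.fops = &foo_fops;
	dev->vdev.ioctl_ops = &foo_ioctl_ops;
	/* ... vb2_queue_init(&dev->queue) and video_register_device() ... */
	return 0;
}
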
diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h
index 19ae1e350567..8197f87d6c61 100644
--- a/include/media/videobuf2-dma-contig.h
+++ b/include/media/videobuf2-dma-contig.h
@@ -1,5 +1,5 @@
/*
- * videobuf2-dma-coherent.h - DMA coherent memory allocator for videobuf2
+ * videobuf2-dma-contig.h - DMA contig memory allocator for videobuf2
*
* Copyright (C) 2010 Samsung Electronics
*
@@ -10,8 +10,8 @@
* the Free Software Foundation.
*/
-#ifndef _MEDIA_VIDEOBUF2_DMA_COHERENT_H
-#define _MEDIA_VIDEOBUF2_DMA_COHERENT_H
+#ifndef _MEDIA_VIDEOBUF2_DMA_CONTIG_H
+#define _MEDIA_VIDEOBUF2_DMA_CONTIG_H
#include <media/videobuf2-core.h>
#include <linux/dma-mapping.h>
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 493fa0c79005..3d254e10ff30 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,7 @@ enum ieee80211_band {
* is not permitted.
* @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
* is not permitted.
+ * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel.
*/
enum ieee80211_channel_flags {
IEEE80211_CHAN_DISABLED = 1<<0,
@@ -104,6 +105,7 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_RADAR = 1<<3,
IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+ IEEE80211_CHAN_NO_OFDM = 1<<6,
};
#define IEEE80211_CHAN_NO_HT40 \
diff --git a/include/net/codel.h b/include/net/codel.h
index 550debfc2403..389cf621161d 100644
--- a/include/net/codel.h
+++ b/include/net/codel.h
@@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
}
}
} else if (drop) {
+ u32 delta;
+
if (params->ecn && INET_ECN_set_ce(skb)) {
stats->ecn_mark++;
} else {
@@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
* assume that the drop rate that controlled the queue on the
* last cycle is a good starting point to control it now.
*/
- if (codel_time_before(now - vars->drop_next,
+ delta = vars->count - vars->lastcount;
+ if (delta > 1 &&
+ codel_time_before(now - vars->drop_next,
16 * params->interval)) {
- vars->count = (vars->count - vars->lastcount) | 1;
+ vars->count = delta;
/* we dont care if rec_inv_sqrt approximation
* is not very precise :
* Next Newton steps will correct it quadratically.
diff --git a/include/net/dst.h b/include/net/dst.h
index baf597890064..621e3513ef5e 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,7 +110,7 @@ struct dst_entry {
};
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[RTAX_MAX];
+extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
#define __DST_METRICS_PTR(Y) \
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 00cbb4384c79..9e34c877a770 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -96,14 +96,15 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
const __be16 sport,
const __be16 dport)
{
- struct sock *sk;
+ struct sock *sk = skb_steal_sock(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
+ if (sk)
return sk;
- else return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
- &ipv6_hdr(skb)->saddr, sport,
- &ipv6_hdr(skb)->daddr, ntohs(dport),
- inet6_iif(skb));
+
+ return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
+ &ipv6_hdr(skb)->saddr, sport,
+ &ipv6_hdr(skb)->daddr, ntohs(dport),
+ inet6_iif(skb));
}
extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 5ee66f517b4f..ba1d3615acbb 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
void (*send_check)(struct sock *sk, struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
+ void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
diff --git a/include/net/ip.h b/include/net/ip.h
index bd5e444a19ce..5a5d84d3d2c6 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork);
-extern int ip_send_skb(struct sk_buff *skb);
+extern int ip_send_skb(struct net *net, struct sk_buff *skb);
extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
extern void ip_flush_pending_frames(struct sock *sk);
extern struct sk_buff *ip_make_skb(struct sock *sk,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index e69c3a47153d..926142ed8d7a 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -21,6 +21,7 @@
#include <linux/rcupdate.h>
#include <net/fib_rules.h>
#include <net/inetpeer.h>
+#include <linux/percpu.h>
struct fib_config {
u8 fc_dst_len;
@@ -54,6 +55,7 @@ struct fib_nh_exception {
u32 fnhe_pmtu;
__be32 fnhe_gw;
unsigned long fnhe_expires;
+ struct rtable __rcu *fnhe_rth;
unsigned long fnhe_stamp;
};
@@ -81,8 +83,8 @@ struct fib_nh {
__be32 nh_gw;
__be32 nh_saddr;
int nh_saddr_genid;
- struct rtable *nh_rth_output;
- struct rtable *nh_rth_input;
+ struct rtable __rcu * __percpu *nh_pcpu_rth_output;
+ struct rtable __rcu *nh_rth_input;
struct fnhe_hash_bucket *nh_exceptions;
};
diff --git a/include/net/llc.h b/include/net/llc.h
index 226c846cab08..f2d0fc570527 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -133,7 +133,7 @@ extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_station_init(void);
+extern void llc_station_init(void);
extern void llc_station_exit(void);
#ifdef CONFIG_PROC_FS
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e1ce1048fe5f..4a045cda9c60 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 pid; /* netlink pid of destroyer */
+ struct timer_list timeout;
};
static inline struct nf_conntrack_ecache *
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 0ffb8e31f3cd..1474dd65c66f 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -61,8 +61,6 @@ struct netns_ipv4 {
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
int sysctl_icmp_errors_use_inbound_ifaddr;
- int sysctl_rt_cache_rebuild_count;
- int current_rt_cache_rebuild_count;
unsigned int sysctl_ping_group_range[2];
long sysctl_tcp_mem[3];
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 057f2d315567..929528c73fe8 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -52,6 +52,8 @@ struct net_protocol {
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
+ void (*early_demux)(struct sk_buff *skb);
+
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb,
diff --git a/include/net/route.h b/include/net/route.h
index c29ef2733f2d..776a27f1ab78 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -30,6 +30,7 @@
#include <net/inet_sock.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
+#include <linux/rcupdate.h>
#include <linux/route.h>
#include <linux/ip.h>
#include <linux/cache.h>
@@ -56,6 +57,8 @@ struct rtable {
/* Miscellaneous cached information */
u32 rt_pmtu;
+
+ struct list_head rt_uncached;
};
static inline bool rt_is_input_route(const struct rtable *rt)
@@ -106,6 +109,7 @@ extern struct ip_rt_acct __percpu *ip_rt_acct;
struct in_device;
extern int ip_rt_init(void);
extern void rt_cache_flush(struct net *net, int how);
+extern void rt_flush_dev(struct net_device *dev);
extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
struct sock *sk);
@@ -157,8 +161,22 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
return ip_route_output_key(net, fl4);
}
-extern int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin);
+extern int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin);
+
+static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin)
+{
+ int err;
+
+ rcu_read_lock();
+ err = ip_route_input_noref(skb, dst, src, tos, devin);
+ if (!err)
+ skb_dst_force(skb);
+ rcu_read_unlock();
+
+ return err;
+}
extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
int oif, u32 mark, u8 protocol, int flow_flags);
diff --git a/include/net/scm.h b/include/net/scm.h
index 079d7887dac1..7dc0854f0b38 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -70,9 +70,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm)
}
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
- struct scm_cookie *scm)
+ struct scm_cookie *scm, bool forcecreds)
{
memset(scm, 0, sizeof(*scm));
+ if (forcecreds)
+ scm_set_cred(scm, task_tgid(current), current_cred());
unix_get_peersec_dgram(sock, scm);
if (msg->msg_controllen <= 0)
return 0;
diff --git a/include/net/sock.h b/include/net/sock.h
index e067f8c18f88..72132aef53fc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -218,6 +218,7 @@ struct cg_proto;
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
+ * @sk_gso_max_segs: Maximum number of GSO segments
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -338,6 +339,7 @@ struct sock {
netdev_features_t sk_route_nocaps;
int sk_gso_type;
unsigned int sk_gso_max_size;
+ u16 sk_gso_max_segs;
int sk_rcvlowat;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
@@ -621,6 +623,7 @@ enum sock_flags {
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
+ SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_TX_HARDWARE, /* %SOF_TIMESTAMPING_TX_HARDWARE */
SOCK_TIMESTAMPING_TX_SOFTWARE, /* %SOF_TIMESTAMPING_TX_SOFTWARE */
SOCK_TIMESTAMPING_RX_HARDWARE, /* %SOF_TIMESTAMPING_RX_HARDWARE */
@@ -658,6 +661,26 @@ static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
return test_bit(flag, &sk->sk_flags);
}
+#ifdef CONFIG_NET
+extern struct static_key memalloc_socks;
+static inline int sk_memalloc_socks(void)
+{
+ return static_key_false(&memalloc_socks);
+}
+#else
+
+static inline int sk_memalloc_socks(void)
+{
+ return 0;
+}
+
+#endif
+
+static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
+{
+ return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
+}
+
static inline void sk_acceptq_removed(struct sock *sk)
{
sk->sk_ack_backlog--;
@@ -733,8 +756,13 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
return 0;
}
+extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ return __sk_backlog_rcv(sk, skb);
+
return sk->sk_backlog_rcv(sk, skb);
}
@@ -798,6 +826,8 @@ extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);
+extern void sk_set_memalloc(struct sock *sk);
+extern void sk_clear_memalloc(struct sock *sk);
extern int sk_wait_data(struct sock *sk, long *timeo);
@@ -913,7 +943,7 @@ struct proto {
#ifdef SOCK_REFCNT_DEBUG
atomic_t socks;
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
/*
* cgroup specific init/deinit functions. Called once for all
* protocols that implement it, from cgroups populate function.
@@ -994,7 +1024,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
-#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
struct cg_proto *cg_proto)
@@ -1301,12 +1331,14 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size)
__sk_mem_schedule(sk, size, SK_MEM_SEND);
}
-static inline bool sk_rmem_schedule(struct sock *sk, int size)
+static inline bool
+sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size)
{
if (!sk_has_account(sk))
return true;
- return size <= sk->sk_forward_alloc ||
- __sk_mem_schedule(sk, size, SK_MEM_RECV);
+ return size <= sk->sk_forward_alloc ||
+ __sk_mem_schedule(sk, size, SK_MEM_RECV) ||
+ skb_pfmemalloc(skb);
}
static inline void sk_mem_reclaim(struct sock *sk)
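
The SOCK_MEMALLOC plumbing above targets kernel sockets that the VM depends on for swapping (e.g. swap over a network block device). A minimal sketch, assuming an already-created kernel socket, of how such a user might flag its socket and pick allocation flags:

#include <net/sock.h>

/*
 * Sketch only: flag a kernel socket that swap I/O depends on, so its
 * allocations may dip into the PFMEMALLOC reserves guarded by the
 * SOCK_MEMALLOC/sk_memalloc_socks() machinery above.
 */
static void example_mark_swap_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	sk_set_memalloc(sk);
	sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;

	/*
	 * Atomic allocations made on behalf of this socket would then use
	 * sk_gfp_atomic(sk, GFP_ATOMIC) to inherit __GFP_MEMALLOC.
	 */
}
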
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e19124b84cd2..1f000ffe7075 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags);
void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d9509eb29b80..976a81abe1a2 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -213,6 +213,9 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct tasklet_hrtimer mtimer;
+ /* used to fix curlft->add_time when changing date */
+ long saved_tmo;
+
/* Last used time */
unsigned long lastused;
@@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x)
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER 1
+#define XFRM_SOFT_EXPIRE 2
enum {
XFRM_STATE_VOID,
@@ -288,6 +292,8 @@ struct xfrm_policy_afinfo {
struct flowi *fl,
int reverse);
int (*get_tos)(const struct flowi *fl);
+ void (*init_dst)(struct net *net,
+ struct xfrm_dst *dst);
int (*init_path)(struct xfrm_dst *path,
struct dst_entry *dst,
int nfheader_len);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
new file mode 100644
index 000000000000..260470e72483
--- /dev/null
+++ b/include/ras/ras_event.h
@@ -0,0 +1,102 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ras
+#define TRACE_INCLUDE_FILE ras_event
+
+#if !defined(_TRACE_HW_EVENT_MC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HW_EVENT_MC_H
+
+#include <linux/tracepoint.h>
+#include <linux/edac.h>
+#include <linux/ktime.h>
+
+/*
+ * Hardware Events Report
+ *
+ * These events are generated when hardware detects a corrected or
+ * uncorrected error, and are meant to replace the current APIs for
+ * reporting errors defined in both the EDAC and MCE subsystems.
+ *
+ * FIXME: Add events for handling memory errors originating from the
+ * MCE subsystem.
+ */
+
+/*
+ * Hardware-independent Memory Controller specific events
+ */
+
+/*
+ * Default error mechanisms for Memory Controller errors (CE and UE)
+ */
+TRACE_EVENT(mc_event,
+
+ TP_PROTO(const unsigned int err_type,
+ const char *error_msg,
+ const char *label,
+ const int error_count,
+ const u8 mc_index,
+ const s8 top_layer,
+ const s8 mid_layer,
+ const s8 low_layer,
+ unsigned long address,
+ const u8 grain_bits,
+ unsigned long syndrome,
+ const char *driver_detail),
+
+ TP_ARGS(err_type, error_msg, label, error_count, mc_index,
+ top_layer, mid_layer, low_layer, address, grain_bits,
+ syndrome, driver_detail),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, error_type )
+ __string( msg, error_msg )
+ __string( label, label )
+ __field( u16, error_count )
+ __field( u8, mc_index )
+ __field( s8, top_layer )
+ __field( s8, middle_layer )
+ __field( s8, lower_layer )
+ __field( long, address )
+ __field( u8, grain_bits )
+ __field( long, syndrome )
+ __string( driver_detail, driver_detail )
+ ),
+
+ TP_fast_assign(
+ __entry->error_type = err_type;
+ __assign_str(msg, error_msg);
+ __assign_str(label, label);
+ __entry->error_count = error_count;
+ __entry->mc_index = mc_index;
+ __entry->top_layer = top_layer;
+ __entry->middle_layer = mid_layer;
+ __entry->lower_layer = low_layer;
+ __entry->address = address;
+ __entry->grain_bits = grain_bits;
+ __entry->syndrome = syndrome;
+ __assign_str(driver_detail, driver_detail);
+ ),
+
+ TP_printk("%d %s error%s:%s%s on %s (mc:%d location:%d:%d:%d address:0x%08lx grain:%d syndrome:0x%08lx%s%s)",
+ __entry->error_count,
+ (__entry->error_type == HW_EVENT_ERR_CORRECTED) ? "Corrected" :
+ ((__entry->error_type == HW_EVENT_ERR_FATAL) ?
+ "Fatal" : "Uncorrected"),
+ __entry->error_count > 1 ? "s" : "",
+ ((char *)__get_str(msg))[0] ? " " : "",
+ __get_str(msg),
+ __get_str(label),
+ __entry->mc_index,
+ __entry->top_layer,
+ __entry->middle_layer,
+ __entry->lower_layer,
+ __entry->address,
+ 1 << __entry->grain_bits,
+ __entry->syndrome,
+ ((char *)__get_str(driver_detail))[0] ? " " : "",
+ __get_str(driver_detail))
+);
+
+#endif /* _TRACE_HW_EVENT_MC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
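
TRACE_EVENT(mc_event) generates a trace_mc_event() call for the reporting code to emit; an illustrative call site (every value below is a placeholder) mapping onto the TP_PROTO above would be:

#define CREATE_TRACE_POINTS
#include <ras/ras_event.h>

/* Illustrative only: all values are placeholders. */
static void example_report_corrected_error(void)
{
	trace_mc_event(HW_EVENT_ERR_CORRECTED,	/* err_type */
		       "ECC error",		/* error_msg */
		       "DIMM_A1",		/* label */
		       1,			/* error_count */
		       0,			/* mc_index */
		       0, 1, -1,		/* top/mid/low layer */
		       0x12345000,		/* address */
		       12,			/* grain_bits: grain of 4 KiB */
		       0,			/* syndrome */
		       "");			/* driver_detail */
}
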
diff --git a/include/sound/es1688.h b/include/sound/es1688.h
index 3ec7ecbe2502..f752dd33dfaf 100644
--- a/include/sound/es1688.h
+++ b/include/sound/es1688.h
@@ -29,6 +29,7 @@
#define ES1688_HW_AUTO 0x0000
#define ES1688_HW_688 0x0001
#define ES1688_HW_1688 0x0002
+#define ES1688_HW_UNDEF 0x0003
struct snd_es1688 {
unsigned long port; /* port of ESS chip */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c75c0d1a85e2..cdca2ab1e711 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -1075,7 +1075,8 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
- * Get a string naming the direction of a stream
+ * snd_pcm_stream_str - Get a string naming the direction of a stream
+ * @substream: the pcm substream instance
*/
static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
{
diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
index 0c3c2fb0f939..fe8590cac5c2 100644
--- a/include/sound/tea575x-tuner.h
+++ b/include/sound/tea575x-tuner.h
@@ -37,6 +37,10 @@
struct snd_tea575x;
struct snd_tea575x_ops {
+ /* Drivers using snd_tea575x must either define read_val and write_val */
+ void (*write_val)(struct snd_tea575x *tea, u32 val);
+ u32 (*read_val)(struct snd_tea575x *tea);
+ /* Or define the 3 pin functions */
void (*set_pins)(struct snd_tea575x *tea, u8 pins);
u8 (*get_pins)(struct snd_tea575x *tea);
void (*set_direction)(struct snd_tea575x *tea, bool output);
@@ -49,6 +53,7 @@ struct snd_tea575x {
int radio_nr; /* radio_nr */
bool tea5759; /* 5759 chip is present */
bool cannot_read_data; /* Device cannot read the data pin */
+ bool cannot_mute; /* Device cannot mute */
bool mute; /* Device is muted? */
bool stereo; /* receiving stereo */
bool tuned; /* tuned to a station */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 128ce46fa48a..015cea01ae39 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -503,8 +503,6 @@ struct se_cmd {
u32 se_ordered_id;
/* Total size in bytes associated with command */
u32 data_length;
- /* SCSI Presented Data Transfer Length */
- u32 cmd_spdtl;
u32 residual_count;
u32 orig_fe_lun;
/* Persistent Reservation key */
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index 9fe3a36646e9..d6fd8e5b14b7 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -30,6 +30,7 @@
{(unsigned long)__GFP_COMP, "GFP_COMP"}, \
{(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
{(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
+ {(unsigned long)__GFP_MEMALLOC, "GFP_MEMALLOC"}, \
{(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
{(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
{(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
new file mode 100644
index 000000000000..422df19de732
--- /dev/null
+++ b/include/trace/events/random.h
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bytes = bytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bytes %d caller %pF",
+ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+TRACE_EVENT(credit_entropy_bits,
+ TP_PROTO(const char *pool_name, int bits, int entropy_count,
+ int entropy_total, unsigned long IP),
+
+ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bits )
+ __field( int, entropy_count )
+ __field( int, entropy_total )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bits = bits;
+ __entry->entropy_count = entropy_count;
+ __entry->entropy_total = entropy_total;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+ "caller %pF", __entry->pool_name, __entry->bits,
+ __entry->entropy_count, __entry->entropy_total,
+ (void *)__entry->IP)
+);
+
+TRACE_EVENT(get_random_bytes,
+ TP_PROTO(int nbytes, unsigned long IP),
+
+ TP_ARGS(nbytes, IP),
+
+ TP_STRUCT__entry(
+ __field( int, nbytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->nbytes = nbytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+);
+
+DECLARE_EVENT_CLASS(random__extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, nbytes )
+ __field( int, entropy_count )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->nbytes = nbytes;
+ __entry->entropy_count = entropy_count;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
+ (void *)__entry->IP)
+);
+
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
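For context, a sketch of the producer side (the real call sites live in drivers/char/random.c and are not part of this hunk): the header is instantiated once with CREATE_TRACE_POINTS and each event is then fired with the current bookkeeping values.

#include <linux/kernel.h>	/* _RET_IP_ */

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

static void example_credit(const char *pool_name, int nbits,
			   int entropy_count, int entropy_total)
{
	/* _RET_IP_ supplies the "caller %pF" value in the TP_printk output */
	trace_credit_entropy_bits(pool_name, nbits, entropy_count,
				  entropy_total, _RET_IP_);
}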
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index ea7a2035456d..5a8671e8a67f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -73,6 +73,9 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
+ )
+ TP_perf_assign(
+ __perf_task(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -325,6 +328,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
)
TP_perf_assign(
__perf_count(delay);
+ __perf_task(tsk);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6bc2faaf261..a763888a36f9 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -712,6 +712,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef __perf_count
#define __perf_count(c) __count = (c)
+#undef __perf_task
+#define __perf_task(t) __task = (t)
+
#undef TP_perf_assign
#define TP_perf_assign(args...) args
@@ -725,6 +728,7 @@ perf_trace_##call(void *__data, proto) \
struct ftrace_raw_##call *entry; \
struct pt_regs __regs; \
u64 __addr = 0, __count = 1; \
+ struct task_struct *__task = NULL; \
struct hlist_head *head; \
int __entry_size; \
int __data_size; \
@@ -752,7 +756,7 @@ perf_trace_##call(void *__data, proto) \
\
head = this_cpu_ptr(event_call->perf_events); \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
- __count, &__regs, head); \
+ __count, &__regs, head, __task); \
}
/*
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index 89d43b3d4cb9..5a0e4f9efb53 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -82,6 +82,9 @@ struct lcd_ctrl_config {
/* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
unsigned char raster_order;
+
+ /* DMA FIFO threshold */
+ int fifo_th;
};
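A hypothetical board-file sketch (not from this patch) showing the new field set alongside an existing one; the threshold value is an assumption.

static struct lcd_ctrl_config example_lcd_cfg = {
	.raster_order	= 0,	/* least-to-most significant */
	.fifo_th	= 6,	/* new DMA FIFO threshold */
};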
struct lcd_sync_arg {
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index c8e59b4a3364..a6267a2d292b 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -48,6 +48,10 @@
#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
#define DISPC_IRQ_FRAMEDONETV (1 << 24)
#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
+#define DISPC_IRQ_FRAMEDONE3 (1 << 26)
+#define DISPC_IRQ_VSYNC3 (1 << 27)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 28)
+#define DISPC_IRQ_SYNC_LOST3 (1 << 29)
struct omap_dss_device;
struct omap_overlay_manager;
@@ -75,6 +79,7 @@ enum omap_channel {
OMAP_DSS_CHANNEL_LCD = 0,
OMAP_DSS_CHANNEL_DIGIT = 1,
OMAP_DSS_CHANNEL_LCD2 = 2,
+ OMAP_DSS_CHANNEL_LCD3 = 3,
};
enum omap_color_mode {
@@ -99,11 +104,6 @@ enum omap_color_mode {
OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
};
-enum omap_lcd_display_type {
- OMAP_DSS_LCD_DISPLAY_STN,
- OMAP_DSS_LCD_DISPLAY_TFT,
-};
-
enum omap_dss_load_mode {
OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
OMAP_DSS_LOAD_CLUT_ONLY = 1,
@@ -121,15 +121,15 @@ enum omap_rfbi_te_mode {
OMAP_DSS_RFBI_TE_MODE_2 = 2,
};
-enum omap_panel_config {
- OMAP_DSS_LCD_IVS = 1<<0,
- OMAP_DSS_LCD_IHS = 1<<1,
- OMAP_DSS_LCD_IPC = 1<<2,
- OMAP_DSS_LCD_IEO = 1<<3,
- OMAP_DSS_LCD_RF = 1<<4,
- OMAP_DSS_LCD_ONOFF = 1<<5,
+enum omap_dss_signal_level {
+ OMAPDSS_SIG_ACTIVE_HIGH = 0,
+ OMAPDSS_SIG_ACTIVE_LOW = 1,
+};
- OMAP_DSS_LCD_TFT = 1<<20,
+enum omap_dss_signal_edge {
+ OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
+ OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
enum omap_dss_venc_type {
@@ -167,13 +167,6 @@ enum omap_dss_audio_state {
OMAP_DSS_AUDIO_PLAYING,
};
-/* XXX perhaps this should be removed */
-enum omap_dss_overlay_managers {
- OMAP_DSS_OVL_MGR_LCD,
- OMAP_DSS_OVL_MGR_TV,
- OMAP_DSS_OVL_MGR_LCD2,
-};
-
enum omap_dss_rotation_type {
OMAP_DSS_ROT_DMA = 1 << 0,
OMAP_DSS_ROT_VRFB = 1 << 1,
@@ -268,9 +261,6 @@ struct omap_dss_dsi_videomode_data {
int hfp_blanking_mode;
/* Video port sync events */
- int vp_de_pol;
- int vp_hsync_pol;
- int vp_vsync_pol;
bool vp_vsync_end;
bool vp_hsync_end;
@@ -346,6 +336,19 @@ struct omap_video_timings {
u16 vfp; /* Vertical front porch */
/* Unit: line clocks */
u16 vbp; /* Vertical back porch */
+
+ /* Vsync logic level */
+ enum omap_dss_signal_level vsync_level;
+ /* Hsync logic level */
+ enum omap_dss_signal_level hsync_level;
+ /* Interlaced or Progressive timings */
+ bool interlace;
+ /* Pixel clock edge to drive LCD data */
+ enum omap_dss_signal_edge data_pclk_edge;
+ /* Data enable logic level */
+ enum omap_dss_signal_level de_level;
+ /* Pixel clock edges to drive HSYNC and VSYNC signals */
+ enum omap_dss_signal_edge sync_pclk_edge;
};
#ifdef CONFIG_OMAP2_DSS_VENC
@@ -559,8 +562,6 @@ struct omap_dss_device {
/* Unit: line clocks */
int acb; /* ac-bias pin frequency */
- enum omap_panel_config config;
-
enum omap_dss_dsi_pixel_format dsi_pix_fmt;
enum omap_dss_dsi_mode dsi_mode;
struct omap_dss_dsi_videomode_data dsi_vm_data;
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index 7571b27a0ba1..ff43ffc1aab2 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -166,6 +166,12 @@ struct sh_mobile_lcdc_bl_info {
int (*get_brightness)(void);
};
+struct sh_mobile_lcdc_overlay_cfg {
+ int fourcc;
+ unsigned int max_xres;
+ unsigned int max_yres;
+};
+
struct sh_mobile_lcdc_chan_cfg {
int chan;
int fourcc;
@@ -186,6 +192,7 @@ struct sh_mobile_lcdc_chan_cfg {
struct sh_mobile_lcdc_info {
int clock_source;
struct sh_mobile_lcdc_chan_cfg ch[2];
+ struct sh_mobile_lcdc_overlay_cfg overlays[4];
struct sh_mobile_meram_info *meram_dev;
};
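A hypothetical platform-data sketch using the new overlay descriptors; the fourcc and resolution values are assumptions, and <linux/videodev2.h> is assumed to provide the V4L2 fourcc.

static struct sh_mobile_lcdc_info example_lcdc_info = {
	.overlays = {
		{
			.fourcc   = V4L2_PIX_FMT_NV12,
			.max_xres = 1280,
			.max_yres = 720,
		},
	},
};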
diff --git a/include/video/sh_mobile_meram.h b/include/video/sh_mobile_meram.h
index 29b2fd3b147e..062e6e7f955c 100644
--- a/include/video/sh_mobile_meram.h
+++ b/include/video/sh_mobile_meram.h
@@ -15,7 +15,6 @@ enum {
struct sh_mobile_meram_priv;
-struct sh_mobile_meram_ops;
/*
* struct sh_mobile_meram_info - MERAM platform data
@@ -24,7 +23,6 @@ struct sh_mobile_meram_ops;
struct sh_mobile_meram_info {
int addr_mode;
u32 reserved_icbs;
- struct sh_mobile_meram_ops *ops;
struct sh_mobile_meram_priv *priv;
struct platform_device *pdev;
};
@@ -38,26 +36,59 @@ struct sh_mobile_meram_cfg {
struct sh_mobile_meram_icb_cfg icb[2];
};
-struct module;
-struct sh_mobile_meram_ops {
- struct module *module;
- /* register usage of meram */
- void *(*meram_register)(struct sh_mobile_meram_info *meram_dev,
- const struct sh_mobile_meram_cfg *cfg,
- unsigned int xres, unsigned int yres,
- unsigned int pixelformat,
- unsigned int *pitch);
-
- /* unregister usage of meram */
- void (*meram_unregister)(struct sh_mobile_meram_info *meram_dev,
- void *data);
-
- /* update meram settings */
- void (*meram_update)(struct sh_mobile_meram_info *meram_dev, void *data,
+#if defined(CONFIG_FB_SH_MOBILE_MERAM) || \
+ defined(CONFIG_FB_SH_MOBILE_MERAM_MODULE)
+unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev,
+ size_t size);
+void sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
+ unsigned long mem, size_t size);
+void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
+ const struct sh_mobile_meram_cfg *cfg,
+ unsigned int xres, unsigned int yres,
+ unsigned int pixelformat,
+ unsigned int *pitch);
+void sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data);
+void sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
+ unsigned long base_addr_y,
+ unsigned long base_addr_c,
+ unsigned long *icb_addr_y,
+ unsigned long *icb_addr_c);
+#else
+static inline unsigned long
+sh_mobile_meram_alloc(struct sh_mobile_meram_info *meram_dev, size_t size)
+{
+ return 0;
+}
+
+static inline void
+sh_mobile_meram_free(struct sh_mobile_meram_info *meram_dev,
+ unsigned long mem, size_t size)
+{
+}
+
+static inline void *
+sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *dev,
+ const struct sh_mobile_meram_cfg *cfg,
+ unsigned int xres, unsigned int yres,
+ unsigned int pixelformat,
+ unsigned int *pitch)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void
+sh_mobile_meram_cache_free(struct sh_mobile_meram_info *dev, void *data)
+{
+}
+
+static inline void
+sh_mobile_meram_cache_update(struct sh_mobile_meram_info *dev, void *data,
unsigned long base_addr_y,
unsigned long base_addr_c,
unsigned long *icb_addr_y,
- unsigned long *icb_addr_c);
-};
+ unsigned long *icb_addr_c)
+{
+}
+#endif
#endif /* __VIDEO_SH_MOBILE_MERAM_H__ */
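A sketch of how a caller (for instance the LCDC driver) would use the new direct functions in place of the removed ops table; the surrounding driver structure is assumed.

#include <linux/err.h>
#include <video/sh_mobile_meram.h>

static void *example_meram_attach(struct sh_mobile_meram_info *meram,
				  const struct sh_mobile_meram_cfg *cfg,
				  unsigned int xres, unsigned int yres,
				  unsigned int fourcc, unsigned int *pitch)
{
	void *cache;

	cache = sh_mobile_meram_cache_alloc(meram, cfg, xres, yres,
					    fourcc, pitch);
	/* The !CONFIG_FB_SH_MOBILE_MERAM stubs return ERR_PTR(-ENODEV),
	 * so callers check IS_ERR() rather than a NULL ops pointer. */
	if (IS_ERR(cache))
		return NULL;

	return cache;
}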
diff --git a/init/Kconfig b/init/Kconfig
index d07dcf9fc8a9..af6c7f8ba019 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -357,7 +357,7 @@ config AUDIT
config AUDITSYSCALL
bool "Enable system-call auditing support"
- depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM)
+ depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
default y if SECURITY_SELINUX
help
Enable low-overhead system-call auditing infrastructure that
@@ -686,7 +686,7 @@ config RESOURCE_COUNTERS
This option enables controller independent resource accounting
infrastructure that works with cgroups.
-config CGROUP_MEM_RES_CTLR
+config MEMCG
bool "Memory Resource Controller for Control Groups"
depends on RESOURCE_COUNTERS
select MM_OWNER
@@ -709,9 +709,9 @@ config CGROUP_MEM_RES_CTLR
This config option also selects MM_OWNER config option, which
could in turn add some fork/exit overhead.
-config CGROUP_MEM_RES_CTLR_SWAP
+config MEMCG_SWAP
bool "Memory Resource Controller Swap Extension"
- depends on CGROUP_MEM_RES_CTLR && SWAP
+ depends on MEMCG && SWAP
help
Add swap management feature to memory resource controller. When you
enable this, you can limit mem+swap usage per cgroup. In other words,
@@ -726,9 +726,9 @@ config CGROUP_MEM_RES_CTLR_SWAP
if boot option "swapaccount=0" is set, swap will not be accounted.
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.
-config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
+config MEMCG_SWAP_ENABLED
bool "Memory Resource Controller Swap Extension enabled by default"
- depends on CGROUP_MEM_RES_CTLR_SWAP
+ depends on MEMCG_SWAP
default y
help
Memory Resource Controller Swap Extension comes with its price in
@@ -739,9 +739,9 @@ config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
For those who want to have the feature enabled by default should
select this option (if, for some reason, they need to disable it
then swapaccount=0 does the trick).
-config CGROUP_MEM_RES_CTLR_KMEM
+config MEMCG_KMEM
bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)"
- depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL
+ depends on MEMCG && EXPERIMENTAL
default n
help
The Kernel Memory extension for Memory Resource Controller can limit
@@ -751,6 +751,21 @@ config CGROUP_MEM_RES_CTLR_KMEM
the kmem extension can use it to guarantee that no group of processes
will ever exhaust kernel resources alone.
+config CGROUP_HUGETLB
+ bool "HugeTLB Resource Controller for Control Groups"
+ depends on RESOURCE_COUNTERS && HUGETLB_PAGE && EXPERIMENTAL
+ default n
+ help
+ Provides a cgroup Resource Controller for HugeTLB pages.
+ When you enable this, you can put a per-cgroup limit on HugeTLB usage.
+ The limit is enforced at page-fault time. Since HugeTLB does not
+ support page reclaim, enforcing the limit at page fault means the
+ application will get a SIGBUS signal if it tries to access HugeTLB
+ pages beyond its limit. This requires the application to know
+ beforehand how many HugeTLB pages it will need. The control group is
+ tracked in the third page lru pointer, which means the controller
+ cannot be used with a huge page size of less than 3 pages.
+
config CGROUP_PERF
bool "Enable perf_event per-cpu per-container group (cgroup) monitoring"
depends on PERF_EVENTS && CGROUPS
diff --git a/init/main.c b/init/main.c
index eb72a5c4c844..b28673087ac0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -461,10 +461,6 @@ static void __init mm_init(void)
percpu_init_late();
pgtable_cache_init();
vmalloc_init();
-#ifdef CONFIG_X86
- if (efi_enabled)
- efi_enter_virtual_mode();
-#endif
}
asmlinkage void __init start_kernel(void)
@@ -506,7 +502,7 @@ asmlinkage void __init start_kernel(void)
setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
- build_all_zonelists(NULL);
+ build_all_zonelists(NULL, NULL);
page_alloc_init();
printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line);
@@ -606,6 +602,10 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
anon_vma_init();
+#ifdef CONFIG_X86
+ if (efi_enabled)
+ efi_enter_virtual_mode();
+#endif
thread_info_cache_init();
cred_init();
fork_init(totalram_pages);
@@ -725,6 +725,7 @@ static initcall_t *initcall_levels[] __initdata = {
__initcall_end,
};
+/* Keep these in sync with initcalls in include/linux/init.h */
static char *initcall_level_names[] __initdata = {
"early",
"core",
diff --git a/ipc/compat.c b/ipc/compat.c
index a6df704f521e..ad9518eb26e0 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -118,7 +118,7 @@ extern int sem_ctls[];
static inline int compat_ipc_parse_version(int *cmd)
{
-#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
int version = *cmd & IPC_64;
/* this is tricky: architectures that have support for the old
@@ -373,21 +373,21 @@ long compat_sys_semctl(int semid, int semnum, int cmd, int arg)
}
long compat_sys_msgsnd(int msqid, struct compat_msgbuf __user *msgp,
- size_t msgsz, int msgflg)
+ compat_ssize_t msgsz, int msgflg)
{
compat_long_t mtype;
if (get_user(mtype, &msgp->mtype))
return -EFAULT;
- return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
+ return do_msgsnd(msqid, mtype, msgp->mtext, (ssize_t)msgsz, msgflg);
}
long compat_sys_msgrcv(int msqid, struct compat_msgbuf __user *msgp,
- size_t msgsz, long msgtyp, int msgflg)
+ compat_ssize_t msgsz, long msgtyp, int msgflg)
{
long err, mtype;
- err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
+ err = do_msgrcv(msqid, &mtype, msgp->mtext, (ssize_t)msgsz, msgtyp, msgflg);
if (err < 0)
goto out;
@@ -514,6 +514,10 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
return err;
}
+#ifndef COMPAT_SHMLBA
+#define COMPAT_SHMLBA SHMLBA
+#endif
+
#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
void __user *uptr)
@@ -524,7 +528,7 @@ long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
if (version == 1)
return -EINVAL;
- err = do_shmat(first, uptr, second, &raddr);
+ err = do_shmat(first, uptr, second, &raddr, COMPAT_SHMLBA);
if (err < 0)
return err;
uaddr = compat_ptr(third);
@@ -536,7 +540,7 @@ long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg)
unsigned long ret;
long err;
- err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret);
+ err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
if (err)
return err;
force_successful_syscall_return();
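With do_shmat() now taking an explicit alignment, a compat architecture whose 32-bit SHMLBA differs from the native one can override the default from its asm/compat.h before this fallback kicks in; a hypothetical example (the value is illustrative only):

/* arch/example/include/asm/compat.h */
#define COMPAT_SHMLBA	0x4000	/* 16 KiB attach alignment for compat tasks */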
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index f8e54f5b9080..9a08acc9e649 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
struct mq_attr *attr)
{
const struct cred *cred = current_cred();
- struct file *result;
int ret;
if (attr) {
@@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
}
mode &= ~current_umask();
- ret = mnt_want_write(path->mnt);
- if (ret)
- return ERR_PTR(ret);
ret = vfs_create(dir, path->dentry, mode, true);
path->dentry->d_fsdata = NULL;
- if (!ret)
- result = dentry_open(path, oflag, cred);
- else
- result = ERR_PTR(ret);
- /*
- * dentry_open() took a persistent mnt_want_write(),
- * so we can now drop this one.
- */
- mnt_drop_write(path->mnt);
- return result;
+ if (ret)
+ return ERR_PTR(ret);
+ return dentry_open(path, oflag, cred);
}
/* Opens existing queue */
@@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
struct mq_attr attr;
int fd, error;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
- struct dentry *root = ipc_ns->mq_mnt->mnt_root;
+ struct vfsmount *mnt = ipc_ns->mq_mnt;
+ struct dentry *root = mnt->mnt_root;
+ int ro;
if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
return -EFAULT;
@@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
if (fd < 0)
goto out_putname;
+ ro = mnt_want_write(mnt); /* we'll drop it in any case */
error = 0;
mutex_lock(&root->d_inode->i_mutex);
path.dentry = lookup_one_len(name, root, strlen(name));
@@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
error = PTR_ERR(path.dentry);
goto out_putfd;
}
- path.mnt = mntget(ipc_ns->mq_mnt);
+ path.mnt = mntget(mnt);
if (oflag & O_CREAT) {
if (path.dentry->d_inode) { /* entry already exists */
@@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
}
filp = do_open(&path, oflag);
} else {
+ if (ro) {
+ error = ro;
+ goto out;
+ }
filp = do_create(ipc_ns, root->d_inode,
&path, oflag, mode,
u_attr ? &attr : NULL);
@@ -845,6 +841,7 @@ out_putfd:
fd = error;
}
mutex_unlock(&root->d_inode->i_mutex);
+ mnt_drop_write(mnt);
out_putname:
putname(name);
return fd;
@@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
struct dentry *dentry;
struct inode *inode = NULL;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+ struct vfsmount *mnt = ipc_ns->mq_mnt;
name = getname(u_name);
if (IS_ERR(name))
return PTR_ERR(name);
- mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
- I_MUTEX_PARENT);
- dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
+ err = mnt_want_write(mnt);
+ if (err)
+ goto out_name;
+ mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT);
+ dentry = lookup_one_len(name, mnt->mnt_root, strlen(name));
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out_unlock;
}
- if (!dentry->d_inode) {
- err = -ENOENT;
- goto out_err;
- }
-
inode = dentry->d_inode;
- if (inode)
+ if (!inode) {
+ err = -ENOENT;
+ } else {
ihold(inode);
- err = mnt_want_write(ipc_ns->mq_mnt);
- if (err)
- goto out_err;
- err = vfs_unlink(dentry->d_parent->d_inode, dentry);
- mnt_drop_write(ipc_ns->mq_mnt);
-out_err:
+ err = vfs_unlink(dentry->d_parent->d_inode, dentry);
+ }
dput(dentry);
out_unlock:
- mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
- putname(name);
+ mutex_unlock(&mnt->mnt_root->d_inode->i_mutex);
if (inode)
iput(inode);
+ mnt_drop_write(mnt);
+out_name:
+ putname(name);
return err;
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 41c1285d697a..00faa05cf72a 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -953,7 +953,8 @@ out:
* "raddr" thing points to kernel space, and there has to be a wrapper around
* this.
*/
-long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
+long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
+ unsigned long shmlba)
{
struct shmid_kernel *shp;
unsigned long addr;
@@ -973,9 +974,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
if (shmid < 0)
goto out;
else if ((addr = (ulong)shmaddr)) {
- if (addr & (SHMLBA-1)) {
+ if (addr & (shmlba - 1)) {
if (shmflg & SHM_RND)
- addr &= ~(SHMLBA-1); /* round down */
+ addr &= ~(shmlba - 1); /* round down */
else
#ifndef __ARCH_FORCE_SHMLBA
if (addr & ~PAGE_MASK)
@@ -1107,7 +1108,7 @@ SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
unsigned long ret;
long err;
- err = do_shmat(shmid, shmaddr, shmflg, &ret);
+ err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
if (err)
return err;
force_successful_syscall_return();
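A worked example of the new rounding (values chosen for illustration): with shmlba = 0x4000 and shmaddr = 0x12345, SHM_RND rounds the attach address down to 0x12345 & ~0x3fff = 0x10000; without SHM_RND the misaligned address is rejected, exactly as it was with the old hard-coded SHMLBA.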
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 1d6f53f6b562..0d1e32ce048e 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -73,7 +73,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
default: {
unsigned long raddr;
ret = do_shmat(first, (char __user *)ptr,
- second, &raddr);
+ second, &raddr, SHMLBA);
if (ret)
return ret;
return put_user(raddr, (unsigned long __user *) third);
diff --git a/ipc/util.c b/ipc/util.c
index 75261a31d48d..eb07fd356f27 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -804,7 +804,7 @@ out_up:
return ERR_PTR(err);
}
-#ifdef __ARCH_WANT_IPC_PARSE_VERSION
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/**
@@ -826,7 +826,7 @@ int ipc_parse_version (int *cmd)
}
}
-#endif /* __ARCH_WANT_IPC_PARSE_VERSION */
+#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */
#ifdef CONFIG_PROC_FS
struct ipc_proc_iter {
diff --git a/ipc/util.h b/ipc/util.h
index 6f5c20bedaab..850ef3e962cb 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -130,7 +130,7 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm);
-#ifndef __ARCH_WANT_IPC_PARSE_VERSION
+#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
/* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64
#else
diff --git a/kernel/audit.c b/kernel/audit.c
index 4a3f28d2ca65..ea3b7b6191c7 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1456,6 +1456,27 @@ void audit_log_key(struct audit_buffer *ab, char *key)
}
/**
+ * audit_log_link_denied - report a link restriction denial
+ * @operation: specific link opreation
+ * @link: the path that triggered the restriction
+ */
+void audit_log_link_denied(const char *operation, struct path *link)
+{
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_ANOM_LINK);
+ audit_log_format(ab, "op=%s action=denied", operation);
+ audit_log_format(ab, " pid=%d comm=", current->pid);
+ audit_log_untrustedstring(ab, current->comm);
+ audit_log_d_path(ab, " path=", link);
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, link->dentry->d_inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", link->dentry->d_inode->i_ino);
+ audit_log_end(ab);
+}
+
+/**
* audit_log_end - end one audit record
* @ab: the audit_buffer
*
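A sketch of a caller of the new helper (the expected user is the link-restriction checking in fs/namei.c; this standalone form is illustrative only):

static int example_deny_link(struct path *link)
{
	audit_log_link_denied("linkat", link);
	return -EPERM;
}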
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 3a5ca582ba1e..ed206fd88cca 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -250,7 +250,6 @@ static void untag_chunk(struct node *p)
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
fsnotify_destroy_mark(entry);
- fsnotify_put_mark(entry);
goto out;
}
@@ -259,7 +258,7 @@ static void untag_chunk(struct node *p)
fsnotify_duplicate_mark(&new->mark, entry);
if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
- free_chunk(new);
+ fsnotify_put_mark(&new->mark);
goto Fallback;
}
@@ -293,7 +292,7 @@ static void untag_chunk(struct node *p)
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
fsnotify_destroy_mark(entry);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(&new->mark); /* drop initial reference */
goto out;
Fallback:
@@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
entry = &chunk->mark;
if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
- free_chunk(chunk);
+ fsnotify_put_mark(entry);
return -ENOSPC;
}
@@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
insert_hash(chunk);
spin_unlock(&hash_lock);
spin_unlock(&entry->lock);
+ fsnotify_put_mark(entry); /* drop initial reference */
return 0;
}
@@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
fsnotify_duplicate_mark(chunk_entry, old_entry);
if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
spin_unlock(&old_entry->lock);
- free_chunk(chunk);
+ fsnotify_put_mark(chunk_entry);
fsnotify_put_mark(old_entry);
return -ENOSPC;
}
@@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
spin_unlock(&chunk_entry->lock);
spin_unlock(&old_entry->lock);
fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(chunk_entry); /* drop initial reference */
fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
- fsnotify_put_mark(old_entry); /* and kill it */
return 0;
}
@@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
evict_chunk(chunk);
- fsnotify_put_mark(entry);
+
+ /*
+ * We are guaranteed to have at least one reference to the mark from
+ * either the inode or the caller of fsnotify_destroy_mark().
+ */
+ BUG_ON(atomic_read(&entry->refcnt) < 1);
}
static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a4eb5227a19e..14d32588cccd 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -416,7 +416,7 @@ int __cpuinit cpu_up(unsigned int cpu)
if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
mutex_lock(&zonelists_mutex);
- build_all_zonelists(NULL);
+ build_all_zonelists(NULL, NULL);
mutex_unlock(&zonelists_mutex);
}
#endif
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8b68ce78ff17..be7b33b73d30 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -12,6 +12,7 @@
#include <linux/kdb.h>
#include <linux/kdebug.h>
#include <linux/export.h>
+#include <linux/hardirq.h>
#include "kdb_private.h"
#include "../debug_core.h"
@@ -52,6 +53,9 @@ int kdb_stub(struct kgdb_state *ks)
if (atomic_read(&kgdb_setting_breakpoint))
reason = KDB_REASON_KEYBOARD;
+ if (in_nmi())
+ reason = KDB_REASON_NMI;
+
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
reason = KDB_REASON_BREAK;
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index bb9520f0f6ff..0a69d2adc4f3 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -715,9 +715,6 @@ kdb_printit:
/* check for having reached the LINES number of printed lines */
if (kdb_nextline == linecount) {
char buf1[16] = "";
-#if defined(CONFIG_SMP)
- char buf2[32];
-#endif
/* Watch out for recursion here. Any routine that calls
* kdb_printf will come back through here. And kdb_read
@@ -732,14 +729,6 @@ kdb_printit:
if (moreprompt == NULL)
moreprompt = "more> ";
-#if defined(CONFIG_SMP)
- if (strchr(moreprompt, '%')) {
- sprintf(buf2, moreprompt, get_cpu());
- put_cpu();
- moreprompt = buf2;
- }
-#endif
-
kdb_input_flush();
c = console_drivers;
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 1f91413edb87..31df1706b9a9 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -139,11 +139,10 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
static char *__env[] = {
#if defined(CONFIG_SMP)
"PROMPT=[%d]kdb> ",
- "MOREPROMPT=[%d]more> ",
#else
"PROMPT=kdb> ",
- "MOREPROMPT=more> ",
#endif
+ "MOREPROMPT=more> ",
"RADIX=16",
"MDCOUNT=8", /* lines of md output */
KDB_PLATFORM_ENV,
@@ -1236,18 +1235,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
*cmdbuf = '\0';
*(cmd_hist[cmd_head]) = '\0';
- if (KDB_FLAG(ONLY_DO_DUMP)) {
- /* kdb is off but a catastrophic error requires a dump.
- * Take the dump and reboot.
- * Turn on logging so the kdb output appears in the log
- * buffer in the dump.
- */
- const char *setargs[] = { "set", "LOGGING", "1" };
- kdb_set(2, setargs);
- kdb_reboot(0, NULL);
- /*NOTREACHED*/
- }
-
do_full_getstr:
#if defined(CONFIG_SMP)
snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 6581a040f399..98d4597f43d6 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -153,7 +153,8 @@ put_callchain_entry(int rctx)
put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
}
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
int rctx;
struct perf_callchain_entry *entry;
@@ -178,6 +179,12 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
}
if (regs) {
+ /*
+ * Disallow cross-task user callchains.
+ */
+ if (event->ctx->task && event->ctx->task != current)
+ goto exit_put;
+
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_user(entry, regs);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f1cf0edeb39a..b7935fcec7d9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4039,7 +4039,7 @@ void perf_prepare_sample(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
int size = 1;
- data->callchain = perf_callchain(regs);
+ data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
@@ -5209,7 +5209,8 @@ static int perf_tp_event_match(struct perf_event *event,
}
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
- struct pt_regs *regs, struct hlist_head *head, int rctx)
+ struct pt_regs *regs, struct hlist_head *head, int rctx,
+ struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
@@ -5228,6 +5229,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
perf_swevent_event(event, count, &data, regs);
}
+ /*
+ * If a target task was specified, also iterate its context and
+ * deliver this event there too.
+ */
+ if (task && task != current) {
+ struct perf_event_context *ctx;
+ struct trace_entry *entry = record;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
+ if (!ctx)
+ goto unlock;
+
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->attr.type != PERF_TYPE_TRACEPOINT)
+ continue;
+ if (event->attr.config != entry->type)
+ continue;
+ if (perf_tp_event_match(event, &data, regs))
+ perf_swevent_event(event, count, &data, regs);
+ }
+unlock:
+ rcu_read_unlock();
+ }
+
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f90afc..a096c19f2c2a 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -101,7 +101,8 @@ __output_copy(struct perf_output_handle *handle,
}
/* Callchain handling */
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+extern struct perf_callchain_entry *
+perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f93532748bca..c08a22d02f72 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -32,6 +32,7 @@
#include <linux/swap.h> /* try_to_free_swap */
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
+#include "../../mm/internal.h" /* munlock_vma_page */
#include <linux/uprobes.h>
@@ -112,14 +113,14 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
return false;
}
-static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
+static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
- loff_t vaddr;
-
- vaddr = vma->vm_start + offset;
- vaddr -= vma->vm_pgoff << PAGE_SHIFT;
+ return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+}
- return vaddr;
+static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
+{
+ return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
/**
@@ -127,25 +128,27 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
* based on replace_page in mm/ksm.c
*
* @vma: vma that holds the pte pointing to page
+ * @addr: address the old @page is mapped at
* @page: the cowed page we are replacing by kpage
* @kpage: the modified page we replace page by
*
* Returns 0 on success, -EFAULT on failure.
*/
-static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
+static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ struct page *page, struct page *kpage)
{
struct mm_struct *mm = vma->vm_mm;
- unsigned long addr;
spinlock_t *ptl;
pte_t *ptep;
+ int err;
- addr = page_address_in_vma(page, vma);
- if (addr == -EFAULT)
- return -EFAULT;
+ /* For try_to_free_swap() and munlock_vma_page() below */
+ lock_page(page);
+ err = -EAGAIN;
ptep = page_check_address(page, mm, addr, &ptl, 0);
if (!ptep)
- return -EAGAIN;
+ goto unlock;
get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr);
@@ -162,10 +165,16 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
page_remove_rmap(page);
if (!page_mapped(page))
try_to_free_swap(page);
- put_page(page);
pte_unmap_unlock(ptep, ptl);
- return 0;
+ if (vma->vm_flags & VM_LOCKED)
+ munlock_vma_page(page);
+ put_page(page);
+
+ err = 0;
+ unlock:
+ unlock_page(page);
+ return err;
}
/**
@@ -206,45 +215,23 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long vaddr, uprobe_opcode_t opcode)
{
struct page *old_page, *new_page;
- struct address_space *mapping;
void *vaddr_old, *vaddr_new;
struct vm_area_struct *vma;
- struct uprobe *uprobe;
int ret;
+
retry:
/* Read the page with vaddr into memory */
ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
if (ret <= 0)
return ret;
- ret = -EINVAL;
-
- /*
- * We are interested in text pages only. Our pages of interest
- * should be mapped for read and execute only. We desist from
- * adding probes in write mapped pages since the breakpoints
- * might end up in the file copy.
- */
- if (!valid_vma(vma, is_swbp_insn(&opcode)))
- goto put_out;
-
- uprobe = container_of(auprobe, struct uprobe, arch);
- mapping = uprobe->inode->i_mapping;
- if (mapping != vma->vm_file->f_mapping)
- goto put_out;
-
ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
- goto put_out;
+ goto put_old;
__SetPageUptodate(new_page);
- /*
- * lock page will serialize against do_wp_page()'s
- * PageAnon() handling
- */
- lock_page(old_page);
/* copy the page now that we've got it stable */
vaddr_old = kmap_atomic(old_page);
vaddr_new = kmap_atomic(new_page);
@@ -257,17 +244,13 @@ retry:
ret = anon_vma_prepare(vma);
if (ret)
- goto unlock_out;
+ goto put_new;
- lock_page(new_page);
- ret = __replace_page(vma, old_page, new_page);
- unlock_page(new_page);
+ ret = __replace_page(vma, vaddr, old_page, new_page);
-unlock_out:
- unlock_page(old_page);
+put_new:
page_cache_release(new_page);
-
-put_out:
+put_old:
put_page(old_page);
if (unlikely(ret == -EAGAIN))
@@ -791,7 +774,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
curr = info;
info->mm = vma->vm_mm;
- info->vaddr = vma_address(vma, offset);
+ info->vaddr = offset_to_vaddr(vma, offset);
}
mutex_unlock(&mapping->i_mmap_mutex);
@@ -839,12 +822,13 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
goto free;
down_write(&mm->mmap_sem);
- vma = find_vma(mm, (unsigned long)info->vaddr);
- if (!vma || !valid_vma(vma, is_register))
+ vma = find_vma(mm, info->vaddr);
+ if (!vma || !valid_vma(vma, is_register) ||
+ vma->vm_file->f_mapping->host != uprobe->inode)
goto unlock;
- if (vma->vm_file->f_mapping->host != uprobe->inode ||
- vma_address(vma, uprobe->offset) != info->vaddr)
+ if (vma->vm_start > info->vaddr ||
+ vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
goto unlock;
if (is_register) {
@@ -960,59 +944,66 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
put_uprobe(uprobe);
}
-/*
- * Of all the nodes that correspond to the given inode, return the node
- * with the least offset.
- */
-static struct rb_node *find_least_offset_node(struct inode *inode)
+static struct rb_node *
+find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
- struct uprobe u = { .inode = inode, .offset = 0};
struct rb_node *n = uprobes_tree.rb_node;
- struct rb_node *close_node = NULL;
- struct uprobe *uprobe;
- int match;
while (n) {
- uprobe = rb_entry(n, struct uprobe, rb_node);
- match = match_uprobe(&u, uprobe);
-
- if (uprobe->inode == inode)
- close_node = n;
-
- if (!match)
- return close_node;
+ struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
- if (match < 0)
+ if (inode < u->inode) {
n = n->rb_left;
- else
+ } else if (inode > u->inode) {
n = n->rb_right;
+ } else {
+ if (max < u->offset)
+ n = n->rb_left;
+ else if (min > u->offset)
+ n = n->rb_right;
+ else
+ break;
+ }
}
- return close_node;
+ return n;
}
/*
- * For a given inode, build a list of probes that need to be inserted.
+ * For a given range in vma, build a list of probes that need to be inserted.
*/
-static void build_probe_list(struct inode *inode, struct list_head *head)
+static void build_probe_list(struct inode *inode,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *head)
{
- struct uprobe *uprobe;
+ loff_t min, max;
unsigned long flags;
- struct rb_node *n;
-
- spin_lock_irqsave(&uprobes_treelock, flags);
-
- n = find_least_offset_node(inode);
+ struct rb_node *n, *t;
+ struct uprobe *u;
- for (; n; n = rb_next(n)) {
- uprobe = rb_entry(n, struct uprobe, rb_node);
- if (uprobe->inode != inode)
- break;
+ INIT_LIST_HEAD(head);
+ min = vaddr_to_offset(vma, start);
+ max = min + (end - start) - 1;
- list_add(&uprobe->pending_list, head);
- atomic_inc(&uprobe->ref);
+ spin_lock_irqsave(&uprobes_treelock, flags);
+ n = find_node_in_range(inode, min, max);
+ if (n) {
+ for (t = n; t; t = rb_prev(t)) {
+ u = rb_entry(t, struct uprobe, rb_node);
+ if (u->inode != inode || u->offset < min)
+ break;
+ list_add(&u->pending_list, head);
+ atomic_inc(&u->ref);
+ }
+ for (t = n; (t = rb_next(t)); ) {
+ u = rb_entry(t, struct uprobe, rb_node);
+ if (u->inode != inode || u->offset > max)
+ break;
+ list_add(&u->pending_list, head);
+ atomic_inc(&u->ref);
+ }
}
-
spin_unlock_irqrestore(&uprobes_treelock, flags);
}
@@ -1031,7 +1022,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
- struct uprobe *uprobe;
+ struct uprobe *uprobe, *u;
struct inode *inode;
int ret, count;
@@ -1042,21 +1033,15 @@ int uprobe_mmap(struct vm_area_struct *vma)
if (!inode)
return 0;
- INIT_LIST_HEAD(&tmp_list);
mutex_lock(uprobes_mmap_hash(inode));
- build_probe_list(inode, &tmp_list);
+ build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
ret = 0;
count = 0;
- list_for_each_entry(uprobe, &tmp_list, pending_list) {
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!ret) {
- loff_t vaddr = vma_address(vma, uprobe->offset);
-
- if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
- put_uprobe(uprobe);
- continue;
- }
+ unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
/*
@@ -1097,12 +1082,15 @@ int uprobe_mmap(struct vm_area_struct *vma)
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct list_head tmp_list;
- struct uprobe *uprobe;
+ struct uprobe *uprobe, *u;
struct inode *inode;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
return;
+ if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
+ return;
+
if (!atomic_read(&vma->vm_mm->uprobes_state.count))
return;
@@ -1110,21 +1098,17 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
if (!inode)
return;
- INIT_LIST_HEAD(&tmp_list);
mutex_lock(uprobes_mmap_hash(inode));
- build_probe_list(inode, &tmp_list);
-
- list_for_each_entry(uprobe, &tmp_list, pending_list) {
- loff_t vaddr = vma_address(vma, uprobe->offset);
-
- if (vaddr >= start && vaddr < end) {
- /*
- * An unregister could have removed the probe before
- * unmap. So check before we decrement the count.
- */
- if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
- atomic_dec(&vma->vm_mm->uprobes_state.count);
- }
+ build_probe_list(inode, vma, start, end, &tmp_list);
+
+ list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
+ unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
+ /*
+ * An unregister could have removed the probe before
+ * unmap. So check before we decrement the count.
+ */
+ if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
+ atomic_dec(&vma->vm_mm->uprobes_state.count);
put_uprobe(uprobe);
}
mutex_unlock(uprobes_mmap_hash(inode));
@@ -1463,12 +1447,9 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
vma = find_vma(mm, bp_vaddr);
if (vma && vma->vm_start <= bp_vaddr) {
if (valid_vma(vma, false)) {
- struct inode *inode;
- loff_t offset;
+ struct inode *inode = vma->vm_file->f_mapping->host;
+ loff_t offset = vaddr_to_offset(vma, bp_vaddr);
- inode = vma->vm_file->f_mapping->host;
- offset = bp_vaddr - vma->vm_start;
- offset += (vma->vm_pgoff << PAGE_SHIFT);
uprobe = find_uprobe(inode, offset);
}
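A worked example of the helper pair introduced above (values chosen purely for illustration, 4 KiB pages assumed): for a vma with vm_start = 0x00400000 and vm_pgoff = 2, a probe at file offset 0x3010 gives offset_to_vaddr() = 0x00400000 + 0x3010 - 0x2000 = 0x00401010, and vaddr_to_offset(vma, 0x00401010) = 0x2000 + 0x1010 = 0x3010, i.e. the two conversions are exact inverses within the mapping.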
diff --git a/kernel/exit.c b/kernel/exit.c
index d17f6c4ddfa9..f65345f9e5bb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -483,7 +483,7 @@ static void close_files(struct files_struct * files)
rcu_read_unlock();
for (;;) {
unsigned long set;
- i = j * __NFDBITS;
+ i = j * BITS_PER_LONG;
if (i >= fdt->max_fds)
break;
set = fdt->open_fds[j++];
diff --git a/kernel/fork.c b/kernel/fork.c
index ff1cad3b7bdc..2c8857e12855 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -114,6 +114,10 @@ int nr_processes(void)
return total;
}
+void __weak arch_release_task_struct(struct task_struct *tsk)
+{
+}
+
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
static struct kmem_cache *task_struct_cachep;
@@ -122,17 +126,17 @@ static inline struct task_struct *alloc_task_struct_node(int node)
return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}
-void __weak arch_release_task_struct(struct task_struct *tsk) { }
-
static inline void free_task_struct(struct task_struct *tsk)
{
- arch_release_task_struct(tsk);
kmem_cache_free(task_struct_cachep, tsk);
}
#endif
+void __weak arch_release_thread_info(struct thread_info *ti)
+{
+}
+
#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
-void __weak arch_release_thread_info(struct thread_info *ti) { }
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
@@ -150,7 +154,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
static inline void free_thread_info(struct thread_info *ti)
{
- arch_release_thread_info(ti);
free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
@@ -164,7 +167,6 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
static void free_thread_info(struct thread_info *ti)
{
- arch_release_thread_info(ti);
kmem_cache_free(thread_info_cache, ti);
}
@@ -205,10 +207,12 @@ static void account_kernel_stack(struct thread_info *ti, int account)
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
+ arch_release_thread_info(tsk->stack);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
+ arch_release_task_struct(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -298,23 +302,16 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
return NULL;
ti = alloc_thread_info_node(tsk, node);
- if (!ti) {
- free_task_struct(tsk);
- return NULL;
- }
+ if (!ti)
+ goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
+ if (err)
+ goto free_ti;
- /*
- * We defer looking at err, because we will need this setup
- * for the clean up path to work correctly.
- */
tsk->stack = ti;
- setup_thread_stack(tsk, orig);
-
- if (err)
- goto out;
+ setup_thread_stack(tsk, orig);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
stackend = end_of_stack(tsk);
@@ -338,8 +335,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
return tsk;
-out:
+free_ti:
free_thread_info(ti);
+free_tsk:
free_task_struct(tsk);
return NULL;
}
@@ -383,16 +381,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
- long pages = vma_pages(mpnt);
- mm->total_vm -= pages;
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
- -pages);
+ -vma_pages(mpnt));
continue;
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
- unsigned long len;
- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+ unsigned long len = vma_pages(mpnt);
+
if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
goto fail_nomem;
charge = len;
@@ -459,8 +455,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto out;
- if (file && uprobe_mmap(tmp))
- goto out;
+ if (file)
+ uprobe_mmap(tmp);
}
/* a new mm has just been created */
arch_dup_mmap(oldmm, mm);
@@ -1310,7 +1306,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
p->memcg_batch.do_batch = 0;
p->memcg_batch.memcg = NULL;
#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index e2b0fb9a0b3b..3717e7b306e0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct futex_q q = futex_q_init;
int res, ret;
+ if (uaddr == uaddr2)
+ return -EINVAL;
+
if (!bitset)
return -EINVAL;
@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
- WARN_ON(!&q.pi_state);
+ WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
- if (rt_mutex_owner(pi_mutex) == current)
+ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index bdb180325551..131ca176b497 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,7 +133,7 @@ irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t retval = IRQ_NONE;
- unsigned int random = 0, irq = desc->irq_data.irq;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
do {
irqreturn_t res;
@@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
/* Fall through to add to randomness */
case IRQ_HANDLED:
- random |= action->flags;
+ flags |= action->flags;
break;
default:
@@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
- if (random & IRQF_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
+ add_interrupt_randomness(irq, flags);
if (!noirqdebug)
note_interrupt(irq, desc, retval);
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 38c5eb839c92..49a77727db42 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -45,7 +46,8 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
{
struct irq_domain *domain;
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
+ of_node_to_nid(of_node));
if (WARN_ON(!domain))
return NULL;
@@ -138,6 +140,36 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
}
/**
+ * irq_domain_add_simple() - Allocate and register a simple irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: total number of irqs in mapping
+ * @first_irq: first number of irq block assigned to the domain
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Allocates a legacy irq_domain if first_irq is positive, or a linear
+ * domain otherwise.
+ *
+ * This implements the expected behaviour for most interrupt
+ * controllers: a linear mapping should normally be used unless the
+ * system requires a legacy mapping in order to supply interrupt
+ * numbers during non-DT registration of devices.
+ */
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ if (first_irq > 0)
+ return irq_domain_add_legacy(of_node, size, first_irq, 0,
+ ops, host_data);
+ else
+ return irq_domain_add_linear(of_node, size, ops, host_data);
+}
+
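A sketch of a driver-side call to the new helper (hypothetical controller; the count of 32 and first_irq of 0 are assumptions): passing first_irq == 0 yields a linear domain for pure-DT setups, while a positive base keeps legacy static numbering.

static struct irq_domain *example_probe_domain(struct device_node *np,
					       const struct irq_domain_ops *ops,
					       void *priv)
{
	return irq_domain_add_simple(np, 32, 0, ops, priv);
}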
+/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in legacy mapping
@@ -203,7 +235,8 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
* one can then use irq_create_mapping() to
* explicitly change them
*/
- ops->map(domain, irq, hwirq);
+ if (ops->map)
+ ops->map(domain, irq, hwirq);
/* Clear norequest flags */
irq_clear_status_flags(irq, IRQ_NOREQUEST);
@@ -215,7 +248,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
/**
- * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
@@ -229,7 +262,8 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
struct irq_domain *domain;
unsigned int *revmap;
- revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+ revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
+ of_node_to_nid(of_node));
if (WARN_ON(!revmap))
return NULL;
@@ -330,24 +364,112 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);
-static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq)
+static void irq_domain_disassociate_many(struct irq_domain *domain,
+ unsigned int irq_base, int count)
{
- struct irq_data *irq_data = irq_get_irq_data(virq);
+ /*
+ * disassociate in reverse order;
+ * not strictly necessary, but nice for unwinding
+ */
+ while (count--) {
+ int irq = irq_base + count;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irq_data->hwirq;
+
+ if (WARN_ON(!irq_data || irq_data->domain != domain))
+ continue;
+
+ irq_set_status_flags(irq, IRQ_NOREQUEST);
+
+ /* remove chip and handler */
+ irq_set_chip_and_handler(irq, NULL, NULL);
+
+ /* Make sure it's completed */
+ synchronize_irq(irq);
+
+ /* Tell the PIC about it */
+ if (domain->ops->unmap)
+ domain->ops->unmap(domain, irq);
+ smp_mb();
- irq_data->hwirq = hwirq;
- irq_data->domain = domain;
- if (domain->ops->map(domain, virq, hwirq)) {
- pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
irq_data->domain = NULL;
irq_data->hwirq = 0;
- return -1;
+
+ /* Clear reverse map */
+ switch(domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LINEAR:
+ if (hwirq < domain->revmap_data.linear.size)
+ domain->revmap_data.linear.revmap[hwirq] = 0;
+ break;
+ case IRQ_DOMAIN_MAP_TREE:
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_delete(&domain->revmap_data.tree, hwirq);
+ mutex_unlock(&revmap_trees_mutex);
+ break;
+ }
}
+}
+
+int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count)
+{
+ unsigned int virq = irq_base;
+ irq_hw_number_t hwirq = hwirq_base;
+ int i, ret;
+
+ pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+ of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+ for (i = 0; i < count; i++) {
+ struct irq_data *irq_data = irq_get_irq_data(virq + i);
+
+ if (WARN(!irq_data, "error: irq_desc not allocated; "
+ "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+ return -EINVAL;
+ if (WARN(irq_data->domain, "error: irq_desc already associated; "
+ "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++, virq++, hwirq++) {
+ struct irq_data *irq_data = irq_get_irq_data(virq);
+
+ irq_data->hwirq = hwirq;
+ irq_data->domain = domain;
+ if (domain->ops->map) {
+ ret = domain->ops->map(domain, virq, hwirq);
+ if (ret != 0) {
+ pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
+ virq, hwirq, ret);
+ WARN_ON(1);
+ irq_data->domain = NULL;
+ irq_data->hwirq = 0;
+ goto err_unmap;
+ }
+ }
- irq_clear_status_flags(virq, IRQ_NOREQUEST);
+ switch (domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LINEAR:
+ if (hwirq < domain->revmap_data.linear.size)
+ domain->revmap_data.linear.revmap[hwirq] = virq;
+ break;
+ case IRQ_DOMAIN_MAP_TREE:
+ mutex_lock(&revmap_trees_mutex);
+ radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+ mutex_unlock(&revmap_trees_mutex);
+ break;
+ }
+
+ irq_clear_status_flags(virq, IRQ_NOREQUEST);
+ }
return 0;
+
+ err_unmap:
+ irq_domain_disassociate_many(domain, irq_base, i);
+ return -EINVAL;
}
+EXPORT_SYMBOL_GPL(irq_domain_associate_many);
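With the ->map() callback now optional and the association step factored out, a controller driver can bind a block of pre-allocated Linux IRQs to hardware IRQs in one call. A minimal sketch under assumptions (the demo_* names and the one-cell binding are invented for illustration, not taken from this patch):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops demo_domain_ops = {
        .xlate = irq_domain_xlate_onecell,      /* ->map() may now be omitted */
};

static int demo_bind_range(struct device_node *np,
                           unsigned int irq_base, int count)
{
        struct irq_domain *d;

        d = irq_domain_add_linear(np, count, &demo_domain_ops, NULL);
        if (!d)
                return -ENOMEM;

        /* associate hwirqs 0..count-1 with Linux IRQs irq_base onwards */
        return irq_domain_associate_many(d, irq_base, 0, count);
}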
/**
* irq_create_direct_mapping() - Allocate an irq for direct mapping
@@ -364,10 +486,10 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
if (domain == NULL)
domain = irq_default_domain;
- BUG_ON(domain == NULL);
- WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+ if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
+ return 0;
- virq = irq_alloc_desc_from(1, 0);
+ virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
@@ -380,7 +502,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
}
pr_debug("create_direct obtained virq %d\n", virq);
- if (irq_setup_virq(domain, virq, virq)) {
+ if (irq_domain_associate(domain, virq, virq)) {
irq_free_desc(virq);
return 0;
}
@@ -433,17 +555,16 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
- virq = irq_alloc_desc_from(hint, 0);
+ virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
if (virq <= 0)
- virq = irq_alloc_desc_from(1, 0);
+ virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
}
- if (irq_setup_virq(domain, virq, hwirq)) {
- if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
- irq_free_desc(virq);
+ if (irq_domain_associate(domain, virq, hwirq)) {
+ irq_free_desc(virq);
return 0;
}
@@ -454,6 +575,44 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
+/**
+ * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
+ * @domain: domain owning the interrupt range
+ * @irq_base: beginning of linux IRQ range
+ * @hwirq_base: beginning of hardware IRQ range
+ * @count: Number of interrupts to map
+ *
+ * This routine is used for allocating and mapping a range of hardware
+ * irqs to linux irqs where the linux irq numbers are at pre-defined
+ * locations. For use by controllers that already have static mappings
+ * to insert into the domain.
+ *
+ * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
+ * domain insertion.
+ *
+ * 0 is returned upon success, while any failure to establish a static
+ * mapping is treated as an error.
+ */
+int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count)
+{
+ int ret;
+
+ ret = irq_alloc_descs(irq_base, irq_base, count,
+ of_node_to_nid(domain->of_node));
+ if (unlikely(ret < 0))
+ return ret;
+
+ ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
+ if (unlikely(ret < 0)) {
+ irq_free_descs(irq_base, count);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
+
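As a usage illustration (not part of this patch), a platform with a fixed interrupt layout could pin hardware IRQs 32..47 of an already-created domain to Linux IRQs 64..79; all names and numbers below are hypothetical:

/* sketch: "demo_domain" is assumed to be a linear domain set up earlier */
static int demo_setup_fixed_irqs(struct irq_domain *demo_domain)
{
        int ret;

        /* Linux IRQs 64..79 <- hardware IRQs 32..47 */
        ret = irq_create_strict_mappings(demo_domain, 64, 32, 16);
        if (ret)
                pr_err("static IRQ mapping failed: %d\n", ret);
        return ret;
}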
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
@@ -511,7 +670,6 @@ void irq_dispose_mapping(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_domain *domain;
- irq_hw_number_t hwirq;
if (!virq || !irq_data)
return;
@@ -524,33 +682,7 @@ void irq_dispose_mapping(unsigned int virq)
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
return;
- irq_set_status_flags(virq, IRQ_NOREQUEST);
-
- /* remove chip and handler */
- irq_set_chip_and_handler(virq, NULL, NULL);
-
- /* Make sure it's completed */
- synchronize_irq(virq);
-
- /* Tell the PIC about it */
- if (domain->ops->unmap)
- domain->ops->unmap(domain, virq);
- smp_mb();
-
- /* Clear reverse map */
- hwirq = irq_data->hwirq;
- switch(domain->revmap_type) {
- case IRQ_DOMAIN_MAP_LINEAR:
- if (hwirq < domain->revmap_data.linear.size)
- domain->revmap_data.linear.revmap[hwirq] = 0;
- break;
- case IRQ_DOMAIN_MAP_TREE:
- mutex_lock(&revmap_trees_mutex);
- radix_tree_delete(&domain->revmap_data.tree, hwirq);
- mutex_unlock(&revmap_trees_mutex);
- break;
- }
-
+ irq_domain_disassociate_many(domain, virq, 1);
irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@@ -559,16 +691,11 @@ EXPORT_SYMBOL_GPL(irq_dispose_mapping);
* irq_find_mapping() - Find a linux irq from an hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
- *
- * This is a slow path, for use by generic code. It's expected that an
- * irq controller implementation directly calls the appropriate low level
- * mapping function.
*/
unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- unsigned int i;
- unsigned int hint = hwirq % nr_irqs;
+ struct irq_data *data;
/* Look for default domain if necessary */
if (domain == NULL)
@@ -576,115 +703,47 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
if (domain == NULL)
return 0;
- /* legacy -> bail early */
- if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
+ switch (domain->revmap_type) {
+ case IRQ_DOMAIN_MAP_LEGACY:
return irq_domain_legacy_revmap(domain, hwirq);
-
- /* Slow path does a linear search of the map */
- if (hint == 0)
- hint = 1;
- i = hint;
- do {
- struct irq_data *data = irq_get_irq_data(i);
+ case IRQ_DOMAIN_MAP_LINEAR:
+ return irq_linear_revmap(domain, hwirq);
+ case IRQ_DOMAIN_MAP_TREE:
+ rcu_read_lock();
+ data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+ rcu_read_unlock();
+ if (data)
+ return data->irq;
+ break;
+ case IRQ_DOMAIN_MAP_NOMAP:
+ data = irq_get_irq_data(hwirq);
if (data && (data->domain == domain) && (data->hwirq == hwirq))
- return i;
- i++;
- if (i >= nr_irqs)
- i = 1;
- } while(i != hint);
+ return hwirq;
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
/**
- * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
- * @domain: domain owning this hardware interrupt
- * @hwirq: hardware irq number in that domain space
- *
- * This is a fast path, for use by irq controller code that uses radix tree
- * revmaps
- */
-unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
- irq_hw_number_t hwirq)
-{
- struct irq_data *irq_data;
-
- if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
- return irq_find_mapping(domain, hwirq);
-
- /*
- * Freeing an irq can delete nodes along the path to
- * do the lookup via call_rcu.
- */
- rcu_read_lock();
- irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
- rcu_read_unlock();
-
- /*
- * If found in radix tree, then fine.
- * Else fallback to linear lookup - this should not happen in practice
- * as it means that we failed to insert the node in the radix tree.
- */
- return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
-
-/**
- * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
- * @domain: domain owning this hardware interrupt
- * @virq: linux irq number
- * @hwirq: hardware irq number in that domain space
- *
- * This is for use by irq controllers that use a radix tree reverse
- * mapping for fast lookup.
- */
-void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
- irq_hw_number_t hwirq)
-{
- struct irq_data *irq_data = irq_get_irq_data(virq);
-
- if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
- return;
-
- if (virq) {
- mutex_lock(&revmap_trees_mutex);
- radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
- mutex_unlock(&revmap_trees_mutex);
- }
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
-
-/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
- * This is a fast path, for use by irq controller code that uses linear
- * revmaps. It does fallback to the slow path if the revmap doesn't exist
- * yet and will create the revmap entry with appropriate locking
+ * This is a fast path that can be called directly by irq controller code to
+ * save a handful of instructions.
*/
unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
- unsigned int *revmap;
-
- if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
- return irq_find_mapping(domain, hwirq);
+ BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
- /* Check revmap bounds */
- if (unlikely(hwirq >= domain->revmap_data.linear.size))
- return irq_find_mapping(domain, hwirq);
-
- /* Check if revmap was allocated */
- revmap = domain->revmap_data.linear.revmap;
- if (unlikely(revmap == NULL))
- return irq_find_mapping(domain, hwirq);
-
- /* Fill up revmap with slow path if no mapping found */
- if (unlikely(!revmap[hwirq]))
- revmap[hwirq] = irq_find_mapping(domain, hwirq);
+ /* Check revmap bounds; complain if exceeded */
+ if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
+ return 0;
- return revmap[hwirq];
+ return domain->revmap_data.linear.revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
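A cascade handler in an interrupt-controller driver is the typical caller of this fast path; a hedged sketch follows, where demo_read_pending() is a made-up register read and the domain is assumed to be linear with at least 32 entries:

static irqreturn_t demo_cascade_handler(int irq, void *data)
{
        struct irq_domain *domain = data;
        unsigned long pending = demo_read_pending();    /* hypothetical */
        unsigned long hwirq;

        for_each_set_bit(hwirq, &pending, 32)
                generic_handle_irq(irq_linear_revmap(domain, hwirq));

        return IRQ_HANDLED;
}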
@@ -761,12 +820,6 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
-static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- return 0;
-}
-
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
@@ -829,7 +882,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d,
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
const struct irq_domain_ops irq_domain_simple_ops = {
- .map = irq_domain_simple_map,
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 814c9ef6bba1..4c69326aa773 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -893,22 +893,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
return -ENOSYS;
if (!try_module_get(desc->owner))
return -ENODEV;
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & IRQF_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
/*
* Check whether the interrupt nests into another interrupt
@@ -960,6 +944,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
}
/*
+ * Drivers are often written to work without knowledge about the
+ * underlying irq chip implementation, so a request for a
+ * threaded irq without a primary hard irq context handler
+ * requires the ONESHOT flag to be set. Some irq chips like
+ * MSI based interrupts are per se one shot safe. Check the
+ * chip flags, so we can avoid the unmask dance at the end of
+ * the threaded handler for those.
+ */
+ if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
+ new->flags &= ~IRQF_ONESHOT;
+
+ /*
* The following block of code has to be executed atomically
*/
raw_spin_lock_irqsave(&desc->lock, flags);
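For context, the driver-side request that this block adjusts usually looks like the sketch below; portable drivers keep passing IRQF_ONESHOT, and the core now drops it quietly when the chip advertises IRQCHIP_ONESHOT_SAFE (demo_thread_fn and the device name are placeholders):

static int demo_request(unsigned int irq, void *dev)
{
        /* threaded-only request: no hard-IRQ handler, so IRQF_ONESHOT is set */
        return request_threaded_irq(irq, NULL, demo_thread_fn,
                                    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                                    "demo-device", dev);
}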
@@ -1033,7 +1029,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
new->thread_mask = 1 << ffz(thread_mask);
- } else if (new->handler == irq_default_primary_handler) {
+ } else if (new->handler == irq_default_primary_handler &&
+ !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
/*
* The interrupt was requested with handler = NULL, so
* we use the default primary handler for it. But it
@@ -1354,7 +1351,6 @@ EXPORT_SYMBOL(free_irq);
* Flags:
*
* IRQF_SHARED Interrupt is shared
- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 4e2e472f6aeb..0668d58d6413 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1424,7 +1424,7 @@ static void update_vmcoreinfo_note(void)
void crash_save_vmcoreinfo(void)
{
- vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
+ vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
update_vmcoreinfo_note();
}
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ff2c7cb86d77..6f99aead66c6 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -45,6 +45,13 @@ extern int max_threads;
static struct workqueue_struct *khelper_wq;
+/*
+ * kmod_thread_locker is used for deadlock avoidance. There is no explicit
+ * locking to protect this global - it is private to the singleton khelper
+ * thread and should only ever be modified by that thread.
+ */
+static const struct task_struct *kmod_thread_locker;
+
#define CAP_BSET (void *)1
#define CAP_PI (void *)2
@@ -221,6 +228,13 @@ fail:
return 0;
}
+static int call_helper(void *data)
+{
+ /* Worker thread started blocking khelper thread. */
+ kmod_thread_locker = current;
+ return ____call_usermodehelper(data);
+}
+
static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
if (info->cleanup)
@@ -295,9 +309,12 @@ static void __call_usermodehelper(struct work_struct *work)
if (wait == UMH_WAIT_PROC)
pid = kernel_thread(wait_for_helper, sub_info,
CLONE_FS | CLONE_FILES | SIGCHLD);
- else
- pid = kernel_thread(____call_usermodehelper, sub_info,
+ else {
+ pid = kernel_thread(call_helper, sub_info,
CLONE_VFORK | SIGCHLD);
+ /* Worker thread stopped blocking khelper thread. */
+ kmod_thread_locker = NULL;
+ }
switch (wait) {
case UMH_NO_WAIT:
@@ -548,6 +565,16 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
retval = -EBUSY;
goto out;
}
+ /*
+ * The worker thread must not wait for the khelper thread at the
+ * wait_for_completion() below if it was created with the CLONE_VFORK
+ * flag, because the khelper thread is already waiting for this thread
+ * at wait_for_completion() in do_fork().
+ */
+ if (wait != UMH_NO_WAIT && current == kmod_thread_locker) {
+ retval = -EBUSY;
+ goto out;
+ }
sub_info->complete = &done;
sub_info->wait = wait;
@@ -577,6 +604,12 @@ unlock:
return retval;
}
+/*
+ * call_usermodehelper_fns() will not run the caller-provided cleanup function
+ * if a memory allocation failure occurs. So the caller might need to
+ * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform
+ * the necessary cleanup within the caller.
+ */
int call_usermodehelper_fns(
char *path, char **argv, char **envp, int wait,
int (*init)(struct subprocess_info *info, struct cred *new),
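A hedged caller-side sketch of the -ENOMEM rule described in the comment above; the helper path, its arguments and the demo_* names are placeholders, and the cleanup callback frees the argv that argv_split() allocated:

static void demo_cleanup(struct subprocess_info *info)
{
        argv_free(info->argv);
}

static int demo_run_helper(void)
{
        static char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
        int argc, ret;
        char **argv = argv_split(GFP_KERNEL, "/sbin/demo-helper --flag", &argc);

        if (!argv)
                return -ENOMEM;

        ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_PROC,
                                      NULL, demo_cleanup, NULL);
        if (ret == -ENOMEM)
                argv_free(argv);        /* the cleanup callback was not run */
        return ret;
}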
diff --git a/kernel/panic.c b/kernel/panic.c
index d2a5f4ecc6dd..e1b2822fff97 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -75,6 +75,14 @@ void panic(const char *fmt, ...)
int state = 0;
/*
+ * Disable local interrupts. This will prevent panic_smp_self_stop
+ * from deadlocking the first cpu that invokes the panic, since
+ * there is nothing to prevent an interrupt handler (that runs
+ * after the panic_lock is acquired) from invoking panic again.
+ */
+ local_irq_disable();
+
+ /*
* It's possible to come here directly from a panic-assertion and
* not have preempt disabled. Some functions called from here want
* preempt to be disabled. No point enabling it later though...
diff --git a/kernel/printk.c b/kernel/printk.c
index 50c96b5651b6..66a2ea37b576 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -389,8 +389,10 @@ static ssize_t devkmsg_writev(struct kiocb *iocb, const struct iovec *iv,
line = buf;
for (i = 0; i < count; i++) {
- if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len))
+ if (copy_from_user(line, iv[i].iov_base, iv[i].iov_len)) {
+ ret = -EFAULT;
goto out;
+ }
line += iv[i].iov_len;
}
@@ -1032,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
+ prev = msg->flags;
idx = log_next(idx);
seq++;
}
@@ -1044,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
struct log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
+ prev = msg->flags;
idx = log_next(idx);
seq++;
}
@@ -1540,17 +1544,23 @@ asmlinkage int vprintk_emit(int facility, int level,
lflags |= LOG_NEWLINE;
}
- /* strip syslog prefix and extract log level or control flags */
- if (text[0] == '<' && text[1] && text[2] == '>') {
- switch (text[1]) {
- case '0' ... '7':
- if (level == -1)
- level = text[1] - '0';
- case 'd': /* KERN_DEFAULT */
- lflags |= LOG_PREFIX;
- case 'c': /* KERN_CONT */
- text += 3;
- text_len -= 3;
+ /* strip kernel syslog prefix and extract log level or control flags */
+ if (facility == 0) {
+ int kern_level = printk_get_level(text);
+
+ if (kern_level) {
+ const char *end_of_header = printk_skip_level(text);
+ switch (kern_level) {
+ case '0' ... '7':
+ if (level == -1)
+ level = kern_level - '0';
+ case 'd': /* KERN_DEFAULT */
+ lflags |= LOG_PREFIX;
+ case 'c': /* KERN_CONT */
+ break;
+ }
+ text_len -= end_of_header - text;
+ text = (char *)end_of_header;
}
}
diff --git a/kernel/resource.c b/kernel/resource.c
index dc8b47764443..34d45886ee84 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -7,6 +7,8 @@
* Arbitrary resource management.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -791,8 +793,28 @@ void __init reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name)
{
+ int abort = 0;
+
write_lock(&resource_lock);
- __reserve_region_with_split(root, start, end, name);
+ if (root->start > start || root->end < end) {
+ pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
+ (unsigned long long)start, (unsigned long long)end,
+ root);
+ if (start > root->end || end < root->start)
+ abort = 1;
+ else {
+ if (end > root->end)
+ end = root->end;
+ if (start < root->start)
+ start = root->start;
+ pr_err("fixing request to [0x%llx-0x%llx]\n",
+ (unsigned long long)start,
+ (unsigned long long)end);
+ }
+ dump_stack();
+ }
+ if (!abort)
+ __reserve_region_with_split(root, start, end, name);
write_unlock(&resource_lock);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5d011ef4c0df..fbf1fd098dc6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1910,12 +1910,12 @@ static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
+ trace_sched_switch(prev, next);
sched_info_switch(prev, next);
perf_event_task_sched_out(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
prepare_lock_switch(rq, next);
prepare_arch_switch(next);
- trace_sched_switch(prev, next);
}
/**
@@ -3142,6 +3142,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
#endif
+static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
+{
+ u64 temp = (__force u64) rtime;
+
+ temp *= (__force u64) utime;
+
+ if (sizeof(cputime_t) == 4)
+ temp = div_u64(temp, (__force u32) total);
+ else
+ temp = div64_u64(temp, (__force u64) total);
+
+ return (__force cputime_t) temp;
+}
+
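The helper simply evaluates utime * rtime / total with a division wide enough for the cputime_t in use. As a worked illustration with invented numbers: for rtime = 1000, utime = 300 and stime = 100 (total = 400), the scaled user time is 1000 * 300 / 400 = 750, i.e. three quarters of the measured runtime is attributed to user context, matching the utime:stime ratio.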
void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
cputime_t rtime, utime = p->utime, total = utime + p->stime;
@@ -3151,13 +3165,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
*/
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
- if (total) {
- u64 temp = (__force u64) rtime;
-
- temp *= (__force u64) utime;
- do_div(temp, (__force u32) total);
- utime = (__force cputime_t) temp;
- } else
+ if (total)
+ utime = scale_utime(utime, rtime, total);
+ else
utime = rtime;
/*
@@ -3184,13 +3194,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
total = cputime.utime + cputime.stime;
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
- if (total) {
- u64 temp = (__force u64) rtime;
-
- temp *= (__force u64) cputime.utime;
- do_div(temp, (__force u32) total);
- utime = (__force cputime_t) temp;
- } else
+ if (total)
+ utime = scale_utime(cputime.utime, rtime, total);
+ else
utime = rtime;
sig->prev_utime = max(sig->prev_utime, utime);
@@ -4340,9 +4346,7 @@ recheck:
*/
if (unlikely(policy == p->policy && (!rt_policy(policy) ||
param->sched_priority == p->rt_priority))) {
-
- __task_rq_unlock(rq);
- raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ task_rq_unlock(rq, p, &flags);
return 0;
}
@@ -7248,6 +7252,7 @@ int in_sched_functions(unsigned long addr)
#ifdef CONFIG_CGROUP_SCHED
struct task_group root_task_group;
+LIST_HEAD(task_groups);
#endif
DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d72586fdf660..23aa789c53ee 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -65,8 +65,8 @@ static int convert_prio(int prio)
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
{
- int idx = 0;
- int task_pri = convert_prio(p->prio);
+ int idx = 0;
+ int task_pri = convert_prio(p->prio);
if (task_pri >= MAX_RT_PRIO)
return 0;
@@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
*/
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
- int *currpri = &cp->cpu_to_pri[cpu];
- int oldpri = *currpri;
- int do_mb = 0;
+ int *currpri = &cp->cpu_to_pri[cpu];
+ int oldpri = *currpri;
+ int do_mb = 0;
newpri = convert_prio(newpri);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 22321db64952..c219bf8d704c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3069,6 +3069,9 @@ struct lb_env {
int new_dst_cpu;
enum cpu_idle_type idle;
long imbalance;
+ /* The set of CPUs under consideration for load-balancing */
+ struct cpumask *cpus;
+
unsigned int flags;
unsigned int loop;
@@ -3384,6 +3387,14 @@ static int tg_load_down(struct task_group *tg, void *data)
static void update_h_load(long cpu)
{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long now = jiffies;
+
+ if (rq->h_load_throttle == now)
+ return;
+
+ rq->h_load_throttle = now;
+
rcu_read_lock();
walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
rcu_read_unlock();
@@ -3653,8 +3664,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
- int local_group, const struct cpumask *cpus,
- int *balance, struct sg_lb_stats *sgs)
+ int local_group, int *balance, struct sg_lb_stats *sgs)
{
unsigned long nr_running, max_nr_running, min_nr_running;
unsigned long load, max_cpu_load, min_cpu_load;
@@ -3671,7 +3681,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
max_nr_running = 0;
min_nr_running = ~0UL;
- for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+ for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
nr_running = rq->nr_running;
@@ -3800,8 +3810,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env,
- const struct cpumask *cpus,
- int *balance, struct sd_lb_stats *sds)
+ int *balance, struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
@@ -3818,8 +3827,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(env, sg, load_idx, local_group,
- cpus, balance, &sgs);
+ update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
if (local_group && !(*balance))
return;
@@ -4055,7 +4063,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* to restore balance.
*
* @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
* @balance: Pointer to a variable indicating if this_cpu
* is the appropriate cpu to perform load balancing at this_level.
*
@@ -4065,7 +4072,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* put to idle by rebalancing its tasks onto our group.
*/
static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
{
struct sd_lb_stats sds;
@@ -4075,7 +4082,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
* Compute the various statistics relavent for load balancing at
* this level.
*/
- update_sd_lb_stats(env, cpus, balance, &sds);
+ update_sd_lb_stats(env, balance, &sds);
/*
* this_cpu is not the appropriate cpu to perform load balancing at
@@ -4155,8 +4162,7 @@ ret:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *find_busiest_queue(struct lb_env *env,
- struct sched_group *group,
- const struct cpumask *cpus)
+ struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
@@ -4171,7 +4177,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- if (!cpumask_test_cpu(i, cpus))
+ if (!cpumask_test_cpu(i, env->cpus))
continue;
rq = cpu_rq(i);
@@ -4252,6 +4258,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.dst_grpmask = sched_group_cpus(sd->groups),
.idle = idle,
.loop_break = sched_nr_migrate_break,
+ .cpus = cpus,
};
cpumask_copy(cpus, cpu_active_mask);
@@ -4260,7 +4267,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(&env, cpus, balance);
+ group = find_busiest_group(&env, balance);
if (*balance == 0)
goto out_balanced;
@@ -4270,7 +4277,7 @@ redo:
goto out_balanced;
}
- busiest = find_busiest_queue(&env, group, cpus);
+ busiest = find_busiest_queue(&env, group);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -4294,11 +4301,10 @@ redo:
env.src_rq = busiest;
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
+ update_h_load(env.src_cpu);
more_balance:
local_irq_save(flags);
double_rq_lock(this_rq, busiest);
- if (!env.loop)
- update_h_load(env.src_cpu);
/*
* cur_ld_moved - load moved in current iteration
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 573e1ca01102..944cb68420e9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -788,6 +788,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
const struct cpumask *span;
span = sched_rt_period_mask();
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * FIXME: isolated CPUs should really leave the root task group,
+ * whether they are isolcpus or were isolated via cpusets, lest
+ * the timer run on a CPU which does not service all runqueues,
+ * potentially leaving other CPUs indefinitely throttled. If
+ * isolation is really required, the user will turn the throttle
+ * off to kill the perturbations it causes anyway. Meanwhile,
+ * this maintains functionality for boot and/or troubleshooting.
+ */
+ if (rt_b == &root_task_group.rt_bandwidth)
+ span = cpu_online_mask;
+#endif
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c35a1a7dd4d6..f6714d009e77 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex;
struct cfs_rq;
struct rt_rq;
-static LIST_HEAD(task_groups);
+extern struct list_head task_groups;
struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
@@ -374,7 +374,11 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-#endif
+#ifdef CONFIG_SMP
+ unsigned long h_load_throttle;
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
#ifdef CONFIG_RT_GROUP_SCHED
struct list_head leaf_rt_rq_list;
#endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7b386e86fd23..da5eb5bed84a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
{
struct task_struct *stop = rq->stop;
- if (stop && stop->on_rq)
+ if (stop && stop->on_rq) {
+ stop->se.exec_start = rq->clock_task;
return stop;
+ }
return NULL;
}
@@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq)
static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
+ struct task_struct *curr = rq->curr;
+ u64 delta_exec;
+
+ delta_exec = rq->clock_task - curr->se.exec_start;
+ if (unlikely((s64)delta_exec < 0))
+ delta_exec = 0;
+
+ schedstat_set(curr->se.statistics.exec_max,
+ max(curr->se.statistics.exec_max, delta_exec));
+
+ curr->se.sum_exec_runtime += delta_exec;
+ account_group_exec_runtime(curr, delta_exec);
+
+ curr->se.exec_start = rq->clock_task;
+ cpuacct_charge(curr, delta_exec);
}
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
@@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
static void set_curr_task_stop(struct rq *rq)
{
+ struct task_struct *stop = rq->stop;
+
+ stop->se.exec_start = rq->clock_task;
}
static void switched_to_stop(struct rq *rq, struct task_struct *p)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 671f9594e368..b73e681df09e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -210,6 +210,14 @@ asmlinkage void __do_softirq(void)
__u32 pending;
int max_restart = MAX_SOFTIRQ_RESTART;
int cpu;
+ unsigned long old_flags = current->flags;
+
+ /*
+ * Mask out PF_MEMALLOC as the current task context is borrowed for the
+ * softirq. A softirq handler, such as network RX, might set PF_MEMALLOC
+ * again if the socket is related to swap.
+ */
+ current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
account_system_vtime(current);
@@ -265,6 +273,7 @@ restart:
account_system_vtime(current);
__local_bh_enable(SOFTIRQ_OFFSET);
+ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
#ifndef __ARCH_HAS_DO_SOFTIRQ
diff --git a/kernel/sys.c b/kernel/sys.c
index 2d39a84cd857..241507f23eca 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2015,7 +2015,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
break;
}
me->pdeath_signal = arg2;
- error = 0;
break;
case PR_GET_PDEATHSIG:
error = put_user(me->pdeath_signal, (int __user *)arg2);
@@ -2029,7 +2028,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
break;
}
set_dumpable(me->mm, arg2);
- error = 0;
break;
case PR_SET_UNALIGN:
@@ -2056,10 +2054,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SET_TIMING:
if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
- else
- error = 0;
break;
-
case PR_SET_NAME:
comm[sizeof(me->comm)-1] = 0;
if (strncpy_from_user(comm, (char __user *)arg2,
@@ -2067,20 +2062,19 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
return -EFAULT;
set_task_comm(me, comm);
proc_comm_connector(me);
- return 0;
+ break;
case PR_GET_NAME:
get_task_comm(comm, me);
if (copy_to_user((char __user *)arg2, comm,
sizeof(comm)))
return -EFAULT;
- return 0;
+ break;
case PR_GET_ENDIAN:
error = GET_ENDIAN(me, arg2);
break;
case PR_SET_ENDIAN:
error = SET_ENDIAN(me, arg2);
break;
-
case PR_GET_SECCOMP:
error = prctl_get_seccomp();
break;
@@ -2108,7 +2102,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
current->default_timer_slack_ns;
else
current->timer_slack_ns = arg2;
- error = 0;
break;
case PR_MCE_KILL:
if (arg4 | arg5)
@@ -2134,7 +2127,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
default:
return -EINVAL;
}
- error = 0;
break;
case PR_MCE_KILL_GET:
if (arg2 | arg3 | arg4 | arg5)
@@ -2153,7 +2145,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
break;
case PR_SET_CHILD_SUBREAPER:
me->signal->is_child_subreaper = !!arg2;
- error = 0;
break;
case PR_GET_CHILD_SUBREAPER:
error = put_user(me->signal->is_child_subreaper,
@@ -2195,46 +2186,52 @@ static void argv_cleanup(struct subprocess_info *info)
argv_free(info->argv);
}
-/**
- * orderly_poweroff - Trigger an orderly system poweroff
- * @force: force poweroff if command execution fails
- *
- * This may be called from any context to trigger a system shutdown.
- * If the orderly shutdown fails, it will force an immediate shutdown.
- */
-int orderly_poweroff(bool force)
+static int __orderly_poweroff(void)
{
int argc;
- char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
+ char **argv;
static char *envp[] = {
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
NULL
};
- int ret = -ENOMEM;
+ int ret;
+ argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
if (argv == NULL) {
printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
__func__, poweroff_cmd);
- goto out;
+ return -ENOMEM;
}
ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
NULL, argv_cleanup, NULL);
-out:
- if (likely(!ret))
- return 0;
-
if (ret == -ENOMEM)
argv_free(argv);
- if (force) {
+ return ret;
+}
+
+/**
+ * orderly_poweroff - Trigger an orderly system poweroff
+ * @force: force poweroff if command execution fails
+ *
+ * This may be called from any context to trigger a system shutdown.
+ * If the orderly shutdown fails, it will force an immediate shutdown.
+ */
+int orderly_poweroff(bool force)
+{
+ int ret = __orderly_poweroff();
+
+ if (ret && force) {
printk(KERN_WARNING "Failed to start orderly shutdown: "
"forcing the issue\n");
- /* I guess this should try to kick off some daemon to
- sync and poweroff asap. Or not even bother syncing
- if we're doing an emergency shutdown? */
+ /*
+ * I guess this should try to kick off some daemon to sync and
+ * poweroff asap. Or not even bother syncing if we're doing an
+ * emergency shutdown?
+ */
emergency_sync();
kernel_power_off();
}
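Call sites are unaffected by the split; for instance, a driver that must shut the machine down on a critical event can still do the following (illustrative, not from this patch):

static void demo_critical_overheat(void)
{
        pr_crit("critical temperature reached, shutting down\n");
        /* run poweroff_cmd via the usermode helper; force a halt if that fails */
        orderly_poweroff(true);
}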
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4ab11879aeb4..87174ef59161 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -30,6 +30,7 @@
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/kmemcheck.h>
+#include <linux/kmemleak.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -174,6 +175,11 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+static int proc_dostring_coredump(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */
static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
@@ -410,7 +416,7 @@ static struct ctl_table kern_table[] = {
.data = core_pattern,
.maxlen = CORENAME_MAX_SIZE,
.mode = 0644,
- .proc_handler = proc_dostring,
+ .proc_handler = proc_dostring_coredump,
},
{
.procname = "core_pipe_limit",
@@ -1095,11 +1101,9 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
{
- .procname = "nr_pdflush_threads",
- .data = &nr_pdflush_threads,
- .maxlen = sizeof nr_pdflush_threads,
- .mode = 0444 /* read-only*/,
- .proc_handler = proc_dointvec,
+ .procname = "nr_pdflush_threads",
+ .mode = 0444 /* read-only */,
+ .proc_handler = pdflush_proc_obsolete,
},
{
.procname = "swappiness",
@@ -1494,11 +1498,29 @@ static struct ctl_table fs_table[] = {
#endif
#endif
{
+ .procname = "protected_symlinks",
+ .data = &sysctl_protected_symlinks,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "protected_hardlinks",
+ .data = &sysctl_protected_hardlinks,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
.procname = "suid_dumpable",
.data = &suid_dumpable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_dointvec_minmax_coredump,
.extra1 = &zero,
.extra2 = &two,
},
@@ -1551,7 +1573,10 @@ static struct ctl_table dev_table[] = {
int __init sysctl_init(void)
{
- register_sysctl_table(sysctl_base_table);
+ struct ctl_table_header *hdr;
+
+ hdr = register_sysctl_table(sysctl_base_table);
+ kmemleak_not_leak(hdr);
return 0;
}
@@ -2009,6 +2034,34 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
do_proc_dointvec_minmax_conv, &param);
}
+static void validate_coredump_safety(void)
+{
+ if (suid_dumpable == SUID_DUMPABLE_SAFE &&
+ core_pattern[0] != '/' && core_pattern[0] != '|') {
+ printk(KERN_WARNING "Unsafe core_pattern used with "\
+ "suid_dumpable=2. Pipe handler or fully qualified "\
+ "core dump path required.\n");
+ }
+}
+
+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (!error)
+ validate_coredump_safety();
+ return error;
+}
+
+static int proc_dostring_coredump(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error = proc_dostring(table, write, buffer, lenp, ppos);
+ if (!error)
+ validate_coredump_safety();
+ return error;
+}
+
static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index a650694883a1..65bdcf198d4e 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -147,7 +147,7 @@ static const struct bin_table bin_vm_table[] = {
{ CTL_INT, VM_DIRTY_RATIO, "dirty_ratio" },
/* VM_DIRTY_WB_CS "dirty_writeback_centisecs" no longer used */
/* VM_DIRTY_EXPIRE_CS "dirty_expire_centisecs" no longer used */
- { CTL_INT, VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
+ /* VM_NR_PDFLUSH_THREADS "nr_pdflush_threads" no longer used */
{ CTL_INT, VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
/* VM_PAGEBUF unused */
/* VM_HUGETLB_PAGES "nr_hugepages" no longer used */
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 91d4e1742a0c..d320d44903bd 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -75,6 +75,7 @@ void task_work_run(void)
p = q->next;
q->func(q);
q = p;
+ cond_resched();
}
}
}
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e66046456f4f..d0a32796550f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -436,6 +436,11 @@ static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
sizeof(struct cgroupstats));
+ if (na == NULL) {
+ rc = -EMSGSIZE;
+ goto err;
+ }
+
stats = nla_data(na);
memset(stats, 0, sizeof(*stats));
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index a470154e0408..46da0537c10b 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -37,7 +37,7 @@
* requested HZ value. It is also not recommended
* for "tick-less" systems.
*/
-#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
+#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ))
/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
* conversion, the .shift value could be zero. However
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index b7fbadc5c973..24174b4d669b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock);
/* USER_HZ period (usecs): */
unsigned long tick_usec = TICK_USEC;
-/* ACTHZ period (nsecs): */
+/* SHIFTED_HZ period (nsecs): */
unsigned long tick_nsec;
static u64 tick_length;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f045cc50832d..34e5eac81424 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -65,14 +65,14 @@ struct timekeeper {
* used instead.
*/
struct timespec wall_to_monotonic;
- /* time spent in suspend */
- struct timespec total_sleep_time;
- /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
- struct timespec raw_time;
/* Offset clock monotonic -> clock realtime */
ktime_t offs_real;
+ /* time spent in suspend */
+ struct timespec total_sleep_time;
/* Offset clock monotonic -> clock boottime */
ktime_t offs_boot;
+ /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+ struct timespec raw_time;
/* Seqlock for all timekeeper values */
seqlock_t lock;
};
@@ -108,13 +108,39 @@ static struct timespec tk_xtime(struct timekeeper *tk)
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec = ts->tv_sec;
- tk->xtime_nsec = ts->tv_nsec << tk->shift;
+ tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}
static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
tk->xtime_sec += ts->tv_sec;
- tk->xtime_nsec += ts->tv_nsec << tk->shift;
+ tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
+ tk_normalize_xtime(tk);
+}
+
+static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
+{
+ struct timespec tmp;
+
+ /*
+ * Verify consistency of: offset_real = -wall_to_monotonic
+ * before modifying anything
+ */
+ set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
+ -tk->wall_to_monotonic.tv_nsec);
+ WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
+ tk->wall_to_monotonic = wtm;
+ set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
+ tk->offs_real = timespec_to_ktime(tmp);
+}
+
+static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
+{
+ /* Verify consistency before modifying */
+ WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);
+
+ tk->total_sleep_time = t;
+ tk->offs_boot = timespec_to_ktime(t);
}
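Put differently, after either setter runs, the cached offsets satisfy offs_real == timespec_to_ktime(-wall_to_monotonic) and offs_boot == timespec_to_ktime(total_sleep_time), which is exactly the invariant the WARN_ON_ONCE checks verify before each update; CLOCK_REALTIME and CLOCK_BOOTTIME then remain cheap additions on top of CLOCK_MONOTONIC.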
/**
@@ -217,14 +243,6 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
return nsec + arch_gettimeoffset();
}
-static void update_rt_offset(struct timekeeper *tk)
-{
- struct timespec tmp, *wtm = &tk->wall_to_monotonic;
-
- set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
- tk->offs_real = timespec_to_ktime(tmp);
-}
-
/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
@@ -234,12 +252,10 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
tk->ntp_error = 0;
ntp_clear();
}
- update_rt_offset(tk);
xt = tk_xtime(tk);
update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}
-
/**
* timekeeping_forward_now - update clock to the current time
*
@@ -261,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
tk->xtime_nsec += cycle_delta * tk->mult;
/* If arch requires, add in gettimeoffset() */
- tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
+ tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
tk_normalize_xtime(tk);
@@ -277,18 +293,19 @@ static void timekeeping_forward_now(struct timekeeper *tk)
*/
void getnstimeofday(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs = 0;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -296,19 +313,18 @@ EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned int seq;
s64 secs, nsecs;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- secs = timekeeper.xtime_sec +
- timekeeper.wall_to_monotonic.tv_sec;
- nsecs = timekeeping_get_ns(&timekeeper) +
- timekeeper.wall_to_monotonic.tv_nsec;
+ seq = read_seqbegin(&tk->lock);
+ secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -327,18 +343,19 @@ EXPORT_SYMBOL_GPL(ktime_get);
*/
void ktime_get_ts(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec tomono;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
- tomono = timekeeper.wall_to_monotonic;
+ seq = read_seqbegin(&tk->lock);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
@@ -358,22 +375,23 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
*/
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs_raw, nsecs_real;
WARN_ON_ONCE(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- *ts_raw = timekeeper.raw_time;
- ts_real->tv_sec = timekeeper.xtime_sec;
+ *ts_raw = tk->raw_time;
+ ts_real->tv_sec = tk->xtime_sec;
ts_real->tv_nsec = 0;
- nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
- nsecs_real = timekeeping_get_ns(&timekeeper);
+ nsecs_raw = timekeeping_get_ns_raw(tk);
+ nsecs_real = timekeeping_get_ns(tk);
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -406,28 +424,28 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(const struct timespec *tv)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec ts_delta, xt;
unsigned long flags;
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ if (!timespec_valid_strict(tv))
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
- xt = tk_xtime(&timekeeper);
+ xt = tk_xtime(tk);
ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
- timekeeper.wall_to_monotonic =
- timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
- tk_set_xtime(&timekeeper, tv);
+ tk_set_xtime(tk, tv);
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -436,7 +454,6 @@ int do_settimeofday(const struct timespec *tv)
}
EXPORT_SYMBOL(do_settimeofday);
-
/**
* timekeeping_inject_offset - Adds or subtracts from the current time.
* @tv: pointer to the timespec variable containing the offset
@@ -445,28 +462,37 @@ EXPORT_SYMBOL(do_settimeofday);
*/
int timekeeping_inject_offset(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
+ struct timespec tmp;
+ int ret = 0;
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
+ /* Make sure the proposed value is valid */
+ tmp = timespec_add(tk_xtime(tk), *ts);
+ if (!timespec_valid_strict(&tmp)) {
+ ret = -EINVAL;
+ goto error;
+ }
- tk_xtime_add(&timekeeper, ts);
- timekeeper.wall_to_monotonic =
- timespec_sub(timekeeper.wall_to_monotonic, *ts);
+ tk_xtime_add(tk, ts);
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));
- timekeeping_update(&timekeeper, true);
+error: /* even if we error out, we forwarded the time, so call update */
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
- return 0;
+ return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
@@ -477,23 +503,24 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
*/
static int change_clocksource(void *data)
{
+ struct timekeeper *tk = &timekeeper;
struct clocksource *new, *old;
unsigned long flags;
new = (struct clocksource *) data;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
if (!new->enable || new->enable(new) == 0) {
- old = timekeeper.clock;
- tk_setup_internals(&timekeeper, new);
+ old = tk->clock;
+ tk_setup_internals(tk, new);
if (old->disable)
old->disable(old);
}
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
return 0;
}
@@ -507,7 +534,9 @@ static int change_clocksource(void *data)
*/
void timekeeping_notify(struct clocksource *clock)
{
- if (timekeeper.clock == clock)
+ struct timekeeper *tk = &timekeeper;
+
+ if (tk->clock == clock)
return;
stop_machine(change_clocksource, clock, NULL);
tick_clock_notify();
@@ -536,35 +565,36 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
*/
void getrawmonotonic(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
s64 nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
- nsecs = timekeeping_get_ns_raw(&timekeeper);
- *ts = timekeeper.raw_time;
+ seq = read_seqbegin(&tk->lock);
+ nsecs = timekeeping_get_ns_raw(tk);
+ *ts = tk->raw_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
-
/**
* timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
*/
int timekeeping_valid_for_hres(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
int ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+ ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
return ret;
}
@@ -574,15 +604,16 @@ int timekeeping_valid_for_hres(void)
*/
u64 timekeeping_max_deferment(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
u64 ret;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- ret = timekeeper.clock->max_idle_ns;
+ ret = tk->clock->max_idle_ns;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
return ret;
}
@@ -622,46 +653,56 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts)
*/
void __init timekeeping_init(void)
{
+ struct timekeeper *tk = &timekeeper;
struct clocksource *clock;
unsigned long flags;
- struct timespec now, boot;
+ struct timespec now, boot, tmp;
read_persistent_clock(&now);
+ if (!timespec_valid_strict(&now)) {
+ pr_warn("WARNING: Persistent clock returned invalid value!\n"
+ " Check your CMOS/BIOS settings.\n");
+ now.tv_sec = 0;
+ now.tv_nsec = 0;
+ }
+
read_boot_clock(&boot);
+ if (!timespec_valid_strict(&boot)) {
+ pr_warn("WARNING: Boot clock returned invalid value!\n"
+ " Check your CMOS/BIOS settings.\n");
+ boot.tv_sec = 0;
+ boot.tv_nsec = 0;
+ }
- seqlock_init(&timekeeper.lock);
+ seqlock_init(&tk->lock);
ntp_init();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
- tk_setup_internals(&timekeeper, clock);
+ tk_setup_internals(tk, clock);
- tk_set_xtime(&timekeeper, &now);
- timekeeper.raw_time.tv_sec = 0;
- timekeeper.raw_time.tv_nsec = 0;
+ tk_set_xtime(tk, &now);
+ tk->raw_time.tv_sec = 0;
+ tk->raw_time.tv_nsec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0)
- boot = tk_xtime(&timekeeper);
-
- set_normalized_timespec(&timekeeper.wall_to_monotonic,
- -boot.tv_sec, -boot.tv_nsec);
- update_rt_offset(&timekeeper);
- timekeeper.total_sleep_time.tv_sec = 0;
- timekeeper.total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ boot = tk_xtime(tk);
+
+ set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
+ tk_set_wall_to_mono(tk, tmp);
+
+ tmp.tv_sec = 0;
+ tmp.tv_nsec = 0;
+ tk_set_sleep_time(tk, tmp);
+
+ write_sequnlock_irqrestore(&tk->lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
-static void update_sleep_time(struct timespec t)
-{
- timekeeper.total_sleep_time = t;
- timekeeper.offs_boot = timespec_to_ktime(t);
-}
-
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
@@ -672,18 +713,16 @@ static void update_sleep_time(struct timespec t)
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
struct timespec *delta)
{
- if (!timespec_valid(delta)) {
+ if (!timespec_valid_strict(delta)) {
printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
"sleep delta value!\n");
return;
}
-
tk_xtime_add(tk, delta);
- tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
- update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
+ tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
+ tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}
-
/**
* timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values
* @delta: pointer to a timespec delta value
@@ -696,6 +735,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
*/
void timekeeping_inject_sleeptime(struct timespec *delta)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -704,21 +744,20 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
- timekeeping_forward_now(&timekeeper);
+ timekeeping_forward_now(tk);
- __timekeeping_inject_sleeptime(&timekeeper, delta);
+ __timekeeping_inject_sleeptime(tk, delta);
- timekeeping_update(&timekeeper, true);
+ timekeeping_update(tk, true);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
/* signal hrtimers about time change */
clock_was_set();
}
-
/**
* timekeeping_resume - Resumes the generic timekeeping subsystem.
*
@@ -728,6 +767,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
*/
static void timekeeping_resume(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec ts;
@@ -735,18 +775,18 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
- __timekeeping_inject_sleeptime(&timekeeper, &ts);
+ __timekeeping_inject_sleeptime(tk, &ts);
}
/* re-base the last cycle value */
- timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
- timekeeper.ntp_error = 0;
+ tk->clock->cycle_last = tk->clock->read(tk->clock);
+ tk->ntp_error = 0;
timekeeping_suspended = 0;
- timekeeping_update(&timekeeper, false);
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ timekeeping_update(tk, false);
+ write_sequnlock_irqrestore(&tk->lock, flags);
touch_softlockup_watchdog();
@@ -758,14 +798,15 @@ static void timekeeping_resume(void)
static int timekeeping_suspend(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long flags;
struct timespec delta, delta_delta;
static struct timespec old_delta;
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now(&timekeeper);
+ write_seqlock_irqsave(&tk->lock, flags);
+ timekeeping_forward_now(tk);
timekeeping_suspended = 1;
/*
@@ -774,7 +815,7 @@ static int timekeeping_suspend(void)
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
- delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
+ delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
delta_delta = timespec_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
@@ -787,7 +828,7 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -898,27 +939,29 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
* the error. This causes the likely below to be unlikely.
*
* The proper fix is to avoid rounding up by using
- * the high precision timekeeper.xtime_nsec instead of
+ * the high precision tk->xtime_nsec instead of
* xtime.tv_nsec everywhere. Fixing this will take some
* time.
*/
if (likely(error <= interval))
adj = 1;
else
- adj = timekeeping_bigadjust(tk, error, &interval,
- &offset);
- } else if (error < -interval) {
- /* See comment above, this is just switched for the negative */
- error >>= 2;
- if (likely(error >= -interval)) {
- adj = -1;
- interval = -interval;
- offset = -offset;
- } else
- adj = timekeeping_bigadjust(tk, error, &interval,
- &offset);
- } else
- return;
+ adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+ } else {
+ if (error < -interval) {
+ /* See comment above, this is just switched for the negative */
+ error >>= 2;
+ if (likely(error >= -interval)) {
+ adj = -1;
+ interval = -interval;
+ offset = -offset;
+ } else {
+ adj = timekeeping_bigadjust(tk, error, &interval, &offset);
+ }
+ } else {
+ goto out_adjust;
+ }
+ }
if (unlikely(tk->clock->maxadj &&
(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
@@ -981,6 +1024,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
tk->xtime_nsec -= offset;
tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+out_adjust:
/*
* It may be possible that when we entered this function, xtime_nsec
* was very small. Further, if we're slightly speeding the clocksource
@@ -1003,7 +1047,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
}
-
/**
* accumulate_nsecs_to_secs - Accumulates nsecs into secs
*
@@ -1024,15 +1067,21 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
/* Figure out if its a leap sec and apply if needed */
leap = second_overflow(tk->xtime_sec);
- tk->xtime_sec += leap;
- tk->wall_to_monotonic.tv_sec -= leap;
- if (leap)
- clock_was_set_delayed();
+ if (unlikely(leap)) {
+ struct timespec ts;
+
+ tk->xtime_sec += leap;
+
+ ts.tv_sec = leap;
+ ts.tv_nsec = 0;
+ tk_set_wall_to_mono(tk,
+ timespec_sub(tk->wall_to_monotonic, ts));
+ clock_was_set_delayed();
+ }
}
}
-
/**
* logarithmic_accumulation - shifted accumulation of cycles
*
@@ -1076,7 +1125,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
return offset;
}
-
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
@@ -1084,25 +1132,30 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
static void update_wall_time(void)
{
struct clocksource *clock;
+ struct timekeeper *tk = &timekeeper;
cycle_t offset;
int shift = 0, maxshift;
unsigned long flags;
s64 remainder;
- write_seqlock_irqsave(&timekeeper.lock, flags);
+ write_seqlock_irqsave(&tk->lock, flags);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
goto out;
- clock = timekeeper.clock;
+ clock = tk->clock;
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
- offset = timekeeper.cycle_interval;
+ offset = tk->cycle_interval;
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
+ /* Check if there's really nothing to do */
+ if (offset < tk->cycle_interval)
+ goto out;
+
/*
* With NO_HZ we may have to accumulate many cycle_intervals
* (think "ticks") worth of time at once. To do this efficiently,
@@ -1111,19 +1164,19 @@ static void update_wall_time(void)
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
- shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
+ shift = ilog2(offset) - ilog2(tk->cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
- while (offset >= timekeeper.cycle_interval) {
- offset = logarithmic_accumulation(&timekeeper, offset, shift);
- if(offset < timekeeper.cycle_interval<<shift)
+ while (offset >= tk->cycle_interval) {
+ offset = logarithmic_accumulation(tk, offset, shift);
+ if (offset < tk->cycle_interval<<shift)
shift--;
}
/* correct the clock when NTP error is too big */
- timekeeping_adjust(&timekeeper, offset);
+ timekeeping_adjust(tk, offset);
/*
@@ -1135,21 +1188,21 @@ static void update_wall_time(void)
* the vsyscall implementations are converted to use xtime_nsec
* (shifted nanoseconds), this can be killed.
*/
- remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
- timekeeper.xtime_nsec -= remainder;
- timekeeper.xtime_nsec += 1 << timekeeper.shift;
- timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
+ remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
+ tk->xtime_nsec -= remainder;
+ tk->xtime_nsec += 1ULL << tk->shift;
+ tk->ntp_error += remainder << tk->ntp_error_shift;
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
*/
- accumulate_nsecs_to_secs(&timekeeper);
+ accumulate_nsecs_to_secs(tk);
- timekeeping_update(&timekeeper, false);
+ timekeeping_update(tk, false);
out:
- write_sequnlock_irqrestore(&timekeeper.lock, flags);
+ write_sequnlock_irqrestore(&tk->lock, flags);
}
@@ -1166,18 +1219,18 @@ out:
*/
void getboottime(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec boottime = {
- .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
- timekeeper.total_sleep_time.tv_sec,
- .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
- timekeeper.total_sleep_time.tv_nsec
+ .tv_sec = tk->wall_to_monotonic.tv_sec +
+ tk->total_sleep_time.tv_sec,
+ .tv_nsec = tk->wall_to_monotonic.tv_nsec +
+ tk->total_sleep_time.tv_nsec
};
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
-
/**
* get_monotonic_boottime - Returns monotonic time since boot
* @ts: pointer to the timespec to be set
@@ -1189,19 +1242,20 @@ EXPORT_SYMBOL_GPL(getboottime);
*/
void get_monotonic_boottime(struct timespec *ts)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec tomono, sleep;
unsigned int seq;
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&timekeeper.lock);
- ts->tv_sec = timekeeper.xtime_sec;
- ts->tv_nsec = timekeeping_get_ns(&timekeeper);
- tomono = timekeeper.wall_to_monotonic;
- sleep = timekeeper.total_sleep_time;
+ seq = read_seqbegin(&tk->lock);
+ ts->tv_sec = tk->xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
+ sleep = tk->total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
@@ -1231,31 +1285,38 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
*/
void monotonic_to_bootbased(struct timespec *ts)
{
- *ts = timespec_add(*ts, timekeeper.total_sleep_time);
+ struct timekeeper *tk = &timekeeper;
+
+ *ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- return timekeeper.xtime_sec;
+ struct timekeeper *tk = &timekeeper;
+
+ return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return tk_xtime(&timekeeper);
+ struct timekeeper *tk = &timekeeper;
+
+ return tk_xtime(tk);
}
struct timespec current_kernel_time(void)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec now;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- now = tk_xtime(&timekeeper);
- } while (read_seqretry(&timekeeper.lock, seq));
+ now = tk_xtime(tk);
+ } while (read_seqretry(&tk->lock, seq));
return now;
}
@@ -1263,15 +1324,16 @@ EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
+ struct timekeeper *tk = &timekeeper;
struct timespec now, mono;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- now = tk_xtime(&timekeeper);
- mono = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ now = tk_xtime(tk);
+ mono = tk->wall_to_monotonic;
+ } while (read_seqretry(&tk->lock, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1300,14 +1362,15 @@ void do_timer(unsigned long ticks)
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
struct timespec *wtom, struct timespec *sleep)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
do {
- seq = read_seqbegin(&timekeeper.lock);
- *xtim = tk_xtime(&timekeeper);
- *wtom = timekeeper.wall_to_monotonic;
- *sleep = timekeeper.total_sleep_time;
- } while (read_seqretry(&timekeeper.lock, seq));
+ seq = read_seqbegin(&tk->lock);
+ *xtim = tk_xtime(tk);
+ *wtom = tk->wall_to_monotonic;
+ *sleep = tk->total_sleep_time;
+ } while (read_seqretry(&tk->lock, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -1321,19 +1384,20 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
*/
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
+ struct timekeeper *tk = &timekeeper;
ktime_t now;
unsigned int seq;
u64 secs, nsecs;
do {
- seq = read_seqbegin(&timekeeper.lock);
+ seq = read_seqbegin(&tk->lock);
- secs = timekeeper.xtime_sec;
- nsecs = timekeeping_get_ns(&timekeeper);
+ secs = tk->xtime_sec;
+ nsecs = timekeeping_get_ns(tk);
- *offs_real = timekeeper.offs_real;
- *offs_boot = timekeeper.offs_boot;
- } while (read_seqretry(&timekeeper.lock, seq));
+ *offs_real = tk->offs_real;
+ *offs_boot = tk->offs_boot;
+ } while (read_seqretry(&tk->lock, seq));
now = ktime_add_ns(ktime_set(secs, 0), nsecs);
now = ktime_sub(now, *offs_real);
@@ -1346,19 +1410,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
*/
ktime_t ktime_get_monotonic_offset(void)
{
+ struct timekeeper *tk = &timekeeper;
unsigned long seq;
struct timespec wtom;
do {
- seq = read_seqbegin(&timekeeper.lock);
- wtom = timekeeper.wall_to_monotonic;
- } while (read_seqretry(&timekeeper.lock, seq));
+ seq = read_seqbegin(&tk->lock);
+ wtom = tk->wall_to_monotonic;
+ } while (read_seqretry(&tk->lock, seq));
return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
-
/**
* xtime_update() - advances the timekeeping infrastructure
* @ticks: number of ticks, that have elapsed since the last call.
diff --git a/kernel/timer.c b/kernel/timer.c
index a61c09374eba..8c5e7b908c68 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1407,13 +1407,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
#endif
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
- * should be moved into arch/i386 instead?
- */
-
/**
* sys_getpid - return the thread group id of the current process
*
@@ -1469,8 +1462,6 @@ SYSCALL_DEFINE0(getegid)
return from_kgid_munged(current_user_ns(), current_egid());
}
-#endif
-
static void process_timeout(unsigned long __data)
{
wake_up_process((struct task_struct *)__data);
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index fee3752ae8f6..8a6d2ee2086c 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -281,7 +281,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
head = this_cpu_ptr(event_function.perf_events);
perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
- 1, &regs, head);
+ 1, &regs, head, NULL);
#undef ENTRY_SIZE
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b31d3d5699fe..1a2117043bb1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx,
+ entry->ip, 1, regs, head, NULL);
}
/* Kretprobe profile handler */
@@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx,
+ entry->ret_ip, 1, regs, head, NULL);
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 96fc73369099..6b245f64c8dd 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
int size;
syscall_nr = syscall_get_nr(current, regs);
+ if (syscall_nr < 0)
+ return;
if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
return;
@@ -532,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
(unsigned long *)&rec->args);
head = this_cpu_ptr(sys_data->enter_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysenter_enable(struct ftrace_event_call *call)
@@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
int size;
syscall_nr = syscall_get_nr(current, regs);
+ if (syscall_nr < 0)
+ return;
if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
return;
@@ -608,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
rec->ret = syscall_get_return_value(current, regs);
head = this_cpu_ptr(sys_data->exit_event->perf_events);
- perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
+ perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
int perf_sysexit_enable(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 2b36ac68549e..03003cd7dd96 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset);
head = this_cpu_ptr(call->perf_events);
- perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
+ perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL);
out:
preempt_enable();
diff --git a/lib/Kconfig b/lib/Kconfig
index 8269d56dcdaa..bb94c1ba616a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -340,6 +340,9 @@ config NLATTR
config GENERIC_ATOMIC64
bool
+config ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ def_bool y if GENERIC_ATOMIC64
+
config LRU_CACHE
tristate
@@ -387,4 +390,10 @@ config SIGNATURE
Digital signature verification. Currently only RSA is supported.
Implementation is done using GnuPG MPI library
+#
+# libfdt files, only selected if needed.
+#
+config LIBFDT
+ bool
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4a186508bf8b..2403a63b5da5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1084,18 +1084,105 @@ config LKDTM
Documentation on how to use the module can be found in
Documentation/fault-injection/provoke-crashes.txt
+config NOTIFIER_ERROR_INJECTION
+ tristate "Notifier error injection"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
+ help
+ This option provides the ability to inject artificial errors into
+ specified notifier chain callbacks. It is useful to test the error
+ handling of notifier call chain failures.
+
+ Say N if unsure.
+
config CPU_NOTIFIER_ERROR_INJECT
tristate "CPU notifier error injection module"
- depends on HOTPLUG_CPU && DEBUG_KERNEL
+ depends on HOTPLUG_CPU && NOTIFIER_ERROR_INJECTION
help
This option provides a kernel module that can be used to test
- the error handling of the cpu notifiers
+ the error handling of the cpu notifiers by injecting artificial
+ errors into CPU notifier chain callbacks. It is controlled through
+ the debugfs interface under /sys/kernel/debug/notifier-error-inject/cpu
+
+ To make the notifier call chain fail for a given event, write the
+ error code to "actions/<notifier event>/error".
+
+ Example: Inject CPU offline error (-1 == -EPERM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/cpu
+ # echo -1 > actions/CPU_DOWN_PREPARE/error
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ bash: echo: write error: Operation not permitted
To compile this code as a module, choose M here: the module will
be called cpu-notifier-error-inject.
If unsure, say N.
+config PM_NOTIFIER_ERROR_INJECT
+ tristate "PM notifier error injection module"
+ depends on PM && NOTIFIER_ERROR_INJECTION
+ default m if PM_DEBUG
+ help
+ This option provides the ability to inject artificial errors into
+ PM notifier chain callbacks. It is controlled through the debugfs
+ interface under /sys/kernel/debug/notifier-error-inject/pm
+
+ To make the notifier call chain fail for a given event, write the
+ error code to "actions/<notifier event>/error".
+
+ Example: Inject PM suspend error (-12 = -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/pm/
+ # echo -12 > actions/PM_SUSPEND_PREPARE/error
+ # echo mem > /sys/power/state
+ bash: echo: write error: Cannot allocate memory
+
+ To compile this code as a module, choose M here: the module will
+ be called pm-notifier-error-inject.
+
+ If unsure, say N.
+
+config MEMORY_NOTIFIER_ERROR_INJECT
+ tristate "Memory hotplug notifier error injection module"
+ depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors into
+ memory hotplug notifier chain callbacks. It is controlled through the
+ debugfs interface under /sys/kernel/debug/notifier-error-inject/memory
+
+ To make the notifier call chain fail for a given event, write the
+ error code to "actions/<notifier event>/error".
+
+ Example: Inject memory hotplug offline error (-12 == -ENOMEM)
+
+ # cd /sys/kernel/debug/notifier-error-inject/memory
+ # echo -12 > actions/MEM_GOING_OFFLINE/error
+ # echo offline > /sys/devices/system/memory/memoryXXX/state
+ bash: echo: write error: Cannot allocate memory
+
+ To compile this code as a module, choose M here: the module will
+ be called memory-notifier-error-inject.
+
+ If unsure, say N.
+
+config PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT
+ tristate "pSeries reconfig notifier error injection module"
+ depends on PPC_PSERIES && NOTIFIER_ERROR_INJECTION
+ help
+ This option provides the ability to inject artificial errors into
+ pSeries reconfig notifier chain callbacks. It is controlled
+ through the debugfs interface under
+ /sys/kernel/debug/notifier-error-inject/pSeries-reconfig/
+
+ To make the notifier call chain fail for a given event, write the
+ error code to "actions/<notifier event>/error".
+
+ To compile this code as a module, choose M here: the module will
+ be called pSeries-reconfig-notifier-error-inject.
+
+ If unsure, say N.
+
config FAULT_INJECTION
bool "Fault-injection framework"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 8c31a0cb75e9..42d283edc4d3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o ratelimit.o show_mem.o \
+ proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -22,7 +22,7 @@ lib-y += kobject.o klist.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
- bsearch.o find_last_bit.o find_next_bit.o llist.o
+ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
@@ -90,7 +90,12 @@ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
obj-$(CONFIG_CPU_NOTIFIER_ERROR_INJECT) += cpu-notifier-error-inject.o
+obj-$(CONFIG_PM_NOTIFIER_ERROR_INJECT) += pm-notifier-error-inject.o
+obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
+obj-$(CONFIG_PSERIES_RECONFIG_NOTIFIER_ERROR_INJECT) += \
+ pSeries-reconfig-notifier-error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
@@ -130,6 +135,11 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o
+$(foreach file, $(libfdt_files), \
+ $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
+lib-$(CONFIG_LIBFDT) += $(libfdt_files)
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index cb99b91c3a1d..00bca223d1e1 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -114,8 +114,7 @@ static __init int test_atomic64(void)
r += one;
BUG_ON(v.counter != r);
-#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
- defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
+#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
INIT(onestwos);
BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
r -= one;
@@ -129,7 +128,7 @@ static __init int test_atomic64(void)
BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
BUG_ON(v.counter != r);
#else
-#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
+#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif
INIT(onestwos);
diff --git a/lib/cpu-notifier-error-inject.c b/lib/cpu-notifier-error-inject.c
index 4dc20321b0d5..707ca24f7b18 100644
--- a/lib/cpu-notifier-error-inject.c
+++ b/lib/cpu-notifier-error-inject.c
@@ -1,58 +1,45 @@
#include <linux/kernel.h>
-#include <linux/cpu.h>
#include <linux/module.h>
-#include <linux/notifier.h>
+#include <linux/cpu.h>
-static int priority;
-static int cpu_up_prepare_error;
-static int cpu_down_prepare_error;
+#include "notifier-error-inject.h"
+static int priority;
module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "specify cpu notifier priority");
-module_param(cpu_up_prepare_error, int, 0644);
-MODULE_PARM_DESC(cpu_up_prepare_error,
- "specify error code to inject CPU_UP_PREPARE action");
-
-module_param(cpu_down_prepare_error, int, 0644);
-MODULE_PARM_DESC(cpu_down_prepare_error,
- "specify error code to inject CPU_DOWN_PREPARE action");
-
-static int err_inject_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- err = cpu_up_prepare_error;
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- err = cpu_down_prepare_error;
- break;
+static struct notifier_err_inject cpu_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(CPU_UP_PREPARE_FROZEN) },
+ { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(CPU_DOWN_PREPARE_FROZEN) },
+ {}
}
- if (err)
- printk(KERN_INFO "Injecting error (%d) at cpu notifier\n", err);
-
- return notifier_from_errno(err);
-}
-
-static struct notifier_block err_inject_cpu_notifier = {
- .notifier_call = err_inject_cpu_callback,
};
+static struct dentry *dir;
+
static int err_inject_init(void)
{
- err_inject_cpu_notifier.priority = priority;
+ int err;
+
+ dir = notifier_err_inject_init("cpu", notifier_err_inject_dir,
+ &cpu_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_hotcpu_notifier(&cpu_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
- return register_hotcpu_notifier(&err_inject_cpu_notifier);
+ return err;
}
static void err_inject_exit(void)
{
- unregister_hotcpu_notifier(&err_inject_cpu_notifier);
+ unregister_hotcpu_notifier(&cpu_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
}
module_init(err_inject_init);
diff --git a/lib/crc32.c b/lib/crc32.c
index b0d278fb1d91..61774b8db4de 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -74,7 +74,9 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
size_t i;
# endif
const u32 *t0=tab[0], *t1=tab[1], *t2=tab[2], *t3=tab[3];
+# if CRC_LE_BITS != 32
const u32 *t4 = tab[4], *t5 = tab[5], *t6 = tab[6], *t7 = tab[7];
+# endif
u32 q;
/* Align it */
diff --git a/lib/fdt.c b/lib/fdt.c
new file mode 100644
index 000000000000..97f20069fc37
--- /dev/null
+++ b/lib/fdt.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt.c"
diff --git a/lib/fdt_ro.c b/lib/fdt_ro.c
new file mode 100644
index 000000000000..f73c04ea7be4
--- /dev/null
+++ b/lib/fdt_ro.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_ro.c"
diff --git a/lib/fdt_rw.c b/lib/fdt_rw.c
new file mode 100644
index 000000000000..0c1f0f4a4b13
--- /dev/null
+++ b/lib/fdt_rw.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_rw.c"
diff --git a/lib/fdt_strerror.c b/lib/fdt_strerror.c
new file mode 100644
index 000000000000..8713e3ff4707
--- /dev/null
+++ b/lib/fdt_strerror.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_strerror.c"
diff --git a/lib/fdt_sw.c b/lib/fdt_sw.c
new file mode 100644
index 000000000000..9ac7e50c76ce
--- /dev/null
+++ b/lib/fdt_sw.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_sw.c"
diff --git a/lib/fdt_wip.c b/lib/fdt_wip.c
new file mode 100644
index 000000000000..45b3fc3d3ba1
--- /dev/null
+++ b/lib/fdt_wip.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_wip.c"
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
new file mode 100644
index 000000000000..c785554f9523
--- /dev/null
+++ b/lib/flex_proportions.c
@@ -0,0 +1,272 @@
+/*
+ * Floating proportions with flexible aging period
+ *
+ * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
+ *
+ * The goal of this code is: given different types of events, measure the
+ * proportion of each type of event over time. The proportions are measured with
+ * exponentially decaying history to give smooth transitions. A formula
+ * expressing proportion of event of type 'j' is:
+ *
+ * p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
+ *
+ * Where x_{i,j} is j's number of events in i-th last time period and x_i is
+ * total number of events in i-th last time period.
+ *
+ * Note that p_{j}'s are normalised, i.e.
+ *
+ * \Sum_{j} p_{j} = 1,
+ *
+ * This formula can be straightforwardly computed by maintaining the denominator
+ * (let's call it 'd') and for each event type its numerator (let's call it
+ * 'n_j'). When an event of type 'j' happens, we simply need to do:
+ * n_j++; d++;
+ *
+ * When a new period is declared, we could do:
+ * d /= 2
+ * for each j
+ * n_j /= 2
+ *
+ * To avoid iterating over all event types, we instead shift the numerator of
+ * event j lazily when someone asks for the proportion of event j or when event
+ * j occurs. This can be implemented trivially by remembering the last period in
+ * which something happened with the proportion of type j.
+ */
+#include <linux/flex_proportions.h>
+
+int fprop_global_init(struct fprop_global *p)
+{
+ int err;
+
+ p->period = 0;
+ /* Use 1 to avoid dealing with periods with 0 events... */
+ err = percpu_counter_init(&p->events, 1);
+ if (err)
+ return err;
+ seqcount_init(&p->sequence);
+ return 0;
+}
+
+void fprop_global_destroy(struct fprop_global *p)
+{
+ percpu_counter_destroy(&p->events);
+}
+
+/*
+ * Declare @periods new periods. It is up to the caller to make sure period
+ * transitions cannot happen in parallel.
+ *
+ * The function returns true if the proportions are still defined and false
+ * if aging zeroed out all events. This can be used to detect whether declaring
+ * further periods has any effect.
+ */
+bool fprop_new_period(struct fprop_global *p, int periods)
+{
+ u64 events;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ events = percpu_counter_sum(&p->events);
+ /*
+ * Don't do anything if there are no events.
+ */
+ if (events <= 1) {
+ local_irq_restore(flags);
+ return false;
+ }
+ write_seqcount_begin(&p->sequence);
+ if (periods < 64)
+ events -= events >> periods;
+ /* Use addition to avoid losing events happening between sum and set */
+ percpu_counter_add(&p->events, -events);
+ p->period += periods;
+ write_seqcount_end(&p->sequence);
+ local_irq_restore(flags);
+
+ return true;
+}
+
+/*
+ * ---- SINGLE ----
+ */
+
+int fprop_local_init_single(struct fprop_local_single *pl)
+{
+ pl->events = 0;
+ pl->period = 0;
+ raw_spin_lock_init(&pl->lock);
+ return 0;
+}
+
+void fprop_local_destroy_single(struct fprop_local_single *pl)
+{
+}
+
+static void fprop_reflect_period_single(struct fprop_global *p,
+ struct fprop_local_single *pl)
+{
+ unsigned int period = p->period;
+ unsigned long flags;
+
+ /* Fast path - period didn't change */
+ if (pl->period == period)
+ return;
+ raw_spin_lock_irqsave(&pl->lock, flags);
+ /* Someone updated pl->period while we were spinning? */
+ if (pl->period >= period) {
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ return;
+ }
+ /* Aging zeroed our fraction? */
+ if (period - pl->period < BITS_PER_LONG)
+ pl->events >>= period - pl->period;
+ else
+ pl->events = 0;
+ pl->period = period;
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/* Event of type pl happened */
+void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
+{
+ fprop_reflect_period_single(p, pl);
+ pl->events++;
+ percpu_counter_add(&p->events, 1);
+}
+
+/* Return fraction of events of type pl */
+void fprop_fraction_single(struct fprop_global *p,
+ struct fprop_local_single *pl,
+ unsigned long *numerator, unsigned long *denominator)
+{
+ unsigned int seq;
+ s64 num, den;
+
+ do {
+ seq = read_seqcount_begin(&p->sequence);
+ fprop_reflect_period_single(p, pl);
+ num = pl->events;
+ den = percpu_counter_read_positive(&p->events);
+ } while (read_seqcount_retry(&p->sequence, seq));
+
+ /*
+ * Make fraction <= 1 and denominator > 0 even in presence of percpu
+ * counter errors
+ */
+ if (den <= num) {
+ if (num)
+ den = num;
+ else
+ den = 1;
+ }
+ *denominator = den;
+ *numerator = num;
+}
+
+/*
+ * ---- PERCPU ----
+ */
+#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+int fprop_local_init_percpu(struct fprop_local_percpu *pl)
+{
+ int err;
+
+ err = percpu_counter_init(&pl->events, 0);
+ if (err)
+ return err;
+ pl->period = 0;
+ raw_spin_lock_init(&pl->lock);
+ return 0;
+}
+
+void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
+{
+ percpu_counter_destroy(&pl->events);
+}
+
+static void fprop_reflect_period_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl)
+{
+ unsigned int period = p->period;
+ unsigned long flags;
+
+ /* Fast path - period didn't change */
+ if (pl->period == period)
+ return;
+ raw_spin_lock_irqsave(&pl->lock, flags);
+ /* Someone updated pl->period while we were spinning? */
+ if (pl->period >= period) {
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+ return;
+ }
+ /* Aging zeroed our fraction? */
+ if (period - pl->period < BITS_PER_LONG) {
+ s64 val = percpu_counter_read(&pl->events);
+
+ if (val < (nr_cpu_ids * PROP_BATCH))
+ val = percpu_counter_sum(&pl->events);
+
+ __percpu_counter_add(&pl->events,
+ -val + (val >> (period-pl->period)), PROP_BATCH);
+ } else
+ percpu_counter_set(&pl->events, 0);
+ pl->period = period;
+ raw_spin_unlock_irqrestore(&pl->lock, flags);
+}
+
+/* Event of type pl happened */
+void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+{
+ fprop_reflect_period_percpu(p, pl);
+ __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add(&p->events, 1);
+}
+
+void fprop_fraction_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl,
+ unsigned long *numerator, unsigned long *denominator)
+{
+ unsigned int seq;
+ s64 num, den;
+
+ do {
+ seq = read_seqcount_begin(&p->sequence);
+ fprop_reflect_period_percpu(p, pl);
+ num = percpu_counter_read_positive(&pl->events);
+ den = percpu_counter_read_positive(&p->events);
+ } while (read_seqcount_retry(&p->sequence, seq));
+
+ /*
+ * Make fraction <= 1 and denominator > 0 even in presence of percpu
+ * counter errors
+ */
+ if (den <= num) {
+ if (num)
+ den = num;
+ else
+ den = 1;
+ }
+ *denominator = den;
+ *numerator = num;
+}
+
+/*
+ * Like __fprop_inc_percpu() except that event is counted only if the given
+ * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
+ */
+void __fprop_inc_percpu_max(struct fprop_global *p,
+ struct fprop_local_percpu *pl, int max_frac)
+{
+ if (unlikely(max_frac < FPROP_FRAC_BASE)) {
+ unsigned long numerator, denominator;
+
+ fprop_fraction_percpu(p, pl, &numerator, &denominator);
+ if (numerator >
+ (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+ return;
+ } else
+ fprop_reflect_period_percpu(p, pl);
+ __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add(&p->events, 1);
+}
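A minimal sketch of how the flex_proportions API introduced above might be used, assuming built-in code (the helpers are not exported to modules in this patch); the names fprop_demo_init, fg, reads and writes are illustrative only:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/flex_proportions.h>

static struct fprop_global fg;
static struct fprop_local_single reads, writes;

static int __init fprop_demo_init(void)
{
	unsigned long num, den;
	int err = fprop_global_init(&fg);

	if (err)
		return err;
	/* the single-counter variant always succeeds */
	fprop_local_init_single(&reads);
	fprop_local_init_single(&writes);

	/* count some events of each type */
	__fprop_inc_single(&fg, &reads);
	__fprop_inc_single(&fg, &reads);
	__fprop_inc_single(&fg, &writes);

	/* reads should account for roughly two thirds of all events */
	fprop_fraction_single(&fg, &reads, &num, &den);
	pr_info("reads: %lu/%lu\n", num, den);

	/* declare one new period: accumulated history decays by half */
	fprop_new_period(&fg, 1);
	return 0;
}
late_initcall(fprop_demo_init);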
diff --git a/lib/memory-notifier-error-inject.c b/lib/memory-notifier-error-inject.c
new file mode 100644
index 000000000000..e6239bf0b0df
--- /dev/null
+++ b/lib/memory-notifier-error-inject.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/memory.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify memory notifier priority");
+
+static struct notifier_err_inject memory_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_ONLINE) },
+ { NOTIFIER_ERR_INJECT_ACTION(MEM_GOING_OFFLINE) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("memory", notifier_err_inject_dir,
+ &memory_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_memory_notifier(&memory_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ unregister_memory_notifier(&memory_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("memory notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/memweight.c b/lib/memweight.c
new file mode 100644
index 000000000000..e35fc8771893
--- /dev/null
+++ b/lib/memweight.c
@@ -0,0 +1,38 @@
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+
+/**
+ * memweight - count the total number of bits set in memory area
+ * @ptr: pointer to the start of the area
+ * @bytes: the size of the area
+ */
+size_t memweight(const void *ptr, size_t bytes)
+{
+ size_t ret = 0;
+ size_t longs;
+ const unsigned char *bitmap = ptr;
+
+ for (; bytes > 0 && ((unsigned long)bitmap) % sizeof(long);
+ bytes--, bitmap++)
+ ret += hweight8(*bitmap);
+
+ longs = bytes / sizeof(long);
+ if (longs) {
+ BUG_ON(longs >= INT_MAX / BITS_PER_LONG);
+ ret += bitmap_weight((unsigned long *)bitmap,
+ longs * BITS_PER_LONG);
+ bytes -= longs * sizeof(long);
+ bitmap += longs * sizeof(long);
+ }
+ /*
+ * The reason that this last loop is distinct from the preceding
+ * bitmap_weight() call is to compute 1-bits in the last region smaller
+ * than sizeof(long) properly on big-endian systems.
+ */
+ for (; bytes > 0; bytes--, bitmap++)
+ ret += hweight8(*bitmap);
+
+ return ret;
+}
+EXPORT_SYMBOL(memweight);
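A small usage sketch of memweight(), assuming the declaration added elsewhere in this series (in <linux/string.h>); slot_map and report_used_slots are hypothetical names:

#include <linux/kernel.h>
#include <linux/string.h>

static unsigned long slot_map[128];	/* 128 * BITS_PER_LONG allocation slots */

static void report_used_slots(void)
{
	/* memweight() accepts any byte range, not just long-aligned bitmaps */
	size_t used = memweight(slot_map, sizeof(slot_map));

	pr_info("%zu slots in use\n", used);
}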
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
new file mode 100644
index 000000000000..44b92cb6224f
--- /dev/null
+++ b/lib/notifier-error-inject.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+
+#include "notifier-error-inject.h"
+
+static int debugfs_errno_set(void *data, u64 val)
+{
+ *(int *)data = clamp_t(int, val, -MAX_ERRNO, 0);
+ return 0;
+}
+
+static int debugfs_errno_get(void *data, u64 *val)
+{
+ *val = *(int *)data;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+ "%lld\n");
+
+static struct dentry *debugfs_create_errno(const char *name, mode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_errno);
+}
+
+static int notifier_err_inject_callback(struct notifier_block *nb,
+ unsigned long val, void *p)
+{
+ int err = 0;
+ struct notifier_err_inject *err_inject =
+ container_of(nb, struct notifier_err_inject, nb);
+ struct notifier_err_inject_action *action;
+
+ for (action = err_inject->actions; action->name; action++) {
+ if (action->val == val) {
+ err = action->error;
+ break;
+ }
+ }
+ if (err)
+ pr_info("Injecting error (%d) to %s\n", err, action->name);
+
+ return notifier_from_errno(err);
+}
+
+struct dentry *notifier_err_inject_dir;
+EXPORT_SYMBOL_GPL(notifier_err_inject_dir);
+
+struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
+ struct notifier_err_inject *err_inject, int priority)
+{
+ struct notifier_err_inject_action *action;
+ mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ struct dentry *dir;
+ struct dentry *actions_dir;
+
+ err_inject->nb.notifier_call = notifier_err_inject_callback;
+ err_inject->nb.priority = priority;
+
+ dir = debugfs_create_dir(name, parent);
+ if (!dir)
+ return ERR_PTR(-ENOMEM);
+
+ actions_dir = debugfs_create_dir("actions", dir);
+ if (!actions_dir)
+ goto fail;
+
+ for (action = err_inject->actions; action->name; action++) {
+ struct dentry *action_dir;
+
+ action_dir = debugfs_create_dir(action->name, actions_dir);
+ if (!action_dir)
+ goto fail;
+
+ /*
+ * Create debugfs r/w file containing action->error. If the
+ * notifier call chain is called with action->val, it will
+ * fail with this error code
+ */
+ if (!debugfs_create_errno("error", mode, action_dir,
+ &action->error))
+ goto fail;
+ }
+ return dir;
+fail:
+ debugfs_remove_recursive(dir);
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(notifier_err_inject_init);
+
+static int __init err_inject_init(void)
+{
+ notifier_err_inject_dir =
+ debugfs_create_dir("notifier-error-inject", NULL);
+
+ if (!notifier_err_inject_dir)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit err_inject_exit(void)
+{
+ debugfs_remove_recursive(notifier_err_inject_dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("Notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/notifier-error-inject.h b/lib/notifier-error-inject.h
new file mode 100644
index 000000000000..99b3b6fc470b
--- /dev/null
+++ b/lib/notifier-error-inject.h
@@ -0,0 +1,24 @@
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/notifier.h>
+
+struct notifier_err_inject_action {
+ unsigned long val;
+ int error;
+ const char *name;
+};
+
+#define NOTIFIER_ERR_INJECT_ACTION(action) \
+ .name = #action, .val = (action),
+
+struct notifier_err_inject {
+ struct notifier_block nb;
+ struct notifier_err_inject_action actions[];
+ /* The last slot must be terminated with a zero sentinel */
+};
+
+extern struct dentry *notifier_err_inject_dir;
+
+extern struct dentry *notifier_err_inject_init(const char *name,
+ struct dentry *parent, struct notifier_err_inject *err_inject,
+ int priority);
diff --git a/lib/pSeries-reconfig-notifier-error-inject.c b/lib/pSeries-reconfig-notifier-error-inject.c
new file mode 100644
index 000000000000..7f7c98dcd5c4
--- /dev/null
+++ b/lib/pSeries-reconfig-notifier-error-inject.c
@@ -0,0 +1,51 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/pSeries_reconfig.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify pSeries reconfig notifier priority");
+
+static struct notifier_err_inject reconfig_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_ADD) },
+ { NOTIFIER_ERR_INJECT_ACTION(PSERIES_RECONFIG_REMOVE) },
+ { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_ADD) },
+ { NOTIFIER_ERR_INJECT_ACTION(PSERIES_DRCONF_MEM_REMOVE) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("pSeries-reconfig",
+ notifier_err_inject_dir, &reconfig_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = pSeries_reconfig_notifier_register(&reconfig_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ pSeries_reconfig_notifier_unregister(&reconfig_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("pSeries reconfig notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index f8a3f1a829b8..ba6085d9c741 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -12,7 +12,7 @@
#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
-static DEFINE_MUTEX(percpu_counters_lock);
+static DEFINE_SPINLOCK(percpu_counters_lock);
#endif
#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
@@ -123,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
#ifdef CONFIG_HOTPLUG_CPU
INIT_LIST_HEAD(&fbc->list);
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_add(&fbc->list, &percpu_counters);
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
return 0;
}
@@ -139,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
debug_percpu_counter_deactivate(fbc);
#ifdef CONFIG_HOTPLUG_CPU
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_del(&fbc->list);
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
free_percpu(fbc->counters);
fbc->counters = NULL;
@@ -170,7 +170,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
return NOTIFY_OK;
cpu = (unsigned long)hcpu;
- mutex_lock(&percpu_counters_lock);
+ spin_lock(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
unsigned long flags;
@@ -181,7 +181,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
*pcount = 0;
raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
- mutex_unlock(&percpu_counters_lock);
+ spin_unlock(&percpu_counters_lock);
#endif
return NOTIFY_OK;
}
diff --git a/lib/pm-notifier-error-inject.c b/lib/pm-notifier-error-inject.c
new file mode 100644
index 000000000000..c094b2dedc23
--- /dev/null
+++ b/lib/pm-notifier-error-inject.c
@@ -0,0 +1,49 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+
+#include "notifier-error-inject.h"
+
+static int priority;
+module_param(priority, int, 0);
+MODULE_PARM_DESC(priority, "specify PM notifier priority");
+
+static struct notifier_err_inject pm_notifier_err_inject = {
+ .actions = {
+ { NOTIFIER_ERR_INJECT_ACTION(PM_HIBERNATION_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(PM_SUSPEND_PREPARE) },
+ { NOTIFIER_ERR_INJECT_ACTION(PM_RESTORE_PREPARE) },
+ {}
+ }
+};
+
+static struct dentry *dir;
+
+static int err_inject_init(void)
+{
+ int err;
+
+ dir = notifier_err_inject_init("pm", notifier_err_inject_dir,
+ &pm_notifier_err_inject, priority);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ err = register_pm_notifier(&pm_notifier_err_inject.nb);
+ if (err)
+ debugfs_remove_recursive(dir);
+
+ return err;
+}
+
+static void err_inject_exit(void)
+{
+ unregister_pm_notifier(&pm_notifier_err_inject.nb);
+ debugfs_remove_recursive(dir);
+}
+
+module_init(err_inject_init);
+module_exit(err_inject_exit);
+
+MODULE_DESCRIPTION("PM notifier error injection module");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 6096e89bee55..fadae774a20c 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -279,14 +279,6 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (!left)
sg_mark_end(&sg[sg_size - 1]);
- /*
- * only really needed for mempool backed sg allocations (like
- * SCSI), a possible improvement here would be to pass the
- * table pointer into the allocator and let that clear these
- * flags
- */
- gfp_mask &= ~__GFP_WAIT;
- gfp_mask |= __GFP_HIGH;
prv = sg;
} while (left);
@@ -319,6 +311,70 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
+ * sg_alloc_table_from_pages - Allocate and initialize an sg table from
+ * an array of pages
+ * @sgt: The sg table header to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
+ * @offset: Offset from start of the first page to the start of a buffer
+ * @size: Number of valid bytes in the buffer (after offset)
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table from a list of pages. Contiguous
+ * ranges of the pages are squashed into a single scatterlist node. A user
+ * may provide an offset at the start and a size of valid data in the buffer
+ * specified by the page array. The returned sg table is released by
+ * sg_free_table.
+ *
+ * Returns:
+ * 0 on success, negative error on failure
+ */
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned long offset, unsigned long size,
+ gfp_t gfp_mask)
+{
+ unsigned int chunks;
+ unsigned int i;
+ unsigned int cur_page;
+ int ret;
+ struct scatterlist *s;
+
+ /* compute number of contiguous chunks */
+ chunks = 1;
+ for (i = 1; i < n_pages; ++i)
+ if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+ ++chunks;
+
+ ret = sg_alloc_table(sgt, chunks, gfp_mask);
+ if (unlikely(ret))
+ return ret;
+
+ /* merging chunks and putting them into the scatterlist */
+ cur_page = 0;
+ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+ unsigned long chunk_size;
+ unsigned int j;
+
+ /* look for the end of the current chunk */
+ for (j = cur_page + 1; j < n_pages; ++j)
+ if (page_to_pfn(pages[j]) !=
+ page_to_pfn(pages[j - 1]) + 1)
+ break;
+
+ chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+ sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+ size -= chunk_size;
+ offset = 0;
+ cur_page = j;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sg_alloc_table_from_pages);
+
+/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
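A rough sketch of how a caller might use the new sg_alloc_table_from_pages() helper above; the wrapper sgt_from_pinned_pages and its error handling are hypothetical, and the pages are assumed to be already pinned by the caller:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Build an sg table over a buffer that is already pinned into @pages. */
static struct sg_table *sgt_from_pinned_pages(struct page **pages,
					      unsigned int n_pages,
					      unsigned long offset,
					      unsigned long len)
{
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	/* physically contiguous pages are coalesced into single sg entries */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, len,
					GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ERR_PTR(ret);
	}
	return sgt;
}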
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index e91fbc23fff1..eb10578ae055 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
- printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
+ printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c3f36d415bdf..0e337541f005 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -655,6 +655,50 @@ char *resource_string(char *buf, char *end, struct resource *res,
}
static noinline_for_stack
+char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
+ const char *fmt)
+{
+ int i, len = 1; /* if we pass '%ph[CDN]', the field width remains a
+ negative value, so fall back to the default */
+ char separator;
+
+ if (spec.field_width == 0)
+ /* nothing to print */
+ return buf;
+
+ if (ZERO_OR_NULL_PTR(addr))
+ /* NULL pointer */
+ return string(buf, end, NULL, spec);
+
+ switch (fmt[1]) {
+ case 'C':
+ separator = ':';
+ break;
+ case 'D':
+ separator = '-';
+ break;
+ case 'N':
+ separator = 0;
+ break;
+ default:
+ separator = ' ';
+ break;
+ }
+
+ if (spec.field_width > 0)
+ len = min_t(int, spec.field_width, 64);
+
+ for (i = 0; i < len && buf < end - 1; i++) {
+ buf = hex_byte_pack(buf, addr[i]);
+
+ if (buf < end && separator && i != len - 1)
+ *buf++ = separator;
+ }
+
+ return buf;
+}
+
+static noinline_for_stack
char *mac_address_string(char *buf, char *end, u8 *addr,
struct printf_spec spec, const char *fmt)
{
@@ -662,15 +706,28 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
char *p = mac_addr;
int i;
char separator;
+ bool reversed = false;
- if (fmt[1] == 'F') { /* FDDI canonical format */
+ switch (fmt[1]) {
+ case 'F':
separator = '-';
- } else {
+ break;
+
+ case 'R':
+ reversed = true;
+ /* fall through */
+
+ default:
separator = ':';
+ break;
}
for (i = 0; i < 6; i++) {
- p = hex_byte_pack(p, addr[i]);
+ if (reversed)
+ p = hex_byte_pack(p, addr[5 - i]);
+ else
+ p = hex_byte_pack(p, addr[i]);
+
if (fmt[0] == 'M' && i != 5)
*p++ = separator;
}
@@ -933,6 +990,7 @@ int kptr_restrict __read_mostly;
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
* - 'MF' For a 6-byte MAC FDDI address, it prints the address
* with a dash-separated hex notation
+ * - '[mM]R' For a 6-byte MAC address, it prints the address in reverse order (Bluetooth)
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -960,6 +1018,13 @@ int kptr_restrict __read_mostly;
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
* - 'NF' For a netdev_features_t
+ * - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
+ * a certain separator (' ' by default):
+ * C colon
+ * D dash
+ * N no separator
+ * The maximum supported length is 64 bytes of input. Consider using
+ * print_hex_dump() for larger input.
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -993,9 +1058,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'R':
case 'r':
return resource_string(buf, end, ptr, spec, fmt);
+ case 'h':
+ return hex_string(buf, end, ptr, spec, fmt);
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
- /* [mM]F (FDDI, bit reversed) */
+ /* [mM]F (FDDI) */
+ /* [mM]R (Reverse order; Bluetooth) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
@@ -1030,7 +1098,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
* %pK cannot be used in IRQ context because its test
* for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (kptr_restrict && (in_irq() || in_serving_softirq() ||
+ in_nmi())) {
if (spec.field_width == -1)
spec.field_width = default_width;
return string(buf, end, "pK-error", spec);
@@ -1280,8 +1349,12 @@ qualifier:
* %pI6c print an IPv6 address as specified by RFC 5952
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
* case.
+ * %*ph[CDN] a variable-length hex string with a separator (supports up to 64
+ * bytes of the input)
* %n is ignored
*
+ * ** Please update Documentation/printk-formats.txt when making changes **
+ *
* The return value is the number of characters which would
* be generated for the given input, excluding the trailing
* '\0', as per ISO C99. If you want to have the exact
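A brief, hypothetical sketch of the two format extensions added above: '%pMR' for byte-reversed (Bluetooth-style) MAC addresses and '%*ph' for small hex buffers; dump_device_ids and its parameters are illustrative only:

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_device_ids(const u8 *bdaddr, const u8 *key, int key_len)
{
	/* Bluetooth stores addresses least-significant byte first,
	 * so %pMR prints them in the human-readable (reversed) order */
	pr_info("device %pMR\n", bdaddr);

	/* up to 64 bytes, colon-separated; %*ph, %*phD and %*phN give
	 * space, dash and no separator respectively */
	pr_info("link key: %*phC\n", key_len, key);
}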
diff --git a/mm/Kconfig b/mm/Kconfig
index 82fed4eb2b6f..d5c8019c6627 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -140,9 +140,13 @@ config ARCH_DISCARD_MEMBLOCK
config NO_BOOTMEM
boolean
+config MEMORY_ISOLATION
+ boolean
+
# eventually, we can have this option just 'select SPARSEMEM'
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
+ select MEMORY_ISOLATION
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
@@ -272,6 +276,7 @@ config MEMORY_FAILURE
depends on MMU
depends on ARCH_SUPPORTS_MEMORY_FAILURE
bool "Enable recovery from hardware memory errors"
+ select MEMORY_ISOLATION
help
Enables code to recover from some memory failures on systems
with MCA recovery. This allows a system to continue running
diff --git a/mm/Makefile b/mm/Makefile
index 2e2fbbefb99f..92753e2d82da 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -15,8 +15,9 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
- page_isolation.o mm_init.o mmu_context.o percpu.o \
+ mm_init.o mmu_context.o percpu.o slab_common.o \
compaction.o $(mmu-y)
+
obj-y += init-mm.o
ifdef CONFIG_NO_BOOTMEM
@@ -48,9 +49,11 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_QUICKLIST) += quicklist.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o
+obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
+obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index dd8e2aafb07e..b41823cc05e6 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -39,12 +39,6 @@ DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
-static struct task_struct *sync_supers_tsk;
-static struct timer_list sync_supers_timer;
-
-static int bdi_sync_supers(void *);
-static void sync_supers_timer_fn(unsigned long);
-
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
if (wb1 < wb2) {
@@ -250,12 +244,6 @@ static int __init default_bdi_init(void)
{
int err;
- sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
- BUG_ON(IS_ERR(sync_supers_tsk));
-
- setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
- bdi_arm_supers_timer();
-
err = bdi_init(&default_backing_dev_info);
if (!err)
bdi_register(&default_backing_dev_info, NULL, "default");
@@ -270,46 +258,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
return wb_has_dirty_io(&bdi->wb);
}
-/*
- * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
- * or we risk deadlocking on ->s_umount. The longer term solution would be
- * to implement sync_supers_bdi() or similar and simply do it from the
- * bdi writeback thread individually.
- */
-static int bdi_sync_supers(void *unused)
-{
- set_user_nice(current, 0);
-
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
-
- /*
- * Do this periodically, like kupdated() did before.
- */
- sync_supers();
- }
-
- return 0;
-}
-
-void bdi_arm_supers_timer(void)
-{
- unsigned long next;
-
- if (!dirty_writeback_interval)
- return;
-
- next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
- mod_timer(&sync_supers_timer, round_jiffies_up(next));
-}
-
-static void sync_supers_timer_fn(unsigned long unused)
-{
- wake_up_process(sync_supers_tsk);
- bdi_arm_supers_timer();
-}
-
static void wakeup_timer_fn(unsigned long data)
{
struct backing_dev_info *bdi = (struct backing_dev_info *)data;
@@ -677,7 +625,7 @@ int bdi_init(struct backing_dev_info *bdi)
bdi->min_ratio = 0;
bdi->max_ratio = 100;
- bdi->max_prop_frac = PROP_FRAC_BASE;
+ bdi->max_prop_frac = FPROP_FRAC_BASE;
spin_lock_init(&bdi->wb_lock);
INIT_LIST_HEAD(&bdi->bdi_list);
INIT_LIST_HEAD(&bdi->work_list);
@@ -700,7 +648,7 @@ int bdi_init(struct backing_dev_info *bdi)
bdi->write_bandwidth = INIT_BW;
bdi->avg_write_bandwidth = INIT_BW;
- err = prop_local_init_percpu(&bdi->completions);
+ err = fprop_local_init_percpu(&bdi->completions);
if (err) {
err:
@@ -744,7 +692,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
percpu_counter_destroy(&bdi->bdi_stat[i]);
- prop_local_destroy_percpu(&bdi->completions);
+ fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
@@ -886,3 +834,23 @@ out:
return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
+
+int pdflush_proc_obsolete(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ char kbuf[] = "0\n";
+
+ if (*ppos) {
+ *lenp = 0;
+ return 0;
+ }
+
+ if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
+ return -EFAULT;
+ printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
+ table->procname);
+
+ *lenp = 2;
+ *ppos += *lenp;
+ return 2;
+}
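
A minimal userspace sketch (not part of the patch) of what a reader of an obsolete knob sees once it is wired through pdflush_proc_obsolete() above: the handler always hands back the literal "0\n" and warns once in the kernel log. The /proc path below is an assumption that the handler is used for the nr_pdflush_threads sysctl.

#include <stdio.h>

int main(void)
{
	char buf[8] = "";
	FILE *f = fopen("/proc/sys/vm/nr_pdflush_threads", "r");	/* assumed knob */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("read \"%s\" (pdflush_proc_obsolete always returns \"0\\n\")\n", buf);
	fclose(f);
	return 0;
}
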
diff --git a/mm/compaction.c b/mm/compaction.c
index 2f42d9528539..7fcd3a52e68d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -51,6 +51,47 @@ static inline bool migrate_async_suitable(int migratetype)
}
/*
+ * Compaction requires the taking of some coarse locks that are potentially
+ * very heavily contended. Check if the process needs to be scheduled or
+ * if the lock is contended. For async compaction, back out if contention
+ * is severe. For sync compaction, schedule.
+ *
+ * Returns true if the lock is held.
+ * Returns false if the lock is released and compaction should abort.
+ */
+static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
+ bool locked, struct compact_control *cc)
+{
+ if (need_resched() || spin_is_contended(lock)) {
+ if (locked) {
+ spin_unlock_irqrestore(lock, *flags);
+ locked = false;
+ }
+
+ /* async aborts if taking too long or contended */
+ if (!cc->sync) {
+ if (cc->contended)
+ *cc->contended = true;
+ return false;
+ }
+
+ cond_resched();
+ if (fatal_signal_pending(current))
+ return false;
+ }
+
+ if (!locked)
+ spin_lock_irqsave(lock, *flags);
+ return true;
+}
+
+static inline bool compact_trylock_irqsave(spinlock_t *lock,
+ unsigned long *flags, struct compact_control *cc)
+{
+ return compact_checklock_irqsave(lock, flags, false, cc);
+}
+
+/*
* Isolate free pages onto a private freelist. Caller must hold zone->lock.
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free
* pages inside of the pageblock (even though it may still end up isolating
@@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
}
/* Update the number of anon and file isolated pages in the zone */
-static void acct_isolated(struct zone *zone, struct compact_control *cc)
+static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
struct page *page;
unsigned int count[2] = { 0, };
@@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
list_for_each_entry(page, &cc->migratepages, lru)
count[!!page_is_file_cache(page)]++;
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ /* If locked, we can use the interrupt-unsafe versions */
+ if (locked) {
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ } else {
+ mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+ mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
+ }
}
/* Similar to reclaim, but different enough that they don't share logic */
@@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
struct list_head *migratelist = &cc->migratepages;
isolate_mode_t mode = 0;
struct lruvec *lruvec;
+ unsigned long flags;
+ bool locked;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
/* Time to isolate some pages for migration */
cond_resched();
- spin_lock_irq(&zone->lru_lock);
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ locked = true;
for (; low_pfn < end_pfn; low_pfn++) {
struct page *page;
- bool locked = true;
/* give a chance to irqs before checking need_resched() */
if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
- spin_unlock_irq(&zone->lru_lock);
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
locked = false;
}
- if (need_resched() || spin_is_contended(&zone->lru_lock)) {
- if (locked)
- spin_unlock_irq(&zone->lru_lock);
- cond_resched();
- spin_lock_irq(&zone->lru_lock);
- if (fatal_signal_pending(current))
- break;
- } else if (!locked)
- spin_lock_irq(&zone->lru_lock);
+
+ /* Check if it is ok to still hold the lock */
+ locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
+ locked, cc);
+ if (!locked)
+ break;
/*
* migrate_pfn does not necessarily start aligned to a
@@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
}
}
- acct_isolated(zone, cc);
+ acct_isolated(zone, locked, cc);
- spin_unlock_irq(&zone->lru_lock);
+ if (locked)
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -384,6 +431,20 @@ static bool suitable_migration_target(struct page *page)
}
/*
+ * Returns the start pfn of the last page block in a zone. This is the starting
+ * point for full compaction of a zone. Compaction searches for free pages from
+ * the end of each zone, while isolate_freepages_block scans forward inside each
+ * page block.
+ */
+static unsigned long start_free_pfn(struct zone *zone)
+{
+ unsigned long free_pfn;
+ free_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ free_pfn &= ~(pageblock_nr_pages-1);
+ return free_pfn;
+}
+
+/*
* Based on information in the current compact_control, find blocks
* suitable for isolating free pages from and then isolate them.
*/
@@ -447,7 +508,16 @@ static void isolate_freepages(struct zone *zone,
* are disabled
*/
isolated = 0;
- spin_lock_irqsave(&zone->lock, flags);
+
+ /*
+ * The zone lock must be held to isolate free pages. Unfortunately
+ * this is a very coarse lock and can be heavily contended if there
+ * are parallel allocations or parallel compactions. For async
+ * compaction, do not spin on the lock.
+ */
+ if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
+ break;
if (suitable_migration_target(page)) {
end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
isolated = isolate_freepages_block(pfn, end_pfn,
@@ -461,8 +531,19 @@ static void isolate_freepages(struct zone *zone,
* looking for free pages, the search will restart here as
* page migration may have returned some pages to the allocator
*/
- if (isolated)
+ if (isolated) {
high_pfn = max(high_pfn, pfn);
+
+ /*
+ * If the free scanner has wrapped, update
+ * compact_cached_free_pfn to point to the highest
+ * pageblock with free pages. This reduces excessive
+ * scanning of full pageblocks near the end of the
+ * zone
+ */
+ if (cc->order > 0 && cc->wrapped)
+ zone->compact_cached_free_pfn = high_pfn;
+ }
}
/* split_free_page does not map the pages */
@@ -470,6 +551,11 @@ static void isolate_freepages(struct zone *zone,
cc->free_pfn = high_pfn;
cc->nr_freepages = nr_freepages;
+
+ /* If compact_cached_free_pfn is reset then set it now */
+ if (cc->order > 0 && !cc->wrapped &&
+ zone->compact_cached_free_pfn == start_free_pfn(zone))
+ zone->compact_cached_free_pfn = high_pfn;
}
/*
@@ -565,8 +651,26 @@ static int compact_finished(struct zone *zone,
if (fatal_signal_pending(current))
return COMPACT_PARTIAL;
- /* Compaction run completes if the migrate and free scanner meet */
- if (cc->free_pfn <= cc->migrate_pfn)
+ /*
+ * A full (order == -1) compaction run starts at the beginning and
+ * end of a zone; it completes when the migrate and free scanner meet.
+ * A partial (order > 0) compaction can start with the free scanner
+ * at a random point in the zone, and may have to restart.
+ */
+ if (cc->free_pfn <= cc->migrate_pfn) {
+ if (cc->order > 0 && !cc->wrapped) {
+ /* We started partway through; restart at the end. */
+ unsigned long free_pfn = start_free_pfn(zone);
+ zone->compact_cached_free_pfn = free_pfn;
+ cc->free_pfn = free_pfn;
+ cc->wrapped = 1;
+ return COMPACT_CONTINUE;
+ }
+ return COMPACT_COMPLETE;
+ }
+
+ /* We wrapped around and ended up where we started. */
+ if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
return COMPACT_COMPLETE;
/*
@@ -664,8 +768,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
/* Setup to move all movable pages to the end of the zone */
cc->migrate_pfn = zone->zone_start_pfn;
- cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
- cc->free_pfn &= ~(pageblock_nr_pages-1);
+
+ if (cc->order > 0) {
+ /* Incremental compaction. Start where the last one stopped. */
+ cc->free_pfn = zone->compact_cached_free_pfn;
+ cc->start_free_pfn = cc->free_pfn;
+ } else {
+ /* Order == -1 starts at the end of the zone. */
+ cc->free_pfn = start_free_pfn(zone);
+ }
migrate_prep_local();
@@ -718,7 +829,7 @@ out:
static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
- bool sync)
+ bool sync, bool *contended)
{
struct compact_control cc = {
.nr_freepages = 0,
@@ -727,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
.sync = sync,
+ .contended = contended,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -748,7 +860,7 @@ int sysctl_extfrag_threshold = 500;
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- bool sync)
+ bool sync, bool *contended)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
int may_enter_fs = gfp_mask & __GFP_FS;
@@ -772,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;
- status = compact_zone_order(zone, order, gfp_mask, sync);
+ status = compact_zone_order(zone, order, gfp_mask, sync,
+ contended);
rc = max(status, rc);
/* If a normal allocation would succeed, stop compacting */
@@ -808,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (cc->order > 0) {
int ok = zone_watermark_ok(zone, cc->order,
low_wmark_pages(zone), 0, 0);
- if (ok && cc->order > zone->compact_order_failed)
+ if (ok && cc->order >= zone->compact_order_failed)
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */
else if (!ok && cc->sync)
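
A standalone sketch (not part of the patch) of the alignment arithmetic behind start_free_pfn() above: the free scanner's starting pfn is the zone end rounded down to a pageblock boundary with a power-of-two mask. All numbers below are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned long zone_start_pfn = 0x100000;	/* assumed zone start */
	unsigned long spanned_pages = 0x7fff3;		/* assumed zone span */
	unsigned long pageblock_nr_pages = 512;		/* assumed pageblock size */
	unsigned long free_pfn;

	free_pfn = zone_start_pfn + spanned_pages;
	free_pfn &= ~(pageblock_nr_pages - 1);		/* round down to a pageblock */
	printf("free scanner starts at pfn %#lx\n", free_pfn);
	return 0;
}
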
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 469491e0af79..9b75a045dbf4 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -93,11 +93,6 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
spin_unlock(&file->f_lock);
break;
case POSIX_FADV_WILLNEED:
- if (!mapping->a_ops->readpage) {
- ret = -EINVAL;
- break;
- }
-
/* First and last PARTIAL page! */
start_index = offset >> PAGE_CACHE_SHIFT;
end_index = endbyte >> PAGE_CACHE_SHIFT;
@@ -106,12 +101,13 @@ SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
nrpages = end_index - start_index + 1;
if (!nrpages)
nrpages = ~0UL;
-
- ret = force_page_cache_readahead(mapping, file,
- start_index,
- nrpages);
- if (ret > 0)
- ret = 0;
+
+ /*
+ * Ignore the return value because fadvise() shall return
+ * success even if the filesystem can't retrieve a hint.
+ */
+ force_page_cache_readahead(mapping, file, start_index,
+ nrpages);
break;
case POSIX_FADV_NOREUSE:
break;
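
A userspace sketch (not part of the patch) of the behaviour the POSIX_FADV_WILLNEED hunk above describes: the hint now reports success even when the filesystem cannot honour it, so callers need not special-case the return value. The file path is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	int ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask for readahead of the whole file; len == 0 means "to EOF". */
	ret = posix_fadvise(fd, 0, 0, POSIX_FADV_WILLNEED);
	printf("posix_fadvise(WILLNEED) -> %d\n", ret);	/* expected: 0 */
	close(fd);
	return 0;
}
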
diff --git a/mm/filemap.c b/mm/filemap.c
index a4a5260b0279..384344575c37 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
retval = filemap_write_and_wait_range(mapping, pos,
pos + iov_length(iov, nr_segs) - 1);
if (!retval) {
- struct blk_plug plug;
-
- blk_start_plug(&plug);
retval = mapping->a_ops->direct_IO(READ, iocb,
iov, pos, nr_segs);
- blk_finish_plug(&plug);
}
if (retval > 0) {
*ppos = pos + retval;
@@ -1712,8 +1708,35 @@ page_not_uptodate:
}
EXPORT_SYMBOL(filemap_fault);
+int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ int ret = VM_FAULT_LOCKED;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vma->vm_file);
+ lock_page(page);
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ ret = VM_FAULT_NOPAGE;
+ goto out;
+ }
+ /*
+ * We mark the page dirty already here so that when freeze is in
+ * progress, we are guaranteed that writeback during freezing will
+ * see the dirty page and writeprotect it again.
+ */
+ set_page_dirty(page);
+out:
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+EXPORT_SYMBOL(filemap_page_mkwrite);
+
const struct vm_operations_struct generic_file_vm_ops = {
.fault = filemap_fault,
+ .page_mkwrite = filemap_page_mkwrite,
};
/* This is used for a general mmap of a disk file */
@@ -2407,8 +2430,6 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
count = ocount;
pos = *ppos;
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
written = 0;
@@ -2502,13 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
+ sb_start_write(inode->i_sb);
mutex_lock(&inode->i_mutex);
- blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
@@ -2519,7 +2539,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
- blk_finish_plug(&plug);
+ sb_end_write(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 213ca1f53409..13e013b1270c 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -304,6 +304,7 @@ out:
static const struct vm_operations_struct xip_file_vm_ops = {
.fault = xip_file_fault,
+ .page_mkwrite = filemap_page_mkwrite,
};
int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -401,6 +402,8 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
loff_t pos;
ssize_t ret;
+ sb_start_write(inode->i_sb);
+
mutex_lock(&inode->i_mutex);
if (!access_ok(VERIFY_READ, buf, len)) {
@@ -411,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
pos = *ppos;
count = len;
- vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
@@ -436,6 +437,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
current->backing_dev_info = NULL;
out_up:
mutex_unlock(&inode->i_mutex);
+ sb_end_write(inode->i_sb);
return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);
diff --git a/mm/highmem.c b/mm/highmem.c
index 57d82c6250c3..d517cd16a6eb 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -94,6 +94,18 @@ static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
+struct page *kmap_to_page(void *vaddr)
+{
+ unsigned long addr = (unsigned long)vaddr;
+
+ if (addr >= PKMAP_ADDR(0) && addr <= PKMAP_ADDR(LAST_PKMAP)) {
+ int i = (addr - PKMAP_ADDR(0)) >> PAGE_SHIFT;
+ return pte_page(pkmap_page_table[i]);
+ }
+
+ return virt_to_page(addr);
+}
+
static void flush_all_zero_pkmaps(void)
{
int i;
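
A kernel-context usage sketch (not part of the patch, and not standalone) for the kmap_to_page() helper added above: code that only holds a kmap()ed virtual address can recover the backing struct page, whether the address lies in the pkmap window or in the direct mapping.

#include <linux/highmem.h>

/* Illustrative only: round-trip a page through kmap() and kmap_to_page(). */
static void kmap_to_page_example(struct page *pg)
{
	void *vaddr = kmap(pg);			/* may land in the pkmap area */
	struct page *back = kmap_to_page(vaddr);

	WARN_ON(back != pg);			/* the helper undoes the mapping lookup */
	kunmap(pg);
}
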
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e198831276a3..bc727122dd44 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,17 +24,20 @@
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <linux/io.h>
+#include <asm/tlb.h>
+#include <linux/io.h>
#include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
+#include <linux/hugetlb_cgroup.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
-static int max_hstate;
+int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
@@ -45,13 +48,10 @@ static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
-#define for_each_hstate(h) \
- for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
-
/*
* Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
*/
-static DEFINE_SPINLOCK(hugetlb_lock);
+DEFINE_SPINLOCK(hugetlb_lock);
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
@@ -509,7 +509,7 @@ void copy_huge_page(struct page *dst, struct page *src)
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
- list_add(&page->lru, &h->hugepage_freelists[nid]);
+ list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
}
@@ -521,7 +521,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
if (list_empty(&h->hugepage_freelists[nid]))
return NULL;
page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
- list_del(&page->lru);
+ list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
@@ -593,6 +593,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
}
+ VM_BUG_ON(hugetlb_cgroup_from_page(page));
set_compound_page_dtor(page, NULL);
set_page_refcounted(page);
arch_release_hugepage(page);
@@ -625,10 +626,13 @@ static void free_huge_page(struct page *page)
page->mapping = NULL;
BUG_ON(page_count(page));
BUG_ON(page_mapcount(page));
- INIT_LIST_HEAD(&page->lru);
spin_lock(&hugetlb_lock);
+ hugetlb_cgroup_uncharge_page(hstate_index(h),
+ pages_per_huge_page(h), page);
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+ /* remove the page from active list */
+ list_del(&page->lru);
update_and_free_page(h, page);
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
@@ -641,8 +645,10 @@ static void free_huge_page(struct page *page)
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
+ INIT_LIST_HEAD(&page->lru);
set_compound_page_dtor(page, free_huge_page);
spin_lock(&hugetlb_lock);
+ set_hugetlb_cgroup(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
@@ -889,8 +895,10 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
spin_lock(&hugetlb_lock);
if (page) {
+ INIT_LIST_HEAD(&page->lru);
r_nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
+ set_hugetlb_cgroup(page, NULL);
/*
* We incremented the global counters already
*/
@@ -993,7 +1001,6 @@ retry:
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
- list_del(&page->lru);
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
@@ -1008,7 +1015,6 @@ free:
/* Free unnecessary surplus pages to the buddy allocator */
if (!list_empty(&surplus_list)) {
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
- list_del(&page->lru);
put_page(page);
}
}
@@ -1112,7 +1118,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
struct hstate *h = hstate_vma(vma);
struct page *page;
long chg;
+ int ret, idx;
+ struct hugetlb_cgroup *h_cg;
+ idx = hstate_index(h);
/*
* Processes that did not create the mapping will have no
* reserves and will not have accounted against subpool
@@ -1123,27 +1132,43 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
*/
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
- return ERR_PTR(-VM_FAULT_OOM);
+ return ERR_PTR(-ENOMEM);
if (chg)
if (hugepage_subpool_get_pages(spool, chg))
- return ERR_PTR(-VM_FAULT_SIGBUS);
+ return ERR_PTR(-ENOSPC);
+ ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
+ if (ret) {
+ hugepage_subpool_put_pages(spool, chg);
+ return ERR_PTR(-ENOSPC);
+ }
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
- spin_unlock(&hugetlb_lock);
-
- if (!page) {
+ if (page) {
+ /* update page cgroup details */
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+ h_cg, page);
+ spin_unlock(&hugetlb_lock);
+ } else {
+ spin_unlock(&hugetlb_lock);
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
+ hugetlb_cgroup_uncharge_cgroup(idx,
+ pages_per_huge_page(h),
+ h_cg);
hugepage_subpool_put_pages(spool, chg);
- return ERR_PTR(-VM_FAULT_SIGBUS);
+ return ERR_PTR(-ENOSPC);
}
+ spin_lock(&hugetlb_lock);
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+ h_cg, page);
+ list_move(&page->lru, &h->hugepage_activelist);
+ spin_unlock(&hugetlb_lock);
}
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
-
return page;
}
@@ -1646,7 +1671,7 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct attribute_group *hstate_attr_group)
{
int retval;
- int hi = h - hstates;
+ int hi = hstate_index(h);
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
@@ -1741,11 +1766,13 @@ void hugetlb_unregister_node(struct node *node)
if (!nhs->hugepages_kobj)
return; /* no hstate attributes */
- for_each_hstate(h)
- if (nhs->hstate_kobjs[h - hstates]) {
- kobject_put(nhs->hstate_kobjs[h - hstates]);
- nhs->hstate_kobjs[h - hstates] = NULL;
+ for_each_hstate(h) {
+ int idx = hstate_index(h);
+ if (nhs->hstate_kobjs[idx]) {
+ kobject_put(nhs->hstate_kobjs[idx]);
+ nhs->hstate_kobjs[idx] = NULL;
}
+ }
kobject_put(nhs->hugepages_kobj);
nhs->hugepages_kobj = NULL;
@@ -1848,7 +1875,7 @@ static void __exit hugetlb_exit(void)
hugetlb_unregister_all_nodes();
for_each_hstate(h) {
- kobject_put(hstate_kobjs[h - hstates]);
+ kobject_put(hstate_kobjs[hstate_index(h)]);
}
kobject_put(hugepages_kobj);
@@ -1869,7 +1896,7 @@ static int __init hugetlb_init(void)
if (!size_to_hstate(default_hstate_size))
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
- default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
+ default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
if (default_hstate_max_huge_pages)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
@@ -1897,19 +1924,27 @@ void __init hugetlb_add_hstate(unsigned order)
printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
return;
}
- BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
+ BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
BUG_ON(order == 0);
- h = &hstates[max_hstate++];
+ h = &hstates[hugetlb_max_hstate++];
h->order = order;
h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
h->nr_huge_pages = 0;
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+ INIT_LIST_HEAD(&h->hugepage_activelist);
h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
+ /*
+ * Add cgroup control files only if the huge page consists
+ * of more than two normal pages. This is because we use
+ * page[2].lru.next for storing cgroup details.
+ */
+ if (order >= HUGETLB_CGROUP_MIN_ORDER)
+ hugetlb_cgroup_file_init(hugetlb_max_hstate - 1);
parsed_hstate = h;
}
@@ -1920,10 +1955,10 @@ static int __init hugetlb_nrpages_setup(char *s)
static unsigned long *last_mhp;
/*
- * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
+ * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
* so this hugepages= parameter goes to the "default hstate".
*/
- if (!max_hstate)
+ if (!hugetlb_max_hstate)
mhp = &default_hstate_max_huge_pages;
else
mhp = &parsed_hstate->max_huge_pages;
@@ -1942,7 +1977,7 @@ static int __init hugetlb_nrpages_setup(char *s)
* But we need to allocate >= MAX_ORDER hstates here early to still
* use the bootmem allocator.
*/
- if (max_hstate && parsed_hstate->order >= MAX_ORDER)
+ if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
hugetlb_hstate_alloc_pages(parsed_hstate);
last_mhp = mhp;
@@ -2308,30 +2343,26 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
return 0;
}
-void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page)
{
+ int force_flush = 0;
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
pte_t pte;
struct page *page;
- struct page *tmp;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
- /*
- * A page gathering list, protected by per file i_mmap_mutex. The
- * lock is used to avoid list corruption from multiple unmapping
- * of the same page since we are using page->lru.
- */
- LIST_HEAD(page_list);
-
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
+ tlb_start_vma(tlb, vma);
mmu_notifier_invalidate_range_start(mm, start, end);
+again:
spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
@@ -2370,30 +2401,64 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
}
pte = huge_ptep_get_and_clear(mm, address, ptep);
+ tlb_remove_tlb_entry(tlb, ptep, address);
if (pte_dirty(pte))
set_page_dirty(page);
- list_add(&page->lru, &page_list);
+ page_remove_rmap(page);
+ force_flush = !__tlb_remove_page(tlb, page);
+ if (force_flush)
+ break;
/* Bail out after unmapping reference page if supplied */
if (ref_page)
break;
}
- flush_tlb_range(vma, start, end);
spin_unlock(&mm->page_table_lock);
- mmu_notifier_invalidate_range_end(mm, start, end);
- list_for_each_entry_safe(page, tmp, &page_list, lru) {
- page_remove_rmap(page);
- list_del(&page->lru);
- put_page(page);
+ /*
+ * mmu_gather ran out of room to batch pages, so we break out of
+ * the PTE lock to avoid doing the potentially expensive TLB
+ * invalidate and page-free while holding it.
+ */
+ if (force_flush) {
+ force_flush = 0;
+ tlb_flush_mmu(tlb);
+ if (address < end && !ref_page)
+ goto again;
}
+ mmu_notifier_invalidate_range_end(mm, start, end);
+ tlb_end_vma(tlb, vma);
+}
+
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+{
+ __unmap_hugepage_range(tlb, vma, start, end, ref_page);
+
+ /*
+ * Clear this flag so that x86's huge_pmd_share page_table_shareable
+ * test will fail on a vma being torn down, and not grab a page table
+ * on its way out. We're lucky that the flag has such an appropriate
+ * name, and can in fact be safely cleared here. We could clear it
+ * before the __unmap_hugepage_range above, but all that's necessary
+ * is to clear it before releasing the i_mmap_mutex. This works
+ * because in the context this is called, the VMA is about to be
+ * destroyed and the i_mmap_mutex is held.
+ */
+ vma->vm_flags &= ~VM_MAYSHARE;
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
- mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
- __unmap_hugepage_range(vma, start, end, ref_page);
- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ struct mm_struct *mm;
+ struct mmu_gather tlb;
+
+ mm = vma->vm_mm;
+
+ tlb_gather_mmu(&tlb, mm, 0);
+ __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
+ tlb_finish_mmu(&tlb, start, end);
}
/*
@@ -2438,9 +2503,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
- __unmap_hugepage_range(iter_vma,
- address, address + huge_page_size(h),
- page);
+ unmap_hugepage_range(iter_vma, address,
+ address + huge_page_size(h), page);
}
mutex_unlock(&mapping->i_mmap_mutex);
@@ -2496,6 +2560,7 @@ retry_avoidcopy:
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
+ long err = PTR_ERR(new_page);
page_cache_release(old_page);
/*
@@ -2524,7 +2589,10 @@ retry_avoidcopy:
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
- return -PTR_ERR(new_page);
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ else
+ return VM_FAULT_SIGBUS;
}
/*
@@ -2642,7 +2710,11 @@ retry:
goto out;
page = alloc_huge_page(vma, address, 0);
if (IS_ERR(page)) {
- ret = -PTR_ERR(page);
+ ret = PTR_ERR(page);
+ if (ret == -ENOMEM)
+ ret = VM_FAULT_OOM;
+ else
+ ret = VM_FAULT_SIGBUS;
goto out;
}
clear_huge_page(page, address, pages_per_huge_page(h));
@@ -2679,7 +2751,7 @@ retry:
*/
if (unlikely(PageHWPoison(page))) {
ret = VM_FAULT_HWPOISON |
- VM_FAULT_SET_HINDEX(h - hstates);
+ VM_FAULT_SET_HINDEX(hstate_index(h));
goto backout_unlocked;
}
}
@@ -2752,7 +2824,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
- VM_FAULT_SET_HINDEX(h - hstates);
+ VM_FAULT_SET_HINDEX(hstate_index(h));
}
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
@@ -2959,9 +3031,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
}
}
spin_unlock(&mm->page_table_lock);
- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+ /*
+ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+ * may have cleared our pud entry and done put_page on the page table:
+ * once we release i_mmap_mutex, another task can do the final put_page
+ * and that page table can then be reused and filled with junk.
+ */
flush_tlb_range(vma, start, end);
+ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
int hugetlb_reserve_pages(struct inode *inode,
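
A standalone sketch (not part of the patch) of the error convention the alloc_huge_page() hunks above move to: the allocator encodes a plain errno in the returned pointer, and the fault paths translate -ENOMEM to VM_FAULT_OOM and everything else to VM_FAULT_SIGBUS. The ERR_PTR helpers and fault-code values are re-implemented here purely for illustration.

#include <stdio.h>
#include <errno.h>

#define VM_FAULT_OOM	0x0001	/* illustrative values */
#define VM_FAULT_SIGBUS	0x0002

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

/* Stand-in for alloc_huge_page() failing with a plain errno. */
static void *fake_alloc_huge_page(int err)
{
	return ERR_PTR(-(long)err);
}

int main(void)
{
	void *page = fake_alloc_huge_page(ENOSPC);

	if (IS_ERR(page)) {
		long err = PTR_ERR(page);
		int fault = (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

		printf("errno %ld -> fault code %#x\n", err, fault);
	}
	return 0;
}
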
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
new file mode 100644
index 000000000000..a3f358fb8a0c
--- /dev/null
+++ b/mm/hugetlb_cgroup.c
@@ -0,0 +1,418 @@
+/*
+ *
+ * Copyright IBM Corporation, 2012
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
+
+struct hugetlb_cgroup {
+ struct cgroup_subsys_state css;
+ /*
+ * the counter to account for hugepages from hugetlb.
+ */
+ struct res_counter hugepage[HUGE_MAX_HSTATE];
+};
+
+#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
+#define MEMFILE_IDX(val) (((val) >> 16) & 0xffff)
+#define MEMFILE_ATTR(val) ((val) & 0xffff)
+
+struct cgroup_subsys hugetlb_subsys __read_mostly;
+static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
+
+static inline
+struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
+{
+ return container_of(s, struct hugetlb_cgroup, css);
+}
+
+static inline
+struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
+{
+ return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
+ hugetlb_subsys_id));
+}
+
+static inline
+struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
+{
+ return hugetlb_cgroup_from_css(task_subsys_state(task,
+ hugetlb_subsys_id));
+}
+
+static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
+{
+ return (h_cg == root_h_cgroup);
+}
+
+static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
+{
+ if (!cg->parent)
+ return NULL;
+ return hugetlb_cgroup_from_cgroup(cg->parent);
+}
+
+static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
+{
+ int idx;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
+
+ for (idx = 0; idx < hugetlb_max_hstate; idx++) {
+ if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
+ return true;
+ }
+ return false;
+}
+
+static struct cgroup_subsys_state *hugetlb_cgroup_create(struct cgroup *cgroup)
+{
+ int idx;
+ struct cgroup *parent_cgroup;
+ struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
+
+ h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
+ if (!h_cgroup)
+ return ERR_PTR(-ENOMEM);
+
+ parent_cgroup = cgroup->parent;
+ if (parent_cgroup) {
+ parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
+ for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
+ res_counter_init(&h_cgroup->hugepage[idx],
+ &parent_h_cgroup->hugepage[idx]);
+ } else {
+ root_h_cgroup = h_cgroup;
+ for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
+ res_counter_init(&h_cgroup->hugepage[idx], NULL);
+ }
+ return &h_cgroup->css;
+}
+
+static void hugetlb_cgroup_destroy(struct cgroup *cgroup)
+{
+ struct hugetlb_cgroup *h_cgroup;
+
+ h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
+ kfree(h_cgroup);
+}
+
+
+/*
+ * Should be called with hugetlb_lock held.
+ * Since we are holding hugetlb_lock, pages cannot get moved from
+ * the active list or uncharged from the cgroup, so there is no need
+ * to take a page reference or to test whether the page is active
+ * here. This function cannot fail.
+ */
+static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+ struct page *page)
+{
+ int csize;
+ struct res_counter *counter;
+ struct res_counter *fail_res;
+ struct hugetlb_cgroup *page_hcg;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+
+ page_hcg = hugetlb_cgroup_from_page(page);
+ /*
+ * We can have pages on the active list without any cgroup,
+ * i.e., a hugepage with fewer than 3 pages. We can safely
+ * ignore those pages.
+ */
+ if (!page_hcg || page_hcg != h_cg)
+ goto out;
+
+ csize = PAGE_SIZE << compound_order(page);
+ if (!parent) {
+ parent = root_h_cgroup;
+ /* root has no limit */
+ res_counter_charge_nofail(&parent->hugepage[idx],
+ csize, &fail_res);
+ }
+ counter = &h_cg->hugepage[idx];
+ res_counter_uncharge_until(counter, counter->parent, csize);
+
+ set_hugetlb_cgroup(page, parent);
+out:
+ return;
+}
+
+/*
+ * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
+ * the parent cgroup.
+ */
+static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
+{
+ struct hstate *h;
+ struct page *page;
+ int ret = 0, idx = 0;
+
+ do {
+ if (cgroup_task_count(cgroup) ||
+ !list_empty(&cgroup->children)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ for_each_hstate(h) {
+ spin_lock(&hugetlb_lock);
+ list_for_each_entry(page, &h->hugepage_activelist, lru)
+ hugetlb_cgroup_move_parent(idx, cgroup, page);
+
+ spin_unlock(&hugetlb_lock);
+ idx++;
+ }
+ cond_resched();
+ } while (hugetlb_cgroup_have_usage(cgroup));
+out:
+ return ret;
+}
+
+int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ int ret = 0;
+ struct res_counter *fail_res;
+ struct hugetlb_cgroup *h_cg = NULL;
+ unsigned long csize = nr_pages * PAGE_SIZE;
+
+ if (hugetlb_cgroup_disabled())
+ goto done;
+ /*
+ * We don't charge any cgroup if the compound page has fewer
+ * than 3 pages.
+ */
+ if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+ goto done;
+again:
+ rcu_read_lock();
+ h_cg = hugetlb_cgroup_from_task(current);
+ if (!css_tryget(&h_cg->css)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res);
+ css_put(&h_cg->css);
+done:
+ *ptr = h_cg;
+ return ret;
+}
+
+/* Should be called with hugetlb_lock held */
+void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ if (hugetlb_cgroup_disabled() || !h_cg)
+ return;
+
+ set_hugetlb_cgroup(page, h_cg);
+ return;
+}
+
+/*
+ * Should be called with hugetlb_lock held
+ */
+void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page)
+{
+ struct hugetlb_cgroup *h_cg;
+ unsigned long csize = nr_pages * PAGE_SIZE;
+
+ if (hugetlb_cgroup_disabled())
+ return;
+ VM_BUG_ON(!spin_is_locked(&hugetlb_lock));
+ h_cg = hugetlb_cgroup_from_page(page);
+ if (unlikely(!h_cg))
+ return;
+ set_hugetlb_cgroup(page, NULL);
+ res_counter_uncharge(&h_cg->hugepage[idx], csize);
+ return;
+}
+
+void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ unsigned long csize = nr_pages * PAGE_SIZE;
+
+ if (hugetlb_cgroup_disabled() || !h_cg)
+ return;
+
+ if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
+ return;
+
+ res_counter_uncharge(&h_cg->hugepage[idx], csize);
+ return;
+}
+
+static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
+ struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ u64 val;
+ char str[64];
+ int idx, name, len;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+
+ idx = MEMFILE_IDX(cft->private);
+ name = MEMFILE_ATTR(cft->private);
+
+ val = res_counter_read_u64(&h_cg->hugepage[idx], name);
+ len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
+ return simple_read_from_buffer(buf, nbytes, ppos, str, len);
+}
+
+static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
+ const char *buffer)
+{
+ int idx, name, ret;
+ unsigned long long val;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+
+ idx = MEMFILE_IDX(cft->private);
+ name = MEMFILE_ATTR(cft->private);
+
+ switch (name) {
+ case RES_LIMIT:
+ if (hugetlb_cgroup_is_root(h_cg)) {
+ /* Can't set limit on root */
+ ret = -EINVAL;
+ break;
+ }
+ /* This function does all the necessary parsing; reuse it */
+ ret = res_counter_memparse_write_strategy(buffer, &val);
+ if (ret)
+ break;
+ ret = res_counter_set_limit(&h_cg->hugepage[idx], val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
+{
+ int idx, name, ret = 0;
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+
+ idx = MEMFILE_IDX(event);
+ name = MEMFILE_ATTR(event);
+
+ switch (name) {
+ case RES_MAX_USAGE:
+ res_counter_reset_max(&h_cg->hugepage[idx]);
+ break;
+ case RES_FAILCNT:
+ res_counter_reset_failcnt(&h_cg->hugepage[idx]);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static char *mem_fmt(char *buf, int size, unsigned long hsize)
+{
+ if (hsize >= (1UL << 30))
+ snprintf(buf, size, "%luGB", hsize >> 30);
+ else if (hsize >= (1UL << 20))
+ snprintf(buf, size, "%luMB", hsize >> 20);
+ else
+ snprintf(buf, size, "%luKB", hsize >> 10);
+ return buf;
+}
+
+int __init hugetlb_cgroup_file_init(int idx)
+{
+ char buf[32];
+ struct cftype *cft;
+ struct hstate *h = &hstates[idx];
+
+ /* format the size */
+ mem_fmt(buf, 32, huge_page_size(h));
+
+ /* Add the limit file */
+ cft = &h->cgroup_files[0];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
+ cft->read = hugetlb_cgroup_read;
+ cft->write_string = hugetlb_cgroup_write;
+
+ /* Add the usage file */
+ cft = &h->cgroup_files[1];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
+ cft->read = hugetlb_cgroup_read;
+
+ /* Add the MAX usage file */
+ cft = &h->cgroup_files[2];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
+ cft->trigger = hugetlb_cgroup_reset;
+ cft->read = hugetlb_cgroup_read;
+
+ /* Add the failcnt file */
+ cft = &h->cgroup_files[3];
+ snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
+ cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
+ cft->trigger = hugetlb_cgroup_reset;
+ cft->read = hugetlb_cgroup_read;
+
+ /* NULL terminate the last cft */
+ cft = &h->cgroup_files[4];
+ memset(cft, 0, sizeof(*cft));
+
+ WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
+
+ return 0;
+}
+
+/*
+ * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
+ * when we migrate hugepages
+ */
+void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
+{
+ struct hugetlb_cgroup *h_cg;
+ struct hstate *h = page_hstate(oldhpage);
+
+ if (hugetlb_cgroup_disabled())
+ return;
+
+ VM_BUG_ON(!PageHuge(oldhpage));
+ spin_lock(&hugetlb_lock);
+ h_cg = hugetlb_cgroup_from_page(oldhpage);
+ set_hugetlb_cgroup(oldhpage, NULL);
+
+ /* move the h_cg details to new cgroup */
+ set_hugetlb_cgroup(newhpage, h_cg);
+ list_move(&newhpage->lru, &h->hugepage_activelist);
+ spin_unlock(&hugetlb_lock);
+ return;
+}
+
+struct cgroup_subsys hugetlb_subsys = {
+ .name = "hugetlb",
+ .create = hugetlb_cgroup_create,
+ .pre_destroy = hugetlb_cgroup_pre_destroy,
+ .destroy = hugetlb_cgroup_destroy,
+ .subsys_id = hugetlb_subsys_id,
+};
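
A standalone sketch (not part of the patch) of the MEMFILE_* encoding used by the new hugetlb cgroup files above: the hstate index is packed into the upper 16 bits of cftype->private and the resource attribute into the lower 16 bits, so one integer identifies both. The RES_* values below are illustrative, not the kernel's definitions.

#include <stdio.h>
#include <assert.h>

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

enum { RES_USAGE, RES_LIMIT, RES_MAX_USAGE, RES_FAILCNT };	/* illustrative */

int main(void)
{
	int priv = MEMFILE_PRIVATE(1, RES_LIMIT);	/* hstate 1, limit file */

	assert(MEMFILE_IDX(priv) == 1);
	assert(MEMFILE_ATTR(priv) == RES_LIMIT);
	printf("private=%#x -> idx=%d attr=%d\n",
	       priv, MEMFILE_IDX(priv), MEMFILE_ATTR(priv));
	return 0;
}
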
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index cc448bb983ba..3a61efc518d5 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -123,7 +123,7 @@ static int pfn_inject_init(void)
if (!dentry)
goto fail;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
dentry = debugfs_create_u64("corrupt-filter-memcg", 0600,
hwpoison_dir, &hwpoison_filter_memcg);
if (!dentry)
diff --git a/mm/internal.h b/mm/internal.h
index 2ba87fbfb75b..b8c91b342e24 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -118,12 +118,19 @@ struct compact_control {
unsigned long nr_freepages; /* Number of isolated free pages */
unsigned long nr_migratepages; /* Number of pages to migrate */
unsigned long free_pfn; /* isolate_freepages search base */
+ unsigned long start_free_pfn; /* where we started the search */
unsigned long migrate_pfn; /* isolate_migratepages search base */
bool sync; /* Synchronous migration */
+ bool wrapped; /* Order > 0 compactions are
+ incremental; once free_pfn
+ and migrate_pfn meet, we restart
+ from the top of the zone;
+ remember we wrapped around. */
int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;
+ bool *contended; /* True if a lock was contended */
};
unsigned long
@@ -347,3 +354,5 @@ extern u32 hwpoison_filter_enable;
extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
+
+extern void set_pageblock_order(void);
diff --git a/mm/memblock.c b/mm/memblock.c
index 5cc6731b00cc..4d9393c7edc9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -222,13 +222,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
/* Try to find some space for it.
*
* WARNING: We assume that either slab_is_available() and we use it or
- * we use MEMBLOCK for allocations. That means that this is unsafe to use
- * when bootmem is currently active (unless bootmem itself is implemented
- * on top of MEMBLOCK which isn't the case yet)
+ * we use MEMBLOCK for allocations. That means that this is unsafe to
+ * use when bootmem is currently active (unless bootmem itself is
+ * implemented on top of MEMBLOCK which isn't the case yet)
*
* This should however not be an issue for now, as we currently only
- * call into MEMBLOCK while it's still active, or much later when slab is
- * active for memory hotplug operations
+ * call into MEMBLOCK while it's still active, or much later when slab
+ * is active for memory hotplug operations
*/
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
@@ -243,8 +243,8 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
new_alloc_size, PAGE_SIZE);
if (!addr && new_area_size)
addr = memblock_find_in_range(0,
- min(new_area_start, memblock.current_limit),
- new_alloc_size, PAGE_SIZE);
+ min(new_area_start, memblock.current_limit),
+ new_alloc_size, PAGE_SIZE);
new_array = addr ? __va(addr) : 0;
}
@@ -254,12 +254,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
return -1;
}
- memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
- memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
+ memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
+ memblock_type_name(type), type->max * 2, (u64)addr,
+ (u64)addr + new_size - 1);
- /* Found space, we now need to move the array over before
- * we add the reserved region since it may be our reserved
- * array itself that is full.
+ /*
+ * Found space, we now need to move the array over before we add the
+ * reserved region since it may be our reserved array itself that is
+ * full.
*/
memcpy(new_array, type->regions, old_size);
memset(new_array + type->max, 0, old_size);
@@ -267,17 +269,16 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
type->regions = new_array;
type->max <<= 1;
- /* Free old array. We needn't free it if the array is the
- * static one
- */
+ /* Free old array. We needn't free it if the array is the static one */
if (*in_slab)
kfree(old_array);
else if (old_array != memblock_memory_init_regions &&
old_array != memblock_reserved_init_regions)
memblock_free(__pa(old_array), old_alloc_size);
- /* Reserve the new array if that comes from the memblock.
- * Otherwise, we needn't do it
+ /*
+ * Reserve the new array if that comes from the memblock. Otherwise, we
+ * needn't do it
*/
if (!use_slab)
BUG_ON(memblock_reserve(addr, new_alloc_size));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f72b5e52451a..795e525afaba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -61,12 +61,12 @@ struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
static struct mem_cgroup *root_mem_cgroup __read_mostly;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
/* for remember boot option*/
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
+#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
@@ -87,7 +87,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
};
@@ -378,9 +378,7 @@ static bool move_file(void)
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
- MEM_CGROUP_CHARGE_TYPE_MAPPED,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
- MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
+ MEM_CGROUP_CHARGE_TYPE_ANON,
MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
NR_CHARGE_TYPE,
@@ -407,8 +405,14 @@ enum charge_type {
static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);
+static inline
+struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
+{
+ return container_of(s, struct mem_cgroup, css);
+}
+
/* Writing them here to avoid exposing memcg's inner layout */
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
#include <net/sock.h>
#include <net/ip.h>
@@ -467,9 +471,9 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
-#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#endif /* CONFIG_MEMCG_KMEM */
-#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
@@ -703,7 +707,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
bool charge)
{
int val = (charge) ? 1 : -1;
- this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
+ this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}
static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
@@ -864,9 +868,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
- return container_of(cgroup_subsys_state(cont,
- mem_cgroup_subsys_id), struct mem_cgroup,
- css);
+ return mem_cgroup_from_css(
+ cgroup_subsys_state(cont, mem_cgroup_subsys_id));
}
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
@@ -879,8 +882,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
if (unlikely(!p))
return NULL;
- return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
- struct mem_cgroup, css);
+ return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
}
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -966,8 +968,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
css = css_get_next(&mem_cgroup_subsys, id + 1, &root->css, &id);
if (css) {
if (css == &root->css || css_tryget(css))
- memcg = container_of(css,
- struct mem_cgroup, css);
+ memcg = mem_cgroup_from_css(css);
} else
id = 0;
rcu_read_unlock();
@@ -1454,7 +1455,7 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg)
/*
* Return the memory (and swap, if configured) limit for a memcg.
*/
-u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
+static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
u64 limit;
u64 memsw;
@@ -1470,6 +1471,73 @@ u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
return min(limit, memsw);
}
+void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
+ int order)
+{
+ struct mem_cgroup *iter;
+ unsigned long chosen_points = 0;
+ unsigned long totalpages;
+ unsigned int points = 0;
+ struct task_struct *chosen = NULL;
+
+ /*
+ * If current has a pending SIGKILL, then automatically select it. The
+ * goal is to allow it to allocate so that it may quickly exit and free
+ * its memory.
+ */
+ if (fatal_signal_pending(current)) {
+ set_thread_flag(TIF_MEMDIE);
+ return;
+ }
+
+ check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
+ totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
+ for_each_mem_cgroup_tree(iter, memcg) {
+ struct cgroup *cgroup = iter->css.cgroup;
+ struct cgroup_iter it;
+ struct task_struct *task;
+
+ cgroup_iter_start(cgroup, &it);
+ while ((task = cgroup_iter_next(cgroup, &it))) {
+ switch (oom_scan_process_thread(task, totalpages, NULL,
+ false)) {
+ case OOM_SCAN_SELECT:
+ if (chosen)
+ put_task_struct(chosen);
+ chosen = task;
+ chosen_points = ULONG_MAX;
+ get_task_struct(chosen);
+ /* fall through */
+ case OOM_SCAN_CONTINUE:
+ continue;
+ case OOM_SCAN_ABORT:
+ cgroup_iter_end(cgroup, &it);
+ mem_cgroup_iter_break(memcg, iter);
+ if (chosen)
+ put_task_struct(chosen);
+ return;
+ case OOM_SCAN_OK:
+ break;
+ };
+ points = oom_badness(task, memcg, NULL, totalpages);
+ if (points > chosen_points) {
+ if (chosen)
+ put_task_struct(chosen);
+ chosen = task;
+ chosen_points = points;
+ get_task_struct(chosen);
+ }
+ }
+ cgroup_iter_end(cgroup, &it);
+ }
+
+ if (!chosen)
+ return;
+ points = chosen_points * 1000 / totalpages;
+ oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
+ NULL, "Memory cgroup out of memory");
+}
+
static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
gfp_t gfp_mask,
unsigned long flags)
@@ -1899,7 +1967,7 @@ again:
return;
/*
* If this memory cgroup is not under account moving, we don't
- * need to take move_lock_page_cgroup(). Because we already hold
+ * need to take move_lock_mem_cgroup(). Because we already hold
* rcu_read_lock(), any calls to move_account will be delayed until
* rcu_read_unlock() if mem_cgroup_stolen() == true.
*/
@@ -1921,7 +1989,7 @@ void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
/*
* It's guaranteed that pc->mem_cgroup never changes while
* lock is held because a routine modifies pc->mem_cgroup
- * should take move_lock_page_cgroup().
+ * should take move_lock_mem_cgroup().
*/
move_unlock_mem_cgroup(pc->mem_cgroup, flags);
}
@@ -2268,7 +2336,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
* We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the
* thread group leader migrates. It's possible that mm is not
- * set, if so charge the init_mm (happens for pagecache usage).
+ * set, if so charge the root memcg (happens for pagecache usage).
*/
if (!*ptr && !mm)
*ptr = root_mem_cgroup;
@@ -2429,7 +2497,7 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
css = css_lookup(&mem_cgroup_subsys, id);
if (!css)
return NULL;
- return container_of(css, struct mem_cgroup, css);
+ return mem_cgroup_from_css(css);
}
struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
@@ -2473,11 +2541,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
bool anon;
lock_page_cgroup(pc);
- if (unlikely(PageCgroupUsed(pc))) {
- unlock_page_cgroup(pc);
- __mem_cgroup_cancel_charge(memcg, nr_pages);
- return;
- }
+ VM_BUG_ON(PageCgroupUsed(pc));
/*
* we don't need page_cgroup_lock for tail pages, because they are not
* accessed by any other context at this point.
@@ -2519,7 +2583,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
spin_unlock_irq(&zone->lru_lock);
}
- if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+ if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
anon = true;
else
anon = false;
@@ -2644,8 +2708,7 @@ out:
static int mem_cgroup_move_parent(struct page *page,
struct page_cgroup *pc,
- struct mem_cgroup *child,
- gfp_t gfp_mask)
+ struct mem_cgroup *child)
{
struct mem_cgroup *parent;
unsigned int nr_pages;
@@ -2728,38 +2791,7 @@ int mem_cgroup_newpage_charge(struct page *page,
VM_BUG_ON(page->mapping && !PageAnon(page));
VM_BUG_ON(!mm);
return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_MAPPED);
-}
-
-static void
-__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
- enum charge_type ctype);
-
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask)
-{
- struct mem_cgroup *memcg = NULL;
- enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
- int ret;
-
- if (mem_cgroup_disabled())
- return 0;
- if (PageCompound(page))
- return 0;
-
- if (unlikely(!mm))
- mm = &init_mm;
- if (!page_is_file_cache(page))
- type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-
- if (!PageSwapCache(page))
- ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
- else { /* page is swapcache/shmem */
- ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg);
- if (!ret)
- __mem_cgroup_commit_charge_swapin(page, memcg, type);
- }
- return ret;
+ MEM_CGROUP_CHARGE_TYPE_ANON);
}
/*
@@ -2768,27 +2800,26 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
* struct page_cgroup is acquired. This refcnt will be consumed by
* "commit()" or removed by "cancel()"
*/
-int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
- struct page *page,
- gfp_t mask, struct mem_cgroup **memcgp)
+static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+ struct page *page,
+ gfp_t mask,
+ struct mem_cgroup **memcgp)
{
struct mem_cgroup *memcg;
+ struct page_cgroup *pc;
int ret;
- *memcgp = NULL;
-
- if (mem_cgroup_disabled())
- return 0;
-
- if (!do_swap_account)
- goto charge_cur_mm;
+ pc = lookup_page_cgroup(page);
/*
- * A racing thread's fault, or swapoff, may have already updated
- * the pte, and even removed page from swap cache: in those cases
- * do_swap_page()'s pte_same() test will fail; but there's also a
- * KSM case which does need to charge the page.
+ * Every swap fault against a single page tries to charge the
+ * page, so bail as early as possible. shmem_unuse() encounters
+ * already charged pages, too. The USED bit is protected by
+ * the page lock, which serializes swap cache removal, which
+ * in turn serializes uncharging.
*/
- if (!PageSwapCache(page))
+ if (PageCgroupUsed(pc))
+ return 0;
+ if (!do_swap_account)
goto charge_cur_mm;
memcg = try_get_mem_cgroup_from_page(page);
if (!memcg)
@@ -2800,14 +2831,44 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
ret = 0;
return ret;
charge_cur_mm:
- if (unlikely(!mm))
- mm = &init_mm;
ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
if (ret == -EINTR)
ret = 0;
return ret;
}
+int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp)
+{
+ *memcgp = NULL;
+ if (mem_cgroup_disabled())
+ return 0;
+ /*
+ * A racing thread's fault, or swapoff, may have already
+ * updated the pte, and even removed page from swap cache: in
+ * those cases unuse_pte()'s pte_same() test will fail; but
+ * there's also a KSM case which does need to charge the page.
+ */
+ if (!PageSwapCache(page)) {
+ int ret;
+
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
+ if (ret == -EINTR)
+ ret = 0;
+ return ret;
+ }
+ return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
+}
+
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return;
+ if (!memcg)
+ return;
+ __mem_cgroup_cancel_charge(memcg, 1);
+}
+
static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
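For orientation, a hedged sketch of the two-step swap-in charging protocol the refactoring above exposes: try_charge pins a memcg, and the caller must either commit the charge once the page is mapped or cancel it on failure. Only the three mem_cgroup_*_charge_swapin() signatures come from this patch; the surrounding function and the install_failed() check are invented for illustration and are not buildable outside the kernel tree:

	static int example_swapin_fault(struct mm_struct *mm, struct page *page)
	{
		struct mem_cgroup *memcg;
		int ret;

		ret = mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg);
		if (ret)
			return ret;		/* nothing charged, nothing to undo */

		if (install_failed(page)) {	/* hypothetical failure point */
			mem_cgroup_cancel_charge_swapin(memcg);
			return -EFAULT;
		}

		mem_cgroup_commit_charge_swapin(page, memcg);
		return 0;
	}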
@@ -2842,16 +2903,30 @@ void mem_cgroup_commit_charge_swapin(struct page *page,
struct mem_cgroup *memcg)
{
__mem_cgroup_commit_charge_swapin(page, memcg,
- MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ MEM_CGROUP_CHARGE_TYPE_ANON);
}
-void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask)
{
+ struct mem_cgroup *memcg = NULL;
+ enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ int ret;
+
if (mem_cgroup_disabled())
- return;
- if (!memcg)
- return;
- __mem_cgroup_cancel_charge(memcg, 1);
+ return 0;
+ if (PageCompound(page))
+ return 0;
+
+ if (!PageSwapCache(page))
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
+ else { /* page is swapcache/shmem */
+ ret = __mem_cgroup_try_charge_swapin(mm, page,
+ gfp_mask, &memcg);
+ if (!ret)
+ __mem_cgroup_commit_charge_swapin(page, memcg, type);
+ }
+ return ret;
}
static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
@@ -2911,7 +2986,8 @@ direct_uncharge:
* uncharge if !page_mapped(page)
*/
static struct mem_cgroup *
-__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
+__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
+ bool end_migration)
{
struct mem_cgroup *memcg = NULL;
unsigned int nr_pages = 1;
@@ -2921,8 +2997,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
if (mem_cgroup_disabled())
return NULL;
- if (PageSwapCache(page))
- return NULL;
+ VM_BUG_ON(PageSwapCache(page));
if (PageTransHuge(page)) {
nr_pages <<= compound_order(page);
@@ -2945,7 +3020,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
anon = PageAnon(page);
switch (ctype) {
- case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ case MEM_CGROUP_CHARGE_TYPE_ANON:
/*
* Generally PageAnon tells if it's the anon statistics to be
* updated; but sometimes e.g. mem_cgroup_uncharge_page() is
@@ -2955,7 +3030,16 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
/* fallthrough */
case MEM_CGROUP_CHARGE_TYPE_DROP:
/* See mem_cgroup_prepare_migration() */
- if (page_mapped(page) || PageCgroupMigration(pc))
+ if (page_mapped(page))
+ goto unlock_out;
+ /*
+ * Pages under migration may not be uncharged. But
+ * end_migration() /must/ be the one uncharging the
+ * unused post-migration page and so it has to call
+ * here with the migration bit still set. See the
+ * res_counter handling below.
+ */
+ if (!end_migration && PageCgroupMigration(pc))
goto unlock_out;
break;
case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
@@ -2989,7 +3073,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
mem_cgroup_swap_statistics(memcg, true);
mem_cgroup_get(memcg);
}
- if (!mem_cgroup_is_root(memcg))
+ /*
+ * Migration does not charge the res_counter for the
+ * replacement page, so leave it alone when phasing out the
+ * page that is unused after the migration.
+ */
+ if (!end_migration && !mem_cgroup_is_root(memcg))
mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
return memcg;
@@ -3005,14 +3094,16 @@ void mem_cgroup_uncharge_page(struct page *page)
if (page_mapped(page))
return;
VM_BUG_ON(page->mapping && !PageAnon(page));
- __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ if (PageSwapCache(page))
+ return;
+ __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
}
void mem_cgroup_uncharge_cache_page(struct page *page)
{
VM_BUG_ON(page_mapped(page));
VM_BUG_ON(page->mapping);
- __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
+ __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
}
/*
@@ -3076,7 +3167,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
if (!swapout) /* this was a swap cache but the swap is unused ! */
ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
- memcg = __mem_cgroup_uncharge_common(page, ctype);
+ memcg = __mem_cgroup_uncharge_common(page, ctype, false);
/*
* record memcg information, if swapout && memcg != NULL,
@@ -3087,7 +3178,7 @@ mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
}
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
/*
* called from swap_entry_free(). remove record in swap_cgroup and
* uncharge "memsw" account.
@@ -3166,19 +3257,18 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
* Before starting migration, account PAGE_SIZE to mem_cgroup that the old
* page belongs to.
*/
-int mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask)
+void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
+ struct mem_cgroup **memcgp)
{
struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type ctype;
- int ret = 0;
*memcgp = NULL;
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
- return 0;
+ return;
pc = lookup_page_cgroup(page);
lock_page_cgroup(pc);
@@ -3223,24 +3313,9 @@ int mem_cgroup_prepare_migration(struct page *page,
* we return here.
*/
if (!memcg)
- return 0;
+ return;
*memcgp = memcg;
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, memcgp, false);
- css_put(&memcg->css);/* drop extra refcnt */
- if (ret) {
- if (PageAnon(page)) {
- lock_page_cgroup(pc);
- ClearPageCgroupMigration(pc);
- unlock_page_cgroup(pc);
- /*
- * The old page may be fully unmapped while we kept it.
- */
- mem_cgroup_uncharge_page(page);
- }
- /* we'll need to revisit this error code (we have -EINTR) */
- return -ENOMEM;
- }
/*
* We charge new page before it's used/mapped. So, even if unlock_page()
* is called before end_migration, we can catch all events on this new
@@ -3248,13 +3323,15 @@ int mem_cgroup_prepare_migration(struct page *page,
* mapcount will be finally 0 and we call uncharge in end_migration().
*/
if (PageAnon(page))
- ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
- else if (page_is_file_cache(page))
- ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
else
- ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+ ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ /*
+ * The page is committed to the memcg, but it's not actually
+ * charged to the res_counter since we plan on replacing the
+ * old one and only one page is going to be left afterwards.
+ */
__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
- return ret;
}
/* remove redundant charge if migration failed*/
@@ -3276,6 +3353,12 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
used = newpage;
unused = oldpage;
}
+ anon = PageAnon(used);
+ __mem_cgroup_uncharge_common(unused,
+ anon ? MEM_CGROUP_CHARGE_TYPE_ANON
+ : MEM_CGROUP_CHARGE_TYPE_CACHE,
+ true);
+ css_put(&memcg->css);
/*
* We disallowed uncharge of pages under migration because mapcount
 * of the page goes down to zero, temporarily.
@@ -3285,10 +3368,6 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
lock_page_cgroup(pc);
ClearPageCgroupMigration(pc);
unlock_page_cgroup(pc);
- anon = PageAnon(used);
- __mem_cgroup_uncharge_common(unused,
- anon ? MEM_CGROUP_CHARGE_TYPE_MAPPED
- : MEM_CGROUP_CHARGE_TYPE_CACHE);
/*
* If a page is a file cache, radix-tree replacement is very atomic
@@ -3340,10 +3419,6 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
*/
if (!memcg)
return;
-
- if (PageSwapBacked(oldpage))
- type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-
/*
* Even if newpage->mapping was NULL before starting replacement,
* the newpage may be on LRU(or pagevec for LRU) already. We lock
@@ -3418,7 +3493,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
/*
 * Rather than hiding all of this in some function, do it in an
 * open-coded manner so you can see what it really does.
- * We have to guarantee memcg->res.limit < memcg->memsw.limit.
+ * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
@@ -3479,7 +3554,7 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
/*
 * Rather than hiding all of this in some function, do it in an
 * open-coded manner so you can see what it really does.
- * We have to guarantee memcg->res.limit < memcg->memsw.limit.
+ * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
*/
mutex_lock(&set_limit_mutex);
memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@ -3611,10 +3686,12 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
}
/*
- * This routine traverse page_cgroup in given list and drop them all.
- * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
+ * Traverse a specified page_cgroup list and try to drop them all. This doesn't
+ * reclaim the pages themselves - it just removes the page_cgroups.
+ * Returns true if some page_cgroups were not freed, indicating that the caller
+ * must retry this operation.
*/
-static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
+static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
int node, int zid, enum lru_list lru)
{
struct mem_cgroup_per_zone *mz;
@@ -3622,7 +3699,6 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
struct list_head *list;
struct page *busy;
struct zone *zone;
- int ret = 0;
zone = &NODE_DATA(node)->node_zones[zid];
mz = mem_cgroup_zoneinfo(memcg, node, zid);
@@ -3636,7 +3712,6 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
struct page_cgroup *pc;
struct page *page;
- ret = 0;
spin_lock_irqsave(&zone->lru_lock, flags);
if (list_empty(list)) {
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -3653,21 +3728,14 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
pc = lookup_page_cgroup(page);
- ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
- if (ret == -ENOMEM || ret == -EINTR)
- break;
-
- if (ret == -EBUSY || ret == -EINVAL) {
+ if (mem_cgroup_move_parent(page, pc, memcg)) {
/* found lock contention or "pc" is obsolete. */
busy = page;
cond_resched();
} else
busy = NULL;
}
-
- if (!ret && !list_empty(list))
- return -EBUSY;
- return ret;
+ return !list_empty(list);
}
/*
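The bool return above implies a retry loop in the caller. A rough, illustrative sketch of that pattern (the iteration macros exist in the kernel, but the function below is invented and only mirrors what mem_cgroup_force_empty() is expected to do):

	static void example_drain_all(struct mem_cgroup *memcg)
	{
		enum lru_list lru;
		bool busy;
		int node, zid;

		do {
			busy = false;
			for_each_node_state(node, N_HIGH_MEMORY)
				for (zid = 0; zid < MAX_NR_ZONES; zid++)
					for_each_lru(lru)
						busy |= mem_cgroup_force_empty_list(memcg,
									node, zid, lru);
			/* keep retrying until every list drained cleanly */
			cond_resched();
		} while (busy);
	}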
@@ -3692,9 +3760,6 @@ move_account:
ret = -EBUSY;
if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
goto out;
- ret = -EINTR;
- if (signal_pending(current))
- goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
drain_all_stock_sync(memcg);
@@ -3715,9 +3780,6 @@ move_account:
}
mem_cgroup_end_move(memcg);
memcg_oom_recover(memcg);
- /* it seems parent cgroup doesn't have enough mem */
- if (ret == -ENOMEM)
- goto try_to_free;
cond_resched();
/* "ret" should also be checked to ensure all lists are empty. */
} while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
@@ -3779,6 +3841,10 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
parent_memcg = mem_cgroup_from_cont(parent);
cgroup_lock();
+
+ if (memcg->use_hierarchy == val)
+ goto out;
+
/*
* If parent's use_hierarchy is set, we can't make any modifications
* in the child subtrees. If it is unset, then the change can
@@ -3795,6 +3861,8 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
retval = -EBUSY;
} else
retval = -EINVAL;
+
+out:
cgroup_unlock();
return retval;
@@ -3831,7 +3899,7 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
if (swap)
- val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
+ val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
return val << PAGE_SHIFT;
}
@@ -4015,7 +4083,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
#endif
#ifdef CONFIG_NUMA
-static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
+static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
struct seq_file *m)
{
int nid;
@@ -4074,7 +4142,7 @@ static inline void mem_cgroup_lru_names_not_uptodate(void)
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}
-static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
+static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
@@ -4082,7 +4150,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
unsigned int i;
for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
- if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
+ if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
continue;
seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
@@ -4109,7 +4177,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
long long val = 0;
- if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
+ if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
continue;
for_each_mem_cgroup_tree(mi, memcg)
val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
@@ -4533,7 +4601,7 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
return 0;
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
return mem_cgroup_sockets_init(memcg, ss);
@@ -4588,7 +4656,7 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "stat",
- .read_seq_string = mem_control_stat_show,
+ .read_seq_string = memcg_stat_show,
},
{
.name = "force_empty",
@@ -4620,10 +4688,10 @@ static struct cftype mem_cgroup_files[] = {
#ifdef CONFIG_NUMA
{
.name = "numa_stat",
- .read_seq_string = mem_control_numa_stat_show,
+ .read_seq_string = memcg_numa_stat_show,
},
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
{
.name = "memsw.usage_in_bytes",
.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
@@ -4810,7 +4878,7 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(parent_mem_cgroup);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
static void __init enable_swap_cgroup(void)
{
if (!mem_cgroup_disabled() && really_do_swap_account)
@@ -5541,7 +5609,7 @@ struct cgroup_subsys mem_cgroup_subsys = {
.__DEPRECATED_clear_css_refs = true,
};
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
/* consider enabled if no parameter or 1 is given */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index de4ce7058450..a6e2141a6610 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -128,7 +128,7 @@ static int hwpoison_filter_flags(struct page *p)
* can only guarantee that the page either belongs to the memcg tasks, or is
* a freed page.
*/
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
@@ -1416,7 +1416,6 @@ static int soft_offline_huge_page(struct page *page, int flags)
int ret;
unsigned long pfn = page_to_pfn(page);
struct page *hpage = compound_head(page);
- LIST_HEAD(pagelist);
ret = get_any_page(page, pfn, flags);
if (ret < 0)
@@ -1431,24 +1430,18 @@ static int soft_offline_huge_page(struct page *page, int flags)
}
/* Keep page count to indicate a given hugepage is isolated. */
-
- list_add(&hpage->lru, &pagelist);
- ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
- true);
+ ret = migrate_huge_page(hpage, new_page, MPOL_MF_MOVE_ALL, false,
+ MIGRATE_SYNC);
+ put_page(hpage);
if (ret) {
- struct page *page1, *page2;
- list_for_each_entry_safe(page1, page2, &pagelist, lru)
- put_page(page1);
-
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
- if (ret > 0)
- ret = -EIO;
return ret;
}
done:
if (!PageHWPoison(hpage))
- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
+ atomic_long_add(1 << compound_trans_order(hpage),
+ &mce_bad_pages);
set_page_hwpoison_huge_page(hpage);
dequeue_hwpoisoned_huge_page(hpage);
/* keep elevated page count for bad page */
@@ -1563,7 +1556,7 @@ int soft_offline_page(struct page *page, int flags)
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
- 0, MIGRATE_SYNC);
+ false, MIGRATE_SYNC);
if (ret) {
putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory.c b/mm/memory.c
index 91f69459d3e8..57361708d1a5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1343,8 +1343,11 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* Since no pte has actually been setup, it is
* safe to do nothing in this case.
*/
- if (vma->vm_file)
- unmap_hugepage_range(vma, start, end, NULL);
+ if (vma->vm_file) {
+ mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
+ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+ }
} else
unmap_page_range(tlb, vma, start, end, details);
}
@@ -2647,6 +2650,9 @@ reuse:
if (!page_mkwrite) {
wait_on_page_locked(dirty_page);
set_page_dirty_balance(dirty_page, page_mkwrite);
+ /* file_update_time outside page_lock */
+ if (vma->vm_file)
+ file_update_time(vma->vm_file);
}
put_page(dirty_page);
if (page_mkwrite) {
@@ -2664,10 +2670,6 @@ reuse:
}
}
- /* file_update_time outside page_lock */
- if (vma->vm_file)
- file_update_time(vma->vm_file);
-
return ret;
}
@@ -3336,12 +3338,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (dirty_page) {
struct address_space *mapping = page->mapping;
+ int dirtied = 0;
if (set_page_dirty(dirty_page))
- page_mkwrite = 1;
+ dirtied = 1;
unlock_page(dirty_page);
put_page(dirty_page);
- if (page_mkwrite && mapping) {
+ if ((dirtied || page_mkwrite) && mapping) {
/*
* Some device drivers do not set page.mapping but still
* dirty their pages
@@ -3350,7 +3353,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
/* file_update_time outside page_lock */
- if (vma->vm_file)
+ if (vma->vm_file && !page_mkwrite)
file_update_time(vma->vm_file);
} else {
unlock_page(vmf.page);
@@ -3938,7 +3941,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
free_page((unsigned long)buf);
}
}
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
}
#ifdef CONFIG_PROVE_LOCKING
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 427bb291dd0f..3ad25f9d1fc1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -512,19 +512,20 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
- if (need_zonelists_rebuild)
- build_all_zonelists(zone);
- else
- zone_pcp_update(zone);
+ if (onlined_pages) {
+ node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
+ if (need_zonelists_rebuild)
+ build_all_zonelists(NULL, zone);
+ else
+ zone_pcp_update(zone);
+ }
mutex_unlock(&zonelists_mutex);
init_per_zone_wmark_min();
- if (onlined_pages) {
+ if (onlined_pages)
kswapd_run(zone_to_nid(zone));
- node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
- }
vm_total_pages = nr_free_pagecache_pages();
@@ -562,7 +563,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
* to access not-initialized zonelist, build here.
*/
mutex_lock(&zonelists_mutex);
- build_all_zonelists(NULL);
+ build_all_zonelists(pgdat, NULL);
mutex_unlock(&zonelists_mutex);
return pgdat;
@@ -965,6 +966,9 @@ repeat:
init_per_zone_wmark_min();
+ if (!populated_zone(zone))
+ zone_pcp_reset(zone);
+
if (!node_present_pages(node)) {
node_clear_state(node, N_HIGH_MEMORY);
kswapd_stop(node);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1d771e4200d2..4ada3be6e252 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1602,8 +1602,14 @@ static unsigned interleave_nodes(struct mempolicy *policy)
* task can change it's policy. The system default policy requires no
* such protection.
*/
-unsigned slab_node(struct mempolicy *policy)
+unsigned slab_node(void)
{
+ struct mempolicy *policy;
+
+ if (in_interrupt())
+ return numa_node_id();
+
+ policy = current->mempolicy;
if (!policy || policy->flags & MPOL_F_LOCAL)
return numa_node_id();
@@ -2556,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
break;
default:
- BUG();
+ return -EINVAL;
}
l = strlen(policy_modes[mode]);
diff --git a/mm/mempool.c b/mm/mempool.c
index d9049811f352..54990476c049 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -63,19 +63,21 @@ EXPORT_SYMBOL(mempool_destroy);
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
- return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,-1);
+ return mempool_create_node(min_nr,alloc_fn,free_fn, pool_data,
+ GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
- mempool_free_t *free_fn, void *pool_data, int node_id)
+ mempool_free_t *free_fn, void *pool_data,
+ gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
- pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
+ pool = kmalloc_node(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
if (!pool)
return NULL;
pool->elements = kmalloc_node(min_nr * sizeof(void *),
- GFP_KERNEL, node_id);
+ gfp_mask, node_id);
if (!pool->elements) {
kfree(pool);
return NULL;
@@ -93,7 +95,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
while (pool->curr_nr < pool->min_nr) {
void *element;
- element = pool->alloc(GFP_KERNEL, pool->pool_data);
+ element = pool->alloc(gfp_mask, pool->pool_data);
if (unlikely(!element)) {
mempool_destroy(pool);
return NULL;
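For reference, a brief sketch of the updated mempool_create_node() call: the new gfp_t argument controls the pool's own bootstrap allocations. mempool_alloc_slab()/mempool_free_slab() are the stock slab-backed element helpers; the cache, pool size, and node are assumptions made for the example:

	/* a pool of 16 objects from an existing kmem_cache, local to node 'nid' */
	static mempool_t *example_create_pool(struct kmem_cache *cachep, int nid)
	{
		return mempool_create_node(16, mempool_alloc_slab, mempool_free_slab,
					   cachep, GFP_KERNEL, nid);
	}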
diff --git a/mm/migrate.c b/mm/migrate.c
index be26d5cbe56b..77ed2d773705 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -33,6 +33,7 @@
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
+#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
@@ -682,7 +683,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
{
int rc = -EAGAIN;
int remap_swapcache = 1;
- int charge = 0;
struct mem_cgroup *mem;
struct anon_vma *anon_vma = NULL;
@@ -724,12 +724,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
/* charge against new page */
- charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
- if (charge == -ENOMEM) {
- rc = -ENOMEM;
- goto unlock;
- }
- BUG_ON(charge);
+ mem_cgroup_prepare_migration(page, newpage, &mem);
if (PageWriteback(page)) {
/*
@@ -819,8 +814,7 @@ skip_unmap:
put_anon_vma(anon_vma);
uncharge:
- if (!charge)
- mem_cgroup_end_migration(mem, page, newpage, rc == 0);
+ mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
unlock_page(page);
out:
@@ -931,16 +925,13 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
if (anon_vma)
put_anon_vma(anon_vma);
- unlock_page(hpage);
-out:
- if (rc != -EAGAIN) {
- list_del(&hpage->lru);
- put_page(hpage);
- }
+ if (!rc)
+ hugetlb_cgroup_migrate(hpage, new_hpage);
+ unlock_page(hpage);
+out:
put_page(new_hpage);
-
if (result) {
if (rc)
*result = rc;
@@ -1016,48 +1007,32 @@ out:
return nr_failed + retry;
}
-int migrate_huge_pages(struct list_head *from,
- new_page_t get_new_page, unsigned long private, bool offlining,
- enum migrate_mode mode)
+int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
+ unsigned long private, bool offlining,
+ enum migrate_mode mode)
{
- int retry = 1;
- int nr_failed = 0;
- int pass = 0;
- struct page *page;
- struct page *page2;
- int rc;
-
- for (pass = 0; pass < 10 && retry; pass++) {
- retry = 0;
-
- list_for_each_entry_safe(page, page2, from, lru) {
+ int pass, rc;
+
+ for (pass = 0; pass < 10; pass++) {
+ rc = unmap_and_move_huge_page(get_new_page,
+ private, hpage, pass > 2, offlining,
+ mode);
+ switch (rc) {
+ case -ENOMEM:
+ goto out;
+ case -EAGAIN:
+ /* try again */
cond_resched();
-
- rc = unmap_and_move_huge_page(get_new_page,
- private, page, pass > 2, offlining,
- mode);
-
- switch(rc) {
- case -ENOMEM:
- goto out;
- case -EAGAIN:
- retry++;
- break;
- case 0:
- break;
- default:
- /* Permanent failure */
- nr_failed++;
- break;
- }
+ break;
+ case 0:
+ goto out;
+ default:
+ rc = -EIO;
+ goto out;
}
}
- rc = 0;
out:
- if (rc)
- return rc;
-
- return nr_failed + retry;
+ return rc;
}
#ifdef CONFIG_NUMA
diff --git a/mm/mmap.c b/mm/mmap.c
index 3edfcdfa42d9..ae18a48e7e4e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -943,6 +943,8 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
const unsigned long stack_flags
= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+ mm->total_vm += pages;
+
if (file) {
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
@@ -1347,7 +1349,6 @@ munmap_back:
out:
perf_event_mmap(vma);
- mm->total_vm += len >> PAGE_SHIFT;
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
if (!mlock_vma_pages_range(vma, addr, addr + len))
@@ -1355,9 +1356,8 @@ out:
} else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
make_pages_present(addr, addr + len);
- if (file && uprobe_mmap(vma))
- /* matching probes but cannot insert */
- goto unmap_and_free_vma;
+ if (file)
+ uprobe_mmap(vma);
return addr;
@@ -1707,7 +1707,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
return -ENOMEM;
/* Ok, everything looks good - let it rip */
- mm->total_vm += grow;
if (vma->vm_flags & VM_LOCKED)
mm->locked_vm += grow;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
@@ -1889,7 +1888,6 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
- mm->total_vm -= nrpages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
} while (vma);
@@ -2310,7 +2308,7 @@ void exit_mmap(struct mm_struct *mm)
}
vm_unacct_memory(nr_accounted);
- BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
+ WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
/* Insert vm structure into process list sorted by address
@@ -2345,9 +2343,6 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
security_vm_enough_memory_mm(mm, vma_pages(vma)))
return -ENOMEM;
- if (vma->vm_file && uprobe_mmap(vma))
- return -EINVAL;
-
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
@@ -2418,9 +2413,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
if (new_vma->vm_file) {
get_file(new_vma->vm_file);
- if (uprobe_mmap(new_vma))
- goto out_free_mempol;
-
if (vma->vm_flags & VM_EXECUTABLE)
added_exe_file_vma(mm);
}
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 9a611d3a1848..862b60822d9f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -33,6 +33,24 @@
void __mmu_notifier_release(struct mm_struct *mm)
{
struct mmu_notifier *mn;
+ struct hlist_node *n;
+
+ /*
+ * RCU here will block mmu_notifier_unregister until
+ * ->release returns.
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+ /*
+ * if ->release runs before mmu_notifier_unregister it
+ * must be handled as it's the only way for the driver
+ * to flush all existing sptes and stop the driver
+ * from establishing any more sptes before all the
+ * pages in the mm are freed.
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ rcu_read_unlock();
spin_lock(&mm->mmu_notifier_mm->lock);
while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
* mmu_notifier_unregister to return.
*/
hlist_del_init_rcu(&mn->hlist);
- /*
- * RCU here will block mmu_notifier_unregister until
- * ->release returns.
- */
- rcu_read_lock();
- spin_unlock(&mm->mmu_notifier_mm->lock);
- /*
- * if ->release runs before mmu_notifier_unregister it
- * must be handled as it's the only way for the driver
- * to flush all existing sptes and stop the driver
- * from establishing any more sptes before all the
- * pages in the mm are freed.
- */
- if (mn->ops->release)
- mn->ops->release(mn, mm);
- rcu_read_unlock();
- spin_lock(&mm->mmu_notifier_mm->lock);
}
spin_unlock(&mm->mmu_notifier_mm->lock);
@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
BUG_ON(atomic_read(&mm->mm_count) <= 0);
- spin_lock(&mm->mmu_notifier_mm->lock);
if (!hlist_unhashed(&mn->hlist)) {
- hlist_del_rcu(&mn->hlist);
-
/*
* RCU here will force exit_mmap to wait ->release to finish
* before freeing the pages.
*/
rcu_read_lock();
- spin_unlock(&mm->mmu_notifier_mm->lock);
+
/*
* exit_mmap will block in mmu_notifier_release to
* guarantee ->release is called before freeing the
@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
if (mn->ops->release)
mn->ops->release(mn, mm);
rcu_read_unlock();
- } else
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ hlist_del_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
+ }
/*
* Wait any running method to finish, of course including
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 6830eab5bf09..3cef80f6ac79 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -96,7 +96,7 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone)
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
lruvec->zone = zone;
#endif
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 21fed202ddad..cc06d0e48d05 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -260,7 +260,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
* If this were a serious issue, we'd add a flag to do_munmap().
*/
hiwater_vm = mm->hiwater_vm;
- mm->total_vm += new_len >> PAGE_SHIFT;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (do_munmap(mm, old_addr, old_len) < 0) {
@@ -497,7 +496,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
goto out;
}
- mm->total_vm += pages;
vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
if (vma->vm_flags & VM_LOCKED) {
mm->locked_vm += pages;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ac300c99baf6..198600861638 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -288,76 +288,93 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
}
#endif
+enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+ unsigned long totalpages, const nodemask_t *nodemask,
+ bool force_kill)
+{
+ if (task->exit_state)
+ return OOM_SCAN_CONTINUE;
+ if (oom_unkillable_task(task, NULL, nodemask))
+ return OOM_SCAN_CONTINUE;
+
+ /*
+ * This task already has access to memory reserves and is being killed.
+ * Don't allow any other task to have access to the reserves.
+ */
+ if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
+ if (unlikely(frozen(task)))
+ __thaw_task(task);
+ if (!force_kill)
+ return OOM_SCAN_ABORT;
+ }
+ if (!task->mm)
+ return OOM_SCAN_CONTINUE;
+
+ if (task->flags & PF_EXITING) {
+ /*
+ * If task is current and is in the process of releasing memory,
+ * allow the "kill" to set TIF_MEMDIE, which will allow it to
+ * access memory reserves. Otherwise, it may stall forever.
+ *
+ * The iteration isn't broken here, however, in case other
+ * threads are found to have already been oom killed.
+ */
+ if (task == current)
+ return OOM_SCAN_SELECT;
+ else if (!force_kill) {
+ /*
+ * If this task is not being ptraced on exit, then wait
+ * for it to finish before killing some other task
+ * unnecessarily.
+ */
+ if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
+ return OOM_SCAN_ABORT;
+ }
+ }
+ return OOM_SCAN_OK;
+}
+
/*
* Simple selection loop. We chose the process with the highest
- * number of 'points'. We expect the caller will lock the tasklist.
+ * number of 'points'.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
static struct task_struct *select_bad_process(unsigned int *ppoints,
- unsigned long totalpages, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, bool force_kill)
+ unsigned long totalpages, const nodemask_t *nodemask,
+ bool force_kill)
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
unsigned long chosen_points = 0;
+ rcu_read_lock();
do_each_thread(g, p) {
unsigned int points;
- if (p->exit_state)
- continue;
- if (oom_unkillable_task(p, memcg, nodemask))
- continue;
-
- /*
- * This task already has access to memory reserves and is
- * being killed. Don't allow any other task access to the
- * memory reserve.
- *
- * Note: this may have a chance of deadlock if it gets
- * blocked waiting for another task which itself is waiting
- * for memory. Is there a better alternative?
- */
- if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
- if (unlikely(frozen(p)))
- __thaw_task(p);
- if (!force_kill)
- return ERR_PTR(-1UL);
- }
- if (!p->mm)
+ switch (oom_scan_process_thread(p, totalpages, nodemask,
+ force_kill)) {
+ case OOM_SCAN_SELECT:
+ chosen = p;
+ chosen_points = ULONG_MAX;
+ /* fall through */
+ case OOM_SCAN_CONTINUE:
continue;
-
- if (p->flags & PF_EXITING) {
- /*
- * If p is the current task and is in the process of
- * releasing memory, we allow the "kill" to set
- * TIF_MEMDIE, which will allow it to gain access to
- * memory reserves. Otherwise, it may stall forever.
- *
- * The loop isn't broken here, however, in case other
- * threads are found to have already been oom killed.
- */
- if (p == current) {
- chosen = p;
- chosen_points = ULONG_MAX;
- } else if (!force_kill) {
- /*
- * If this task is not being ptraced on exit,
- * then wait for it to finish before killing
- * some other task unnecessarily.
- */
- if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
- return ERR_PTR(-1UL);
- }
- }
-
- points = oom_badness(p, memcg, nodemask, totalpages);
+ case OOM_SCAN_ABORT:
+ rcu_read_unlock();
+ return ERR_PTR(-1UL);
+ case OOM_SCAN_OK:
+ break;
+ };
+ points = oom_badness(p, NULL, nodemask, totalpages);
if (points > chosen_points) {
chosen = p;
chosen_points = points;
}
} while_each_thread(g, p);
+ if (chosen)
+ get_task_struct(chosen);
+ rcu_read_unlock();
*ppoints = chosen_points * 1000 / totalpages;
return chosen;
@@ -371,17 +388,16 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
* Dumps the current memory state of all eligible tasks. Tasks not in the same
* memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
* are not shown.
- * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
- * value, oom_score_adj value, and name.
- *
- * Call with tasklist_lock read-locked.
+ * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
+ * swapents, oom_score_adj value, and name.
*/
static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
struct task_struct *p;
struct task_struct *task;
- pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
+ pr_info("[ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name\n");
+ rcu_read_lock();
for_each_process(p) {
if (oom_unkillable_task(p, memcg, nodemask))
continue;
@@ -396,13 +412,15 @@ static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemas
continue;
}
- pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
+ pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu %5d %s\n",
task->pid, from_kuid(&init_user_ns, task_uid(task)),
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
- task_cpu(task), task->signal->oom_adj,
+ task->mm->nr_ptes,
+ get_mm_counter(task->mm, MM_SWAPENTS),
task->signal->oom_score_adj, task->comm);
task_unlock(task);
}
+ rcu_read_unlock();
}
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
@@ -423,10 +441,14 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
}
#define K(x) ((x) << (PAGE_SHIFT-10))
-static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
- unsigned int points, unsigned long totalpages,
- struct mem_cgroup *memcg, nodemask_t *nodemask,
- const char *message)
+/*
+ * Must be called while holding a reference to p, which will be released upon
+ * returning.
+ */
+void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+ const char *message)
{
struct task_struct *victim = p;
struct task_struct *child;
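With oom_kill_process() now exported, callers follow a pin-and-consume convention: they take a task reference before calling, and the function drops it on every return path (see the put_task_struct() calls added below). A hedged fragment of that convention, with the surrounding variables assumed:

	get_task_struct(victim);
	oom_kill_process(victim, gfp_mask, order, points, totalpages,
			 NULL, nodemask, "Out of memory");
	/* no put_task_struct() here - the callee consumed the reference */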
@@ -442,6 +464,7 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
*/
if (p->flags & PF_EXITING) {
set_tsk_thread_flag(p, TIF_MEMDIE);
+ put_task_struct(p);
return;
}
@@ -459,6 +482,7 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
* parent. This attempts to lose the minimal amount of work done while
* still freeing memory.
*/
+ read_lock(&tasklist_lock);
do {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
@@ -471,15 +495,26 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
child_points = oom_badness(child, memcg, nodemask,
totalpages);
if (child_points > victim_points) {
+ put_task_struct(victim);
victim = child;
victim_points = child_points;
+ get_task_struct(victim);
}
}
} while_each_thread(p, t);
+ read_unlock(&tasklist_lock);
- victim = find_lock_task_mm(victim);
- if (!victim)
+ rcu_read_lock();
+ p = find_lock_task_mm(victim);
+ if (!p) {
+ rcu_read_unlock();
+ put_task_struct(victim);
return;
+ } else if (victim != p) {
+ get_task_struct(p);
+ put_task_struct(victim);
+ victim = p;
+ }
/* mm cannot safely be dereferenced after task_unlock(victim) */
mm = victim->mm;
@@ -510,17 +545,19 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
task_unlock(p);
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
}
+ rcu_read_unlock();
set_tsk_thread_flag(victim, TIF_MEMDIE);
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
+ put_task_struct(victim);
}
#undef K
/*
* Determines whether the kernel must panic because of the panic_on_oom sysctl.
*/
-static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
- int order, const nodemask_t *nodemask)
+void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
+ int order, const nodemask_t *nodemask)
{
if (likely(!sysctl_panic_on_oom))
return;
@@ -533,42 +570,11 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
if (constraint != CONSTRAINT_NONE)
return;
}
- read_lock(&tasklist_lock);
dump_header(NULL, gfp_mask, order, NULL, nodemask);
- read_unlock(&tasklist_lock);
panic("Out of memory: %s panic_on_oom is enabled\n",
sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
- int order)
-{
- unsigned long limit;
- unsigned int points = 0;
- struct task_struct *p;
-
- /*
- * If current has a pending SIGKILL, then automatically select it. The
- * goal is to allow it to allocate so that it may quickly exit and free
- * its memory.
- */
- if (fatal_signal_pending(current)) {
- set_thread_flag(TIF_MEMDIE);
- return;
- }
-
- check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
- limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
- read_lock(&tasklist_lock);
- p = select_bad_process(&points, limit, memcg, NULL, false);
- if (p && PTR_ERR(p) != -1UL)
- oom_kill_process(p, gfp_mask, order, points, limit, memcg, NULL,
- "Memory cgroup out of memory");
- read_unlock(&tasklist_lock);
-}
-#endif
-
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
int register_oom_notifier(struct notifier_block *nb)
@@ -690,7 +696,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
struct task_struct *p;
unsigned long totalpages;
unsigned long freed = 0;
- unsigned int points;
+ unsigned int uninitialized_var(points);
enum oom_constraint constraint = CONSTRAINT_NONE;
int killed = 0;
@@ -718,22 +724,20 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
- read_lock(&tasklist_lock);
- if (sysctl_oom_kill_allocating_task &&
+ if (sysctl_oom_kill_allocating_task && current->mm &&
!oom_unkillable_task(current, NULL, nodemask) &&
- current->mm) {
+ current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
+ get_task_struct(current);
oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
nodemask,
"Out of memory (oom_kill_allocating_task)");
goto out;
}
- p = select_bad_process(&points, totalpages, NULL, mpol_mask,
- force_kill);
+ p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
/* Found nothing?!?! Either we hang forever, or we panic. */
if (!p) {
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
- read_unlock(&tasklist_lock);
panic("Out of memory and no killable processes...\n");
}
if (PTR_ERR(p) != -1UL) {
@@ -742,14 +746,12 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
killed = 1;
}
out:
- read_unlock(&tasklist_lock);
-
/*
- * Give "p" a good chance of killing itself before we
- * retry to allocate memory unless "p" is current
+ * Give the killed threads a good chance of exiting before trying to
+ * allocate memory again.
*/
- if (killed && !test_thread_flag(TIF_MEMDIE))
- schedule_timeout_uninterruptible(1);
+ if (killed)
+ schedule_timeout_killable(1);
}
/*
@@ -764,6 +766,5 @@ void pagefault_out_of_memory(void)
out_of_memory(NULL, 0, 0, NULL, false);
clear_system_oom();
}
- if (!test_thread_flag(TIF_MEMDIE))
- schedule_timeout_uninterruptible(1);
+ schedule_timeout_killable(1);
}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 93d8d2f7108c..5ad5ce23c1e0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -34,6 +34,7 @@
#include <linux/syscalls.h>
#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
#include <linux/pagevec.h>
+#include <linux/timer.h>
#include <trace/events/writeback.h>
/*
@@ -135,7 +136,20 @@ unsigned long global_dirty_limit;
* measured in page writeback completions.
*
*/
-static struct prop_descriptor vm_completions;
+static struct fprop_global writeout_completions;
+
+static void writeout_period(unsigned long t);
+/* Timer for aging of writeout_completions */
+static struct timer_list writeout_period_timer =
+ TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
+static unsigned long writeout_period_time = 0;
+
+/*
+ * Length of period for aging writeout fractions of bdis. This is an
+ * arbitrarily chosen number. The longer the period, the more slowly the
+ * fractions will reflect changes in the current writeout rate.
+ */
+#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
/*
* Work out the current dirty-memory clamping and background writeout
@@ -322,34 +336,6 @@ bool zone_dirty_ok(struct zone *zone)
zone_page_state(zone, NR_WRITEBACK) <= limit;
}
-/*
- * couple the period to the dirty_ratio:
- *
- * period/2 ~ roundup_pow_of_two(dirty limit)
- */
-static int calc_period_shift(void)
-{
- unsigned long dirty_total;
-
- if (vm_dirty_bytes)
- dirty_total = vm_dirty_bytes / PAGE_SIZE;
- else
- dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
- 100;
- return 2 + ilog2(dirty_total - 1);
-}
-
-/*
- * update the period when the dirty threshold changes.
- */
-static void update_completion_period(void)
-{
- int shift = calc_period_shift();
- prop_change_shift(&vm_completions, shift);
-
- writeback_set_ratelimit();
-}
-
int dirty_background_ratio_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -383,7 +369,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
- update_completion_period();
+ writeback_set_ratelimit();
vm_dirty_bytes = 0;
}
return ret;
@@ -398,12 +384,21 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
- update_completion_period();
+ writeback_set_ratelimit();
vm_dirty_ratio = 0;
}
return ret;
}
+static unsigned long wp_next_time(unsigned long cur_time)
+{
+ cur_time += VM_COMPLETIONS_PERIOD_LEN;
+ /* 0 has a special meaning... */
+ if (!cur_time)
+ return 1;
+ return cur_time;
+}
+
/*
* Increment the BDI's writeout completion count and the global writeout
* completion count. Called from test_clear_page_writeback().
@@ -411,8 +406,19 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
__inc_bdi_stat(bdi, BDI_WRITTEN);
- __prop_inc_percpu_max(&vm_completions, &bdi->completions,
- bdi->max_prop_frac);
+ __fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
+ bdi->max_prop_frac);
+ /* First event after period switching was turned off? */
+	if (unlikely(!writeout_period_time)) {
+ /*
+		 * We can race with other __bdi_writeout_inc calls here, but
+		 * it does no harm: the time at which the timer fires and the
+		 * value left in writeout_period_time will be roughly the same.
+ */
+ writeout_period_time = wp_next_time(jiffies);
+ mod_timer(&writeout_period_timer, writeout_period_time);
+ }
}
void bdi_writeout_inc(struct backing_dev_info *bdi)
@@ -431,11 +437,33 @@ EXPORT_SYMBOL_GPL(bdi_writeout_inc);
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
long *numerator, long *denominator)
{
- prop_fraction_percpu(&vm_completions, &bdi->completions,
+ fprop_fraction_percpu(&writeout_completions, &bdi->completions,
numerator, denominator);
}
/*
+ * On an idle system, we can be called long after we were scheduled because
+ * we use deferred timers, so account for missed periods.
+ */
+static void writeout_period(unsigned long t)
+{
+ int miss_periods = (jiffies - writeout_period_time) /
+ VM_COMPLETIONS_PERIOD_LEN;
+
+ if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
+ writeout_period_time = wp_next_time(writeout_period_time +
+ miss_periods * VM_COMPLETIONS_PERIOD_LEN);
+ mod_timer(&writeout_period_timer, writeout_period_time);
+ } else {
+ /*
+ * Aging has zeroed all fractions. Stop wasting CPU on period
+ * updates.
+ */
+ writeout_period_time = 0;
+ }
+}
+
+/*
* bdi_min_ratio keeps the sum of the minimum dirty shares of all
* registered backing devices, which, for obvious reasons, can not
* exceed 100%.
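The deferred-timer bookkeeping above can be modelled outside the kernel. The standalone program below (HZ and the sample times are arbitrary) shows how missed periods are counted and how wp_next_time() skips the reserved value 0, which marks the timer as disarmed:

	#include <stdio.h>

	#define HZ 250
	#define VM_COMPLETIONS_PERIOD_LEN (3 * HZ)

	static unsigned long wp_next_time(unsigned long cur_time)
	{
		cur_time += VM_COMPLETIONS_PERIOD_LEN;
		return cur_time ? cur_time : 1;	/* 0 is reserved for "disarmed" */
	}

	int main(void)
	{
		unsigned long period_time = 1000;	/* when the timer was armed */
		unsigned long now = period_time + 7 * VM_COMPLETIONS_PERIOD_LEN + 5;
		int miss_periods = (now - period_time) / VM_COMPLETIONS_PERIOD_LEN;

		/* age once for the elapsed period plus each missed one */
		printf("age %d period(s); next fire at %lu\n", miss_periods + 1,
		       wp_next_time(period_time + miss_periods * VM_COMPLETIONS_PERIOD_LEN));
		return 0;
	}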
@@ -475,7 +503,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
- bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
+ bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
}
spin_unlock_bh(&bdi_lock);
@@ -918,7 +946,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* bdi->dirty_ratelimit = balanced_dirty_ratelimit;
*
* However to get a more stable dirty_ratelimit, the below elaborated
- * code makes use of task_ratelimit to filter out sigular points and
+ * code makes use of task_ratelimit to filter out singular points and
* limit the step size.
*
* The below code essentially only uses the relative value of
@@ -941,7 +969,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
* feel and care are stable dirty rate and small position error.
*
* |task_ratelimit - dirty_ratelimit| is used to limit the step size
- * and filter out the sigular points of balanced_dirty_ratelimit. Which
+ * and filter out the singular points of balanced_dirty_ratelimit. Which
* keeps jumping around randomly and can even leap far away at times
* due to the small 200ms estimation period of dirty_rate (we want to
* keep that period small to reduce time lags).
@@ -1504,7 +1532,6 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
- bdi_arm_supers_timer();
return 0;
}
@@ -1606,13 +1633,10 @@ static struct notifier_block __cpuinitdata ratelimit_nb = {
*/
void __init page_writeback_init(void)
{
- int shift;
-
writeback_set_ratelimit();
register_cpu_notifier(&ratelimit_nb);
- shift = calc_period_shift();
- prop_descriptor_init(&vm_completions, shift);
+ fprop_global_init(&writeout_completions);
}
/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4a4f9219683f..c66fb875104a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -51,7 +51,6 @@
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
-#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
@@ -219,7 +218,12 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+/*
+ * NOTE:
+ * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
+ * Instead, use {un}set_pageblock_isolate.
+ */
+void set_pageblock_migratetype(struct page *page, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled))
@@ -954,7 +958,7 @@ static int move_freepages(struct zone *zone,
return pages_moved;
}
-static int move_freepages_block(struct zone *zone, struct page *page,
+int move_freepages_block(struct zone *zone, struct page *page,
int migratetype)
{
unsigned long start_pfn, end_pfn;
@@ -1158,8 +1162,10 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
to_drain = pcp->batch;
else
to_drain = pcp->count;
- free_pcppages_bulk(zone, to_drain, pcp);
- pcp->count -= to_drain;
+ if (to_drain > 0) {
+ free_pcppages_bulk(zone, to_drain, pcp);
+ pcp->count -= to_drain;
+ }
local_irq_restore(flags);
}
#endif
@@ -1529,16 +1535,16 @@ static int __init setup_fail_page_alloc(char *str)
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
-static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
if (order < fail_page_alloc.min_order)
- return 0;
+ return false;
if (gfp_mask & __GFP_NOFAIL)
- return 0;
+ return false;
if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
- return 0;
+ return false;
if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
- return 0;
+ return false;
return should_fail(&fail_page_alloc.attr, 1 << order);
}
@@ -1578,9 +1584,9 @@ late_initcall(fail_page_alloc_debugfs);
#else /* CONFIG_FAIL_PAGE_ALLOC */
-static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
- return 0;
+ return false;
}
#endif /* CONFIG_FAIL_PAGE_ALLOC */
@@ -1594,6 +1600,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
{
	/* free_pages may go negative - that's OK */
long min = mark;
+ long lowmem_reserve = z->lowmem_reserve[classzone_idx];
int o;
free_pages -= (1 << order) - 1;
@@ -1602,7 +1609,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
if (alloc_flags & ALLOC_HARDER)
min -= min / 4;
- if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+ if (free_pages <= min + lowmem_reserve)
return false;
for (o = 0; o < order; o++) {
/* At the next order, this order's pages become unavailable */
@@ -1617,6 +1624,20 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
return true;
}
+#ifdef CONFIG_MEMORY_ISOLATION
+static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
+{
+ if (unlikely(zone->nr_pageblock_isolate))
+ return zone->nr_pageblock_isolate * pageblock_nr_pages;
+ return 0;
+}
+#else
+static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
+{
+ return 0;
+}
+#endif
+
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
@@ -1632,6 +1653,14 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
+ /*
+	 * If the zone has MIGRATE_ISOLATE-type free pages, we should take
+	 * them into account. nr_zone_isolate_freepages is never exact, so
+	 * kswapd might not sleep even though it could. But that is more
+	 * desirable for memory hotplug than sleeping, which can cause a
+	 * livelock in the direct reclaim path.
+ */
+ free_pages -= nr_zone_isolate_freepages(z);
return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
free_pages);
}
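A small standalone model of the adjusted check above: free pages sitting in MIGRATE_ISOLATE pageblocks are discounted before the usual min + lowmem_reserve comparison. All numbers are invented, and the real __zone_watermark_ok() also walks the higher orders, which is omitted here:

	#include <stdbool.h>
	#include <stdio.h>

	static bool watermark_ok(long free_pages, long min, long lowmem_reserve,
				 long isolated_free)
	{
		/* isolated pages are free but not allocatable, so don't count them */
		free_pages -= isolated_free;
		return free_pages > min + lowmem_reserve;
	}

	int main(void)
	{
		printf("%d\n", watermark_ok(1024, 512, 256, 512));	/* 0: below mark */
		printf("%d\n", watermark_ok(1024, 512, 256, 0));	/* 1: above mark */
		return 0;
	}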
@@ -1899,6 +1928,17 @@ this_zone_full:
zlc_active = 0;
goto zonelist_scan;
}
+
+ if (page)
+ /*
+ * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
+ * necessary to allocate the page. The expectation is
+ * that the caller is taking steps that will free more
+ * memory. The caller should avoid the page being used
+ * for !PFMEMALLOC purposes.
+ */
+ page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+
return page;
}
@@ -2062,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
- bool *deferred_compaction,
+ bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
struct page *page;
@@ -2077,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
- nodemask, sync_migration);
+ nodemask, sync_migration,
+ contended_compaction);
current->flags &= ~PF_MEMALLOC;
if (*did_some_progress != COMPACT_SKIPPED) {
@@ -2087,8 +2128,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
page = get_page_from_freelist(gfp_mask, nodemask,
order, zonelist, high_zoneidx,
- alloc_flags, preferred_zone,
- migratetype);
+ alloc_flags & ~ALLOC_NO_WATERMARKS,
+ preferred_zone, migratetype);
if (page) {
preferred_zone->compact_considered = 0;
preferred_zone->compact_defer_shift = 0;
@@ -2123,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
int migratetype, bool sync_migration,
- bool *deferred_compaction,
+ bool *contended_compaction, bool *deferred_compaction,
unsigned long *did_some_progress)
{
return NULL;
@@ -2180,8 +2221,8 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
retry:
page = get_page_from_freelist(gfp_mask, nodemask, order,
zonelist, high_zoneidx,
- alloc_flags, preferred_zone,
- migratetype);
+ alloc_flags & ~ALLOC_NO_WATERMARKS,
+ preferred_zone, migratetype);
/*
* If an allocation failed after direct reclaim, it could be because
@@ -2265,15 +2306,24 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
alloc_flags |= ALLOC_HARDER;
if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
- if (!in_interrupt() &&
- ((current->flags & PF_MEMALLOC) ||
- unlikely(test_thread_flag(TIF_MEMDIE))))
+ if (gfp_mask & __GFP_MEMALLOC)
+ alloc_flags |= ALLOC_NO_WATERMARKS;
+ else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
+ alloc_flags |= ALLOC_NO_WATERMARKS;
+ else if (!in_interrupt() &&
+ ((current->flags & PF_MEMALLOC) ||
+ unlikely(test_thread_flag(TIF_MEMDIE))))
alloc_flags |= ALLOC_NO_WATERMARKS;
}
return alloc_flags;
}
+bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
+{
+ return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
+}
+
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
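gfp_pfmemalloc_allowed() lets other subsystems ask whether a given allocation context would be entitled to the no-watermark reserves (e.g. __GFP_MEMALLOC callers, or PF_MEMALLOC softirq context). A hedged sketch of a consumer; is_obj_pfmemalloc() is an assumed predicate, not part of this patch:

	static bool example_may_use_object(void *obj, gfp_t gfp_mask)
	{
		if (!is_obj_pfmemalloc(obj))	/* assumed helper */
			return true;
		/* reserve-backed objects only for callers entitled to the reserves */
		return gfp_pfmemalloc_allowed(gfp_mask);
	}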
@@ -2287,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned long did_some_progress;
bool sync_migration = false;
bool deferred_compaction = false;
+ bool contended_compaction = false;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -2340,11 +2391,19 @@ rebalance:
/* Allocate without watermarks if the context allows */
if (alloc_flags & ALLOC_NO_WATERMARKS) {
+ /*
+ * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds that
+ * the allocation is high priority and these types of
+ * allocations are system rather than user oriented

+ */
+ zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
page = __alloc_pages_high_priority(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
- if (page)
+ if (page) {
goto got_pg;
+ }
}
/* Atomic allocations - we can't balance anything */
@@ -2368,6 +2427,7 @@ rebalance:
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
+ &contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
@@ -2377,10 +2437,11 @@ rebalance:
/*
* If compaction is deferred for high-order allocations, it is because
* sync compaction recently failed. In this is the case and the caller
- * has requested the system not be heavily disrupted, fail the
- * allocation now instead of entering direct reclaim
+ * requested a movable allocation that does not heavily disrupt the
+ * system then fail the allocation instead of entering direct reclaim.
*/
- if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+ if ((deferred_compaction || contended_compaction) &&
+ (gfp_mask & __GFP_NO_KSWAPD))
goto nopage;
/* Try direct reclaim and then allocating */
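
The hunk above widens the early bail-out: a compaction attempt that aborted because its locks were contended is now treated like deferred compaction for callers that asked not to wake kswapd. A small model of the predicate follows, with an invented flag value standing in for __GFP_NO_KSWAPD.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag; not the kernel's __GFP_NO_KSWAPD value. */
#define NO_KSWAPD (1u << 0)

/*
 * Model of the widened bail-out: previously only deferred compaction made
 * a __GFP_NO_KSWAPD allocation give up before direct reclaim; a contended
 * compaction run now does too.
 */
static bool should_give_up(bool deferred, bool contended, unsigned gfp)
{
    return (deferred || contended) && (gfp & NO_KSWAPD);
}

int main(void)
{
    printf("%d %d %d\n",
           should_give_up(true,  false, NO_KSWAPD),   /* 1: old behaviour */
           should_give_up(false, true,  NO_KSWAPD),   /* 1: new case */
           should_give_up(false, true,  0));          /* 0: caller will reclaim */
    return 0;
}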
@@ -2451,6 +2512,7 @@ rebalance:
nodemask,
alloc_flags, preferred_zone,
migratetype, sync_migration,
+ &contended_compaction,
&deferred_compaction,
&did_some_progress);
if (page)
@@ -2463,8 +2525,8 @@ nopage:
got_pg:
if (kmemcheck_enabled)
kmemcheck_pagealloc_alloc(page, order, gfp_mask);
- return page;
+ return page;
}
/*
@@ -3030,7 +3092,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
user_zonelist_order = oldval;
} else if (oldval != user_zonelist_order) {
mutex_lock(&zonelists_mutex);
- build_all_zonelists(NULL);
+ build_all_zonelists(NULL, NULL);
mutex_unlock(&zonelists_mutex);
}
}
@@ -3409,14 +3471,21 @@ static void setup_zone_pageset(struct zone *zone);
DEFINE_MUTEX(zonelists_mutex);
/* return values int ....just for stop_machine() */
-static __init_refok int __build_all_zonelists(void *data)
+static int __build_all_zonelists(void *data)
{
int nid;
int cpu;
+ pg_data_t *self = data;
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
#endif
+
+ if (self && !node_online(self->node_id)) {
+ build_zonelists(self);
+ build_zonelist_cache(self);
+ }
+
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
@@ -3461,7 +3530,7 @@ static __init_refok int __build_all_zonelists(void *data)
* Called with zonelists_mutex held always
* unless system_state == SYSTEM_BOOTING.
*/
-void __ref build_all_zonelists(void *data)
+void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
{
set_zonelist_order();
@@ -3473,10 +3542,10 @@ void __ref build_all_zonelists(void *data)
/* we have to stop all cpus to guarantee there is no user
of zonelist */
#ifdef CONFIG_MEMORY_HOTPLUG
- if (data)
- setup_zone_pageset((struct zone *)data);
+ if (zone)
+ setup_zone_pageset(zone);
#endif
- stop_machine(__build_all_zonelists, NULL, NULL);
+ stop_machine(__build_all_zonelists, pgdat, NULL);
/* cpuset refresh routine should be here */
}
vm_total_pages = nr_free_pagecache_pages();
@@ -3746,7 +3815,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
#endif
-static int zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
int batch;
@@ -3828,7 +3897,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
pcp->batch = PAGE_SHIFT * 8;
}
-static void setup_zone_pageset(struct zone *zone)
+static void __meminit setup_zone_pageset(struct zone *zone)
{
int cpu;
@@ -3901,32 +3970,6 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
return 0;
}
-static int __zone_pcp_update(void *data)
-{
- struct zone *zone = data;
- int cpu;
- unsigned long batch = zone_batchsize(zone), flags;
-
- for_each_possible_cpu(cpu) {
- struct per_cpu_pageset *pset;
- struct per_cpu_pages *pcp;
-
- pset = per_cpu_ptr(zone->pageset, cpu);
- pcp = &pset->pcp;
-
- local_irq_save(flags);
- free_pcppages_bulk(zone, pcp->count, pcp);
- setup_pageset(pset, batch);
- local_irq_restore(flags);
- }
- return 0;
-}
-
-void zone_pcp_update(struct zone *zone)
-{
- stop_machine(__zone_pcp_update, zone, NULL);
-}
-
static __meminit void zone_pcp_init(struct zone *zone)
{
/*
@@ -3942,7 +3985,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
zone_batchsize(zone));
}
-__meminit int init_currently_empty_zone(struct zone *zone,
+int __meminit init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn,
unsigned long size,
enum memmap_context context)
@@ -4301,7 +4344,7 @@ static inline void setup_usemap(struct pglist_data *pgdat,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(void)
+void __init set_pageblock_order(void)
{
unsigned int order;
@@ -4329,7 +4372,7 @@ static inline void __init set_pageblock_order(void)
* include/linux/pageblock-flags.h for the values of pageblock_order based on
* the kernel config
*/
-static inline void set_pageblock_order(void)
+void __init set_pageblock_order(void)
{
}
@@ -4340,6 +4383,8 @@ static inline void set_pageblock_order(void)
* - mark all pages reserved
* - mark all memory queues empty
* - clear the memory bitmaps
+ *
+ * NOTE: pgdat should be zeroed by the caller.
*/
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
unsigned long *zones_size, unsigned long *zholes_size)
@@ -4350,9 +4395,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
int ret;
pgdat_resize_init(pgdat);
- pgdat->nr_zones = 0;
init_waitqueue_head(&pgdat->kswapd_wait);
- pgdat->kswapd_max_order = 0;
+ init_waitqueue_head(&pgdat->pfmemalloc_wait);
pgdat_page_cgroup_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
@@ -4394,6 +4438,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone->spanned_pages = size;
zone->present_pages = realsize;
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ zone->compact_cached_free_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
+ zone->compact_cached_free_pfn &= ~(pageblock_nr_pages-1);
+#endif
#ifdef CONFIG_NUMA
zone->node = nid;
zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
@@ -4408,8 +4457,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone_pcp_init(zone);
lruvec_init(&zone->lruvec, zone);
- zap_zone_vm_stats(zone);
- zone->flags = 0;
if (!size)
continue;
@@ -4469,6 +4516,9 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
{
pg_data_t *pgdat = NODE_DATA(nid);
+ /* pg_data_t should be reset to zero when it's allocated */
+ WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
+
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
calculate_node_totalpages(pgdat, zones_size, zholes_size);
@@ -4750,7 +4800,7 @@ out:
}
/* Any regular memory on that node ? */
-static void check_for_regular_memory(pg_data_t *pgdat)
+static void __init check_for_regular_memory(pg_data_t *pgdat)
{
#ifdef CONFIG_HIGHMEM
enum zone_type zone_type;
@@ -5468,26 +5518,27 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
}
/*
- * This is designed as sub function...plz see page_isolation.c also.
- * set/clear page block's type to be ISOLATE.
- * page allocater never alloc memory from ISOLATE block.
+ * This function checks whether pageblock includes unmovable pages or not.
+ * If @count is not zero, it is okay to include up to @count unmovable pages
+ *
+ * The PageLRU check without isolation or lru_lock could race so that
+ * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
+ * expect this function to be exact.
*/
-
-static int
-__count_immobile_pages(struct zone *zone, struct page *page, int count)
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
{
unsigned long pfn, iter, found;
int mt;
/*
* For avoiding noise data, lru_add_drain_all() should be called
- * If ZONE_MOVABLE, the zone never contains immobile pages
+ * If ZONE_MOVABLE, the zone never contains unmovable pages
*/
if (zone_idx(zone) == ZONE_MOVABLE)
- return true;
+ return false;
mt = get_pageblock_migratetype(page);
if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
- return true;
+ return false;
pfn = page_to_pfn(page);
for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
@@ -5497,11 +5548,18 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
continue;
page = pfn_to_page(check);
- if (!page_count(page)) {
+ /*
+ * We can't use page_count without pinning the page
+ * because another CPU can free the compound page.
+ * This check already skips compound tails of THP
+ * because their page->_count is zero at all times.
+ */
+ if (!atomic_read(&page->_count)) {
if (PageBuddy(page))
iter += (1 << page_order(page)) - 1;
continue;
}
+
if (!PageLRU(page))
found++;
/*
@@ -5518,9 +5576,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
* page at boot.
*/
if (found > count)
- return false;
+ return true;
}
- return true;
+ return false;
}
bool is_pageblock_removable_nolock(struct page *page)
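
The old __count_immobile_pages() returned true when a block was safe to isolate; the renamed has_unmovable_pages() returns true when more than @count pages are neither free nor on the LRU, so every caller's test is inverted. The following is a small userspace model of that counting rule, using an invented page descriptor rather than struct page.

#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool free; bool on_lru; };

/*
 * Model of the inverted semantics: return true once more than @count
 * pages in the block are neither free nor on the LRU. Fields and data
 * are invented for illustration only.
 */
static bool model_has_unmovable_pages(const struct fake_page *blk, int n, int count)
{
    int found = 0;

    for (int i = 0; i < n; i++) {
        if (blk[i].free)
            continue;           /* free pages are trivially movable */
        if (!blk[i].on_lru)
            found++;            /* pinned/kernel page: unmovable */
        if (found > count)
            return true;
    }
    return false;
}

int main(void)
{
    struct fake_page blk[4] = { { true, false }, { false, true },
                                { false, false }, { false, true } };

    /* one unmovable page, tolerance 0 -> the block has unmovable pages */
    printf("%d\n", model_has_unmovable_pages(blk, 4, 0)); /* prints 1 */
    /* tolerance 1 -> still considered isolatable */
    printf("%d\n", model_has_unmovable_pages(blk, 4, 1)); /* prints 0 */
    return 0;
}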
@@ -5544,77 +5602,7 @@ bool is_pageblock_removable_nolock(struct page *page)
zone->zone_start_pfn + zone->spanned_pages <= pfn)
return false;
- return __count_immobile_pages(zone, page, 0);
-}
-
-int set_migratetype_isolate(struct page *page)
-{
- struct zone *zone;
- unsigned long flags, pfn;
- struct memory_isolate_notify arg;
- int notifier_ret;
- int ret = -EBUSY;
-
- zone = page_zone(page);
-
- spin_lock_irqsave(&zone->lock, flags);
-
- pfn = page_to_pfn(page);
- arg.start_pfn = pfn;
- arg.nr_pages = pageblock_nr_pages;
- arg.pages_found = 0;
-
- /*
- * It may be possible to isolate a pageblock even if the
- * migratetype is not MIGRATE_MOVABLE. The memory isolation
- * notifier chain is used by balloon drivers to return the
- * number of pages in a range that are held by the balloon
- * driver to shrink memory. If all the pages are accounted for
- * by balloons, are free, or on the LRU, isolation can continue.
- * Later, for example, when memory hotplug notifier runs, these
- * pages reported as "can be isolated" should be isolated(freed)
- * by the balloon driver through the memory notifier chain.
- */
- notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
- notifier_ret = notifier_to_errno(notifier_ret);
- if (notifier_ret)
- goto out;
- /*
- * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
- * We just check MOVABLE pages.
- */
- if (__count_immobile_pages(zone, page, arg.pages_found))
- ret = 0;
-
- /*
- * immobile means "not-on-lru" paes. If immobile is larger than
- * removable-by-driver pages reported by notifier, we'll fail.
- */
-
-out:
- if (!ret) {
- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
- move_freepages_block(zone, page, MIGRATE_ISOLATE);
- }
-
- spin_unlock_irqrestore(&zone->lock, flags);
- if (!ret)
- drain_all_pages();
- return ret;
-}
-
-void unset_migratetype_isolate(struct page *page, unsigned migratetype)
-{
- struct zone *zone;
- unsigned long flags;
- zone = page_zone(page);
- spin_lock_irqsave(&zone->lock, flags);
- if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
- goto out;
- set_pageblock_migratetype(page, migratetype);
- move_freepages_block(zone, page, migratetype);
-out:
- spin_unlock_irqrestore(&zone->lock, flags);
+ return !has_unmovable_pages(zone, page, 0);
}
#ifdef CONFIG_CMA
@@ -5869,7 +5857,49 @@ void free_contig_range(unsigned long pfn, unsigned nr_pages)
}
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __meminit __zone_pcp_update(void *data)
+{
+ struct zone *zone = data;
+ int cpu;
+ unsigned long batch = zone_batchsize(zone), flags;
+
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+ pset = per_cpu_ptr(zone->pageset, cpu);
+ pcp = &pset->pcp;
+
+ local_irq_save(flags);
+ if (pcp->count > 0)
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ setup_pageset(pset, batch);
+ local_irq_restore(flags);
+ }
+ return 0;
+}
+
+void __meminit zone_pcp_update(struct zone *zone)
+{
+ stop_machine(__zone_pcp_update, zone, NULL);
+}
+#endif
+
#ifdef CONFIG_MEMORY_HOTREMOVE
+void zone_pcp_reset(struct zone *zone)
+{
+ unsigned long flags;
+
+ /* avoid races with drain_pages() */
+ local_irq_save(flags);
+ if (zone->pageset != &boot_pageset) {
+ free_percpu(zone->pageset);
+ zone->pageset = &boot_pageset;
+ }
+ local_irq_restore(flags);
+}
+
/*
* All pages in the range must be isolated before calling this.
*/
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index eb750f851395..5ddad0c6daa6 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -317,7 +317,7 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#ifdef CONFIG_MEMCG_SWAP
static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
diff --git a/mm/page_io.c b/mm/page_io.c
index 34f02923744c..78eee32ee486 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -17,6 +17,7 @@
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
+#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <asm/pgtable.h>
@@ -86,6 +87,98 @@ void end_swap_bio_read(struct bio *bio, int err)
bio_put(bio);
}
+int generic_swapfile_activate(struct swap_info_struct *sis,
+ struct file *swap_file,
+ sector_t *span)
+{
+ struct address_space *mapping = swap_file->f_mapping;
+ struct inode *inode = mapping->host;
+ unsigned blocks_per_page;
+ unsigned long page_no;
+ unsigned blkbits;
+ sector_t probe_block;
+ sector_t last_block;
+ sector_t lowest_block = -1;
+ sector_t highest_block = 0;
+ int nr_extents = 0;
+ int ret;
+
+ blkbits = inode->i_blkbits;
+ blocks_per_page = PAGE_SIZE >> blkbits;
+
+ /*
+ * Map all the blocks into the extent list. This code doesn't try
+ * to be very smart.
+ */
+ probe_block = 0;
+ page_no = 0;
+ last_block = i_size_read(inode) >> blkbits;
+ while ((probe_block + blocks_per_page) <= last_block &&
+ page_no < sis->max) {
+ unsigned block_in_page;
+ sector_t first_block;
+
+ first_block = bmap(inode, probe_block);
+ if (first_block == 0)
+ goto bad_bmap;
+
+ /*
+ * It must be PAGE_SIZE aligned on-disk
+ */
+ if (first_block & (blocks_per_page - 1)) {
+ probe_block++;
+ goto reprobe;
+ }
+
+ for (block_in_page = 1; block_in_page < blocks_per_page;
+ block_in_page++) {
+ sector_t block;
+
+ block = bmap(inode, probe_block + block_in_page);
+ if (block == 0)
+ goto bad_bmap;
+ if (block != first_block + block_in_page) {
+ /* Discontiguity */
+ probe_block++;
+ goto reprobe;
+ }
+ }
+
+ first_block >>= (PAGE_SHIFT - blkbits);
+ if (page_no) { /* exclude the header page */
+ if (first_block < lowest_block)
+ lowest_block = first_block;
+ if (first_block > highest_block)
+ highest_block = first_block;
+ }
+
+ /*
+ * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
+ */
+ ret = add_swap_extent(sis, page_no, 1, first_block);
+ if (ret < 0)
+ goto out;
+ nr_extents += ret;
+ page_no++;
+ probe_block += blocks_per_page;
+reprobe:
+ continue;
+ }
+ ret = nr_extents;
+ *span = 1 + highest_block - lowest_block;
+ if (page_no == 0)
+ page_no = 1; /* force Empty message */
+ sis->max = page_no;
+ sis->pages = page_no - 1;
+ sis->highest_bit = page_no - 1;
+out:
+ return ret;
+bad_bmap:
+ printk(KERN_ERR "swapon: swapfile has holes\n");
+ ret = -EINVAL;
+ goto out;
+}
+
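
generic_swapfile_activate() accepts a page-sized run of blocks only if the first block is PAGE_SIZE-aligned on disk and the remaining blocks are physically contiguous; a hole (bmap() returning 0) aborts swapon altogether, while the other two cases merely skip to the next probe block. A small model of those per-page checks is sketched below, with made-up block numbers and a flat array in place of bmap().

#include <stdio.h>

/*
 * Model of the per-page checks: a hole aborts swapon, a misaligned or
 * discontiguous run is skipped, an aligned contiguous run becomes a
 * PAGE_SIZE-sized swap extent. Block numbers are invented sample data.
 */
enum run_status { RUN_OK, RUN_REPROBE, RUN_HOLE };

static enum run_status classify_run(const unsigned long *blkmap,
                                    unsigned long probe,
                                    unsigned blocks_per_page)
{
    unsigned long first = blkmap[probe];

    if (first == 0)
        return RUN_HOLE;                /* "swapon: swapfile has holes" */
    if (first & (blocks_per_page - 1))
        return RUN_REPROBE;             /* not PAGE_SIZE aligned on disk */
    for (unsigned i = 1; i < blocks_per_page; i++) {
        if (blkmap[probe + i] == 0)
            return RUN_HOLE;
        if (blkmap[probe + i] != first + i)
            return RUN_REPROBE;         /* discontiguity */
    }
    return RUN_OK;                      /* usable extent */
}

int main(void)
{
    /* 4K pages on a 1K-block filesystem: 4 blocks per page */
    unsigned long map[8] = { 40, 41, 42, 43,    /* aligned + contiguous */
                             50, 51, 99, 53 };  /* misaligned run */

    printf("%d %d\n", classify_run(map, 0, 4), classify_run(map, 4, 4));
    /* prints "0 1" */
    return 0;
}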
/*
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
@@ -94,6 +187,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
{
struct bio *bio;
int ret = 0, rw = WRITE;
+ struct swap_info_struct *sis = page_swap_info(page);
if (try_to_free_swap(page)) {
unlock_page(page);
@@ -105,6 +199,33 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
end_page_writeback(page);
goto out;
}
+
+ if (sis->flags & SWP_FILE) {
+ struct kiocb kiocb;
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+ struct iovec iov = {
+ .iov_base = kmap(page),
+ .iov_len = PAGE_SIZE,
+ };
+
+ init_sync_kiocb(&kiocb, swap_file);
+ kiocb.ki_pos = page_file_offset(page);
+ kiocb.ki_left = PAGE_SIZE;
+ kiocb.ki_nbytes = PAGE_SIZE;
+
+ unlock_page(page);
+ ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
+ &kiocb, &iov,
+ kiocb.ki_pos, 1);
+ kunmap(page);
+ if (ret == PAGE_SIZE) {
+ count_vm_event(PSWPOUT);
+ ret = 0;
+ }
+ return ret;
+ }
+
bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
@@ -126,6 +247,7 @@ int swap_readpage(struct page *page)
{
struct bio *bio;
int ret = 0;
+ struct swap_info_struct *sis = page_swap_info(page);
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
@@ -134,6 +256,17 @@ int swap_readpage(struct page *page)
unlock_page(page);
goto out;
}
+
+ if (sis->flags & SWP_FILE) {
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+
+ ret = mapping->a_ops->readpage(swap_file, page);
+ if (!ret)
+ count_vm_event(PSWPIN);
+ return ret;
+ }
+
bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
@@ -145,3 +278,15 @@ int swap_readpage(struct page *page)
out:
return ret;
}
+
+int swap_set_page_dirty(struct page *page)
+{
+ struct swap_info_struct *sis = page_swap_info(page);
+
+ if (sis->flags & SWP_FILE) {
+ struct address_space *mapping = sis->swap_file->f_mapping;
+ return mapping->a_ops->set_page_dirty(page);
+ } else {
+ return __set_page_dirty_no_writeback(page);
+ }
+}
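
With SWP_FILE set, swap_readpage(), swap_writepage() and swap_set_page_dirty() above hand the work to the backing file's address_space operations instead of building a bio. A userspace model of that dispatch is sketched below; the structs and operations are invented stand-ins, not the kernel's types.

#include <stdio.h>

struct fake_aops {
    int (*readpage)(void);
    int (*direct_io_write)(void);
};

struct fake_swapinfo {
    int file_backed;                  /* plays the role of SWP_FILE */
    const struct fake_aops *aops;
};

static int file_read(void)  { puts("swap_readpage  -> a_ops->readpage");  return 0; }
static int file_write(void) { puts("swap_writepage -> a_ops->direct_IO"); return 0; }

static const struct fake_aops file_aops = { file_read, file_write };

/* Model of the new dispatch: file-backed swap bypasses the bio path. */
static int model_swap_readpage(const struct fake_swapinfo *sis)
{
    if (sis->file_backed)
        return sis->aops->readpage();
    puts("swap_readpage  -> bio to the block device");
    return 0;
}

static int model_swap_writepage(const struct fake_swapinfo *sis)
{
    if (sis->file_backed)
        return sis->aops->direct_io_write();
    puts("swap_writepage -> bio to the block device");
    return 0;
}

int main(void)
{
    struct fake_swapinfo file_swap = { 1, &file_aops };
    struct fake_swapinfo blk_swap  = { 0, 0 };

    model_swap_readpage(&file_swap);
    model_swap_writepage(&file_swap);
    model_swap_readpage(&blk_swap);
    model_swap_writepage(&blk_swap);
    return 0;
}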
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index c9f04774f2b8..247d1f175739 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -5,8 +5,101 @@
#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
+#include <linux/memory.h>
#include "internal.h"
+/* called while holding zone->lock */
+static void set_pageblock_isolate(struct page *page)
+{
+ if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
+ return;
+
+ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+ page_zone(page)->nr_pageblock_isolate++;
+}
+
+/* called while holding zone->lock */
+static void restore_pageblock_isolate(struct page *page, int migratetype)
+{
+ struct zone *zone = page_zone(page);
+ if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
+ return;
+
+ BUG_ON(zone->nr_pageblock_isolate <= 0);
+ set_pageblock_migratetype(page, migratetype);
+ zone->nr_pageblock_isolate--;
+}
+
+int set_migratetype_isolate(struct page *page)
+{
+ struct zone *zone;
+ unsigned long flags, pfn;
+ struct memory_isolate_notify arg;
+ int notifier_ret;
+ int ret = -EBUSY;
+
+ zone = page_zone(page);
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ pfn = page_to_pfn(page);
+ arg.start_pfn = pfn;
+ arg.nr_pages = pageblock_nr_pages;
+ arg.pages_found = 0;
+
+ /*
+ * It may be possible to isolate a pageblock even if the
+ * migratetype is not MIGRATE_MOVABLE. The memory isolation
+ * notifier chain is used by balloon drivers to return the
+ * number of pages in a range that are held by the balloon
+ * driver to shrink memory. If all the pages are accounted for
+ * by balloons, are free, or on the LRU, isolation can continue.
+ * Later, for example, when memory hotplug notifier runs, these
+ * pages reported as "can be isolated" should be isolated(freed)
+ * by the balloon driver through the memory notifier chain.
+ */
+ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+ notifier_ret = notifier_to_errno(notifier_ret);
+ if (notifier_ret)
+ goto out;
+ /*
+ * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+ * We just check MOVABLE pages.
+ */
+ if (!has_unmovable_pages(zone, page, arg.pages_found))
+ ret = 0;
+
+ /*
+ * immobile means "not-on-lru" pages. If immobile is larger than
+ * removable-by-driver pages reported by notifier, we'll fail.
+ */
+
+out:
+ if (!ret) {
+ set_pageblock_isolate(page);
+ move_freepages_block(zone, page, MIGRATE_ISOLATE);
+ }
+
+ spin_unlock_irqrestore(&zone->lock, flags);
+ if (!ret)
+ drain_all_pages();
+ return ret;
+}
+
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+{
+ struct zone *zone;
+ unsigned long flags;
+ zone = page_zone(page);
+ spin_lock_irqsave(&zone->lock, flags);
+ if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ goto out;
+ move_freepages_block(zone, page, migratetype);
+ restore_pageblock_isolate(page, migratetype);
+out:
+ spin_unlock_irqrestore(&zone->lock, flags);
+}
+
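
set_pageblock_isolate() and restore_pageblock_isolate() exist to keep zone->nr_pageblock_isolate in step with the number of ISOLATE pageblocks, including the already-isolated and never-isolated corner cases. A minimal model of that invariant follows, with illustrative types rather than kernel structures.

#include <assert.h>
#include <stdio.h>

enum mt { MOVABLE, ISOLATE };

struct zone_model  { int nr_pageblock_isolate; };
struct block_model { enum mt type; };

/* Model of set_pageblock_isolate(): never double count a block. */
static void set_isolate(struct zone_model *z, struct block_model *b)
{
    if (b->type == ISOLATE)
        return;
    b->type = ISOLATE;
    z->nr_pageblock_isolate++;
}

/* Model of restore_pageblock_isolate(): only isolated blocks decrement. */
static void restore(struct zone_model *z, struct block_model *b, enum mt newtype)
{
    if (b->type != ISOLATE)
        return;                     /* the kernel WARNs here */
    assert(z->nr_pageblock_isolate > 0);
    b->type = newtype;
    z->nr_pageblock_isolate--;
}

int main(void)
{
    struct zone_model  z = { 0 };
    struct block_model b = { MOVABLE };

    set_isolate(&z, &b);
    set_isolate(&z, &b);            /* second call is a no-op */
    restore(&z, &b, MOVABLE);
    printf("isolated blocks: %d\n", z.nr_pageblock_isolate); /* 0 */
    return 0;
}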
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
diff --git a/mm/shmem.c b/mm/shmem.c
index c15b998e5a86..d4e184e2a38e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -929,7 +929,8 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = index;
+ /* Bias interleave by inode number to distribute better across nodes */
+ pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = spol;
return swapin_readahead(swap, gfp, &pvma, 0);
@@ -942,7 +943,8 @@ static struct page *shmem_alloc_page(gfp_t gfp,
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
- pvma.vm_pgoff = index;
+ /* Bias interleave by inode number to distribute better across nodes */
+ pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
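
Adding the inode number to vm_pgoff changes which node the interleave policy selects for a given page index, so the first pages of different shmem files no longer all land on the same node. A toy calculation is shown below, using a simple modulo in place of the kernel's policy code.

#include <stdio.h>

/*
 * Model of the interleave bias: with pure index interleaving every shmem
 * file puts page 0 on node 0; offsetting by the inode number spreads the
 * first pages of different files across nodes.
 */
static int interleave_node(unsigned long pgoff, unsigned long ino, int nnodes)
{
    return (int)((pgoff + ino) % nnodes);
}

int main(void)
{
    int nnodes = 4;

    for (unsigned long ino = 100; ino < 104; ino++)
        printf("inode %lu, page 0 -> node %d\n",
               ino, interleave_node(0, ino, nnodes));
    /* without the bias (ino treated as 0) all four would land on node 0 */
    return 0;
}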
diff --git a/mm/slab.c b/mm/slab.c
index e901a36e2520..811af03a14ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
* Further notes from the original documentation:
*
* 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
+ * The global cache-chain is protected by the mutex 'slab_mutex'.
* The sem is only needed when accessing/extending the cache-chain, which
* can never happen inside an interrupt (kmem_cache_create(),
* kmem_cache_shrink() and kmem_cache_reap()).
@@ -87,6 +87,7 @@
*/
#include <linux/slab.h>
+#include "slab.h"
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
@@ -117,12 +118,16 @@
#include <linux/memory.h>
#include <linux/prefetch.h>
+#include <net/sock.h>
+
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <trace/events/kmem.h>
+#include "internal.h"
+
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
@@ -151,6 +156,12 @@
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
+/*
+ * true if a page was allocated from pfmemalloc reserves for network-based
+ * swap
+ */
+static bool pfmemalloc_active __read_mostly;
+
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK (SLAB_RED_ZONE | \
@@ -256,9 +267,30 @@ struct array_cache {
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
* the entries.
+ *
+ * Entries should not be directly dereferenced as
+ * entries belonging to slabs marked pfmemalloc will
+ * have the low bit SLAB_OBJ_PFMEMALLOC set
*/
};
+#define SLAB_OBJ_PFMEMALLOC 1
+static inline bool is_obj_pfmemalloc(void *objp)
+{
+ return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
+}
+
+static inline void set_obj_pfmemalloc(void **objp)
+{
+ *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
+ return;
+}
+
+static inline void clear_obj_pfmemalloc(void **objp)
+{
+ *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
+}
+
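
The SLAB_OBJ_PFMEMALLOC helpers reuse bit 0 of an object pointer as a flag, which is safe because slab objects are at least word aligned. A self-contained userspace sketch of the same tagging trick follows; the names are invented for illustration, not kernel API.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define OBJ_TAG 1UL  /* plays the role of SLAB_OBJ_PFMEMALLOC */

static bool obj_is_tagged(void *p)  { return (uintptr_t)p & OBJ_TAG; }
static void obj_set_tag(void **p)   { *p = (void *)((uintptr_t)*p | OBJ_TAG); }
static void obj_clear_tag(void **p) { *p = (void *)((uintptr_t)*p & ~OBJ_TAG); }

int main(void)
{
    /* malloc() returns at least word-aligned memory, so bit 0 is free. */
    void *obj = malloc(64);

    assert(((uintptr_t)obj & OBJ_TAG) == 0);

    obj_set_tag(&obj);          /* mark the object, as ac_put_obj() would */
    assert(obj_is_tagged(obj));

    obj_clear_tag(&obj);        /* strip the mark before dereferencing */
    assert(!obj_is_tagged(obj));

    free(obj);
    puts("low-bit tagging round-trip ok");
    return 0;
}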
/*
* bootstrap: The caches do not work without cpuarrays anymore, but the
* cpuarrays are allocated from the generic caches...
@@ -424,8 +456,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
* cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
* redzone word.
* cachep->obj_offset: The real object.
- * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
+ * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->size - 1* BYTES_PER_WORD: last caller address
* [BYTES_PER_WORD long]
*/
static int obj_offset(struct kmem_cache *cachep)
@@ -433,11 +465,6 @@ static int obj_offset(struct kmem_cache *cachep)
return cachep->obj_offset;
}
-static int obj_size(struct kmem_cache *cachep)
-{
- return cachep->obj_size;
-}
-
static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
@@ -449,23 +476,22 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long long *)(objp + cachep->buffer_size -
+ return (unsigned long long *)(objp + cachep->size -
sizeof(unsigned long long) -
REDZONE_ALIGN);
- return (unsigned long long *) (objp + cachep->buffer_size -
+ return (unsigned long long *) (objp + cachep->size -
sizeof(unsigned long long));
}
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
+ return (void **)(objp + cachep->size - BYTES_PER_WORD);
}
#else
#define obj_offset(x) 0
-#define obj_size(cachep) (cachep->buffer_size)
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
@@ -475,7 +501,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#ifdef CONFIG_TRACING
size_t slab_buffer_size(struct kmem_cache *cachep)
{
- return cachep->buffer_size;
+ return cachep->size;
}
EXPORT_SYMBOL(slab_buffer_size);
#endif
@@ -489,56 +515,37 @@ EXPORT_SYMBOL(slab_buffer_size);
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;
-/*
- * Functions for storing/retrieving the cachep and or slab from the page
- * allocator. These are used to find the slab an obj belongs to. With kfree(),
- * these are used to find the cache which an obj belongs to.
- */
-static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
-{
- page->lru.next = (struct list_head *)cache;
-}
-
static inline struct kmem_cache *page_get_cache(struct page *page)
{
page = compound_head(page);
BUG_ON(!PageSlab(page));
- return (struct kmem_cache *)page->lru.next;
-}
-
-static inline void page_set_slab(struct page *page, struct slab *slab)
-{
- page->lru.prev = (struct list_head *)slab;
-}
-
-static inline struct slab *page_get_slab(struct page *page)
-{
- BUG_ON(!PageSlab(page));
- return (struct slab *)page->lru.prev;
+ return page->slab_cache;
}
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct page *page = virt_to_head_page(obj);
- return page_get_cache(page);
+ return page->slab_cache;
}
static inline struct slab *virt_to_slab(const void *obj)
{
struct page *page = virt_to_head_page(obj);
- return page_get_slab(page);
+
+ VM_BUG_ON(!PageSlab(page));
+ return page->slab_page;
}
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
unsigned int idx)
{
- return slab->s_mem + cache->buffer_size * idx;
+ return slab->s_mem + cache->size * idx;
}
/*
- * We want to avoid an expensive divide : (offset / cache->buffer_size)
- * Using the fact that buffer_size is a constant for a particular cache,
- * we can replace (offset / cache->buffer_size) by
+ * We want to avoid an expensive divide : (offset / cache->size)
+ * Using the fact that size is a constant for a particular cache,
+ * we can replace (offset / cache->size) by
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
@@ -584,33 +591,12 @@ static struct kmem_cache cache_cache = {
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
- .buffer_size = sizeof(struct kmem_cache),
+ .size = sizeof(struct kmem_cache),
.name = "kmem_cache",
};
#define BAD_ALIEN_MAGIC 0x01020304ul
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
- NONE,
- PARTIAL_AC,
- PARTIAL_L3,
- EARLY,
- LATE,
- FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up >= EARLY;
-}
-
#ifdef CONFIG_LOCKDEP
/*
@@ -676,7 +662,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up < LATE)
+ if (slab_state < UP)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -716,12 +702,6 @@ static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
}
#endif
-/*
- * Guard access to the cache-chain.
- */
-static DEFINE_MUTEX(cache_chain_mutex);
-static struct list_head cache_chain;
-
static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -951,6 +931,124 @@ static struct array_cache *alloc_arraycache(int node, int entries,
return nc;
}
+static inline bool is_slab_pfmemalloc(struct slab *slabp)
+{
+ struct page *page = virt_to_page(slabp->s_mem);
+
+ return PageSlabPfmemalloc(page);
+}
+
+/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
+static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
+ struct array_cache *ac)
+{
+ struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
+ struct slab *slabp;
+ unsigned long flags;
+
+ if (!pfmemalloc_active)
+ return;
+
+ spin_lock_irqsave(&l3->list_lock, flags);
+ list_for_each_entry(slabp, &l3->slabs_full, list)
+ if (is_slab_pfmemalloc(slabp))
+ goto out;
+
+ list_for_each_entry(slabp, &l3->slabs_partial, list)
+ if (is_slab_pfmemalloc(slabp))
+ goto out;
+
+ list_for_each_entry(slabp, &l3->slabs_free, list)
+ if (is_slab_pfmemalloc(slabp))
+ goto out;
+
+ pfmemalloc_active = false;
+out:
+ spin_unlock_irqrestore(&l3->list_lock, flags);
+}
+
+static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
+ gfp_t flags, bool force_refill)
+{
+ int i;
+ void *objp = ac->entry[--ac->avail];
+
+ /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
+ if (unlikely(is_obj_pfmemalloc(objp))) {
+ struct kmem_list3 *l3;
+
+ if (gfp_pfmemalloc_allowed(flags)) {
+ clear_obj_pfmemalloc(&objp);
+ return objp;
+ }
+
+ /* The caller cannot use PFMEMALLOC objects, find another one */
+ for (i = 1; i < ac->avail; i++) {
+ /* If a !PFMEMALLOC object is found, swap them */
+ if (!is_obj_pfmemalloc(ac->entry[i])) {
+ objp = ac->entry[i];
+ ac->entry[i] = ac->entry[ac->avail];
+ ac->entry[ac->avail] = objp;
+ return objp;
+ }
+ }
+
+ /*
+ * If there are empty slabs on the slabs_free list and we are
+ * being forced to refill the cache, mark this one !pfmemalloc.
+ */
+ l3 = cachep->nodelists[numa_mem_id()];
+ if (!list_empty(&l3->slabs_free) && force_refill) {
+ struct slab *slabp = virt_to_slab(objp);
+ ClearPageSlabPfmemalloc(virt_to_page(slabp->s_mem));
+ clear_obj_pfmemalloc(&objp);
+ recheck_pfmemalloc_active(cachep, ac);
+ return objp;
+ }
+
+ /* No !PFMEMALLOC objects available */
+ ac->avail++;
+ objp = NULL;
+ }
+
+ return objp;
+}
+
+static inline void *ac_get_obj(struct kmem_cache *cachep,
+ struct array_cache *ac, gfp_t flags, bool force_refill)
+{
+ void *objp;
+
+ if (unlikely(sk_memalloc_socks()))
+ objp = __ac_get_obj(cachep, ac, flags, force_refill);
+ else
+ objp = ac->entry[--ac->avail];
+
+ return objp;
+}
+
+static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+ void *objp)
+{
+ if (unlikely(pfmemalloc_active)) {
+ /* Some pfmemalloc slabs exist, check if this is one */
+ struct page *page = virt_to_page(objp);
+ if (PageSlabPfmemalloc(page))
+ set_obj_pfmemalloc(&objp);
+ }
+
+ return objp;
+}
+
+static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
+ void *objp)
+{
+ if (unlikely(sk_memalloc_socks()))
+ objp = __ac_put_obj(cachep, ac, objp);
+
+ ac->entry[ac->avail++] = objp;
+}
+
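
__ac_get_obj() pops the top entry and, if it is pfmemalloc-tagged while the caller may not touch reserves, scans for an untagged entry and swaps it into the popped slot so the tagged object stays cached for a caller that is allowed to use it. A compact userspace model of that selection is sketched below, using integers in place of slab object pointers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TAG 1UL                 /* stands in for SLAB_OBJ_PFMEMALLOC */

static bool tagged(uintptr_t v) { return v & TAG; }

/*
 * Model of __ac_get_obj(): pop the top entry; if it is tagged and the
 * caller may not use reserves, look for an untagged entry further down,
 * park the tagged one in its place and return the clean one.
 */
static uintptr_t get_obj(uintptr_t *entry, int *avail, bool reserves_allowed)
{
    uintptr_t obj = entry[--(*avail)];

    if (!tagged(obj) || reserves_allowed)
        return obj & ~TAG;      /* clear_obj_pfmemalloc() equivalent */

    /* The scan starts at index 1, mirroring the kernel's loop. */
    for (int i = 1; i < *avail; i++) {
        if (!tagged(entry[i])) {
            uintptr_t clean = entry[i];
            entry[i] = entry[*avail];   /* keep the tagged object cached */
            entry[*avail] = clean;
            return clean;
        }
    }

    (*avail)++;                 /* nothing usable: undo the pop */
    return 0;
}

int main(void)
{
    uintptr_t stack[3] = { 0x100, 0x200, 0x300 | TAG };
    int avail = 3;

    /* A caller without reserve access skips the tagged 0x300 object. */
    printf("%#lx\n", (unsigned long)get_obj(stack, &avail, false)); /* 0x200 */
    return 0;
}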
/*
* Transfer objects in one arraycache to another.
* Locking must be handled by the caller.
@@ -1127,7 +1225,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, alien, nodeid);
}
- alien->entry[alien->avail++] = objp;
+ ac_put_obj(cachep, alien, objp);
spin_unlock(&alien->lock);
} else {
spin_lock(&(cachep->nodelists[nodeid])->list_lock);
@@ -1145,7 +1243,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* When hotplugging memory or a cpu, existing nodelists are not replaced if
* already in use.
*
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
*/
static int init_cache_nodelists_node(int node)
{
@@ -1153,7 +1251,7 @@ static int init_cache_nodelists_node(int node)
struct kmem_list3 *l3;
const int memsize = sizeof(struct kmem_list3);
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
@@ -1169,7 +1267,7 @@ static int init_cache_nodelists_node(int node)
/*
* The l3s don't come and go as CPUs come and
- * go. cache_chain_mutex is sufficient
+ * go. slab_mutex is sufficient
* protection here.
*/
cachep->nodelists[node] = l3;
@@ -1191,7 +1289,7 @@ static void __cpuinit cpuup_canceled(long cpu)
int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
@@ -1241,7 +1339,7 @@ free_array_cache:
* the respective cache's slabs, now we can go ahead and
* shrink each nodelist to its limit.
*/
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
@@ -1270,7 +1368,7 @@ static int __cpuinit cpuup_prepare(long cpu)
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct array_cache *nc;
struct array_cache *shared = NULL;
struct array_cache **alien = NULL;
@@ -1338,9 +1436,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
err = cpuup_prepare(cpu);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -1350,7 +1448,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/*
- * Shutdown cache reaper. Note that the cache_chain_mutex is
+ * Shutdown cache reaper. Note that the slab_mutex is
* held so that if cache_reap() is invoked it cannot do
* anything expensive but will only modify reap_work
* and reschedule the timer.
@@ -1377,9 +1475,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
cpuup_canceled(cpu);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
}
return notifier_from_errno(err);
@@ -1395,14 +1493,14 @@ static struct notifier_block __cpuinitdata cpucache_notifier = {
* Returns -EBUSY if all objects cannot be drained so that the node is not
* removed.
*
- * Must hold cache_chain_mutex.
+ * Must hold slab_mutex.
*/
static int __meminit drain_cache_nodelists_node(int node)
{
struct kmem_cache *cachep;
int ret = 0;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
struct kmem_list3 *l3;
l3 = cachep->nodelists[node];
@@ -1433,14 +1531,14 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
switch (action) {
case MEM_GOING_ONLINE:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = init_cache_nodelists_node(nid);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case MEM_GOING_OFFLINE:
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = drain_cache_nodelists_node(nid);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
break;
case MEM_ONLINE:
case MEM_OFFLINE:
@@ -1544,8 +1642,8 @@ void __init kmem_cache_init(void)
node = numa_mem_id();
/* 1) create the cache_cache */
- INIT_LIST_HEAD(&cache_chain);
- list_add(&cache_cache.next, &cache_chain);
+ INIT_LIST_HEAD(&slab_caches);
+ list_add(&cache_cache.list, &slab_caches);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1553,18 +1651,16 @@ void __init kmem_cache_init(void)
/*
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
nr_node_ids * sizeof(struct kmem_list3 *);
-#if DEBUG
- cache_cache.obj_size = cache_cache.buffer_size;
-#endif
- cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+ cache_cache.object_size = cache_cache.size;
+ cache_cache.size = ALIGN(cache_cache.size,
cache_line_size());
cache_cache.reciprocal_buffer_size =
- reciprocal_value(cache_cache.buffer_size);
+ reciprocal_value(cache_cache.size);
for (order = 0; order < MAX_ORDER; order++) {
- cache_estimate(order, cache_cache.buffer_size,
+ cache_estimate(order, cache_cache.size,
cache_line_size(), 0, &left_over, &cache_cache.num);
if (cache_cache.num)
break;
@@ -1585,7 +1681,7 @@ void __init kmem_cache_init(void)
* bug.
*/
- sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
+ sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1593,7 +1689,7 @@ void __init kmem_cache_init(void)
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
- kmem_cache_create(names[INDEX_L3].name,
+ __kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
@@ -1611,14 +1707,14 @@ void __init kmem_cache_init(void)
* allow tighter packing of the smaller caches.
*/
if (!sizes->cs_cachep) {
- sizes->cs_cachep = kmem_cache_create(names->name,
+ sizes->cs_cachep = __kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL);
}
#ifdef CONFIG_ZONE_DMA
- sizes->cs_dmacachep = kmem_cache_create(
+ sizes->cs_dmacachep = __kmem_cache_create(
names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
@@ -1676,27 +1772,27 @@ void __init kmem_cache_init(void)
}
}
- g_cpucache_up = EARLY;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- g_cpucache_up = LATE;
+ slab_state = UP;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
/* 6) resize the head arrays to their final sizes */
- mutex_lock(&cache_chain_mutex);
- list_for_each_entry(cachep, &cache_chain, next)
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(cachep, &slab_caches, list)
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
/* Done! */
- g_cpucache_up = FULL;
+ slab_state = FULL;
/*
* Register a cpu startup notifier callback that initializes
@@ -1727,6 +1823,9 @@ static int __init cpucache_init(void)
*/
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
+
+ /* Done! */
+ slab_state = FULL;
return 0;
}
__initcall(cpucache_init);
@@ -1743,7 +1842,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
nodeid, gfpflags);
printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
- cachep->name, cachep->buffer_size, cachep->gfporder);
+ cachep->name, cachep->size, cachep->gfporder);
for_each_online_node(node) {
unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
@@ -1798,7 +1897,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
flags |= __GFP_COMP;
#endif
- flags |= cachep->gfpflags;
+ flags |= cachep->allocflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
flags |= __GFP_RECLAIMABLE;
@@ -1809,6 +1908,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
return NULL;
}
+ /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
+ if (unlikely(page->pfmemalloc))
+ pfmemalloc_active = true;
+
nr_pages = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
add_zone_page_state(page_zone(page),
@@ -1816,9 +1919,13 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
else
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
- for (i = 0; i < nr_pages; i++)
+ for (i = 0; i < nr_pages; i++) {
__SetPageSlab(page + i);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page + i);
+ }
+
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
@@ -1850,6 +1957,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
NR_SLAB_UNRECLAIMABLE, nr_freed);
while (i--) {
BUG_ON(!PageSlab(page));
+ __ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
page++;
}
@@ -1874,7 +1982,7 @@ static void kmem_rcu_free(struct rcu_head *head)
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
unsigned long caller)
{
- int size = obj_size(cachep);
+ int size = cachep->object_size;
addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
@@ -1906,7 +2014,7 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
- int size = obj_size(cachep);
+ int size = cachep->object_size;
addr = &((char *)addr)[obj_offset(cachep)];
memset(addr, val, size);
@@ -1966,7 +2074,7 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
printk("\n");
}
realobj = (char *)objp + obj_offset(cachep);
- size = obj_size(cachep);
+ size = cachep->object_size;
for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
@@ -1983,7 +2091,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
int lines = 0;
realobj = (char *)objp + obj_offset(cachep);
- size = obj_size(cachep);
+ size = cachep->object_size;
for (i = 0; i < size; i++) {
char exp = POISON_FREE;
@@ -2047,10 +2155,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if (cachep->buffer_size % PAGE_SIZE == 0 &&
+ if (cachep->size % PAGE_SIZE == 0 &&
OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->buffer_size / PAGE_SIZE, 1);
+ cachep->size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -2194,10 +2302,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
- if (g_cpucache_up == FULL)
+ if (slab_state >= FULL)
return enable_cpucache(cachep, gfp);
- if (g_cpucache_up == NONE) {
+ if (slab_state == DOWN) {
/*
* Note: the first kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
@@ -2212,16 +2320,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
*/
set_up_list3s(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3)
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
else
- g_cpucache_up = PARTIAL_AC;
+ slab_state = PARTIAL_ARRAYCACHE;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
- if (g_cpucache_up == PARTIAL_AC) {
+ if (slab_state == PARTIAL_ARRAYCACHE) {
set_up_list3s(cachep, SIZE_L3);
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
} else {
int node;
for_each_online_node(node) {
@@ -2247,7 +2355,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
}
/**
- * kmem_cache_create - Create a cache.
+ * __kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
@@ -2274,59 +2382,14 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
* as davem.
*/
struct kmem_cache *
-kmem_cache_create (const char *name, size_t size, size_t align,
+__kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
size_t left_over, slab_size, ralign;
- struct kmem_cache *cachep = NULL, *pc;
+ struct kmem_cache *cachep = NULL;
gfp_t gfp;
- /*
- * Sanity checks... these are all serious usage bugs.
- */
- if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- size > KMALLOC_MAX_SIZE) {
- printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
- name);
- BUG();
- }
-
- /*
- * We use cache_chain_mutex to ensure a consistent view of
- * cpu_online_mask as well. Please see cpuup_callback
- */
- if (slab_is_available()) {
- get_online_cpus();
- mutex_lock(&cache_chain_mutex);
- }
-
- list_for_each_entry(pc, &cache_chain, next) {
- char tmp;
- int res;
-
- /*
- * This happens when the module gets unloaded and doesn't
- * destroy its slab cache and no-one else reuses the vmalloc
- * area of the module. Print a warning.
- */
- res = probe_kernel_address(pc->name, tmp);
- if (res) {
- printk(KERN_ERR
- "SLAB: cache with size %d has lost its name\n",
- pc->buffer_size);
- continue;
- }
-
- if (!strcmp(pc->name, name)) {
- printk(KERN_ERR
- "kmem_cache_create: duplicate cache %s\n", name);
- dump_stack();
- goto oops;
- }
- }
-
#if DEBUG
- WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
@@ -2415,11 +2478,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* Get cache's description obj. */
cachep = kmem_cache_zalloc(&cache_cache, gfp);
if (!cachep)
- goto oops;
+ return NULL;
cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+ cachep->object_size = size;
+ cachep->align = align;
#if DEBUG
- cachep->obj_size = size;
/*
* Both debugging options require word-alignment which is calculated
@@ -2442,7 +2506,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
- && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+ && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
size = PAGE_SIZE;
}
@@ -2471,8 +2535,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
printk(KERN_ERR
"kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
- cachep = NULL;
- goto oops;
+ return NULL;
}
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
@@ -2508,10 +2571,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size;
cachep->flags = flags;
- cachep->gfpflags = 0;
+ cachep->allocflags = 0;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
- cachep->gfpflags |= GFP_DMA;
- cachep->buffer_size = size;
+ cachep->allocflags |= GFP_DMA;
+ cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
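
After this rename, cachep->object_size is the size the caller asked for while cachep->size is the slot actually consumed once debugging red zones and cache line alignment are applied. An illustrative calculation follows; the numbers and layout rule are examples of a debug-style configuration, not the allocator's exact sizing code.

#include <stdio.h>

static unsigned long align_up(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    unsigned long object_size = 50;                       /* requested size */
    unsigned long redzones    = 2 * sizeof(unsigned long long);
    unsigned long cacheline   = 64;

    /* Model: pad with two red zones, then round up to a cache line. */
    unsigned long size = align_up(object_size + redzones, cacheline);

    printf("object_size=%lu size=%lu\n", object_size, size); /* 50, 128 */
    return 0;
}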
@@ -2530,8 +2593,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (setup_cpu_cache(cachep, gfp)) {
__kmem_cache_destroy(cachep);
- cachep = NULL;
- goto oops;
+ return NULL;
}
if (flags & SLAB_DEBUG_OBJECTS) {
@@ -2545,18 +2607,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
/* cache setup completed, link it into the list */
- list_add(&cachep->next, &cache_chain);
-oops:
- if (!cachep && (flags & SLAB_PANIC))
- panic("kmem_cache_create(): failed to create slab `%s'\n",
- name);
- if (slab_is_available()) {
- mutex_unlock(&cache_chain_mutex);
- put_online_cpus();
- }
+ list_add(&cachep->list, &slab_caches);
return cachep;
}
-EXPORT_SYMBOL(kmem_cache_create);
#if DEBUG
static void check_irq_off(void)
@@ -2671,7 +2724,7 @@ out:
return nr_freed;
}
-/* Called with cache_chain_mutex held to protect against cpu hotplug */
+/* Called with slab_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
@@ -2706,9 +2759,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
BUG_ON(!cachep || in_interrupt());
get_online_cpus();
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
ret = __cache_shrink(cachep);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
return ret;
}
@@ -2736,15 +2789,15 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
/* Find the cache in the chain of caches. */
get_online_cpus();
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
/*
* the chain is never empty, cache_cache is never destroyed
*/
- list_del(&cachep->next);
+ list_del(&cachep->list);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
- list_add(&cachep->next, &cache_chain);
- mutex_unlock(&cache_chain_mutex);
+ list_add(&cachep->list, &slab_caches);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
return;
}
@@ -2753,7 +2806,7 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
rcu_barrier();
__kmem_cache_destroy(cachep);
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2840,10 +2893,10 @@ static void cache_init_objs(struct kmem_cache *cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
- if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
+ if ((cachep->size % PAGE_SIZE) == 0 &&
OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
- cachep->buffer_size / PAGE_SIZE, 0);
+ cachep->size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp);
@@ -2857,9 +2910,9 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (CONFIG_ZONE_DMA_FLAG) {
if (flags & GFP_DMA)
- BUG_ON(!(cachep->gfpflags & GFP_DMA));
+ BUG_ON(!(cachep->allocflags & GFP_DMA));
else
- BUG_ON(cachep->gfpflags & GFP_DMA);
+ BUG_ON(cachep->allocflags & GFP_DMA);
}
}
@@ -2918,8 +2971,8 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
nr_pages <<= cache->gfporder;
do {
- page_set_cache(page, cache);
- page_set_slab(page, slab);
+ page->slab_cache = cache;
+ page->slab_page = slab;
page++;
} while (--nr_pages);
}
@@ -3057,7 +3110,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
- slabp = page_get_slab(page);
+ slabp = page->slab_page;
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
@@ -3077,10 +3130,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+ if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
- cachep->buffer_size / PAGE_SIZE, 0);
+ cachep->size / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
@@ -3120,16 +3173,19 @@ bad:
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
+ bool force_refill)
{
int batchcount;
struct kmem_list3 *l3;
struct array_cache *ac;
int node;
-retry:
check_irq_off();
node = numa_mem_id();
+ if (unlikely(force_refill))
+ goto force_grow;
+retry:
ac = cpu_cache_get(cachep);
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3179,8 +3235,8 @@ retry:
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
- node);
+ ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
+ node));
}
check_slabp(cachep, slabp);
@@ -3199,18 +3255,23 @@ alloc_done:
if (unlikely(!ac->avail)) {
int x;
+force_grow:
x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
- if (!x && ac->avail == 0) /* no objects in sight? abort */
+ node = numa_mem_id();
+
+ /* no objects in sight? abort */
+ if (!x && (ac->avail == 0 || force_refill))
return NULL;
if (!ac->avail) /* objects refilled by interrupt? */
goto retry;
}
ac->touched = 1;
- return ac->entry[--ac->avail];
+
+ return ac_get_obj(cachep, ac, flags, force_refill);
}
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
@@ -3230,9 +3291,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+ if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->buffer_size / PAGE_SIZE, 1);
+ cachep->size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -3261,8 +3322,8 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
struct slab *slabp;
unsigned objnr;
- slabp = page_get_slab(virt_to_head_page(objp));
- objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+ slabp = virt_to_head_page(objp)->slab_page;
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
#endif
@@ -3285,30 +3346,42 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
if (cachep == &cache_cache)
return false;
- return should_failslab(obj_size(cachep), flags, cachep->flags);
+ return should_failslab(cachep->object_size, flags, cachep->flags);
}
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *objp;
struct array_cache *ac;
+ bool force_refill = false;
check_irq_off();
ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
- STATS_INC_ALLOCHIT(cachep);
ac->touched = 1;
- objp = ac->entry[--ac->avail];
- } else {
- STATS_INC_ALLOCMISS(cachep);
- objp = cache_alloc_refill(cachep, flags);
+ objp = ac_get_obj(cachep, ac, flags, false);
+
/*
- * the 'ac' may be updated by cache_alloc_refill(),
- * and kmemleak_erase() requires its correct value.
+ * Allow for the possibility that all available objects are
+ * disallowed by the current flags
*/
- ac = cpu_cache_get(cachep);
+ if (objp) {
+ STATS_INC_ALLOCHIT(cachep);
+ goto out;
+ }
+ force_refill = true;
}
+
+ STATS_INC_ALLOCMISS(cachep);
+ objp = cache_alloc_refill(cachep, flags, force_refill);
+ /*
+ * the 'ac' may be updated by cache_alloc_refill(),
+ * and kmemleak_erase() requires its correct value.
+ */
+ ac = cpu_cache_get(cachep);
+
+out:
/*
* To avoid a false negative, if an object that is in one of the
* per-CPU caches is leaked, we need to make sure kmemleak doesn't
@@ -3336,7 +3409,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_slab_spread_node();
else if (current->mempolicy)
- nid_alloc = slab_node(current->mempolicy);
+ nid_alloc = slab_node();
if (nid_alloc != nid_here)
return ____cache_alloc_node(cachep, flags, nid_alloc);
return NULL;
@@ -3368,7 +3441,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
retry_cpuset:
cpuset_mems_cookie = get_mems_allowed();
- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ zonelist = node_zonelist(slab_node(), flags);
retry:
/*
@@ -3545,14 +3618,14 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
- kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+ kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
flags);
if (likely(ptr))
- kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+ kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
if (unlikely((flags & __GFP_ZERO) && ptr))
- memset(ptr, 0, obj_size(cachep));
+ memset(ptr, 0, cachep->object_size);
return ptr;
}
@@ -3607,15 +3680,15 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
- kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+ kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
flags);
prefetchw(objp);
if (likely(objp))
- kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+ kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
if (unlikely((flags & __GFP_ZERO) && objp))
- memset(objp, 0, obj_size(cachep));
+ memset(objp, 0, cachep->object_size);
return objp;
}
@@ -3630,9 +3703,12 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
struct kmem_list3 *l3;
for (i = 0; i < nr_objects; i++) {
- void *objp = objpp[i];
+ void *objp;
struct slab *slabp;
+ clear_obj_pfmemalloc(&objpp[i]);
+ objp = objpp[i];
+
slabp = virt_to_slab(objp);
l3 = cachep->nodelists[node];
list_del(&slabp->list);
@@ -3731,7 +3807,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
- kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+ kmemcheck_slab_free(cachep, objp, cachep->object_size);
/*
* Skip calling cache_free_alien() when the platform is not numa.
@@ -3750,7 +3826,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
cache_flusharray(cachep, ac);
}
- ac->entry[ac->avail++] = objp;
+ ac_put_obj(cachep, ac, objp);
}
/**
@@ -3766,7 +3842,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
trace_kmem_cache_alloc(_RET_IP_, ret,
- obj_size(cachep), cachep->buffer_size, flags);
+ cachep->object_size, cachep->size, flags);
return ret;
}
@@ -3794,7 +3870,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
__builtin_return_address(0));
trace_kmem_cache_alloc_node(_RET_IP_, ret,
- obj_size(cachep), cachep->buffer_size,
+ cachep->object_size, cachep->size,
flags, nodeid);
return ret;
@@ -3876,7 +3952,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
ret = __cache_alloc(cachep, flags, caller);
trace_kmalloc((unsigned long) caller, ret,
- size, cachep->buffer_size, flags);
+ size, cachep->size, flags);
return ret;
}
@@ -3916,9 +3992,9 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
unsigned long flags;
local_irq_save(flags);
- debug_check_no_locks_freed(objp, obj_size(cachep));
+ debug_check_no_locks_freed(objp, cachep->object_size);
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(objp, obj_size(cachep));
+ debug_check_no_obj_freed(objp, cachep->object_size);
__cache_free(cachep, objp, __builtin_return_address(0));
local_irq_restore(flags);
@@ -3947,8 +4023,9 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
- debug_check_no_locks_freed(objp, obj_size(c));
- debug_check_no_obj_freed(objp, obj_size(c));
+ debug_check_no_locks_freed(objp, c->object_size);
+
+ debug_check_no_obj_freed(objp, c->object_size);
__cache_free(c, (void *)objp, __builtin_return_address(0));
local_irq_restore(flags);
}
@@ -3956,7 +4033,7 @@ EXPORT_SYMBOL(kfree);
unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
- return obj_size(cachep);
+ return cachep->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
@@ -4030,7 +4107,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
return 0;
fail:
- if (!cachep->next.next) {
+ if (!cachep->list.next) {
/* Cache is not active yet. Roll back what we did */
node--;
while (node >= 0) {
@@ -4065,7 +4142,7 @@ static void do_ccupdate_local(void *info)
new->new[smp_processor_id()] = old;
}
-/* Always called with the cache_chain_mutex held */
+/* Always called with the slab_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
@@ -4109,7 +4186,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
return alloc_kmemlist(cachep, gfp);
}
-/* Called with cache_chain_mutex held always */
+/* Called with slab_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
@@ -4124,13 +4201,13 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
*/
- if (cachep->buffer_size > 131072)
+ if (cachep->size > 131072)
limit = 1;
- else if (cachep->buffer_size > PAGE_SIZE)
+ else if (cachep->size > PAGE_SIZE)
limit = 8;
- else if (cachep->buffer_size > 1024)
+ else if (cachep->size > 1024)
limit = 24;
- else if (cachep->buffer_size > 256)
+ else if (cachep->size > 256)
limit = 54;
else
limit = 120;
@@ -4145,7 +4222,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
* to a larger limit. Thus disabled by default.
*/
shared = 0;
- if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
+ if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
shared = 8;
#if DEBUG
@@ -4211,11 +4288,11 @@ static void cache_reap(struct work_struct *w)
int node = numa_mem_id();
struct delayed_work *work = to_delayed_work(w);
- if (!mutex_trylock(&cache_chain_mutex))
+ if (!mutex_trylock(&slab_mutex))
/* Give up. Setup the next iteration. */
goto out;
- list_for_each_entry(searchp, &cache_chain, next) {
+ list_for_each_entry(searchp, &slab_caches, list) {
check_irq_on();
/*
@@ -4253,7 +4330,7 @@ next:
cond_resched();
}
check_irq_on();
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
next_reap_node();
out:
/* Set up the next iteration */
@@ -4289,26 +4366,26 @@ static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);
- return seq_list_start(&cache_chain, *pos);
+ return seq_list_start(&slab_caches, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- return seq_list_next(p, &cache_chain, pos);
+ return seq_list_next(p, &slab_caches, pos);
}
static void s_stop(struct seq_file *m, void *p)
{
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
@@ -4364,7 +4441,7 @@ static int s_show(struct seq_file *m, void *p)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->buffer_size,
+ name, active_objs, num_objs, cachep->size,
cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount, cachep->shared);
@@ -4454,9 +4531,9 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
return -EINVAL;
/* Find the cache in the chain of caches. */
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
res = -EINVAL;
- list_for_each_entry(cachep, &cache_chain, next) {
+ list_for_each_entry(cachep, &slab_caches, list) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
@@ -4469,7 +4546,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
break;
}
}
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
if (res >= 0)
res = count;
return res;
@@ -4492,8 +4569,8 @@ static const struct file_operations proc_slabinfo_operations = {
static void *leaks_start(struct seq_file *m, loff_t *pos)
{
- mutex_lock(&cache_chain_mutex);
- return seq_list_start(&cache_chain, *pos);
+ mutex_lock(&slab_mutex);
+ return seq_list_start(&slab_caches, *pos);
}
static inline int add_caller(unsigned long *n, unsigned long v)
@@ -4532,7 +4609,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
int i;
if (n[0] == n[1])
return;
- for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+ for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
continue;
if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
@@ -4558,7 +4635,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
static int leaks_show(struct seq_file *m, void *p)
{
- struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;
@@ -4592,17 +4669,17 @@ static int leaks_show(struct seq_file *m, void *p)
name = cachep->name;
if (n[0] == n[1]) {
/* Increase the buffer size */
- mutex_unlock(&cache_chain_mutex);
+ mutex_unlock(&slab_mutex);
m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
if (!m->private) {
/* Too bad, we are really out */
m->private = n;
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
return -ENOMEM;
}
*(unsigned long *)m->private = n[0] * 2;
kfree(n);
- mutex_lock(&cache_chain_mutex);
+ mutex_lock(&slab_mutex);
/* Now make sure this entry will be retried */
m->count = m->size;
return 0;
@@ -4677,6 +4754,6 @@ size_t ksize(const void *objp)
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
- return obj_size(virt_to_cache(objp));
+ return virt_to_cache(objp)->object_size;
}
EXPORT_SYMBOL(ksize);
diff --git a/mm/slab.h b/mm/slab.h
new file mode 100644
index 000000000000..db7848caaa25
--- /dev/null
+++ b/mm/slab.h
@@ -0,0 +1,33 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+ DOWN, /* No slab functionality yet */
+ PARTIAL, /* SLUB: kmem_cache_node available */
+ PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */
+ PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */
+ UP, /* Slab caches usable but not all extras yet */
+ FULL /* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+/* The slab cache mutex protects the management structures during changes */
+extern struct mutex slab_mutex;
+extern struct list_head slab_caches;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
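A minimal sketch of how the shared state above is typically consumed (illustrative only, not part of the patch; early_alloc_node is a hypothetical helper): boot-time callers test slab_is_available(), i.e. slab_state >= UP, before using the slab allocator, much like the mm/sparse.c hunk further down.

#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/* Pick the slab allocator once it is up, otherwise fall back to bootmem. */
static void *early_alloc_node(size_t size, int nid)
{
	if (slab_is_available())		/* slab_state >= UP */
		return kzalloc_node(size, GFP_KERNEL, nid);

	return alloc_bootmem_node(NODE_DATA(nid), size);
}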
diff --git a/mm/slab_common.c b/mm/slab_common.c
new file mode 100644
index 000000000000..aa3ca5bb01b5
--- /dev/null
+++ b/mm/slab_common.c
@@ -0,0 +1,120 @@
+/*
+ * Slab allocator functions that are independent of the allocator strategy
+ *
+ * (C) 2012 Christoph Lameter <cl@linux.com>
+ */
+#include <linux/slab.h>
+
+#include <linux/mm.h>
+#include <linux/poison.h>
+#include <linux/interrupt.h>
+#include <linux/memory.h>
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+#include "slab.h"
+
+enum slab_state slab_state;
+LIST_HEAD(slab_caches);
+DEFINE_MUTEX(slab_mutex);
+
+/*
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within an interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+
+struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags, void (*ctor)(void *))
+{
+ struct kmem_cache *s = NULL;
+
+#ifdef CONFIG_DEBUG_VM
+ if (!name || in_interrupt() || size < sizeof(void *) ||
+ size > KMALLOC_MAX_SIZE) {
+ printk(KERN_ERR "kmem_cache_create(%s) integrity check"
+ " failed\n", name);
+ goto out;
+ }
+#endif
+
+ get_online_cpus();
+ mutex_lock(&slab_mutex);
+
+#ifdef CONFIG_DEBUG_VM
+ list_for_each_entry(s, &slab_caches, list) {
+ char tmp;
+ int res;
+
+ /*
+ * This happens when the module gets unloaded and doesn't
+ * destroy its slab cache and no-one else reuses the vmalloc
+ * area of the module. Print a warning.
+ */
+ res = probe_kernel_address(s->name, tmp);
+ if (res) {
+ printk(KERN_ERR
+ "Slab cache with size %d has lost its name\n",
+ s->object_size);
+ continue;
+ }
+
+ if (!strcmp(s->name, name)) {
+ printk(KERN_ERR "kmem_cache_create(%s): Cache name"
+ " already exists.\n",
+ name);
+ dump_stack();
+ s = NULL;
+ goto oops;
+ }
+ }
+
+ WARN_ON(strchr(name, ' ')); /* It confuses parsers */
+#endif
+
+ s = __kmem_cache_create(name, size, align, flags, ctor);
+
+#ifdef CONFIG_DEBUG_VM
+oops:
+#endif
+ mutex_unlock(&slab_mutex);
+ put_online_cpus();
+
+#ifdef CONFIG_DEBUG_VM
+out:
+#endif
+ if (!s && (flags & SLAB_PANIC))
+ panic("kmem_cache_create: Failed to create slab '%s'\n", name);
+
+ return s;
+}
+EXPORT_SYMBOL(kmem_cache_create);
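A hedged usage sketch of the consolidated kmem_cache_create() entry point (illustrative only; struct foo and foo_cachep are hypothetical): the cache is created once, then objects are allocated from and returned to it.

#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int a;
	int b;
};

static struct kmem_cache *foo_cachep;	/* hypothetical example cache */

static int foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_cache_use(void)
{
	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (f)
		kmem_cache_free(foo_cachep, f);
}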
+
+int slab_is_available(void)
+{
+ return slab_state >= UP;
+}
diff --git a/mm/slob.c b/mm/slob.c
index 8105be42cad1..45d4ca79933a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,6 +59,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include "slab.h"
+
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
@@ -92,36 +94,6 @@ struct slob_block {
typedef struct slob_block slob_t;
/*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
- union {
- struct {
- unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
- slobidx_t units; /* free units left in page */
- unsigned long pad[2];
- slob_t *free; /* first free slob_t in page */
- struct list_head list; /* linked list of free pages */
- };
- struct page page;
- };
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct slob_page *sp)
-{
- reset_page_mapcount(&sp->page);
- sp->page.mapping = NULL;
-}
-
-/*
* All partially free slob pages go on these lists.
*/
#define SLOB_BREAK1 256
@@ -131,46 +103,23 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct slob_page *sp)
-{
- return PageSlab((struct page *)sp);
-}
-
-static inline void set_slob_page(struct slob_page *sp)
-{
- __SetPageSlab((struct page *)sp);
-}
-
-static inline void clear_slob_page(struct slob_page *sp)
-{
- __ClearPageSlab((struct page *)sp);
-}
-
-static inline struct slob_page *slob_page(const void *addr)
-{
- return (struct slob_page *)virt_to_page(addr);
-}
-
-/*
* slob_page_free: true for pages on free_slob_pages list.
*/
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
{
- return PageSlobFree((struct page *)sp);
+ return PageSlobFree(sp);
}
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
{
list_add(&sp->list, list);
- __SetPageSlobFree((struct page *)sp);
+ __SetPageSlobFree(sp);
}
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
{
list_del(&sp->list);
- __ClearPageSlobFree((struct page *)sp);
+ __ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
@@ -267,12 +216,12 @@ static void slob_free_pages(void *b, int order)
/*
* Allocate a slob block within a given slob_page sp.
*/
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
- for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+ for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
slobidx_t avail = slob_units(cur);
if (align) {
@@ -296,12 +245,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
if (prev)
set_slob(prev, slob_units(prev), next);
else
- sp->free = next;
+ sp->freelist = next;
} else { /* fragment */
if (prev)
set_slob(prev, slob_units(prev), cur + units);
else
- sp->free = cur + units;
+ sp->freelist = cur + units;
set_slob(cur + units, avail - units, next);
}
@@ -320,7 +269,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
*/
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
- struct slob_page *sp;
+ struct page *sp;
struct list_head *prev;
struct list_head *slob_list;
slob_t *b = NULL;
@@ -341,7 +290,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
* If there's a node specification, search for a partial
* page with a matching node id in the freelist.
*/
- if (node != -1 && page_to_nid(&sp->page) != node)
+ if (node != -1 && page_to_nid(sp) != node)
continue;
#endif
/* Enough room on this page? */
@@ -369,12 +318,12 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return NULL;
- sp = slob_page(b);
- set_slob_page(sp);
+ sp = virt_to_page(b);
+ __SetPageSlab(sp);
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
- sp->free = b;
+ sp->freelist = b;
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
@@ -392,7 +341,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
*/
static void slob_free(void *block, int size)
{
- struct slob_page *sp;
+ struct page *sp;
slob_t *prev, *next, *b = (slob_t *)block;
slobidx_t units;
unsigned long flags;
@@ -402,7 +351,7 @@ static void slob_free(void *block, int size)
return;
BUG_ON(!size);
- sp = slob_page(block);
+ sp = virt_to_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -412,8 +361,8 @@ static void slob_free(void *block, int size)
if (slob_page_free(sp))
clear_slob_page_free(sp);
spin_unlock_irqrestore(&slob_lock, flags);
- clear_slob_page(sp);
- free_slob_page(sp);
+ __ClearPageSlab(sp);
+ reset_page_mapcount(sp);
slob_free_pages(b, 0);
return;
}
@@ -421,7 +370,7 @@ static void slob_free(void *block, int size)
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
- sp->free = b;
+ sp->freelist = b;
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +390,15 @@ static void slob_free(void *block, int size)
*/
sp->units += units;
- if (b < sp->free) {
- if (b + units == sp->free) {
- units += slob_units(sp->free);
- sp->free = slob_next(sp->free);
+ if (b < (slob_t *)sp->freelist) {
+ if (b + units == sp->freelist) {
+ units += slob_units(sp->freelist);
+ sp->freelist = slob_next(sp->freelist);
}
- set_slob(b, units, sp->free);
- sp->free = b;
+ set_slob(b, units, sp->freelist);
+ sp->freelist = b;
} else {
- prev = sp->free;
+ prev = sp->freelist;
next = slob_next(prev);
while (b > next) {
prev = next;
@@ -522,7 +471,7 @@ EXPORT_SYMBOL(__kmalloc_node);
void kfree(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
trace_kfree(_RET_IP_, block);
@@ -530,43 +479,36 @@ void kfree(const void *block)
return;
kmemleak_free(block);
- sp = slob_page(block);
- if (is_slob_page(sp)) {
+ sp = virt_to_page(block);
+ if (PageSlab(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else
- put_page(&sp->page);
+ put_page(sp);
}
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
BUG_ON(!block);
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = slob_page(block);
- if (is_slob_page(sp)) {
+ sp = virt_to_page(block);
+ if (PageSlab(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
} else
- return sp->page.private;
+ return sp->private;
}
EXPORT_SYMBOL(ksize);
-struct kmem_cache {
- unsigned int size, align;
- unsigned long flags;
- const char *name;
- void (*ctor)(void *);
-};
-
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *c;
@@ -589,13 +531,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
c->align = ARCH_SLAB_MINALIGN;
if (c->align < align)
c->align = align;
- } else if (flags & SLAB_PANIC)
- panic("Cannot create slab cache %s\n", name);
- kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ c->refcount = 1;
+ }
return c;
}
-EXPORT_SYMBOL(kmem_cache_create);
void kmem_cache_destroy(struct kmem_cache *c)
{
@@ -678,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
}
EXPORT_SYMBOL(kmem_cache_shrink);
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
- return slob_ready;
-}
-
void __init kmem_cache_init(void)
{
- slob_ready = 1;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
- /* Nothing to do */
+ slab_state = FULL;
}
diff --git a/mm/slub.c b/mm/slub.c
index 8c691fa1cf3c..8f78e2577031 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
@@ -33,15 +34,17 @@
#include <trace/events/kmem.h>
+#include "internal.h"
+
/*
* Lock order:
- * 1. slub_lock (Global Semaphore)
+ * 1. slab_mutex (Global Mutex)
* 2. node->list_lock
* 3. slab_lock(page) (Only on some arches and for debugging)
*
- * slub_lock
+ * slab_mutex
*
- * The role of the slub_lock is to protect the list of all the slabs
+ * The role of the slab_mutex is to protect the list of all the slabs
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
@@ -182,17 +185,6 @@ static int kmem_size = sizeof(struct kmem_cache);
static struct notifier_block slab_notifier;
#endif
-static enum {
- DOWN, /* No slab functionality available */
- PARTIAL, /* Kmem_cache_node works */
- UP, /* Everything works but does not show up in sysfs */
- SYSFS /* Sysfs up */
-} slab_state = DOWN;
-
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
/*
* Tracking user of a slab.
*/
@@ -237,11 +229,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
* Core slab cache functions
*******************************************************************/
-int slab_is_available(void)
-{
- return slab_state >= UP;
-}
-
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
return s->node[node];
@@ -311,7 +298,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
* and whatever may come after it.
*/
if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
- return s->objsize;
+ return s->object_size;
#endif
/*
@@ -609,11 +596,11 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (p > addr + 16)
print_section("Bytes b4 ", p - 16, 16);
- print_section("Object ", p, min_t(unsigned long, s->objsize,
+ print_section("Object ", p, min_t(unsigned long, s->object_size,
PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p + s->objsize,
- s->inuse - s->objsize);
+ print_section("Redzone ", p + s->object_size,
+ s->inuse - s->object_size);
if (s->offset)
off = s->offset + sizeof(void *);
@@ -655,12 +642,12 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
u8 *p = object;
if (s->flags & __OBJECT_POISON) {
- memset(p, POISON_FREE, s->objsize - 1);
- p[s->objsize - 1] = POISON_END;
+ memset(p, POISON_FREE, s->object_size - 1);
+ p[s->object_size - 1] = POISON_END;
}
if (s->flags & SLAB_RED_ZONE)
- memset(p + s->objsize, val, s->inuse - s->objsize);
+ memset(p + s->object_size, val, s->inuse - s->object_size);
}
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -705,10 +692,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
* Poisoning uses 0x6b (POISON_FREE) and the last byte is
* 0xa5 (POISON_END)
*
- * object + s->objsize
+ * object + s->object_size
* Padding to reach word boundary. This is also used for Redzoning.
* Padding is extended by another word if Redzoning is enabled and
- * objsize == inuse.
+ * object_size == inuse.
*
* We fill with 0xbb (RED_INACTIVE) for inactive objects and with
* 0xcc (RED_ACTIVE) for objects in use.
@@ -727,7 +714,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
* object + s->size
* Nothing is used beyond s->size.
*
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
* ignored. And therefore no slab options that rely on these boundaries
* may be used with merged slabcaches.
*/
@@ -787,25 +774,25 @@ static int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val)
{
u8 *p = object;
- u8 *endobject = object + s->objsize;
+ u8 *endobject = object + s->object_size;
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, page, object, "Redzone",
- endobject, val, s->inuse - s->objsize))
+ endobject, val, s->inuse - s->object_size))
return 0;
} else {
- if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+ if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
check_bytes_and_report(s, page, p, "Alignment padding",
- endobject, POISON_INUSE, s->inuse - s->objsize);
+ endobject, POISON_INUSE, s->inuse - s->object_size);
}
}
if (s->flags & SLAB_POISON) {
if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
(!check_bytes_and_report(s, page, p, "Poison", p,
- POISON_FREE, s->objsize - 1) ||
+ POISON_FREE, s->object_size - 1) ||
!check_bytes_and_report(s, page, p, "Poison",
- p + s->objsize - 1, POISON_END, 1)))
+ p + s->object_size - 1, POISON_END, 1)))
return 0;
/*
* check_pad_bytes cleans up on its own.
@@ -926,7 +913,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
page->freelist);
if (!alloc)
- print_section("Object ", (void *)object, s->objsize);
+ print_section("Object ", (void *)object, s->object_size);
dump_stack();
}
@@ -942,14 +929,14 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
lockdep_trace_alloc(flags);
might_sleep_if(flags & __GFP_WAIT);
- return should_failslab(s->objsize, flags, s->flags);
+ return should_failslab(s->object_size, flags, s->flags);
}
static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
flags &= gfp_allowed_mask;
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
- kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+ kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
}
static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -966,13 +953,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
unsigned long flags;
local_irq_save(flags);
- kmemcheck_slab_free(s, x, s->objsize);
- debug_check_no_locks_freed(x, s->objsize);
+ kmemcheck_slab_free(s, x, s->object_size);
+ debug_check_no_locks_freed(x, s->object_size);
local_irq_restore(flags);
}
#endif
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(x, s->objsize);
+ debug_check_no_obj_freed(x, s->object_size);
}
/*
@@ -1207,7 +1194,7 @@ out:
__setup("slub_debug", setup_slub_debug);
-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
@@ -1237,7 +1224,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
{
@@ -1314,13 +1301,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
stat(s, ORDER_FALLBACK);
}
- if (flags & __GFP_WAIT)
- local_irq_disable();
-
- if (!page)
- return NULL;
-
- if (kmemcheck_enabled
+ if (kmemcheck_enabled && page
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
int pages = 1 << oo_order(oo);
@@ -1336,6 +1317,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
kmemcheck_mark_unallocated_pages(page, pages);
}
+ if (flags & __GFP_WAIT)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+
page->objects = oo_objects(oo);
mod_zone_page_state(page_zone(page),
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -1370,6 +1356,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
__SetPageSlab(page);
+ if (page->pfmemalloc)
+ SetPageSlabPfmemalloc(page);
start = page_address(page);
@@ -1413,6 +1401,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
+ __ClearPageSlabPfmemalloc(page);
__ClearPageSlab(page);
reset_page_mapcount(page);
if (current->reclaim_state)
@@ -1490,12 +1479,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
}
/*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
*
* Returns a list of objects or NULL if it fails.
*
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
*/
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
@@ -1510,26 +1499,27 @@ static inline void *acquire_slab(struct kmem_cache *s,
* The old freelist is the list of objects for the
* per cpu allocation list.
*/
- do {
- freelist = page->freelist;
- counters = page->counters;
- new.counters = counters;
- if (mode) {
- new.inuse = page->objects;
- new.freelist = NULL;
- } else {
- new.freelist = freelist;
- }
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ if (mode) {
+ new.inuse = page->objects;
+ new.freelist = NULL;
+ } else {
+ new.freelist = freelist;
+ }
- VM_BUG_ON(new.frozen);
- new.frozen = 1;
+ VM_BUG_ON(new.frozen);
+ new.frozen = 1;
- } while (!__cmpxchg_double_slab(s, page,
+ if (!__cmpxchg_double_slab(s, page,
freelist, counters,
new.freelist, new.counters,
- "lock and freeze"));
+ "acquire_slab"))
+ return NULL;
remove_partial(n, page);
+ WARN_ON(!freelist);
return freelist;
}
@@ -1563,7 +1553,6 @@ static void *get_partial_node(struct kmem_cache *s,
if (!object) {
c->page = page;
- c->node = page_to_nid(page);
stat(s, ALLOC_FROM_PARTIAL);
object = t;
available = page->objects - page->inuse;
@@ -1617,7 +1606,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
do {
cpuset_mems_cookie = get_mems_allowed();
- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ zonelist = node_zonelist(slab_node(), flags);
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
struct kmem_cache_node *n;
@@ -1731,14 +1720,12 @@ void init_kmem_cache_cpus(struct kmem_cache *s)
/*
* Remove the cpu slab
*/
-static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
- struct page *page = c->page;
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
int lock = 0;
enum slab_modes l = M_NONE, m = M_NONE;
- void *freelist;
void *nextfree;
int tail = DEACTIVATE_TO_HEAD;
struct page new;
@@ -1749,11 +1736,6 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
tail = DEACTIVATE_TO_TAIL;
}
- c->tid = next_tid(c->tid);
- c->page = NULL;
- freelist = c->freelist;
- c->freelist = NULL;
-
/*
* Stage one: Free all available per cpu objects back
* to the page freelist while it is still frozen. Leave the
@@ -1879,21 +1861,31 @@ redo:
}
}
-/* Unfreeze all the cpu partial slabs */
+/*
+ * Unfreeze all the cpu partial slabs.
+ *
+ * This function must be called with interrupts disabled.
+ */
static void unfreeze_partials(struct kmem_cache *s)
{
- struct kmem_cache_node *n = NULL;
+ struct kmem_cache_node *n = NULL, *n2 = NULL;
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
struct page *page, *discard_page = NULL;
while ((page = c->partial)) {
- enum slab_modes { M_PARTIAL, M_FREE };
- enum slab_modes l, m;
struct page new;
struct page old;
c->partial = page->next;
- l = M_FREE;
+
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+ spin_unlock(&n->list_lock);
+
+ n = n2;
+ spin_lock(&n->list_lock);
+ }
do {
@@ -1906,43 +1898,17 @@ static void unfreeze_partials(struct kmem_cache *s)
new.frozen = 0;
- if (!new.inuse && (!n || n->nr_partial > s->min_partial))
- m = M_FREE;
- else {
- struct kmem_cache_node *n2 = get_node(s,
- page_to_nid(page));
-
- m = M_PARTIAL;
- if (n != n2) {
- if (n)
- spin_unlock(&n->list_lock);
-
- n = n2;
- spin_lock(&n->list_lock);
- }
- }
-
- if (l != m) {
- if (l == M_PARTIAL) {
- remove_partial(n, page);
- stat(s, FREE_REMOVE_PARTIAL);
- } else {
- add_partial(n, page,
- DEACTIVATE_TO_TAIL);
- stat(s, FREE_ADD_PARTIAL);
- }
-
- l = m;
- }
-
- } while (!cmpxchg_double_slab(s, page,
+ } while (!__cmpxchg_double_slab(s, page,
old.freelist, old.counters,
new.freelist, new.counters,
"unfreezing slab"));
- if (m == M_FREE) {
+ if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
page->next = discard_page;
discard_page = page;
+ } else {
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
}
}
@@ -2011,7 +1977,11 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c);
+ deactivate_slab(s, c->page, c->freelist);
+
+ c->tid = next_tid(c->tid);
+ c->page = NULL;
+ c->freelist = NULL;
}
/*
@@ -2055,10 +2025,10 @@ static void flush_all(struct kmem_cache *s)
* Check if the objects in a per cpu structure fit numa
* locality expectations.
*/
-static inline int node_match(struct kmem_cache_cpu *c, int node)
+static inline int node_match(struct page *page, int node)
{
#ifdef CONFIG_NUMA
- if (node != NUMA_NO_NODE && c->node != node)
+ if (node != NUMA_NO_NODE && page_to_nid(page) != node)
return 0;
#endif
return 1;
@@ -2101,10 +2071,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
nid, gfpflags);
printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
- "default order: %d, min order: %d\n", s->name, s->objsize,
+ "default order: %d, min order: %d\n", s->name, s->object_size,
s->size, oo_order(s->oo), oo_order(s->min));
- if (oo_order(s->min) > get_order(s->objsize))
+ if (oo_order(s->min) > get_order(s->object_size))
printk(KERN_WARNING " %s debugging increased min order, use "
"slub_debug=O to disable.\n", s->name);
@@ -2130,10 +2100,16 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_cpu **pc)
{
- void *object;
- struct kmem_cache_cpu *c;
- struct page *page = new_slab(s, flags, node);
+ void *freelist;
+ struct kmem_cache_cpu *c = *pc;
+ struct page *page;
+ freelist = get_partial(s, flags, node, c);
+
+ if (freelist)
+ return freelist;
+
+ page = new_slab(s, flags, node);
if (page) {
c = __this_cpu_ptr(s->cpu_slab);
if (c->page)
@@ -2143,17 +2119,24 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
*/
- object = page->freelist;
+ freelist = page->freelist;
page->freelist = NULL;
stat(s, ALLOC_SLAB);
- c->node = page_to_nid(page);
c->page = page;
*pc = c;
} else
- object = NULL;
+ freelist = NULL;
- return object;
+ return freelist;
+}
+
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
+{
+ if (unlikely(PageSlabPfmemalloc(page)))
+ return gfp_pfmemalloc_allowed(gfpflags);
+
+ return true;
}
/*
@@ -2163,6 +2146,8 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
* The page is still frozen if the return value is not NULL.
*
* If this function returns NULL then the page has been unfrozen.
+ *
+ * This function must be called with interrupts disabled.
*/
static inline void *get_freelist(struct kmem_cache *s, struct page *page)
{
@@ -2173,13 +2158,14 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
do {
freelist = page->freelist;
counters = page->counters;
+
new.counters = counters;
VM_BUG_ON(!new.frozen);
new.inuse = page->objects;
new.frozen = freelist != NULL;
- } while (!cmpxchg_double_slab(s, page,
+ } while (!__cmpxchg_double_slab(s, page,
freelist, counters,
NULL, new.counters,
"get_freelist"));
@@ -2206,7 +2192,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
unsigned long addr, struct kmem_cache_cpu *c)
{
- void **object;
+ void *freelist;
+ struct page *page;
unsigned long flags;
local_irq_save(flags);
@@ -2219,25 +2206,41 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c = this_cpu_ptr(s->cpu_slab);
#endif
- if (!c->page)
+ page = c->page;
+ if (!page)
goto new_slab;
redo:
- if (unlikely(!node_match(c, node))) {
+
+ if (unlikely(!node_match(page, node))) {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, c);
+ deactivate_slab(s, page, c->freelist);
+ c->page = NULL;
+ c->freelist = NULL;
+ goto new_slab;
+ }
+
+ /*
+ * By rights, we should be searching for a slab page that was
+ * PFMEMALLOC but right now, we are losing the pfmemalloc
+ * information when the page leaves the per-cpu allocator
+ */
+ if (unlikely(!pfmemalloc_match(page, gfpflags))) {
+ deactivate_slab(s, page, c->freelist);
+ c->page = NULL;
+ c->freelist = NULL;
goto new_slab;
}
/* must check again c->freelist in case of cpu migration or IRQ */
- object = c->freelist;
- if (object)
+ freelist = c->freelist;
+ if (freelist)
goto load_freelist;
stat(s, ALLOC_SLOWPATH);
- object = get_freelist(s, c->page);
+ freelist = get_freelist(s, page);
- if (!object) {
+ if (!freelist) {
c->page = NULL;
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
@@ -2246,50 +2249,50 @@ redo:
stat(s, ALLOC_REFILL);
load_freelist:
- c->freelist = get_freepointer(s, object);
+ /*
+ * freelist is pointing to the list of objects to be used.
+ * page is pointing to the page from which the objects are obtained.
+ * That page must be frozen for per cpu allocations to work.
+ */
+ VM_BUG_ON(!c->page->frozen);
+ c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
local_irq_restore(flags);
- return object;
+ return freelist;
new_slab:
if (c->partial) {
- c->page = c->partial;
- c->partial = c->page->next;
- c->node = page_to_nid(c->page);
+ page = c->page = c->partial;
+ c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
}
- /* Then do expensive stuff like retrieving pages from the partial lists */
- object = get_partial(s, gfpflags, node, c);
-
- if (unlikely(!object)) {
+ freelist = new_slab_objects(s, gfpflags, node, &c);
- object = new_slab_objects(s, gfpflags, node, &c);
+ if (unlikely(!freelist)) {
+ if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+ slab_out_of_memory(s, gfpflags, node);
- if (unlikely(!object)) {
- if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
- slab_out_of_memory(s, gfpflags, node);
-
- local_irq_restore(flags);
- return NULL;
- }
+ local_irq_restore(flags);
+ return NULL;
}
- if (likely(!kmem_cache_debug(s)))
+ page = c->page;
+ if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
goto load_freelist;
/* Only entered in the debug case */
- if (!alloc_debug_processing(s, c->page, object, addr))
+ if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- c->freelist = get_freepointer(s, object);
- deactivate_slab(s, c);
- c->node = NUMA_NO_NODE;
+ deactivate_slab(s, page, get_freepointer(s, freelist));
+ c->page = NULL;
+ c->freelist = NULL;
local_irq_restore(flags);
- return object;
+ return freelist;
}
/*
@@ -2307,6 +2310,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
{
void **object;
struct kmem_cache_cpu *c;
+ struct page *page;
unsigned long tid;
if (slab_pre_alloc_hook(s, gfpflags))
@@ -2332,8 +2336,8 @@ redo:
barrier();
object = c->freelist;
- if (unlikely(!object || !node_match(c, node)))
-
+ page = c->page;
+ if (unlikely(!object || !node_match(page, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
@@ -2364,7 +2368,7 @@ redo:
}
if (unlikely(gfpflags & __GFP_ZERO) && object)
- memset(object, 0, s->objsize);
+ memset(object, 0, s->object_size);
slab_post_alloc_hook(s, gfpflags, object);
@@ -2375,7 +2379,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
- trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+ trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
return ret;
}
@@ -2405,7 +2409,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
- s->objsize, s->size, gfpflags, node);
+ s->object_size, s->size, gfpflags, node);
return ret;
}
@@ -2900,7 +2904,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
static int calculate_sizes(struct kmem_cache *s, int forced_order)
{
unsigned long flags = s->flags;
- unsigned long size = s->objsize;
+ unsigned long size = s->object_size;
unsigned long align = s->align;
int order;
@@ -2929,7 +2933,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* end of the object and the free pointer. If not then add an
* additional word to have some bytes to store Redzone information.
*/
- if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+ if ((flags & SLAB_RED_ZONE) && size == s->object_size)
size += sizeof(void *);
#endif
@@ -2977,7 +2981,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* user specified and the dynamic determination of cache line size
* on bootup.
*/
- align = calculate_alignment(flags, align, s->objsize);
+ align = calculate_alignment(flags, align, s->object_size);
s->align = align;
/*
@@ -3025,7 +3029,7 @@ static int kmem_cache_open(struct kmem_cache *s,
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
- s->objsize = size;
+ s->object_size = size;
s->align = align;
s->flags = kmem_cache_flags(size, flags, name, ctor);
s->reserved = 0;
@@ -3040,7 +3044,7 @@ static int kmem_cache_open(struct kmem_cache *s,
* Disable debugging flags that store metadata if the min slab
* order increased.
*/
- if (get_order(s->size) > get_order(s->objsize)) {
+ if (get_order(s->size) > get_order(s->object_size)) {
s->flags &= ~DEBUG_METADATA_FLAGS;
s->offset = 0;
if (!calculate_sizes(s, -1))
@@ -3114,7 +3118,7 @@ error:
*/
unsigned int kmem_cache_size(struct kmem_cache *s)
{
- return s->objsize;
+ return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
@@ -3192,11 +3196,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
*/
void kmem_cache_destroy(struct kmem_cache *s)
{
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
s->refcount--;
if (!s->refcount) {
list_del(&s->list);
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
if (kmem_cache_close(s)) {
printk(KERN_ERR "SLUB %s: %s called for cache that "
"still has objects.\n", s->name, __func__);
@@ -3206,7 +3210,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
rcu_barrier();
sysfs_slab_remove(s);
} else
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -3268,7 +3272,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
/*
* This function is called with IRQs disabled during early-boot on
- * single CPU so there's no need to take slub_lock here.
+ * single CPU so there's no need to take slab_mutex here.
*/
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
flags, NULL))
@@ -3553,10 +3557,10 @@ static int slab_mem_going_offline_callback(void *arg)
{
struct kmem_cache *s;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list)
kmem_cache_shrink(s);
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return 0;
}
@@ -3577,7 +3581,7 @@ static void slab_mem_offline_callback(void *arg)
if (offline_node < 0)
return;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
n = get_node(s, offline_node);
if (n) {
@@ -3593,7 +3597,7 @@ static void slab_mem_offline_callback(void *arg)
kmem_cache_free(kmem_cache_node, n);
}
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int slab_mem_going_online_callback(void *arg)
@@ -3616,7 +3620,7 @@ static int slab_mem_going_online_callback(void *arg)
* allocate a kmem_cache_node structure in order to bring the node
* online.
*/
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
/*
* XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3632,7 +3636,7 @@ static int slab_mem_going_online_callback(void *arg)
s->node[nid] = n;
}
out:
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
return ret;
}
@@ -3843,11 +3847,11 @@ void __init kmem_cache_init(void)
if (s && s->size) {
char *name = kasprintf(GFP_NOWAIT,
- "dma-kmalloc-%d", s->objsize);
+ "dma-kmalloc-%d", s->object_size);
BUG_ON(!name);
kmalloc_dma_caches[i] = create_kmalloc_cache(name,
- s->objsize, SLAB_CACHE_DMA);
+ s->object_size, SLAB_CACHE_DMA);
}
}
#endif
@@ -3924,16 +3928,12 @@ static struct kmem_cache *find_mergeable(size_t size,
return NULL;
}
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *s;
char *n;
- if (WARN_ON(!name))
- return NULL;
-
- down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
s->refcount++;
@@ -3941,49 +3941,42 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
*/
- s->objsize = max(s->objsize, (int)size);
+ s->object_size = max(s->object_size, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
if (sysfs_slab_alias(s, name)) {
s->refcount--;
- goto err;
+ return NULL;
}
- up_write(&slub_lock);
return s;
}
n = kstrdup(name, GFP_KERNEL);
if (!n)
- goto err;
+ return NULL;
s = kmalloc(kmem_size, GFP_KERNEL);
if (s) {
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
+ int r;
+
list_add(&s->list, &slab_caches);
- up_write(&slub_lock);
- if (sysfs_slab_add(s)) {
- down_write(&slub_lock);
- list_del(&s->list);
- kfree(n);
- kfree(s);
- goto err;
- }
- return s;
+ mutex_unlock(&slab_mutex);
+ r = sysfs_slab_add(s);
+ mutex_lock(&slab_mutex);
+
+ if (!r)
+ return s;
+
+ list_del(&s->list);
+ kmem_cache_close(s);
}
kfree(s);
}
kfree(n);
-err:
- up_write(&slub_lock);
-
- if (flags & SLAB_PANIC)
- panic("Cannot create slabcache %s\n", name);
- else
- s = NULL;
- return s;
+ return NULL;
}
-EXPORT_SYMBOL(kmem_cache_create);
#ifdef CONFIG_SMP
/*
@@ -4002,13 +3995,13 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
list_for_each_entry(s, &slab_caches, list) {
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
}
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
break;
default:
break;
@@ -4500,30 +4493,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_possible_cpu(cpu) {
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- int node = ACCESS_ONCE(c->node);
+ int node;
struct page *page;
- if (node < 0)
- continue;
page = ACCESS_ONCE(c->page);
- if (page) {
- if (flags & SO_TOTAL)
- x = page->objects;
- else if (flags & SO_OBJECTS)
- x = page->inuse;
- else
- x = 1;
+ if (!page)
+ continue;
- total += x;
- nodes[node] += x;
- }
- page = c->partial;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ x = page->objects;
+ else if (flags & SO_OBJECTS)
+ x = page->inuse;
+ else
+ x = 1;
+ total += x;
+ nodes[node] += x;
+
+ page = ACCESS_ONCE(c->partial);
if (page) {
x = page->pobjects;
total += x;
nodes[node] += x;
}
+
per_cpu[node]++;
}
}
@@ -4623,7 +4617,7 @@ SLAB_ATTR_RO(align);
static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", s->objsize);
+ return sprintf(buf, "%d\n", s->object_size);
}
SLAB_ATTR_RO(object_size);
@@ -5286,7 +5280,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
const char *name;
int unmergeable;
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/* Defer until later */
return 0;
@@ -5331,7 +5325,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
static void sysfs_slab_remove(struct kmem_cache *s)
{
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/*
* Sysfs has not been setup yet so no need to remove the
* cache from sysfs.
@@ -5359,7 +5353,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
- if (slab_state == SYSFS) {
+ if (slab_state == FULL) {
/*
* If we have a leftover link then remove it.
*/
@@ -5383,16 +5377,16 @@ static int __init slab_sysfs_init(void)
struct kmem_cache *s;
int err;
- down_write(&slub_lock);
+ mutex_lock(&slab_mutex);
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
if (!slab_kset) {
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
printk(KERN_ERR "Cannot register slab subsystem.\n");
return -ENOSYS;
}
- slab_state = SYSFS;
+ slab_state = FULL;
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);
@@ -5408,11 +5402,11 @@ static int __init slab_sysfs_init(void)
err = sysfs_slab_alias(al->s, al->name);
if (err)
printk(KERN_ERR "SLUB: Unable to add boot slab alias"
- " %s to sysfs\n", s->name);
+ " %s to sysfs\n", al->name);
kfree(al);
}
- up_write(&slub_lock);
+ mutex_unlock(&slab_mutex);
resiliency_test();
return 0;
}
@@ -5427,7 +5421,7 @@ __initcall(slab_sysfs_init);
static void print_slabinfo_header(struct seq_file *m)
{
seq_puts(m, "slabinfo - version: 2.1\n");
- seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
+ seq_puts(m, "# name <active_objs> <num_objs> <object_size> "
"<objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
@@ -5438,7 +5432,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
- down_read(&slub_lock);
+ mutex_lock(&slab_mutex);
if (!n)
print_slabinfo_header(m);
@@ -5452,7 +5446,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
static void s_stop(struct seq_file *m, void *p)
{
- up_read(&slub_lock);
+ mutex_unlock(&slab_mutex);
}
static int s_show(struct seq_file *m, void *p)
diff --git a/mm/sparse.c b/mm/sparse.c
index c7bb952400c8..fac95f2888f2 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -65,21 +65,18 @@ static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
if (slab_is_available()) {
if (node_state(nid, N_HIGH_MEMORY))
- section = kmalloc_node(array_size, GFP_KERNEL, nid);
+ section = kzalloc_node(array_size, GFP_KERNEL, nid);
else
- section = kmalloc(array_size, GFP_KERNEL);
- } else
+ section = kzalloc(array_size, GFP_KERNEL);
+ } else {
section = alloc_bootmem_node(NODE_DATA(nid), array_size);
-
- if (section)
- memset(section, 0, array_size);
+ }
return section;
}
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
- static DEFINE_SPINLOCK(index_init_lock);
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
int ret = 0;
@@ -90,20 +87,9 @@ static int __meminit sparse_index_init(unsigned long section_nr, int nid)
section = sparse_index_alloc(nid);
if (!section)
return -ENOMEM;
- /*
- * This lock keeps two different sections from
- * reallocating for the same index
- */
- spin_lock(&index_init_lock);
-
- if (mem_section[root]) {
- ret = -EEXIST;
- goto out;
- }
mem_section[root] = section;
-out:
- spin_unlock(&index_init_lock);
+
return ret;
}
#else /* !SPARSEMEM_EXTREME */
@@ -132,6 +118,8 @@ int __section_nr(struct mem_section* ms)
break;
}
+ VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
@@ -493,6 +481,9 @@ void __init sparse_init(void)
struct page **map_map;
#endif
+ /* Set up pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
+ set_pageblock_order();
+
/*
* map is using big page (aka 2M in x86 64 bit)
* usemap is less one page (aka 24 bytes)
diff --git a/mm/swap.c b/mm/swap.c
index 4e7e2ec67078..77825883298f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -236,6 +236,58 @@ void put_pages_list(struct list_head *pages)
}
EXPORT_SYMBOL(put_pages_list);
+/*
+ * get_kernel_pages() - pin kernel pages in memory
+ * @kiov: An array of struct kvec structures
+ * @nr_segs: number of segments to pin
+ * @write: pinning for read/write, currently ignored
+ * @pages: array that receives pointers to the pages pinned.
+ * Should be at least nr_segs long.
+ *
+ * Returns the number of pages pinned. This may be fewer than the number
+ * requested. If nr_segs is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with.
+ */
+int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
+ struct page **pages)
+{
+ int seg;
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
+ return seg;
+
+ pages[seg] = kmap_to_page(kiov[seg].iov_base);
+ page_cache_get(pages[seg]);
+ }
+
+ return seg;
+}
+EXPORT_SYMBOL_GPL(get_kernel_pages);
+
+/*
+ * get_kernel_page() - pin a kernel page in memory
+ * @start: starting kernel address
+ * @write: pinning for read/write, currently ignored
+ * @pages: array that receives a pointer to the page pinned.
+ * Must have room for one struct page pointer.
+ *
+ * Returns 1 if page is pinned. If the page was not pinned, returns
+ * -errno. The page returned must be released with a put_page() call
+ * when it is finished with.
+ */
+int get_kernel_page(unsigned long start, int write, struct page **pages)
+{
+ const struct kvec kiov = {
+ .iov_base = (void *)start,
+ .iov_len = PAGE_SIZE
+ };
+
+ return get_kernel_pages(&kiov, 1, write, pages);
+}
+EXPORT_SYMBOL_GPL(get_kernel_page);
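A hedged caller-side sketch of the new interface (illustrative only; pin_one_kernel_page and buf are hypothetical, and buf is assumed to be page-aligned and PAGE_SIZE long): pin the page backing a kernel buffer, use it, then drop the reference with put_page().

#include <linux/mm.h>
#include <linux/pagemap.h>

static void pin_one_kernel_page(void *buf)
{
	struct page *page;

	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
		/* ... hand 'page' to code that expects a struct page ... */
		put_page(page);		/* release the pinned reference */
	}
}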
+
static void pagevec_lru_move_fn(struct pagevec *pvec,
void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
void *arg)
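
A minimal usage sketch for the get_kernel_pages()/get_kernel_page() helpers added above (not part of the patch; the buffer and function names are made up, and it assumes the declarations from this series are visible via linux/mm.h):

#include <linux/mm.h>
#include <linux/uio.h>

/* Pin two page-sized, page-aligned kernel buffers for I/O. */
static int example_pin_two_pages(void *buf0, void *buf1, struct page **pages)
{
	const struct kvec kiov[2] = {
		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
	};
	int pinned, i;

	pinned = get_kernel_pages(kiov, 2, 0, pages);	/* write is currently ignored */

	/* ... submit pages[0..pinned-1] for I/O ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* each page must be released when done */

	return pinned;
}
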
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4c5ff7f284d9..0cb36fb1f61c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>
@@ -26,7 +27,7 @@
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
- .set_page_dirty = __set_page_dirty_no_writeback,
+ .set_page_dirty = swap_set_page_dirty,
.migratepage = migrate_page,
};
@@ -376,6 +377,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
unsigned long offset = swp_offset(entry);
unsigned long start_offset, end_offset;
unsigned long mask = (1UL << page_cluster) - 1;
+ struct blk_plug plug;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
@@ -383,6 +385,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!start_offset) /* First page is swap header. */
start_offset++;
+ blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
@@ -391,6 +394,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
continue;
page_cache_release(page);
}
+ blk_finish_plug(&plug);
+
lru_add_drain(); /* Push any new pages onto the LRU now */
return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 71373d03fcee..14e254c768fc 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
+#include <linux/export.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -548,7 +549,6 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
/* free if no reference */
if (!usage) {
- struct gendisk *disk = p->bdev->bd_disk;
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
@@ -559,9 +559,12 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
nr_swap_pages++;
p->inuse_pages--;
frontswap_invalidate_page(p->type, offset);
- if ((p->flags & SWP_BLKDEV) &&
- disk->fops->swap_slot_free_notify)
- disk->fops->swap_slot_free_notify(p->bdev, offset);
+ if (p->flags & SWP_BLKDEV) {
+ struct gendisk *disk = p->bdev->bd_disk;
+ if (disk->fops->swap_slot_free_notify)
+ disk->fops->swap_slot_free_notify(p->bdev,
+ offset);
+ }
}
return usage;
@@ -832,8 +835,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
- if (ret > 0)
- mem_cgroup_cancel_charge_swapin(memcg);
+ mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
}
@@ -1328,6 +1330,14 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
list_del(&se->list);
kfree(se);
}
+
+ if (sis->flags & SWP_FILE) {
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+
+ sis->flags &= ~SWP_FILE;
+ mapping->a_ops->swap_deactivate(swap_file);
+ }
}
/*
@@ -1336,7 +1346,7 @@ static void destroy_swap_extents(struct swap_info_struct *sis)
*
* This function rather assumes that it is called in ascending page order.
*/
-static int
+int
add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
{
@@ -1409,98 +1419,28 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
*/
static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
{
- struct inode *inode;
- unsigned blocks_per_page;
- unsigned long page_no;
- unsigned blkbits;
- sector_t probe_block;
- sector_t last_block;
- sector_t lowest_block = -1;
- sector_t highest_block = 0;
- int nr_extents = 0;
+ struct file *swap_file = sis->swap_file;
+ struct address_space *mapping = swap_file->f_mapping;
+ struct inode *inode = mapping->host;
int ret;
- inode = sis->swap_file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
ret = add_swap_extent(sis, 0, sis->max, 0);
*span = sis->pages;
- goto out;
+ return ret;
}
- blkbits = inode->i_blkbits;
- blocks_per_page = PAGE_SIZE >> blkbits;
-
- /*
- * Map all the blocks into the extent list. This code doesn't try
- * to be very smart.
- */
- probe_block = 0;
- page_no = 0;
- last_block = i_size_read(inode) >> blkbits;
- while ((probe_block + blocks_per_page) <= last_block &&
- page_no < sis->max) {
- unsigned block_in_page;
- sector_t first_block;
-
- first_block = bmap(inode, probe_block);
- if (first_block == 0)
- goto bad_bmap;
-
- /*
- * It must be PAGE_SIZE aligned on-disk
- */
- if (first_block & (blocks_per_page - 1)) {
- probe_block++;
- goto reprobe;
- }
-
- for (block_in_page = 1; block_in_page < blocks_per_page;
- block_in_page++) {
- sector_t block;
-
- block = bmap(inode, probe_block + block_in_page);
- if (block == 0)
- goto bad_bmap;
- if (block != first_block + block_in_page) {
- /* Discontiguity */
- probe_block++;
- goto reprobe;
- }
- }
-
- first_block >>= (PAGE_SHIFT - blkbits);
- if (page_no) { /* exclude the header page */
- if (first_block < lowest_block)
- lowest_block = first_block;
- if (first_block > highest_block)
- highest_block = first_block;
+ if (mapping->a_ops->swap_activate) {
+ ret = mapping->a_ops->swap_activate(sis, swap_file, span);
+ if (!ret) {
+ sis->flags |= SWP_FILE;
+ ret = add_swap_extent(sis, 0, sis->max, 0);
+ *span = sis->pages;
}
+ return ret;
+ }
- /*
- * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
- */
- ret = add_swap_extent(sis, page_no, 1, first_block);
- if (ret < 0)
- goto out;
- nr_extents += ret;
- page_no++;
- probe_block += blocks_per_page;
-reprobe:
- continue;
- }
- ret = nr_extents;
- *span = 1 + highest_block - lowest_block;
- if (page_no == 0)
- page_no = 1; /* force Empty message */
- sis->max = page_no;
- sis->pages = page_no - 1;
- sis->highest_bit = page_no - 1;
-out:
- return ret;
-bad_bmap:
- printk(KERN_ERR "swapon: swapfile has holes\n");
- ret = -EINVAL;
- goto out;
+ return generic_swapfile_activate(sis, swap_file, span);
}
static void enable_swap_info(struct swap_info_struct *p, int prio,
@@ -2285,6 +2225,31 @@ int swapcache_prepare(swp_entry_t entry)
return __swap_duplicate(entry, SWAP_HAS_CACHE);
}
+struct swap_info_struct *page_swap_info(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ BUG_ON(!PageSwapCache(page));
+ return swap_info[swp_type(swap)];
+}
+
+/*
+ * out-of-line __page_file_ methods to avoid include hell.
+ */
+struct address_space *__page_file_mapping(struct page *page)
+{
+ VM_BUG_ON(!PageSwapCache(page));
+ return page_swap_info(page)->swap_file->f_mapping;
+}
+EXPORT_SYMBOL_GPL(__page_file_mapping);
+
+pgoff_t __page_file_index(struct page *page)
+{
+ swp_entry_t swap = { .val = page_private(page) };
+ VM_BUG_ON(!PageSwapCache(page));
+ return swp_offset(swap);
+}
+EXPORT_SYMBOL_GPL(__page_file_index);
+
/*
* add_swap_count_continuation - called when a swap count is duplicated
* beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
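
For context, a hedged sketch (not from this patch) of how a filesystem might wire up the ->swap_activate/->swap_deactivate hooks that setup_swap_extents() and destroy_swap_extents() now call; the filesystem name and validation are illustrative only, and the signatures are taken from the calls above:

#include <linux/fs.h>
#include <linux/swap.h>

/* Illustrative only: returning 0 from ->swap_activate makes the core set
 * SWP_FILE and add a single extent covering the file, bypassing bmap(). */
static int examplefs_swap_activate(struct swap_info_struct *sis,
				   struct file *file, sector_t *span)
{
	/* Validate that the file can service page-sized swap I/O. */
	if (i_size_read(file->f_mapping->host) < PAGE_SIZE)
		return -EINVAL;
	return 0;
}

static void examplefs_swap_deactivate(struct file *file)
{
	/* Tear down whatever ->swap_activate() set up. */
}

static const struct address_space_operations examplefs_aops = {
	/* ... regular read/write aops ... */
	.swap_activate	 = examplefs_swap_activate,
	.swap_deactivate = examplefs_swap_deactivate,
};
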
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2aad49981b57..2bb90b1d241c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -413,11 +413,11 @@ nocache:
if (addr + size - 1 < addr)
goto overflow;
- n = rb_next(&first->rb_node);
- if (n)
- first = rb_entry(n, struct vmap_area, rb_node);
- else
+ if (list_is_last(&first->list, &vmap_area_list))
goto found;
+
+ first = list_entry(first->list.next,
+ struct vmap_area, list);
}
found:
@@ -904,6 +904,14 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
BUG_ON(size & ~PAGE_MASK);
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+ if (WARN_ON(size == 0)) {
+ /*
+ * Allocating 0 bytes isn't what the caller wants, since
+ * get_order(0) returns a funny result. Just warn and terminate
+ * early.
+ */
+ return NULL;
+ }
order = get_order(size);
again:
@@ -1280,7 +1288,7 @@ DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
vm->flags = flags;
vm->addr = (void *)va->va_start;
@@ -1306,7 +1314,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
}
static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
- unsigned long flags, void *caller)
+ unsigned long flags, const void *caller)
{
setup_vmalloc_vm(vm, va, flags, caller);
insert_vmalloc_vmlist(vm);
@@ -1314,7 +1322,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
static struct vm_struct *__get_vm_area_node(unsigned long size,
unsigned long align, unsigned long flags, unsigned long start,
- unsigned long end, int node, gfp_t gfp_mask, void *caller)
+ unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
struct vmap_area *va;
struct vm_struct *area;
@@ -1375,7 +1383,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
caller);
@@ -1397,13 +1405,21 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
- void *caller)
+ const void *caller)
{
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-1, GFP_KERNEL, caller);
}
-static struct vm_struct *find_vm_area(const void *addr)
+/**
+ * find_vm_area - find a continuous kernel virtual area
+ * @addr: base address
+ *
+ * Search for the kernel VM area starting at @addr, and return it.
+ * It is up to the caller to do all required locking to keep the returned
+ * pointer valid.
+ */
+struct vm_struct *find_vm_area(const void *addr)
{
struct vmap_area *va;
@@ -1568,9 +1584,9 @@ EXPORT_SYMBOL(vmap);
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller);
+ int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
const int order = 0;
struct page **pages;
@@ -1643,7 +1659,7 @@ fail:
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, void *caller)
+ pgprot_t prot, int node, const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1699,7 +1715,7 @@ fail:
*/
static void *__vmalloc_node(unsigned long size, unsigned long align,
gfp_t gfp_mask, pgprot_t prot,
- int node, void *caller)
+ int node, const void *caller)
{
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
gfp_mask, prot, node, caller);
@@ -1975,9 +1991,7 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) does not include any intersection with a live
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be a kernel buffer.
*
* Note: In usual ops, vread() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
@@ -2051,9 +2065,7 @@ finished:
* IOREMAP area is treated as memory hole and no copy is done.
*
* If [addr...addr+count) does not include any intersection with a live
- * vm_struct area, returns 0.
- * @buf should be kernel's buffer. Because this function uses KM_USER0,
- * the caller should guarantee KM_USER0 is not used.
+ * vm_struct area, returns 0. @buf should be a kernel buffer.
*
* Note: In usual ops, vwrite() is never necessary because the caller
* should know vmalloc() area is valid and can use memcpy().
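
A short, hedged usage sketch for find_vm_area() now that it is no longer static (assuming the declaration is exported through linux/vmalloc.h; the helper name is made up):

#include <linux/vmalloc.h>

/* Return the size of the vm_struct backing a vmalloc'ed address, or 0. */
static unsigned long example_vm_area_size(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	/* Caller must ensure the area cannot be freed concurrently. */
	return area ? area->size : 0;
}
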
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 347b3ff2a478..8d01243d9560 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -133,7 +133,7 @@ long vm_total_pages; /* The total number of pages which the VM controls */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
@@ -687,6 +687,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
cond_resched();
+ mem_cgroup_uncharge_start();
while (!list_empty(page_list)) {
enum page_references references;
struct address_space *mapping;
@@ -720,9 +721,41 @@ static unsigned long shrink_page_list(struct list_head *page_list,
(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
if (PageWriteback(page)) {
- nr_writeback++;
- unlock_page(page);
- goto keep;
+ /*
+ * memcg doesn't have any dirty pages throttling so we
+ * could easily OOM just because too many pages are in
+ * writeback and there is nothing else to reclaim.
+ *
+ * Check __GFP_IO, certainly because a loop driver
+ * thread might enter reclaim, and deadlock if it waits
+ * on a page for which it is needed to do the write
+ * (loop masks off __GFP_IO|__GFP_FS for this reason);
+ * but more thought would probably show more reasons.
+ *
+ * Don't require __GFP_FS, since we're not going into
+ * the FS, just waiting on its writeback completion.
+ * Worryingly, ext4, gfs2 and xfs allocate pages with
+ * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so
+ * testing may_enter_fs here is liable to OOM on them.
+ */
+ if (global_reclaim(sc) ||
+ !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+ /*
+ * This is slightly racy - end_page_writeback()
+ * might have just cleared PageReclaim, then
+ * setting PageReclaim here ends up being interpreted
+ * as PageReadahead - but that does not matter
+ * enough to care. What we do want is for this
+ * page to have PageReclaim set next time memcg
+ * reclaim reaches the tests above, so it will
+ * then wait_on_page_writeback() to avoid OOM;
+ * and it's also appropriate in global reclaim.
+ */
+ SetPageReclaim(page);
+ nr_writeback++;
+ goto keep_locked;
+ }
+ wait_on_page_writeback(page);
}
references = page_check_references(page, sc);
@@ -921,6 +954,7 @@ keep:
list_splice(&ret_pages, page_list);
count_vm_events(PGACTIVATE, pgactivate);
+ mem_cgroup_uncharge_end();
*ret_nr_dirty += nr_dirty;
*ret_nr_writeback += nr_writeback;
return nr_reclaimed;
@@ -2112,6 +2146,83 @@ out:
return 0;
}
+static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
+{
+ struct zone *zone;
+ unsigned long pfmemalloc_reserve = 0;
+ unsigned long free_pages = 0;
+ int i;
+ bool wmark_ok;
+
+ for (i = 0; i <= ZONE_NORMAL; i++) {
+ zone = &pgdat->node_zones[i];
+ pfmemalloc_reserve += min_wmark_pages(zone);
+ free_pages += zone_page_state(zone, NR_FREE_PAGES);
+ }
+
+ wmark_ok = free_pages > pfmemalloc_reserve / 2;
+
+ /* kswapd must be awake if processes are being throttled */
+ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
+ pgdat->classzone_idx = min(pgdat->classzone_idx,
+ (enum zone_type)ZONE_NORMAL);
+ wake_up_interruptible(&pgdat->kswapd_wait);
+ }
+
+ return wmark_ok;
+}
+
+/*
+ * Throttle direct reclaimers if backing storage is backed by the network
+ * and the PFMEMALLOC reserve for the preferred node is getting dangerously
+ * depleted. kswapd will continue to make progress and wake the processes
+ * when the low watermark is reached
+ */
+static void throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
+ nodemask_t *nodemask)
+{
+ struct zone *zone;
+ int high_zoneidx = gfp_zone(gfp_mask);
+ pg_data_t *pgdat;
+
+ /*
+ * Kernel threads should not be throttled as they may be indirectly
+ * responsible for cleaning pages necessary for reclaim to make forward
+ * progress. kjournald for example may enter direct reclaim while
+ * committing a transaction where throttling it could force other
+ * processes to block on log_wait_commit().
+ */
+ if (current->flags & PF_KTHREAD)
+ return;
+
+ /* Check if the pfmemalloc reserves are ok */
+ first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
+ pgdat = zone->zone_pgdat;
+ if (pfmemalloc_watermark_ok(pgdat))
+ return;
+
+ /* Account for the throttling */
+ count_vm_event(PGSCAN_DIRECT_THROTTLE);
+
+ /*
+ * If the caller cannot enter the filesystem, it's possible that it
+ * is due to the caller holding an FS lock or performing a journal
+ * transaction in the case of a filesystem like ext[3|4]. In this case,
+ * it is not safe to block on pfmemalloc_wait as kswapd could be
+ * blocked waiting on the same lock. Instead, throttle for up to a
+ * second before continuing.
+ */
+ if (!(gfp_mask & __GFP_FS)) {
+ wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
+ pfmemalloc_watermark_ok(pgdat), HZ);
+ return;
+ }
+
+ /* Throttle until kswapd wakes the process */
+ wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
+ pfmemalloc_watermark_ok(pgdat));
+}
+
unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *nodemask)
{
@@ -2131,6 +2242,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.gfp_mask = sc.gfp_mask,
};
+ throttle_direct_reclaim(gfp_mask, zonelist, nodemask);
+
+ /*
+ * Do not enter reclaim if a fatal signal is pending. 1 is returned so
+ * that the page allocator does not consider triggering OOM.
+ */
+ if (fatal_signal_pending(current))
+ return 1;
+
trace_mm_vmscan_direct_reclaim_begin(order,
sc.may_writepage,
gfp_mask);
@@ -2142,7 +2262,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
return nr_reclaimed;
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
gfp_t gfp_mask, bool noswap,
@@ -2275,8 +2395,13 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
return balanced_pages >= (present_pages >> 2);
}
-/* is kswapd sleeping prematurely? */
-static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
+/*
+ * Prepare kswapd for sleeping. This verifies that there are no processes
+ * waiting in throttle_direct_reclaim() and that watermarks have been met.
+ *
+ * Returns true if kswapd is ready to sleep
+ */
+static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
int classzone_idx)
{
int i;
@@ -2285,7 +2410,21 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
/* If a direct reclaimer woke kswapd within HZ/10, it's premature */
if (remaining)
- return true;
+ return false;
+
+ /*
+ * There is a potential race between when kswapd checks its watermarks
+ * and a process gets throttled. There is also a potential race if
+ * processes get throttled, kswapd wakes, a large process exits thereby
+ * balancing the zones, which causes kswapd to miss a wakeup. If kswapd
+ * is going to sleep, no process should be sleeping on pfmemalloc_wait
+ * so wake them now if necessary. If necessary, processes will wake
+ * kswapd and get throttled again
+ */
+ if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
+ wake_up(&pgdat->pfmemalloc_wait);
+ return false;
+ }
/* Check the watermark levels */
for (i = 0; i <= classzone_idx; i++) {
@@ -2318,9 +2457,9 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
* must be balanced
*/
if (order)
- return !pgdat_balanced(pgdat, balanced, classzone_idx);
+ return pgdat_balanced(pgdat, balanced, classzone_idx);
else
- return !all_zones_ok;
+ return all_zones_ok;
}
/*
@@ -2546,6 +2685,16 @@ loop_again:
}
}
+
+ /*
+ * If the low watermark is met there is no need for processes
+ * to be throttled on pfmemalloc_wait as they should now be
+ * able to safely make forward progress. Wake them
+ */
+ if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
+ pfmemalloc_watermark_ok(pgdat))
+ wake_up(&pgdat->pfmemalloc_wait);
+
if (all_zones_ok || (order && pgdat_balanced(pgdat, balanced, *classzone_idx)))
break; /* kswapd: all done */
/*
@@ -2647,7 +2796,7 @@ out:
}
/*
- * Return the order we were reclaiming at so sleeping_prematurely()
+ * Return the order we were reclaiming at so prepare_kswapd_sleep()
* makes a decision on the order we were last reclaiming at. However,
* if another caller entered the allocator slow path while kswapd
* was awake, order will remain at the higher level
@@ -2667,7 +2816,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
/* Try to sleep for a short interval */
- if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -2677,7 +2826,7 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
* After a short sleep, check if it was a premature sleep. If not, then
* go fully to sleep until explicitly woken up.
*/
- if (!sleeping_prematurely(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
/*
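
A worked example (made-up numbers) of the pfmemalloc throttling threshold computed in pfmemalloc_watermark_ok() above:

/*
 * Suppose ZONE_DMA and ZONE_NORMAL have min watermarks of 256 and 1024
 * pages, so pfmemalloc_reserve = 1280. Direct reclaimers are throttled
 * once the combined free pages of those zones drop to 640 or fewer,
 * because wmark_ok = free_pages > pfmemalloc_reserve / 2 then fails.
 */
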
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1bbbbd9776ad..df7a6748231d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -745,6 +745,7 @@ const char * const vmstat_text[] = {
TEXTS_FOR_ZONES("pgsteal_direct")
TEXTS_FOR_ZONES("pgscan_kswapd")
TEXTS_FOR_ZONES("pgscan_direct")
+ "pgscan_direct_throttle",
#ifdef CONFIG_NUMA
"zone_reclaim_failed",
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 73a2a83ee2da..402442402af7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
return rc;
}
+static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ if (vlan->netpoll)
+ netpoll_send_skb(vlan->netpoll, skb);
+#else
+ BUG();
+#endif
+ return NETDEV_TX_OK;
+}
+
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
unsigned int len;
int ret;
@@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
* OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
*/
if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
- vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) {
+ vlan->flags & VLAN_FLAG_REORDER_HDR) {
u16 vlan_tci;
- vlan_tci = vlan_dev_priv(dev)->vlan_id;
+ vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
}
- skb->dev = vlan_dev_priv(dev)->real_dev;
+ skb->dev = vlan->real_dev;
len = skb->len;
- if (netpoll_tx_running(dev))
- return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+ if (unlikely(netpoll_tx_running(dev)))
+ return vlan_netpoll_send_skb(vlan, skb);
+
ret = dev_queue_xmit(skb);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *stats;
- stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats);
+ stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
} else {
- this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped);
+ this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped);
}
return ret;
@@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev)
return;
}
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
+ gfp_t gfp)
{
- struct vlan_dev_priv *info = vlan_dev_priv(dev);
- struct net_device *real_dev = info->real_dev;
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
struct netpoll *netpoll;
int err = 0;
- netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+ netpoll = kzalloc(sizeof(*netpoll), gfp);
err = -ENOMEM;
if (!netpoll)
goto out;
- err = __netpoll_setup(netpoll, real_dev);
+ err = __netpoll_setup(netpoll, real_dev, gfp);
if (err) {
kfree(netpoll);
goto out;
}
- info->netpoll = netpoll;
+ vlan->netpoll = netpoll;
out:
return err;
@@ -695,19 +709,15 @@ out:
static void vlan_dev_netpoll_cleanup(struct net_device *dev)
{
- struct vlan_dev_priv *info = vlan_dev_priv(dev);
- struct netpoll *netpoll = info->netpoll;
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct netpoll *netpoll = vlan->netpoll;
if (!netpoll)
return;
- info->netpoll = NULL;
-
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
+ vlan->netpoll = NULL;
- __netpoll_cleanup(netpoll);
- kfree(netpoll);
+ __netpoll_free_rcu(netpoll);
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/net/atm/common.c b/net/atm/common.c
index b4b44dbed645..0c0ad930a632 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
return -ENOTCONN;
+ memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = vcc->dev->number;
pvc.sap_addr.vpi = vcc->vpi;
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 3a734919c36c..ae0324021407 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
return -ENOTCONN;
*sockaddr_len = sizeof(struct sockaddr_atmpvc);
addr = (struct sockaddr_atmpvc *)sockaddr;
+ memset(addr, 0, sizeof(*addr));
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index b421cc49d2cd..fc866f2e4528 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
- if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
- goto out;
-
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw)
+ goto out;
+
next_gw = batadv_gw_get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a438f4b582fc..99dd8f75b3ff 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
del:
list_del(&entry->list);
kfree(entry);
+ kfree(tt_change_node);
event_removed = true;
goto unlock;
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 41ff978a33f9..715d7e33fba0 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev)
return false;
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
+ if (!e)
+ return false;
+
if (hci_resolve_name(hdev, e) == 0) {
e->name_state = NAME_PENDING;
return true;
@@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
return;
e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
- if (e) {
+ /* If the device was not found in the list of found devices whose names
+ * are pending, there is no need to continue resolving the next name, as it
+ * will be done upon receiving another Remote Name Request Complete
+ * Event */
+ if (!e)
+ return;
+
+ list_del(&e->list);
+ if (name) {
e->name_state = NAME_KNOWN;
- list_del(&e->list);
- if (name)
- mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
- e->data.rssi, name, name_len);
+ mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
+ e->data.rssi, name, name_len);
+ } else {
+ e->name_state = NAME_NOT_KNOWN;
}
if (hci_resolve_next_name(hdev))
@@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (conn->type == ACL_LINK) {
conn->state = BT_CONFIG;
hci_conn_hold(conn);
- conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+
+ if (!conn->out && !hci_conn_ssp_enabled(conn) &&
+ !hci_find_link_key(hdev, &ev->bdaddr))
+ conn->disc_timeout = HCI_PAIRING_TIMEOUT;
+ else
+ conn->disc_timeout = HCI_DISCONN_TIMEOUT;
} else
conn->state = BT_CONNECTED;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index a7f04de03d79..19fdac78e555 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hdev->id;
+ haddr->hci_channel = 0;
release_sock(sk);
return 0;
@@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
{
struct hci_filter *f = &hci_pi(sk)->filter;
+ memset(&uf, 0, sizeof(uf));
uf.type_mask = f->type_mask;
uf.opcode = f->opcode;
uf.event_mask[0] = *((u32 *) f->event_mask + 0);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a8964db04bfb..daa149b7003c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
sk = chan->sk;
hci_conn_hold(conn->hcon);
+ conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index a4bb27e8427e..1497edd191a2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(la, 0, sizeof(struct sockaddr_l2));
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
@@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p
chan = l2cap_chan_create();
if (!chan) {
- l2cap_sock_kill(sk);
+ sk_free(sk);
return NULL;
}
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7e1e59645c05..1a17850d093c 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
BT_DBG("sock %p, sk %p", sock, sk);
+ memset(sa, 0, sizeof(*sa));
sa->rc_family = AF_BLUETOOTH;
sa->rc_channel = rfcomm_pi(sk)->channel;
if (peer)
@@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
}
sec.level = rfcomm_pi(sk)->sec_level;
+ sec.key_size = 0;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index cb960773c002..56f182393c4c 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -456,7 +456,7 @@ static int rfcomm_get_dev_list(void __user *arg)
size = sizeof(*dl) + dev_num * sizeof(*di);
- dl = kmalloc(size, GFP_KERNEL);
+ dl = kzalloc(size, GFP_KERNEL);
if (!dl)
return -ENOMEM;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 40bbe25dcff7..3589e21edb09 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
+
+ sco_conn_lock(conn);
+ conn->sk = NULL;
+ sco_pi(sk)->conn = NULL;
+ sco_conn_unlock(conn);
+
+ if (conn->hcon)
+ hci_conn_put(conn->hcon);
+
sco_sock_kill(sk);
}
@@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err)
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
- if (conn) {
- sco_conn_lock(conn);
- conn->sk = NULL;
- sco_pi(sk)->conn = NULL;
- sco_conn_unlock(conn);
-
- if (conn->hcon)
- hci_conn_put(conn->hcon);
- }
-
sk->sk_state = BT_CLOSED;
sk->sk_err = err;
sk->sk_state_change(sk);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 16ef0dc85a0a..901a616c8083 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
smp = smp_chan_create(conn);
+ else
+ smp = conn->smp_chan;
- smp = conn->smp_chan;
+ if (!smp)
+ return SMP_UNSPECIFIED;
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], req, sizeof(*req));
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 333484537600..070e8a68cfc6 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_bridge_mdb_entry *mdst;
struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
+ rcu_read_lock();
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
br_nf_pre_routing_finish_bridge_slow(skb);
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
#endif
@@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- rcu_read_lock();
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb);
else if (is_multicast_ether_addr(dest)) {
@@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev)
static void br_netpoll_cleanup(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_port *p, *n;
+ struct net_bridge_port *p;
- list_for_each_entry_safe(p, n, &br->port_list, list) {
+ list_for_each_entry(p, &br->port_list, list)
br_netpoll_disable(p);
- }
}
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
+ gfp_t gfp)
{
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_port *p, *n;
+ struct net_bridge_port *p;
int err = 0;
- list_for_each_entry_safe(p, n, &br->port_list, list) {
+ list_for_each_entry(p, &br->port_list, list) {
if (!p->dev)
continue;
-
- err = br_netpoll_enable(p);
+ err = br_netpoll_enable(p, gfp);
if (err)
goto fail;
}
@@ -236,17 +236,17 @@ fail:
goto out;
}
-int br_netpoll_enable(struct net_bridge_port *p)
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
{
struct netpoll *np;
int err = 0;
- np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+ np = kzalloc(sizeof(*p->np), gfp);
err = -ENOMEM;
if (!np)
goto out;
- err = __netpoll_setup(np, p->dev);
+ err = __netpoll_setup(np, p->dev, gfp);
if (err) {
kfree(np);
goto out;
@@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
p->np = NULL;
- /* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
-
- __netpoll_cleanup(np);
- kfree(np);
+ __netpoll_free_rcu(np);
}
#endif
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index e9466d412707..02015a505d2a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
skb->dev = to->dev;
- if (unlikely(netpoll_tx_running(to->dev))) {
+ if (unlikely(netpoll_tx_running(to->br->dev))) {
if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index e1144e1617be..1c8fdc3558cd 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
+ if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
goto err3;
err = netdev_set_master(dev, br->dev);
@@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
if (!p || p->br != br)
return -EINVAL;
+ /* Since more than one interface can be attached to a bridge,
+ * there still may be an alternate path for netconsole to use;
+ * therefore there is no reason for a NETDEV_RELEASE event.
+ */
del_nbp(p);
spin_lock_bh(&br->lock);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index a768b2408edf..f507d2af9646 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
netpoll_send_skb(np, skb);
}
-extern int br_netpoll_enable(struct net_bridge_port *p);
+extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
extern void br_netpoll_disable(struct net_bridge_port *p);
#else
static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
@@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
{
}
-static inline int br_netpoll_enable(struct net_bridge_port *p)
+static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
{
return 0;
}
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 6229b62749e8..13b36bdc76a7 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -27,7 +27,7 @@ struct brport_attribute {
};
#define BRPORT_ATTR(_name,_mode,_show,_store) \
-struct brport_attribute brport_attr_##_name = { \
+const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.show = _show, \
@@ -164,7 +164,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
store_multicast_router);
#endif
-static struct brport_attribute *brport_attrs[] = {
+static const struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
&brport_attr_port_id,
@@ -241,7 +241,7 @@ const struct sysfs_ops brport_sysfs_ops = {
int br_sysfs_addif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
- struct brport_attribute **a;
+ const struct brport_attribute **a;
int err;
err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 78f1cdad5b33..095259f83902 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -141,7 +141,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
err = sk_filter(sk, skb);
if (err)
return err;
- if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 69771c04ba8f..e597733affb8 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
/* check the version of IP */
ip_version = skb_header_pointer(skb, 0, 1, &buf);
+ if (!ip_version) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
switch (*ip_version >> 4) {
case 4:
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index ba4323bce0e9..a8020293f342 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
+#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
@@ -83,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
return -1;
}
} else {
- pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
memcpy(&client->fsid, fsid, sizeof(*fsid));
}
return 0;
@@ -460,27 +460,23 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
client->auth_err = 0;
client->extra_mon_dispatch = NULL;
- client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT |
+ client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
supported_features;
- client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT |
+ client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT |
required_features;
/* msgr */
if (ceph_test_opt(client, MYIP))
myaddr = &client->options->my_addr;
- client->msgr = ceph_messenger_create(myaddr,
- client->supported_features,
- client->required_features);
- if (IS_ERR(client->msgr)) {
- err = PTR_ERR(client->msgr);
- goto fail;
- }
- client->msgr->nocrc = ceph_test_opt(client, NOCRC);
+ ceph_messenger_init(&client->msgr, myaddr,
+ client->supported_features,
+ client->required_features,
+ ceph_test_opt(client, NOCRC));
/* subsystems */
err = ceph_monc_init(&client->monc, client);
if (err < 0)
- goto fail_msgr;
+ goto fail;
err = ceph_osdc_init(&client->osdc, client);
if (err < 0)
goto fail_monc;
@@ -489,8 +485,6 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
fail_monc:
ceph_monc_stop(&client->monc);
-fail_msgr:
- ceph_messenger_destroy(client->msgr);
fail:
kfree(client);
return ERR_PTR(err);
@@ -501,6 +495,8 @@ void ceph_destroy_client(struct ceph_client *client)
{
dout("destroy_client %p\n", client);
+ atomic_set(&client->msgr.stopping, 1);
+
/* unmount */
ceph_osdc_stop(&client->osdc);
@@ -508,8 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
ceph_debugfs_client_cleanup(client);
- ceph_messenger_destroy(client->msgr);
-
ceph_destroy_options(client->options);
kfree(client);
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index d7edc24333b8..35fce755ce10 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -306,7 +306,6 @@ static int crush_choose(const struct crush_map *map,
int item = 0;
int itemtype;
int collide, reject;
- const unsigned int orig_tries = 5; /* attempts before we fall back to search */
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
@@ -351,8 +350,9 @@ static int crush_choose(const struct crush_map *map,
reject = 1;
goto reject;
}
- if (flocal >= (in->size>>1) &&
- flocal > orig_tries)
+ if (map->choose_local_fallback_tries > 0 &&
+ flocal >= (in->size>>1) &&
+ flocal > map->choose_local_fallback_tries)
item = bucket_perm_choose(in, x, r);
else
item = crush_bucket_choose(in, x, r);
@@ -422,13 +422,14 @@ reject:
ftotal++;
flocal++;
- if (collide && flocal < 3)
+ if (collide && flocal <= map->choose_local_tries)
/* retry locally a few times */
retry_bucket = 1;
- else if (flocal <= in->size + orig_tries)
+ else if (map->choose_local_fallback_tries > 0 &&
+ flocal <= in->size + map->choose_local_fallback_tries)
/* exhaustive bucket search */
retry_bucket = 1;
- else if (ftotal < 20)
+ else if (ftotal <= map->choose_total_tries)
/* then retry descent */
retry_descent = 1;
else
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index b780cb7947dd..9da7fdd3cd8a 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) {
struct ceph_crypto_key *ckey = key->payload.data;
ceph_crypto_key_destroy(ckey);
+ kfree(ckey);
}
struct key_type key_type_ceph = {
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 1919d1550d75..3572dc518bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -16,7 +16,8 @@ struct ceph_crypto_key {
static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
- kfree(key->key);
+ if (key)
+ kfree(key->key);
}
extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 54b531a01121..38b5dc1823d4 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client)
snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
client->monc.auth->global_id);
+ dout("ceph_debugfs_client_init %p %s\n", client, name);
+
+ BUG_ON(client->debugfs_dir);
client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
if (!client->debugfs_dir)
goto out;
@@ -234,6 +237,7 @@ out:
void ceph_debugfs_client_cleanup(struct ceph_client *client)
{
+ dout("ceph_debugfs_client_cleanup %p\n", client);
debugfs_remove(client->debugfs_osdmap);
debugfs_remove(client->debugfs_monmap);
debugfs_remove(client->osdc.debugfs_file);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 10255e81be79..24c5eea8c45b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -29,6 +29,74 @@
* the sender.
*/
+/*
+ * We track the state of the socket on a given connection using
+ * values defined below. The transition to a new socket state is
+ * handled by a function which verifies we aren't coming from an
+ * unexpected state.
+ *
+ * --------
+ * | NEW* | transient initial state
+ * --------
+ * | con_sock_state_init()
+ * v
+ * ----------
+ * | CLOSED | initialized, but no socket (and no
+ * ---------- TCP connection)
+ * ^ \
+ * | \ con_sock_state_connecting()
+ * | ----------------------
+ * | \
+ * + con_sock_state_closed() \
+ * |+--------------------------- \
+ * | \ \ \
+ * | ----------- \ \
+ * | | CLOSING | socket event; \ \
+ * | ----------- await close \ \
+ * | ^ \ |
+ * | | \ |
+ * | + con_sock_state_closing() \ |
+ * | / \ | |
+ * | / --------------- | |
+ * | / \ v v
+ * | / --------------
+ * | / -----------------| CONNECTING | socket created, TCP
+ * | | / -------------- connect initiated
+ * | | | con_sock_state_connected()
+ * | | v
+ * -------------
+ * | CONNECTED | TCP connection established
+ * -------------
+ *
+ * State values for ceph_connection->sock_state; NEW is assumed to be 0.
+ */
+
+#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
+#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
+#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
+#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
+#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
+
+/*
+ * connection states
+ */
+#define CON_STATE_CLOSED 1 /* -> PREOPEN */
+#define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
+#define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
+#define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
+#define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
+#define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
+
+/*
+ * ceph_connection flag bits
+ */
+#define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
+ * messages on errors */
+#define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
+#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
+
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
@@ -147,72 +215,130 @@ void ceph_msgr_flush(void)
}
EXPORT_SYMBOL(ceph_msgr_flush);
+/* Connection socket state transition functions */
+
+static void con_sock_state_init(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSED);
+}
+
+static void con_sock_state_connecting(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CONNECTING);
+}
+
+static void con_sock_state_connected(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CONNECTED);
+}
+
+static void con_sock_state_closing(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
+ old_state != CON_SOCK_STATE_CONNECTED &&
+ old_state != CON_SOCK_STATE_CLOSING))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSING);
+}
+
+static void con_sock_state_closed(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
+ old_state != CON_SOCK_STATE_CLOSING &&
+ old_state != CON_SOCK_STATE_CONNECTING &&
+ old_state != CON_SOCK_STATE_CLOSED))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSED);
+}
/*
* socket callback functions
*/
/* data available on socket, or listen socket received a connect */
-static void ceph_data_ready(struct sock *sk, int count_unused)
+static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
struct ceph_connection *con = sk->sk_user_data;
+ if (atomic_read(&con->msgr->stopping)) {
+ return;
+ }
if (sk->sk_state != TCP_CLOSE_WAIT) {
- dout("ceph_data_ready on %p state = %lu, queueing work\n",
+ dout("%s on %p state = %lu, queueing work\n", __func__,
con, con->state);
queue_con(con);
}
}
/* socket has buffer space for writing */
-static void ceph_write_space(struct sock *sk)
+static void ceph_sock_write_space(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
/* only queue to workqueue if there is data we want to write,
* and there is sufficient space in the socket buffer to accept
- * more data. clear SOCK_NOSPACE so that ceph_write_space()
+ * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
* doesn't get called again until try_write() fills the socket
* buffer. See net/ipv4/tcp_input.c:tcp_check_space()
* and net/core/stream.c:sk_stream_write_space().
*/
- if (test_bit(WRITE_PENDING, &con->state)) {
+ if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
- dout("ceph_write_space %p queueing write work\n", con);
+ dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
}
} else {
- dout("ceph_write_space %p nothing to write\n", con);
+ dout("%s %p nothing to write\n", __func__, con);
}
}
/* socket's state has changed */
-static void ceph_state_change(struct sock *sk)
+static void ceph_sock_state_change(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
- dout("ceph_state_change %p state = %lu sk_state = %u\n",
+ dout("%s %p state = %lu sk_state = %u\n", __func__,
con, con->state, sk->sk_state);
- if (test_bit(CLOSED, &con->state))
- return;
-
switch (sk->sk_state) {
case TCP_CLOSE:
- dout("ceph_state_change TCP_CLOSE\n");
+ dout("%s TCP_CLOSE\n", __func__);
case TCP_CLOSE_WAIT:
- dout("ceph_state_change TCP_CLOSE_WAIT\n");
- if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
- if (test_bit(CONNECTING, &con->state))
- con->error_msg = "connection failed";
- else
- con->error_msg = "socket closed";
- queue_con(con);
- }
+ dout("%s TCP_CLOSE_WAIT\n", __func__);
+ con_sock_state_closing(con);
+ set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+ queue_con(con);
break;
case TCP_ESTABLISHED:
- dout("ceph_state_change TCP_ESTABLISHED\n");
+ dout("%s TCP_ESTABLISHED\n", __func__);
+ con_sock_state_connected(con);
queue_con(con);
break;
default: /* Everything else is uninteresting */
@@ -228,9 +354,9 @@ static void set_sock_callbacks(struct socket *sock,
{
struct sock *sk = sock->sk;
sk->sk_user_data = con;
- sk->sk_data_ready = ceph_data_ready;
- sk->sk_write_space = ceph_write_space;
- sk->sk_state_change = ceph_state_change;
+ sk->sk_data_ready = ceph_sock_data_ready;
+ sk->sk_write_space = ceph_sock_write_space;
+ sk->sk_state_change = ceph_sock_state_change;
}
@@ -262,6 +388,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
+ con_sock_state_connecting(con);
ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
O_NONBLOCK);
if (ret == -EINPROGRESS) {
@@ -277,7 +404,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
return ret;
}
con->sock = sock;
-
return 0;
}
@@ -333,16 +459,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
*/
static int con_close_socket(struct ceph_connection *con)
{
- int rc;
+ int rc = 0;
dout("con_close_socket on %p sock %p\n", con, con->sock);
- if (!con->sock)
- return 0;
- set_bit(SOCK_CLOSED, &con->state);
- rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
- sock_release(con->sock);
- con->sock = NULL;
- clear_bit(SOCK_CLOSED, &con->state);
+ if (con->sock) {
+ rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
+ sock_release(con->sock);
+ con->sock = NULL;
+ }
+
+ /*
+ * Forcibly clear the SOCK_CLOSED flag. It gets set
+ * independent of the connection mutex, and we could have
+ * received a socket close event before we had the chance to
+ * shut the socket down.
+ */
+ clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+
+ con_sock_state_closed(con);
return rc;
}
@@ -353,6 +487,10 @@ static int con_close_socket(struct ceph_connection *con)
static void ceph_msg_remove(struct ceph_msg *msg)
{
list_del_init(&msg->list_head);
+ BUG_ON(msg->con == NULL);
+ msg->con->ops->put(msg->con);
+ msg->con = NULL;
+
ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
@@ -372,8 +510,11 @@ static void reset_connection(struct ceph_connection *con)
ceph_msg_remove_list(&con->out_sent);
if (con->in_msg) {
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
+ con->ops->put(con);
}
con->connect_seq = 0;
@@ -391,32 +532,44 @@ static void reset_connection(struct ceph_connection *con)
*/
void ceph_con_close(struct ceph_connection *con)
{
+ mutex_lock(&con->mutex);
dout("con_close %p peer %s\n", con,
ceph_pr_addr(&con->peer_addr.in_addr));
- set_bit(CLOSED, &con->state); /* in case there's queued work */
- clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
- clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
- clear_bit(KEEPALIVE_PENDING, &con->state);
- clear_bit(WRITE_PENDING, &con->state);
- mutex_lock(&con->mutex);
+ con->state = CON_STATE_CLOSED;
+
+ clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
+ clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
+ clear_bit(CON_FLAG_BACKOFF, &con->flags);
+
reset_connection(con);
con->peer_global_seq = 0;
cancel_delayed_work(&con->work);
+ con_close_socket(con);
mutex_unlock(&con->mutex);
- queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
/*
* Reopen a closed connection, with a new peer address.
*/
-void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
+void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
+ struct ceph_entity_addr *addr)
{
+ mutex_lock(&con->mutex);
dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
- set_bit(OPENING, &con->state);
- clear_bit(CLOSED, &con->state);
+
+ BUG_ON(con->state != CON_STATE_CLOSED);
+ con->state = CON_STATE_PREOPEN;
+
+ con->peer_name.type = (__u8) entity_type;
+ con->peer_name.num = cpu_to_le64(entity_num);
+
memcpy(&con->peer_addr, addr, sizeof(*addr));
con->delay = 0; /* reset backoff memory */
+ mutex_unlock(&con->mutex);
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
@@ -430,42 +583,26 @@ bool ceph_con_opened(struct ceph_connection *con)
}
/*
- * generic get/put
- */
-struct ceph_connection *ceph_con_get(struct ceph_connection *con)
-{
- int nref = __atomic_add_unless(&con->nref, 1, 0);
-
- dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);
-
- return nref ? con : NULL;
-}
-
-void ceph_con_put(struct ceph_connection *con)
-{
- int nref = atomic_dec_return(&con->nref);
-
- BUG_ON(nref < 0);
- if (nref == 0) {
- BUG_ON(con->sock);
- kfree(con);
- }
- dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
-}
-
-/*
* initialize a new connection.
*/
-void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
+void ceph_con_init(struct ceph_connection *con, void *private,
+ const struct ceph_connection_operations *ops,
+ struct ceph_messenger *msgr)
{
dout("con_init %p\n", con);
memset(con, 0, sizeof(*con));
- atomic_set(&con->nref, 1);
+ con->private = private;
+ con->ops = ops;
con->msgr = msgr;
+
+ con_sock_state_init(con);
+
mutex_init(&con->mutex);
INIT_LIST_HEAD(&con->out_queue);
INIT_LIST_HEAD(&con->out_sent);
INIT_DELAYED_WORK(&con->work, con_work);
+
+ con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
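[Note: the hunks above replace the messenger-owned nref get/put with caller-supplied connection operations, and ceph_con_open() now carries the peer's entity type and number instead of the caller poking con->peer_name. A minimal caller sketch against the new signatures; the my_client structure and its ops are illustrative stand-ins, not part of the patch.]

#include <linux/ceph/messenger.h>
#include <linux/ceph/msgr.h>

struct my_client {
	struct ceph_connection con;	/* embedded; freed with its owner */
	struct ceph_messenger *msgr;
};

/* connection lifetime is tied to my_client, so get/put can be no-ops */
static struct ceph_connection *my_con_get(struct ceph_connection *con)
{
	return con;
}

static void my_con_put(struct ceph_connection *con)
{
}

static void my_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	ceph_msg_put(msg);	/* a real handler would inspect msg first */
}

static const struct ceph_connection_operations my_con_ops = {
	.get		= my_con_get,
	.put		= my_con_put,
	.dispatch	= my_dispatch,
};

static void my_client_connect(struct my_client *c, struct ceph_entity_addr *addr)
{
	/* private pointer, ops and messenger are wired in at init time now */
	ceph_con_init(&c->con, c, &my_con_ops, c->msgr);

	/* entity type/number travel with the open call */
	ceph_con_open(&c->con, CEPH_ENTITY_TYPE_OSD, 0 /* osd id */, addr);
}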
@@ -486,14 +623,14 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
return ret;
}
-static void ceph_con_out_kvec_reset(struct ceph_connection *con)
+static void con_out_kvec_reset(struct ceph_connection *con)
{
con->out_kvec_left = 0;
con->out_kvec_bytes = 0;
con->out_kvec_cur = &con->out_kvec[0];
}
-static void ceph_con_out_kvec_add(struct ceph_connection *con,
+static void con_out_kvec_add(struct ceph_connection *con,
size_t size, void *data)
{
int index;
@@ -507,6 +644,53 @@ static void ceph_con_out_kvec_add(struct ceph_connection *con,
con->out_kvec_bytes += size;
}
+#ifdef CONFIG_BLOCK
+static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+{
+ if (!bio) {
+ *iter = NULL;
+ *seg = 0;
+ return;
+ }
+ *iter = bio;
+ *seg = bio->bi_idx;
+}
+
+static void iter_bio_next(struct bio **bio_iter, int *seg)
+{
+ if (*bio_iter == NULL)
+ return;
+
+ BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+
+ (*seg)++;
+ if (*seg == (*bio_iter)->bi_vcnt)
+ init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
+}
+#endif
+
+static void prepare_write_message_data(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->out_msg;
+
+ BUG_ON(!msg);
+ BUG_ON(!msg->hdr.data_len);
+
+ /* initialize page iterator */
+ con->out_msg_pos.page = 0;
+ if (msg->pages)
+ con->out_msg_pos.page_pos = msg->page_alignment;
+ else
+ con->out_msg_pos.page_pos = 0;
+#ifdef CONFIG_BLOCK
+ if (msg->bio)
+ init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
+#endif
+ con->out_msg_pos.data_pos = 0;
+ con->out_msg_pos.did_page_crc = false;
+ con->out_more = 1; /* data + footer will follow */
+}
+
/*
* Prepare footer for currently outgoing message, and finish things
* off. Assumes out_kvec* are already valid.. we just add on to the end.
@@ -516,6 +700,8 @@ static void prepare_write_message_footer(struct ceph_connection *con)
struct ceph_msg *m = con->out_msg;
int v = con->out_kvec_left;
+ m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+
dout("prepare_write_message_footer %p\n", con);
con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
@@ -534,7 +720,7 @@ static void prepare_write_message(struct ceph_connection *con)
struct ceph_msg *m;
u32 crc;
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
con->out_kvec_is_msg = true;
con->out_msg_done = false;
@@ -542,14 +728,16 @@ static void prepare_write_message(struct ceph_connection *con)
* TCP packet that's a good thing. */
if (con->in_seq > con->in_seq_acked) {
con->in_seq_acked = con->in_seq;
- ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
+ con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
- ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
+ con_out_kvec_add(con, sizeof (con->out_temp_ack),
&con->out_temp_ack);
}
+ BUG_ON(list_empty(&con->out_queue));
m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
con->out_msg = m;
+ BUG_ON(m->con != con);
/* put message on sent list */
ceph_msg_get(m);
@@ -576,18 +764,18 @@ static void prepare_write_message(struct ceph_connection *con)
BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
/* tag + hdr + front + middle */
- ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
- ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
- ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+ con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+ con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
+ con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
if (m->middle)
- ceph_con_out_kvec_add(con, m->middle->vec.iov_len,
+ con_out_kvec_add(con, m->middle->vec.iov_len,
m->middle->vec.iov_base);
/* fill in crc (except data pages), footer */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
con->out_msg->hdr.crc = cpu_to_le32(crc);
- con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
+ con->out_msg->footer.flags = 0;
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
con->out_msg->footer.front_crc = cpu_to_le32(crc);
@@ -597,28 +785,19 @@ static void prepare_write_message(struct ceph_connection *con)
con->out_msg->footer.middle_crc = cpu_to_le32(crc);
} else
con->out_msg->footer.middle_crc = 0;
- con->out_msg->footer.data_crc = 0;
- dout("prepare_write_message front_crc %u data_crc %u\n",
+ dout("%s front_crc %u middle_crc %u\n", __func__,
le32_to_cpu(con->out_msg->footer.front_crc),
le32_to_cpu(con->out_msg->footer.middle_crc));
/* is there a data payload? */
- if (le32_to_cpu(m->hdr.data_len) > 0) {
- /* initialize page iterator */
- con->out_msg_pos.page = 0;
- if (m->pages)
- con->out_msg_pos.page_pos = m->page_alignment;
- else
- con->out_msg_pos.page_pos = 0;
- con->out_msg_pos.data_pos = 0;
- con->out_msg_pos.did_page_crc = false;
- con->out_more = 1; /* data + footer will follow */
- } else {
+ con->out_msg->footer.data_crc = 0;
+ if (m->hdr.data_len)
+ prepare_write_message_data(con);
+ else
/* no, queue up footer too and be done */
prepare_write_message_footer(con);
- }
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -630,16 +809,16 @@ static void prepare_write_ack(struct ceph_connection *con)
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
+ con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
- ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
+ con_out_kvec_add(con, sizeof (con->out_temp_ack),
&con->out_temp_ack);
con->out_more = 1; /* more will follow.. eventually.. */
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -648,9 +827,9 @@ static void prepare_write_ack(struct ceph_connection *con)
static void prepare_write_keepalive(struct ceph_connection *con)
{
dout("prepare_write_keepalive %p\n", con);
- ceph_con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
- set_bit(WRITE_PENDING, &con->state);
+ con_out_kvec_reset(con);
+ con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -665,27 +844,21 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
if (!con->ops->get_authorizer) {
con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
con->out_connect.authorizer_len = 0;
-
return NULL;
}
/* Can't hold the mutex while getting authorizer */
-
mutex_unlock(&con->mutex);
-
auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
-
mutex_lock(&con->mutex);
if (IS_ERR(auth))
return auth;
- if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+ if (con->state != CON_STATE_NEGOTIATING)
return ERR_PTR(-EAGAIN);
con->auth_reply_buf = auth->authorizer_reply_buf;
con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
-
-
return auth;
}
@@ -694,12 +867,12 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
*/
static void prepare_write_banner(struct ceph_connection *con)
{
- ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
- ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+ con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
+ con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
&con->msgr->my_enc_addr);
con->out_more = 0;
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
static int prepare_write_connect(struct ceph_connection *con)
@@ -742,14 +915,14 @@ static int prepare_write_connect(struct ceph_connection *con)
con->out_connect.authorizer_len = auth ?
cpu_to_le32(auth->authorizer_buf_len) : 0;
- ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+ con_out_kvec_add(con, sizeof (con->out_connect),
&con->out_connect);
if (auth && auth->authorizer_buf_len)
- ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+ con_out_kvec_add(con, auth->authorizer_buf_len,
auth->authorizer_buf);
con->out_more = 0;
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
return 0;
}
@@ -797,30 +970,34 @@ out:
return ret; /* done! */
}
-#ifdef CONFIG_BLOCK
-static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
+ size_t len, size_t sent, bool in_trail)
{
- if (!bio) {
- *iter = NULL;
- *seg = 0;
- return;
- }
- *iter = bio;
- *seg = bio->bi_idx;
-}
+ struct ceph_msg *msg = con->out_msg;
-static void iter_bio_next(struct bio **bio_iter, int *seg)
-{
- if (*bio_iter == NULL)
- return;
+ BUG_ON(!msg);
+ BUG_ON(!sent);
- BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+ con->out_msg_pos.data_pos += sent;
+ con->out_msg_pos.page_pos += sent;
+ if (sent < len)
+ return;
- (*seg)++;
- if (*seg == (*bio_iter)->bi_vcnt)
- init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
-}
+ BUG_ON(sent != len);
+ con->out_msg_pos.page_pos = 0;
+ con->out_msg_pos.page++;
+ con->out_msg_pos.did_page_crc = false;
+ if (in_trail)
+ list_move_tail(&page->lru,
+ &msg->trail->head);
+ else if (msg->pagelist)
+ list_move_tail(&page->lru,
+ &msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+ else if (msg->bio)
+ iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
+}
/*
* Write as much message data payload as we can. If we finish, queue
@@ -837,41 +1014,36 @@ static int write_partial_msg_pages(struct ceph_connection *con)
bool do_datacrc = !con->msgr->nocrc;
int ret;
int total_max_write;
- int in_trail = 0;
- size_t trail_len = (msg->trail ? msg->trail->length : 0);
+ bool in_trail = false;
+ const size_t trail_len = (msg->trail ? msg->trail->length : 0);
+ const size_t trail_off = data_len - trail_len;
dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
- con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
+ con, msg, con->out_msg_pos.page, msg->nr_pages,
con->out_msg_pos.page_pos);
-#ifdef CONFIG_BLOCK
- if (msg->bio && !msg->bio_iter)
- init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
-#endif
-
+ /*
+ * Iterate through each page that contains data to be
+ * written, and send as much as possible for each.
+ *
+ * If we are calculating the data crc (the default), we will
+ * need to map the page. If we have no pages, they have
+ * been revoked, so use the zero page.
+ */
while (data_len > con->out_msg_pos.data_pos) {
struct page *page = NULL;
int max_write = PAGE_SIZE;
int bio_offset = 0;
- total_max_write = data_len - trail_len -
- con->out_msg_pos.data_pos;
-
- /*
- * if we are calculating the data crc (the default), we need
- * to map the page. if our pages[] has been revoked, use the
- * zero page.
- */
-
- /* have we reached the trail part of the data? */
- if (con->out_msg_pos.data_pos >= data_len - trail_len) {
- in_trail = 1;
+ in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
+ if (!in_trail)
+ total_max_write = trail_off - con->out_msg_pos.data_pos;
+ if (in_trail) {
total_max_write = data_len - con->out_msg_pos.data_pos;
page = list_first_entry(&msg->trail->head,
struct page, lru);
- max_write = PAGE_SIZE;
} else if (msg->pages) {
page = msg->pages[con->out_msg_pos.page];
} else if (msg->pagelist) {
@@ -894,15 +1066,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
if (do_datacrc && !con->out_msg_pos.did_page_crc) {
void *base;
- u32 crc;
- u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
+ u32 crc = le32_to_cpu(msg->footer.data_crc);
char *kaddr;
kaddr = kmap(page);
BUG_ON(kaddr == NULL);
base = kaddr + con->out_msg_pos.page_pos + bio_offset;
- crc = crc32c(tmpcrc, base, len);
- con->out_msg->footer.data_crc = cpu_to_le32(crc);
+ crc = crc32c(crc, base, len);
+ msg->footer.data_crc = cpu_to_le32(crc);
con->out_msg_pos.did_page_crc = true;
}
ret = ceph_tcp_sendpage(con->sock, page,
@@ -915,31 +1086,15 @@ static int write_partial_msg_pages(struct ceph_connection *con)
if (ret <= 0)
goto out;
- con->out_msg_pos.data_pos += ret;
- con->out_msg_pos.page_pos += ret;
- if (ret == len) {
- con->out_msg_pos.page_pos = 0;
- con->out_msg_pos.page++;
- con->out_msg_pos.did_page_crc = false;
- if (in_trail)
- list_move_tail(&page->lru,
- &msg->trail->head);
- else if (msg->pagelist)
- list_move_tail(&page->lru,
- &msg->pagelist->head);
-#ifdef CONFIG_BLOCK
- else if (msg->bio)
- iter_bio_next(&msg->bio_iter, &msg->bio_seg);
-#endif
- }
+ out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
}
dout("write_partial_msg_pages %p msg %p done\n", con, msg);
/* prepare and queue up footer, too */
if (!do_datacrc)
- con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
- ceph_con_out_kvec_reset(con);
+ msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
+ con_out_kvec_reset(con);
prepare_write_message_footer(con);
ret = 1;
out:
@@ -1351,20 +1506,14 @@ static int process_banner(struct ceph_connection *con)
ceph_pr_addr(&con->msgr->inst.addr.in_addr));
}
- set_bit(NEGOTIATING, &con->state);
- prepare_read_connect(con);
return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
reset_connection(con);
- set_bit(CLOSED, &con->state); /* in case there's queued work */
-
- mutex_unlock(&con->mutex);
- if (con->ops->bad_proto)
- con->ops->bad_proto(con);
- mutex_lock(&con->mutex);
+ BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ con->state = CON_STATE_CLOSED;
}
static int process_connect(struct ceph_connection *con)
@@ -1407,7 +1556,7 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1428,7 +1577,7 @@ static int process_connect(struct ceph_connection *con)
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1440,8 +1589,7 @@ static int process_connect(struct ceph_connection *con)
if (con->ops->peer_reset)
con->ops->peer_reset(con);
mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state))
+ if (con->state != CON_STATE_NEGOTIATING)
return -EAGAIN;
break;
@@ -1454,7 +1602,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_reply.connect_seq));
con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1471,7 +1619,7 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_reply.global_seq));
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1489,7 +1637,10 @@ static int process_connect(struct ceph_connection *con)
fail_protocol(con);
return -1;
}
- clear_bit(CONNECTING, &con->state);
+
+ BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ con->state = CON_STATE_OPEN;
+
con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
con->connect_seq++;
con->peer_features = server_feat;
@@ -1501,7 +1652,9 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.connect_seq));
if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
- set_bit(LOSSYTX, &con->state);
+ set_bit(CON_FLAG_LOSSYTX, &con->flags);
+
+ con->delay = 0; /* reset backoff memory */
prepare_read_tag(con);
break;
@@ -1587,10 +1740,7 @@ static int read_partial_message_section(struct ceph_connection *con,
return 1;
}
-static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip);
-
+static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
static int read_partial_message_pages(struct ceph_connection *con,
struct page **pages,
@@ -1633,9 +1783,6 @@ static int read_partial_message_bio(struct ceph_connection *con,
void *p;
int ret, left;
- if (IS_ERR(bv))
- return PTR_ERR(bv);
-
left = min((int)(data_len - con->in_msg_pos.data_pos),
(int)(bv->bv_len - con->in_msg_pos.page_pos));
@@ -1672,7 +1819,6 @@ static int read_partial_message(struct ceph_connection *con)
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
- int skip;
u64 seq;
u32 crc;
@@ -1723,10 +1869,13 @@ static int read_partial_message(struct ceph_connection *con)
/* allocate message? */
if (!con->in_msg) {
+ int skip = 0;
+
dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
con->in_hdr.front_len, con->in_hdr.data_len);
- skip = 0;
- con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
+ ret = ceph_con_in_msg_alloc(con, &skip);
+ if (ret < 0)
+ return ret;
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
@@ -1737,11 +1886,9 @@ static int read_partial_message(struct ceph_connection *con)
con->in_seq++;
return 0;
}
- if (!con->in_msg) {
- con->error_msg =
- "error allocating memory for incoming message";
- return -ENOMEM;
- }
+
+ BUG_ON(!con->in_msg);
+ BUG_ON(con->in_msg->con != con);
m = con->in_msg;
m->front.iov_len = 0; /* haven't read it yet */
if (m->middle)
@@ -1753,6 +1900,11 @@ static int read_partial_message(struct ceph_connection *con)
else
con->in_msg_pos.page_pos = 0;
con->in_msg_pos.data_pos = 0;
+
+#ifdef CONFIG_BLOCK
+ if (m->bio)
+ init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
+#endif
}
/* front */
@@ -1769,10 +1921,6 @@ static int read_partial_message(struct ceph_connection *con)
if (ret <= 0)
return ret;
}
-#ifdef CONFIG_BLOCK
- if (m->bio && !m->bio_iter)
- init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
-#endif
/* (page) data */
while (con->in_msg_pos.data_pos < data_len) {
@@ -1783,7 +1931,7 @@ static int read_partial_message(struct ceph_connection *con)
return ret;
#ifdef CONFIG_BLOCK
} else if (m->bio) {
-
+ BUG_ON(!m->bio_iter);
ret = read_partial_message_bio(con,
&m->bio_iter, &m->bio_seg,
data_len, do_datacrc);
@@ -1837,8 +1985,11 @@ static void process_message(struct ceph_connection *con)
{
struct ceph_msg *msg;
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
msg = con->in_msg;
con->in_msg = NULL;
+ con->ops->put(con);
/* if first message, set peer_name */
if (con->peer_name.type == 0)
@@ -1858,7 +2009,6 @@ static void process_message(struct ceph_connection *con)
con->ops->dispatch(con, msg);
mutex_lock(&con->mutex);
- prepare_read_tag(con);
}
@@ -1870,22 +2020,19 @@ static int try_write(struct ceph_connection *con)
{
int ret = 1;
- dout("try_write start %p state %lu nref %d\n", con, con->state,
- atomic_read(&con->nref));
+ dout("try_write start %p state %lu\n", con, con->state);
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
/* open the socket first? */
- if (con->sock == NULL) {
- ceph_con_out_kvec_reset(con);
+ if (con->state == CON_STATE_PREOPEN) {
+ BUG_ON(con->sock);
+ con->state = CON_STATE_CONNECTING;
+
+ con_out_kvec_reset(con);
prepare_write_banner(con);
- ret = prepare_write_connect(con);
- if (ret < 0)
- goto out;
prepare_read_banner(con);
- set_bit(CONNECTING, &con->state);
- clear_bit(NEGOTIATING, &con->state);
BUG_ON(con->in_msg);
con->in_tag = CEPH_MSGR_TAG_READY;
@@ -1932,7 +2079,7 @@ more_kvec:
}
do_next:
- if (!test_bit(CONNECTING, &con->state)) {
+ if (con->state == CON_STATE_OPEN) {
/* is anything else pending? */
if (!list_empty(&con->out_queue)) {
prepare_write_message(con);
@@ -1942,14 +2089,15 @@ do_next:
prepare_write_ack(con);
goto more;
}
- if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
+ if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
+ &con->flags)) {
prepare_write_keepalive(con);
goto more;
}
}
/* Nothing to do! */
- clear_bit(WRITE_PENDING, &con->state);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
dout("try_write nothing else to write.\n");
ret = 0;
out:
@@ -1966,38 +2114,46 @@ static int try_read(struct ceph_connection *con)
{
int ret = -1;
- if (!con->sock)
- return 0;
-
- if (test_bit(STANDBY, &con->state))
+more:
+ dout("try_read start on %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
return 0;
- dout("try_read start on %p\n", con);
+ BUG_ON(!con->sock);
-more:
dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
con->in_base_pos);
- /*
- * process_connect and process_message drop and re-take
- * con->mutex. make sure we handle a racing close or reopen.
- */
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state)) {
- ret = -EAGAIN;
+ if (con->state == CON_STATE_CONNECTING) {
+ dout("try_read connecting\n");
+ ret = read_partial_banner(con);
+ if (ret <= 0)
+ goto out;
+ ret = process_banner(con);
+ if (ret < 0)
+ goto out;
+
+ BUG_ON(con->state != CON_STATE_CONNECTING);
+ con->state = CON_STATE_NEGOTIATING;
+
+ /*
+ * Received banner is good, exchange connection info.
+ * Do not reset out_kvec, as sending our banner raced
+ * with receiving peer banner after connect completed.
+ */
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ goto out;
+ prepare_read_connect(con);
+
+ /* Send connection info before awaiting response */
goto out;
}
- if (test_bit(CONNECTING, &con->state)) {
- if (!test_bit(NEGOTIATING, &con->state)) {
- dout("try_read connecting\n");
- ret = read_partial_banner(con);
- if (ret <= 0)
- goto out;
- ret = process_banner(con);
- if (ret < 0)
- goto out;
- }
+ if (con->state == CON_STATE_NEGOTIATING) {
+ dout("try_read negotiating\n");
ret = read_partial_connect(con);
if (ret <= 0)
goto out;
@@ -2007,6 +2163,8 @@ more:
goto more;
}
+ BUG_ON(con->state != CON_STATE_OPEN);
+
if (con->in_base_pos < 0) {
/*
* skipping + discarding content.
@@ -2040,7 +2198,8 @@ more:
prepare_read_ack(con);
break;
case CEPH_MSGR_TAG_CLOSE:
- set_bit(CLOSED, &con->state); /* fixme */
+ con_close_socket(con);
+ con->state = CON_STATE_CLOSED;
goto out;
default:
goto bad_tag;
@@ -2063,6 +2222,8 @@ more:
if (con->in_tag == CEPH_MSGR_TAG_READY)
goto more;
process_message(con);
+ if (con->state == CON_STATE_OPEN)
+ prepare_read_tag(con);
goto more;
}
if (con->in_tag == CEPH_MSGR_TAG_ACK) {
@@ -2091,12 +2252,6 @@ bad_tag:
*/
static void queue_con(struct ceph_connection *con)
{
- if (test_bit(DEAD, &con->state)) {
- dout("queue_con %p ignoring: DEAD\n",
- con);
- return;
- }
-
if (!con->ops->get(con)) {
dout("queue_con %p ref count 0\n", con);
return;
@@ -2121,7 +2276,26 @@ static void con_work(struct work_struct *work)
mutex_lock(&con->mutex);
restart:
- if (test_and_clear_bit(BACKOFF, &con->state)) {
+ if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
+ switch (con->state) {
+ case CON_STATE_CONNECTING:
+ con->error_msg = "connection failed";
+ break;
+ case CON_STATE_NEGOTIATING:
+ con->error_msg = "negotiation failed";
+ break;
+ case CON_STATE_OPEN:
+ con->error_msg = "socket closed";
+ break;
+ default:
+ dout("unrecognized con state %d\n", (int)con->state);
+ con->error_msg = "unrecognized con state";
+ BUG();
+ }
+ goto fault;
+ }
+
+ if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
dout("con_work %p backing off\n", con);
if (queue_delayed_work(ceph_msgr_wq, &con->work,
round_jiffies_relative(con->delay))) {
@@ -2135,35 +2309,35 @@ restart:
}
}
- if (test_bit(STANDBY, &con->state)) {
+ if (con->state == CON_STATE_STANDBY) {
dout("con_work %p STANDBY\n", con);
goto done;
}
- if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
- dout("con_work CLOSED\n");
- con_close_socket(con);
+ if (con->state == CON_STATE_CLOSED) {
+ dout("con_work %p CLOSED\n", con);
+ BUG_ON(con->sock);
goto done;
}
- if (test_and_clear_bit(OPENING, &con->state)) {
- /* reopen w/ new peer */
+ if (con->state == CON_STATE_PREOPEN) {
dout("con_work OPENING\n");
- con_close_socket(con);
+ BUG_ON(con->sock);
}
- if (test_and_clear_bit(SOCK_CLOSED, &con->state))
- goto fault;
-
ret = try_read(con);
if (ret == -EAGAIN)
goto restart;
- if (ret < 0)
+ if (ret < 0) {
+ con->error_msg = "socket error on read";
goto fault;
+ }
ret = try_write(con);
if (ret == -EAGAIN)
goto restart;
- if (ret < 0)
+ if (ret < 0) {
+ con->error_msg = "socket error on write";
goto fault;
+ }
done:
mutex_unlock(&con->mutex);
@@ -2172,7 +2346,6 @@ done_unlocked:
return;
fault:
- mutex_unlock(&con->mutex);
ceph_fault(con); /* error/fault path */
goto done_unlocked;
}
@@ -2183,26 +2356,31 @@ fault:
* exponential backoff
*/
static void ceph_fault(struct ceph_connection *con)
+ __releases(con->mutex)
{
pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
- if (test_bit(LOSSYTX, &con->state)) {
- dout("fault on LOSSYTX channel\n");
- goto out;
- }
-
- mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state))
- goto out_unlock;
+ BUG_ON(con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN);
con_close_socket(con);
+ if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
+ dout("fault on LOSSYTX channel, marking CLOSED\n");
+ con->state = CON_STATE_CLOSED;
+ goto out_unlock;
+ }
+
if (con->in_msg) {
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
+ con->ops->put(con);
}
/* Requeue anything that hasn't been acked */
@@ -2211,12 +2389,13 @@ static void ceph_fault(struct ceph_connection *con)
/* If there are no messages queued or keepalive pending, place
* the connection in a STANDBY state */
if (list_empty(&con->out_queue) &&
- !test_bit(KEEPALIVE_PENDING, &con->state)) {
+ !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
- clear_bit(WRITE_PENDING, &con->state);
- set_bit(STANDBY, &con->state);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con->state = CON_STATE_STANDBY;
} else {
/* retry after a delay. */
+ con->state = CON_STATE_PREOPEN;
if (con->delay == 0)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
@@ -2237,13 +2416,12 @@ static void ceph_fault(struct ceph_connection *con)
* that when con_work restarts we schedule the
* delay then.
*/
- set_bit(BACKOFF, &con->state);
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
}
}
out_unlock:
mutex_unlock(&con->mutex);
-out:
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
@@ -2260,18 +2438,14 @@ out:
/*
- * create a new messenger instance
+ * initialize a new messenger instance
*/
-struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
- u32 supported_features,
- u32 required_features)
+void ceph_messenger_init(struct ceph_messenger *msgr,
+ struct ceph_entity_addr *myaddr,
+ u32 supported_features,
+ u32 required_features,
+ bool nocrc)
{
- struct ceph_messenger *msgr;
-
- msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
- if (msgr == NULL)
- return ERR_PTR(-ENOMEM);
-
msgr->supported_features = supported_features;
msgr->required_features = required_features;
@@ -2284,30 +2458,23 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
msgr->inst.addr.type = 0;
get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
encode_my_addr(msgr);
+ msgr->nocrc = nocrc;
- dout("messenger_create %p\n", msgr);
- return msgr;
-}
-EXPORT_SYMBOL(ceph_messenger_create);
+ atomic_set(&msgr->stopping, 0);
-void ceph_messenger_destroy(struct ceph_messenger *msgr)
-{
- dout("destroy %p\n", msgr);
- kfree(msgr);
- dout("destroyed messenger %p\n", msgr);
+ dout("%s %p\n", __func__, msgr);
}
-EXPORT_SYMBOL(ceph_messenger_destroy);
+EXPORT_SYMBOL(ceph_messenger_init);
static void clear_standby(struct ceph_connection *con)
{
/* come back from STANDBY? */
- if (test_and_clear_bit(STANDBY, &con->state)) {
- mutex_lock(&con->mutex);
+ if (con->state == CON_STATE_STANDBY) {
dout("clear_standby %p and ++connect_seq\n", con);
+ con->state = CON_STATE_PREOPEN;
con->connect_seq++;
- WARN_ON(test_bit(WRITE_PENDING, &con->state));
- WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
- mutex_unlock(&con->mutex);
+ WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
+ WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
}
}
@@ -2316,21 +2483,24 @@ static void clear_standby(struct ceph_connection *con)
*/
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
- if (test_bit(CLOSED, &con->state)) {
- dout("con_send %p closed, dropping %p\n", con, msg);
- ceph_msg_put(msg);
- return;
- }
-
/* set src+dst */
msg->hdr.src = con->msgr->inst.name;
-
BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
-
msg->needs_out_seq = true;
- /* queue */
mutex_lock(&con->mutex);
+
+ if (con->state == CON_STATE_CLOSED) {
+ dout("con_send %p closed, dropping %p\n", con, msg);
+ ceph_msg_put(msg);
+ mutex_unlock(&con->mutex);
+ return;
+ }
+
+ BUG_ON(msg->con != NULL);
+ msg->con = con->ops->get(con);
+ BUG_ON(msg->con == NULL);
+
BUG_ON(!list_empty(&msg->list_head));
list_add_tail(&msg->list_head, &con->out_queue);
dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
@@ -2339,12 +2509,13 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.middle_len),
le32_to_cpu(msg->hdr.data_len));
+
+ clear_standby(con);
mutex_unlock(&con->mutex);
/* if there wasn't anything waiting to send before, queue
* new work */
- clear_standby(con);
- if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
@@ -2352,24 +2523,34 @@ EXPORT_SYMBOL(ceph_con_send);
/*
* Revoke a message that was previously queued for send
*/
-void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
+void ceph_msg_revoke(struct ceph_msg *msg)
{
+ struct ceph_connection *con = msg->con;
+
+ if (!con)
+ return; /* Message not in our possession */
+
mutex_lock(&con->mutex);
if (!list_empty(&msg->list_head)) {
- dout("con_revoke %p msg %p - was on queue\n", con, msg);
+ dout("%s %p msg %p - was on queue\n", __func__, con, msg);
list_del_init(&msg->list_head);
- ceph_msg_put(msg);
+ BUG_ON(msg->con == NULL);
+ msg->con->ops->put(msg->con);
+ msg->con = NULL;
msg->hdr.seq = 0;
+
+ ceph_msg_put(msg);
}
if (con->out_msg == msg) {
- dout("con_revoke %p msg %p - was sending\n", con, msg);
+ dout("%s %p msg %p - was sending\n", __func__, con, msg);
con->out_msg = NULL;
if (con->out_kvec_is_msg) {
con->out_skip = con->out_kvec_bytes;
con->out_kvec_is_msg = false;
}
- ceph_msg_put(msg);
msg->hdr.seq = 0;
+
+ ceph_msg_put(msg);
}
mutex_unlock(&con->mutex);
}
@@ -2377,17 +2558,27 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
/*
* Revoke a message that we may be reading data into
*/
-void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
+void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
+ struct ceph_connection *con;
+
+ BUG_ON(msg == NULL);
+ if (!msg->con) {
+ dout("%s msg %p null con\n", __func__, msg);
+
+ return; /* Message not in our possession */
+ }
+
+ con = msg->con;
mutex_lock(&con->mutex);
- if (con->in_msg && con->in_msg == msg) {
+ if (con->in_msg == msg) {
unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
/* skip rest of message */
- dout("con_revoke_pages %p msg %p revoked\n", con, msg);
- con->in_base_pos = con->in_base_pos -
+ dout("%s %p msg %p revoked\n", __func__, con, msg);
+ con->in_base_pos = con->in_base_pos -
sizeof(struct ceph_msg_header) -
front_len -
middle_len -
@@ -2398,8 +2589,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
} else {
- dout("con_revoke_pages %p msg %p pages %p no-op\n",
- con, con->in_msg, msg);
+ dout("%s %p in_msg %p msg %p no-op\n",
+ __func__, con, con->in_msg, msg);
}
mutex_unlock(&con->mutex);
}
@@ -2410,9 +2601,11 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
void ceph_con_keepalive(struct ceph_connection *con)
{
dout("con_keepalive %p\n", con);
+ mutex_lock(&con->mutex);
clear_standby(con);
- if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
- test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ mutex_unlock(&con->mutex);
+ if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
+ test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
@@ -2431,6 +2624,8 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
if (m == NULL)
goto out;
kref_init(&m->kref);
+
+ m->con = NULL;
INIT_LIST_HEAD(&m->list_head);
m->hdr.tid = 0;
@@ -2526,46 +2721,77 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
}
/*
- * Generic message allocator, for incoming messages.
+ * Allocate a message for receiving an incoming message on a
+ * connection, and save the result in con->in_msg. Uses the
+ * connection's private alloc_msg op if available.
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * On success, if we set *skip = 1:
+ * - the next message should be skipped and ignored.
+ * - con->in_msg == NULL
+ * or if we set *skip = 0:
+ * - con->in_msg is non-null.
+ * On error (ENOMEM, EAGAIN, ...),
+ * - con->in_msg == NULL
*/
-static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip)
+static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
+ struct ceph_msg_header *hdr = &con->in_hdr;
int type = le16_to_cpu(hdr->type);
int front_len = le32_to_cpu(hdr->front_len);
int middle_len = le32_to_cpu(hdr->middle_len);
- struct ceph_msg *msg = NULL;
- int ret;
+ int ret = 0;
+
+ BUG_ON(con->in_msg != NULL);
if (con->ops->alloc_msg) {
+ struct ceph_msg *msg;
+
mutex_unlock(&con->mutex);
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
- if (!msg || *skip)
- return NULL;
+ if (con->state != CON_STATE_OPEN) {
+ ceph_msg_put(msg);
+ return -EAGAIN;
+ }
+ con->in_msg = msg;
+ if (con->in_msg) {
+ con->in_msg->con = con->ops->get(con);
+ BUG_ON(con->in_msg->con == NULL);
+ }
+ if (*skip) {
+ con->in_msg = NULL;
+ return 0;
+ }
+ if (!con->in_msg) {
+ con->error_msg =
+ "error allocating memory for incoming message";
+ return -ENOMEM;
+ }
}
- if (!msg) {
- *skip = 0;
- msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
- if (!msg) {
+ if (!con->in_msg) {
+ con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ if (!con->in_msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
- return NULL;
+ return -ENOMEM;
}
- msg->page_alignment = le16_to_cpu(hdr->data_off);
+ con->in_msg->con = con->ops->get(con);
+ BUG_ON(con->in_msg->con == NULL);
+ con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
}
- memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
+ memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
- if (middle_len && !msg->middle) {
- ret = ceph_alloc_middle(con, msg);
+ if (middle_len && !con->in_msg->middle) {
+ ret = ceph_alloc_middle(con, con->in_msg);
if (ret < 0) {
- ceph_msg_put(msg);
- return NULL;
+ ceph_msg_put(con->in_msg);
+ con->in_msg = NULL;
}
}
- return msg;
+ return ret;
}
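[Note: with ceph_messenger_create()/ceph_messenger_destroy() gone, the messenger is embedded in its owner and prepared with ceph_messenger_init(); a short sketch of the new call, where the owning structure shown here is assumed (in-tree the messenger lives inside struct ceph_client, as the mon_client changes below show).]

#include <linux/ceph/messenger.h>

struct my_owner {
	struct ceph_messenger msgr;	/* embedded, no separate allocation */
};

static void my_owner_setup(struct my_owner *o, struct ceph_entity_addr *myaddr,
			   u32 supported_features, u32 required_features)
{
	/* no error path: the struct already exists inside the owner */
	ceph_messenger_init(&o->msgr, myaddr,
			    supported_features, required_features,
			    false /* nocrc: keep data CRCs enabled */);
}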
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index d0649a9655be..900ea0f043fc 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -106,9 +106,9 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
monc->pending_auth = 1;
monc->m_auth->front.iov_len = len;
monc->m_auth->hdr.front_len = cpu_to_le32(len);
- ceph_con_revoke(monc->con, monc->m_auth);
+ ceph_msg_revoke(monc->m_auth);
ceph_msg_get(monc->m_auth); /* keep our ref */
- ceph_con_send(monc->con, monc->m_auth);
+ ceph_con_send(&monc->con, monc->m_auth);
}
/*
@@ -117,8 +117,11 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
static void __close_session(struct ceph_mon_client *monc)
{
dout("__close_session closing mon%d\n", monc->cur_mon);
- ceph_con_revoke(monc->con, monc->m_auth);
- ceph_con_close(monc->con);
+ ceph_msg_revoke(monc->m_auth);
+ ceph_msg_revoke_incoming(monc->m_auth_reply);
+ ceph_msg_revoke(monc->m_subscribe);
+ ceph_msg_revoke_incoming(monc->m_subscribe_ack);
+ ceph_con_close(&monc->con);
monc->cur_mon = -1;
monc->pending_auth = 0;
ceph_auth_reset(monc->auth);
@@ -142,9 +145,8 @@ static int __open_session(struct ceph_mon_client *monc)
monc->want_next_osdmap = !!monc->want_next_osdmap;
dout("open_session mon%d opening\n", monc->cur_mon);
- monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
- monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
- ceph_con_open(monc->con,
+ ceph_con_open(&monc->con,
+ CEPH_ENTITY_TYPE_MON, monc->cur_mon,
&monc->monmap->mon_inst[monc->cur_mon].addr);
/* initiatiate authentication handshake */
@@ -226,8 +228,8 @@ static void __send_subscribe(struct ceph_mon_client *monc)
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- ceph_con_revoke(monc->con, msg);
- ceph_con_send(monc->con, ceph_msg_get(msg));
+ ceph_msg_revoke(msg);
+ ceph_con_send(&monc->con, ceph_msg_get(msg));
monc->sub_sent = jiffies | 1; /* never 0 */
}
@@ -247,7 +249,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
if (monc->hunting) {
pr_info("mon%d %s session established\n",
monc->cur_mon,
- ceph_pr_addr(&monc->con->peer_addr.in_addr));
+ ceph_pr_addr(&monc->con.peer_addr.in_addr));
monc->hunting = false;
}
dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -309,6 +311,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc)
EXPORT_SYMBOL(ceph_monc_open_session);
/*
+ * We require the fsid and global_id in order to initialize our
+ * debugfs dir.
+ */
+static bool have_debugfs_info(struct ceph_mon_client *monc)
+{
+ dout("have_debugfs_info fsid %d globalid %lld\n",
+ (int)monc->client->have_fsid, monc->auth->global_id);
+ return monc->client->have_fsid && monc->auth->global_id > 0;
+}
+
+/*
* The monitor responds with mount ack indicate mount success. The
* included client ticket allows the client to talk to MDSs and OSDs.
*/
@@ -318,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
struct ceph_client *client = monc->client;
struct ceph_monmap *monmap = NULL, *old = monc->monmap;
void *p, *end;
+ int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
+ had_debugfs_info = have_debugfs_info(monc);
+
dout("handle_monmap\n");
p = msg->front.iov_base;
end = p + msg->front.iov_len;
@@ -342,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
if (!client->have_fsid) {
client->have_fsid = true;
+ if (!had_debugfs_info && have_debugfs_info(monc)) {
+ pr_info("client%lld fsid %pU\n",
+ ceph_client_id(monc->client),
+ &monc->client->fsid);
+ init_debugfs = 1;
+ }
mutex_unlock(&monc->mutex);
- /*
- * do debugfs initialization without mutex to avoid
- * creating a locking dependency
- */
- ceph_debugfs_client_init(client);
+
+ if (init_debugfs) {
+ /*
+ * do debugfs initialization without mutex to avoid
+ * creating a locking dependency
+ */
+ ceph_debugfs_client_init(monc->client);
+ }
+
goto out_unlocked;
}
out:
@@ -439,6 +465,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
m = NULL;
} else {
dout("get_generic_reply %lld got %p\n", tid, req->reply);
+ *skip = 0;
m = ceph_msg_get(req->reply);
/*
* we don't need to track the connection reading into
@@ -461,7 +488,7 @@ static int do_generic_request(struct ceph_mon_client *monc,
req->request->hdr.tid = cpu_to_le64(req->tid);
__insert_generic_request(monc, req);
monc->num_generic_requests++;
- ceph_con_send(monc->con, ceph_msg_get(req->request));
+ ceph_con_send(&monc->con, ceph_msg_get(req->request));
mutex_unlock(&monc->mutex);
err = wait_for_completion_interruptible(&req->completion);
@@ -684,8 +711,9 @@ static void __resend_generic_request(struct ceph_mon_client *monc)
for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_mon_generic_request, node);
- ceph_con_revoke(monc->con, req->request);
- ceph_con_send(monc->con, ceph_msg_get(req->request));
+ ceph_msg_revoke(req->request);
+ ceph_msg_revoke_incoming(req->reply);
+ ceph_con_send(&monc->con, ceph_msg_get(req->request));
}
}
@@ -705,7 +733,7 @@ static void delayed_work(struct work_struct *work)
__close_session(monc);
__open_session(monc); /* continue hunting */
} else {
- ceph_con_keepalive(monc->con);
+ ceph_con_keepalive(&monc->con);
__validate_auth(monc);
@@ -760,19 +788,12 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
goto out;
/* connection */
- monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
- if (!monc->con)
- goto out_monmap;
- ceph_con_init(monc->client->msgr, monc->con);
- monc->con->private = monc;
- monc->con->ops = &mon_con_ops;
-
/* authentication */
monc->auth = ceph_auth_init(cl->options->name,
cl->options->key);
if (IS_ERR(monc->auth)) {
err = PTR_ERR(monc->auth);
- goto out_con;
+ goto out_monmap;
}
monc->auth->want_keys =
CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
@@ -801,6 +822,9 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
if (!monc->m_auth)
goto out_auth_reply;
+ ceph_con_init(&monc->con, monc, &mon_con_ops,
+ &monc->client->msgr);
+
monc->cur_mon = -1;
monc->hunting = true;
monc->sub_renew_after = jiffies;
@@ -824,8 +848,6 @@ out_subscribe_ack:
ceph_msg_put(monc->m_subscribe_ack);
out_auth:
ceph_auth_destroy(monc->auth);
-out_con:
- monc->con->ops->put(monc->con);
out_monmap:
kfree(monc->monmap);
out:
@@ -841,10 +863,6 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
mutex_lock(&monc->mutex);
__close_session(monc);
- monc->con->private = NULL;
- monc->con->ops->put(monc->con);
- monc->con = NULL;
-
mutex_unlock(&monc->mutex);
/*
@@ -871,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
{
int ret;
int was_auth = 0;
+ int had_debugfs_info, init_debugfs = 0;
mutex_lock(&monc->mutex);
+ had_debugfs_info = have_debugfs_info(monc);
if (monc->auth->ops)
was_auth = monc->auth->ops->is_authenticated(monc->auth);
monc->pending_auth = 0;
@@ -888,14 +908,29 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
dout("authenticated, starting session\n");
- monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
- monc->client->msgr->inst.name.num =
+ monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
+ monc->client->msgr.inst.name.num =
cpu_to_le64(monc->auth->global_id);
__send_subscribe(monc);
__resend_generic_request(monc);
}
+
+ if (!had_debugfs_info && have_debugfs_info(monc)) {
+ pr_info("client%lld fsid %pU\n",
+ ceph_client_id(monc->client),
+ &monc->client->fsid);
+ init_debugfs = 1;
+ }
mutex_unlock(&monc->mutex);
+
+ if (init_debugfs) {
+ /*
+ * do debugfs initialization without mutex to avoid
+ * creating a locking dependency
+ */
+ ceph_debugfs_client_init(monc->client);
+ }
}
static int __validate_auth(struct ceph_mon_client *monc)
@@ -1000,6 +1035,8 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
m = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ if (!m)
+ return NULL; /* ENOMEM--return skip == 0 */
break;
}
@@ -1029,7 +1066,7 @@ static void mon_fault(struct ceph_connection *con)
if (!monc->hunting)
pr_info("mon%d %s session lost, "
"hunting for new mon\n", monc->cur_mon,
- ceph_pr_addr(&monc->con->peer_addr.in_addr));
+ ceph_pr_addr(&monc->con.peer_addr.in_addr));
__close_session(monc);
if (!monc->hunting) {
@@ -1044,9 +1081,23 @@ out:
mutex_unlock(&monc->mutex);
}
+/*
+ * We can ignore refcounting on the connection struct, as all references
+ * will come from the messenger workqueue, which is drained prior to
+ * mon_client destruction.
+ */
+static struct ceph_connection *con_get(struct ceph_connection *con)
+{
+ return con;
+}
+
+static void con_put(struct ceph_connection *con)
+{
+}
+
static const struct ceph_connection_operations mon_con_ops = {
- .get = ceph_con_get,
- .put = ceph_con_put,
+ .get = con_get,
+ .put = con_put,
.dispatch = dispatch,
.fault = mon_fault,
.alloc_msg = mon_alloc_msg,
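[Note: because a queued message now records its connection in msg->con, both revoke calls take only the message and quietly return when it is not in a connection's possession; a hedged sketch of an abort helper relying on that, with the request structure assumed.]

#include <linux/ceph/messenger.h>

struct my_request {
	struct ceph_msg *request;	/* outgoing message */
	struct ceph_msg *reply;		/* preallocated incoming reply */
};

static void my_request_abort(struct my_request *req)
{
	/*
	 * Both helpers check msg->con first, so this is safe even for a
	 * request that was never handed to ceph_con_send() or whose reply
	 * was never attached to a connection.
	 */
	ceph_msg_revoke(req->request);
	ceph_msg_revoke_incoming(req->reply);
}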
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 11d5f4196a73..ddec1c10ac80 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(0, pool->front_len, gfp_mask, true);
+ msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
if (!msg) {
dout("msgpool_alloc %s failed\n", pool->name);
} else {
@@ -32,10 +32,11 @@ static void msgpool_free(void *element, void *arg)
ceph_msg_put(msg);
}
-int ceph_msgpool_init(struct ceph_msgpool *pool,
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int size, bool blocking, const char *name)
{
dout("msgpool %s init\n", name);
+ pool->type = type;
pool->front_len = front_len;
pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
if (!pool->pool)
@@ -61,7 +62,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
WARN_ON(1);
/* try to alloc a fresh message */
- return ceph_msg_new(0, front_len, GFP_NOFS, false);
+ return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
}
msg = mempool_alloc(pool->pool, GFP_NOFS);
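[Note: ceph_msgpool_init() now records the message type the pool serves, so pooled messages and the fresh-allocation fallback both come back with hdr.type filled in; a minimal sketch, with the sizing numbers picked arbitrarily.]

#include <linux/ceph/ceph_fs.h>
#include <linux/ceph/msgpool.h>

static int my_pool_setup(struct ceph_msgpool *pool)
{
	/* the type is stamped on every message the pool hands out */
	return ceph_msgpool_init(pool, CEPH_MSG_OSD_OPREPLY,
				 512 /* front_len */, 10 /* size */,
				 true /* blocking */, "my_op_reply");
}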
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ca59e66c9787..42119c05e82c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -140,10 +140,9 @@ void ceph_osdc_release_request(struct kref *kref)
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_con_filling_msg) {
- dout("release_request revoking pages %p from con %p\n",
+ dout("%s revoking pages %p from con %p\n", __func__,
req->r_pages, req->r_con_filling_msg);
- ceph_con_revoke_message(req->r_con_filling_msg,
- req->r_reply);
+ ceph_msg_revoke_incoming(req->r_reply);
req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
}
if (req->r_reply)
@@ -214,10 +213,13 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
kref_init(&req->r_kref);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
+ rb_init_node(&req->r_node);
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
INIT_LIST_HEAD(&req->r_req_lru_item);
+ INIT_LIST_HEAD(&req->r_osd_item);
+
req->r_flags = flags;
WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -243,6 +245,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
}
ceph_pagelist_init(req->r_trail);
}
+
/* create request message; allow space for oid */
msg_size += MAX_OBJ_NAME_SIZE;
if (snapc)
@@ -256,7 +259,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
return NULL;
}
- msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
@@ -624,7 +626,7 @@ static void osd_reset(struct ceph_connection *con)
/*
* Track open sessions with osds.
*/
-static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
+static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
struct ceph_osd *osd;
@@ -634,15 +636,13 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
atomic_set(&osd->o_ref, 1);
osd->o_osdc = osdc;
+ osd->o_osd = onum;
INIT_LIST_HEAD(&osd->o_requests);
INIT_LIST_HEAD(&osd->o_linger_requests);
INIT_LIST_HEAD(&osd->o_osd_lru);
osd->o_incarnation = 1;
- ceph_con_init(osdc->client->msgr, &osd->o_con);
- osd->o_con.private = osd;
- osd->o_con.ops = &osd_con_ops;
- osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
+ ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
INIT_LIST_HEAD(&osd->o_keepalive_item);
return osd;
@@ -688,7 +688,7 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
static void remove_all_osds(struct ceph_osd_client *osdc)
{
- dout("__remove_old_osds %p\n", osdc);
+ dout("%s %p\n", __func__, osdc);
mutex_lock(&osdc->request_mutex);
while (!RB_EMPTY_ROOT(&osdc->osds)) {
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
@@ -752,7 +752,8 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
ret = -EAGAIN;
} else {
ceph_con_close(&osd->o_con);
- ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+ ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+ &osdc->osdmap->osd_addr[osd->o_osd]);
osd->o_incarnation++;
}
return ret;
@@ -853,7 +854,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
if (req->r_osd) {
/* make sure the original request isn't in flight. */
- ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+ ceph_msg_revoke(req->r_request);
list_del_init(&req->r_osd_item);
if (list_empty(&req->r_osd->o_requests) &&
@@ -880,7 +881,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
static void __cancel_request(struct ceph_osd_request *req)
{
if (req->r_sent && req->r_osd) {
- ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+ ceph_msg_revoke(req->r_request);
req->r_sent = 0;
}
}
@@ -890,7 +891,9 @@ static void __register_linger_request(struct ceph_osd_client *osdc,
{
dout("__register_linger_request %p\n", req);
list_add_tail(&req->r_linger_item, &osdc->req_linger);
- list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
+ if (req->r_osd)
+ list_add_tail(&req->r_linger_osd,
+ &req->r_osd->o_linger_requests);
}
static void __unregister_linger_request(struct ceph_osd_client *osdc,
@@ -998,18 +1001,18 @@ static int __map_request(struct ceph_osd_client *osdc,
req->r_osd = __lookup_osd(osdc, o);
if (!req->r_osd && o >= 0) {
err = -ENOMEM;
- req->r_osd = create_osd(osdc);
+ req->r_osd = create_osd(osdc, o);
if (!req->r_osd) {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
goto out;
}
dout("map_request osd %p is osd%d\n", req->r_osd, o);
- req->r_osd->o_osd = o;
- req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
__insert_osd(osdc, req->r_osd);
- ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+ ceph_con_open(&req->r_osd->o_con,
+ CEPH_ENTITY_TYPE_OSD, o,
+ &osdc->osdmap->osd_addr[o]);
}
if (req->r_osd) {
@@ -1304,8 +1307,9 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
mutex_lock(&osdc->request_mutex);
- for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
+ p = rb_next(p);
err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
@@ -1313,10 +1317,23 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("%p tid %llu maps to no osd\n", req, req->r_tid);
needmap++; /* request a newer map */
} else if (err > 0) {
- dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- if (!req->r_linger)
+ if (!req->r_linger) {
+ dout("%p tid %llu requeued on osd%d\n", req,
+ req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
req->r_flags |= CEPH_OSD_FLAG_RETRY;
+ }
+ }
+ if (req->r_linger && list_empty(&req->r_linger_item)) {
+ /*
+ * register as a linger so that we will
+ * re-submit below and get a new tid
+ */
+ dout("%p tid %llu restart on osd%d\n",
+ req, req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+ __register_linger_request(osdc, req);
+ __unregister_request(osdc, req);
}
}
@@ -1391,7 +1408,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
epoch, maplen);
newmap = osdmap_apply_incremental(&p, next,
osdc->osdmap,
- osdc->client->msgr);
+ &osdc->client->msgr);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad;
@@ -1839,11 +1856,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
if (!osdc->req_mempool)
goto out;
- err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
+ err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
+ OSD_OP_FRONT_LEN, 10, true,
"osd_op");
if (err < 0)
goto out_mempool;
- err = ceph_msgpool_init(&osdc->msgpool_op_reply,
+ err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
OSD_OPREPLY_FRONT_LEN, 10, true,
"osd_op_reply");
if (err < 0)
@@ -2019,15 +2037,15 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
if (!req) {
*skip = 1;
m = NULL;
- pr_info("get_reply unknown tid %llu from osd%d\n", tid,
- osd->o_osd);
+ dout("get_reply unknown tid %llu from osd%d\n", tid,
+ osd->o_osd);
goto out;
}
if (req->r_con_filling_msg) {
- dout("get_reply revoking msg %p from old con %p\n",
+ dout("%s revoking msg %p from old con %p\n", __func__,
req->r_reply, req->r_con_filling_msg);
- ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
+ ceph_msg_revoke_incoming(req->r_reply);
req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
req->r_con_filling_msg = NULL;
}
@@ -2080,6 +2098,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
int type = le16_to_cpu(hdr->type);
int front = le32_to_cpu(hdr->front_len);
+ *skip = 0;
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_WATCH_NOTIFY:
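[Note: the *skip = 0 initializations added above matter because ceph_con_in_msg_alloc() in the messenger now reports a NULL message with skip still zero as an allocation failure; a sketch of the convention an alloc_msg handler is expected to follow (the handler and its type filter are hypothetical).]

#include <linux/ceph/ceph_fs.h>
#include <linux/ceph/messenger.h>

static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
				     struct ceph_msg_header *hdr, int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);

	*skip = 0;			/* always set; never leave it stale */
	if (type != CEPH_MSG_OSD_OPREPLY) {
		*skip = 1;		/* not interesting: drop this message */
		return NULL;
	}
	/* returning NULL here, with *skip still 0, is treated as ENOMEM */
	return ceph_msg_new(type, front_len, GFP_NOFS, false);
}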
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 81e3b84a77ef..3124b71a8883 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -135,6 +135,21 @@ bad:
return -EINVAL;
}
+static int skip_name_map(void **p, void *end)
+{
+ int len;
+ ceph_decode_32_safe(p, end, len ,bad);
+ while (len--) {
+ int strlen;
+ *p += sizeof(u32);
+ ceph_decode_32_safe(p, end, strlen, bad);
+ *p += strlen;
+}
+ return 0;
+bad:
+ return -EINVAL;
+}
+
static struct crush_map *crush_decode(void *pbyval, void *end)
{
struct crush_map *c;
@@ -143,6 +158,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
void **p = &pbyval;
void *start = pbyval;
u32 magic;
+ u32 num_name_maps;
dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
@@ -150,6 +166,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
if (c == NULL)
return ERR_PTR(-ENOMEM);
+ /* set tunables to default values */
+ c->choose_local_tries = 2;
+ c->choose_local_fallback_tries = 5;
+ c->choose_total_tries = 19;
+
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
if (magic != CRUSH_MAGIC) {
@@ -297,7 +318,25 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
}
/* ignore trailing name maps. */
+ for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
+ err = skip_name_map(p, end);
+ if (err < 0)
+ goto done;
+ }
+
+ /* tunables */
+ ceph_decode_need(p, end, 3*sizeof(u32), done);
+ c->choose_local_tries = ceph_decode_32(p);
+ c->choose_local_fallback_tries = ceph_decode_32(p);
+ c->choose_total_tries = ceph_decode_32(p);
+ dout("crush decode tunable choose_local_tries = %d",
+ c->choose_local_tries);
+ dout("crush decode tunable choose_local_fallback_tries = %d",
+ c->choose_local_fallback_tries);
+ dout("crush decode tunable choose_total_tries = %d",
+ c->choose_total_tries);
+done:
dout("crush_decode success\n");
return c;
@@ -488,15 +527,16 @@ static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
ceph_decode_32_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %d len %d\n", pool, len);
+ ceph_decode_need(p, end, len, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
+ char *name = kstrndup(*p, len, GFP_NOFS);
+
+ if (!name)
+ return -ENOMEM;
kfree(pi->name);
- pi->name = kmalloc(len + 1, GFP_NOFS);
- if (pi->name) {
- memcpy(pi->name, *p, len);
- pi->name[len] = '\0';
- dout(" name is %s\n", pi->name);
- }
+ pi->name = name;
+ dout(" name is %s\n", pi->name);
}
*p += len;
}
@@ -666,6 +706,9 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
ceph_decode_copy(p, &pgid, sizeof(pgid));
n = ceph_decode_32(p);
+ err = -EINVAL;
+ if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
+ goto bad;
ceph_decode_need(p, end, n * sizeof(u32), bad);
err = -ENOMEM;
pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
@@ -889,6 +932,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
(void) __remove_pg_mapping(&map->pg_temp, pgid);
/* insert */
+ if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
+ err = -EINVAL;
+ goto bad;
+ }
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
if (!pg) {
err = -ENOMEM;
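[Note: the two new checks above keep sizeof(*pg) + n * sizeof(u32) from overflowing before the kmalloc(); the same guard in isolation, as a stand-alone sketch (the struct is a stand-in for ceph_pg_mapping).]

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct pg_mapping_sketch {
	u64 pgid;
	u32 len;
	u32 osds[];		/* trailing array of OSD ids */
};

static struct pg_mapping_sketch *alloc_mapping(u32 n)
{
	struct pg_mapping_sketch *pg;

	/* reject n large enough to wrap the size calculation below */
	if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	pg = kmalloc(sizeof(*pg) + n * sizeof(u32), GFP_NOFS);
	if (!pg)
		return ERR_PTR(-ENOMEM);
	pg->len = n;
	return pg;
}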
diff --git a/net/core/dev.c b/net/core/dev.c
index 0ebaea16632f..83988362805e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1055,6 +1055,8 @@ rollback:
*/
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
+ char *new_ifalias;
+
ASSERT_RTNL();
if (len >= IFALIASZ)
@@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
return 0;
}
- dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
- if (!dev->ifalias)
+ new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
+ if (!new_ifalias)
return -ENOMEM;
+ dev->ifalias = new_ifalias;
strlcpy(dev->ifalias, alias, len+1);
return len;
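dev_set_alias() used to assign the krealloc() result directly to dev->ifalias, so an allocation failure replaced a valid pointer with NULL, leaking the old buffer and losing the existing alias. The fix reallocates into a local and only commits on success, the standard defensive pattern for any realloc-style API; a plain C sketch:

#include <stdlib.h>
#include <string.h>

/* Sketch: never assign realloc()'s result to the only pointer you hold --
 * on failure the original block would be leaked and its contents lost.
 */
static int set_alias_sketch(char **alias, const char *src, size_t len)
{
        char *tmp = realloc(*alias, len + 1);

        if (!tmp)
                return -1;              /* *alias is still valid here */
        memcpy(tmp, src, len);
        tmp[len] = '\0';
        *alias = tmp;
        return 0;
}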
@@ -1172,6 +1175,7 @@ static int __dev_open(struct net_device *dev)
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
return ret;
@@ -1638,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb,
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
+static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
+{
+ if (ptype->af_packet_priv == NULL)
+ return false;
+
+ if (ptype->id_match)
+ return ptype->id_match(ptype, skb->sk);
+ else if ((struct sock *)ptype->af_packet_priv == skb->sk)
+ return true;
+
+ return false;
+}
+
/*
* Support routine. Sends outgoing frames to any network
* taps currently in use.
@@ -1655,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
* they originated from - MvS (miquels@drinkel.ow.org)
*/
if ((ptype->dev == dev || !ptype->dev) &&
- (ptype->af_packet_priv == NULL ||
- (struct sock *)ptype->af_packet_priv != skb->sk)) {
+ (!skb_loop_sk(ptype, skb))) {
if (pt_prev) {
deliver_skb(skb2, pt_prev, skb->dev);
pt_prev = ptype;
@@ -2133,6 +2149,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
__be16 protocol = skb->protocol;
netdev_features_t features = skb->dev->features;
+ if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ features &= ~NETIF_F_GSO_MASK;
+
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
@@ -3155,6 +3174,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+/*
+ * Limit the use of PFMEMALLOC reserves to those protocols that implement
+ * the special handling of PFMEMALLOC skbs.
+ */
+static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_ARP):
+ case __constant_htons(ETH_P_IP):
+ case __constant_htons(ETH_P_IPV6):
+ case __constant_htons(ETH_P_8021Q):
+ return true;
+ default:
+ return false;
+ }
+}
+
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
@@ -3164,14 +3200,27 @@ static int __netif_receive_skb(struct sk_buff *skb)
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
+ unsigned long pflags = current->flags;
net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
+ /*
+ * PFMEMALLOC skbs are special, they should
+ * - be delivered to SOCK_MEMALLOC sockets only
+ * - stay away from userspace
+ * - have bounded memory usage
+ *
+ * Use PF_MEMALLOC as this saves us from propagating the allocation
+ * context down to all allocation sites.
+ */
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ current->flags |= PF_MEMALLOC;
+
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
- return NET_RX_DROP;
+ goto out;
orig_dev = skb->dev;
@@ -3191,7 +3240,7 @@ another_round:
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
skb = vlan_untag(skb);
if (unlikely(!skb))
- goto out;
+ goto unlock;
}
#ifdef CONFIG_NET_CLS_ACT
@@ -3201,6 +3250,9 @@ another_round:
}
#endif
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ goto skip_taps;
+
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev)
@@ -3209,13 +3261,18 @@ another_round:
}
}
+skip_taps:
#ifdef CONFIG_NET_CLS_ACT
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
- goto out;
+ goto unlock;
ncls:
#endif
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb)
+ && !skb_pfmemalloc_protocol(skb))
+ goto drop;
+
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (vlan_tx_tag_present(skb)) {
if (pt_prev) {
@@ -3225,7 +3282,7 @@ ncls:
if (vlan_do_receive(&skb, !rx_handler))
goto another_round;
else if (unlikely(!skb))
- goto out;
+ goto unlock;
}
if (rx_handler) {
@@ -3235,7 +3292,7 @@ ncls:
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
- goto out;
+ goto unlock;
case RX_HANDLER_ANOTHER:
goto another_round;
case RX_HANDLER_EXACT:
@@ -3268,6 +3325,7 @@ ncls:
else
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
+drop:
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
@@ -3276,8 +3334,10 @@ ncls:
ret = NET_RX_DROP;
}
-out:
+unlock:
rcu_read_unlock();
+out:
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
return ret;
}
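The __netif_receive_skb() changes fence off pfmemalloc skbs: taps are skipped (skip_taps), protocols that do not take part in swap-over-network are dropped via the new drop label, and the whole function runs with PF_MEMALLOC set so that allocations it triggers may dip into the reserves. The task flag is saved on entry and restored on every exit, which is why the old out: label becomes unlock: and the new out: only performs tsk_restore_flags(). A standalone sketch of that save/restore discipline (the flag word and the bit are illustrative stand-ins, not the kernel's):

#define PF_MEMALLOC_SKETCH 0x1u

static unsigned int task_flags;         /* stand-in for current->flags */

static int rx_one_packet(int pfmemalloc_skb)
{
        unsigned int saved = task_flags;
        int ret = 0;

        if (pfmemalloc_skb)
                task_flags |= PF_MEMALLOC_SKETCH;

        /* ... deliver the packet; allocations here may use reserves ... */

        /* restore only the bit that may have been touched, which is what
         * tsk_restore_flags(current, pflags, PF_MEMALLOC) does
         */
        task_flags = (task_flags & ~PF_MEMALLOC_SKETCH) |
                     (saved & PF_MEMALLOC_SKETCH);
        return ret;
}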
@@ -4801,6 +4861,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
err = ops->ndo_set_mac_address(dev, sa);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
@@ -5579,6 +5640,7 @@ int register_netdevice(struct net_device *dev)
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -5682,6 +5744,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
/**
* netdev_wait_allrefs - wait until all references are gone.
+ * @dev: target net_device
*
* This is called when unregistering network devices.
*
@@ -5942,6 +6005,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev_net_set(dev, &init_net);
dev->gso_max_size = GSO_MAX_SIZE;
+ dev->gso_max_segs = GSO_MAX_SEGS;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
diff --git a/net/core/dst.c b/net/core/dst.c
index 069d51d29414..56d63612e1e4 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-const u32 dst_default_metrics[RTAX_MAX];
+const u32 dst_default_metrics[RTAX_MAX + 1] = {
+ /* This initializer is needed to force linker to place this variable
+ * into const section. Otherwise it might end into bss section.
+ * We really want to avoid false sharing on this variable, and catch
+ * any writes on it.
+ */
+ [RTAX_MAX] = 0xdeadbeef,
+};
+
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
int initial_ref, int initial_obsolete, unsigned short flags)
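dst_default_metrics grows one extra slot whose sole purpose is the 0xdeadbeef initializer: as the in-line comment notes, a const array with no initializer may end up in .bss on some toolchains, where it would sit among writable data and be prone to false sharing, whereas a non-zero initializer pins it in the read-only data section so any stray write faults immediately. A quick way to compare placement with a GCC/Clang-style toolchain (section names are target-dependent):

/* zero_metrics.c -- compare where a toolchain places the two arrays:
 *   cc -c zero_metrics.c && objdump -t zero_metrics.o
 */
const unsigned int all_zero[16];                          /* placement is toolchain-dependent, may be .bss */
const unsigned int forced_ro[16] = { [15] = 0xdeadbeef }; /* non-zero initializer pins it in .rodata */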
diff --git a/net/core/filter.c b/net/core/filter.c
index d4ce2dc712e3..907efd27ec77 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -83,6 +83,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
int err;
struct sk_filter *filter;
+ /*
+ * If the skb was allocated from pfmemalloc reserves, only
+ * allow SOCK_MEMALLOC sockets to use it as this socket is
+ * helping free memory
+ */
+ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+ return -ENOMEM;
+
err = security_sock_rcv_skb(sk, skb);
if (err)
return err;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b4c90e42b443..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -26,6 +26,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
@@ -54,7 +55,7 @@ static atomic_t trapped;
MAX_UDP_CHUNK)
static void zap_completion_queue(void);
-static void arp_reply(struct sk_buff *skb);
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
@@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev)
list_for_each_entry(napi, &dev->napi_list, dev_list) {
if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
- budget = poll_one_napi(dev->npinfo, napi, budget);
+ budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
+ napi, budget);
spin_unlock(&napi->poll_lock);
if (!budget)
@@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi)
struct sk_buff *skb;
while ((skb = skb_dequeue(&npi->arp_tx)))
- arp_reply(skb);
+ netpoll_arp_reply(skb, npi);
}
}
static void netpoll_poll_dev(struct net_device *dev)
{
const struct net_device_ops *ops;
+ struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
if (!dev || !netif_running(dev))
return;
@@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev)
poll_napi(dev);
if (dev->flags & IFF_SLAVE) {
- if (dev->npinfo) {
+ if (ni) {
struct net_device *bond_dev = dev->master;
struct sk_buff *skb;
- while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
+ struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+ while ((skb = skb_dequeue(&ni->arp_tx))) {
skb->dev = bond_dev;
- skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
+ skb_queue_tail(&bond_ni->arp_tx, skb);
}
}
}
- service_arp_queue(dev->npinfo);
+ service_arp_queue(ni);
zap_completion_queue();
}
@@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev)
return 0;
}
+/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
struct net_device *dev)
{
@@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
unsigned long tries;
const struct net_device_ops *ops = dev->netdev_ops;
/* It is up to the caller to keep npinfo alive. */
- struct netpoll_info *npinfo = np->dev->npinfo;
+ struct netpoll_info *npinfo;
+
+ WARN_ON_ONCE(!irqs_disabled());
+ npinfo = rcu_dereference_bh(np->dev->npinfo);
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
__kfree_skb(skb);
return;
@@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
/* don't get messages out of order, and no recursion */
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- unsigned long flags;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
- local_irq_save(flags);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
tries > 0; --tries) {
if (__netif_tx_trylock(txq)) {
if (!netif_xmit_stopped(txq)) {
+ if (vlan_tx_tag_present(skb) &&
+ !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) {
+ skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
+ if (unlikely(!skb))
+ break;
+ skb->vlan_tci = 0;
+ }
+
status = ops->ndo_start_xmit(skb, dev);
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
@@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
}
WARN_ONCE(!irqs_disabled(),
- "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+ "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
dev->name, ops->ndo_start_xmit);
- local_irq_restore(flags);
}
if (status != NETDEV_TX_OK) {
@@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
}
EXPORT_SYMBOL(netpoll_send_udp);
-static void arp_reply(struct sk_buff *skb)
+static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
- struct netpoll_info *npinfo = skb->dev->npinfo;
struct arphdr *arp;
unsigned char *arp_ptr;
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
@@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb)
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
-int __netpoll_rx(struct sk_buff *skb)
+int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
int proto, len, ulen;
int hits = 0;
const struct iphdr *iph;
struct udphdr *uh;
- struct netpoll_info *npinfo = skb->dev->npinfo;
struct netpoll *np, *tmp;
if (list_empty(&npinfo->rx_np))
@@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb)
return 1;
}
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
+ skb = vlan_untag(skb);
+ if (unlikely(!skb))
+ goto out;
+ }
+
proto = ntohs(eth_hdr(skb)->h_proto);
if (proto != ETH_P_IP)
goto out;
@@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
}
EXPORT_SYMBOL(netpoll_parse_options);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
struct netpoll_info *npinfo;
const struct net_device_ops *ops;
@@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
}
if (!ndev->npinfo) {
- npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+ npinfo = kmalloc(sizeof(*npinfo), gfp);
if (!npinfo) {
err = -ENOMEM;
goto out;
@@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_setup) {
- err = ops->ndo_netpoll_setup(ndev, npinfo);
+ err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
if (err)
goto free_npinfo;
}
@@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np)
refill_skbs();
rtnl_lock();
- err = __netpoll_setup(np, ndev);
+ err = __netpoll_setup(np, ndev, GFP_KERNEL);
rtnl_unlock();
if (err)
@@ -878,6 +895,24 @@ static int __init netpoll_init(void)
}
core_initcall(netpoll_init);
+static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
+{
+ struct netpoll_info *npinfo =
+ container_of(rcu_head, struct netpoll_info, rcu);
+
+ skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->txq);
+
+ /* we can't call cancel_delayed_work_sync here, as we are in softirq */
+ cancel_delayed_work(&npinfo->tx_work);
+
+ /* clean after last, unfinished work */
+ __skb_queue_purge(&npinfo->txq);
+ /* now cancel it again */
+ cancel_delayed_work(&npinfo->tx_work);
+ kfree(npinfo);
+}
+
void __netpoll_cleanup(struct netpoll *np)
{
struct netpoll_info *npinfo;
@@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL);
+ call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+ }
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
- /* avoid racing with NAPI reading npinfo */
- synchronize_rcu_bh();
+static void rcu_cleanup_netpoll(struct rcu_head *rcu_head)
+{
+ struct netpoll *np = container_of(rcu_head, struct netpoll, rcu);
- skb_queue_purge(&npinfo->arp_tx);
- skb_queue_purge(&npinfo->txq);
- cancel_delayed_work_sync(&npinfo->tx_work);
+ __netpoll_cleanup(np);
+ kfree(np);
+}
- /* clean after last, unfinished work */
- __skb_queue_purge(&npinfo->txq);
- kfree(npinfo);
- }
+void __netpoll_free_rcu(struct netpoll *np)
+{
+ call_rcu_bh(&np->rcu, rcu_cleanup_netpoll);
}
-EXPORT_SYMBOL_GPL(__netpoll_cleanup);
+EXPORT_SYMBOL_GPL(__netpoll_free_rcu);
void netpoll_cleanup(struct netpoll *np)
{
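__netpoll_cleanup() no longer calls synchronize_rcu_bh() and tears the netpoll_info down inline; the object carries an rcu_head and is handed to call_rcu_bh(), with rcu_cleanup_netpoll_info() doing the queue purging and kfree() from the callback, and __netpoll_free_rcu() applies the same treatment to the netpoll itself. That lets cleanup run from contexts that must not sleep. The general unpublish-then-defer idiom, sketched with a made-up structure rather than the netpoll ones:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Sketch: deferred teardown of an RCU-published object. "struct foo" is a
 * made-up example, not one of the netpoll structures.
 */
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
        struct foo *f = container_of(head, struct foo, rcu);

        kfree(f);               /* runs after pre-existing readers are done */
}

static void foo_release(struct foo __rcu **slot)
{
        struct foo *f = rcu_dereference_protected(*slot, 1);

        RCU_INIT_POINTER(*slot, NULL);  /* unpublish first */
        if (f)
                call_rcu(&f->rcu, foo_free_rcu);
}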
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index ed0c0431fcd8..c75e3f9d060f 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev)
u32 max_len;
struct netprio_map *map;
- rtnl_lock();
max_len = atomic_read(&max_prioidx) + 1;
map = rtnl_dereference(dev->priomap);
if (!map || map->priomap_len < max_len)
ret = extend_netdev_table(dev, max_len);
- rtnl_unlock();
return ret;
}
@@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
if (!dev)
goto out_free_devname;
+ rtnl_lock();
ret = write_update_netdev_table(dev);
if (ret < 0)
goto out_put_dev;
- rcu_read_lock();
- map = rcu_dereference(dev->priomap);
+ map = rtnl_dereference(dev->priomap);
if (map)
map->priomap[prioidx] = priority;
- rcu_read_unlock();
out_put_dev:
+ rtnl_unlock();
dev_put(dev);
out_free_devname:
@@ -277,12 +275,6 @@ out_free_devname:
void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
struct task_struct *p;
- char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
-
- if (!tmp) {
- pr_warn("Unable to attach cgrp due to alloc failure!\n");
- return;
- }
cgroup_taskset_for_each(p, cgrp, tset) {
unsigned int fd;
@@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
continue;
}
- rcu_read_lock();
+ spin_lock(&files->file_lock);
fdt = files_fdtable(files);
for (fd = 0; fd < fdt->max_fds; fd++) {
- char *path;
struct file *file;
struct socket *sock;
- unsigned long s;
- int rv, err = 0;
+ int err;
file = fcheck_files(files, fd);
if (!file)
continue;
- path = d_path(&file->f_path, tmp, PAGE_SIZE);
- rv = sscanf(path, "socket:[%lu]", &s);
- if (rv <= 0)
- continue;
-
sock = sock_from_file(file, &err);
- if (!err)
+ if (sock)
sock_update_netprioidx(sock->sk, p);
}
- rcu_read_unlock();
+ spin_unlock(&files->file_lock);
task_unlock(p);
}
- kfree(tmp);
}
static struct cftype ss_files[] = {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 334b930e0de3..2c5a0a06c4ce 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -625,9 +625,13 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
.rta_id = id,
};
- if (expires)
- ci.rta_expires = jiffies_to_clock_t(expires);
+ if (expires) {
+ unsigned long clock;
+ clock = jiffies_to_clock_t(abs(expires));
+ clock = min_t(unsigned long, clock, INT_MAX);
+ ci.rta_expires = (expires > 0) ? clock : -clock;
+ }
return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
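rta_expires is a 32-bit field while the expiry passed in is in jiffies and may be negative (already expired) or very large; the fix converts the magnitude with jiffies_to_clock_t(abs(expires)), clamps it to INT_MAX, and re-applies the sign. For example, with HZ=1000 and USER_HZ=100, expires = -30000 jiffies becomes clock = 3000 and rta_expires = -3000. A sketch of the clamp (the user_hz/hz scaling only stands in for jiffies_to_clock_t(); rounding details are elided):

#include <limits.h>
#include <stdlib.h>

/* Sketch: convert the magnitude, cap it at INT_MAX, re-apply the sign. */
static int expires_to_rta(long expires_jiffies, long user_hz, long hz)
{
        unsigned long clock = labs(expires_jiffies) * user_hz / hz;

        if (clock > INT_MAX)
                clock = INT_MAX;
        return expires_jiffies > 0 ? (int)clock : -(int)clock;
}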
@@ -659,6 +663,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
}
}
+static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
+{
+ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
+ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
+}
+
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
@@ -667,7 +677,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
- (dev->flags & ~ifm->ifi_change);
+ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
@@ -1371,6 +1381,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
send_addr_notify = 1;
modified = 1;
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
if (tb[IFLA_MTU]) {
diff --git a/net/core/scm.c b/net/core/scm.c
index 8f6ccfd68ef4..040cebeed45b 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
i++, cmfptr++)
{
+ struct socket *sock;
int new_fd;
err = security_file_receive(fp[i]);
if (err)
@@ -281,6 +282,9 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
}
/* Bump the usage count and install the file. */
get_file(fp[i]);
+ sock = sock_from_file(fp[i], &err);
+ if (sock)
+ sock_update_netprioidx(sock->sk, current);
fd_install(new_fd, fp[i]);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 368f65c15e4f..fe00d1208167 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -145,6 +145,43 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
BUG();
}
+
+/*
+ * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
+ * the caller if emergency pfmemalloc reserves are being used. If it is and
+ * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
+ * may be used. Otherwise, the packet data may be discarded until enough
+ * memory is free
+ */
+#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
+ __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
+void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
+ bool *pfmemalloc)
+{
+ void *obj;
+ bool ret_pfmemalloc = false;
+
+ /*
+ * Try a regular allocation, when that fails and we're not entitled
+ * to the reserves, fail.
+ */
+ obj = kmalloc_node_track_caller(size,
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ if (obj || !(gfp_pfmemalloc_allowed(flags)))
+ goto out;
+
+ /* Try again but now we are using pfmemalloc reserves */
+ ret_pfmemalloc = true;
+ obj = kmalloc_node_track_caller(size, flags, node);
+
+out:
+ if (pfmemalloc)
+ *pfmemalloc = ret_pfmemalloc;
+
+ return obj;
+}
+
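__kmalloc_reserve() is a two-phase allocator: the first attempt adds __GFP_NOMEMALLOC so it can never touch the emergency reserves, and only when that fails and gfp_pfmemalloc_allowed() says the context is entitled to them does it retry with the caller's full flags, reporting through the bool out-parameter whether reserve memory was actually used (which __alloc_skb() then records in skb->pfmemalloc). The same idea in a self-contained sketch; emergency_alloc() is a hypothetical reserve-pool allocator, not a real API:

#include <stdbool.h>
#include <stdlib.h>

void *emergency_alloc(size_t size);     /* hypothetical reserve-pool allocator */

/* Sketch of the two-phase idea: try the ordinary pool first, fall back to
 * the emergency pool only when permitted, and report which pool satisfied
 * the request so the caller can tag the object (cf. skb->pfmemalloc).
 */
static void *alloc_with_fallback(size_t size, bool may_use_reserve,
                                 bool *used_reserve)
{
        void *obj = malloc(size);       /* ordinary attempt */

        *used_reserve = false;
        if (obj || !may_use_reserve)
                return obj;

        obj = emergency_alloc(size);    /* dip into the reserves */
        *used_reserve = (obj != NULL);
        return obj;
}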
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
@@ -155,8 +192,10 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
- * @fclone: allocate from fclone cache instead of head cache
- * and allocate a cloned (child) skb
+ * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
+ * instead of head cache and allocate a cloned (child) skb.
+ * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ * allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
@@ -167,14 +206,19 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
- int fclone, int node)
+ int flags, int node)
{
struct kmem_cache *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
+ bool pfmemalloc;
- cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+ cache = (flags & SKB_ALLOC_FCLONE)
+ ? skbuff_fclone_cache : skbuff_head_cache;
+
+ if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
+ gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
@@ -189,7 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
*/
size = SKB_DATA_ALIGN(size);
size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- data = kmalloc_node_track_caller(size, gfp_mask, node);
+ data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
if (!data)
goto nodata;
/* kmalloc(size) might give us more room than requested.
@@ -207,6 +251,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
memset(skb, 0, offsetof(struct sk_buff, tail));
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
+ skb->pfmemalloc = pfmemalloc;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -222,7 +267,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
- if (fclone) {
+ if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff *child = skb + 1;
atomic_t *fclone_ref = (atomic_t *) (child + 1);
@@ -232,6 +277,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
atomic_set(fclone_ref, 1);
child->fclone = SKB_FCLONE_UNAVAILABLE;
+ child->pfmemalloc = pfmemalloc;
}
out:
return skb;
@@ -302,14 +348,7 @@ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct netdev_alloc_cache *nc;
void *data = NULL;
@@ -319,7 +358,7 @@ void *netdev_alloc_frag(unsigned int fragsz)
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->page)) {
refill:
- nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ nc->page = alloc_page(gfp_mask);
if (unlikely(!nc->page))
goto end;
recycle:
@@ -343,6 +382,18 @@ end:
local_irq_restore(flags);
return data;
}
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
EXPORT_SYMBOL(netdev_alloc_frag);
/**
@@ -366,7 +417,12 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
- void *data = netdev_alloc_frag(fragsz);
+ void *data;
+
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
+ data = __netdev_alloc_frag(fragsz, gfp_mask);
if (likely(data)) {
skb = build_skb(data, fragsz);
@@ -374,7 +430,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
put_page(virt_to_head_page(data));
}
} else {
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+ SKB_ALLOC_RX, NUMA_NO_NODE);
}
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
@@ -656,6 +713,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#if IS_ENABLED(CONFIG_IP_VS)
new->ipvs_property = old->ipvs_property;
#endif
+ new->pfmemalloc = old->pfmemalloc;
new->protocol = old->protocol;
new->mark = old->mark;
new->skb_iif = old->skb_iif;
@@ -814,6 +872,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
n->fclone = SKB_FCLONE_CLONE;
atomic_inc(fclone_ref);
} else {
+ if (skb_pfmemalloc(skb))
+ gfp_mask |= __GFP_MEMALLOC;
+
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
return NULL;
@@ -850,6 +911,13 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
+static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+{
+ if (skb_pfmemalloc(skb))
+ return SKB_ALLOC_RX;
+ return 0;
+}
+
/**
* skb_copy - create private copy of an sk_buff
* @skb: buffer to copy
@@ -871,7 +939,8 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
+ struct sk_buff *n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -906,7 +975,8 @@ EXPORT_SYMBOL(skb_copy);
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
unsigned int size = skb_headlen(skb) + headroom;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
+ struct sk_buff *n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
goto out;
@@ -979,8 +1049,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
size = SKB_DATA_ALIGN(size);
- data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- gfp_mask);
+ if (skb_pfmemalloc(skb))
+ gfp_mask |= __GFP_MEMALLOC;
+ data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(ksize(data));
@@ -1092,8 +1164,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
- struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask);
+ struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
int off;
@@ -2775,8 +2848,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
- nskb = alloc_skb(hsize + doffset + headroom,
- GFP_ATOMIC);
+ nskb = __alloc_skb(hsize + doffset + headroom,
+ GFP_ATOMIC, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
diff --git a/net/core/sock.c b/net/core/sock.c
index 2676a88f533e..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
struct proto *proto;
@@ -271,6 +271,61 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
+struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(memalloc_socks);
+
+/**
+ * sk_set_memalloc - sets %SOCK_MEMALLOC
+ * @sk: socket to set it on
+ *
+ * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
+ * It's the responsibility of the admin to adjust min_free_kbytes
+ * to meet the requirements
+ */
+void sk_set_memalloc(struct sock *sk)
+{
+ sock_set_flag(sk, SOCK_MEMALLOC);
+ sk->sk_allocation |= __GFP_MEMALLOC;
+ static_key_slow_inc(&memalloc_socks);
+}
+EXPORT_SYMBOL_GPL(sk_set_memalloc);
+
+void sk_clear_memalloc(struct sock *sk)
+{
+ sock_reset_flag(sk, SOCK_MEMALLOC);
+ sk->sk_allocation &= ~__GFP_MEMALLOC;
+ static_key_slow_dec(&memalloc_socks);
+
+ /*
+ * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
+ * progress of swapping. However, if SOCK_MEMALLOC is cleared while
+ * it has rmem allocations there is a risk that the user of the
+ * socket cannot make forward progress due to exceeding the rmem
+ * limits. By rights, sk_clear_memalloc() should only be called
+ * on sockets being torn down but warn and reset the accounting if
+ * that assumption breaks.
+ */
+ if (WARN_ON(sk->sk_forward_alloc))
+ sk_mem_reclaim(sk);
+}
+EXPORT_SYMBOL_GPL(sk_clear_memalloc);
+
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ int ret;
+ unsigned long pflags = current->flags;
+
+ /* these should have been dropped before queueing */
+ BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
+
+ current->flags |= PF_MEMALLOC;
+ ret = sk->sk_backlog_rcv(sk, skb);
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
+
+ return ret;
+}
+EXPORT_SYMBOL(__sk_backlog_rcv);
+
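SOCK_MEMALLOC sockets are expected to be rare, so the new receive-path checks are wrapped in sk_memalloc_socks(), backed by the memalloc_socks static key defined here: sk_set_memalloc() increments it, sk_clear_memalloc() decrements it, and until the count is non-zero the guarded branches cost essentially nothing. __sk_backlog_rcv() complements this by running the backlog receive under PF_MEMALLOC, mirroring __netif_receive_skb(). A sketch of the jump-label idiom with a key of our own (the APIs shown are the ones this series already uses):

#include <linux/jump_label.h>
#include <linux/types.h>

/* Sketch: rarely-true feature flag behind a static key (an illustrative
 * key, not the real memalloc_socks).
 */
static struct static_key demo_feature = STATIC_KEY_INIT_FALSE;

static inline bool demo_feature_enabled(void)
{
        return static_key_false(&demo_feature); /* no-op branch until enabled */
}

static void demo_feature_enable(void)
{
        static_key_slow_inc(&demo_feature);     /* patches the branch sites */
}

static void demo_feature_disable(void)
{
        static_key_slow_dec(&demo_feature);
}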
#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
@@ -353,7 +408,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (err)
return err;
- if (!sk_rmem_schedule(sk, skb->truesize)) {
+ if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
}
@@ -1403,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk->sk_gso_max_size = dst->dev->gso_max_size;
+ sk->sk_gso_max_segs = dst->dev->gso_max_segs;
}
}
}
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 75c3582a7678..fb85d371a8de 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
u32 __user *optval, int __user *optlen)
{
int rc = -ENOPROTOOPT;
- if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
+ if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL)
rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
optval, optlen);
return rc;
@@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
u32 __user *optval, int __user *optlen)
{
int rc = -ENOPROTOOPT;
- if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
+ if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL)
rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
optval, optlen);
return rc;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d65e98798eca..119c04317d48 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
case DCCP_SOCKOPT_CCID_TX_INFO:
if (len < sizeof(tfrc))
return -EINVAL;
+ memset(&tfrc, 0, sizeof(tfrc));
tfrc.tfrctx_x = hc->tx_x;
tfrc.tfrctx_x_recv = hc->tx_x_recv;
tfrc.tfrctx_x_calc = hc->tx_x_calc;
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ae2ccf2890e4..15ca63ec604e 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
+obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a0124eb7dbea..77e87aff419a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -827,7 +827,7 @@ static int arp_process(struct sk_buff *skb)
}
if (arp->ar_op == htons(ARPOP_REQUEST) &&
- ip_route_input(skb, tip, sip, 0, dev) == 0) {
+ ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {
rt = skb_rtable(skb);
addr_type = rt->rt_type;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8732cc7920ed..c43ae3fba792 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1046,6 +1046,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, 2, -1);
+ rt_flush_dev(dev);
return NOTIFY_DONE;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e55171f184f9..da80dc14cc76 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -140,6 +140,21 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
},
};
+static void rt_fibinfo_free(struct rtable __rcu **rtp)
+{
+ struct rtable *rt = rcu_dereference_protected(*rtp, 1);
+
+ if (!rt)
+ return;
+
+ /* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
+ * because we waited an RCU grace period before calling
+ * free_fib_info_rcu()
+ */
+
+ dst_free(&rt->dst);
+}
+
static void free_nh_exceptions(struct fib_nh *nh)
{
struct fnhe_hash_bucket *hash = nh->nh_exceptions;
@@ -153,6 +168,9 @@ static void free_nh_exceptions(struct fib_nh *nh)
struct fib_nh_exception *next;
next = rcu_dereference_protected(fnhe->fnhe_next, 1);
+
+ rt_fibinfo_free(&fnhe->fnhe_rth);
+
kfree(fnhe);
fnhe = next;
@@ -161,6 +179,23 @@ static void free_nh_exceptions(struct fib_nh *nh)
kfree(hash);
}
+static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
+{
+ int cpu;
+
+ if (!rtp)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rtable *rt;
+
+ rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
+ if (rt)
+ dst_free(&rt->dst);
+ }
+ free_percpu(rtp);
+}
+
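The output route cache moves from a single nh_rth_output pointer to a per-cpu array, nh_pcpu_rth_output, allocated with alloc_percpu() in fib_create_info() further down and torn down here by visiting every possible CPU, releasing each cached route with dst_free(), and finally freeing the per-cpu area. The allocate/visit/free pairing in a generic kernel-style sketch (struct item and item_put() are placeholders for rtable/dst_free(); the real code also relies on the RCU grace period that precedes free_fib_info_rcu()):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct item {
        int payload;
};

static void item_put(struct item *it)
{
        kfree(it);
}

static struct item * __percpu *cache_alloc(void)
{
        return alloc_percpu(struct item *);
}

static void cache_free(struct item * __percpu *cache)
{
        int cpu;

        if (!cache)
                return;
        for_each_possible_cpu(cpu) {
                struct item *it = *per_cpu_ptr(cache, cpu);

                if (it)
                        item_put(it);
        }
        free_percpu(cache);
}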
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
@@ -171,10 +206,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
dev_put(nexthop_nh->nh_dev);
if (nexthop_nh->nh_exceptions)
free_nh_exceptions(nexthop_nh);
- if (nexthop_nh->nh_rth_output)
- dst_release(&nexthop_nh->nh_rth_output->dst);
- if (nexthop_nh->nh_rth_input)
- dst_release(&nexthop_nh->nh_rth_input->dst);
+ rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
+ rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
release_net(fi->fib_net);
@@ -804,6 +837,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_nhs = nhs;
change_nexthops(fi) {
nexthop_nh->nh_parent = fi;
+ nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
} endfor_nexthops(fi)
if (cfg->fc_mx) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 18cbc15b20d5..57bd978483e1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -159,7 +159,6 @@ struct trie {
#endif
};
-static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull);
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
@@ -368,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head)
static inline void free_leaf(struct leaf *l)
{
- call_rcu_bh(&l->rcu, __leaf_free_rcu);
+ call_rcu(&l->rcu, __leaf_free_rcu);
}
static inline void free_leaf_info(struct leaf_info *leaf)
@@ -473,7 +472,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct rt_trie_node) << bits);
+ sizeof(struct rt_trie_node *) << bits);
return tn;
}
@@ -490,7 +489,7 @@ static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *
return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}
-static inline void put_child(struct trie *t, struct tnode *tn, int i,
+static inline void put_child(struct tnode *tn, int i,
struct rt_trie_node *n)
{
tnode_put_child_reorg(tn, i, n, -1);
@@ -754,8 +753,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
goto nomem;
}
- put_child(t, tn, 2*i, (struct rt_trie_node *) left);
- put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
+ put_child(tn, 2*i, (struct rt_trie_node *) left);
+ put_child(tn, 2*i+1, (struct rt_trie_node *) right);
}
}
@@ -776,9 +775,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
if (tkey_extract_bits(node->key,
oldtnode->pos + oldtnode->bits,
1) == 0)
- put_child(t, tn, 2*i, node);
+ put_child(tn, 2*i, node);
else
- put_child(t, tn, 2*i+1, node);
+ put_child(tn, 2*i+1, node);
continue;
}
@@ -786,8 +785,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
inode = (struct tnode *) node;
if (inode->bits == 1) {
- put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
- put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
+ put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
+ put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
tnode_free_safe(inode);
continue;
@@ -817,22 +816,22 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
*/
left = (struct tnode *) tnode_get_child(tn, 2*i);
- put_child(t, tn, 2*i, NULL);
+ put_child(tn, 2*i, NULL);
BUG_ON(!left);
right = (struct tnode *) tnode_get_child(tn, 2*i+1);
- put_child(t, tn, 2*i+1, NULL);
+ put_child(tn, 2*i+1, NULL);
BUG_ON(!right);
size = tnode_child_length(left);
for (j = 0; j < size; j++) {
- put_child(t, left, j, rtnl_dereference(inode->child[j]));
- put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
+ put_child(left, j, rtnl_dereference(inode->child[j]));
+ put_child(right, j, rtnl_dereference(inode->child[j + size]));
}
- put_child(t, tn, 2*i, resize(t, left));
- put_child(t, tn, 2*i+1, resize(t, right));
+ put_child(tn, 2*i, resize(t, left));
+ put_child(tn, 2*i+1, resize(t, right));
tnode_free_safe(inode);
}
@@ -877,7 +876,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (!newn)
goto nomem;
- put_child(t, tn, i/2, (struct rt_trie_node *)newn);
+ put_child(tn, i/2, (struct rt_trie_node *)newn);
}
}
@@ -892,21 +891,21 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (left == NULL) {
if (right == NULL) /* Both are empty */
continue;
- put_child(t, tn, i/2, right);
+ put_child(tn, i/2, right);
continue;
}
if (right == NULL) {
- put_child(t, tn, i/2, left);
+ put_child(tn, i/2, left);
continue;
}
/* Two nonempty children */
newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
- put_child(t, tn, i/2, NULL);
- put_child(t, newBinNode, 0, left);
- put_child(t, newBinNode, 1, right);
- put_child(t, tn, i/2, resize(t, newBinNode));
+ put_child(tn, i/2, NULL);
+ put_child(newBinNode, 0, left);
+ put_child(newBinNode, 1, right);
+ put_child(tn, i/2, resize(t, newBinNode));
}
tnode_free_safe(oldtnode);
return tn;
@@ -1125,7 +1124,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, tp, cindex, (struct rt_trie_node *)l);
+ put_child(tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1155,12 +1154,12 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
node_set_parent((struct rt_trie_node *)tn, tp);
missbit = tkey_extract_bits(key, newpos, 1);
- put_child(t, tn, missbit, (struct rt_trie_node *)l);
- put_child(t, tn, 1-missbit, n);
+ put_child(tn, missbit, (struct rt_trie_node *)l);
+ put_child(tn, 1-missbit, n);
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, tp, cindex, (struct rt_trie_node *)tn);
+ put_child(tp, cindex, (struct rt_trie_node *)tn);
} else {
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
@@ -1619,7 +1618,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
if (tp) {
t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
- put_child(t, tp, cindex, NULL);
+ put_child(tp, cindex, NULL);
trie_rebalance(t, tp);
} else
RCU_INIT_POINTER(t->trie, NULL);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index db0cf17c00f7..7f75f21d7b83 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *newinet = inet_sk(newsk);
- struct ip_options_rcu *opt = ireq->opt;
+ struct ip_options_rcu *opt;
struct net *net = sock_net(sk);
struct flowi4 *fl4;
struct rtable *rt;
fl4 = &newinet->cork.fl.u.ip4;
+
+ rcu_read_lock();
+ opt = rcu_dereference(newinet->inet_opt);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -421,11 +424,13 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_gateway)
goto route_err;
+ rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
+ rcu_read_unlock();
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7ad88e5e7110..8d07c973409c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -258,8 +258,8 @@ static void ip_expire(unsigned long arg)
/* skb dst is stale, drop it, and perform route lookup again */
skb_dst_drop(head);
iph = ip_hdr(head);
- err = ip_route_input(head, iph->daddr, iph->saddr,
- iph->tos, head->dev);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
if (err)
goto out_rcu_unlock;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 4ebc6feee250..f1395a6fb35f 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -314,6 +314,7 @@ drop:
}
int sysctl_ip_early_demux __read_mostly = 1;
+EXPORT_SYMBOL(sysctl_ip_early_demux);
static int ip_rcv_finish(struct sk_buff *skb)
{
@@ -324,11 +325,12 @@ static int ip_rcv_finish(struct sk_buff *skb)
const struct net_protocol *ipprot;
int protocol = iph->protocol;
- rcu_read_lock();
ipprot = rcu_dereference(inet_protos[protocol]);
- if (ipprot && ipprot->early_demux)
+ if (ipprot && ipprot->early_demux) {
ipprot->early_demux(skb);
- rcu_read_unlock();
+ /* must reload iph, skb->head might have changed */
+ iph = ip_hdr(skb);
+ }
}
/*
@@ -336,8 +338,8 @@ static int ip_rcv_finish(struct sk_buff *skb)
* how the packet travels inside Linux networking.
*/
if (!skb_dst(skb)) {
- int err = ip_route_input(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev);
+ int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev);
if (unlikely(err)) {
if (err == -EXDEV)
NET_INC_STATS_BH(dev_net(skb->dev),
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ba39a52d18c1..c196d749daf2 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
- if (neigh) {
+ if (!IS_ERR(neigh)) {
int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
@@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
iph->ihl = 5;
iph->tos = inet->tos;
iph->frag_off = df;
- ip_select_ident(iph, &rt->dst, sk);
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
+ ip_select_ident(iph, &rt->dst, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
@@ -1366,9 +1366,8 @@ out:
return skb;
}
-int ip_send_skb(struct sk_buff *skb)
+int ip_send_skb(struct net *net, struct sk_buff *skb)
{
- struct net *net = sock_net(skb->sk);
int err;
err = ip_local_out(skb);
@@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
return 0;
/* Netfilter gets whole the not fragmented skb. */
- return ip_send_skb(skb);
+ return ip_send_skb(sock_net(sk), skb);
}
/*
@@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
+ skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
- kfree(mrt);
+ ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
- kfree(net->ipv4.mrt);
+ ipmr_free_table(net->ipv4.mrt);
}
#endif
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt;
}
+static void ipmr_free_table(struct mr_table *mrt)
+{
+ del_timer_sync(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
+
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index ea4a23813d26..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
hdr, NULL, &matchoff, &matchlen,
&addr, &port) > 0) {
- unsigned int matchend, poff, plen, buflen, n;
+ unsigned int olen, matchend, poff, plen, buflen, n;
char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
/* We're only interested in headers related to this
@@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
goto next;
}
+ olen = *datalen;
if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
&addr, port))
return NF_DROP;
- matchend = matchoff + matchlen;
+ matchend = matchoff + matchlen + *datalen - olen;
/* The maddr= parameter (RFC 2361) specifies where to send
* the reply. */
if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
"maddr=", &poff, &plen,
- &addr) > 0 &&
+ &addr, true) > 0 &&
addr.ip == ct->tuplehash[dir].tuple.src.u3.ip &&
addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
buflen = sprintf(buffer, "%pI4",
@@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
* from which the server received the request. */
if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen,
"received=", &poff, &plen,
- &addr) > 0 &&
+ &addr, false) > 0 &&
addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip &&
addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
buflen = sprintf(buffer, "%pI4",
@@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
port = 0;
break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6bcb8fc71cbc..82cf2a722b23 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -70,7 +70,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
@@ -80,7 +79,6 @@
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
@@ -88,11 +86,9 @@
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
-#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
@@ -147,6 +143,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
+static void ipv4_dst_destroy(struct dst_entry *dst);
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
@@ -170,6 +167,7 @@ static struct dst_ops ipv4_dst_ops = {
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
+ .destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
@@ -444,7 +442,7 @@ static inline int ip_rt_proc_init(void)
}
#endif /* CONFIG_PROC_FS */
-static inline int rt_is_expired(struct rtable *rth)
+static inline bool rt_is_expired(const struct rtable *rth)
{
return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
@@ -587,11 +585,17 @@ static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
build_sk_flow_key(fl4, sk);
}
-static DEFINE_SEQLOCK(fnhe_seqlock);
+static inline void rt_free(struct rtable *rt)
+{
+ call_rcu(&rt->dst.rcu_head, dst_rcu_free);
+}
+
+static DEFINE_SPINLOCK(fnhe_lock);
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
struct fib_nh_exception *fnhe, *oldest;
+ struct rtable *orig;
oldest = rcu_dereference(hash->chain);
for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
@@ -599,6 +603,11 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
oldest = fnhe;
}
+ orig = rcu_dereference(oldest->fnhe_rth);
+ if (orig) {
+ RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
+ rt_free(orig);
+ }
return oldest;
}
@@ -620,7 +629,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
int depth;
u32 hval = fnhe_hashfun(daddr);
- write_seqlock_bh(&fnhe_seqlock);
+ spin_lock_bh(&fnhe_lock);
hash = nh->nh_exceptions;
if (!hash) {
@@ -667,7 +676,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_stamp = jiffies;
out_unlock:
- write_sequnlock_bh(&fnhe_seqlock);
+ spin_unlock_bh(&fnhe_lock);
return;
}
@@ -925,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
+ rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires);
}
+ rcu_read_unlock();
return mtu;
}
@@ -947,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
dst->obsolete = DST_OBSOLETE_KILL;
} else {
rt->rt_pmtu = mtu;
- dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+ rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
}
}
@@ -1164,67 +1175,126 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
return NULL;
}
-static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
__be32 daddr)
{
- __be32 fnhe_daddr, gw;
- unsigned long expires;
- unsigned int seq;
- u32 pmtu;
-
-restart:
- seq = read_seqbegin(&fnhe_seqlock);
- fnhe_daddr = fnhe->fnhe_daddr;
- gw = fnhe->fnhe_gw;
- pmtu = fnhe->fnhe_pmtu;
- expires = fnhe->fnhe_expires;
- if (read_seqretry(&fnhe_seqlock, seq))
- goto restart;
-
- if (daddr != fnhe_daddr)
- return;
+ bool ret = false;
+
+ spin_lock_bh(&fnhe_lock);
- if (pmtu) {
- unsigned long diff = expires - jiffies;
+ if (daddr == fnhe->fnhe_daddr) {
+ struct rtable *orig;
- if (time_before(jiffies, expires)) {
- rt->rt_pmtu = pmtu;
- dst_set_expires(&rt->dst, diff);
+ if (fnhe->fnhe_pmtu) {
+ unsigned long expires = fnhe->fnhe_expires;
+ unsigned long diff = expires - jiffies;
+
+ if (time_before(jiffies, expires)) {
+ rt->rt_pmtu = fnhe->fnhe_pmtu;
+ dst_set_expires(&rt->dst, diff);
+ }
}
+ if (fnhe->fnhe_gw) {
+ rt->rt_flags |= RTCF_REDIRECTED;
+ rt->rt_gateway = fnhe->fnhe_gw;
+ }
+
+ orig = rcu_dereference(fnhe->fnhe_rth);
+ rcu_assign_pointer(fnhe->fnhe_rth, rt);
+ if (orig)
+ rt_free(orig);
+
+ fnhe->fnhe_stamp = jiffies;
+ ret = true;
+ } else {
+ /* Routes we intend to cache in nexthop exception have
+ * the DST_NOCACHE bit clear. However, if we are
+ * unsuccessful at storing this route into the cache
+ * we really need to set it.
+ */
+ rt->dst.flags |= DST_NOCACHE;
}
- if (gw) {
- rt->rt_flags |= RTCF_REDIRECTED;
- rt->rt_gateway = gw;
- }
- fnhe->fnhe_stamp = jiffies;
-}
+ spin_unlock_bh(&fnhe_lock);
-static inline void rt_release_rcu(struct rcu_head *head)
-{
- struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
- dst_release(dst);
+ return ret;
}
-static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
+static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
- struct rtable *orig, *prev, **p = &nh->nh_rth_output;
-
- if (rt_is_input_route(rt))
- p = &nh->nh_rth_input;
+ struct rtable *orig, *prev, **p;
+ bool ret = true;
+ if (rt_is_input_route(rt)) {
+ p = (struct rtable **)&nh->nh_rth_input;
+ } else {
+ if (!nh->nh_pcpu_rth_output)
+ goto nocache;
+ p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+ }
orig = *p;
prev = cmpxchg(p, orig, rt);
if (prev == orig) {
- dst_clone(&rt->dst);
if (orig)
- call_rcu_bh(&orig->dst.rcu_head, rt_release_rcu);
+ rt_free(orig);
+ } else {
+ /* Routes we intend to cache in the FIB nexthop have
+ * the DST_NOCACHE bit clear. However, if we are
+ * unsuccessful at storing this route into the cache
+ * we really need to set it.
+ */
+nocache:
+ rt->dst.flags |= DST_NOCACHE;
+ ret = false;
+ }
+
+ return ret;
+}
+
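rt_cache_route() publishes a freshly built route into a single cache slot with cmpxchg(): only the CPU whose compare succeeds releases the previous occupant, now through rt_free() (an RCU-deferred dst_rcu_free()), while a loser marks its route DST_NOCACHE and returns false so the caller can fall back to the uncached list. The lockless single-slot cache idiom, reduced to its essentials (struct obj is illustrative):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Sketch: lock-free single-slot cache; the old occupant is freed only
 * after an RCU grace period, as rt_free() does above.
 */
struct obj {
        struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct obj, rcu));
}

static bool cache_publish(struct obj **slot, struct obj *new)
{
        struct obj *old = *slot;
        struct obj *prev = cmpxchg(slot, old, new);

        if (prev != old)
                return false;           /* another CPU won the race */
        if (old)
                call_rcu(&old->rcu, obj_free_rcu);
        return true;
}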
+static DEFINE_SPINLOCK(rt_uncached_lock);
+static LIST_HEAD(rt_uncached_list);
+
+static void rt_add_uncached_list(struct rtable *rt)
+{
+ spin_lock_bh(&rt_uncached_lock);
+ list_add_tail(&rt->rt_uncached, &rt_uncached_list);
+ spin_unlock_bh(&rt_uncached_lock);
+}
+
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (!list_empty(&rt->rt_uncached)) {
+ spin_lock_bh(&rt_uncached_lock);
+ list_del(&rt->rt_uncached);
+ spin_unlock_bh(&rt_uncached_lock);
}
}
-static bool rt_cache_valid(struct rtable *rt)
+void rt_flush_dev(struct net_device *dev)
{
- return (rt && rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK);
+ if (!list_empty(&rt_uncached_list)) {
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+
+ spin_lock_bh(&rt_uncached_lock);
+ list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+ if (rt->dst.dev != dev)
+ continue;
+ rt->dst.dev = net->loopback_dev;
+ dev_hold(rt->dst.dev);
+ dev_put(dev);
+ }
+ spin_unlock_bh(&rt_uncached_lock);
+ }
+}
+
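Routes that could not be cached anywhere (DST_NOCACHE) are linked onto the global rt_uncached_list under rt_uncached_lock, and ipv4_dst_destroy() unlinks them; rt_flush_dev(), called from the new NETDEV_UNREGISTER hook in fib_netdev_event(), walks that list and re-points any survivor at the loopback device, taking a reference on loopback and dropping the one that was pinning the dying device so unregistration can complete. The orphan-list idea in a small sketch (struct tracked is illustrative):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct tracked {
        struct net_device *dev;
        struct list_head node;
};

static LIST_HEAD(tracked_list);
static DEFINE_SPINLOCK(tracked_lock);

static void tracked_flush_dev(struct net_device *dead, struct net_device *lo)
{
        struct tracked *t;

        spin_lock_bh(&tracked_lock);
        list_for_each_entry(t, &tracked_list, node) {
                if (t->dev != dead)
                        continue;
                t->dev = lo;            /* re-point at loopback */
                dev_hold(lo);           /* reference for the new device */
                dev_put(dead);          /* drop the ref pinning the old one */
        }
        spin_unlock_bh(&tracked_lock);
}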
+static bool rt_cache_valid(const struct rtable *rt)
+{
+ return rt &&
+ rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ !rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
@@ -1232,20 +1302,24 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
struct fib_nh_exception *fnhe,
struct fib_info *fi, u16 type, u32 itag)
{
+ bool cached = false;
+
if (fi) {
struct fib_nh *nh = &FIB_RES_NH(*res);
if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = nh->nh_gw;
- if (unlikely(fnhe))
- rt_bind_exception(rt, fnhe, daddr);
dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
- if (!(rt->dst.flags & DST_HOST))
- rt_cache_route(nh, rt);
+ if (unlikely(fnhe))
+ cached = rt_bind_exception(rt, fnhe, daddr);
+ else if (!(rt->dst.flags & DST_NOCACHE))
+ cached = rt_cache_route(nh, rt);
}
+ if (unlikely(!cached))
+ rt_add_uncached_list(rt);
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1259,7 +1333,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
bool nopolicy, bool noxfrm, bool will_cache)
{
return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
- (will_cache ? 0 : DST_HOST) | DST_NOCACHE |
+ (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
}
@@ -1312,6 +1386,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
if (our) {
rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
@@ -1364,8 +1439,7 @@ static void ip_handle_martian_source(struct net_device *dev,
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos,
- struct rtable **result)
+ __be32 daddr, __be32 saddr, u32 tos)
{
struct rtable *rth;
int err;
@@ -1414,9 +1488,9 @@ static int __mkroute_input(struct sk_buff *skb,
do_cache = false;
if (res->fi) {
if (!itag) {
- rth = FIB_RES_NH(*res).nh_rth_input;
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
- dst_hold(&rth->dst);
+ skb_dst_set_noref(skb, &rth->dst);
goto out;
}
do_cache = true;
@@ -1438,13 +1512,14 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
+ skb_dst_set(skb, &rth->dst);
out:
- *result = rth;
err = 0;
cleanup:
return err;
@@ -1456,21 +1531,13 @@ static int ip_mkroute_input(struct sk_buff *skb,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
- struct rtable *rth = NULL;
- int err;
-
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && res->fi->fib_nhs > 1)
fib_select_multipath(res);
#endif
/* create a routing cache entry */
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
- if (err)
- return err;
-
- skb_dst_set(skb, &rth->dst);
- return 0;
+ return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
@@ -1584,10 +1651,11 @@ local_input:
do_cache = false;
if (res.fi) {
if (!itag) {
- rth = FIB_RES_NH(res).nh_rth_input;
+ rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
if (rt_cache_valid(rth)) {
- dst_hold(&rth->dst);
- goto set_and_out;
+ skb_dst_set_noref(skb, &rth->dst);
+ err = 0;
+ goto out;
}
do_cache = true;
}
@@ -1611,6 +1679,7 @@ local_input:
rth->rt_iif = 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
@@ -1618,7 +1687,6 @@ local_input:
}
if (do_cache)
rt_cache_route(&FIB_RES_NH(res), rth);
-set_and_out:
skb_dst_set(skb, &rth->dst);
err = 0;
goto out;
@@ -1656,8 +1724,8 @@ martian_source_keep_err:
goto out;
}
-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev)
{
int res;
@@ -1700,7 +1768,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rcu_read_unlock();
return res;
}
-EXPORT_SYMBOL(ip_route_input);
+EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
@@ -1750,19 +1818,23 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
fnhe = NULL;
if (fi) {
+ struct rtable __rcu **prth;
+
fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
- if (!fnhe) {
- rth = FIB_RES_NH(*res).nh_rth_output;
- if (rt_cache_valid(rth)) {
- dst_hold(&rth->dst);
- return rth;
- }
+ if (fnhe)
+ prth = &fnhe->fnhe_rth;
+ else
+ prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
+ rth = rcu_dereference(*prth);
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ return rth;
}
}
rth = rt_dst_alloc(dev_out,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
IN_DEV_CONF_GET(in_dev, NOXFRM),
- fi && !fnhe);
+ fi);
if (!rth)
return ERR_PTR(-ENOBUFS);
@@ -1775,6 +1847,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
rth->rt_iif = orig_oif ? : 0;
rth->rt_pmtu = 0;
rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(out_slow_tot);
@@ -1957,7 +2030,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
}
dev_out = net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
- res.fi = NULL;
flags |= RTCF_LOCAL;
goto make_route;
}
@@ -2054,6 +2126,8 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
+ INIT_LIST_HEAD(&rt->rt_uncached);
+
dst_free(new);
}
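
As an aside to the rt_bind_exception()/rt_cache_route() hunks above: the new per-nexthop route cache is a lock-free single-slot publish done with cmpxchg(), falling back to the DST_NOCACHE/uncached path when the slot cannot be taken. The userspace sketch below (invented names, C11 atomics, not kernel code) shows only that pattern; the kernel additionally defers the free through RCU.

/* Single-slot cache: install with compare-and-swap, retire the old entry on
 * success, report failure so the caller can treat the entry as uncached. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int data; };

static _Atomic(struct entry *) cache_slot;   /* plays the role of nh_rth_* */

static bool cache_store(struct entry *e)
{
    struct entry *orig = atomic_load(&cache_slot);

    /* cmpxchg(p, orig, e): only install e if the slot still holds orig. */
    if (atomic_compare_exchange_strong(&cache_slot, &orig, e)) {
        free(orig);          /* kernel uses rt_free() -> RCU-deferred free */
        return true;
    }
    /* Lost the race: caller must mark e uncached (DST_NOCACHE). */
    return false;
}

int main(void)
{
    struct entry *e = malloc(sizeof(*e));
    e->data = 42;
    printf("cached: %s\n", cache_store(e) ? "yes" : "no");
    return 0;
}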
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 5840c3255721..1b5ce96707a3 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -184,7 +184,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
int ret;
unsigned long vec[3];
struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
struct mem_cgroup *memcg;
#endif
@@ -203,7 +203,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
if (ret)
return ret;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
@@ -784,13 +784,6 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "rt_cache_rebuild_count",
- .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "ping_group_range",
.data = &init_net.ipv4.sysctl_ping_group_range,
.maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
@@ -829,8 +822,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
table[5].data =
&net->ipv4.sysctl_icmp_ratemask;
table[6].data =
- &net->ipv4.sysctl_rt_cache_rebuild_count;
- table[7].data =
&net->ipv4.sysctl_ping_group_range;
}
@@ -842,8 +833,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
net->ipv4.sysctl_ping_group_range[0] = 1;
net->ipv4.sysctl_ping_group_range[1] = 0;
- net->ipv4.sysctl_rt_cache_rebuild_count = 4;
-
tcp_init_mem(net);
net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 581ecf02c6b5..2109ff4a1daf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
old_size_goal + mss_now > xmit_size_goal)) {
xmit_size_goal = old_size_goal;
} else {
- tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+ tp->xmit_size_goal_segs =
+ min_t(u16, xmit_size_goal / mss_now,
+ sk->sk_gso_max_segs);
xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
}
}
@@ -2681,7 +2683,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
/* Cap the max timeout in ms TCP will retry/retrans
* before giving up and aborting (ETIMEDOUT) a connection.
*/
- icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ if (val < 0)
+ err = -EINVAL;
+ else
+ icsk->icsk_user_timeout = msecs_to_jiffies(val);
break;
default:
err = -ENOPROTOOPT;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4d4db16e336e..1432cdb0644c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size)
+ left * tp->mss_cache < sk->sk_gso_max_size &&
+ left < sk->sk_gso_max_segs)
return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e07a64ca44e..6e38c6c23caa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int newly_acked_sacked, bool is_dupack,
+ int prior_sacked, bool is_dupack,
int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
+ int newly_acked_sacked = 0;
int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
- int newly_acked_sacked = 0;
bool frto_cwnd = false;
/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out;
- newly_acked_sacked = (prior_packets - prior_sacked) -
- (tp->packets_out - tp->sacked_out);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
- newly_acked_sacked = tp->sacked_out - prior_sacked;
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
}
@@ -4351,19 +4350,20 @@ static void tcp_ofo_queue(struct sock *sk)
static bool tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+ unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- !sk_rmem_schedule(sk, size)) {
+ !sk_rmem_schedule(sk, skb, size)) {
if (tcp_prune_queue(sk) < 0)
return -1;
- if (!sk_rmem_schedule(sk, size)) {
+ if (!sk_rmem_schedule(sk, skb, size)) {
if (!tcp_prune_ofo_queue(sk))
return -1;
- if (!sk_rmem_schedule(sk, size))
+ if (!sk_rmem_schedule(sk, skb, size))
return -1;
}
}
@@ -4418,7 +4418,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
TCP_ECN_check_ce(tp, skb);
- if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+ if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
__kfree_skb(skb);
return;
@@ -4552,17 +4552,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct tcphdr *th;
bool fragstolen;
- if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
- goto err;
-
skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
if (!skb)
goto err;
+ if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+ goto err_free;
+
th = (struct tcphdr *)skb_put(skb, sizeof(*th));
skb_reset_transport_header(skb);
memset(th, 0, sizeof(*th));
@@ -4633,7 +4633,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
- tcp_try_rmem_schedule(sk, skb->truesize))
+ tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto drop;
eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
@@ -5391,6 +5391,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
+ if (unlikely(sk->sk_rx_dst == NULL))
+ inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
* The code loosely follows the one in the famous
@@ -5475,7 +5477,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ if (tp->ucopy.task == current &&
+ sock_owned_by_user(sk) &&
+ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
eaten = 1;
}
@@ -5602,7 +5606,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
if (skb != NULL) {
- sk->sk_rx_dst = dst_clone(skb_dst(skb));
+ icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
}
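
A note on the tcp_fastretrans_alert() change above: the removed caller-side expression (prior_packets - prior_sacked) - (tp->packets_out - tp->sacked_out) equals pkts_acked + tp->sacked_out - prior_sacked once pkts_acked = prior_packets - tp->packets_out is substituted, so moving the computation inside the function changes only where sacked_out is sampled (after the Reno-SACK adjustments), not the formula. A tiny self-check with made-up numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    int prior_packets = 20, prior_sacked = 3;   /* hypothetical values */
    int packets_out   = 15, sacked_out   = 5;

    int pkts_acked = prior_packets - packets_out;
    int old_form = (prior_packets - prior_sacked) - (packets_out - sacked_out);
    int new_form = pkts_acked + sacked_out - prior_sacked;

    assert(old_form == new_form);
    printf("newly_acked_sacked = %d\n", new_form);
    return 0;
}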
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3e30548ac32a..00a748d14062 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
tp->mtu_info = info;
- if (!sock_owned_by_user(sk))
+ if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
- else
- set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
+ } else {
+ if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
+ sock_hold(sk);
+ }
goto out;
}
@@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
goto exit_nonewsk;
newsk->sk_gso_type = SKB_GSO_TCPV4;
+ inet_sk_rx_dst_set(newsk, skb);
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
@@ -1617,21 +1620,16 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ struct dst_entry *dst = sk->sk_rx_dst;
+
sock_rps_save_rxhash(sk, skb);
- if (sk->sk_rx_dst) {
- struct dst_entry *dst = sk->sk_rx_dst;
- if (dst->ops->check(dst, 0) == NULL) {
+ if (dst) {
+ if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+ dst->ops->check(dst, 0) == NULL) {
dst_release(dst);
sk->sk_rx_dst = NULL;
}
}
- if (unlikely(sk->sk_rx_dst == NULL)) {
- struct inet_sock *icsk = inet_sk(sk);
- struct rtable *rt = skb_rtable(skb);
-
- sk->sk_rx_dst = dst_clone(&rt->dst);
- icsk->rx_dst_ifindex = inet_iif(skb);
- }
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1686,7 +1684,6 @@ void tcp_v4_early_demux(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct tcphdr *th;
- struct net_device *dev;
struct sock *sk;
if (skb->pkt_type != PACKET_HOST)
@@ -1701,24 +1698,20 @@ void tcp_v4_early_demux(struct sk_buff *skb)
if (th->doff < sizeof(struct tcphdr) / 4)
return;
- if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
- return;
-
- dev = skb->dev;
sk = __inet_lookup_established(net, &tcp_hashinfo,
iph->saddr, th->source,
iph->daddr, ntohs(th->dest),
- dev->ifindex);
+ skb->skb_iif);
if (sk) {
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk->sk_state != TCP_TIME_WAIT) {
struct dst_entry *dst = sk->sk_rx_dst;
- struct inet_sock *icsk = inet_sk(sk);
+
if (dst)
dst = dst_check(dst, 0);
if (dst &&
- icsk->rx_dst_ifindex == dev->ifindex)
+ inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
skb_dst_set_noref(skb, dst);
}
}
@@ -1879,10 +1872,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_destructor= tcp_twsk_destructor,
};
+void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+}
+EXPORT_SYMBOL(inet_sk_rx_dst_set);
+
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
+ .sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
@@ -2640,7 +2644,7 @@ struct proto tcp_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
.init_cgroup = tcp_init_cgroup,
.destroy_cgroup = tcp_destroy_cgroup,
.proto_cgroup = tcp_proto_cgroup,
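
The tcp_v4_do_rcv()/tcp_v4_early_demux() hunks above reuse a cached per-socket input route only when the packet arrived on the interface the route was learned on and the route still passes its ->check() hook. The sketch below (invented names, plain userspace C) illustrates just that validation step.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct route {
    int ifindex;                         /* interface the route was cached for */
    bool (*check)(const struct route *); /* stands in for dst->ops->check()    */
};

static bool always_valid(const struct route *r) { (void)r; return true; }

/* Return the cached route if still usable for a packet on pkt_ifindex,
 * otherwise invalidate the cache and return NULL. */
static struct route *use_cached_route(struct route **cache, int pkt_ifindex)
{
    struct route *rt = *cache;

    if (rt && (rt->ifindex != pkt_ifindex || !rt->check(rt))) {
        *cache = NULL;                   /* kernel does dst_release() here */
        rt = NULL;
    }
    return rt;
}

int main(void)
{
    struct route r = { .ifindex = 2, .check = always_valid };
    struct route *cache = &r;

    printf("same ifindex:  %s\n", use_cached_route(&cache, 2) ? "hit" : "miss");
    printf("other ifindex: %s\n", use_cached_route(&cache, 3) ? "hit" : "miss");
    return 0;
}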
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 2288a6399e1e..0abe67bb4d3a 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net)
static void __net_exit tcp_net_metrics_exit(struct net *net)
{
+ unsigned int i;
+
+ for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) {
+ struct tcp_metrics_block *tm, *next;
+
+ tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
+ while (tm) {
+ next = rcu_dereference_protected(tm->tcpm_next, 1);
+ kfree(tm);
+ tm = next;
+ }
+ }
kfree(net->ipv4.tcp_metrics_hash);
}
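
The teardown added to tcp_net_metrics_exit() above walks every bucket of a chained hash table and frees each block before the bucket array itself is released. A minimal userspace analogue (invented types) of that walk-and-free loop:

#include <stdlib.h>
#include <stdio.h>

struct node { struct node *next; int val; };
struct table { struct node **buckets; unsigned int nbuckets; };

static void table_free(struct table *t)
{
    for (unsigned int i = 0; i < t->nbuckets; i++) {
        struct node *n = t->buckets[i];
        while (n) {
            struct node *next = n->next;  /* save the link before freeing */
            free(n);
            n = next;
        }
    }
    free(t->buckets);
}

int main(void)
{
    struct table t = { .nbuckets = 4 };
    t.buckets = calloc(t.nbuckets, sizeof(*t.buckets));

    struct node *n = malloc(sizeof(*n));  /* one chained entry in bucket 0 */
    n->next = NULL;
    n->val = 1;
    t.buckets[0] = n;

    table_free(&t);
    puts("freed");
    return 0;
}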
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 5912ac3fd240..6ff7f10dce9d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct tcp_sock *oldtp = tcp_sk(sk);
struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
- newsk->sk_rx_dst = dst_clone(skb_dst(skb));
-
/* TCP Cookie Transactions require space for the cookie pair,
* as it differs for each connection. There is no need to
* copy any s_data_payload stored at the original socket.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 33cd065cfbd8..d04632673a9e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk)
if (flags & (1UL << TCP_TSQ_DEFERRED))
tcp_tsq_handler(sk);
- if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+ if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
tcp_write_timer_handler(sk);
-
- if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+ __sock_put(sk);
+ }
+ if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) {
tcp_delack_timer_handler(sk);
-
- if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+ __sock_put(sk);
+ }
+ if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
sk->sk_prot->mtu_reduced(sk);
+ __sock_put(sk);
+ }
}
EXPORT_SYMBOL(tcp_release_cb);
@@ -940,7 +944,7 @@ void __init tcp_tasklet_init(void)
* We cant xmit new skbs from this context, as we might already
* hold qdisc lock.
*/
-void tcp_wfree(struct sk_buff *skb)
+static void tcp_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
@@ -1522,21 +1526,21 @@ static void tcp_cwnd_validate(struct sock *sk)
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
- unsigned int mss_now, unsigned int cwnd)
+ unsigned int mss_now, unsigned int max_segs)
{
const struct tcp_sock *tp = tcp_sk(sk);
- u32 needed, window, cwnd_len;
+ u32 needed, window, max_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
- cwnd_len = mss_now * cwnd;
+ max_len = mss_now * max_segs;
- if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
- return cwnd_len;
+ if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+ return max_len;
needed = min(skb->len, window);
- if (cwnd_len <= needed)
- return cwnd_len;
+ if (max_len <= needed)
+ return max_len;
return needed - needed % mss_now;
}
@@ -1765,7 +1769,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
limit = min(send_win, cong_win);
/* If a full-sized TSO skb can be sent, do it. */
- if (limit >= sk->sk_gso_max_size)
+ if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+ sk->sk_gso_max_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -1999,7 +2004,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
- cwnd_quota);
+ min_t(unsigned int,
+ cwnd_quota,
+ sk->sk_gso_max_segs));
if (skb->len > limit &&
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
@@ -2045,7 +2052,8 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
if (unlikely(sk->sk_state == TCP_CLOSE))
return;
- if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
+ if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
+ sk_gfp_atomic(sk, GFP_ATOMIC)))
tcp_check_probe_timer(sk);
}
@@ -2666,7 +2674,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
s_data_desired = cvp->s_data_desired;
- skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, GFP_ATOMIC);
+ skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
if (unlikely(!skb)) {
dst_release(dst);
return NULL;
@@ -3064,7 +3073,7 @@ void tcp_send_ack(struct sock *sk)
* tcp_transmit_skb() will set the ownership to this
* sock.
*/
- buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+ buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
if (buff == NULL) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -3079,7 +3088,7 @@ void tcp_send_ack(struct sock *sk)
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
- tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+ tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
}
/* This routine sends a packet with an out of date sequence
@@ -3099,7 +3108,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
struct sk_buff *skb;
/* We don't queue it, tcp_transmit_skb() sets ownership. */
- skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+ skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
if (skb == NULL)
return -1;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6df36ad55a38..b774a03bd1dc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data)
inet_csk(sk)->icsk_ack.blocked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */
- set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ sock_hold(sk);
}
bh_unlock_sock(sk);
sock_put(sk);
@@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data)
tcp_write_timer_handler(sk);
} else {
/* delegate our work to tcp_release_cb() */
- set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
+ sock_hold(sk);
}
bh_unlock_sock(sk);
sock_put(sk);
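
The tcp_timer.c and tcp_output.c hunks above share one pattern: only the first setter of a deferred-work bit takes a socket reference (test_and_set_bit + sock_hold), and tcp_release_cb() drops exactly one reference per bit it handles (__sock_put), so the socket cannot go away with work still queued. A userspace sketch of that pairing, with invented names and C11 atomics standing in for the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

#define WORK_DEFERRED (1u << 0)

struct obj {
    atomic_uint flags;
    atomic_int  refcnt;
};

static void obj_hold(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }
static void obj_put(struct obj *o)  { atomic_fetch_sub(&o->refcnt, 1); }

/* Timer side: analogous to test_and_set_bit() followed by sock_hold(). */
static void defer_work(struct obj *o)
{
    unsigned int old = atomic_fetch_or(&o->flags, WORK_DEFERRED);
    if (!(old & WORK_DEFERRED))
        obj_hold(o);            /* only the first setter takes a reference */
}

/* Release side: analogous to tcp_release_cb() handling one deferred bit. */
static void run_deferred(struct obj *o)
{
    unsigned int flags = atomic_exchange(&o->flags, 0);
    if (flags & WORK_DEFERRED) {
        /* ... perform the deferred work here ... */
        obj_put(o);             /* pairs with the hold in defer_work() */
    }
}

int main(void)
{
    struct obj o = { .refcnt = 1 };
    defer_work(&o);
    defer_work(&o);             /* second call must not take a second ref */
    run_deferred(&o);
    printf("refcnt back to %d\n", atomic_load(&o.refcnt));
    return 0;
}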
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b4c3582a991f..6f6d1aca3c3d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
uh->check = CSUM_MANGLED_0;
send:
- err = ip_send_skb(skb);
+ err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 58d23a572509..06814b6216dc 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -27,8 +27,8 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
if (skb_dst(skb) == NULL) {
const struct iphdr *iph = ip_hdr(skb);
- if (ip_route_input(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev))
+ if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev))
goto drop;
}
return dst_input(skb);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index c6281847f16a..681ea2f413e2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -92,6 +92,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_type = rt->rt_type;
xdst->u.rt.rt_gateway = rt->rt_gateway;
xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+ INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
return 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 79181819a24f..6bc85f7c31e3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.forwarding) ^ (!newf);
@@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
dev_forward_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */
- err = -ENOMEM;
-
aead = esp->aead;
alen = crypto_aead_authsize(aead);
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
}
tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto error;
+ }
seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5ab923e51af3..a52d864d562b 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -47,9 +47,16 @@
-inline int ip6_rcv_finish( struct sk_buff *skb)
+int ip6_rcv_finish(struct sk_buff *skb)
{
- if (skb_dst(skb) == NULL)
+ if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ const struct inet6_protocol *ipprot;
+
+ ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+ if (ipprot && ipprot->early_demux)
+ ipprot->early_demux(skb);
+ }
+ if (!skb_dst(skb))
ip6_route_input(skb);
return dst_input(skb);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index da2e92d05c15..745a32042950 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net)
goto proc_dev_snmp6_fail;
return 0;
+proc_dev_snmp6_fail:
+ proc_net_remove(net, "snmp6");
proc_snmp6_fail:
proc_net_remove(net, "sockstat6");
-proc_dev_snmp6_fail:
- proc_net_remove(net, "dev_snmp6");
return -ENOMEM;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index cf02cb97bbdd..8e80fd279100 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2480,12 +2480,8 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
goto nla_put_failure;
- if (!(rt->rt6i_flags & RTF_EXPIRES))
- expires = 0;
- else if (rt->dst.expires - jiffies < INT_MAX)
- expires = rt->dst.expires - jiffies;
- else
- expires = INT_MAX;
+
+ expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
goto nla_put_failure;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f49476e2d884..a3e60cc04a8a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
}
#endif
+static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ if (rt->rt6i_node)
+ inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+}
+
static void tcp_v6_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
@@ -1270,6 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newsk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(newsk, dst, NULL, NULL);
+ inet6_sk_rx_dst_set(newsk, skb);
newtcp6sk = (struct tcp6_sock *)newsk;
inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
@@ -1299,7 +1312,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (treq->pktopts != NULL) {
- newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
+ newnp->pktoptions = skb_clone(treq->pktopts,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
consume_skb(treq->pktopts);
treq->pktopts = NULL;
if (newnp->pktoptions)
@@ -1349,7 +1363,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
- AF_INET6, key->key, key->keylen, GFP_ATOMIC);
+ AF_INET6, key->key, key->keylen,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
}
#endif
@@ -1442,10 +1457,20 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
--ANK (980728)
*/
if (np->rxopt.all)
- opt_skb = skb_clone(skb, GFP_ATOMIC);
+ opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ struct dst_entry *dst = sk->sk_rx_dst;
+
sock_rps_save_rxhash(sk, skb);
+ if (dst) {
+ if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+ dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
+ dst_release(dst);
+ sk->sk_rx_dst = NULL;
+ }
+ }
+
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1674,6 +1699,43 @@ do_time_wait:
goto discard_it;
}
+static void tcp_v6_early_demux(struct sk_buff *skb)
+{
+ const struct ipv6hdr *hdr;
+ const struct tcphdr *th;
+ struct sock *sk;
+
+ if (skb->pkt_type != PACKET_HOST)
+ return;
+
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
+ return;
+
+ hdr = ipv6_hdr(skb);
+ th = tcp_hdr(skb);
+
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return;
+
+ sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ inet6_iif(skb));
+ if (sk) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+ struct dst_entry *dst = sk->sk_rx_dst;
+ struct inet_sock *icsk = inet_sk(sk);
+ if (dst)
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+ if (dst &&
+ icsk->rx_dst_ifindex == skb->skb_iif)
+ skb_dst_set_noref(skb, dst);
+ }
+ }
+}
+
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.twsk_unique = tcp_twsk_unique,
@@ -1684,6 +1746,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
+ .sk_rx_dst_set = inet6_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct ipv6hdr),
@@ -1715,6 +1778,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
+ .sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
@@ -1978,12 +2042,13 @@ struct proto tcpv6_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
.proto_cgroup = tcp_proto_cgroup,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
+ .early_demux = tcp_v6_early_demux,
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.gso_send_check = tcp_v6_gso_send_check,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef39812107b1..f8c4c08ffb60 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl)
return 0;
}
+static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
+{
+ struct rt6_info *rt = (struct rt6_info *)xdst;
+
+ rt6_init_peer(rt, net->ipv6.peers);
+}
+
static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
@@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.get_saddr = xfrm6_get_saddr,
.decode_session = _decode_session6,
.get_tos = xfrm6_get_tos,
+ .init_dst = xfrm6_init_dst,
.init_path = xfrm6_init_path,
.fill_dst = xfrm6_fill_dst,
.blackhole_route = ip6_blackhole_route,
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list);
+ kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- synchronize_rcu();
atomic_dec(&l2tp_tunnel_count);
- kfree(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 35e1e4bde587..927547171bc7 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
lsa->l2tp_family = AF_INET6;
lsa->l2tp_flowinfo = 0;
lsa->l2tp_scope_id = 0;
+ lsa->l2tp_unused = 0;
if (peer) {
if (!lsk->peer_conn_id)
return -ENOTCONN;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index f6fe4d400502..c2190005a114 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
struct sockaddr_llc sllc;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = 0;
+ int rc = -EBADF;
memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
*uaddrlen = sizeof(sllc);
- memset(uaddr, 0, *uaddrlen);
if (peer) {
rc = -ENOTCONN;
if (sk->sk_state != TCP_ESTABLISHED)
@@ -1206,7 +1205,7 @@ static int __init llc2_init(void)
rc = llc_proc_init();
if (rc != 0) {
printk(llc_proc_err_msg);
- goto out_unregister_llc_proto;
+ goto out_station;
}
rc = llc_sysctl_init();
if (rc) {
@@ -1226,7 +1225,8 @@ out_sysctl:
llc_sysctl_exit();
out_proc:
llc_proc_exit();
-out_unregister_llc_proto:
+out_station:
+ llc_station_exit();
proto_unregister(&llc_proto);
goto out;
}
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index e32cab44ea95..dd3e83328ad5 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap,
void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
struct sk_buff *skb))
{
+ smp_wmb(); /* ensure initialisation is complete before it's called */
if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
llc_type_handlers[type - 1] = handler;
}
@@ -50,11 +51,19 @@ void llc_remove_pack(int type)
{
if (type == LLC_DEST_SAP || type == LLC_DEST_CONN)
llc_type_handlers[type - 1] = NULL;
+ synchronize_net();
}
void llc_set_station_handler(void (*handler)(struct sk_buff *skb))
{
+ /* Ensure initialisation is complete before it's called */
+ if (handler)
+ smp_wmb();
+
llc_station_handler = handler;
+
+ if (!handler)
+ synchronize_net();
}
/**
@@ -150,6 +159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
int dest;
int (*rcv)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
+ void (*sta_handler)(struct sk_buff *skb);
+ void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb);
if (!net_eq(dev_net(dev), &init_net))
goto drop;
@@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
*/
rcv = rcu_dereference(sap->rcv_func);
dest = llc_pdu_type(skb);
- if (unlikely(!dest || !llc_type_handlers[dest - 1])) {
+ sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL;
+ if (unlikely(!sap_handler)) {
if (rcv)
rcv(skb, dev, pt, orig_dev);
else
@@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev,
if (cskb)
rcv(cskb, dev, pt, orig_dev);
}
- llc_type_handlers[dest - 1](sap, skb);
+ sap_handler(sap, skb);
}
llc_sap_put(sap);
out:
@@ -202,9 +214,10 @@ drop:
kfree_skb(skb);
goto out;
handle_station:
- if (!llc_station_handler)
+ sta_handler = ACCESS_ONCE(llc_station_handler);
+ if (!sta_handler)
goto drop;
- llc_station_handler(skb);
+ sta_handler(skb);
goto out;
}
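
The llc_rcv() change above snapshots the handler pointer once (ACCESS_ONCE) into a local, tests the local, and calls through it, so a concurrent unregister cannot slip a NULL in between the check and the call; the writer side then waits (synchronize_net()) before tearing the handler down. A userspace sketch of the reader side, using a C11 atomic in place of ACCESS_ONCE (names invented):

#include <stdatomic.h>
#include <stdio.h>

typedef void (*handler_t)(int);

static _Atomic handler_t station_handler;   /* set/cleared by other threads */

static void print_handler(int v) { printf("got %d\n", v); }

static void dispatch(int v)
{
    handler_t h = atomic_load(&station_handler);  /* one read, like ACCESS_ONCE */

    if (h)
        h(v);       /* safe even if the global is cleared after the load;
                     * the unregister path must still wait for readers to
                     * finish before freeing anything h points into */
    else
        printf("dropped %d\n", v);
}

int main(void)
{
    dispatch(1);                                  /* no handler yet: dropped */
    atomic_store(&station_handler, print_handler);
    dispatch(2);                                  /* handled */
    return 0;
}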
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index 39a8d8924b9c..b2f2bac2c2a2 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
@@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
@@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
out:
return rc;
free:
- kfree_skb(skb);
+ kfree_skb(nskb);
goto out;
}
@@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb)
llc_station_state_process(skb);
}
-int __init llc_station_init(void)
+void __init llc_station_init(void)
{
- int rc = -ENOBUFS;
- struct sk_buff *skb;
- struct llc_station_state_ev *ev;
-
skb_queue_head_init(&llc_main_station.mac_pdu_q);
skb_queue_head_init(&llc_main_station.ev_q.list);
spin_lock_init(&llc_main_station.ev_q.lock);
@@ -700,23 +696,12 @@ int __init llc_station_init(void)
(unsigned long)&llc_main_station);
llc_main_station.ack_timer.expires = jiffies +
sysctl_llc_station_ack_timeout;
- skb = alloc_skb(0, GFP_ATOMIC);
- if (!skb)
- goto out;
- rc = 0;
- llc_set_station_handler(llc_station_rcv);
- ev = llc_station_ev(skb);
- memset(ev, 0, sizeof(*ev));
llc_main_station.maximum_retry = 1;
- llc_main_station.state = LLC_STATION_STATE_DOWN;
- ev->type = LLC_STATION_EV_TYPE_SIMPLE;
- ev->prim_type = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK;
- rc = llc_station_next_state(skb);
-out:
- return rc;
+ llc_main_station.state = LLC_STATION_STATE_UP;
+ llc_set_station_handler(llc_station_rcv);
}
-void __exit llc_station_exit(void)
+void llc_station_exit(void)
{
llc_set_station_handler(NULL);
}
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 1bf7903496f8..bcffa6903129 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -276,7 +276,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
read_lock(&tpt_trig->trig.leddev_list_lock);
list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
- led_brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
read_unlock(&tpt_trig->trig.leddev_list_lock);
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6fac18c0423f..85572353a7e3 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
/*
* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
@@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
ieee80211_configure_filter(local);
+
+ sdata->u.mesh.timers_running = 0;
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index cef0c9e79aba..a4a5acdbaa4d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
del_timer_sync(&sdata->u.mgd.timer);
del_timer_sync(&sdata->u.mgd.chswitch_timer);
+
+ sdata->u.mgd.timers_running = 0;
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bcaee5d12839..839dd9737989 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
if (local->scan_req != local->int_scan_req)
cfg80211_scan_done(local->scan_req, aborted);
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
local->scanning = 0;
local->scan_channel = NULL;
@@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
kfree(local->sched_scan_ies.ie[i]);
drv_sched_scan_stop(local, sdata);
- rcu_assign_pointer(local->sched_scan_sdata, NULL);
}
out:
mutex_unlock(&local->mtx);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- int is_mesh_mcast = 1;
- const u8 *mesh_da;
+ /* DS -> MBSS (802.11-2012 13.11.3.3).
+ * For unicast with unknown forwarding information,
+ * destination might be in the MBSS or if that fails
+ * forwarded to another mesh gate. In either case
+ * resolution will be handled in ieee80211_xmit(), so
+ * leave the original DA. This also works for mcast */
+ const u8 *mesh_da = skb->data;
+
+ if (mppath)
+ mesh_da = mppath->mpp;
+ else if (mpath)
+ mesh_da = mpath->dst;
+ rcu_read_unlock();
- if (is_multicast_ether_addr(skb->data))
- /* DA TA mSA AE:SA */
- mesh_da = skb->data;
- else {
- static const u8 bcast[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- if (mppath) {
- /* RA TA mDA mSA AE:DA SA */
- mesh_da = mppath->mpp;
- is_mesh_mcast = 0;
- } else if (mpath) {
- mesh_da = mpath->dst;
- is_mesh_mcast = 0;
- } else {
- /* DA TA mSA AE:SA */
- mesh_da = bcast;
- }
- }
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr);
- rcu_read_unlock();
- if (is_mesh_mcast)
+ if (is_multicast_ether_addr(mesh_da))
+ /* DA TA mSA AE:SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
skb->data + ETH_ALEN,
NULL);
else
+ /* RA TA mDA mSA AE:DA SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 84444dda194b..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!svc->stats.cpustats)
+ if (!svc->stats.cpustats) {
+ ret = -ENOMEM;
goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
@@ -2759,6 +2761,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
+ memset(&t, 0, sizeof(t));
__ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */
- ct->timeout.expires = jiffies +
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
return;
}
/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
/* add this conntrack to the dying list */
spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
/* set a new timer to retry event delivery */
- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
- ct->timeout.expires = jiffies +
+ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 45cf602a76bc..527651a53a45 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master,
}
}
-static inline int refresh_timer(struct nf_conntrack_expect *i)
-{
- struct nf_conn_help *master_help = nfct_help(i->master);
- const struct nf_conntrack_expect_policy *p;
-
- if (!del_timer(&i->timeout))
- return 0;
-
- p = &rcu_dereference_protected(
- master_help->helper,
- lockdep_is_held(&nf_conntrack_lock)
- )->expect_policy[i->class];
- i->timeout.expires = jiffies + p->timeout * HZ;
- add_timer(&i->timeout);
- return 1;
-}
-
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
const struct nf_conntrack_expect_policy *p;
@@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
struct nf_conn_help *master_help = nfct_help(master);
struct nf_conntrack_helper *helper;
struct net *net = nf_ct_exp_net(expect);
- struct hlist_node *n;
+ struct hlist_node *n, *next;
unsigned int h;
int ret = 1;
@@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
goto out;
}
h = nf_ct_expect_dst_hash(&expect->tuple);
- hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
+ hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) {
if (expect_matches(i, expect)) {
- /* Refresh timer: if it's dying, ignore.. */
- if (refresh_timer(i)) {
- ret = 0;
- goto out;
+ if (del_timer(&i->timeout)) {
+ nf_ct_unlink_expect(i);
+ nf_ct_expect_put(i);
+ break;
}
} else if (expect_clash(i, expect)) {
ret = -EBUSY;
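
The switch to hlist_for_each_entry_safe() in __nf_ct_expect_check() above exists because the loop body may now unlink and drop the current entry, so the next pointer has to be fetched before the body runs. A userspace analogue of that delete-while-iterating idiom on a plain singly linked list (invented types):

#include <stdio.h>
#include <stdlib.h>

struct item { struct item *next; int key; };

/* Remove every node whose key matches, walking the list only once. */
static void remove_matches(struct item **head, int key)
{
    struct item **pprev = head;
    struct item *i, *next;

    for (i = *head; i; i = next) {
        next = i->next;              /* saved before i can be freed */
        if (i->key == key) {
            *pprev = next;           /* unlink, like nf_ct_unlink_expect() */
            free(i);
        } else {
            pprev = &i->next;
        }
    }
}

int main(void)
{
    struct item *head = NULL;
    for (int k = 0; k < 4; k++) {    /* builds the list 3, 2, 1, 0 */
        struct item *n = malloc(sizeof(*n));
        n->key = k;
        n->next = head;
        head = n;
    }
    remove_matches(&head, 2);
    for (struct item *i = head; i; i = i->next)
        printf("%d ", i->key);
    printf("\n");
    return 0;
}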
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 14f67a2cbcb5..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1896,10 +1896,15 @@ static int
ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
{
struct nlattr *cda[CTA_MAX+1];
+ int ret;
nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
- return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+ spin_lock_bh(&nf_conntrack_lock);
+ ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+ spin_unlock_bh(&nf_conntrack_lock);
+
+ return ret;
}
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
@@ -2785,7 +2790,8 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
- if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ ret = register_pernet_subsys(&ctnetlink_net_ops);
+ if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 758a1bacc126..5c0a112aeee6 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr,
return len + digits_len(ct, dptr, limit, shift);
}
-static int parse_addr(const struct nf_conn *ct, const char *cp,
- const char **endp, union nf_inet_addr *addr,
- const char *limit)
+static int sip_parse_addr(const struct nf_conn *ct, const char *cp,
+ const char **endp, union nf_inet_addr *addr,
+ const char *limit, bool delim)
{
const char *end;
- int ret = 0;
+ int ret;
if (!ct)
return 0;
@@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp,
switch (nf_ct_l3num(ct)) {
case AF_INET:
ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
+ if (ret == 0)
+ return 0;
break;
case AF_INET6:
+ if (cp < limit && *cp == '[')
+ cp++;
+ else if (delim)
+ return 0;
+
ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
+ if (ret == 0)
+ return 0;
+
+ if (end < limit && *end == ']')
+ end++;
+ else if (delim)
+ return 0;
break;
default:
BUG();
}
- if (ret == 0 || end == cp)
- return 0;
if (endp)
*endp = end;
return 1;
@@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr,
union nf_inet_addr addr;
const char *aux = dptr;
- if (!parse_addr(ct, dptr, &dptr, &addr, limit)) {
+ if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) {
pr_debug("ip: %s parse failed.!\n", dptr);
return 0;
}
@@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct,
return 0;
dptr += shift;
- if (!parse_addr(ct, dptr, &end, addr, limit))
+ if (!sip_parse_addr(ct, dptr, &end, addr, limit, true))
return -1;
if (end < limit && *end == ':') {
end++;
@@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
if (ret == 0)
return ret;
- if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit))
+ if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true))
return -1;
if (*c == ':') {
c++;
@@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
const char *name,
unsigned int *matchoff, unsigned int *matchlen,
- union nf_inet_addr *addr)
+ union nf_inet_addr *addr, bool delim)
{
const char *limit = dptr + datalen;
const char *start, *end;
@@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
return 0;
start += strlen(name);
- if (!parse_addr(ct, start, &end, addr, limit))
+ if (!sip_parse_addr(ct, start, &end, addr, limit, delim))
return 0;
*matchoff = start - dptr;
*matchlen = end - start;
@@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
return 1;
}
+static int sdp_parse_addr(const struct nf_conn *ct, const char *cp,
+ const char **endp, union nf_inet_addr *addr,
+ const char *limit)
+{
+ const char *end;
+ int ret;
+
+ memset(addr, 0, sizeof(*addr));
+ switch (nf_ct_l3num(ct)) {
+ case AF_INET:
+ ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end);
+ break;
+ case AF_INET6:
+ ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end);
+ break;
+ default:
+ BUG();
+ }
+
+ if (ret == 0)
+ return 0;
+ if (endp)
+ *endp = end;
+ return 1;
+}
+
+/* skip ip address. returns its length. */
+static int sdp_addr_len(const struct nf_conn *ct, const char *dptr,
+ const char *limit, int *shift)
+{
+ union nf_inet_addr addr;
+ const char *aux = dptr;
+
+ if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) {
+ pr_debug("ip: %s parse failed.!\n", dptr);
+ return 0;
+ }
+
+ return dptr - aux;
+}
+
/* SDP header parsing: an SDP session description contains an ordered set of
* headers, starting with a section containing general session parameters,
* optionally followed by multiple media descriptions.
@@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
*/
static const struct sip_header ct_sdp_hdrs[] = {
[SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len),
- [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len),
- [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len),
- [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len),
- [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len),
+ [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len),
+ [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len),
+ [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len),
+ [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len),
[SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len),
};
@@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
if (ret <= 0)
return ret;
- if (!parse_addr(ct, dptr + *matchoff, NULL, addr,
- dptr + *matchoff + *matchlen))
+ if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr,
+ dptr + *matchoff + *matchlen))
return -1;
return 1;
}
@@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
}
static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
-static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
[SIP_EXPECT_SIGNALLING] = {
@@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void)
sip[i][j].me = THIS_MODULE;
if (ports[i] == SIP_PORT)
- sprintf(sip_names[i][j], "sip");
+ sprintf(sip[i][j].name, "sip");
else
- sprintf(sip_names[i][j], "sip-%u", i);
+ sprintf(sip[i][j].name, "sip-%u", i);
pr_debug("port #%u: %u\n", i, ports[i]);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..14e2f3903142 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
}
if (indev && skb_mac_header_was_set(skb)) {
- if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+ if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
#ifdef CONFIG_PROC_FS
if (!proc_create("nfnetlink_log", 0440,
- proc_net_netfilter, &nful_file_ops))
+ proc_net_netfilter, &nful_file_ops)) {
+ status = -ENOMEM;
goto cleanup_logger;
+ }
#endif
return status;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5463969da45b..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1362,7 +1362,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (NULL == siocb->scm)
siocb->scm = &scm;
- err = scm_send(sock, msg, siocb->scm);
+ err = scm_send(sock, msg, siocb->scm, true);
if (err < 0)
return err;
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+ if ((dst_group || dst_pid) &&
+ !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
+ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab();
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 320fa0e6951a..f3f96badf5aa 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -325,9 +325,6 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
}
}
- if (!acts_list)
- return 0;
-
return do_execute_actions(dp, skb, nla_data(acts_list),
nla_len(acts_list), true);
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ceaca7c134a0..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po,
default:
WARN(1, "TPACKET version not supported\n");
BUG();
- return 0;
+ return NULL;
}
}
@@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+{
+ if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
+ return true;
+
+ return false;
+}
+
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_sock *po = pkt_sk(sk);
@@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->prot_hook.dev = po->prot_hook.dev;
match->prot_hook.func = packet_rcv_fanout;
match->prot_hook.af_packet_priv = match;
+ match->prot_hook.id_match = match_fanout_group;
dev_add_pack(&match->prot_hook);
list_add(&match->list, &fanout_list);
}
@@ -1936,7 +1945,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
- BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
atomic_dec(&po->tx_ring.pending);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index f10fb8256442..05d60859d8e3 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
struct tcf_common *pc;
int ret = 0;
int err;
+#ifdef CONFIG_GACT_PROB
+ struct tc_gact_p *p_parm = NULL;
+#endif
if (nla == NULL)
return -EINVAL;
@@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
return -EOPNOTSUPP;
+#else
+ if (tb[TCA_GACT_PROB]) {
+ p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm->ptype >= MAX_RAND)
+ return -EINVAL;
+ }
#endif
pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
@@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
- if (tb[TCA_GACT_PROB] != NULL) {
- struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]);
+ if (p_parm) {
gact->tcfg_paction = p_parm->paction;
gact->tcfg_pval = p_parm->pval;
gact->tcfg_ptype = p_parm->ptype;
@@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+ if (gact->tcfg_ptype)
action = gact_rand[gact->tcfg_ptype](gact);
else
action = gact->tcf_action;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60e281ad0f07..58fb3c7aab9e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -185,7 +185,12 @@ err3:
err2:
kfree(tname);
err1:
- kfree(pc);
+ if (ret == ACT_P_CREATED) {
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
+ }
return err;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fe81cc18e9e0..9c0fd0c78814 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
out:
if (err) {
m->tcf_qstats.overlimits++;
- /* should we be asking for packet to be dropped?
- * may make sense for redirect case only
- */
- retval = TC_ACT_SHOT;
- } else {
+ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
+ retval = TC_ACT_SHOT;
+ else
+ retval = m->tcf_action;
+ } else
retval = m->tcf_action;
- }
spin_unlock(&m->tcf_lock);
return retval;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 26aa2f6ce257..45c53ab067a6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- kfree(pc);
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
return -ENOMEM;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3922f2a2821b..3714f60f0b3c 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- kfree(pc);
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
return ret;
}
d->tcf_action = parm->action;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3df18c..e4723d31fdd5 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -203,6 +203,34 @@ out:
return index;
}
+/* Length of the next packet (0 if the queue is empty). */
+static unsigned int qdisc_peek_len(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ skb = sch->ops->peek(sch);
+ return skb ? qdisc_pkt_len(skb) : 0;
+}
+
+static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *);
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+ unsigned int len);
+
+static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl,
+ u32 lmax, u32 inv_w, int delta_w)
+{
+ int i;
+
+ /* update qfq-specific data */
+ cl->lmax = lmax;
+ cl->inv_w = inv_w;
+ i = qfq_calc_index(cl->inv_w, cl->lmax);
+
+ cl->grp = &q->groups[i];
+
+ q->wsum += delta_w;
+}
+
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
struct nlattr **tca, unsigned long *arg)
{
@@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
lmax = 1UL << QFQ_MTU_SHIFT;
if (cl != NULL) {
+ bool need_reactivation = false;
+
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
@@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
- if (inv_w != cl->inv_w) {
- sch_tree_lock(sch);
- q->wsum += delta_w;
- cl->inv_w = inv_w;
- sch_tree_unlock(sch);
+ if (lmax == cl->lmax && inv_w == cl->inv_w)
+ return 0; /* nothing to update */
+
+ i = qfq_calc_index(inv_w, lmax);
+ sch_tree_lock(sch);
+ if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) {
+ /*
+ * shift cl->F back, to not charge the
+ * class for the not-yet-served head
+ * packet
+ */
+ cl->F = cl->S;
+ /* remove class from its slot in the old group */
+ qfq_deactivate_class(q, cl);
+ need_reactivation = true;
}
+
+ qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
+
+ if (need_reactivation) /* activate in new group */
+ qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc));
+ sch_tree_unlock(sch);
+
return 0;
}
@@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->refcnt = 1;
cl->common.classid = classid;
- cl->lmax = lmax;
- cl->inv_w = inv_w;
- i = qfq_calc_index(cl->inv_w, cl->lmax);
- cl->grp = &q->groups[i];
+ qfq_update_class_params(q, cl, lmax, inv_w, delta_w);
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
@@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return err;
}
}
- q->wsum += weight;
sch_tree_lock(sch);
qdisc_class_hash_insert(&q->clhash, &cl->common);
@@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
}
}
-/* What is length of next packet in queue (0 if queue is empty) */
-static unsigned int qdisc_peek_len(struct Qdisc *sch)
-{
- struct sk_buff *skb;
-
- skb = sch->ops->peek(sch);
- return skb ? qdisc_pkt_len(skb) : 0;
-}
-
/*
* Updates the class, returns true if also the group needs to be updated.
*/
@@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
- struct qfq_group *grp;
struct qfq_class *cl;
int err;
- u64 roundedS;
- int s;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return err;
/* If reach this point, queue q was idle */
- grp = cl->grp;
+ qfq_activate_class(q, cl, qdisc_pkt_len(skb));
+
+ return err;
+}
+
+/*
+ * Handle class switch from idle to backlogged.
+ */
+static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl,
+ unsigned int pkt_len)
+{
+ struct qfq_group *grp = cl->grp;
+ u64 roundedS;
+ int s;
+
qfq_update_start(q, cl);
/* compute new finish time and rounded start. */
- cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
+ cl->F = cl->S + (u64)pkt_len * cl->inv_w;
roundedS = qfq_round_down(cl->S, grp->slot_shift);
/*
@@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skip_update:
qfq_slot_insert(grp, cl, roundedS);
-
- return err;
}
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 33d894776192..10c018a5b9fe 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,7 +702,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
if (rx_count >= asoc->base.sk->sk_rcvbuf) {
if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
- (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
+ (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
+ chunk->skb->truesize)))
goto fail;
}
diff --git a/net/socket.c b/net/socket.c
index dfe5b66c97e0..edc3c4af9085 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
- err = compat_put_timeval(up, &ktv);
+ err = compat_put_timeval(&ktv, up);
return err;
}
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
- err = compat_put_timespec(up, &kts);
+ err = compat_put_timespec(&kts, up);
return err;
}
@@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf)))
return -EFAULT;
+ memset(&ifc, 0, sizeof(ifc));
if (ifc32.ifcbuf == 0) {
ifc32.ifc_len = 0;
ifc.ifc_len = 0;
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 9fe8857d8d59..03d03e37a7d5 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -21,6 +21,11 @@ config SUNRPC_XPRT_RDMA
If unsure, say N.
+config SUNRPC_SWAP
+ bool
+ depends on SUNRPC
+ select NETVM
+
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
depends on SUNRPC && CRYPTO
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 727e506cacda..b5c067bccc45 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/hash.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/spinlock.h>
#ifdef RPC_DEBUG
@@ -122,6 +123,59 @@ rpcauth_unregister(const struct rpc_authops *ops)
}
EXPORT_SYMBOL_GPL(rpcauth_unregister);
+/**
+ * rpcauth_list_flavors - discover registered flavors and pseudoflavors
+ * @array: array to fill in
+ * @size: size of "array"
+ *
+ * Returns the number of array items filled in, or a negative errno.
+ *
+ * The returned array is not sorted by any policy. Callers should not
+ * rely on the order of the items in the returned array.
+ */
+int
+rpcauth_list_flavors(rpc_authflavor_t *array, int size)
+{
+ rpc_authflavor_t flavor;
+ int result = 0;
+
+ spin_lock(&rpc_authflavor_lock);
+ for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
+ const struct rpc_authops *ops = auth_flavors[flavor];
+ rpc_authflavor_t pseudos[4];
+ int i, len;
+
+ if (result >= size) {
+ result = -ENOMEM;
+ break;
+ }
+
+ if (ops == NULL)
+ continue;
+ if (ops->list_pseudoflavors == NULL) {
+ array[result++] = ops->au_flavor;
+ continue;
+ }
+ len = ops->list_pseudoflavors(pseudos, ARRAY_SIZE(pseudos));
+ if (len < 0) {
+ result = len;
+ break;
+ }
+ for (i = 0; i < len; i++) {
+ if (result >= size) {
+ result = -ENOMEM;
+ break;
+ }
+ array[result++] = pseudos[i];
+ }
+ }
+ spin_unlock(&rpc_authflavor_lock);
+
+ dprintk("RPC: %s returns %d\n", __func__, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
+
struct rpc_auth *
rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
{
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d3ad81f8da5b..34c522021004 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1619,6 +1619,7 @@ static const struct rpc_authops authgss_ops = {
.crcreate = gss_create_cred,
.pipes_create = gss_pipes_dentries_create,
.pipes_destroy = gss_pipes_dentries_destroy,
+ .list_pseudoflavors = gss_mech_list_pseudoflavors,
};
static const struct rpc_credops gss_credops = {
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 782bfe1b6465..b174fcd9ff4c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -239,14 +239,28 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
-int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
+/**
+ * gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
+ * @array: array to fill in
+ * @size: size of "array"
+ *
+ * Returns the number of array items filled in, or a negative errno.
+ *
+ * The returned array is not sorted by any policy. Callers should not
+ * rely on the order of the items in the returned array.
+ */
+int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
{
struct gss_api_mech *pos = NULL;
int j, i = 0;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
- for (j=0; j < pos->gm_pf_num; j++) {
+ for (j = 0; j < pos->gm_pf_num; j++) {
+ if (i >= size) {
+ spin_unlock(&registered_mechs_lock);
+ return -ENOMEM;
+ }
array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
}
}
@@ -254,8 +268,6 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
return i;
}
-EXPORT_SYMBOL_GPL(gss_mech_list_pseudoflavors);
-
u32
gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
{
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 47ad2666fdf6..2afd2a84dc35 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1349,8 +1349,11 @@ static int c_show(struct seq_file *m, void *p)
if (cache_check(cd, cp, NULL))
/* cache_check does a cache_put on failure */
seq_printf(m, "# ");
- else
+ else {
+ if (cache_is_expired(cd, cp))
+ seq_printf(m, "# ");
cache_put(cp, cd);
+ }
return cd->cache_show(m, cd, cp);
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 00eb859b7de5..fa48c60aef23 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -717,6 +717,15 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
+ if (sk_memalloc_socks()) {
+ struct rpc_xprt *xprt;
+
+ rcu_read_lock();
+ xprt = rcu_dereference(clnt->cl_xprt);
+ if (xprt->swapper)
+ task->tk_flags |= RPC_TASK_SWAPPER;
+ rcu_read_unlock();
+ }
/* Add to the client's list of all tasks */
spin_lock(&clnt->cl_lock);
list_add_tail(&task->tk_task, &clnt->cl_tasks);
@@ -1844,12 +1853,13 @@ call_timeout(struct rpc_task *task)
return;
}
if (RPC_IS_SOFT(task)) {
- if (clnt->cl_chatty)
+ if (clnt->cl_chatty) {
rcu_read_lock();
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname,
rcu_dereference(clnt->cl_xprt)->servername);
rcu_read_unlock();
+ }
if (task->tk_flags & RPC_TASK_TIMEOUT)
rpc_exit(task, -ETIMEDOUT);
else
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 92509ffe15fc..a70acae496e4 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -251,7 +251,7 @@ static int rpcb_create_local_unix(struct net *net)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create AF_LOCAL rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
@@ -298,7 +298,7 @@ static int rpcb_create_local_net(struct net *net)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create local rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 994cfea2bad6..128494ec9a64 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -300,8 +300,9 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
- * Note: If the task is ASYNC, this must be called with
- * the spinlock held to protect the wait queue operation.
+ * Note: If the task is ASYNC, and is being made runnable after sitting on an
+ * rpc_wait_queue, this must be called with the queue spinlock held to protect
+ * the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
@@ -790,7 +791,9 @@ void rpc_execute(struct rpc_task *task)
static void rpc_async_schedule(struct work_struct *work)
{
+ current->flags |= PF_FSTRANS;
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+ current->flags &= ~PF_FSTRANS;
}
/**
@@ -812,7 +815,10 @@ static void rpc_async_schedule(struct work_struct *work)
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
- gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
+ gfp_t gfp = GFP_NOWAIT;
+
+ if (RPC_IS_SWAPPER(task))
+ gfp |= __GFP_MEMALLOC;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
@@ -886,7 +892,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
static struct rpc_task *
rpc_alloc_task(void)
{
- return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
+ return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}
/*
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 88f2bf671960..bac973a31367 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt)
*/
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
- struct svc_serv *serv = xprt->xpt_server;
struct svc_pool *pool;
struct svc_rqst *rqstp;
int cpu;
@@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
rqstp, rqstp->rq_xprt);
rqstp->rq_xprt = xprt;
svc_xprt_get(xprt);
- rqstp->rq_reserved = serv->sv_max_mesg;
- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
pool->sp_stats.threads_woken++;
wake_up(&rqstp->rq_wait);
} else {
@@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (xprt) {
rqstp->rq_xprt = xprt;
svc_xprt_get(xprt);
- rqstp->rq_reserved = serv->sv_max_mesg;
- atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
/* As there is a shortage of threads and this request
* had to be queued, don't allow the thread to wait so
@@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
else
len = xprt->xpt_ops->xpo_recvfrom(rqstp);
dprintk("svc: got len=%d\n", len);
+ rqstp->rq_reserved = serv->sv_max_mesg;
+ atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
}
svc_xprt_received(xprt);
@@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp)
/* Grab mutex to serialize outgoing data. */
mutex_lock(&xprt->xpt_mutex);
- if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+ if (test_bit(XPT_DEAD, &xprt->xpt_flags)
+ || test_bit(XPT_CLOSE, &xprt->xpt_flags))
len = -ENOTCONN;
else
len = xprt->xpt_ops->xpo_sendto(rqstp);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 18bc130255a7..998aa8c1807c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
if (len >= 0)
svsk->sk_tcplen += len;
if (len != want) {
+ svc_tcp_save_pages(svsk, rqstp);
if (len < 0 && len != -EAGAIN)
goto err_other;
- svc_tcp_save_pages(svsk, rqstp);
dprintk("svc: incomplete TCP record (%d of %d)\n",
svsk->sk_tcplen, svsk->sk_reclen);
goto err_noclose;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 0cf165580d8d..0afba1b4b656 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -129,34 +129,6 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
-xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
- unsigned int len)
-{
- struct kvec *tail = xdr->tail;
- u32 *p;
-
- xdr->pages = pages;
- xdr->page_base = base;
- xdr->page_len = len;
-
- p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
- tail->iov_base = p;
- tail->iov_len = 0;
-
- if (len & 3) {
- unsigned int pad = 4 - (len & 3);
-
- *p = 0;
- tail->iov_base = (char *)p + (len & 3);
- tail->iov_len = pad;
- len += pad;
- }
- xdr->buflen += len;
- xdr->len += len;
-}
-EXPORT_SYMBOL_GPL(xdr_encode_pages);
-
-void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
@@ -457,6 +429,16 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
+ * xdr_stream_pos - Return the current offset from the start of the xdr_stream
+ * @xdr: pointer to struct xdr_stream
+ */
+unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
+{
+ return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_pos);
+
+/**
* xdr_init_encode - Initialize a struct xdr_stream for sending data.
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer in which to encode data
@@ -556,13 +538,11 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
- __be32 *p, unsigned int len)
+ unsigned int len)
{
if (len > iov->iov_len)
len = iov->iov_len;
- if (p == NULL)
- p = (__be32*)iov->iov_base;
- xdr->p = p;
+ xdr->p = (__be32*)iov->iov_base;
xdr->end = (__be32*)(iov->iov_base + len);
xdr->iov = iov;
xdr->page_ptr = NULL;
@@ -609,7 +589,7 @@ static void xdr_set_next_page(struct xdr_stream *xdr)
newbase -= xdr->buf->page_base;
if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
- xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
@@ -618,7 +598,7 @@ static bool xdr_set_next_buffer(struct xdr_stream *xdr)
xdr_set_next_page(xdr);
else if (xdr->iov == xdr->buf->head) {
if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
- xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
return xdr->p != xdr->end;
}
@@ -634,10 +614,15 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr->buf = buf;
xdr->scratch.iov_base = NULL;
xdr->scratch.iov_len = 0;
+ xdr->nwords = XDR_QUADLEN(buf->len);
if (buf->head[0].iov_len != 0)
- xdr_set_iov(xdr, buf->head, p, buf->len);
+ xdr_set_iov(xdr, buf->head, buf->len);
else if (buf->page_len != 0)
xdr_set_page_base(xdr, 0, buf->len);
+ if (p != NULL && p > xdr->p && xdr->end >= p) {
+ xdr->nwords -= p - xdr->p;
+ xdr->p = p;
+ }
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
@@ -662,12 +647,14 @@ EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
+ unsigned int nwords = XDR_QUADLEN(nbytes);
__be32 *p = xdr->p;
- __be32 *q = p + XDR_QUADLEN(nbytes);
+ __be32 *q = p + nwords;
- if (unlikely(q > xdr->end || q < p))
+ if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
return NULL;
xdr->p = q;
+ xdr->nwords -= nwords;
return p;
}
@@ -734,6 +721,31 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
+static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
+{
+ struct xdr_buf *buf = xdr->buf;
+ struct kvec *iov;
+ unsigned int nwords = XDR_QUADLEN(len);
+ unsigned int cur = xdr_stream_pos(xdr);
+
+ if (xdr->nwords == 0)
+ return 0;
+ if (nwords > xdr->nwords) {
+ nwords = xdr->nwords;
+ len = nwords << 2;
+ }
+ /* Realign pages to current pointer position */
+ iov = buf->head;
+ if (iov->iov_len > cur)
+ xdr_shrink_bufhead(buf, iov->iov_len - cur);
+
+ /* Truncate page data and move it into the tail */
+ if (buf->page_len > len)
+ xdr_shrink_pagelen(buf, buf->page_len - len);
+ xdr->nwords = XDR_QUADLEN(buf->len - cur);
+ return len;
+}
+
/**
* xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
* @xdr: pointer to xdr_stream struct
@@ -742,39 +754,37 @@ EXPORT_SYMBOL_GPL(xdr_inline_decode);
* Moves data beyond the current pointer position from the XDR head[] buffer
* into the page list. Any data that lies beyond current position + "len"
* bytes is moved into the XDR tail[].
+ *
+ * Returns the number of XDR encoded bytes now contained in the pages
*/
-void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
+unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
struct kvec *iov;
- ssize_t shift;
+ unsigned int nwords;
unsigned int end;
- int padding;
+ unsigned int padding;
- /* Realign pages to current pointer position */
- iov = buf->head;
- shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
- if (shift > 0)
- xdr_shrink_bufhead(buf, shift);
-
- /* Truncate page data and move it into the tail */
- if (buf->page_len > len)
- xdr_shrink_pagelen(buf, buf->page_len - len);
- padding = (XDR_QUADLEN(len) << 2) - len;
+ len = xdr_align_pages(xdr, len);
+ if (len == 0)
+ return 0;
+ nwords = XDR_QUADLEN(len);
+ padding = (nwords << 2) - len;
xdr->iov = iov = buf->tail;
/* Compute remaining message length. */
- end = iov->iov_len;
- shift = buf->buflen - buf->len;
- if (shift < end)
- end -= shift;
- else if (shift > 0)
- end = 0;
+ end = ((xdr->nwords - nwords) << 2) + padding;
+ if (end > iov->iov_len)
+ end = iov->iov_len;
+
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
xdr->p = (__be32 *)((char *)iov->iov_base + padding);
xdr->end = (__be32 *)((char *)iov->iov_base + end);
+ xdr->page_ptr = NULL;
+ xdr->nwords = XDR_QUADLEN(end - padding);
+ return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
@@ -790,12 +800,13 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
- xdr_read_pages(xdr, len);
+ len = xdr_align_pages(xdr, len);
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
- xdr_set_page_base(xdr, 0, len);
+ if (len != 0)
+ xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index b446e100286f..06cdbff79e4a 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
int rc = 0;
if (!xprt->shutdown) {
+ current->flags |= PF_FSTRANS;
xprt_clear_connected(xprt);
dprintk("RPC: %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
out:
xprt_wake_pending_tasks(xprt, rc);
-
out_clear:
dprintk("RPC: %s: exit\n", __func__);
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
}
/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 62d0dac8f780..400567243f84 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1892,6 +1892,8 @@ static void xs_local_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
status = __sock_create(xprt->xprt_net, AF_LOCAL,
SOCK_STREAM, 0, &sock, 1);
@@ -1925,7 +1927,47 @@ static void xs_local_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
+}
+
+#ifdef CONFIG_SUNRPC_SWAP
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+
+ if (xprt->swapper)
+ sk_set_memalloc(transport->inet);
+}
+
+/**
+ * xs_swapper - Tag this transport as being used for swap.
+ * @xprt: transport to tag
+ * @enable: enable/disable
+ *
+ */
+int xs_swapper(struct rpc_xprt *xprt, int enable)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+ int err = 0;
+
+ if (enable) {
+ xprt->swapper++;
+ xs_set_memalloc(xprt);
+ } else if (xprt->swapper) {
+ xprt->swapper--;
+ sk_clear_memalloc(transport->inet);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xs_swapper);
+#else
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
}
+#endif
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
@@ -1951,6 +1993,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
transport->sock = sock;
transport->inet = sk;
+ xs_set_memalloc(xprt);
+
write_unlock_bh(&sk->sk_callback_lock);
}
xs_udp_do_set_buffer_size(xprt);
@@ -1967,6 +2011,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
/* Start by resetting any existing state */
xs_reset_transport(transport);
sock = xs_create_sock(xprt, transport,
@@ -1985,6 +2031,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/*
@@ -2075,6 +2122,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
if (!xprt_bound(xprt))
goto out;
+ xs_set_memalloc(xprt);
+
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
@@ -2110,6 +2159,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
sock = xs_create_sock(xprt, transport,
@@ -2159,6 +2210,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EINPROGRESS:
case -EALREADY:
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
return;
case -EINVAL:
/* Happens, for instance, if the user specified a link
@@ -2171,6 +2223,7 @@ out_eagain:
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/**
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 79981d97bc9c..c5ee4ff61364 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -823,6 +823,34 @@ fail:
return NULL;
}
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err = 0;
+ /*
+ * Get the parent directory, calculate the hash for last
+ * component.
+ */
+ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return err;
+
+ /*
+ * All right, let's create it.
+ */
+ err = security_path_mknod(&path, dentry, mode, 0);
+ if (!err) {
+ err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+ if (!err) {
+ res->mnt = mntget(path.mnt);
+ res->dentry = dget(dentry);
+ }
+ }
+ done_path_create(&path, dentry);
+ return err;
+}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
@@ -831,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
- struct dentry *dentry = NULL;
- struct path path;
int err;
unsigned int hash;
struct unix_address *addr;
@@ -869,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- umode_t mode;
- err = 0;
- /*
- * Get the parent directory, calculate the hash for last
- * component.
- */
- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_mknod_parent;
-
- /*
- * All right, let's create it.
- */
- mode = S_IFSOCK |
+ struct path path;
+ umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
- err = mnt_want_write(path.mnt);
- if (err)
- goto out_mknod_dput;
- err = security_path_mknod(&path, dentry, mode, 0);
- if (err)
- goto out_mknod_drop_write;
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
-out_mknod_drop_write:
- mnt_drop_write(path.mnt);
- if (err)
- goto out_mknod_dput;
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- dput(path.dentry);
- path.dentry = dentry;
-
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ unix_release_addr(addr);
+ goto out_up;
+ }
addr->hash = UNIX_HASH_SIZE;
- }
-
- spin_lock(&unix_table_lock);
-
- if (!sun_path[0]) {
+ hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+ spin_lock(&unix_table_lock);
+ u->path = path;
+ list = &unix_socket_table[hash];
+ } else {
+ spin_lock(&unix_table_lock);
err = -EADDRINUSE;
if (__unix_find_socket_byname(net, sunaddr, addr_len,
sk->sk_type, hash)) {
@@ -914,9 +920,6 @@ out_mknod_drop_write:
}
list = &unix_socket_table[addr->hash];
- } else {
- list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
- u->path = path;
}
err = 0;
@@ -930,16 +933,6 @@ out_up:
mutex_unlock(&u->readlock);
out:
return err;
-
-out_mknod_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
-out_mknod_parent:
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
@@ -1457,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
- err = scm_send(sock, msg, siocb->scm);
+ err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
return err;
@@ -1626,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
- err = scm_send(sock, msg, siocb->scm);
+ err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
return err;
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 788a12c1eb5d..2ab785064b7e 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
* successfully, add it to the interface list.
*/
- if (dev->name == NULL) {
- err = -EINVAL;
- } else {
+#ifdef WANDEBUG
+ printk(KERN_INFO "%s: registering interface %s...\n",
+ wanrouter_modname, dev->name);
+#endif
- #ifdef WANDEBUG
- printk(KERN_INFO "%s: registering interface %s...\n",
- wanrouter_modname, dev->name);
- #endif
-
- err = register_netdev(dev);
- if (!err) {
- struct net_device *slave = NULL;
- unsigned long smp_flags=0;
-
- lock_adapter_irq(&wandev->lock, &smp_flags);
-
- if (wandev->dev == NULL) {
- wandev->dev = dev;
- } else {
- for (slave=wandev->dev;
- DEV_TO_SLAVE(slave);
- slave = DEV_TO_SLAVE(slave))
- DEV_TO_SLAVE(slave) = dev;
- }
- ++wandev->ndev;
-
- unlock_adapter_irq(&wandev->lock, &smp_flags);
- err = 0; /* done !!! */
- goto out;
+ err = register_netdev(dev);
+ if (!err) {
+ struct net_device *slave = NULL;
+ unsigned long smp_flags=0;
+
+ lock_adapter_irq(&wandev->lock, &smp_flags);
+
+ if (wandev->dev == NULL) {
+ wandev->dev = dev;
+ } else {
+ for (slave=wandev->dev;
+ DEV_TO_SLAVE(slave);
+ slave = DEV_TO_SLAVE(slave))
+ DEV_TO_SLAVE(slave) = dev;
}
+ ++wandev->ndev;
+
+ unlock_adapter_irq(&wandev->lock, &smp_flags);
+ err = 0; /* done !!! */
+ goto out;
}
if (wandev->del_if)
wandev->del_if(wandev, dev);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 31b40cc4a9c3..dcd64d5b07aa 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
*/
synchronize_rcu();
INIT_LIST_HEAD(&wdev->list);
+ /*
+ * Ensure that all events have been processed and
+ * freed.
+ */
+ cfg80211_process_wdev_events(wdev);
break;
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5206c6844fd7..bc7430b54771 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2303ee73b50a..2ded3c7fad06 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags)
channel_flags |= IEEE80211_CHAN_NO_IBSS;
if (rd_flags & NL80211_RRF_DFS)
channel_flags |= IEEE80211_CHAN_RADAR;
+ if (rd_flags & NL80211_RRF_NO_OFDM)
+ channel_flags |= IEEE80211_CHAN_NO_OFDM;
return channel_flags;
}
@@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy,
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp);
- chan->max_power = min(chan->max_power, chan->max_reg_power);
+ if (chan->orig_mpwr) {
+ /*
+ * Devices that have their own custom regulatory domain
+ * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the
+ * passed country IE power settings.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY &&
+ wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+ chan->max_power = chan->max_reg_power;
+ else
+ chan->max_power = min(chan->orig_mpwr,
+ chan->max_reg_power);
+ } else
+ chan->max_power = chan->max_reg_power;
}
static void handle_band(struct wiphy *wiphy,
@@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
chan->flags = chan->orig_flags;
chan->max_antenna_gain = chan->orig_mag;
chan->max_power = chan->orig_mpwr;
+ chan->beacon_found = false;
}
}
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 26f8cd30f712..994e2f0cc7a8 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
wdev->connect_keys = NULL;
}
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
{
struct cfg80211_event *ev;
unsigned long flags;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c5a5165a5927..5a2aa17e4d3c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1357,6 +1357,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
xdst->flo.ops = &xfrm_bundle_fc_ops;
+ if (afinfo->init_dst)
+ afinfo->init_dst(net, xdst);
} else
xdst = ERR_PTR(-ENOBUFS);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5b228f97d4b3..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.hard_add_expires_seconds) {
long tmo = x->lft.hard_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0)
- goto expired;
+ if (tmo <= 0) {
+ if (x->xflags & XFRM_SOFT_EXPIRE) {
+ /* enter hard expire without soft expire first?!
+ * setting a new date could trigger this.
* workaround: fix x->curlft.add_time as below:
+ */
+ x->curlft.add_time = now - x->saved_tmo - 1;
+ tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
+ } else
+ goto expired;
+ }
if (tmo < next)
next = tmo;
}
@@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me)
if (x->lft.soft_add_expires_seconds) {
long tmo = x->lft.soft_add_expires_seconds +
x->curlft.add_time - now;
- if (tmo <= 0)
+ if (tmo <= 0) {
warn = 1;
- else if (tmo < next)
+ x->xflags &= ~XFRM_SOFT_EXPIRE;
+ } else if (tmo < next) {
next = tmo;
+ x->xflags |= XFRM_SOFT_EXPIRE;
+ x->saved_tmo = tmo;
+ }
}
if (x->lft.soft_use_expires_seconds) {
long tmo = x->lft.soft_use_expires_seconds +
@@ -1981,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
goto error;
x->outer_mode = xfrm_get_mode(x->props.mode, family);
- if (x->outer_mode == NULL)
+ if (x->outer_mode == NULL) {
+ err = -EPROTONOSUPPORT;
goto error;
+ }
if (init_replay) {
err = xfrm_init_replay(x);
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
index 6bf8e87f1dcf..c3f69ae275d1 100644
--- a/scripts/Makefile.fwinst
+++ b/scripts/Makefile.fwinst
@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
$(installed-fw-dirs):
$(call cmd,mkdir)
-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
$(call cmd,install)
PHONY += __fw_install __fw_modinst FORCE
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e5bd60ff48e3..ca05ba217f5f 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1600,13 +1600,17 @@ sub process {
# Check signature styles
if (!$in_header_lines &&
- $line =~ /^(\s*)($signature_tags)(\s*)(.*)/) {
+ $line =~ /^(\s*)([a-z0-9_-]+by:|$signature_tags)(\s*)(.*)/i) {
my $space_before = $1;
my $sign_off = $2;
my $space_after = $3;
my $email = $4;
my $ucfirst_sign_off = ucfirst(lc($sign_off));
+ if ($sign_off !~ /$signature_tags/) {
+ WARN("BAD_SIGN_OFF",
+ "Non-standard signature: $sign_off\n" . $herecurr);
+ }
if (defined $space_before && $space_before ne "") {
WARN("BAD_SIGN_OFF",
"Do not use whitespace before $ucfirst_sign_off\n" . $herecurr);
@@ -1848,8 +1852,8 @@ sub process {
my $pos = pos_last_openparen($rest);
if ($pos >= 0) {
- $line =~ /^\+([ \t]*)/;
- my $newindent = $1;
+ $line =~ /^(\+| )([ \t]*)/;
+ my $newindent = $2;
my $goodtabindent = $oldindent .
"\t" x ($pos / 8) .
@@ -2984,6 +2988,46 @@ sub process {
}
}
+# do {} while (0) macro tests:
+# single-statement macros do not need to be enclosed in do while (0) loop,
+# macro should not end with a semicolon
+ if ($^V && $^V ge 5.10.0 &&
+ $realfile !~ m@/vmlinux.lds.h$@ &&
+ $line =~ /^.\s*\#\s*define\s+$Ident(\()?/) {
+ my $ln = $linenr;
+ my $cnt = $realcnt;
+ my ($off, $dstat, $dcond, $rest);
+ my $ctx = '';
+ ($dstat, $dcond, $ln, $cnt, $off) =
+ ctx_statement_block($linenr, $realcnt, 0);
+ $ctx = $dstat;
+
+ $dstat =~ s/\\\n.//g;
+
+ if ($dstat =~ /^\+\s*#\s*define\s+$Ident\s*${balanced_parens}\s*do\s*{(.*)\s*}\s*while\s*\(\s*0\s*\)\s*([;\s]*)\s*$/) {
+ my $stmts = $2;
+ my $semis = $3;
+
+ $ctx =~ s/\n*$//;
+ my $cnt = statement_rawlines($ctx);
+ my $herectx = $here . "\n";
+
+ for (my $n = 0; $n < $cnt; $n++) {
+ $herectx .= raw_line($linenr, $n) . "\n";
+ }
+
+ if (($stmts =~ tr/;/;/) == 1 &&
+ $stmts !~ /^\s*(if|while|for|switch)\b/) {
+ WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
+ "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
+ }
+ if (defined $semis && $semis ne "") {
+ WARN("DO_WHILE_MACRO_WITH_TRAILING_SEMICOLON",
+ "do {} while (0) macros should not be semicolon terminated\n" . "$herectx");
+ }
+ }
+ }
+
# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
# all assignments may have only one of the following with an assignment:
# .
@@ -3261,6 +3305,12 @@ sub process {
"sizeof(& should be avoided\n" . $herecurr);
}
+# check for sizeof without parenthesis
+ if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
+ WARN("SIZEOF_PARENTHESIS",
+ "sizeof $1 should be sizeof($1)\n" . $herecurr);
+ }
+
# check for line continuations in quoted strings with odd counts of "
if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) {
WARN("LINE_CONTINUATIONS",
@@ -3309,6 +3359,22 @@ sub process {
}
}
+# check usleep_range arguments
+ if ($^V && $^V ge 5.10.0 &&
+ defined $stat &&
+ $stat =~ /^\+(?:.*?)\busleep_range\s*\(\s*($FuncArg)\s*,\s*($FuncArg)\s*\)/) {
+ my $min = $1;
+ my $max = $7;
+ if ($min eq $max) {
+ WARN("USLEEP_RANGE",
+ "usleep_range should not use min == max args; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
+ } elsif ($min =~ /^\d+$/ && $max =~ /^\d+$/ &&
+ $min > $max) {
+ WARN("USLEEP_RANGE",
+ "usleep_range args reversed, use min then max; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
+ }
+ }
+
# check for new externs in .c files.
if ($realfile =~ /\.c$/ && defined $stat &&
$stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
new file mode 100644
index 000000000000..06284c57a951
--- /dev/null
+++ b/scripts/coccinelle/iterators/use_after_iter.cocci
@@ -0,0 +1,147 @@
+/// If list_for_each_entry, etc., complete a traversal of the list, the iterator
+/// variable ends up pointing to an address at an offset from the list head,
+/// and not a meaningful structure. Thus this value should not be used after
+/// the end of the iterator.
+//#False positives arise when there is a goto in the iterator and the
+//#reported reference is at the label of this goto. Some flag tests
+//#may also cause a report to be a false positive.
+///
+// Confidence: Moderate
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LIP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual context
+virtual org
+virtual report
+
+@r exists@
+identifier c,member;
+expression E,x;
+iterator name list_for_each_entry;
+iterator name list_for_each_entry_reverse;
+iterator name list_for_each_entry_continue;
+iterator name list_for_each_entry_continue_reverse;
+iterator name list_for_each_entry_from;
+iterator name list_for_each_entry_safe;
+iterator name list_for_each_entry_safe_continue;
+iterator name list_for_each_entry_safe_from;
+iterator name list_for_each_entry_safe_reverse;
+iterator name hlist_for_each_entry;
+iterator name hlist_for_each_entry_continue;
+iterator name hlist_for_each_entry_from;
+iterator name hlist_for_each_entry_safe;
+statement S;
+position p1,p2;
+@@
+
+(
+list_for_each_entry@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_reverse@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_continue@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_continue_reverse@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_from@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_safe@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_safe_continue@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_safe_from@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+|
+list_for_each_entry_safe_reverse@p1(c,...,member) { ... when != break;
+ when forall
+ when strict
+}
+)
+...
+(
+list_for_each_entry(c,...) S
+|
+list_for_each_entry_reverse(c,...) S
+|
+list_for_each_entry_continue(c,...) S
+|
+list_for_each_entry_continue_reverse(c,...) S
+|
+list_for_each_entry_from(c,...) S
+|
+list_for_each_entry_safe(c,...) S
+|
+list_for_each_entry_safe(x,c,...) S
+|
+list_for_each_entry_safe_continue(c,...) S
+|
+list_for_each_entry_safe_continue(x,c,...) S
+|
+list_for_each_entry_safe_from(c,...) S
+|
+list_for_each_entry_safe_from(x,c,...) S
+|
+list_for_each_entry_safe_reverse(c,...) S
+|
+list_for_each_entry_safe_reverse(x,c,...) S
+|
+hlist_for_each_entry(c,...) S
+|
+hlist_for_each_entry_continue(c,...) S
+|
+hlist_for_each_entry_from(c,...) S
+|
+hlist_for_each_entry_safe(c,...) S
+|
+list_remove_head(x,c,...)
+|
+sizeof(<+...c...+>)
+|
+&c->member
+|
+c = E
+|
+*c@p2
+)
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+cocci.print_main("invalid iterator index reference",p2)
+cocci.print_secs("iterator",p1)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+msg = "ERROR: invalid reference to the index variable of the iterator on line %s" % (p1[0].line)
+coccilib.report.print_report(p2[0], msg)
diff --git a/scripts/coccinelle/misc/irqf_oneshot.cocci b/scripts/coccinelle/misc/irqf_oneshot.cocci
new file mode 100644
index 000000000000..6cfde94be0ef
--- /dev/null
+++ b/scripts/coccinelle/misc/irqf_oneshot.cocci
@@ -0,0 +1,65 @@
+/// Make sure threaded IRQs without a primary handler are always requested with
+/// IRQF_ONESHOT
+///
+//
+// Confidence: Good
+// Comments:
+// Options: --no-includes
+
+virtual patch
+virtual context
+virtual org
+virtual report
+
+@r1@
+expression irq;
+expression thread_fn;
+expression flags;
+position p;
+@@
+request_threaded_irq@p(irq, NULL, thread_fn,
+(
+flags | IRQF_ONESHOT
+|
+IRQF_ONESHOT
+)
+, ...)
+
+@depends on patch@
+expression irq;
+expression thread_fn;
+expression flags;
+position p != r1.p;
+@@
+request_threaded_irq@p(irq, NULL, thread_fn,
+(
+-0
++IRQF_ONESHOT
+|
+-flags
++flags | IRQF_ONESHOT
+)
+, ...)
+
+@depends on context@
+position p != r1.p;
+@@
+*request_threaded_irq@p(...)
+
+@match depends on report || org@
+expression irq;
+position p != r1.p;
+@@
+request_threaded_irq@p(irq, NULL, ...)
+
+@script:python depends on org@
+p << match.p;
+@@
+msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+coccilib.org.print_todo(p[0],msg)
+
+@script:python depends on report@
+p << match.p;
+@@
+msg = "ERROR: Threaded IRQ with no primary handler requested without IRQF_ONESHOT"
+coccilib.report.print_report(p[0],msg)
diff --git a/scripts/config b/scripts/config
index ed6653ef9702..ee355394f4ef 100755
--- a/scripts/config
+++ b/scripts/config
@@ -1,6 +1,9 @@
#!/bin/bash
# Manipulate options in a .config file from the command line
+# If no prefix forced, use the default CONFIG_
+CONFIG_="${CONFIG_-CONFIG_}"
+
usage() {
cat >&2 <<EOL
Manipulate options in a .config file from the command line.
@@ -14,6 +17,7 @@ commands:
Set option to "string"
--set-val option value
Set option to value
+ --undefine|-u option Undefine option
--state|-s option Print state of option (n,y,m,undef)
--enable-after|-E beforeopt option
@@ -26,10 +30,17 @@ commands:
commands can be repeated multiple times
options:
- --file .config file to change (default .config)
+ --file config-file .config file to change (default .config)
+ --keep-case|-k Keep next symbols' case (don't upper-case it)
config doesn't check the validity of the .config file. This is done at next
- make time.
+make time.
+
+By default, config will upper-case the given symbol. Use --keep-case to keep
+the case of all following symbols unchanged.
+
+config uses 'CONFIG_' as the default symbol prefix. Set the environment
+variable CONFIG_ to the prefix to use. Eg.: CONFIG_="FOO_" config ...
EOL
exit 1
}
@@ -40,11 +51,13 @@ checkarg() {
usage
fi
case "$ARG" in
- CONFIG_*)
- ARG="${ARG/CONFIG_/}"
+ ${CONFIG_}*)
+ ARG="${ARG/${CONFIG_}/}"
;;
esac
- ARG="`echo $ARG | tr a-z A-Z`"
+ if [ "$MUNGE_CASE" = "yes" ] ; then
+ ARG="`echo $ARG | tr a-z A-Z`"
+ fi
}
set_var() {
@@ -61,6 +74,12 @@ set_var() {
fi
}
+undef_var() {
+ local name=$1
+
+ sed -ri "/^($name=|# $name is not set)/d" "$FN"
+}
+
if [ "$1" = "--file" ]; then
FN="$2"
if [ "$FN" = "" ] ; then
@@ -75,10 +94,16 @@ if [ "$1" = "" ] ; then
usage
fi
+MUNGE_CASE=yes
while [ "$1" != "" ] ; do
CMD="$1"
shift
case "$CMD" in
+ --keep-case|-k)
+ MUNGE_CASE=no
+ shift
+ continue
+ ;;
--refresh)
;;
--*-after)
@@ -95,55 +120,58 @@ while [ "$1" != "" ] ; do
esac
case "$CMD" in
--enable|-e)
- set_var "CONFIG_$ARG" "CONFIG_$ARG=y"
+ set_var "${CONFIG_}$ARG" "${CONFIG_}$ARG=y"
;;
--disable|-d)
- set_var "CONFIG_$ARG" "# CONFIG_$ARG is not set"
+ set_var "${CONFIG_}$ARG" "# ${CONFIG_}$ARG is not set"
;;
--module|-m)
- set_var "CONFIG_$ARG" "CONFIG_$ARG=m"
+ set_var "${CONFIG_}$ARG" "${CONFIG_}$ARG=m"
;;
--set-str)
# sed swallows one level of escaping, so we need double-escaping
- set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
+ set_var "${CONFIG_}$ARG" "${CONFIG_}$ARG=\"${1//\"/\\\\\"}\""
shift
;;
--set-val)
- set_var "CONFIG_$ARG" "CONFIG_$ARG=$1"
+ set_var "${CONFIG_}$ARG" "${CONFIG_}$ARG=$1"
shift
;;
+ --undefine|-u)
+ undef_var "${CONFIG_}$ARG"
+ ;;
--state|-s)
- if grep -q "# CONFIG_$ARG is not set" $FN ; then
+ if grep -q "# ${CONFIG_}$ARG is not set" $FN ; then
echo n
else
- V="$(grep "^CONFIG_$ARG=" $FN)"
+ V="$(grep "^${CONFIG_}$ARG=" $FN)"
if [ $? != 0 ] ; then
echo undef
else
- V="${V/#CONFIG_$ARG=/}"
+ V="${V/#${CONFIG_}$ARG=/}"
V="${V/#\"/}"
V="${V/%\"/}"
- V="${V/\\\"/\"}"
+ V="${V//\\\"/\"}"
echo "${V}"
fi
fi
;;
--enable-after|-E)
- set_var "CONFIG_$B" "CONFIG_$B=y" "CONFIG_$A"
+ set_var "${CONFIG_}$B" "${CONFIG_}$B=y" "${CONFIG_}$A"
;;
--disable-after|-D)
- set_var "CONFIG_$B" "# CONFIG_$B is not set" "CONFIG_$A"
+ set_var "${CONFIG_}$B" "# ${CONFIG_}$B is not set" "${CONFIG_}$A"
;;
--module-after|-M)
- set_var "CONFIG_$B" "CONFIG_$B=m" "CONFIG_$A"
+ set_var "${CONFIG_}$B" "${CONFIG_}$B=m" "${CONFIG_}$A"
;;
# undocumented because it ignores --file (fixme)
diff --git a/scripts/decodecode b/scripts/decodecode
index 18ba881c3415..4f8248d5a11f 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -89,7 +89,7 @@ echo $code >> $T.s
disas $T
cat $T.dis >> $T.aa
-faultline=`cat $T.dis | head -1 | cut -d":" -f2`
+faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g"
diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore
index ee120d441565..be603c4fef62 100644
--- a/scripts/kconfig/.gitignore
+++ b/scripts/kconfig/.gitignore
@@ -7,7 +7,6 @@ config*
*.tab.h
zconf.hash.c
*.moc
-lkc_defs.h
gconf.glade.h
*.pot
*.mo
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 79662658fb91..77d53999ffb9 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -114,7 +114,7 @@ help:
@echo ' alldefconfig - New config with all symbols set to default'
@echo ' randconfig - New config with random answer to all options'
@echo ' listnewconfig - List new options'
- @echo ' oldnoconfig - Same as silentoldconfig but set new symbols to n (unset)'
+ @echo ' oldnoconfig - Same as silentoldconfig but sets new symbols to their default value'
# lxdialog stuff
check-lxdialog := $(srctree)/$(src)/lxdialog/check-lxdialog.sh
@@ -234,12 +234,12 @@ $(obj)/.tmp_qtcheck:
if [ -f $$d/include/qconfig.h ]; then dir=$$d; break; fi; \
done; \
if [ -z "$$dir" ]; then \
- echo "*"; \
- echo "* Unable to find any QT installation. Please make sure that"; \
- echo "* the QT4 or QT3 development package is correctly installed and"; \
- echo "* either qmake can be found or install pkg-config or set"; \
- echo "* the QTDIR environment variable to the correct location."; \
- echo "*"; \
+ echo >&2 "*"; \
+ echo >&2 "* Unable to find any QT installation. Please make sure that"; \
+ echo >&2 "* the QT4 or QT3 development package is correctly installed and"; \
+ echo >&2 "* either qmake can be found or install pkg-config or set"; \
+ echo >&2 "* the QTDIR environment variable to the correct location."; \
+ echo >&2 "*"; \
false; \
fi; \
libpath=$$dir/lib; lib=qt; osdir=""; \
@@ -260,8 +260,8 @@ $(obj)/.tmp_qtcheck:
else \
cflags="\$$(shell pkg-config QtCore QtGui Qt3Support --cflags)"; \
libs="\$$(shell pkg-config QtCore QtGui Qt3Support --libs)"; \
- binpath="\$$(shell pkg-config QtCore --variable=prefix)"; \
- moc="$$binpath/bin/moc"; \
+ moc="\$$(shell pkg-config QtCore --variable=moc_location)"; \
+ [ -n "$$moc" ] || moc="\$$(shell pkg-config QtCore --variable=prefix)/bin/moc"; \
fi; \
echo "KC_QT_CFLAGS=$$cflags" > $@; \
echo "KC_QT_LIBS=$$libs" >> $@; \
@@ -279,17 +279,17 @@ $(obj)/.tmp_gtkcheck:
if `pkg-config --atleast-version=2.0.0 gtk+-2.0`; then \
touch $@; \
else \
- echo "*"; \
- echo "* GTK+ is present but version >= 2.0.0 is required."; \
- echo "*"; \
+ echo >&2 "*"; \
+ echo >&2 "* GTK+ is present but version >= 2.0.0 is required."; \
+ echo >&2 "*"; \
false; \
fi \
else \
- echo "*"; \
- echo "* Unable to find the GTK+ installation. Please make sure that"; \
- echo "* the GTK+ 2.0 development package is correctly installed..."; \
- echo "* You need gtk+-2.0, glib-2.0 and libglade-2.0."; \
- echo "*"; \
+ echo >&2 "*"; \
+ echo >&2 "* Unable to find the GTK+ installation. Please make sure that"; \
+ echo >&2 "* the GTK+ 2.0 development package is correctly installed..."; \
+ echo >&2 "* You need gtk+-2.0, glib-2.0 and libglade-2.0."; \
+ echo >&2 "*"; \
false; \
fi
endif
@@ -298,8 +298,11 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c $(obj)/zconf.hash.c
$(obj)/qconf.o: $(obj)/qconf.moc
-$(obj)/%.moc: $(src)/%.h
- $(KC_QT_MOC) -i $< -o $@
+quiet_cmd_moc = MOC $@
+ cmd_moc = $(KC_QT_MOC) -i $< -o $@
+
+$(obj)/%.moc: $(src)/%.h $(obj)/.tmp_qtcheck
+ $(call cmd,moc)
# Extract gconf menu items for I18N support
$(obj)/gconf.glade.h: $(obj)/gconf.glade
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 52577f052bc1..13ddf1126c2a 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -182,10 +182,66 @@ static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
return 0;
}
+#define LINE_GROWTH 16
+static int add_byte(int c, char **lineptr, size_t slen, size_t *n)
+{
+ char *nline;
+ size_t new_size = slen + 1;
+ if (new_size > *n) {
+ new_size += LINE_GROWTH - 1;
+ new_size *= 2;
+ nline = realloc(*lineptr, new_size);
+ if (!nline)
+ return -1;
+
+ *lineptr = nline;
+ *n = new_size;
+ }
+
+ (*lineptr)[slen] = c;
+
+ return 0;
+}
+
+static ssize_t compat_getline(char **lineptr, size_t *n, FILE *stream)
+{
+ char *line = *lineptr;
+ size_t slen = 0;
+
+ for (;;) {
+ int c = getc(stream);
+
+ switch (c) {
+ case '\n':
+ if (add_byte(c, &line, slen, n) < 0)
+ goto e_out;
+ slen++;
+ /* fall through */
+ case EOF:
+ if (add_byte('\0', &line, slen, n) < 0)
+ goto e_out;
+ *lineptr = line;
+ if (slen == 0)
+ return -1;
+ return slen;
+ default:
+ if (add_byte(c, &line, slen, n) < 0)
+ goto e_out;
+ slen++;
+ }
+ }
+
+e_out:
+ line[slen-1] = '\0';
+ *lineptr = line;
+ return -1;
+}
+
int conf_read_simple(const char *name, int def)
{
FILE *in = NULL;
- char line[1024];
+ char *line = NULL;
+ size_t line_asize = 0;
char *p, *p2;
struct symbol *sym;
int i, def_flags;
@@ -247,7 +303,7 @@ load:
}
}
- while (fgets(line, sizeof(line), in)) {
+ while (compat_getline(&line, &line_asize, in) != -1) {
conf_lineno++;
sym = NULL;
if (line[0] == '#') {
@@ -335,6 +391,7 @@ setsym:
cs->def[def].tri = EXPR_OR(cs->def[def].tri, sym->def[def].tri);
}
}
+ free(line);
fclose(in);
if (modules_sym)
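
The confdata.c change above drops the fixed 1024-byte line buffer in favour of compat_getline(), a getline() work-alike that grows its heap buffer geometrically (LINE_GROWTH, then doubling). A minimal userspace sketch of the same calling convention - start with NULL/0, the reader reallocates, the caller frees once, -1 signals EOF - built on POSIX getline() here, so anything beyond that convention is an assumption rather than kernel code:

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *line = NULL;	/* reader allocates and grows this */
	size_t n = 0;		/* current allocation size */
	ssize_t len;

	while ((len = getline(&line, &n, stdin)) != -1)
		printf("read %zd bytes: %s", len, line);

	free(line);		/* single free, as conf_read_simple() now does */
	return 0;
}
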
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
index 82cc3a85e7f8..e3b12c010417 100644
--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -4,7 +4,7 @@
# What library to link
ldflags()
{
- for ext in so a dylib ; do
+ for ext in so a dll.a dylib ; do
for lib in ncursesw ncurses curses ; do
$cc -print-file-name=lib${lib}.${ext} | grep -q /
if [ $? -eq 0 ]; then
@@ -19,12 +19,12 @@ ldflags()
# Where is ncurses.h?
ccflags()
{
- if [ -f /usr/include/ncurses/ncurses.h ]; then
+ if [ -f /usr/include/ncursesw/curses.h ]; then
+ echo '-I/usr/include/ncursesw -DCURSES_LOC="<ncursesw/curses.h>"'
+ elif [ -f /usr/include/ncurses/ncurses.h ]; then
echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses.h>"'
elif [ -f /usr/include/ncurses/curses.h ]; then
echo '-I/usr/include/ncurses -DCURSES_LOC="<ncurses/curses.h>"'
- elif [ -f /usr/include/ncursesw/curses.h ]; then
- echo '-I/usr/include/ncursesw -DCURSES_LOC="<ncursesw/curses.h>"'
elif [ -f /usr/include/ncurses.h ]; then
echo '-DCURSES_LOC="<ncurses.h>"'
else
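
The ccflags() reordering above prefers the wide-character ncursesw header and hands its location to the C side as a -DCURSES_LOC=... define. A hedged sketch of how a consumer picks that up - the macro-as-include trick lxdialog relies on; the demo program is illustrative, not part of the tree (compile with e.g. cc -DCURSES_LOC='<ncursesw/curses.h>' demo.c -lncursesw):

#include CURSES_LOC	/* header path injected by check-lxdialog.sh */

int main(void)
{
	initscr();		/* start curses with whichever library was found */
	printw("curses header resolved via CURSES_LOC");
	refresh();
	getch();
	endwin();
	return 0;
}
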
diff --git a/scripts/kconfig/lxdialog/textbox.c b/scripts/kconfig/lxdialog/textbox.c
index 154c2dd245b7..4e5de60a0c0d 100644
--- a/scripts/kconfig/lxdialog/textbox.c
+++ b/scripts/kconfig/lxdialog/textbox.c
@@ -129,6 +129,7 @@ do_resize:
case 'e':
case 'X':
case 'x':
+ case 'q':
delwin(box);
delwin(dialog);
return 0;
@@ -190,6 +191,7 @@ do_resize:
break;
case 'B': /* Previous page */
case 'b':
+ case 'u':
case KEY_PPAGE:
if (begin_reached)
break;
@@ -214,6 +216,7 @@ do_resize:
break;
case KEY_NPAGE: /* Next page */
case ' ':
+ case 'd':
if (end_reached)
break;
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index f606738d421d..f584a281bb4c 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -105,10 +105,10 @@ static const char mconf_readme[] = N_(
"Text Box (Help Window)\n"
"--------\n"
"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
-" keys h,j,k,l function here as do <SPACE BAR> and <B> for those\n"
-" who are familiar with less and lynx.\n"
+" keys h,j,k,l function here as do <u>, <d>, <SPACE BAR> and <B> for \n"
+" those who are familiar with less and lynx.\n"
"\n"
-"o Press <E>, <X>, <Enter> or <Esc><Esc> to exit.\n"
+"o Press <E>, <X>, <q>, <Enter> or <Esc><Esc> to exit.\n"
"\n"
"\n"
"Alternate Configuration Files\n"
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 8c0eb65978c9..1704a8562a5d 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -83,10 +83,10 @@ static const char nconf_readme[] = N_(
"Text Box (Help Window)\n"
"--------\n"
"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
-" keys h,j,k,l function here as do <SPACE BAR> for those\n"
-" who are familiar with less and lynx.\n"
+" keys h,j,k,l function here as do <u>, <d> and <SPACE BAR> for\n"
+" those who are familiar with less and lynx.\n"
"\n"
-"o Press <Enter>, <F1>, <F5>, <F7> or <Esc> to exit.\n"
+"o Press <Enter>, <F1>, <F5>, <F9>, <q> or <Esc> to exit.\n"
"\n"
"\n"
"Alternate Configuration Files\n"
@@ -1503,7 +1503,11 @@ int main(int ac, char **av)
}
notimeout(stdscr, FALSE);
+#if NCURSES_REENTRANT
+ set_escdelay(1);
+#else
ESCDELAY = 1;
+#endif
/* set btns menu */
curses_menu = new_menu(curses_menu_items);
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 3b18dd839668..379003c7a2b4 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -604,9 +604,11 @@ void show_scroll_win(WINDOW *main_window,
switch (res) {
case KEY_NPAGE:
case ' ':
+ case 'd':
start_y += text_lines-2;
break;
case KEY_PPAGE:
+ case 'u':
start_y -= text_lines+2;
break;
case KEY_HOME:
@@ -632,10 +634,10 @@ void show_scroll_win(WINDOW *main_window,
start_x++;
break;
}
- if (res == 10 || res == 27 || res == 'q'
- || res == KEY_F(F_BACK) || res == KEY_F(F_EXIT)) {
+ if (res == 10 || res == 27 || res == 'q' ||
+ res == KEY_F(F_HELP) || res == KEY_F(F_BACK) ||
+ res == KEY_F(F_EXIT))
break;
- }
if (start_y < 0)
start_y = 0;
if (start_y >= total_lines-text_lines)
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index bccf07ddd0b6..2fbbbc1ddea0 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -45,6 +45,16 @@
use strict;
use Getopt::Long;
+# set the environment variable LOCALMODCONFIG_DEBUG to get
+# debug output.
+my $debugprint = 0;
+$debugprint = 1 if (defined($ENV{LOCALMODCONFIG_DEBUG}));
+
+sub dprint {
+ return if (!$debugprint);
+ print STDERR @_;
+}
+
my $config = ".config";
my $uname = `uname -r`;
@@ -113,6 +123,10 @@ sub find_config {
find_config;
+# Read in the entire config file into config_file
+my @config_file = <CIN>;
+close CIN;
+
# Parse options
my $localmodconfig = 0;
my $localyesconfig = 0;
@@ -186,6 +200,7 @@ sub read_kconfig {
$state = "NEW";
$config = $2;
+ # Add depends for 'if' nesting
for (my $i = 0; $i < $iflevel; $i++) {
if ($i) {
$depends{$config} .= " " . $ifdeps[$i];
@@ -204,10 +219,11 @@ sub read_kconfig {
# Get the configs that select this config
} elsif ($state ne "NONE" && /^\s*select\s+(\S+)/) {
- if (defined($selects{$1})) {
- $selects{$1} .= " " . $config;
+ my $conf = $1;
+ if (defined($selects{$conf})) {
+ $selects{$conf} .= " " . $config;
} else {
- $selects{$1} = $config;
+ $selects{$conf} = $config;
}
# configs without prompts must be selected
@@ -250,6 +266,7 @@ if ($kconfig) {
read_kconfig($kconfig);
}
+# Makefiles can use variables to define their dependencies
sub convert_vars {
my ($line, %vars) = @_;
@@ -293,6 +310,7 @@ foreach my $makefile (@makefiles) {
my $objs;
+ # Convert variables in a line (could define configs)
$_ = convert_vars($_, %make_vars);
# collect objects after obj-$(CONFIG_FOO_BAR)
@@ -373,13 +391,15 @@ while (<LIN>) {
close (LIN);
# add to the configs hash all configs that are needed to enable
-# a loaded module.
+# a loaded module. This is a direct obj-${CONFIG_FOO} += bar.o
+# where we know we need bar.o so we add FOO to the list.
my %configs;
foreach my $module (keys(%modules)) {
if (defined($objects{$module})) {
my @arr = @{$objects{$module}};
foreach my $conf (@arr) {
$configs{$conf} = $module;
+ dprint "$conf added by direct ($module)\n";
}
} else {
# Most likely, someone has a custom (binary?) module loaded.
@@ -387,9 +407,24 @@ foreach my $module (keys(%modules)) {
}
}
+# Read the current config, and see what is enabled. We want to
+# ignore configs that we would not enable anyway.
+
+my %orig_configs;
my $valid = "A-Za-z_0-9";
+
+foreach my $line (@config_file) {
+ $_ = $line;
+
+ if (/(CONFIG_[$valid]*)=(m|y)/) {
+ $orig_configs{$1} = $2;
+ }
+}
+
my $repeat = 1;
+my $depconfig;
+
#
# Note, we do not care about operators (like &&, ||, !); we want to add any
# config that is in the depend list of another config. This script does
@@ -398,7 +433,7 @@ my $repeat = 1;
# to keep on. If A was on in the original config, B would not have been
# and B would not be turned on by this script.
#
-sub parse_config_dep_select
+sub parse_config_depends
{
my ($p) = @_;
@@ -409,10 +444,16 @@ sub parse_config_dep_select
$p =~ s/^[^$valid]*[$valid]+//;
+ # We only need to process if the depend config is a module
+ if (!defined($orig_configs{$conf}) || $orig_configs{$conf} ne "m") {
+ next;
+ }
+
if (!defined($configs{$conf})) {
# We must make sure that this config has its
# dependencies met.
$repeat = 1; # do again
+ dprint "$conf selected by depend $depconfig\n";
$configs{$conf} = 1;
}
} else {
@@ -421,31 +462,132 @@ sub parse_config_dep_select
}
}
-while ($repeat) {
- $repeat = 0;
+# Select is treated a bit differently than depends. We call this
+# when a config has no prompt and requires another config to be
+# selected. We used to just select all configs that selected this
+# config, but found that this can balloon into enabling hundreds
+# of configs that we do not care about.
+#
+# The idea is we look at all the configs that select it. If one
+# is already in our list of configs to enable, then there's nothing
+# else to do. If there isn't, we pick the first config that was
+# enabled in the original config and use that.
+sub parse_config_selects
+{
+ my ($config, $p) = @_;
- foreach my $config (keys %configs) {
- $config =~ s/^CONFIG_//;
+ my $next_config;
+
+ while ($p =~ /[$valid]/) {
- if (defined($depends{$config})) {
- # This config has dependencies. Make sure they are also included
- parse_config_dep_select $depends{$config};
+ if ($p =~ /^[^$valid]*([$valid]+)/) {
+ my $conf = "CONFIG_" . $1;
+
+ $p =~ s/^[^$valid]*[$valid]+//;
+
+ # Make sure that this config exists in the current .config file
+ if (!defined($orig_configs{$conf})) {
+ dprint "$conf not set for $config select\n";
+ next;
+ }
+
+ # Check if something other than a module selects this config
+ if (defined($orig_configs{$conf}) && $orig_configs{$conf} ne "m") {
+ dprint "$conf (non module) selects config, we are good\n";
+ # we are good with this
+ return;
+ }
+ if (defined($configs{$conf})) {
+ dprint "$conf selects $config so we are good\n";
+ # A set config selects this config, we are good
+ return;
+ }
+ # Set this config to be selected
+ if (!defined($next_config)) {
+ $next_config = $conf;
+ }
+ } else {
+ die "this should never happen";
}
+ }
- if (defined($prompts{$config}) || !defined($selects{$config})) {
- next;
+ # If no possible config selected this, then something is wrong; warn the user.
+ if (!defined($next_config)) {
+ print STDERR "WARNING: $config is required, but nothing in the\n";
+ print STDERR " current config selects it.\n";
+ return;
+ }
+
+ # If we are here, then we found no config that is set and
+ # selects this config. Repeat.
+ $repeat = 1;
+ # Make this config need to be selected
+ $configs{$next_config} = 1;
+ dprint "$next_config selected by select $config\n";
+}
+
+my %process_selects;
+
+# loop through all configs, select their dependencies.
+sub loop_depend {
+ $repeat = 1;
+
+ while ($repeat) {
+ $repeat = 0;
+
+ forloop:
+ foreach my $config (keys %configs) {
+
+ # If this config is not a module, we do not need to process it
+ if (defined($orig_configs{$config}) && $orig_configs{$config} ne "m") {
+ next forloop;
+ }
+
+ $config =~ s/^CONFIG_//;
+ $depconfig = $config;
+
+ if (defined($depends{$config})) {
+ # This config has dependencies. Make sure they are also included
+ parse_config_depends $depends{$config};
+ }
+
+ # If the config has no prompt, we need to check whether a config
+ # that is enabled selects it, or whether we need to enable one.
+ if (!defined($prompts{$config}) && defined($selects{$config})) {
+ $process_selects{$config} = 1;
+ }
}
+ }
+}
+
+sub loop_select {
+
+ foreach my $config (keys %process_selects) {
+ $config =~ s/^CONFIG_//;
+
+ dprint "Process select $config\n";
# config has no prompt and must be selected.
- parse_config_dep_select $selects{$config};
+ parse_config_selects $config, $selects{$config};
}
}
+while ($repeat) {
+ # Get the first set of configs and their dependencies.
+ loop_depend;
+
+ $repeat = 0;
+
+ # Now we need to see if we have to check selects.
+ loop_select;
+}
+
my %setconfigs;
# Finally, read the .config file and turn off any module enabled that
# we could not find a reason to keep enabled.
-while(<CIN>) {
+foreach my $line (@config_file) {
+ $_ = $line;
if (/CONFIG_IKCONFIG/) {
if (/# CONFIG_IKCONFIG is not set/) {
@@ -473,7 +615,6 @@ while(<CIN>) {
}
print;
}
-close(CIN);
# Integrity check, make sure all modules that we want enabled do
# indeed have their configs set.
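
Several of the streamline_config.pl hunks add dprint() calls gated by the LOCALMODCONFIG_DEBUG environment variable. The same pattern in C, for reference; the function and variable names below are invented for the sketch:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static int debugprint;			/* set once from the environment */

static void dprint(const char *fmt, ...)
{
	va_list ap;

	if (!debugprint)
		return;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);	/* debug output goes to stderr */
	va_end(ap);
}

int main(void)
{
	debugprint = getenv("LOCALMODCONFIG_DEBUG") != NULL;
	dprint("CONFIG_FOO added by direct (foo.ko)\n");
	return 0;
}
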
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9b0c0b8b4ab4..8fd107a3fac4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1786,6 +1786,7 @@ sub dump_function($$) {
$prototype =~ s/__init +//;
$prototype =~ s/__init_or_module +//;
$prototype =~ s/__must_check +//;
+ $prototype =~ s/__weak +//;
$prototype =~ s/^#\s*define\s+//; #ak added
$prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index cd9c6c6bb4c9..4235a6361fec 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -210,8 +210,8 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
mksysmap ${kallsyms_vmlinux} .tmp_System.map
if ! cmp -s System.map .tmp_System.map; then
- echo Inconsistent kallsyms data
- echo echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+ echo >&2 Inconsistent kallsyms data
+ echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
cleanup
exit 1
fi
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index c95fdda58414..acb86507828a 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -92,7 +92,7 @@ rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir"
mkdir -m 755 -p "$tmpdir/DEBIAN"
mkdir -p "$tmpdir/lib" "$tmpdir/boot" "$tmpdir/usr/share/doc/$packagename"
mkdir -m 755 -p "$fwdir/DEBIAN"
-mkdir -p "$fwdir/lib" "$fwdir/usr/share/doc/$fwpackagename"
+mkdir -p "$fwdir/lib/firmware/$version/" "$fwdir/usr/share/doc/$fwpackagename"
mkdir -m 755 -p "$libc_headers_dir/DEBIAN"
mkdir -p "$libc_headers_dir/usr/share/doc/$libc_headers_packagename"
mkdir -m 755 -p "$kernel_headers_dir/DEBIAN"
@@ -243,7 +243,7 @@ EOF
fi
# Build header package
-(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
+(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
@@ -267,7 +267,8 @@ EOF
# Do we have firmware? Move it out of the way and build it into a package.
if [ -e "$tmpdir/lib/firmware" ]; then
- mv "$tmpdir/lib/firmware" "$fwdir/lib/"
+ mv "$tmpdir/lib/firmware"/* "$fwdir/lib/firmware/$version/"
+ rmdir "$tmpdir/lib/firmware"
cat <<EOF >> debian/control
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 1ca9ceb95eb6..6acf83449105 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -247,6 +247,7 @@ do_file(char const *const fname)
case EM_X86_64:
custom_sort = sort_x86_table;
break;
+ case EM_S390:
case EM_MIPS:
break;
} /* end switch */
diff --git a/scripts/tags.sh b/scripts/tags.sh
index cf7b12fee573..cff8faad73d1 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -153,7 +153,8 @@ exuberant()
--regex-c++='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/' \
--regex-c++='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
--regex-c++='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
- --regex-c++='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/'
+ --regex-c++='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/' \
+ --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
all_kconfigs | xargs $1 -a \
--langdef=kconfig --language-force=kconfig \
@@ -195,7 +196,8 @@ emacs()
--regex='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/' \
--regex='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
--regex='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
- --regex='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/'
+ --regex='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/' \
+ --regex='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
all_kconfigs | xargs $1 -a \
--regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 68d82daed257..4d3fab47e643 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -274,7 +274,7 @@ static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
if (!node)
goto out;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 689fe2d22165..6c77f63c7591 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2129,7 +2129,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
int fd;
j++;
- i = j * __NFDBITS;
+ i = j * BITS_PER_LONG;
fdt = files_fdtable(files);
if (i >= fdt->max_fds)
break;
@@ -2791,11 +2791,16 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
/* We strip a nul only if it is at the end, otherwise the
* context contains a nul and we should audit that */
- str = value;
- if (str[size - 1] == '\0')
- audit_size = size - 1;
- else
- audit_size = size;
+ if (value) {
+ str = value;
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ } else {
+ str = "";
+ audit_size = 0;
+ }
ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
audit_log_format(ab, "op=setxattr invalid_context=");
audit_log_n_untrustedstring(ab, value, audit_size);
@@ -3180,6 +3185,7 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
case F_GETFL:
case F_GETOWN:
case F_GETSIG:
+ case F_GETOWNER_UIDS:
/* Just check FD__USE permission */
err = file_has_perm(cred, file, 0);
break;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index d31e6d957c21..b1b768e4049a 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -323,11 +323,11 @@ static int smk_parse_long_rule(const char *data, struct smack_rule *rule,
int datalen;
int rc = -1;
- /*
- * This is probably inefficient, but safe.
- */
+ /* This is inefficient */
datalen = strlen(data);
- subject = kzalloc(datalen, GFP_KERNEL);
+
+ /* Our first element can be 64 + \0 with no spaces */
+ subject = kzalloc(datalen + 1, GFP_KERNEL);
if (subject == NULL)
return -1;
object = kzalloc(datalen, GFP_KERNEL);
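
The smk_parse_long_rule() fix above sizes the subject buffer as datalen + 1 because the first token can be as long as the whole input and still needs a terminating NUL. A userspace analogue of the same sizing rule; the rule string and names are made up for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *data = "subject-label object-label rwxat";
	size_t datalen = strlen(data);
	char *subject = calloc(datalen + 1, 1);	/* worst case: whole line + '\0' */

	if (!subject)
		return 1;
	/* the first token is at most datalen bytes, so this cannot overflow */
	if (sscanf(data, "%s", subject) == 1)
		printf("subject: %s\n", subject);
	free(subject);
	return 0;
}
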
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 83554ee8a587..0cc99a3ea42d 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -279,12 +279,46 @@ static int yama_ptrace_access_check(struct task_struct *child,
}
if (rc) {
- char name[sizeof(current->comm)];
printk_ratelimited(KERN_NOTICE
"ptrace of pid %d was attempted by: %s (pid %d)\n",
- child->pid,
- get_task_comm(name, current),
- current->pid);
+ child->pid, current->comm, current->pid);
+ }
+
+ return rc;
+}
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+static int yama_ptrace_traceme(struct task_struct *parent)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+
+ /* Only disallow PTRACE_TRACEME on more aggressive settings. */
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_CAPABILITY:
+ if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
+ rc = -EPERM;
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ rc = -EPERM;
+ break;
+ }
+
+ if (rc) {
+ printk_ratelimited(KERN_NOTICE
+ "ptraceme of pid %d was attempted by: %s (pid %d)\n",
+ current->pid, parent->comm, parent->pid);
}
return rc;
@@ -294,6 +328,7 @@ static struct security_operations yama_ops = {
.name = "yama",
.ptrace_access_check = yama_ptrace_access_check,
+ .ptrace_traceme = yama_ptrace_traceme,
.task_prctl = yama_task_prctl,
.task_free = yama_task_free,
};
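
The new yama_ptrace_traceme() hook restricts the other direction of tracing: a task volunteering to be traced by its parent. From userspace the affected call looks like the sketch below; under ptrace_scope 2 without CAP_SYS_PTRACE in the parent, or under scope 3, it now fails with EPERM (the program itself is only illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ptrace.h>

int main(void)
{
	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) == -1) {
		fprintf(stderr, "PTRACE_TRACEME refused: %s\n", strerror(errno));
		return 1;
	}
	puts("PTRACE_TRACEME allowed");
	return 0;
}
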
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 0d7b25e81643..4e1fda75c1c9 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -106,7 +106,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {
.prepare = pxa2xx_ac97_pcm_prepare,
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pxa2xx_ac97_do_suspend(struct snd_card *card)
{
@@ -243,7 +243,7 @@ static struct platform_driver pxa2xx_ac97_driver = {
.driver = {
.name = "pxa2xx-ac97",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.pm = &pxa2xx_ac97_pm_ops,
#endif
},
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index eb4ceb71123e..277ebce23a45 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -452,6 +452,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)
dac->regs = ioremap(regs->start, resource_size(regs));
if (!dac->regs) {
dev_dbg(&pdev->dev, "could not remap register memory\n");
+ retval = -ENOMEM;
goto out_free_card;
}
@@ -534,7 +535,7 @@ out_put_pclk:
return retval;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_abdac_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index bf47025bdf45..9052aff37f64 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -278,14 +278,9 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
if (retval < 0)
return retval;
/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (cpu_is_at32ap7000()) {
- if (retval < 0)
- return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
- }
+ if (cpu_is_at32ap7000() && retval == 1)
+ if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
+ dw_dma_cyclic_free(chip->dma.rx_chan);
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
@@ -980,6 +975,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev)
if (!chip->regs) {
dev_dbg(&pdev->dev, "could not remap register memory\n");
+ retval = -ENOMEM;
goto err_ioremap;
}
@@ -1134,7 +1130,7 @@ err_snd_card_new:
return retval;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atmel_ac97c_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 768167925409..30e027ecf4da 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -68,6 +68,7 @@ void __snd_printk(unsigned int level, const char *path, int line,
{
va_list args;
#ifdef CONFIG_SND_VERBOSE_PRINTK
+ int kern_level;
struct va_format vaf;
char verbose_fmt[] = KERN_DEFAULT "ALSA %s:%d %pV";
#endif
@@ -81,12 +82,16 @@ void __snd_printk(unsigned int level, const char *path, int line,
#ifdef CONFIG_SND_VERBOSE_PRINTK
vaf.fmt = format;
vaf.va = &args;
- if (format[0] == '<' && format[2] == '>') {
- memcpy(verbose_fmt, format, 3);
- vaf.fmt = format + 3;
+
+ kern_level = printk_get_level(format);
+ if (kern_level) {
+ const char *end_of_header = printk_skip_level(format);
+ memcpy(verbose_fmt, format, end_of_header - format);
+ vaf.fmt = end_of_header;
} else if (level)
- memcpy(verbose_fmt, KERN_DEBUG, 3);
+ memcpy(verbose_fmt, KERN_DEBUG, sizeof(KERN_DEBUG) - 1);
printk(verbose_fmt, sanity_file_name(path), line, &vaf);
+
#else
vprintk(format, args);
#endif
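
The __snd_printk() hunk replaces an open-coded check for a three-character "<N>" prefix with printk_get_level()/printk_skip_level(), which also copes with headers longer than three bytes. A userspace analogue of the old-style parsing being generalised; the helper below is an illustration, not a kernel API:

#include <ctype.h>
#include <stdio.h>

/* return the text after an optional "<digit>" header, reporting the level */
static const char *skip_level(const char *fmt, int *level)
{
	if (fmt[0] == '<' && isdigit((unsigned char)fmt[1]) && fmt[2] == '>') {
		*level = fmt[1] - '0';
		return fmt + 3;
	}
	*level = -1;		/* no explicit level given */
	return fmt;
}

int main(void)
{
	int level;
	const char *msg = skip_level("<3>disk error on sda\n", &level);

	printf("level=%d text=%s", level, msg);
	return 0;
}
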
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 4e7ec2b49873..d0f00356fc11 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
- return NULL;
+ goto _failed;
if (!res_size)
goto _failed;
size = sgbuf->pages * PAGE_SIZE;
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 1128b35b2b05..5a34355e78e8 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1176,7 +1176,7 @@ static int __devexit loopback_remove(struct platform_device *devptr)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int loopback_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index f7d3bfc6bca8..54bb6644a598 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1064,7 +1064,7 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int snd_dummy_suspend(struct device *pdev)
{
struct snd_card *card = dev_get_drvdata(pdev);
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index 1cff331a228e..4608c2ca43f8 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
spin_lock_init(&mpu->output_lock);
spin_lock_init(&mpu->timer_lock);
mpu->hardware = hardware;
+ mpu->irq = -1;
if (! (info_flags & MPU401_INFO_INTEGRATED)) {
int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
mpu->res = request_region(port, res_size, "MPU401 UART");
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 6ca59fc6dcb9..ef171295f6d4 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -199,7 +199,7 @@ static void pcsp_stop_beep(struct snd_pcsp *chip)
pcspkr_stop_sound();
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int pcsp_suspend(struct device *dev)
{
struct snd_pcsp *chip = dev_get_drvdata(dev);
@@ -212,7 +212,7 @@ static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL);
#define PCSP_PM_OPS &pcsp_pm
#else
#define PCSP_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static void pcsp_shutdown(struct platform_device *dev)
{
diff --git a/sound/i2c/other/tea575x-tuner.c b/sound/i2c/other/tea575x-tuner.c
index 7eca25fae413..d14edb7d6484 100644
--- a/sound/i2c/other/tea575x-tuner.c
+++ b/sound/i2c/other/tea575x-tuner.c
@@ -71,6 +71,9 @@ static void snd_tea575x_write(struct snd_tea575x *tea, unsigned int val)
u16 l;
u8 data;
+ if (tea->ops->write_val)
+ return tea->ops->write_val(tea, val);
+
tea->ops->set_direction(tea, 1);
udelay(16);
@@ -94,6 +97,9 @@ static u32 snd_tea575x_read(struct snd_tea575x *tea)
u16 l, rdata;
u32 data = 0;
+ if (tea->ops->read_val)
+ return tea->ops->read_val(tea);
+
tea->ops->set_direction(tea, 0);
tea->ops->set_pins(tea, 0);
udelay(16);
@@ -197,6 +203,8 @@ static int vidioc_g_tuner(struct file *file, void *priv,
strcpy(v->name, "FM");
v->type = V4L2_TUNER_RADIO;
v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
+ if (!tea->cannot_read_data)
+ v->capability |= V4L2_TUNER_CAP_HWSEEK_BOUNDED;
v->rangelow = FREQ_LO;
v->rangehigh = FREQ_HI;
v->rxsubchans = tea->stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO;
@@ -305,7 +313,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *fh,
}
tea->val &= ~TEA575X_BIT_SEARCH;
snd_tea575x_set_freq(tea);
- return -EAGAIN;
+ return -ENODATA;
}
static int tea575x_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -377,7 +385,6 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
strlcpy(tea->vd.name, tea->v4l2_dev->name, sizeof(tea->vd.name));
tea->vd.lock = &tea->mutex;
tea->vd.v4l2_dev = tea->v4l2_dev;
- tea->vd.ctrl_handler = &tea->ctrl_handler;
tea->fops = tea575x_fops;
tea->fops.owner = owner;
tea->vd.fops = &tea->fops;
@@ -386,29 +393,33 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
if (tea->cannot_read_data)
v4l2_disable_ioctl(&tea->vd, VIDIOC_S_HW_FREQ_SEEK);
- v4l2_ctrl_handler_init(&tea->ctrl_handler, 1);
- v4l2_ctrl_new_std(&tea->ctrl_handler, &tea575x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
- retval = tea->ctrl_handler.error;
- if (retval) {
- v4l2_err(tea->v4l2_dev, "can't initialize controls\n");
- v4l2_ctrl_handler_free(&tea->ctrl_handler);
- return retval;
- }
-
- if (tea->ext_init) {
- retval = tea->ext_init(tea);
+ if (!tea->cannot_mute) {
+ tea->vd.ctrl_handler = &tea->ctrl_handler;
+ v4l2_ctrl_handler_init(&tea->ctrl_handler, 1);
+ v4l2_ctrl_new_std(&tea->ctrl_handler, &tea575x_ctrl_ops,
+ V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1);
+ retval = tea->ctrl_handler.error;
if (retval) {
+ v4l2_err(tea->v4l2_dev, "can't initialize controls\n");
v4l2_ctrl_handler_free(&tea->ctrl_handler);
return retval;
}
- }
- v4l2_ctrl_handler_setup(&tea->ctrl_handler);
+ if (tea->ext_init) {
+ retval = tea->ext_init(tea);
+ if (retval) {
+ v4l2_ctrl_handler_free(&tea->ctrl_handler);
+ return retval;
+ }
+ }
+
+ v4l2_ctrl_handler_setup(&tea->ctrl_handler);
+ }
retval = video_register_device(&tea->vd, VFL_TYPE_RADIO, tea->radio_nr);
if (retval) {
v4l2_err(tea->v4l2_dev, "can't register video device!\n");
- v4l2_ctrl_handler_free(&tea->ctrl_handler);
+ v4l2_ctrl_handler_free(tea->vd.ctrl_handler);
return retval;
}
@@ -418,7 +429,7 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
void snd_tea575x_exit(struct snd_tea575x *tea)
{
video_unregister_device(&tea->vd);
- v4l2_ctrl_handler_free(&tea->ctrl_handler);
+ v4l2_ctrl_handler_free(tea->vd.ctrl_handler);
}
static int __init alsa_tea575x_module_init(void)
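
The tea575x hunks let a card provide optional read_val/write_val callbacks that bypass the generic bit-banged register access. The general shape of that optional-ops pattern, with all names invented for the sketch:

#include <stdio.h>

struct tuner_ops {
	unsigned int (*read_val)(void);		/* optional board-specific override */
};

static unsigned int bitbang_read(void)
{
	return 0x1234;				/* stand-in for the generic pin wiggling */
}

static unsigned int tuner_read(const struct tuner_ops *ops)
{
	if (ops->read_val)
		return ops->read_val();		/* fast path supplied by the board */
	return bitbang_read();			/* default path */
}

static unsigned int fast_read(void)
{
	return 0xabcd;
}

int main(void)
{
	struct tuner_ops generic = { 0 };
	struct tuner_ops custom  = { .read_val = fast_read };

	printf("generic=0x%x custom=0x%x\n",
	       tuner_read(&generic), tuner_read(&custom));
	return 0;
}
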
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 2d67c78c9f4b..f7cdaf51512d 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -233,7 +233,7 @@ static int __devinit snd_card_als100_probe(int dev,
irq[dev], dma8[dev], dma16[dev]);
}
- if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) {
+ if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) {
snd_card_free(card);
return error;
}
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index 1d47be8170b5..b3b4f15e45ba 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -612,10 +612,10 @@ static int snd_es1688_capture_close(struct snd_pcm_substream *substream)
static int snd_es1688_free(struct snd_es1688 *chip)
{
- if (chip->res_port) {
+ if (chip->hardware != ES1688_HW_UNDEF)
snd_es1688_init(chip, 0);
+ if (chip->res_port)
release_and_free_resource(chip->res_port);
- }
if (chip->irq >= 0)
free_irq(chip->irq, (void *) chip);
if (chip->dma8 >= 0) {
@@ -657,19 +657,27 @@ int snd_es1688_create(struct snd_card *card,
return -ENOMEM;
chip->irq = -1;
chip->dma8 = -1;
+ chip->hardware = ES1688_HW_UNDEF;
- if ((chip->res_port = request_region(port + 4, 12, "ES1688")) == NULL) {
+ chip->res_port = request_region(port + 4, 12, "ES1688");
+ if (chip->res_port == NULL) {
snd_printk(KERN_ERR "es1688: can't grab port 0x%lx\n", port + 4);
- return -EBUSY;
+ err = -EBUSY;
+ goto exit;
}
- if (request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip)) {
+
+ err = request_irq(irq, snd_es1688_interrupt, 0, "ES1688", (void *) chip);
+ if (err < 0) {
snd_printk(KERN_ERR "es1688: can't grab IRQ %d\n", irq);
- return -EBUSY;
+ goto exit;
}
+
chip->irq = irq;
- if (request_dma(dma8, "ES1688")) {
+ err = request_dma(dma8, "ES1688");
+
+ if (err < 0) {
snd_printk(KERN_ERR "es1688: can't grab DMA8 %d\n", dma8);
- return -EBUSY;
+ goto exit;
}
chip->dma8 = dma8;
@@ -685,14 +693,18 @@ int snd_es1688_create(struct snd_card *card,
err = snd_es1688_probe(chip);
if (err < 0)
- return err;
+ goto exit;
err = snd_es1688_init(chip, 1);
if (err < 0)
- return err;
+ goto exit;
/* Register device */
- return snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+exit:
+ if (err)
+ snd_es1688_free(chip);
+ return err;
}
static struct snd_pcm_ops snd_es1688_playback_ops = {
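
snd_es1688_create() above is reworked so that every failure funnels through one exit label that calls snd_es1688_free(), instead of returning early and leaking whatever was already requested. A small userspace sketch of that single-exit cleanup style; the resources and names are placeholders:

#include <stdio.h>
#include <stdlib.h>

static int create_thing(void)
{
	int err = 0;
	char *bufa = NULL, *bufb = NULL;

	bufa = malloc(64);
	if (!bufa) {
		err = -1;
		goto exit;
	}
	bufb = malloc(64);
	if (!bufb) {
		err = -1;
		goto exit;
	}
	puts("both resources acquired");
exit:
	if (err) {		/* mirrors snd_es1688_free() running on any failure */
		free(bufb);
		free(bufa);
	}
	/* on success the real driver keeps the resources in its chip object */
	return err;
}

int main(void)
{
	return create_thing() ? 1 : 0;
}
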
diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
index 733b014ec7d1..b2b3c014221a 100644
--- a/sound/oss/sb_audio.c
+++ b/sound/oss/sb_audio.c
@@ -575,13 +575,15 @@ static int jazz16_audio_set_speed(int dev, int speed)
if (speed > 0)
{
int tmp;
- int s = speed * devc->channels;
+ int s;
if (speed < 5000)
speed = 5000;
if (speed > 44100)
speed = 44100;
+ s = speed * devc->channels;
+
devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff;
tmp = 256 - devc->tconst;
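
The jazz16_audio_set_speed() fix moves the s = speed * devc->channels computation after the clamping of speed, so the derived divisor always matches the value actually used. The ordering matters, as the sketch shows; the constants mirror the hunk but the function itself is only illustrative:

#include <stdio.h>

static int time_constant(int speed, int channels)
{
	int s;

	if (speed < 5000)
		speed = 5000;
	if (speed > 44100)
		speed = 44100;

	s = speed * channels;			/* derived AFTER clamping */
	return (256 - ((1000000 + s / 2) / s)) & 0xff;
}

int main(void)
{
	/* 48000 Hz is clamped to 44100 before the divisor is computed */
	printf("tconst=%d\n", time_constant(48000, 2));
	return 0;
}
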
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index f75f5ffdfdfb..a71d1c14a0f6 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -94,7 +94,7 @@ static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip,
if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX &&
codec_index != CS46XX_SECONDARY_CODEC_INDEX))
- return -EINVAL;
+ return 0xffff;
chip->active_ctrl(chip, 1);
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index 8e40262d4117..2f6e9c762d3f 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1725,8 +1725,10 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
atc_connect_resources(atc);
atc->timer = ct_timer_new(atc);
- if (!atc->timer)
+ if (!atc->timer) {
+ err = -ENOMEM;
goto error1;
+ }
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops);
if (err < 0)
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f502a2bdc3c..0a436626182b 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
for (page = blk->first_page; page <= blk->last_page; page++, idx++) {
unsigned long ofs = idx << PAGE_SHIFT;
dma_addr_t addr;
- addr = snd_pcm_sgbuf_get_addr(substream, ofs);
+ if (ofs >= runtime->dma_bytes)
+ addr = emu->silent_page.addr;
+ else
+ addr = snd_pcm_sgbuf_get_addr(substream, ofs);
if (! is_valid_page(emu, addr)) {
printk(KERN_ERR "emu: failure page = %d\n", idx);
mutex_unlock(&hdr->block_mutex);
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 647218d69f68..4f7d2dfcef7b 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec,
if (cfg->dig_outs)
snd_printd(" dig-out=0x%x/0x%x\n",
cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
- snd_printd(" inputs:");
+ snd_printd(" inputs:\n");
for (i = 0; i < cfg->num_inputs; i++) {
- snd_printd(" %s=0x%x",
+ snd_printd(" %s=0x%x\n",
hda_get_autocfg_input_label(codec, cfg, i),
cfg->inputs[i].pin);
}
- snd_printd("\n");
if (cfg->dig_in_pin)
snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin);
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 0bc2315b181d..0849aac449f2 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -231,16 +231,22 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
}
EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device);
+static bool ctl_has_mute(struct snd_kcontrol *kcontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ return query_amp_caps(codec, get_amp_nid(kcontrol),
+ get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE;
+}
+
/* get/put callbacks for beep mute mixer switches */
int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_beep *beep = codec->beep;
- if (beep) {
+ if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) {
ucontrol->value.integer.value[0] =
- ucontrol->value.integer.value[1] =
- beep->enabled;
+ ucontrol->value.integer.value[1] = beep->enabled;
return 0;
}
return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
@@ -252,9 +258,20 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_beep *beep = codec->beep;
- if (beep)
- snd_hda_enable_beep_device(codec,
- *ucontrol->value.integer.value);
+ if (beep) {
+ u8 chs = get_amp_channels(kcontrol);
+ int enable = 0;
+ long *valp = ucontrol->value.integer.value;
+ if (chs & 1) {
+ enable |= *valp;
+ valp++;
+ }
+ if (chs & 2)
+ enable |= *valp;
+ snd_hda_enable_beep_device(codec, enable);
+ }
+ if (!ctl_has_mute(kcontrol))
+ return 0;
return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
}
EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep);
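
snd_hda_mixer_amp_switch_put_beep() now derives a single enable flag from whichever channel values the control carries, using the 2-bit channel mask. The combining logic in isolation, with invented names:

#include <stdio.h>

static int combine_enable(unsigned int chs, const long *valp)
{
	int enable = 0;

	if (chs & 1) {			/* left channel present in the control */
		enable |= *valp;
		valp++;
	}
	if (chs & 2)			/* right channel present */
		enable |= *valp;
	return !!enable;
}

int main(void)
{
	long vals[2] = { 0, 1 };

	printf("stereo: %d\n", combine_enable(3, vals));	/* right on -> 1 */
	printf("left only: %d\n", combine_enable(1, vals));	/* left off -> 0 */
	return 0;
}
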
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 88a9c20eb7a2..f25c24c743f9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec)
kfree(codec);
}
+static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec,
+ hda_nid_t fg, unsigned int power_state);
+
static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state);
@@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
AC_VERB_GET_SUBSYSTEM_ID, 0);
}
+ codec->epss = snd_hda_codec_get_supported_ps(codec,
+ codec->afg ? codec->afg : codec->mfg,
+ AC_PWRST_EPSS);
+
/* power-up all before initialization */
hda_set_power_state(codec,
codec->afg ? codec->afg : codec->mfg,
@@ -1386,6 +1393,44 @@ int snd_hda_codec_configure(struct hda_codec *codec)
}
EXPORT_SYMBOL_HDA(snd_hda_codec_configure);
+/* update the stream-id if changed */
+static void update_pcm_stream_id(struct hda_codec *codec,
+ struct hda_cvt_setup *p, hda_nid_t nid,
+ u32 stream_tag, int channel_id)
+{
+ unsigned int oldval, newval;
+
+ if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
+ oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
+ newval = (stream_tag << 4) | channel_id;
+ if (oldval != newval)
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_CHANNEL_STREAMID,
+ newval);
+ p->stream_tag = stream_tag;
+ p->channel_id = channel_id;
+ }
+}
+
+/* update the format-id if changed */
+static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p,
+ hda_nid_t nid, int format)
+{
+ unsigned int oldval;
+
+ if (p->format_id != format) {
+ oldval = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_STREAM_FORMAT, 0);
+ if (oldval != format) {
+ msleep(1);
+ snd_hda_codec_write(codec, nid, 0,
+ AC_VERB_SET_STREAM_FORMAT,
+ format);
+ }
+ p->format_id = format;
+ }
+}
+
/**
* snd_hda_codec_setup_stream - set up the codec for streaming
* @codec: the CODEC to set up
@@ -1400,7 +1445,6 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
{
struct hda_codec *c;
struct hda_cvt_setup *p;
- unsigned int oldval, newval;
int type;
int i;
@@ -1413,29 +1457,13 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
p = get_hda_cvt_setup(codec, nid);
if (!p)
return;
- /* update the stream-id if changed */
- if (p->stream_tag != stream_tag || p->channel_id != channel_id) {
- oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- newval = (stream_tag << 4) | channel_id;
- if (oldval != newval)
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- newval);
- p->stream_tag = stream_tag;
- p->channel_id = channel_id;
- }
- /* update the format-id if changed */
- if (p->format_id != format) {
- oldval = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_STREAM_FORMAT, 0);
- if (oldval != format) {
- msleep(1);
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_STREAM_FORMAT,
- format);
- }
- p->format_id = format;
- }
+
+ if (codec->pcm_format_first)
+ update_pcm_format(codec, p, nid, format);
+ update_pcm_stream_id(codec, p, nid, stream_tag, channel_id);
+ if (!codec->pcm_format_first)
+ update_pcm_format(codec, p, nid, format);
+
p->active = 1;
p->dirty = 0;
@@ -3497,7 +3525,7 @@ static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg
{
int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE);
- if (sup < 0)
+ if (sup == -1)
return false;
if (sup & power_state)
return true;
@@ -3522,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
/* this delay seems necessary to avoid click noise at power-down */
if (power_state == AC_PWRST_D3) {
/* transition time less than 10ms for power down */
- bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS);
- msleep(epss ? 10 : 100);
+ msleep(codec->epss ? 10 : 100);
}
/* repeat power states setting at most 10 times*/
@@ -4433,6 +4460,8 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
* then there is no need to go through power up here.
*/
if (codec->power_on) {
+ if (codec->power_transition < 0)
+ codec->power_transition = 0;
spin_unlock(&codec->power_lock);
return;
}
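
snd_hda_codec_setup_stream() is refactored into update_pcm_stream_id()/update_pcm_format() helpers, and the new pcm_format_first codec flag decides whether the format is programmed before or after the stream tag. The ordering trick reduces to the sketch below; the structures and helpers are stand-ins, not the real HDA code:

#include <stdio.h>

struct fake_codec {
	unsigned int pcm_format_first:1;	/* quirk: program format first */
};

static void set_format(void)
{
	puts("set stream format");
}

static void set_stream_id(void)
{
	puts("set stream tag / channel id");
}

static void setup_stream(const struct fake_codec *c)
{
	if (c->pcm_format_first)
		set_format();
	set_stream_id();
	if (!c->pcm_format_first)
		set_format();
}

int main(void)
{
	struct fake_codec quirky = { .pcm_format_first = 1 };
	struct fake_codec normal = { .pcm_format_first = 0 };

	setup_stream(&quirky);
	setup_stream(&normal);
	return 0;
}
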
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index c422d330ca54..e5a7e19a8071 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -861,6 +861,8 @@ struct hda_codec {
unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
unsigned int no_jack_detect:1; /* Machine has no jack-detection */
+ unsigned int pcm_format_first:1; /* PCM format must be set first */
+ unsigned int epss:1; /* supporting EPSS? */
#ifdef CONFIG_SND_HDA_POWER_SAVE
unsigned int power_on :1; /* current (global) power-state */
int power_transition; /* power-state in transition */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c8aced182fd1..60882c62f180 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, CPT},"
"{Intel, PPT},"
"{Intel, LPT},"
+ "{Intel, LPT_LP},"
"{Intel, HPT},"
"{Intel, PBG},"
"{Intel, SCH},"
@@ -3270,6 +3271,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
{ PCI_DEVICE(0x8086, 0x8c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
+ /* Lynx Point-LP */
+ { PCI_DEVICE(0x8086, 0x9c20),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
+ /* Lynx Point-LP */
+ { PCI_DEVICE(0x8086, 0x9c21),
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0c0c),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 7e46258fc700..6894ec66258c 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
if (digi1 & AC_DIG1_EMPHASIS)
snd_iprintf(buffer, " Preemphasis");
if (digi1 & AC_DIG1_COPYRIGHT)
- snd_iprintf(buffer, " Copyright");
+ snd_iprintf(buffer, " Non-Copyright");
if (digi1 & AC_DIG1_NONAUDIO)
snd_iprintf(buffer, " Non-Audio");
if (digi1 & AC_DIG1_PROFESSIONAL)
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index d0d3540e39e7..49750a96d649 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -246,7 +246,7 @@ static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
AC_VERB_SET_AMP_GAIN_MUTE,
AMP_OUT_UNMUTE);
}
- if (dac)
+ if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP))
snd_hda_codec_write(codec, dac, 0,
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO);
}
@@ -261,7 +261,7 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc)
AC_VERB_SET_AMP_GAIN_MUTE,
AMP_IN_UNMUTE(0));
}
- if (adc)
+ if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP))
snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE,
AMP_IN_UNMUTE(0));
}
@@ -275,6 +275,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
int type = dir ? HDA_INPUT : HDA_OUTPUT;
struct snd_kcontrol_new knew =
HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type);
+ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) {
+ snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid);
+ return 0;
+ }
sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
}
@@ -286,6 +290,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx,
int type = dir ? HDA_INPUT : HDA_OUTPUT;
struct snd_kcontrol_new knew =
HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type);
+ if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) {
+ snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid);
+ return 0;
+ }
sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]);
return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec));
}
@@ -464,50 +472,17 @@ exit:
}
/*
- * PCM stuffs
+ * PCM callbacks
*/
-static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
- u32 stream_tag,
- int channel_id, int format)
+static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
{
- unsigned int oldval, newval;
-
- if (!nid)
- return;
-
- snd_printdd("ca0132_setup_stream: "
- "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n",
- nid, stream_tag, channel_id, format);
-
- /* update the format-id if changed */
- oldval = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_STREAM_FORMAT,
- 0);
- if (oldval != format) {
- msleep(20);
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_STREAM_FORMAT,
- format);
- }
-
- oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- newval = (stream_tag << 4) | channel_id;
- if (oldval != newval) {
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- newval);
- }
-}
-
-static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
-{
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
+ struct ca0132_spec *spec = codec->spec;
+ return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
+ hinfo);
}
-/*
- * PCM callbacks
- */
static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
unsigned int stream_tag,
@@ -515,10 +490,8 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
-
- return 0;
+ return snd_hda_multi_out_analog_prepare(codec, &spec->multiout,
+ stream_tag, format, substream);
}
static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -526,92 +499,45 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_cleanup_stream(codec, spec->dacs[0]);
-
- return 0;
+ return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
}
/*
* Digital out
*/
-static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
+static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format);
-
- return 0;
+ return snd_hda_multi_out_dig_open(codec, &spec->multiout);
}
-static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ca0132_spec *spec = codec->spec;
-
- ca0132_cleanup_stream(codec, spec->dig_out);
-
- return 0;
-}
-
-/*
- * Analog capture
- */
-static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
+static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
unsigned int stream_tag,
unsigned int format,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->adcs[substream->number],
- stream_tag, 0, format);
-
- return 0;
+ return snd_hda_multi_out_dig_prepare(codec, &spec->multiout,
+ stream_tag, format, substream);
}
-static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
+static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_cleanup_stream(codec, spec->adcs[substream->number]);
-
- return 0;
+ return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
}
-/*
- * Digital capture
- */
-static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
+static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
{
struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format);
-
- return 0;
-}
-
-static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ca0132_spec *spec = codec->spec;
-
- ca0132_cleanup_stream(codec, spec->dig_in);
-
- return 0;
+ return snd_hda_multi_out_dig_close(codec, &spec->multiout);
}
/*
@@ -621,6 +547,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = {
.channels_min = 2,
.channels_max = 2,
.ops = {
+ .open = ca0132_playback_pcm_open,
.prepare = ca0132_playback_pcm_prepare,
.cleanup = ca0132_playback_pcm_cleanup
},
@@ -630,10 +557,6 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
- .ops = {
- .prepare = ca0132_capture_pcm_prepare,
- .cleanup = ca0132_capture_pcm_cleanup
- },
};
static struct hda_pcm_stream ca0132_pcm_digital_playback = {
@@ -641,6 +564,8 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = {
.channels_min = 2,
.channels_max = 2,
.ops = {
+ .open = ca0132_dig_playback_pcm_open,
+ .close = ca0132_dig_playback_pcm_close,
.prepare = ca0132_dig_playback_pcm_prepare,
.cleanup = ca0132_dig_playback_pcm_cleanup
},
@@ -650,10 +575,6 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = {
.substreams = 1,
.channels_min = 2,
.channels_max = 2,
- .ops = {
- .prepare = ca0132_dig_capture_pcm_prepare,
- .cleanup = ca0132_dig_capture_pcm_cleanup
- },
};
static int ca0132_build_pcms(struct hda_codec *codec)
@@ -928,18 +849,16 @@ static int ca0132_build_controls(struct hda_codec *codec)
spec->dig_out);
if (err < 0)
return err;
- err = add_out_volume(codec, spec->dig_out, "IEC958");
+ err = snd_hda_create_spdif_share_sw(codec, &spec->multiout);
if (err < 0)
return err;
+ /* spec->multiout.share_spdif = 1; */
}
if (spec->dig_in) {
err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
if (err < 0)
return err;
- err = add_in_volume(codec, spec->dig_in, "IEC958");
- if (err < 0)
- return err;
}
return 0;
}
@@ -961,6 +880,9 @@ static void ca0132_config(struct hda_codec *codec)
struct ca0132_spec *spec = codec->spec;
struct auto_pin_cfg *cfg = &spec->autocfg;
+ codec->pcm_format_first = 1;
+ codec->no_sticky_stream = 1;
+
/* line-outs */
cfg->line_outs = 1;
cfg->line_out_pins[0] = 0x0b; /* front */
@@ -988,14 +910,24 @@ static void ca0132_config(struct hda_codec *codec)
/* Mic-in */
spec->input_pins[0] = 0x12;
- spec->input_labels[0] = "Mic-In";
+ spec->input_labels[0] = "Mic";
spec->adcs[0] = 0x07;
/* Line-In */
spec->input_pins[1] = 0x11;
- spec->input_labels[1] = "Line-In";
+ spec->input_labels[1] = "Line";
spec->adcs[1] = 0x08;
spec->num_inputs = 2;
+
+ /* SPDIF I/O */
+ spec->dig_out = 0x05;
+ spec->multiout.dig_out_nid = spec->dig_out;
+ cfg->dig_out_pins[0] = 0x0c;
+ cfg->dig_outs = 1;
+ cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
+ spec->dig_in = 0x09;
+ cfg->dig_in_pin = 0x0e;
+ cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
}
static void ca0132_init_chip(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14361184ae1e..5e22a8f43d2e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
};
static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO),
SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
- SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
@@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
- SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
- SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
- SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
- SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
{}
};
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 641408dc28c0..8f23374fa642 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1165,14 +1165,20 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
+ return 0;
+}
+
+static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
+ struct hda_codec *codec,
+ struct snd_pcm_substream *substream)
+{
struct hdmi_spec *spec = codec->spec;
int cvt_idx, pin_idx;
struct hdmi_spec_per_cvt *per_cvt;
struct hdmi_spec_per_pin *per_pin;
int pinctl;
- snd_hda_codec_cleanup_stream(codec, hinfo->nid);
-
if (hinfo->nid) {
cvt_idx = cvt_nid_to_cvt_index(spec, hinfo->nid);
if (snd_BUG_ON(cvt_idx < 0))
@@ -1195,12 +1201,12 @@ static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
pinctl & ~PIN_OUT);
snd_hda_spdif_ctls_unassign(codec, pin_idx);
}
-
return 0;
}
static const struct hda_pcm_ops generic_ops = {
.open = hdmi_pcm_open,
+ .close = hdmi_pcm_close,
.prepare = generic_hdmi_playback_pcm_prepare,
.cleanup = generic_hdmi_playback_pcm_cleanup,
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f141395dfee6..4f81dd44c837 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -203,6 +203,7 @@ struct alc_spec {
unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
unsigned int inv_dmic_fixup:1; /* has inverted digital-mic workaround */
unsigned int inv_dmic_muted:1; /* R-ch of inv d-mic is muted? */
+ unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */
/* auto-mute control */
int automute_mode;
@@ -4323,7 +4324,8 @@ static int alc_parse_auto_config(struct hda_codec *codec,
return 0; /* can't find valid BIOS pin config */
}
- if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
+ if (!spec->no_primary_hp &&
+ cfg->line_out_type == AUTO_PIN_SPEAKER_OUT &&
cfg->line_outs <= cfg->hp_outs) {
/* use HP as primary out */
cfg->speaker_outs = cfg->line_outs;
@@ -5050,6 +5052,7 @@ enum {
ALC889_FIXUP_MBP_VREF,
ALC889_FIXUP_IMAC91_VREF,
ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -5171,6 +5174,17 @@ static void alc889_fixup_imac91_vref(struct hda_codec *codec,
spec->keep_vref_in_automute = 1;
}
+/* Don't take the HP output as primary;
+ * strangely, the speaker output doesn't work on the VAIO Z through DAC 0x05
+ */
+static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ if (action == ALC_FIXUP_ACT_PRE_PROBE)
+ spec->no_primary_hp = 1;
+}
+
static const struct alc_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = ALC_FIXUP_PINS,
@@ -5357,6 +5371,10 @@ static const struct alc_fixup alc882_fixups[] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
},
+ [ALC882_FIXUP_NO_PRIMARY_HP] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc882_fixup_no_primary_hp,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -5391,6 +5409,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
/* All Apple entries are in codec SSIDs */
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -5432,6 +5451,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = {
{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
{.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
{}
};
@@ -6079,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = {
[ALC269_FIXUP_PCM_44K] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc269_fixup_pcm_44k,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_QUANTA_MUTE
},
[ALC269_FIXUP_STEREO_DMIC] = {
.type = ALC_FIXUP_FUNC,
@@ -6186,9 +6208,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
#if 0
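
The Realtek hunks add a new fixup that is selected from the PCI-SSID quirk table and applied through a function callback at pre-probe time. A rough sketch of that table-driven quirk pattern (entries and names invented for illustration, not the actual alc882 tables):

#include <stdio.h>

enum fixup_type { FIXUP_FUNC, FIXUP_FLAG };

struct fixup {
	enum fixup_type type;
	void (*func)(int action);      /* used when type == FIXUP_FUNC */
	unsigned int flag;             /* used when type == FIXUP_FLAG */
};

struct quirk {
	unsigned short vendor, device; /* subsystem IDs, as in SND_PCI_QUIRK */
	int fixup_id;
};

static void no_primary_hp(int action)
{
	if (action == 0 /* pre-probe */)
		printf("fixup: don't promote HP pins to primary output\n");
}

enum { FIX_NO_PRIMARY_HP };

static const struct fixup fixups[] = {
	[FIX_NO_PRIMARY_HP] = { .type = FIXUP_FUNC, .func = no_primary_hp },
};

static const struct quirk quirks[] = {
	{ 0x104d, 0x905a, FIX_NO_PRIMARY_HP },   /* hypothetical entry */
	{ 0, 0, -1 },                            /* terminator */
};

static void apply_quirk(unsigned short v, unsigned short d)
{
	const struct quirk *q;

	for (q = quirks; q->fixup_id >= 0; q++) {
		if (q->vendor == v && q->device == d) {
			const struct fixup *f = &fixups[q->fixup_id];
			if (f->type == FIXUP_FUNC)
				f->func(0);
			return;
		}
	}
}

int main(void)
{
	apply_quirk(0x104d, 0x905a);   /* matches; fixup callback runs */
	apply_quirk(0x1234, 0x5678);   /* no match; nothing happens */
	return 0;
}
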
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index a1596a3b171c..6f806d3e56bb 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -101,6 +101,8 @@ enum {
STAC_92HD83XXX_HP_cNB11_INTQUAD,
STAC_HP_DV7_4000,
STAC_HP_ZEPHYR,
+ STAC_92HD83XXX_HP_LED,
+ STAC_92HD83XXX_HP_INV_LED,
STAC_92HD83XXX_MODELS
};
@@ -1675,6 +1677,8 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
[STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
[STAC_HP_DV7_4000] = "hp-dv7-4000",
[STAC_HP_ZEPHYR] = "hp-zephyr",
+ [STAC_92HD83XXX_HP_LED] = "hp-led",
+ [STAC_92HD83XXX_HP_INV_LED] = "hp-inv-led",
};
static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
@@ -1729,6 +1733,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
"HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
"HP", STAC_HP_ZEPHYR),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660,
+ "HP Mini", STAC_92HD83XXX_HP_LED),
{} /* terminator */
};
@@ -4266,7 +4272,8 @@ static int stac92xx_init(struct hda_codec *codec)
unsigned int gpio;
int i;
- snd_hda_sequence_write(codec, spec->init);
+ if (spec->init)
+ snd_hda_sequence_write(codec, spec->init);
/* power down adcs initially */
if (spec->powerdown_adcs)
@@ -4414,7 +4421,12 @@ static int stac92xx_init(struct hda_codec *codec)
snd_hda_jack_report_sync(codec);
/* sync mute LED */
- snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+ if (spec->gpio_led) {
+ if (spec->vmaster_mute.hook)
+ snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
+ else /* the very first init call doesn't have vmaster yet */
+ stac92xx_update_led_status(codec, false);
+ }
/* sync the power-map */
if (spec->num_pwrs)
@@ -4531,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec,
struct auto_pin_cfg *cfg = &spec->autocfg;
int i;
+ if (cfg->speaker_outs == 0)
+ return;
+
for (i = 0; i < cfg->line_outs; i++) {
if (presence)
break;
@@ -5507,6 +5522,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
static int patch_stac92hd83xxx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
+ int default_polarity = -1; /* no default cfg */
int err;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
@@ -5518,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
}
+ codec->epss = 0; /* longer delay needed for D3 */
codec->no_trigger_sense = 1;
codec->spec = spec;
@@ -5555,9 +5572,15 @@ again:
case STAC_HP_ZEPHYR:
spec->init = stac92hd83xxx_hp_zephyr_init;
break;
+ case STAC_92HD83XXX_HP_LED:
+ default_polarity = 0;
+ break;
+ case STAC_92HD83XXX_HP_INV_LED:
+ default_polarity = 1;
+ break;
}
- if (find_mute_led_cfg(codec, -1/*no default cfg*/))
+ if (find_mute_led_cfg(codec, default_polarity))
snd_printd("mute LED gpio %d polarity %d\n",
spec->gpio_led,
spec->gpio_led_polarity);
@@ -5730,7 +5753,6 @@ again:
/* fallthru */
case 0x111d76b4: /* 6 Port without Analog Mixer */
case 0x111d76b5:
- spec->init = stac92hd71bxx_core_init;
codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd71bxx_dmic_nids,
@@ -5755,7 +5777,6 @@ again:
spec->stream_delay = 40; /* 40 milliseconds */
/* disable VSW */
- spec->init = stac92hd71bxx_core_init;
unmute_init++;
snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0);
snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3);
@@ -5770,7 +5791,6 @@ again:
/* fallthru */
default:
- spec->init = stac92hd71bxx_core_init;
codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs;
spec->num_dmics = stac92xx_connected_ports(codec,
stac92hd71bxx_dmic_nids,
@@ -5778,6 +5798,9 @@ again:
break;
}
+ if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB)
+ spec->init = stac92hd71bxx_core_init;
+
if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP)
snd_hda_sequence_write_cache(codec, unmute_init);
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 90645560ed39..430771776915 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1752,6 +1752,14 @@ static int via_suspend(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
vt1708_stop_hp_work(spec);
+
+ if (spec->codec_type == VT1802) {
+ /* Fix pop noise on headphones */
+ int i;
+ for (i = 0; i < spec->autocfg.hp_outs; i++)
+ snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0);
+ }
+
return 0;
}
#endif
@@ -3226,7 +3234,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
int imux_is_smixer;
- unsigned int parm;
+ unsigned int parm, parm2;
/* MUX6 (1eh) = stereo mixer */
imux_is_smixer =
snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
@@ -3249,7 +3257,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
parm = AC_PWRST_D3;
set_pin_power_state(codec, 0x27, &parm);
update_power_state(codec, 0x1a, parm);
- update_power_state(codec, 0xb, parm);
+ parm2 = parm; /* for pin 0x0b */
/* PW2 (26h), AOW2 (ah) */
parm = AC_PWRST_D3;
@@ -3264,6 +3272,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
if (!spec->hp_independent_mode) /* check for redirected HP */
set_pin_power_state(codec, 0x28, &parm);
update_power_state(codec, 0x8, parm);
+ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
+ parm = parm2;
+ update_power_state(codec, 0xb, parm);
/* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index d1ab43706735..5579b08bb35b 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -851,6 +851,8 @@ static int __devinit lx_pcm_create(struct lx6464es *chip)
/* hardcoded device name & channel count */
err = snd_pcm_new(chip->card, (char *)card_name, 0,
1, 1, &pcm);
+ if (err < 0)
+ return err;
pcm->private_data = chip;
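
The lx6464es change checks the return value of snd_pcm_new() before touching the object it was asked to create; the out-parameter is only meaningful on success. A tiny standalone sketch of the rule, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct pcm { int id; };

/* Hypothetical constructor: fills *out only when it returns 0. */
static int pcm_new(int fail, struct pcm **out)
{
	if (fail)
		return -12;            /* e.g. -ENOMEM */
	*out = malloc(sizeof(**out));
	if (!*out)
		return -12;
	(*out)->id = 42;
	return 0;
}

int main(void)
{
	struct pcm *pcm = NULL;
	int err = pcm_new(0, &pcm);

	if (err < 0)                   /* bail out before dereferencing */
		return 1;
	printf("pcm id %d\n", pcm->id);
	free(pcm);
	return 0;
}
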
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index b8ac8710f47f..b12308b5ba2a 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6585,7 +6585,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
snd_printk(KERN_ERR "HDSPM: "
"unable to kmalloc Mixer memory of %d Bytes\n",
(int)sizeof(struct hdspm_mixer));
- return err;
+ return -ENOMEM;
}
hdspm->port_names_in = NULL;
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 512434efcc31..805ab6e9a78f 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1377,8 +1377,9 @@ static int __devinit sis_chip_create(struct snd_card *card,
if (rc)
goto error_out_cleanup;
- if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
- sis)) {
+ rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME,
+ sis);
+ if (rc) {
dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq);
goto error_out_cleanup;
}
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index f5ceb6f282de..210cafe04890 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -143,7 +143,7 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int snd_pmac_driver_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 1aa52eff526a..9b18b5243a56 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -1040,6 +1040,7 @@ static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev)
GFP_KERNEL);
if (!the_card.null_buffer_start_vaddr) {
pr_info("%s: nullbuffer alloc failed\n", __func__);
+ ret = -ENOMEM;
goto clean_preallocate;
}
pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__,
diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c
index 318c5ba5360f..dfb744381c42 100644
--- a/sound/soc/blackfin/bf6xx-sport.c
+++ b/sound/soc/blackfin/bf6xx-sport.c
@@ -413,7 +413,14 @@ EXPORT_SYMBOL(sport_create);
void sport_delete(struct sport_device *sport)
{
+ if (sport->tx_desc)
+ dma_free_coherent(NULL, sport->tx_desc_size,
+ sport->tx_desc, 0);
+ if (sport->rx_desc)
+ dma_free_coherent(NULL, sport->rx_desc_size,
+ sport->rx_desc, 0);
sport_free_resource(sport);
+ kfree(sport);
}
EXPORT_SYMBOL(sport_delete);
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index 3c795921c5f6..23b40186f9b8 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -2406,6 +2406,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
/* Setup AB8500 according to board-settings */
pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
+
+ /* Inform SoC Core that we have our own I/O arrangements. */
+ codec->control_data = (void *)true;
+
status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
if (status < 0) {
pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 8c39dddd7d00..11b1b714b8b5 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -186,6 +186,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "AD1980 SoC Audio Codec\n");
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 6276e352125f..8f726c063f42 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -581,6 +581,8 @@ static int mc13783_probe(struct snd_soc_codec *codec)
{
struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+ codec->control_data = priv->mc13xxx;
+
mc13xxx_lock(priv->mc13xxx);
/* these are the reset values */
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 8af6a5245b18..df2f99d1d428 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -239,6 +239,7 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
{"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
{"LO", NULL, "DAC"}, /* dac --> line_out */
+ {"LINE_IN", NULL, "VAG_POWER"},
{"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */
{"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */
@@ -1357,8 +1358,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
if (ret)
goto err;
- snd_soc_dapm_new_widgets(&codec->dapm);
-
return 0;
err:
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 982e437799a8..33c0f3d39c87 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -340,6 +340,7 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec)
printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION);
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
goto codec_err;
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 0ff1e70b7770..c084c549942e 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -653,7 +653,7 @@ int twl6040_get_hs_step_size(struct snd_soc_codec *codec)
{
struct twl6040 *twl6040 = codec->control_data;
- if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_2)
+ if (twl6040_get_revid(twl6040) < TWL6040_REV_ES1_3)
/* For ES under ES_1.3 HS step is 2 mV */
return 2;
else
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 6537f16d383e..e33d327396ad 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -128,13 +128,9 @@ SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE),
-ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),
SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
-SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5,
- ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),
ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
@@ -236,8 +232,6 @@ ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE);
-ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
@@ -349,10 +343,6 @@ SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
NULL, 0),
SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
NULL, 0),
-SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0,
- NULL, 0),
-SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0,
- NULL, 0),
SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
NULL, 0),
@@ -466,8 +456,6 @@ ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
-ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"),
-ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),
ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
@@ -553,8 +541,6 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
{ name, "EQ4", "EQ4" }, \
{ name, "DRC1L", "DRC1L" }, \
{ name, "DRC1R", "DRC1R" }, \
- { name, "DRC2L", "DRC2L" }, \
- { name, "DRC2R", "DRC2R" }, \
{ name, "LHPF1", "LHPF1" }, \
{ name, "LHPF2", "LHPF2" }, \
{ name, "LHPF3", "LHPF3" }, \
@@ -639,6 +625,15 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "AIF2 Capture", NULL, "SYSCLK" },
{ "AIF3 Capture", NULL, "SYSCLK" },
+ { "IN1L PGA", NULL, "IN1L" },
+ { "IN1R PGA", NULL, "IN1R" },
+
+ { "IN2L PGA", NULL, "IN2L" },
+ { "IN2R PGA", NULL, "IN2R" },
+
+ { "IN3L PGA", NULL, "IN3L" },
+ { "IN3R PGA", NULL, "IN3R" },
+
ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -675,8 +670,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
- ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"),
- ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),
ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 8033f7065189..01ebbcc5c6a4 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -681,6 +681,18 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "AIF2 Capture", NULL, "SYSCLK" },
{ "AIF3 Capture", NULL, "SYSCLK" },
+ { "IN1L PGA", NULL, "IN1L" },
+ { "IN1R PGA", NULL, "IN1R" },
+
+ { "IN2L PGA", NULL, "IN2L" },
+ { "IN2R PGA", NULL, "IN2R" },
+
+ { "IN3L PGA", NULL, "IN3L" },
+ { "IN3R PGA", NULL, "IN3R" },
+
+ { "IN4L PGA", NULL, "IN4L" },
+ { "IN4R PGA", NULL, "IN4R" },
+
ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index eaf65863ec21..ce6720073798 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
/* VMID 2*250k */
snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
WM8962_VMID_SEL_MASK, 0x100);
+
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ msleep(100);
break;
case SND_SOC_BIAS_OFF:
@@ -3730,21 +3733,6 @@ static int wm8962_runtime_resume(struct device *dev)
regcache_sync(wm8962->regmap);
- regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
- WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA,
- WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA);
-
- /* Bias enable at 2*50k for ramp */
- regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
- WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA,
- WM8962_BIAS_ENA | 0x180);
-
- msleep(5);
-
- /* VMID back to 2x250k for standby */
- regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1,
- WM8962_VMID_SEL_MASK, 0x100);
-
return 0;
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index bb62f4b3d563..6c9eeca85b95 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- bclk_rate = params_rate(params) * 2;
+ bclk_rate = params_rate(params) * 4;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
@@ -3253,10 +3253,13 @@ static void wm8994_mic_work(struct work_struct *work)
int ret;
int report;
+ pm_runtime_get_sync(dev);
+
ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, &reg);
if (ret < 0) {
dev_err(dev, "Failed to read microphone status: %d\n",
ret);
+ pm_runtime_put(dev);
return;
}
@@ -3299,6 +3302,8 @@ static void wm8994_mic_work(struct work_struct *work)
snd_soc_jack_report(priv->micdet[1].jack, report,
SND_JACK_HEADSET | SND_JACK_BTN_0);
+
+ pm_runtime_put(dev);
}
static irqreturn_t wm8994_mic_irq(int irq, void *data)
@@ -3421,12 +3426,15 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
int reg;
bool present;
+ pm_runtime_get_sync(codec->dev);
+
mutex_lock(&wm8994->accdet_lock);
reg = snd_soc_read(codec, WM1811_JACKDET_CTRL);
if (reg < 0) {
dev_err(codec->dev, "Failed to read jack status: %d\n", reg);
mutex_unlock(&wm8994->accdet_lock);
+ pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3491,6 +3499,7 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
SND_JACK_MECHANICAL | SND_JACK_HEADSET |
wm8994->btn_mask);
+ pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
@@ -3602,6 +3611,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA))
return IRQ_HANDLED;
+ pm_runtime_get_sync(codec->dev);
+
/* We may occasionally read a detection without an impedence
* range being provided - if that happens loop again.
*/
@@ -3612,6 +3623,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_err(codec->dev,
"Failed to read mic detect status: %d\n",
reg);
+ pm_runtime_put(codec->dev);
return IRQ_NONE;
}
@@ -3639,6 +3651,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
dev_warn(codec->dev, "Accessory detection with no callback\n");
out:
+ pm_runtime_put(codec->dev);
return IRQ_HANDLED;
}
@@ -4025,6 +4038,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
break;
case WM8958:
if (wm8994->revision < 1) {
+ snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+ ARRAY_SIZE(wm8994_intercon));
snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
ARRAY_SIZE(wm8994_revd_intercon));
snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
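
The wm8994 hunks bracket the mic-detect and jack-detect handlers with pm_runtime_get_sync()/pm_runtime_put() and add a put on every early-exit path. A small sketch of the underlying rule with a mock reference count: each acquisition must be released on all return paths, which usually means funnelling exits through one label.

#include <stdio.h>

static int refcount;

static void get(void) { refcount++; }
static void put(void) { refcount--; }

/* Hypothetical handler: any early return must still drop the reference. */
static int handle_event(int read_fails, int nothing_to_do)
{
	int ret = 0;

	get();                         /* keep the device powered while we work */

	if (read_fails) {
		ret = -5;              /* e.g. -EIO */
		goto out;              /* single exit keeps get/put balanced */
	}
	if (nothing_to_do)
		goto out;

	puts("processed event");
out:
	put();
	return ret;
}

int main(void)
{
	handle_event(1, 0);
	handle_event(0, 1);
	handle_event(0, 0);
	printf("refcount after all paths: %d (should be 0)\n", refcount);
	return 0;
}
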
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 099e6ec32125..c6d2076a796b 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -148,7 +148,7 @@ SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),
SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
-SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
+SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0),
SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv),
@@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]);
/* Mic select */
static const struct snd_kcontrol_new wm9712_mic_src_controls =
-SOC_DAPM_ENUM("Route", wm9712_enum[7]);
+SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]);
/* diff select */
static const struct snd_kcontrol_new wm9712_diff_sel_controls =
@@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0,
&wm9712_capture_selectl_controls),
SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0,
&wm9712_capture_selectr_controls),
-SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0,
+SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0,
+ &wm9712_mic_src_controls),
+SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
&wm9712_mic_src_controls),
SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
&wm9712_diff_sel_controls),
@@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0),
SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0),
SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0),
SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0),
+SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1),
SND_SOC_DAPM_OUTPUT("MONOOUT"),
SND_SOC_DAPM_OUTPUT("HPOUTL"),
@@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
{"Mic PGA", NULL, "MIC1"},
{"Mic PGA", NULL, "MIC2"},
+ /* microphones */
+ {"Differential Mic", NULL, "MIC1"},
+ {"Differential Mic", NULL, "MIC2"},
+ {"Left Mic Select Source", "Mic 1", "MIC1"},
+ {"Left Mic Select Source", "Mic 2", "MIC2"},
+ {"Left Mic Select Source", "Stereo", "MIC1"},
+ {"Left Mic Select Source", "Differential", "Differential Mic"},
+ {"Right Mic Select Source", "Mic 1", "MIC1"},
+ {"Right Mic Select Source", "Mic 2", "MIC2"},
+ {"Right Mic Select Source", "Stereo", "MIC2"},
+ {"Right Mic Select Source", "Differential", "Differential Mic"},
+
/* left capture selector */
{"Left Capture Select", "Mic", "MIC1"},
{"Left Capture Select", "Speaker Mixer", "Speaker Mixer"},
@@ -619,6 +634,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
{
int ret = 0;
+ codec->control_data = codec; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0) {
printk(KERN_ERR "wm9712: failed to register AC97 codec\n");
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 3eb19fb71d17..d0b8a3287a85 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1196,6 +1196,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
if (wm9713 == NULL)
return -ENOMEM;
snd_soc_codec_set_drvdata(codec, wm9713);
+ codec->control_data = wm9713; /* we don't use regmap! */
ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0);
if (ret < 0)
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 95441bfc8190..ce5e5cd254dd 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -380,14 +380,20 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev)
static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream)
{
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (dev->txnumevt) /* enable FIFO */
+ if (dev->txnumevt) { /* enable FIFO */
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
+ FIFO_ENABLE);
mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL,
FIFO_ENABLE);
+ }
mcasp_start_tx(dev);
} else {
- if (dev->rxnumevt) /* enable FIFO */
+ if (dev->rxnumevt) { /* enable FIFO */
+ mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
+ FIFO_ENABLE);
mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL,
FIFO_ENABLE);
+ }
mcasp_start_rx(dev);
}
}
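
The McASP change clears FIFO_ENABLE before setting it again so the FIFO is re-armed from a known state on every start. A toy version of the clear-then-set idiom on a fake register word; the MMIO accessors are stand-ins, and the bit position is made up:

#include <stdio.h>
#include <stdint.h>

#define FIFO_ENABLE (1u << 16)     /* hypothetical bit position */

static uint32_t wfifoctl;          /* stands in for a memory-mapped register */

static void clr_bits(uint32_t *reg, uint32_t bits) { *reg &= ~bits; }
static void set_bits(uint32_t *reg, uint32_t bits) { *reg |=  bits; }

static void start_tx(void)
{
	/* force a 0 -> 1 transition so the FIFO resets even if the enable
	 * bit was left set by an earlier, aborted start */
	clr_bits(&wfifoctl, FIFO_ENABLE);
	set_bits(&wfifoctl, FIFO_ENABLE);
}

int main(void)
{
	wfifoctl = FIFO_ENABLE;        /* simulate a stale enable bit */
	start_tx();
	printf("WFIFOCTL = 0x%08x\n", wfifoctl);
	return 0;
}
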
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 28dd76c7cb1c..81d7728cf67f 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -380,13 +380,14 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
static struct snd_soc_dai_driver imx_ssi_dai = {
.probe = imx_ssi_dai_probe,
.playback = {
- .channels_min = 1,
+ /* The SSI does not support monaural audio. */
+ .channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index fa4556750451..7646dd7f30cb 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -458,7 +458,13 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->clk);
- return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
+ err = snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
+ if (!err)
+ return 0;
+ dev_err(&pdev->dev, "snd_soc_register_dai failed\n");
+
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
err_ioremap:
iounmap(priv->io);
diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig
index 99a997f19bb9..b6fa77678d97 100644
--- a/sound/soc/mxs/Kconfig
+++ b/sound/soc/mxs/Kconfig
@@ -10,7 +10,7 @@ menuconfig SND_MXS_SOC
if SND_MXS_SOC
config SND_SOC_MXS_SGTL5000
- tristate "SoC Audio support for i.MX boards with sgtl5000"
+ tristate "SoC Audio support for MXS boards with sgtl5000"
depends on I2C
select SND_SOC_SGTL5000
help
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index aba71bfa33b1..b3030718c228 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -394,9 +394,14 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai)
{
struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+ struct mxs_saif *master_saif;
u32 scr, stat;
int ret;
+ master_saif = mxs_saif_get_master(saif);
+ if (!master_saif)
+ return -EINVAL;
+
/* mclk should already be set */
if (!saif->mclk && saif->mclk_in_use) {
dev_err(cpu_dai->dev, "set mclk first\n");
@@ -420,6 +425,25 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+ /* prepare clk in hw_param, enable in trigger */
+ clk_prepare(saif->clk);
+ if (saif != master_saif) {
+ /*
+ * Set an initial clock rate for the saif internal logic to work
+ * properly. This is important when working in EXTMASTER mode
+ * that uses the other saif's BITCLK&LRCLK but it still needs a
+ * basic clock which should be fast enough for the internal
+ * logic.
+ */
+ clk_enable(saif->clk);
+ ret = clk_set_rate(saif->clk, 24000000);
+ clk_disable(saif->clk);
+ if (ret)
+ return ret;
+
+ clk_prepare(master_saif->clk);
+ }
+
scr = __raw_readl(saif->base + SAIF_CTRL);
scr &= ~BM_SAIF_CTRL_WORD_LENGTH;
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 34835e8a9160..d33c48baaf71 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -745,7 +745,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux)
{
const char *signal, *src;
- if (mcbsp->pdata->mux_signal)
+ if (!mcbsp->pdata->mux_signal)
return -EINVAL;
switch (mux) {
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 1046083e90a0..acdd3ef14e08 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -820,3 +820,4 @@ module_platform_driver(asoc_mcbsp_driver);
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-mcbsp");
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 5a649da9122a..f0feb06615f8 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -441,3 +441,4 @@ module_platform_driver(omap_pcm_driver);
MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP PCM DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:omap-pcm-audio");
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index b7b2a1f91425..89b064650f14 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -20,7 +20,7 @@
#include <sound/pcm_params.h>
#include <plat/audio.h>
-#include <plat/dma.h>
+#include <mach/dma.h>
#include "dma.h"
#include "pcm.h"
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f219b2f7ee68..c501af6d8dbe 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -826,7 +826,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
}
if (!rtd->cpu_dai) {
- dev_dbg(card->dev, "CPU DAI %s not registered\n",
+ dev_err(card->dev, "CPU DAI %s not registered\n",
dai_link->cpu_dai_name);
return -EPROBE_DEFER;
}
@@ -857,14 +857,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
}
if (!rtd->codec_dai) {
- dev_dbg(card->dev, "CODEC DAI %s not registered\n",
+ dev_err(card->dev, "CODEC DAI %s not registered\n",
dai_link->codec_dai_name);
return -EPROBE_DEFER;
}
}
if (!rtd->codec) {
- dev_dbg(card->dev, "CODEC %s not registered\n",
+ dev_err(card->dev, "CODEC %s not registered\n",
dai_link->codec_name);
return -EPROBE_DEFER;
}
@@ -888,7 +888,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
rtd->platform = platform;
}
if (!rtd->platform) {
- dev_dbg(card->dev, "platform %s not registered\n",
+ dev_err(card->dev, "platform %s not registered\n",
dai_link->platform_name);
return -EPROBE_DEFER;
}
@@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card,
}
/* If the driver didn't set I/O up try regmap */
- if (!codec->control_data)
+ if (!codec->write && dev_get_regmap(codec->dev, NULL))
snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
if (driver->controls)
@@ -1481,6 +1481,8 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num)
return 0;
}
+ dev_err(card->dev, "%s not registered\n", aux_dev->codec_name);
+
return -EPROBE_DEFER;
}
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 7f8b3b7428bb..0c172938b82a 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -103,7 +103,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
}
/* Report before the DAPM sync to help users updating micbias status */
- blocking_notifier_call_chain(&jack->notifier, status, jack);
+ blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index d684df294c0c..e463529b38bb 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -177,7 +177,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
}
alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
- if (alc5632->gpio_hp_det == -ENODEV)
+ if (alc5632->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0c5bb33d258e..d4f14e492341 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
} else if (np) {
pdata->gpio_spkr_en = of_get_named_gpio(np,
"nvidia,spkr-en-gpios", 0);
- if (pdata->gpio_spkr_en == -ENODEV)
+ if (pdata->gpio_spkr_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_hp_mute = of_get_named_gpio(np,
"nvidia,hp-mute-gpios", 0);
- if (pdata->gpio_hp_mute == -ENODEV)
+ if (pdata->gpio_hp_mute == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_hp_det = of_get_named_gpio(np,
"nvidia,hp-det-gpios", 0);
- if (pdata->gpio_hp_det == -ENODEV)
+ if (pdata->gpio_hp_det == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_int_mic_en = of_get_named_gpio(np,
"nvidia,int-mic-en-gpios", 0);
- if (pdata->gpio_int_mic_en == -ENODEV)
+ if (pdata->gpio_int_mic_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
pdata->gpio_ext_mic_en = of_get_named_gpio(np,
"nvidia,ext-mic-en-gpios", 0);
- if (pdata->gpio_ext_mic_en == -ENODEV)
+ if (pdata->gpio_ext_mic_en == -EPROBE_DEFER)
return -EPROBE_DEFER;
}
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 62ac0285bfaf..057e28ef770e 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -21,7 +21,7 @@
#include <linux/mfd/dbx500-prcmu.h>
#include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index ee14d2dac2f5..5c472f335a64 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -19,7 +19,7 @@
#include <linux/slab.h>
#include <mach/hardware.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#include <sound/soc.h>
diff --git a/sound/soc/ux500/ux500_msp_i2s.h b/sound/soc/ux500/ux500_msp_i2s.h
index 7f71b4a0d4bc..2d9136da9865 100644
--- a/sound/soc/ux500/ux500_msp_i2s.h
+++ b/sound/soc/ux500/ux500_msp_i2s.h
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
-#include <mach/board-mop500-msp.h>
+#include <mach/msp.h>
#define MSP_INPUT_FREQ_APB 48000000
diff --git a/sound/sound_firmware.c b/sound/sound_firmware.c
index 7e96249536b4..37711a5d0d6b 100644
--- a/sound/sound_firmware.c
+++ b/sound/sound_firmware.c
@@ -23,14 +23,14 @@ static int do_mod_firmware_load(const char *fn, char **fp)
if (l <= 0 || l > 131072)
{
printk(KERN_INFO "Invalid firmware '%s'\n", fn);
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
return 0;
}
dp = vmalloc(l);
if (dp == NULL)
{
printk(KERN_INFO "Out of memory loading '%s'.\n", fn);
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
return 0;
}
pos = 0;
@@ -38,10 +38,10 @@ static int do_mod_firmware_load(const char *fn, char **fp)
{
printk(KERN_INFO "Failed to read '%s'.\n", fn);
vfree(dp);
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
return 0;
}
- filp_close(filp, current->files);
+ filp_close(filp, NULL);
*fp = dp;
return (int) l;
}
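
do_mod_firmware_load() above has to close the file on each of its error returns. A compact user-space equivalent of the same loader shape uses a single cleanup path so the handle is closed exactly once no matter which branch fails; this is only a sketch, not the kernel helper itself:

#include <stdio.h>
#include <stdlib.h>

/* Load a whole file into memory; returns length or 0, mirroring the
 * calling convention of do_mod_firmware_load(). Purely illustrative. */
static long load_file(const char *fn, char **fp)
{
	FILE *f = fopen(fn, "rb");
	long len = 0;
	char *buf = NULL;

	if (!f)
		return 0;
	if (fseek(f, 0, SEEK_END) || (len = ftell(f)) <= 0 || len > 131072)
		goto out;              /* invalid size: still close the file */
	buf = malloc(len);
	if (!buf) {
		len = 0;
		goto out;
	}
	rewind(f);
	if (fread(buf, 1, len, f) != (size_t)len) {
		free(buf);
		buf = NULL;
	}
out:
	fclose(f);                     /* exactly one close on every path */
	*fp = buf;
	return buf ? len : 0;
}

int main(int argc, char **argv)
{
	char *data = NULL;
	long n = load_file(argc > 1 ? argv[1] : "/etc/hostname", &data);

	printf("read %ld bytes\n", n);
	free(data);
	return 0;
}
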
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d5b5c3388e28..4a469f0cb6d4 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
struct snd_usb_audio *chip)
{
struct snd_card *card;
- struct list_head *p;
+ struct list_head *p, *n;
if (chip == (void *)-1L)
return;
@@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
snd_usb_stream_disconnect(p);
}
/* release the endpoint resources */
- list_for_each(p, &chip->ep_list) {
+ list_for_each_safe(p, n, &chip->ep_list) {
snd_usb_endpoint_free(p);
}
/* release the midi resources */
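
The card.c hunk switches to list_for_each_safe() because snd_usb_endpoint_free() unlinks and frees each entry while the list is being walked; the plain iterator would read the next pointer out of freed memory. The same idiom in plain C, with a hand-rolled singly linked list standing in for the kernel list helpers:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *p, *n;
	int i;

	/* build a short list */
	for (i = 0; i < 3; i++) {
		p = malloc(sizeof(*p));
		if (!p)
			return 1;
		p->val = i;
		p->next = head;
		head = p;
	}

	/* "safe" traversal: remember the next element before freeing the
	 * current one, exactly what list_for_each_safe() does for us */
	for (p = head; p; p = n) {
		n = p->next;           /* saved before p becomes invalid */
		printf("freeing %d\n", p->val);
		free(p);
	}
	return 0;
}
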
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 379baad3d5ad..5e634a2eb282 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
return 0;
/* If a clock source can't tell us whether it's valid, we assume it is */
- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
+ if (!uac2_control_is_readable(cs_desc->bmControls,
+ UAC2_CS_CONTROL_CLOCK_VALID - 1))
return 1;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 0f647d22cb4a..d6e2bb49c59c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
*
* For implicit feedback, next_packet_size() is unused.
*/
-static int next_packet_size(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
{
unsigned long flags;
int ret;
@@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep,
ep->retire_data_urb(ep->data_subs, urb);
}
-static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep,
- struct snd_urb_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < ctx->packets; ++i)
- ctx->packet_size[i] = next_packet_size(ep);
-}
-
/*
* Prepare a PLAYBACK urb for submission to the bus.
*/
@@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb)
goto exit_clear;
}
- prepare_outbound_urb_sizes(ep, ctx);
prepare_outbound_urb(ep, ctx);
} else {
retire_inbound_urb(ep, ctx);
@@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
/**
* snd_usb_endpoint_start: start an snd_usb_endpoint
*
- * @ep: the endpoint to start
+ * @ep: the endpoint to start
+ * @can_sleep: flag indicating whether the operation is executed in
+ * non-atomic context
*
* A call to this function will increment the use count of the endpoint.
* In case it is not already running, the URBs for this endpoint will be
@@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
*
* Returns an error if the URB submission failed, 0 in all other cases.
*/
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
{
int err;
unsigned int i;
@@ -822,8 +814,9 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
return 0;
/* just to be sure */
- deactivate_urbs(ep, 0, 1);
- wait_clear_urbs(ep);
+ deactivate_urbs(ep, 0, can_sleep);
+ if (can_sleep)
+ wait_clear_urbs(ep);
ep->active_mask = 0;
ep->unlink_mask = 0;
@@ -854,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
goto __error;
if (usb_pipeout(ep->pipe)) {
- prepare_outbound_urb_sizes(ep, urb->context);
prepare_outbound_urb(ep, urb->context);
} else {
prepare_inbound_urb(ep, urb->context);
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index ee2723fb174f..cbbbdf226d66 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep);
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
int force, int can_sleep, int wait);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
@@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_free(struct list_head *head);
int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
struct snd_usb_endpoint *sender,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a1298f379428..fd5e982fc98c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
}
}
-static int start_endpoints(struct snd_usb_substream *subs)
+static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
{
int err;
@@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs)
snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep);
ep->data_subs = subs;
- err = snd_usb_endpoint_start(ep);
+ err = snd_usb_endpoint_start(ep, can_sleep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
return err;
@@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs)
!test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) {
struct snd_usb_endpoint *ep = subs->sync_endpoint;
+ if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
+ subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+ err = usb_set_interface(subs->dev,
+ subs->sync_endpoint->iface,
+ subs->sync_endpoint->alt_idx);
+ if (err < 0) {
+ snd_printk(KERN_ERR
+ "%d:%d:%d: cannot set interface (%d)\n",
+ subs->dev->devnum,
+ subs->sync_endpoint->iface,
+ subs->sync_endpoint->alt_idx, err);
+ return -EIO;
+ }
+ }
+
snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep);
ep->sync_slave = subs->data_endpoint;
- err = snd_usb_endpoint_start(ep);
+ err = snd_usb_endpoint_start(ep, can_sleep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
return err;
@@ -547,7 +562,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
/* for playback, submit the URBs now; otherwise, the first hwptr_done
* updates for all URBs would happen at the same time when starting */
if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- return start_endpoints(subs);
+ return start_endpoints(subs, 1);
return 0;
}
@@ -1029,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
struct urb *urb)
{
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ struct snd_usb_endpoint *ep = subs->data_endpoint;
struct snd_urb_ctx *ctx = urb->context;
unsigned int counts, frames, bytes;
int i, stride, period_elapsed = 0;
@@ -1040,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
urb->number_of_packets = 0;
spin_lock_irqsave(&subs->lock, flags);
for (i = 0; i < ctx->packets; i++) {
- counts = ctx->packet_size[i];
+ if (ctx->packet_size[i])
+ counts = ctx->packet_size[i];
+ else
+ counts = snd_usb_endpoint_next_packet_size(ep);
+
/* set up descriptor */
urb->iso_frame_desc[i].offset = frames * stride;
urb->iso_frame_desc[i].length = counts * stride;
@@ -1091,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
subs->hwptr_done += bytes;
if (subs->hwptr_done >= runtime->buffer_size * stride)
subs->hwptr_done -= runtime->buffer_size * stride;
+
+ /* update delay with exact number of samples queued */
+ runtime->delay = subs->last_delay;
runtime->delay += frames;
+ subs->last_delay = runtime->delay;
+
+ /* realign last_frame_number */
+ subs->last_frame_number = usb_get_current_frame_number(subs->dev);
+ subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
+
spin_unlock_irqrestore(&subs->lock, flags);
urb->transfer_buffer_length = bytes;
if (period_elapsed)
@@ -1109,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
int stride = runtime->frame_bits >> 3;
int processed = urb->transfer_buffer_length / stride;
+ int est_delay;
spin_lock_irqsave(&subs->lock, flags);
- if (processed > runtime->delay)
- runtime->delay = 0;
+ est_delay = snd_usb_pcm_delay(subs, runtime->rate);
+ /* update delay with exact number of samples played */
+ if (processed > subs->last_delay)
+ subs->last_delay = 0;
else
- runtime->delay -= processed;
+ subs->last_delay -= processed;
+ runtime->delay = subs->last_delay;
+
+ /*
+ * Report when delay estimate is off by more than 2ms.
+ * The error should be lower than 2ms since the estimate relies
+ * on two reads of a counter updated every ms.
+ */
+ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+ snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
+ est_delay, subs->last_delay);
+
spin_unlock_irqrestore(&subs->lock, flags);
}
@@ -1172,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err = start_endpoints(subs);
+ err = start_endpoints(subs, 0);
if (err < 0)
return err;
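
retire_playback_urb() now warns when the frame-counter based delay estimate disagrees with the byte-accounted delay by more than 2 ms; the test `abs(est - actual) * 1000 > rate * 2` is just "difference in frames exceeds two milliseconds worth of samples at the current rate". A quick standalone check of that arithmetic, assuming a 48 kHz stream:

#include <stdio.h>
#include <stdlib.h>

/* Returns 1 when two delay values (in frames) disagree by more than 2 ms. */
static int delay_off_by_2ms(int est_frames, int actual_frames, int rate)
{
	/* frames * 1000 / rate = milliseconds; comparison kept in integers */
	return abs(est_frames - actual_frames) * 1000 > rate * 2;
}

int main(void)
{
	int rate = 48000;              /* 2 ms at 48 kHz is 96 frames */

	printf("%d\n", delay_off_by_2ms(500, 450, rate)); /* 50 frames -> 0 */
	printf("%d\n", delay_off_by_2ms(500, 380, rate)); /* 120 frames -> 1 */
	return 0;
}
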
diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore
new file mode 100644
index 000000000000..35f56be5a4cd
--- /dev/null
+++ b/tools/lib/traceevent/.gitignore
@@ -0,0 +1 @@
+TRACEEVENT-CFLAGS
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 46c2f6b7b123..14131cb0522d 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -207,7 +207,7 @@ libtraceevent.so: $(PEVENT_LIB_OBJS)
libtraceevent.a: $(PEVENT_LIB_OBJS)
$(Q)$(do_build_static_lib)
-$(PEVENT_LIB_OBJS): %.o: $(src)/%.c
+$(PEVENT_LIB_OBJS): %.o: $(src)/%.c TRACEEVENT-CFLAGS
$(Q)$(do_fpic_compile)
define make_version.h
@@ -272,6 +272,16 @@ ifneq ($(dep_includes),)
include $(dep_includes)
endif
+### Detect environment changes
+TRACK_CFLAGS = $(subst ','\'',$(CFLAGS)):$(ARCH):$(CROSS_COMPILE)
+
+TRACEEVENT-CFLAGS: force
+ @FLAGS='$(TRACK_CFLAGS)'; \
+ if test x"$$FLAGS" != x"`cat TRACEEVENT-CFLAGS 2>/dev/null`" ; then \
+ echo 1>&2 " * new build flags or cross compiler"; \
+ echo "$$FLAGS" >TRACEEVENT-CFLAGS; \
+ fi
+
tags: force
$(RM) tags
find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
@@ -297,7 +307,7 @@ install: install_lib
clean:
$(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
- $(RM) tags TAGS
+ $(RM) TRACEEVENT-CFLAGS tags TAGS
endif # skip-makefile
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 75d74e5db8d5..35655c3a7b7a 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -319,6 +319,8 @@ LIB_H += $(ARCH_INCLUDE)
LIB_H += util/cgroup.h
LIB_H += $(TRACE_EVENT_DIR)event-parse.h
LIB_H += util/target.h
+LIB_H += util/rblist.h
+LIB_H += util/intlist.h
LIB_OBJS += $(OUTPUT)util/abspath.o
LIB_OBJS += $(OUTPUT)util/alias.o
@@ -354,6 +356,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o
LIB_OBJS += $(OUTPUT)util/wrapper.o
LIB_OBJS += $(OUTPUT)util/sigchain.o
LIB_OBJS += $(OUTPUT)util/symbol.o
+LIB_OBJS += $(OUTPUT)util/dso-test-data.o
LIB_OBJS += $(OUTPUT)util/color.o
LIB_OBJS += $(OUTPUT)util/pager.o
LIB_OBJS += $(OUTPUT)util/header.o
@@ -382,6 +385,8 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o
LIB_OBJS += $(OUTPUT)util/cpumap.o
LIB_OBJS += $(OUTPUT)util/cgroup.o
LIB_OBJS += $(OUTPUT)util/target.o
+LIB_OBJS += $(OUTPUT)util/rblist.o
+LIB_OBJS += $(OUTPUT)util/intlist.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
@@ -803,6 +808,9 @@ $(OUTPUT)ui/browsers/map.o: ui/browsers/map.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Wno-redundant-decls $<
+
$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
@@ -979,7 +987,8 @@ clean:
$(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope*
$(MAKE) -C Documentation/ clean
$(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS
- $(RM) $(OUTPUT)util/*-{bison,flex}*
+ $(RM) $(OUTPUT)util/*-bison*
+ $(RM) $(OUTPUT)util/*-flex*
$(python-clean)
.PHONY: all install clean strip $(LIBTRACEEVENT)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f5a6452931e6..4db6e1ba54e3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -313,7 +313,7 @@ try_again:
}
}
- perf_session__update_sample_type(session);
+ perf_session__set_id_hdr_size(session);
}
static int process_buildids(struct perf_record *rec)
@@ -844,8 +844,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
struct perf_record *rec = &record;
char errbuf[BUFSIZ];
- perf_header__set_cmdline(argc, argv);
-
evsel_list = perf_evlist__new(NULL, NULL);
if (evsel_list == NULL)
return -ENOMEM;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 69b1c1185159..7c88a243b5db 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -249,8 +249,9 @@ static int process_read_event(struct perf_tool *tool,
static int perf_report__setup_sample_type(struct perf_report *rep)
{
struct perf_session *self = rep->session;
+ u64 sample_type = perf_evlist__sample_type(self->evlist);
- if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
@@ -274,7 +275,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
if (sort__branch_mode == 1) {
if (!self->fd_pipe &&
- !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+ !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
return -1;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 5ce30305462b..1d592f5cbea9 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -478,7 +478,6 @@ static int test__basic_mmap(void)
unsigned int nr_events[nsyscalls],
expected_nr_events[nsyscalls], i, j;
struct perf_evsel *evsels[nsyscalls], *evsel;
- int sample_size = __perf_evsel__sample_size(attr.sample_type);
for (i = 0; i < nsyscalls; ++i) {
char name[64];
@@ -563,8 +562,7 @@ static int test__basic_mmap(void)
goto out_munmap;
}
- err = perf_event__parse_sample(event, attr.sample_type, sample_size,
- false, &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample, false);
if (err) {
pr_err("Can't parse sample, err = %d\n", err);
goto out_munmap;
@@ -661,12 +659,12 @@ static int test__PERF_RECORD(void)
const char *cmd = "sleep";
const char *argv[] = { cmd, "1", NULL, };
char *bname;
- u64 sample_type, prev_time = 0;
+ u64 prev_time = 0;
bool found_cmd_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
- int err = -1, errs = 0, i, wakeups = 0, sample_size;
+ int err = -1, errs = 0, i, wakeups = 0;
u32 cpu;
int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
@@ -757,13 +755,6 @@ static int test__PERF_RECORD(void)
}
/*
- * We'll need these two to parse the PERF_SAMPLE_* fields in each
- * event.
- */
- sample_type = perf_evlist__sample_type(evlist);
- sample_size = __perf_evsel__sample_size(sample_type);
-
- /*
* Now that all is properly set up, enable the events, they will
* count just on workload.pid, which will start...
*/
@@ -788,9 +779,7 @@ static int test__PERF_RECORD(void)
if (type < PERF_RECORD_MAX)
nr_events[type]++;
- err = perf_event__parse_sample(event, sample_type,
- sample_size, true,
- &sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &sample, false);
if (err < 0) {
if (verbose)
perf_event__fprintf(event, stderr);
@@ -1142,6 +1131,10 @@ static struct test {
.func = test__perf_pmu,
},
{
+ .desc = "Test dso data interface",
+ .func = dso__test_data,
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e3cab5f088f8..68cd61ef6ac5 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -38,6 +38,7 @@
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
+#include "util/intlist.h"
#include "util/debug.h"
@@ -125,7 +126,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
/*
* We can't annotate with just /proc/kallsyms
*/
- if (map->dso->symtab_type == SYMTAB__KALLSYMS) {
+ if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
pr_err("Can't annotate %s: No vmlinux file was found in the "
"path\n", sym->name);
sleep(1);
@@ -706,8 +707,16 @@ static void perf_event__process_sample(struct perf_tool *tool,
int err;
if (!machine && perf_guest) {
- pr_err("Can't find guest [%d]'s kernel information\n",
- event->ip.pid);
+ static struct intlist *seen;
+
+ if (!seen)
+ seen = intlist__new();
+
+ if (!intlist__has_entry(seen, event->ip.pid)) {
+ pr_err("Can't find guest [%d]'s kernel information\n",
+ event->ip.pid);
+ intlist__add(seen, event->ip.pid);
+ }
return;
}
@@ -811,7 +820,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
int ret;
while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
- ret = perf_session__parse_sample(session, event, &sample);
+ ret = perf_evlist__parse_sample(top->evlist, event, &sample, false);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
continue;
@@ -943,8 +952,10 @@ try_again:
* based cpu-clock-tick sw counter, which
* is always available even if no PMU support:
*/
- if (attr->type == PERF_TYPE_HARDWARE &&
- attr->config == PERF_COUNT_HW_CPU_CYCLES) {
+ if ((err == ENOENT || err == ENXIO) &&
+ (attr->type == PERF_TYPE_HARDWARE) &&
+ (attr->config == PERF_COUNT_HW_CPU_CYCLES)) {
+
if (verbose)
ui__warning("Cycles event not supported,\n"
"trying to fall back to cpu-clock-ticks\n");
@@ -1032,7 +1043,7 @@ static int __cmd_top(struct perf_top *top)
&top->session->host_machine);
perf_top__start_counters(top);
top->session->evlist = top->evlist;
- perf_session__update_sample_type(top->session);
+ perf_session__set_id_hdr_size(top->session);
/* Wait for a minimal set of events before starting the snapshot */
poll(top->evlist->pollfd, top->evlist->nr_fds, 100);
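
perf_event__process_sample() now keeps an intlist of guest pids it has already complained about, so the "Can't find guest kernel" error prints once per pid instead of flooding the output. A sketch of the same once-per-key warning, with a minimal dynamic array standing in for perf's intlist:

#include <stdio.h>
#include <stdlib.h>

/* Tiny "seen" set: a linear scan is enough for a handful of entries. */
static int *seen;
static size_t nseen;

static int seen_has(int key)
{
	size_t i;
	for (i = 0; i < nseen; i++)
		if (seen[i] == key)
			return 1;
	return 0;
}

static void seen_add(int key)
{
	int *tmp = realloc(seen, (nseen + 1) * sizeof(*seen));
	if (!tmp)
		return;                /* worst case: warn again next time */
	seen = tmp;
	seen[nseen++] = key;
}

static void warn_once(int pid)
{
	if (seen_has(pid))
		return;
	fprintf(stderr, "Can't find guest [%d]'s kernel information\n", pid);
	seen_add(pid);
}

int main(void)
{
	int samples[] = { 10, 10, 22, 10, 22 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		warn_once(samples[i]); /* prints only two lines */
	free(seen);
	return 0;
}
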
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 482f0517b61e..413bd62eedb1 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -978,8 +978,8 @@ static int hist_browser__dump(struct hist_browser *browser)
fp = fopen(filename, "w");
if (fp == NULL) {
char bf[64];
- strerror_r(errno, bf, sizeof(bf));
- ui_helpline__fpush("Couldn't write to %s: %s", filename, bf);
+ const char *err = strerror_r(errno, bf, sizeof(bf));
+ ui_helpline__fpush("Couldn't write to %s: %s", filename, err);
return -1;
}
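
The hist-browser fix uses the pointer returned by strerror_r() rather than assuming the message landed in the supplied buffer: with the GNU variant (glibc built with _GNU_SOURCE) the returned string may live in a static table and the buffer can be left untouched. A standalone way to see this behaviour on glibc:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	char buf[64] = "";
	/* GNU strerror_r returns a char *; it may or may not use buf. */
	const char *msg = strerror_r(EACCES, buf, sizeof(buf));

	printf("returned: %s\n", msg);
	printf("buffer  : \"%s\"%s\n", buf,
	       msg == buf ? " (same storage)" : " (buffer unused)");
	return 0;
}
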
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 8069dfb5ba77..3a282c0057d2 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -426,7 +426,18 @@ int symbol__alloc_hist(struct symbol *sym)
{
struct annotation *notes = symbol__annotation(sym);
const size_t size = symbol__size(sym);
- size_t sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
+ size_t sizeof_sym_hist;
+
+ /* Check for overflow when calculating sizeof_sym_hist */
+ if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
+ return -1;
+
+ sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));
+
+ /* Check for overflow in zalloc argument */
+ if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
+ / symbol_conf.nr_events)
+ return -1;
notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
if (notes->src == NULL)
@@ -777,7 +788,7 @@ fallback:
free_filename = false;
}
- if (dso->symtab_type == SYMTAB__KALLSYMS) {
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
char *build_id_msg = NULL;
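
symbol__alloc_hist() now rejects symbol sizes that would overflow `sizeof(struct sym_hist) + size * sizeof(u64)` and the later zalloc() argument. The guard pattern is "check `n > (MAX - hdr) / elem` before computing `hdr + n * elem`". A self-contained version of that check:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Computes hdr + nelem * elem_size, or returns 0 on overflow. */
static size_t checked_size(size_t hdr, size_t nelem, size_t elem_size)
{
	if (elem_size && nelem > (SIZE_MAX - hdr) / elem_size)
		return 0;              /* would wrap around */
	return hdr + nelem * elem_size;
}

int main(void)
{
	size_t ok  = checked_size(64, 1000, sizeof(uint64_t));
	size_t bad = checked_size(64, SIZE_MAX / 4, sizeof(uint64_t));

	printf("ok  = %zu\n", ok);
	printf("bad = %zu (0 means rejected)\n", bad);
	return 0;
}
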
diff --git a/tools/perf/util/dso-test-data.c b/tools/perf/util/dso-test-data.c
new file mode 100644
index 000000000000..541cdc72c7df
--- /dev/null
+++ b/tools/perf/util/dso-test-data.c
@@ -0,0 +1,153 @@
+#include "util.h"
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+
+#include "symbol.h"
+
+#define TEST_ASSERT_VAL(text, cond) \
+do { \
+ if (!(cond)) { \
+ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
+ return -1; \
+ } \
+} while (0)
+
+static char *test_file(int size)
+{
+ static char buf_templ[] = "/tmp/test-XXXXXX";
+ char *templ = buf_templ;
+ int fd, i;
+ unsigned char *buf;
+
+ fd = mkostemp(templ, O_CREAT|O_WRONLY|O_TRUNC);
+
+ buf = malloc(size);
+ if (!buf) {
+ close(fd);
+ return NULL;
+ }
+
+ for (i = 0; i < size; i++)
+ buf[i] = (unsigned char) ((int) i % 10);
+
+ if (size != write(fd, buf, size))
+ templ = NULL;
+
+ close(fd);
+ return templ;
+}
+
+#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20)
+
+struct test_data_offset {
+ off_t offset;
+ u8 data[10];
+ int size;
+};
+
+struct test_data_offset offsets[] = {
+ /* Fill first cache page. */
+ {
+ .offset = 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read first cache page. */
+ {
+ .offset = 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Fill cache boundary pages. */
+ {
+ .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read cache boundary pages. */
+ {
+ .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Fill final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 3,
+ .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 },
+ .size = 3,
+ },
+};
+
+int dso__test_data(void)
+{
+ struct machine machine;
+ struct dso *dso;
+ char *file = test_file(TEST_FILE_SIZE);
+ size_t i;
+
+ TEST_ASSERT_VAL("No test file", file);
+
+ memset(&machine, 0, sizeof(machine));
+
+ dso = dso__new((const char *)file);
+
+ /* Basic 10 bytes tests. */
+ for (i = 0; i < ARRAY_SIZE(offsets); i++) {
+ struct test_data_offset *data = &offsets[i];
+ ssize_t size;
+ u8 buf[10];
+
+ memset(buf, 0, 10);
+ size = dso__data_read_offset(dso, &machine, data->offset,
+ buf, 10);
+
+ TEST_ASSERT_VAL("Wrong size", size == data->size);
+ TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10));
+ }
+
+ /* Read cross multiple cache pages. */
+ {
+ ssize_t size;
+ int c;
+ u8 *buf;
+
+ buf = malloc(TEST_FILE_SIZE);
+ TEST_ASSERT_VAL("ENOMEM\n", buf);
+
+ /* First iteration to fill caches, second one to read them. */
+ for (c = 0; c < 2; c++) {
+ memset(buf, 0, TEST_FILE_SIZE);
+ size = dso__data_read_offset(dso, &machine, 10,
+ buf, TEST_FILE_SIZE);
+
+ TEST_ASSERT_VAL("Wrong size",
+ size == (TEST_FILE_SIZE - 10));
+
+ for (i = 0; i < (size_t)size; i++)
+ TEST_ASSERT_VAL("Wrong data",
+ buf[i] == (i % 10));
+ }
+
+ free(buf);
+ }
+
+ dso__delete(dso);
+ unlink(file);
+ return 0;
+}
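The expected values in the offsets table follow directly from the test file layout: byte i of the file holds i % 10 and, with DSO__DATA_CACHE_SIZE defined as 4096 in the symbol.h hunk below, TEST_FILE_SIZE is 81920. The first three distinct offsets (10, 4090 and 81910) are multiples of ten, so each read sees 0..9; the last offset, 81917, starts at 81917 % 10 = 7 and only three bytes remain before end of file, hence the expected { 7, 8, 9 } with size 3.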
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 1b197280c621..d84870b06426 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -197,9 +197,6 @@ int perf_event__preprocess_sample(const union perf_event *self,
const char *perf_event__name(unsigned int id);
-int perf_event__parse_sample(const union perf_event *event, u64 type,
- int sample_size, bool sample_id_all,
- struct perf_sample *sample, bool swapped);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
const struct perf_sample *sample,
bool swapped);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f74e9560350e..9b38681add9e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -214,7 +214,7 @@ int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
attrs[i].type = PERF_TYPE_TRACEPOINT;
attrs[i].config = err;
attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
- PERF_SAMPLE_CPU);
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
attrs[i].sample_period = 1;
}
@@ -881,3 +881,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
return 0;
}
+
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample, bool swapped)
+{
+ struct perf_evsel *e = list_entry(evlist->entries.next, struct perf_evsel, node);
+ return perf_evsel__parse_sample(e, event, sample, swapped);
+}
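The new perf_evlist__parse_sample() simply delegates to the first evsel on the list, so callers no longer need to carry sample_type, sample_size and sample_id_all around by hand. A sketch of the call pattern the tools switch to in this series, assuming perf's internal evlist/evsel headers (the drain_mmap() wrapper is hypothetical and event delivery is elided):

    static void drain_mmap(struct perf_evlist *evlist, int idx, bool needs_swap)
    {
            union perf_event *event;
            struct perf_sample sample;

            while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
                    if (perf_evlist__parse_sample(evlist, event, &sample, needs_swap)) {
                            pr_err("Can't parse sample\n");
                            continue;
                    }
                    /* ... deliver the parsed sample ... */
            }
    }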
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 40d4d3cdced0..528c1acd9298 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -122,6 +122,9 @@ u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist);
+int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
+ struct perf_sample *sample, bool swapped);
+
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index e81771364867..2eaae140def2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -20,7 +20,7 @@
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
-int __perf_evsel__sample_size(u64 sample_type)
+static int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -53,6 +53,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
evsel->attr = *attr;
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
+ evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -728,10 +729,10 @@ static bool sample_overlap(const union perf_event *event,
return false;
}
-int perf_event__parse_sample(const union perf_event *event, u64 type,
- int sample_size, bool sample_id_all,
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data, bool swapped)
{
+ u64 type = evsel->attr.sample_type;
const u64 *array;
/*
@@ -746,14 +747,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
data->period = 1;
if (event->header.type != PERF_RECORD_SAMPLE) {
- if (!sample_id_all)
+ if (!evsel->attr.sample_id_all)
return 0;
return perf_event__parse_id_sample(event, type, data, swapped);
}
array = event->sample.array;
- if (sample_size + sizeof(event->header) > event->header.size)
+ if (evsel->sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
if (type & PERF_SAMPLE_IP) {
@@ -895,7 +896,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[1] = sample->tid;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
@@ -930,7 +931,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
u.val32[0] = sample->cpu;
if (swapped) {
/*
- * Inverse of what is done in perf_event__parse_sample
+ * Inverse of what is done in perf_evsel__parse_sample
*/
u.val32[0] = bswap_32(u.val32[0]);
u.val64 = bswap_64(u.val64);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 67cc5033d192..b559929983bb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -65,6 +65,7 @@ struct perf_evsel {
void *func;
void *data;
} handler;
+ unsigned int sample_size;
bool supported;
};
@@ -177,13 +178,8 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
return __perf_evsel__read(evsel, ncpus, nthreads, true);
}
-int __perf_evsel__sample_size(u64 sample_type);
-
-static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
-{
- return __perf_evsel__sample_size(evsel->attr.sample_type);
-}
-
void hists__init(struct hists *hists);
+int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
+ struct perf_sample *sample, bool swapped);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 5a47aba46759..74ea3c2f8138 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -174,6 +174,15 @@ perf_header__set_cmdline(int argc, const char **argv)
{
int i;
+ /*
+ * If header_argv has already been set, do not override it.
+ * This allows a command to set the cmdline, parse args and
+ * then call another builtin function that implements a
+ * command -- e.g, cmd_kvm calling cmd_record.
+ */
+ if (header_argv)
+ return 0;
+
header_argc = (u32)argc;
/* do not include NULL termination */
@@ -1212,6 +1221,12 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
attr.exclude_user,
attr.exclude_kernel);
+ fprintf(fp, ", excl_host = %d, excl_guest = %d",
+ attr.exclude_host,
+ attr.exclude_guest);
+
+ fprintf(fp, ", precise_ip = %d", attr.precise_ip);
+
if (nr)
fprintf(fp, ", id = {");
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 514e2a4b367d..f247ef2789a4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -708,7 +708,7 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
bool printed = false;
struct rb_node *node;
int i = 0;
- int ret;
+ int ret = 0;
/*
* If have one single callchain root, don't bother printing
@@ -747,8 +747,11 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
root = &cnode->rb_root;
}
- return __callchain__fprintf_graph(fp, root, total_samples,
+ ret += __callchain__fprintf_graph(fp, root, total_samples,
1, 1, left_margin);
+ ret += fprintf(fp, "\n");
+
+ return ret;
}
static size_t __callchain__fprintf_flat(FILE *fp,
diff --git a/tools/perf/util/intlist.c b/tools/perf/util/intlist.c
new file mode 100644
index 000000000000..fd530dced9cb
--- /dev/null
+++ b/tools/perf/util/intlist.c
@@ -0,0 +1,101 @@
+/*
+ * Based on intlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+
+#include "intlist.h"
+
+static struct rb_node *intlist__node_new(struct rblist *rblist __used,
+ const void *entry)
+{
+ int i = (int)((long)entry);
+ struct rb_node *rc = NULL;
+ struct int_node *node = malloc(sizeof(*node));
+
+ if (node != NULL) {
+ node->i = i;
+ rc = &node->rb_node;
+ }
+
+ return rc;
+}
+
+static void int_node__delete(struct int_node *ilist)
+{
+ free(ilist);
+}
+
+static void intlist__node_delete(struct rblist *rblist __used,
+ struct rb_node *rb_node)
+{
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ int_node__delete(node);
+}
+
+static int intlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ int i = (int)((long)entry);
+ struct int_node *node = container_of(rb_node, struct int_node, rb_node);
+
+ return node->i - i;
+}
+
+int intlist__add(struct intlist *ilist, int i)
+{
+ return rblist__add_node(&ilist->rblist, (void *)((long)i));
+}
+
+void intlist__remove(struct intlist *ilist __used, struct int_node *node)
+{
+ int_node__delete(node);
+}
+
+struct int_node *intlist__find(struct intlist *ilist, int i)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i));
+
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
+
+struct intlist *intlist__new(void)
+{
+ struct intlist *ilist = malloc(sizeof(*ilist));
+
+ if (ilist != NULL) {
+ rblist__init(&ilist->rblist);
+ ilist->rblist.node_cmp = intlist__node_cmp;
+ ilist->rblist.node_new = intlist__node_new;
+ ilist->rblist.node_delete = intlist__node_delete;
+ }
+
+ return ilist;
+}
+
+void intlist__delete(struct intlist *ilist)
+{
+ if (ilist != NULL)
+ rblist__delete(&ilist->rblist);
+}
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx)
+{
+ struct int_node *node = NULL;
+ struct rb_node *rb_node;
+
+ rb_node = rblist__entry(&ilist->rblist, idx);
+ if (rb_node)
+ node = container_of(rb_node, struct int_node, rb_node);
+
+ return node;
+}
diff --git a/tools/perf/util/intlist.h b/tools/perf/util/intlist.h
new file mode 100644
index 000000000000..6d63ab90db50
--- /dev/null
+++ b/tools/perf/util/intlist.h
@@ -0,0 +1,75 @@
+#ifndef __PERF_INTLIST_H
+#define __PERF_INTLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+#include "rblist.h"
+
+struct int_node {
+ struct rb_node rb_node;
+ int i;
+};
+
+struct intlist {
+ struct rblist rblist;
+};
+
+struct intlist *intlist__new(void);
+void intlist__delete(struct intlist *ilist);
+
+void intlist__remove(struct intlist *ilist, struct int_node *in);
+int intlist__add(struct intlist *ilist, int i);
+
+struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx);
+struct int_node *intlist__find(struct intlist *ilist, int i);
+
+static inline bool intlist__has_entry(struct intlist *ilist, int i)
+{
+ return intlist__find(ilist, i) != NULL;
+}
+
+static inline bool intlist__empty(const struct intlist *ilist)
+{
+ return rblist__empty(&ilist->rblist);
+}
+
+static inline unsigned int intlist__nr_entries(const struct intlist *ilist)
+{
+ return rblist__nr_entries(&ilist->rblist);
+}
+
+/* For intlist iteration */
+static inline struct int_node *intlist__first(struct intlist *ilist)
+{
+ struct rb_node *rn = rb_first(&ilist->rblist.entries);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+static inline struct int_node *intlist__next(struct int_node *in)
+{
+ struct rb_node *rn;
+ if (!in)
+ return NULL;
+ rn = rb_next(&in->rb_node);
+ return rn ? rb_entry(rn, struct int_node, rb_node) : NULL;
+}
+
+/**
+ * intlist_for_each - iterate over a intlist
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @ilist: the &struct intlist for loop.
+ */
+#define intlist__for_each(pos, ilist) \
+ for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
+
+/**
+ * intlist_for_each_safe - iterate over a intlist safe against removal of
+ * int_node
+ * @pos: the &struct int_node to use as a loop cursor.
+ * @n: another &struct int_node to use as temporary storage.
+ * @ilist: the &struct intlist for loop.
+ */
+#define intlist__for_each_safe(pos, n, ilist) \
+ for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
+ pos = n, n = intlist__next(n))
+#endif /* __PERF_INTLIST_H */
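Taken together with intlist.c above, the new type gives perf a simple ordered set of integers. A usage sketch, not part of the patch, assuming this header:

    #include <stdio.h>
    #include "intlist.h"

    static void intlist_demo(void)
    {
            struct intlist *seen = intlist__new();
            struct int_node *pos;

            if (seen == NULL)
                    return;

            intlist__add(seen, 1234);
            intlist__add(seen, 1234);   /* duplicate: rblist__add_node() returns -EEXIST */
            intlist__add(seen, 42);

            if (intlist__has_entry(seen, 42))
                    printf("42 is present\n");

            intlist__for_each(pos, seen)        /* in-order walk of the rbtree */
                    printf("%d\n", pos->i);

            intlist__delete(seen);
    }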
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index a1f4e3669142..cc33486ad9e2 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -7,6 +7,8 @@
#include <stdio.h>
#include <unistd.h>
#include "map.h"
+#include "thread.h"
+#include "strlist.h"
const char *map_type__name[MAP__NR_TYPES] = {
[MAP__FUNCTION] = "Functions",
@@ -585,7 +587,21 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid)
self->kmaps.machine = self;
self->pid = pid;
self->root_dir = strdup(root_dir);
- return self->root_dir == NULL ? -ENOMEM : 0;
+ if (self->root_dir == NULL)
+ return -ENOMEM;
+
+ if (pid != HOST_KERNEL_ID) {
+ struct thread *thread = machine__findnew_thread(self, pid);
+ char comm[64];
+
+ if (thread == NULL)
+ return -ENOMEM;
+
+ snprintf(comm, sizeof(comm), "[guest/%d]", pid);
+ thread__set_comm(thread, comm);
+ }
+
+ return 0;
}
static void dsos__delete(struct list_head *self)
@@ -680,7 +696,15 @@ struct machine *machines__findnew(struct rb_root *self, pid_t pid)
(symbol_conf.guestmount)) {
sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
if (access(path, R_OK)) {
- pr_err("Can't access file %s\n", path);
+ static struct strlist *seen;
+
+ if (!seen)
+ seen = strlist__new(true, NULL);
+
+ if (!strlist__has_entry(seen, path)) {
+ pr_err("Can't access file %s\n", path);
+ strlist__add(seen, path);
+ }
machine = NULL;
goto out;
}
@@ -714,3 +738,16 @@ char *machine__mmap_name(struct machine *self, char *bf, size_t size)
return bf;
}
+
+void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
+{
+ struct rb_node *node;
+ struct machine *machine;
+
+ for (node = rb_first(machines); node; node = rb_next(node)) {
+ machine = rb_entry(node, struct machine, rb_node);
+ machine->id_hdr_size = id_hdr_size;
+ }
+
+ return;
+}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index c14c665d9a25..03a1e9b08b21 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -151,6 +151,7 @@ struct machine *machines__add(struct rb_root *self, pid_t pid,
struct machine *machines__find_host(struct rb_root *self);
struct machine *machines__find(struct rb_root *self, pid_t pid);
struct machine *machines__findnew(struct rb_root *self, pid_t pid);
+void machines__set_id_hdr_size(struct rb_root *self, u16 id_hdr_size);
char *machine__mmap_name(struct machine *self, char *bf, size_t size);
int machine__init(struct machine *self, const char *root_dir, pid_t pid);
void machine__exit(struct machine *self);
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 1b997d2b89ce..127d648cc548 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -13,6 +13,9 @@ do { \
} \
} while (0)
+#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -21,8 +24,7 @@ static int test__checkevent_tracepoint(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
- evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
return 0;
}
@@ -37,8 +39,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong type",
PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
- == evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period",
1 == evsel->attr.sample_period);
}
@@ -428,8 +429,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
evsel = list_entry(evsel->node.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
TEST_ASSERT_VAL("wrong sample_type",
- (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
- evsel->attr.sample_type);
+ PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);
TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 1aa721d7c10f..74a5af4d33ec 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -377,6 +377,7 @@ static int add_tracepoint(struct list_head **list, int *idx,
attr.sample_type |= PERF_SAMPLE_RAW;
attr.sample_type |= PERF_SAMPLE_TIME;
attr.sample_type |= PERF_SAMPLE_CPU;
+ attr.sample_type |= PERF_SAMPLE_PERIOD;
attr.sample_period = 1;
snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name);
@@ -489,6 +490,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.type = PERF_TYPE_BREAKPOINT;
+ attr.sample_period = 1;
return add_event(list, idx, &attr, NULL);
}
diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c
index 99d02aa57dbf..594f8fad5ecd 100644
--- a/tools/perf/util/parse-options.c
+++ b/tools/perf/util/parse-options.c
@@ -1,6 +1,7 @@
#include "util.h"
#include "parse-options.h"
#include "cache.h"
+#include "header.h"
#define OPT_SHORT 1
#define OPT_UNSET 2
@@ -413,6 +414,8 @@ int parse_options(int argc, const char **argv, const struct option *options,
{
struct parse_opt_ctx_t ctx;
+ perf_header__set_cmdline(argc, argv);
+
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
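Together with the header.c hunk above, this makes capturing the command line a side effect of option parsing: parse_options() hands the original argc/argv to perf_header__set_cmdline(), and the early return added in header.c keeps a nested builtin (e.g. cmd_kvm invoking cmd_record) from overwriting the outer command's arguments.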
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 2884e67ee625..213362850abd 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,10 +10,12 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
+util/hweight.c
util/thread_map.c
util/util.c
util/xyarray.c
util/cgroup.c
util/debugfs.c
+util/rblist.c
util/strlist.c
../../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index e03b58a48424..0688bfb6d280 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -797,17 +797,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
event = perf_evlist__mmap_read(evlist, cpu);
if (event != NULL) {
- struct perf_evsel *first;
PyObject *pyevent = pyrf_event__new(event);
struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
if (pyevent == NULL)
return PyErr_NoMemory();
- first = list_entry(evlist->entries.next, struct perf_evsel, node);
- err = perf_event__parse_sample(event, first->attr.sample_type,
- perf_evsel__sample_size(first),
- sample_id_all, &pevent->sample, false);
+ err = perf_evlist__parse_sample(evlist, event, &pevent->sample, false);
if (err)
return PyErr_Format(PyExc_OSError,
"perf: can't parse sample, err=%d", err);
diff --git a/tools/perf/util/rblist.c b/tools/perf/util/rblist.c
new file mode 100644
index 000000000000..0171fb611004
--- /dev/null
+++ b/tools/perf/util/rblist.c
@@ -0,0 +1,107 @@
+/*
+ * Based on strlist.c by:
+ * (c) 2009 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Licensed under the GPLv2.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "rblist.h"
+
+int rblist__add_node(struct rblist *rblist, const void *new_entry)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL, *new_node;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, new_entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ new_node = rblist->node_new(rblist, new_entry);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ rb_link_node(new_node, parent, p);
+ rb_insert_color(new_node, &rblist->entries);
+ ++rblist->nr_entries;
+
+ return 0;
+}
+
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node)
+{
+ rb_erase(rb_node, &rblist->entries);
+ rblist->node_delete(rblist, rb_node);
+}
+
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry)
+{
+ struct rb_node **p = &rblist->entries.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p != NULL) {
+ int rc;
+
+ parent = *p;
+
+ rc = rblist->node_cmp(parent, entry);
+ if (rc > 0)
+ p = &(*p)->rb_left;
+ else if (rc < 0)
+ p = &(*p)->rb_right;
+ else
+ return parent;
+ }
+
+ return NULL;
+}
+
+void rblist__init(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ rblist->entries = RB_ROOT;
+ rblist->nr_entries = 0;
+ }
+
+ return;
+}
+
+void rblist__delete(struct rblist *rblist)
+{
+ if (rblist != NULL) {
+ struct rb_node *pos, *next = rb_first(&rblist->entries);
+
+ while (next) {
+ pos = next;
+ next = rb_next(pos);
+ rb_erase(pos, &rblist->entries);
+ rblist->node_delete(rblist, pos);
+ }
+ free(rblist);
+ }
+}
+
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx)
+{
+ struct rb_node *node;
+
+ for (node = rb_first(&rblist->entries); node; node = rb_next(node)) {
+ if (!idx--)
+ return node;
+ }
+
+ return NULL;
+}
diff --git a/tools/perf/util/rblist.h b/tools/perf/util/rblist.h
new file mode 100644
index 000000000000..6d0cae5ae83d
--- /dev/null
+++ b/tools/perf/util/rblist.h
@@ -0,0 +1,47 @@
+#ifndef __PERF_RBLIST_H
+#define __PERF_RBLIST_H
+
+#include <linux/rbtree.h>
+#include <stdbool.h>
+
+/*
+ * create node structs of the form:
+ * struct my_node {
+ * struct rb_node rb_node;
+ * ... my data ...
+ * };
+ *
+ * create list structs of the form:
+ * struct mylist {
+ * struct rblist rblist;
+ * ... my data ...
+ * };
+ */
+
+struct rblist {
+ struct rb_root entries;
+ unsigned int nr_entries;
+
+ int (*node_cmp)(struct rb_node *rbn, const void *entry);
+ struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry);
+ void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node);
+};
+
+void rblist__init(struct rblist *rblist);
+void rblist__delete(struct rblist *rblist);
+int rblist__add_node(struct rblist *rblist, const void *new_entry);
+void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node);
+struct rb_node *rblist__find(struct rblist *rblist, const void *entry);
+struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx);
+
+static inline bool rblist__empty(const struct rblist *rblist)
+{
+ return rblist->nr_entries == 0;
+}
+
+static inline unsigned int rblist__nr_entries(const struct rblist *rblist)
+{
+ return rblist->nr_entries;
+}
+
+#endif /* __PERF_RBLIST_H */
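One subtlety of the API: rblist__delete() frees the struct rblist pointer it is handed, so both users added in this series (struct intlist above and the struct strlist conversion below) embed the rblist as the first member of a malloc()ed wrapper, which makes freeing &list->rblist release the wrapper itself.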
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 8e485592ca20..2437fb0b463a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -80,13 +80,12 @@ out_close:
return -1;
}
-void perf_session__update_sample_type(struct perf_session *self)
+void perf_session__set_id_hdr_size(struct perf_session *session)
{
- self->sample_type = perf_evlist__sample_type(self->evlist);
- self->sample_size = __perf_evsel__sample_size(self->sample_type);
- self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
- self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
- self->host_machine.id_hdr_size = self->id_hdr_size;
+ u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
+
+ session->host_machine.id_hdr_size = id_hdr_size;
+ machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *self)
@@ -146,7 +145,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
if (mode == O_RDONLY) {
if (perf_session__open(self, force) < 0)
goto out_delete;
- perf_session__update_sample_type(self);
+ perf_session__set_id_hdr_size(self);
} else if (mode == O_WRONLY) {
/*
* In O_RDONLY mode this will be performed when reading the
@@ -157,7 +156,7 @@ struct perf_session *perf_session__new(const char *filename, int mode,
}
if (tool && tool->ordering_requires_timestamps &&
- tool->ordered_samples && !self->sample_id_all) {
+ tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
tool->ordered_samples = false;
}
@@ -672,7 +671,8 @@ static void flush_sample_queue(struct perf_session *s,
if (iter->timestamp > limit)
break;
- ret = perf_session__parse_sample(s, iter->event, &sample);
+ ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample,
+ s->header.needs_swap);
if (ret)
pr_err("Can't parse sample, err = %d\n", ret);
else
@@ -864,16 +864,18 @@ static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
+ u64 sample_type = perf_evlist__sample_type(session->evlist);
+
if (event->header.type != PERF_RECORD_SAMPLE &&
- !session->sample_id_all) {
+ !perf_evlist__sample_id_all(session->evlist)) {
fputs("-1 -1 ", stdout);
return;
}
- if ((session->sample_type & PERF_SAMPLE_CPU))
+ if ((sample_type & PERF_SAMPLE_CPU))
printf("%u ", sample->cpu);
- if (session->sample_type & PERF_SAMPLE_TIME)
+ if (sample_type & PERF_SAMPLE_TIME)
printf("%" PRIu64 " ", sample->time);
}
@@ -898,6 +900,8 @@ static void dump_event(struct perf_session *session, union perf_event *event,
static void dump_sample(struct perf_session *session, union perf_event *event,
struct perf_sample *sample)
{
+ u64 sample_type;
+
if (!dump_trace)
return;
@@ -905,10 +909,12 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);
- if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
+ sample_type = perf_evlist__sample_type(session->evlist);
+
+ if (sample_type & PERF_SAMPLE_CALLCHAIN)
callchain__printf(sample);
- if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK)
branch_stack__printf(sample);
}
@@ -918,7 +924,9 @@ static struct machine *
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+ if (perf_guest &&
+ ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
+ (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
u32 pid;
if (event->header.type == PERF_RECORD_MMAP)
@@ -1003,7 +1011,7 @@ static int perf_session__preprocess_sample(struct perf_session *session,
union perf_event *event, struct perf_sample *sample)
{
if (event->header.type != PERF_RECORD_SAMPLE ||
- !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
+ !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
return 0;
if (!ip_callchain__valid(sample->callchain, event)) {
@@ -1027,7 +1035,7 @@ static int perf_session__process_user_event(struct perf_session *session, union
case PERF_RECORD_HEADER_ATTR:
err = tool->attr(event, &session->evlist);
if (err == 0)
- perf_session__update_sample_type(session);
+ perf_session__set_id_hdr_size(session);
return err;
case PERF_RECORD_HEADER_EVENT_TYPE:
return tool->event_type(tool, event);
@@ -1062,7 +1070,7 @@ static int perf_session__process_event(struct perf_session *session,
int ret;
if (session->header.needs_swap)
- event_swap(event, session->sample_id_all);
+ event_swap(event, perf_evlist__sample_id_all(session->evlist));
if (event->header.type >= PERF_RECORD_HEADER_MAX)
return -EINVAL;
@@ -1075,7 +1083,8 @@ static int perf_session__process_event(struct perf_session *session,
/*
* For all kernel events we get the sample data
*/
- ret = perf_session__parse_sample(session, event, &sample);
+ ret = perf_evlist__parse_sample(session->evlist, event, &sample,
+ session->header.needs_swap);
if (ret)
return ret;
@@ -1386,9 +1395,9 @@ int perf_session__process_events(struct perf_session *self,
return err;
}
-bool perf_session__has_traces(struct perf_session *self, const char *msg)
+bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
- if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
return false;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7c435bde6eb0..1f7ec87db7d7 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -41,13 +41,9 @@ struct perf_session {
* perf.data file.
*/
struct hists hists;
- u64 sample_type;
- int sample_size;
int fd;
bool fd_pipe;
bool repipe;
- bool sample_id_all;
- u16 id_hdr_size;
int cwdlen;
char *cwd;
struct ordered_samples ordered_samples;
@@ -86,7 +82,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr);
int perf_session__create_kernel_maps(struct perf_session *self);
-void perf_session__update_sample_type(struct perf_session *self);
+void perf_session__set_id_hdr_size(struct perf_session *session);
void perf_session__remove_thread(struct perf_session *self, struct thread *th);
static inline
@@ -130,24 +126,6 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self,
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
-static inline int perf_session__parse_sample(struct perf_session *session,
- const union perf_event *event,
- struct perf_sample *sample)
-{
- return perf_event__parse_sample(event, session->sample_type,
- session->sample_size,
- session->sample_id_all, sample,
- session->header.needs_swap);
-}
-
-static inline int perf_session__synthesize_sample(struct perf_session *session,
- union perf_event *event,
- const struct perf_sample *sample)
-{
- return perf_event__synthesize_sample(event, session->sample_type,
- sample, session->header.needs_swap);
-}
-
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 6783a2043555..95856ff3dda4 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -10,23 +10,28 @@
#include <stdlib.h>
#include <string.h>
-static struct str_node *str_node__new(const char *s, bool dupstr)
+static
+struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry)
{
- struct str_node *self = malloc(sizeof(*self));
+ const char *s = entry;
+ struct rb_node *rc = NULL;
+ struct strlist *strlist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = malloc(sizeof(*snode));
- if (self != NULL) {
- if (dupstr) {
+ if (snode != NULL) {
+ if (strlist->dupstr) {
s = strdup(s);
if (s == NULL)
goto out_delete;
}
- self->s = s;
+ snode->s = s;
+ rc = &snode->rb_node;
}
- return self;
+ return rc;
out_delete:
- free(self);
+ free(snode);
return NULL;
}
@@ -37,36 +42,26 @@ static void str_node__delete(struct str_node *self, bool dupstr)
free(self);
}
-int strlist__add(struct strlist *self, const char *new_entry)
+static
+void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
- struct str_node *sn;
-
- while (*p != NULL) {
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, new_entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return -EEXIST;
- }
+ struct strlist *slist = container_of(rblist, struct strlist, rblist);
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
- sn = str_node__new(new_entry, self->dupstr);
- if (sn == NULL)
- return -ENOMEM;
+ str_node__delete(snode, slist->dupstr);
+}
- rb_link_node(&sn->rb_node, parent, p);
- rb_insert_color(&sn->rb_node, &self->entries);
- ++self->nr_entries;
+static int strlist__node_cmp(struct rb_node *rb_node, const void *entry)
+{
+ const char *str = entry;
+ struct str_node *snode = container_of(rb_node, struct str_node, rb_node);
+
+ return strcmp(snode->s, str);
+}
- return 0;
+int strlist__add(struct strlist *self, const char *new_entry)
+{
+ return rblist__add_node(&self->rblist, new_entry);
}
int strlist__load(struct strlist *self, const char *filename)
@@ -96,34 +91,20 @@ out:
return err;
}
-void strlist__remove(struct strlist *self, struct str_node *sn)
+void strlist__remove(struct strlist *slist, struct str_node *snode)
{
- rb_erase(&sn->rb_node, &self->entries);
- str_node__delete(sn, self->dupstr);
+ str_node__delete(snode, slist->dupstr);
}
-struct str_node *strlist__find(struct strlist *self, const char *entry)
+struct str_node *strlist__find(struct strlist *slist, const char *entry)
{
- struct rb_node **p = &self->entries.rb_node;
- struct rb_node *parent = NULL;
-
- while (*p != NULL) {
- struct str_node *sn;
- int rc;
-
- parent = *p;
- sn = rb_entry(parent, struct str_node, rb_node);
- rc = strcmp(sn->s, entry);
-
- if (rc > 0)
- p = &(*p)->rb_left;
- else if (rc < 0)
- p = &(*p)->rb_right;
- else
- return sn;
- }
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node = rblist__find(&slist->rblist, entry);
- return NULL;
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
+
+ return snode;
}
static int strlist__parse_list_entry(struct strlist *self, const char *s)
@@ -156,9 +137,12 @@ struct strlist *strlist__new(bool dupstr, const char *slist)
struct strlist *self = malloc(sizeof(*self));
if (self != NULL) {
- self->entries = RB_ROOT;
+ rblist__init(&self->rblist);
+ self->rblist.node_cmp = strlist__node_cmp;
+ self->rblist.node_new = strlist__node_new;
+ self->rblist.node_delete = strlist__node_delete;
+
self->dupstr = dupstr;
- self->nr_entries = 0;
if (slist && strlist__parse_list(self, slist) != 0)
goto out_error;
}
@@ -171,30 +155,18 @@ out_error:
void strlist__delete(struct strlist *self)
{
- if (self != NULL) {
- struct str_node *pos;
- struct rb_node *next = rb_first(&self->entries);
-
- while (next) {
- pos = rb_entry(next, struct str_node, rb_node);
- next = rb_next(&pos->rb_node);
- strlist__remove(self, pos);
- }
- self->entries = RB_ROOT;
- free(self);
- }
+ if (self != NULL)
+ rblist__delete(&self->rblist);
}
-struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
+struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx)
{
- struct rb_node *nd;
+ struct str_node *snode = NULL;
+ struct rb_node *rb_node;
- for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
- struct str_node *pos = rb_entry(nd, struct str_node, rb_node);
+ rb_node = rblist__entry(&slist->rblist, idx);
+ if (rb_node)
+ snode = container_of(rb_node, struct str_node, rb_node);
- if (!idx--)
- return pos;
- }
-
- return NULL;
+ return snode;
}
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 3ba839007d2c..dd9f922ec67c 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -4,14 +4,15 @@
#include <linux/rbtree.h>
#include <stdbool.h>
+#include "rblist.h"
+
struct str_node {
struct rb_node rb_node;
const char *s;
};
struct strlist {
- struct rb_root entries;
- unsigned int nr_entries;
+ struct rblist rblist;
bool dupstr;
};
@@ -32,18 +33,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry)
static inline bool strlist__empty(const struct strlist *self)
{
- return self->nr_entries == 0;
+ return rblist__empty(&self->rblist);
}
static inline unsigned int strlist__nr_entries(const struct strlist *self)
{
- return self->nr_entries;
+ return rblist__nr_entries(&self->rblist);
}
/* For strlist iteration */
static inline struct str_node *strlist__first(struct strlist *self)
{
- struct rb_node *rn = rb_first(&self->entries);
+ struct rb_node *rn = rb_first(&self->rblist.entries);
return rn ? rb_entry(rn, struct str_node, rb_node) : NULL;
}
static inline struct str_node *strlist__next(struct str_node *sn)
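The strlist interface is unchanged by the conversion; callers keep using it exactly as the util/map.c hunk earlier in this diff does. A minimal sketch of that pattern, assuming this header:

    #include <stdio.h>
    #include "strlist.h"

    /* Warn about a path only once, mirroring the machines__findnew() change above. */
    static void warn_once(const char *path)
    {
            static struct strlist *seen;

            if (!seen)
                    seen = strlist__new(true, NULL);    /* dupstr: entries are strdup()ed */
            if (!seen)
                    return;

            if (!strlist__has_entry(seen, path)) {
                    fprintf(stderr, "Can't access file %s\n", path);
                    strlist__add(seen, path);
            }
    }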
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 50958bbeb26a..8b63b678e127 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -29,6 +29,7 @@
#define NT_GNU_BUILD_ID 3
#endif
+static void dso_cache__free(struct rb_root *root);
static bool dso__build_id_equal(const struct dso *dso, u8 *build_id);
static int elf_read_build_id(Elf *elf, void *bf, size_t size);
static void dsos__add(struct list_head *head, struct dso *dso);
@@ -48,6 +49,31 @@ struct symbol_conf symbol_conf = {
.symfs = "",
};
+static enum dso_binary_type binary_type_symtab[] = {
+ DSO_BINARY_TYPE__KALLSYMS,
+ DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__JAVA_JIT,
+ DSO_BINARY_TYPE__DEBUGLINK,
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__GUEST_KMODULE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
+
+static enum dso_binary_type binary_type_data[] = {
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
+#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data)
+
int dso__name_len(const struct dso *dso)
{
if (!dso)
@@ -318,7 +344,9 @@ struct dso *dso__new(const char *name)
dso__set_short_name(dso, dso->name);
for (i = 0; i < MAP__NR_TYPES; ++i)
dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
- dso->symtab_type = SYMTAB__NOT_FOUND;
+ dso->cache = RB_ROOT;
+ dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
+ dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->loaded = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
@@ -352,6 +380,7 @@ void dso__delete(struct dso *dso)
free((char *)dso->short_name);
if (dso->lname_alloc)
free(dso->long_name);
+ dso_cache__free(&dso->cache);
free(dso);
}
@@ -806,9 +835,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
symbols__fixup_end(&dso->symbols[map->type]);
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
- dso->symtab_type = SYMTAB__GUEST_KALLSYMS;
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
else
- dso->symtab_type = SYMTAB__KALLSYMS;
+ dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
return dso__split_kallsyms(dso, map, filter);
}
@@ -1660,32 +1689,110 @@ out:
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
- [SYMTAB__KALLSYMS] = 'k',
- [SYMTAB__JAVA_JIT] = 'j',
- [SYMTAB__DEBUGLINK] = 'l',
- [SYMTAB__BUILD_ID_CACHE] = 'B',
- [SYMTAB__FEDORA_DEBUGINFO] = 'f',
- [SYMTAB__UBUNTU_DEBUGINFO] = 'u',
- [SYMTAB__BUILDID_DEBUGINFO] = 'b',
- [SYMTAB__SYSTEM_PATH_DSO] = 'd',
- [SYMTAB__SYSTEM_PATH_KMODULE] = 'K',
- [SYMTAB__GUEST_KALLSYMS] = 'g',
- [SYMTAB__GUEST_KMODULE] = 'G',
+ [DSO_BINARY_TYPE__KALLSYMS] = 'k',
+ [DSO_BINARY_TYPE__JAVA_JIT] = 'j',
+ [DSO_BINARY_TYPE__DEBUGLINK] = 'l',
+ [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B',
+ [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f',
+ [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u',
+ [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd',
+ [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K',
+ [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g',
+ [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G',
};
- if (dso == NULL || dso->symtab_type == SYMTAB__NOT_FOUND)
+ if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
return '!';
return origin[dso->symtab_type];
}
+int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
+ char *root_dir, char *file, size_t size)
+{
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+ int ret = 0;
+
+ switch (type) {
+ case DSO_BINARY_TYPE__DEBUGLINK: {
+ char *debuglink;
+
+ strncpy(file, dso->long_name, size);
+ debuglink = file + dso->long_name_len;
+ while (debuglink != file && *debuglink != '/')
+ debuglink--;
+ if (*debuglink == '/')
+ debuglink++;
+ filename__read_debuglink(dso->long_name, debuglink,
+ size - (debuglink - file));
+ }
+ break;
+ case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+ /* skip the locally configured cache if a symfs is given */
+ if (symbol_conf.symfs[0] ||
+ (dso__build_id_filename(dso, file, size) == NULL))
+ ret = -1;
+ break;
+
+ case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+ snprintf(file, size, "%s/usr/lib/debug%s.debug",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+ snprintf(file, size, "%s/usr/lib/debug%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+ if (!dso->has_build_id) {
+ ret = -1;
+ break;
+ }
+
+ build_id__sprintf(dso->build_id,
+ sizeof(dso->build_id),
+ build_id_hex);
+ snprintf(file, size,
+ "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
+ symbol_conf.symfs, build_id_hex, build_id_hex + 2);
+ break;
+
+ case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+ snprintf(file, size, "%s%s",
+ symbol_conf.symfs, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__GUEST_KMODULE:
+ snprintf(file, size, "%s%s%s", symbol_conf.symfs,
+ root_dir, dso->long_name);
+ break;
+
+ case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+ snprintf(file, size, "%s%s", symbol_conf.symfs,
+ dso->long_name);
+ break;
+
+ default:
+ case DSO_BINARY_TYPE__KALLSYMS:
+ case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+ case DSO_BINARY_TYPE__JAVA_JIT:
+ case DSO_BINARY_TYPE__NOT_FOUND:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
{
- int size = PATH_MAX;
char *name;
int ret = -1;
int fd;
+ u_int i;
struct machine *machine;
- const char *root_dir;
+ char *root_dir = (char *) "";
int want_symtab;
dso__set_loaded(dso, map->type);
@@ -1700,7 +1807,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
else
machine = NULL;
- name = malloc(size);
+ name = malloc(PATH_MAX);
if (!name)
return -1;
@@ -1719,81 +1826,27 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
}
ret = dso__load_perf_map(dso, map, filter);
- dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT :
- SYMTAB__NOT_FOUND;
+ dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
+ DSO_BINARY_TYPE__NOT_FOUND;
return ret;
}
+ if (machine)
+ root_dir = machine->root_dir;
+
/* Iterate over candidate debug images.
* On the first pass, only load images if they have a full symtab.
* Failing that, do a second pass where we accept .dynsym also
*/
want_symtab = 1;
restart:
- for (dso->symtab_type = SYMTAB__DEBUGLINK;
- dso->symtab_type != SYMTAB__NOT_FOUND;
- dso->symtab_type++) {
- switch (dso->symtab_type) {
- case SYMTAB__DEBUGLINK: {
- char *debuglink;
- strncpy(name, dso->long_name, size);
- debuglink = name + dso->long_name_len;
- while (debuglink != name && *debuglink != '/')
- debuglink--;
- if (*debuglink == '/')
- debuglink++;
- filename__read_debuglink(dso->long_name, debuglink,
- size - (debuglink - name));
- }
- break;
- case SYMTAB__BUILD_ID_CACHE:
- /* skip the locally configured cache if a symfs is given */
- if (symbol_conf.symfs[0] ||
- (dso__build_id_filename(dso, name, size) == NULL)) {
- continue;
- }
- break;
- case SYMTAB__FEDORA_DEBUGINFO:
- snprintf(name, size, "%s/usr/lib/debug%s.debug",
- symbol_conf.symfs, dso->long_name);
- break;
- case SYMTAB__UBUNTU_DEBUGINFO:
- snprintf(name, size, "%s/usr/lib/debug%s",
- symbol_conf.symfs, dso->long_name);
- break;
- case SYMTAB__BUILDID_DEBUGINFO: {
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+ for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
- if (!dso->has_build_id)
- continue;
+ dso->symtab_type = binary_type_symtab[i];
- build_id__sprintf(dso->build_id,
- sizeof(dso->build_id),
- build_id_hex);
- snprintf(name, size,
- "%s/usr/lib/debug/.build-id/%.2s/%s.debug",
- symbol_conf.symfs, build_id_hex, build_id_hex + 2);
- }
- break;
- case SYMTAB__SYSTEM_PATH_DSO:
- snprintf(name, size, "%s%s",
- symbol_conf.symfs, dso->long_name);
- break;
- case SYMTAB__GUEST_KMODULE:
- if (map->groups && machine)
- root_dir = machine->root_dir;
- else
- root_dir = "";
- snprintf(name, size, "%s%s%s", symbol_conf.symfs,
- root_dir, dso->long_name);
- break;
-
- case SYMTAB__SYSTEM_PATH_KMODULE:
- snprintf(name, size, "%s%s", symbol_conf.symfs,
- dso->long_name);
- break;
- default:;
- }
+ if (dso__binary_type_file(dso, dso->symtab_type,
+ root_dir, name, PATH_MAX))
+ continue;
/* Name is now the name of the next image to try */
fd = open(name, O_RDONLY);
@@ -2010,9 +2063,9 @@ struct map *machine__new_module(struct machine *machine, u64 start,
return NULL;
if (machine__is_host(machine))
- dso->symtab_type = SYMTAB__SYSTEM_PATH_KMODULE;
+ dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
else
- dso->symtab_type = SYMTAB__GUEST_KMODULE;
+ dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
map_groups__insert(&machine->kmaps, map);
return map;
}
@@ -2564,8 +2617,15 @@ int machine__create_kernel_maps(struct machine *machine)
__machine__create_kernel_maps(machine, kernel) < 0)
return -1;
- if (symbol_conf.use_modules && machine__create_modules(machine) < 0)
- pr_debug("Problems creating module maps, continuing anyway...\n");
+ if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
+ if (machine__is_host(machine))
+ pr_debug("Problems creating module maps, "
+ "continuing anyway...\n");
+ else
+ pr_debug("Problems creating module maps for guest %d, "
+ "continuing anyway...\n", machine->pid);
+ }
+
/*
* Now that we have all the maps created, just set the ->end of them:
*/
@@ -2815,6 +2875,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
int i, items = 0;
char path[PATH_MAX];
pid_t pid;
+ char *endp;
if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_modules ||
@@ -2831,7 +2892,14 @@ int machines__create_guest_kernel_maps(struct rb_root *machines)
/* Filter out . and .. */
continue;
}
- pid = atoi(namelist[i]->d_name);
+ pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
+ if ((*endp != '\0') ||
+ (endp == namelist[i]->d_name) ||
+ (errno == ERANGE)) {
+ pr_debug("invalid directory (%s). Skipping.\n",
+ namelist[i]->d_name);
+ continue;
+ }
sprintf(path, "%s/%s/proc/kallsyms",
symbol_conf.guestmount,
namelist[i]->d_name);
@@ -2905,3 +2973,218 @@ struct map *dso__new_map(const char *name)
return map;
}
+
+static int open_dso(struct dso *dso, struct machine *machine)
+{
+ char *root_dir = (char *) "";
+ char *name;
+ int fd;
+
+ name = malloc(PATH_MAX);
+ if (!name)
+ return -ENOMEM;
+
+ if (machine)
+ root_dir = machine->root_dir;
+
+ if (dso__binary_type_file(dso, dso->data_type,
+ root_dir, name, PATH_MAX)) {
+ free(name);
+ return -EINVAL;
+ }
+
+ fd = open(name, O_RDONLY);
+ free(name);
+ return fd;
+}
+
+int dso__data_fd(struct dso *dso, struct machine *machine)
+{
+ int i = 0;
+
+ if (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND)
+ return open_dso(dso, machine);
+
+ do {
+ int fd;
+
+ dso->data_type = binary_type_data[i++];
+
+ fd = open_dso(dso, machine);
+ if (fd >= 0)
+ return fd;
+
+ } while (dso->data_type != DSO_BINARY_TYPE__NOT_FOUND);
+
+ return -EINVAL;
+}
+
+static void
+dso_cache__free(struct rb_root *root)
+{
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ struct dso_cache *cache;
+
+ cache = rb_entry(next, struct dso_cache, rb_node);
+ next = rb_next(&cache->rb_node);
+ rb_erase(&cache->rb_node, root);
+ free(cache);
+ }
+}
+
+static struct dso_cache*
+dso_cache__find(struct rb_root *root, u64 offset)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ else
+ return cache;
+ }
+ return NULL;
+}
+
+static void
+dso_cache__insert(struct rb_root *root, struct dso_cache *new)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct dso_cache *cache;
+ u64 offset = new->offset;
+
+ while (*p != NULL) {
+ u64 end;
+
+ parent = *p;
+ cache = rb_entry(parent, struct dso_cache, rb_node);
+ end = cache->offset + DSO__DATA_CACHE_SIZE;
+
+ if (offset < cache->offset)
+ p = &(*p)->rb_left;
+ else if (offset >= end)
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&new->rb_node, parent, p);
+ rb_insert_color(&new->rb_node, root);
+}
+
+static ssize_t
+dso_cache__memcpy(struct dso_cache *cache, u64 offset,
+ u8 *data, u64 size)
+{
+ u64 cache_offset = offset - cache->offset;
+ u64 cache_size = min(cache->size - cache_offset, size);
+
+ memcpy(data, cache->data + cache_offset, cache_size);
+ return cache_size;
+}
+
+static ssize_t
+dso_cache__read(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+ ssize_t ret;
+ int fd;
+
+ fd = dso__data_fd(dso, machine);
+ if (fd < 0)
+ return -1;
+
+ do {
+ u64 cache_offset;
+
+ ret = -ENOMEM;
+
+ cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
+ if (!cache)
+ break;
+
+ cache_offset = offset & DSO__DATA_CACHE_MASK;
+ ret = -EINVAL;
+
+ if (-1 == lseek(fd, cache_offset, SEEK_SET))
+ break;
+
+ ret = read(fd, cache->data, DSO__DATA_CACHE_SIZE);
+ if (ret <= 0)
+ break;
+
+ cache->offset = cache_offset;
+ cache->size = ret;
+ dso_cache__insert(&dso->cache, cache);
+
+ ret = dso_cache__memcpy(cache, offset, data, size);
+
+ } while (0);
+
+ if (ret <= 0)
+ free(cache);
+
+ close(fd);
+ return ret;
+}
+
+static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size)
+{
+ struct dso_cache *cache;
+
+ cache = dso_cache__find(&dso->cache, offset);
+ if (cache)
+ return dso_cache__memcpy(cache, offset, data, size);
+ else
+ return dso_cache__read(dso, machine, offset, data, size);
+}
+
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size)
+{
+ ssize_t r = 0;
+ u8 *p = data;
+
+ do {
+ ssize_t ret;
+
+ ret = dso_cache_read(dso, machine, offset, p, size);
+ if (ret < 0)
+ return ret;
+
+ /* Reached EOF, return what we have. */
+ if (!ret)
+ break;
+
+ BUG_ON(ret > size);
+
+ r += ret;
+ p += ret;
+ offset += ret;
+ size -= ret;
+
+ } while (size);
+
+ return r;
+}
+
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size)
+{
+ u64 offset = map->map_ip(map, addr);
+ return dso__data_read_offset(dso, machine, offset, data, size);
+}
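The new DSO data-read path picks an on-disk image via dso__data_fd() (build-id cache first, then the system path), reads it in DSO__DATA_CACHE_SIZE chunks kept in the per-DSO rbtree cache, and exposes dso__data_read_offset()/dso__data_read_addr() on top. A sketch of the address-based entry point, assuming perf's map/symbol headers (read_code_bytes() is hypothetical):

    /* Fetch up to 16 raw bytes backing a mapped address; buf must hold 16 bytes. */
    static ssize_t read_code_bytes(struct map *map, struct machine *machine,
                                   u64 addr, u8 *buf)
    {
            if (map == NULL || map->dso == NULL)
                    return -1;

            /* dso__data_read_addr() converts addr to a file offset via map->map_ip(). */
            return dso__data_read_addr(map->dso, map, machine, addr, buf, 16);
    }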
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index a884b99017f0..1fe733a1e21f 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -155,6 +155,21 @@ struct addr_location {
s32 cpu;
};
+enum dso_binary_type {
+ DSO_BINARY_TYPE__KALLSYMS = 0,
+ DSO_BINARY_TYPE__GUEST_KALLSYMS,
+ DSO_BINARY_TYPE__JAVA_JIT,
+ DSO_BINARY_TYPE__DEBUGLINK,
+ DSO_BINARY_TYPE__BUILD_ID_CACHE,
+ DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+ DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+ DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+ DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
+ DSO_BINARY_TYPE__GUEST_KMODULE,
+ DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__NOT_FOUND,
+};
+
enum dso_kernel_type {
DSO_TYPE_USER = 0,
DSO_TYPE_KERNEL,
@@ -167,19 +182,31 @@ enum dso_swap_type {
DSO_SWAP__YES,
};
+#define DSO__DATA_CACHE_SIZE 4096
+#define DSO__DATA_CACHE_MASK ~(DSO__DATA_CACHE_SIZE - 1)
+
+struct dso_cache {
+ struct rb_node rb_node;
+ u64 offset;
+ u64 size;
+ char data[0];
+};
+
struct dso {
struct list_head node;
struct rb_root symbols[MAP__NR_TYPES];
struct rb_root symbol_names[MAP__NR_TYPES];
+ struct rb_root cache;
enum dso_kernel_type kernel;
enum dso_swap_type needs_swap;
+ enum dso_binary_type symtab_type;
+ enum dso_binary_type data_type;
u8 adjust_symbols:1;
u8 has_build_id:1;
u8 hit:1;
u8 annotate_warned:1;
u8 sname_alloc:1;
u8 lname_alloc:1;
- unsigned char symtab_type;
u8 sorted_by_name;
u8 loaded;
u8 build_id[BUILD_ID_SIZE];
@@ -253,21 +280,6 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
enum map_type type, FILE *fp);
size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
-enum symtab_type {
- SYMTAB__KALLSYMS = 0,
- SYMTAB__GUEST_KALLSYMS,
- SYMTAB__JAVA_JIT,
- SYMTAB__DEBUGLINK,
- SYMTAB__BUILD_ID_CACHE,
- SYMTAB__FEDORA_DEBUGINFO,
- SYMTAB__UBUNTU_DEBUGINFO,
- SYMTAB__BUILDID_DEBUGINFO,
- SYMTAB__SYSTEM_PATH_DSO,
- SYMTAB__GUEST_KMODULE,
- SYMTAB__SYSTEM_PATH_KMODULE,
- SYMTAB__NOT_FOUND,
-};
-
char dso__symtab_origin(const struct dso *dso);
void dso__set_long_name(struct dso *dso, char *name);
void dso__set_build_id(struct dso *dso, void *build_id);
@@ -304,4 +316,14 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
+int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
+ char *root_dir, char *file, size_t size);
+
+int dso__data_fd(struct dso *dso, struct machine *machine);
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+ u64 offset, u8 *data, ssize_t size);
+ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
+ struct machine *machine, u64 addr,
+ u8 *data, ssize_t size);
+int dso__test_data(void);
#endif /* __PERF_SYMBOL */
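DSO__DATA_CACHE_MASK rounds a file offset down to the start of its 4096-byte cache page; the difference is the position inside the page that dso_cache__memcpy() copies from. Illustrative values only:

    u64 offset       = 0x1234;
    u64 cache_offset = offset & DSO__DATA_CACHE_MASK;   /* 0x1000 */
    u64 into_page    = offset - cache_offset;           /* 0x234  */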
diff --git a/tools/perf/util/target.c b/tools/perf/util/target.c
index 1064d5b148ad..051eaa68095e 100644
--- a/tools/perf/util/target.c
+++ b/tools/perf/util/target.c
@@ -110,8 +110,17 @@ int perf_target__strerror(struct perf_target *target, int errnum,
int idx;
const char *msg;
+ BUG_ON(buflen == 0);
+
if (errnum >= 0) {
- strerror_r(errnum, buf, buflen);
+ const char *err = strerror_r(errnum, buf, buflen);
+
+ if (err != buf) {
+ size_t len = strlen(err);
+ char *c = mempcpy(buf, err, min(buflen - 1, len));
+ *c = '\0';
+ }
+
return 0;
}
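This is the same GNU strerror_r() behaviour handled in ui/browsers/hists.c earlier in this diff; here the returned string is additionally copied into the caller's buffer (truncated to buflen - 1 and NUL-terminated) because perf_target__strerror() callers go on to read from buf.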
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index fd8e1f1297aa..f85649554191 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -1,4 +1,5 @@
turbostat : turbostat.c
+CFLAGS += -Wall
clean :
rm -f turbostat
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index adf175f61496..74e44507dfe9 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -27,7 +27,11 @@ supports an "invariant" TSC, plus the APERF and MPERF MSRs.
on processors that additionally support C-state residency counters.
.SS Options
-The \fB-s\fP option prints only a 1-line summary for each sample interval.
+The \fB-s\fP option limits output to a 1-line system summary for each interval.
+.PP
+The \fB-c\fP option limits output to the 1st thread in each core.
+.PP
+The \fB-p\fP option limits output to the 1st thread in each package.
.PP
The \fB-v\fP option increases verbosity.
.PP
@@ -65,19 +69,19 @@ Subsequent rows show per-CPU statistics.
.nf
[root@x980]# ./turbostat
cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 0.60 1.63 3.38 2.91 0.00 96.49 0.00 76.64
- 0 0 0.59 1.62 3.38 4.51 0.00 94.90 0.00 76.64
- 0 6 1.13 1.64 3.38 3.97 0.00 94.90 0.00 76.64
- 1 2 0.08 1.62 3.38 0.07 0.00 99.85 0.00 76.64
- 1 8 0.03 1.62 3.38 0.12 0.00 99.85 0.00 76.64
- 2 4 0.01 1.62 3.38 0.06 0.00 99.93 0.00 76.64
- 2 10 0.04 1.62 3.38 0.02 0.00 99.93 0.00 76.64
- 8 1 2.85 1.62 3.38 11.71 0.00 85.44 0.00 76.64
- 8 7 1.98 1.62 3.38 12.58 0.00 85.44 0.00 76.64
- 9 3 0.36 1.62 3.38 0.71 0.00 98.93 0.00 76.64
- 9 9 0.09 1.62 3.38 0.98 0.00 98.93 0.00 76.64
- 10 5 0.03 1.62 3.38 0.09 0.00 99.87 0.00 76.64
- 10 11 0.07 1.62 3.38 0.06 0.00 99.87 0.00 76.64
+ 0.09 1.62 3.38 1.83 0.32 97.76 1.26 83.61
+ 0 0 0.15 1.62 3.38 10.23 0.05 89.56 1.26 83.61
+ 0 6 0.05 1.62 3.38 10.34
+ 1 2 0.03 1.62 3.38 0.07 0.05 99.86
+ 1 8 0.03 1.62 3.38 0.06
+ 2 4 0.21 1.62 3.38 0.10 1.49 98.21
+ 2 10 0.02 1.62 3.38 0.29
+ 8 1 0.04 1.62 3.38 0.04 0.08 99.84
+ 8 7 0.01 1.62 3.38 0.06
+ 9 3 0.53 1.62 3.38 0.10 0.20 99.17
+ 9 9 0.02 1.62 3.38 0.60
+ 10 5 0.01 1.62 3.38 0.02 0.04 99.92
+ 10 11 0.02 1.62 3.38 0.02
.fi
.SH SUMMARY EXAMPLE
The "-s" option prints the column headers just once,
@@ -86,9 +90,10 @@ and then the one line system summary for each sample interval.
.nf
[root@x980]# ./turbostat -s
%c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 0.61 1.89 3.38 5.95 0.00 93.44 0.00 66.33
- 0.52 1.62 3.38 6.83 0.00 92.65 0.00 61.11
- 0.62 1.92 3.38 5.47 0.00 93.91 0.00 67.31
+ 0.23 1.67 3.38 2.00 0.30 97.47 1.07 82.12
+ 0.10 1.62 3.38 1.87 2.25 95.77 12.02 72.60
+ 0.20 1.64 3.38 1.98 0.11 97.72 0.30 83.36
+ 0.11 1.70 3.38 1.86 1.81 96.22 9.71 74.90
.fi
.SH VERBOSE EXAMPLE
The "-v" option adds verbosity to the output:
@@ -120,30 +125,28 @@ until ^C while the other CPUs are mostly idle:
[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
^C
cor CPU %c0 GHz TSC %c1 %c3 %c6 %pc3 %pc6
- 8.63 3.64 3.38 14.46 0.49 76.42 0.00 0.00
- 0 0 0.34 3.36 3.38 99.66 0.00 0.00 0.00 0.00
- 0 6 99.96 3.64 3.38 0.04 0.00 0.00 0.00 0.00
- 1 2 0.14 3.50 3.38 1.75 2.04 96.07 0.00 0.00
- 1 8 0.38 3.57 3.38 1.51 2.04 96.07 0.00 0.00
- 2 4 0.01 2.65 3.38 0.06 0.00 99.93 0.00 0.00
- 2 10 0.03 2.12 3.38 0.04 0.00 99.93 0.00 0.00
- 8 1 0.91 3.59 3.38 35.27 0.92 62.90 0.00 0.00
- 8 7 1.61 3.63 3.38 34.57 0.92 62.90 0.00 0.00
- 9 3 0.04 3.38 3.38 0.20 0.00 99.76 0.00 0.00
- 9 9 0.04 3.29 3.38 0.20 0.00 99.76 0.00 0.00
- 10 5 0.03 3.08 3.38 0.12 0.00 99.85 0.00 0.00
- 10 11 0.05 3.07 3.38 0.10 0.00 99.85 0.00 0.00
-4.907015 sec
-
+ 8.86 3.61 3.38 15.06 31.19 44.89 0.00 0.00
+ 0 0 1.46 3.22 3.38 16.84 29.48 52.22 0.00 0.00
+ 0 6 0.21 3.06 3.38 18.09
+ 1 2 0.53 3.33 3.38 2.80 46.40 50.27
+ 1 8 0.89 3.47 3.38 2.44
+ 2 4 1.36 3.43 3.38 9.04 23.71 65.89
+ 2 10 0.18 2.86 3.38 10.22
+ 8 1 0.04 2.87 3.38 99.96 0.01 0.00
+ 8 7 99.72 3.63 3.38 0.27
+ 9 3 0.31 3.21 3.38 7.64 56.55 35.50
+ 9 9 0.08 2.95 3.38 7.88
+ 10 5 1.42 3.43 3.38 2.14 30.99 65.44
+ 10 11 0.16 2.88 3.38 3.40
.fi
-Above the cycle soaker drives cpu6 up 3.6 Ghz turbo limit
+Above, the cycle soaker drives cpu7 up to its 3.6 GHz turbo limit
while the other processors are generally in various states of idle.
-Note that cpu0 is an HT sibling sharing core0
-with cpu6, and thus it is unable to get to an idle state
-deeper than c1 while cpu6 is busy.
+Note that cpu1 and cpu7 are HT siblings within core8.
+As cpu7 is very busy, it prevents its sibling, cpu1,
+from entering a c-state deeper than c1.
-Note that turbostat reports average GHz of 3.64, while
+Note that turbostat reports average GHz of 3.63, while
the arithmetic average of the GHz column above is lower.
This is a weighted average, where the weight is %c0. ie. it is the total number of
un-halted cycles elapsed per time divided by the number of CPUs.
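    (The weighted-average point can be checked with a small standalone program, illustration
    only; the per-CPU numbers below are invented and are not taken from the sample output.)

    /* Illustration only: why the summary GHz is %c0-weighted rather than a
     * simple arithmetic mean of the per-CPU GHz column.  Invented counts:
     * CPU A is 100% busy at 3.6 GHz, CPU B is 1% busy at 1.6 GHz, on a
     * 3.38 GHz TSC over a 1-second interval.
     */
    #include <stdio.h>

    int main(void)
    {
            double tsc_ghz  = 3.38;                         /* base (TSC) clock */
            double mperf[2] = { 3.38e9, 0.0338e9 };         /* un-halted reference cycles */
            double aperf[2] = { 3.60e9, 0.0160e9 };         /* un-halted core cycles */

            double arithmetic = (tsc_ghz * aperf[0] / mperf[0] +
                                 tsc_ghz * aperf[1] / mperf[1]) / 2;
            double weighted   = tsc_ghz * (aperf[0] + aperf[1]) /
                                          (mperf[0] + mperf[1]);

            printf("arithmetic mean %.2f GHz, weighted summary %.2f GHz\n",
                   arithmetic, weighted);
            return 0;
    }

    The busy CPU dominates the weighted figure, which is why the summary GHz sits near the
    turbo frequency even though most CPUs idle at a much lower clock.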
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 16de7ad4850f..861d77190206 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -67,92 +67,119 @@ double bclk;
unsigned int show_pkg;
unsigned int show_core;
unsigned int show_cpu;
+unsigned int show_pkg_only;
+unsigned int show_core_only;
+char *output_buffer, *outp;
int aperf_mperf_unstable;
int backwards_count;
char *progname;
-int num_cpus;
-cpu_set_t *cpu_present_set, *cpu_mask;
-size_t cpu_present_setsize, cpu_mask_size;
-
-struct counters {
- unsigned long long tsc; /* per thread */
- unsigned long long aperf; /* per thread */
- unsigned long long mperf; /* per thread */
- unsigned long long c1; /* per thread (calculated) */
- unsigned long long c3; /* per core */
- unsigned long long c6; /* per core */
- unsigned long long c7; /* per core */
- unsigned long long pc2; /* per package */
- unsigned long long pc3; /* per package */
- unsigned long long pc6; /* per package */
- unsigned long long pc7; /* per package */
- unsigned long long extra_msr; /* per thread */
- int pkg;
- int core;
- int cpu;
- struct counters *next;
-};
-
-struct counters *cnt_even;
-struct counters *cnt_odd;
-struct counters *cnt_delta;
-struct counters *cnt_average;
-struct timeval tv_even;
-struct timeval tv_odd;
-struct timeval tv_delta;
-
-int mark_cpu_present(int pkg, int core, int cpu)
+cpu_set_t *cpu_present_set, *cpu_affinity_set;
+size_t cpu_present_setsize, cpu_affinity_setsize;
+
+struct thread_data {
+ unsigned long long tsc;
+ unsigned long long aperf;
+ unsigned long long mperf;
+ unsigned long long c1; /* derived */
+ unsigned long long extra_msr;
+ unsigned int cpu_id;
+ unsigned int flags;
+#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
+#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
+} *thread_even, *thread_odd;
+
+struct core_data {
+ unsigned long long c3;
+ unsigned long long c6;
+ unsigned long long c7;
+ unsigned int core_id;
+} *core_even, *core_odd;
+
+struct pkg_data {
+ unsigned long long pc2;
+ unsigned long long pc3;
+ unsigned long long pc6;
+ unsigned long long pc7;
+ unsigned int package_id;
+} *package_even, *package_odd;
+
+#define ODD_COUNTERS thread_odd, core_odd, package_odd
+#define EVEN_COUNTERS thread_even, core_even, package_even
+
+#define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
+ (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
+ topo.num_threads_per_core + \
+ (core_no) * topo.num_threads_per_core + (thread_no))
+#define GET_CORE(core_base, core_no, pkg_no) \
+ (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
+#define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
+
+struct system_summary {
+ struct thread_data threads;
+ struct core_data cores;
+ struct pkg_data packages;
+} sum, average;
+
+
+struct topo_params {
+ int num_packages;
+ int num_cpus;
+ int num_cores;
+ int max_cpu_num;
+ int num_cores_per_pkg;
+ int num_threads_per_core;
+} topo;
+
+struct timeval tv_even, tv_odd, tv_delta;
+
+void setup_all_buffers(void);
+
+int cpu_is_not_present(int cpu)
{
- CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
- return 0;
+ return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
}
-
/*
- * cpu_mask_init(ncpus)
- *
- * allocate and clear cpu_mask
- * set cpu_mask_size
+ * run func(thread, core, package) in topology order
+ * skip non-present cpus
*/
-void cpu_mask_init(int ncpus)
+
+int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg_data *),
+ struct thread_data *thread_base, struct core_data *core_base, struct pkg_data *pkg_base)
{
- cpu_mask = CPU_ALLOC(ncpus);
- if (cpu_mask == NULL) {
- perror("CPU_ALLOC");
- exit(3);
- }
- cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
- CPU_ZERO_S(cpu_mask_size, cpu_mask);
+ int retval, pkg_no, core_no, thread_no;
- /*
- * Allocate and initialize cpu_present_set
- */
- cpu_present_set = CPU_ALLOC(ncpus);
- if (cpu_present_set == NULL) {
- perror("CPU_ALLOC");
- exit(3);
- }
- cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
- CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
- for_all_cpus(mark_cpu_present);
-}
+ for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
+ for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
+ for (thread_no = 0; thread_no <
+ topo.num_threads_per_core; ++thread_no) {
+ struct thread_data *t;
+ struct core_data *c;
+ struct pkg_data *p;
-void cpu_mask_uninit()
-{
- CPU_FREE(cpu_mask);
- cpu_mask = NULL;
- cpu_mask_size = 0;
- CPU_FREE(cpu_present_set);
- cpu_present_set = NULL;
- cpu_present_setsize = 0;
+ t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
+
+ if (cpu_is_not_present(t->cpu_id))
+ continue;
+
+ c = GET_CORE(core_base, core_no, pkg_no);
+ p = GET_PKG(pkg_base, pkg_no);
+
+ retval = func(t, c, p);
+ if (retval)
+ return retval;
+ }
+ }
+ }
+ return 0;
}
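    (A standalone sketch of the index arithmetic behind GET_THREAD(), which for_all_cpus()
    walks above; the topology sizes here are invented for illustration and are not from the
    patch.)

    /* Illustration only: the flattened-array index computed by GET_THREAD(). */
    #include <stdio.h>

    int main(void)
    {
            int threads_per_core = 2, cores_per_pkg = 6;    /* example topology */
            int pkg_no = 1, core_no = 4, thread_no = 1;

            /* same arithmetic as GET_THREAD(base, thread_no, core_no, pkg_no) */
            int idx = pkg_no * cores_per_pkg * threads_per_core +
                      core_no * threads_per_core + thread_no;

            printf("pkg %d core %d thread %d -> thread_data element %d\n",
                   pkg_no, core_no, thread_no, idx);        /* prints 21 */
            return 0;
    }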
int cpu_migrate(int cpu)
{
- CPU_ZERO_S(cpu_mask_size, cpu_mask);
- CPU_SET_S(cpu, cpu_mask_size, cpu_mask);
- if (sched_setaffinity(0, cpu_mask_size, cpu_mask) == -1)
+ CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+ CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
+ if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
return -1;
else
return 0;
@@ -181,67 +208,72 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
void print_header(void)
{
if (show_pkg)
- fprintf(stderr, "pk");
+ outp += sprintf(outp, "pk");
if (show_pkg)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (show_core)
- fprintf(stderr, "cor");
+ outp += sprintf(outp, "cor");
if (show_cpu)
- fprintf(stderr, " CPU");
+ outp += sprintf(outp, " CPU");
if (show_pkg || show_core || show_cpu)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (do_nhm_cstates)
- fprintf(stderr, " %%c0");
+ outp += sprintf(outp, " %%c0");
if (has_aperf)
- fprintf(stderr, " GHz");
- fprintf(stderr, " TSC");
+ outp += sprintf(outp, " GHz");
+ outp += sprintf(outp, " TSC");
if (do_nhm_cstates)
- fprintf(stderr, " %%c1");
+ outp += sprintf(outp, " %%c1");
if (do_nhm_cstates)
- fprintf(stderr, " %%c3");
+ outp += sprintf(outp, " %%c3");
if (do_nhm_cstates)
- fprintf(stderr, " %%c6");
+ outp += sprintf(outp, " %%c6");
if (do_snb_cstates)
- fprintf(stderr, " %%c7");
+ outp += sprintf(outp, " %%c7");
if (do_snb_cstates)
- fprintf(stderr, " %%pc2");
+ outp += sprintf(outp, " %%pc2");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc3");
+ outp += sprintf(outp, " %%pc3");
if (do_nhm_cstates)
- fprintf(stderr, " %%pc6");
+ outp += sprintf(outp, " %%pc6");
if (do_snb_cstates)
- fprintf(stderr, " %%pc7");
+ outp += sprintf(outp, " %%pc7");
if (extra_msr_offset)
- fprintf(stderr, " MSR 0x%x ", extra_msr_offset);
+ outp += sprintf(outp, " MSR 0x%x ", extra_msr_offset);
- putc('\n', stderr);
+ outp += sprintf(outp, "\n");
}
-void dump_cnt(struct counters *cnt)
+int dump_counters(struct thread_data *t, struct core_data *c,
+ struct pkg_data *p)
{
- if (!cnt)
- return;
- if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
- if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
- if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
- if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
- if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
- if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
- if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
- if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
- if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
- if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
- if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
- if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
- if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
-}
+ fprintf(stderr, "t %p, c %p, p %p\n", t, c, p);
+
+ if (t) {
+ fprintf(stderr, "CPU: %d flags 0x%x\n", t->cpu_id, t->flags);
+ fprintf(stderr, "TSC: %016llX\n", t->tsc);
+ fprintf(stderr, "aperf: %016llX\n", t->aperf);
+ fprintf(stderr, "mperf: %016llX\n", t->mperf);
+ fprintf(stderr, "c1: %016llX\n", t->c1);
+ fprintf(stderr, "msr0x%x: %016llX\n",
+ extra_msr_offset, t->extra_msr);
+ }
-void dump_list(struct counters *cnt)
-{
- printf("dump_list 0x%p\n", cnt);
+ if (c) {
+ fprintf(stderr, "core: %d\n", c->core_id);
+ fprintf(stderr, "c3: %016llX\n", c->c3);
+ fprintf(stderr, "c6: %016llX\n", c->c6);
+ fprintf(stderr, "c7: %016llX\n", c->c7);
+ }
- for (; cnt; cnt = cnt->next)
- dump_cnt(cnt);
+ if (p) {
+ fprintf(stderr, "package: %d\n", p->package_id);
+ fprintf(stderr, "pc2: %016llX\n", p->pc2);
+ fprintf(stderr, "pc3: %016llX\n", p->pc3);
+ fprintf(stderr, "pc6: %016llX\n", p->pc6);
+ fprintf(stderr, "pc7: %016llX\n", p->pc7);
+ }
+ return 0;
}
/*
@@ -253,321 +285,389 @@ void dump_list(struct counters *cnt)
* TSC: "TSC" 3 columns %3.2
* percentage " %pc3" %6.2
*/
-void print_cnt(struct counters *p)
+int format_counters(struct thread_data *t, struct core_data *c,
+ struct pkg_data *p)
{
double interval_float;
+ /* if showing only 1st thread in core and this isn't one, bail out */
+ if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ return 0;
+
+ /* if showing only 1st thread in pkg and this isn't one, bail out */
+ if (show_pkg_only && !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
- /* topology columns, print blanks on 1st (average) line */
- if (p == cnt_average) {
+ /* topo columns, print blanks on 1st (average) line */
+ if (t == &average.threads) {
if (show_pkg)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (show_pkg && show_core)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (show_core)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (show_cpu)
- fprintf(stderr, " " " ");
+ outp += sprintf(outp, " " " ");
} else {
- if (show_pkg)
- fprintf(stderr, "%2d", p->pkg);
+ if (show_pkg) {
+ if (p)
+ outp += sprintf(outp, "%2d", p->package_id);
+ else
+ outp += sprintf(outp, " ");
+ }
if (show_pkg && show_core)
- fprintf(stderr, " ");
- if (show_core)
- fprintf(stderr, "%3d", p->core);
+ outp += sprintf(outp, " ");
+ if (show_core) {
+ if (c)
+ outp += sprintf(outp, "%3d", c->core_id);
+ else
+ outp += sprintf(outp, " ");
+ }
if (show_cpu)
- fprintf(stderr, " %3d", p->cpu);
+ outp += sprintf(outp, " %3d", t->cpu_id);
}
/* %c0 */
if (do_nhm_cstates) {
if (show_pkg || show_core || show_cpu)
- fprintf(stderr, " ");
+ outp += sprintf(outp, " ");
if (!skip_c0)
- fprintf(stderr, "%6.2f", 100.0 * p->mperf/p->tsc);
+ outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
else
- fprintf(stderr, " ****");
+ outp += sprintf(outp, " ****");
}
/* GHz */
if (has_aperf) {
if (!aperf_mperf_unstable) {
- fprintf(stderr, " %3.2f",
- 1.0 * p->tsc / units * p->aperf /
- p->mperf / interval_float);
+ outp += sprintf(outp, " %3.2f",
+ 1.0 * t->tsc / units * t->aperf /
+ t->mperf / interval_float);
} else {
- if (p->aperf > p->tsc || p->mperf > p->tsc) {
- fprintf(stderr, " ***");
+ if (t->aperf > t->tsc || t->mperf > t->tsc) {
+ outp += sprintf(outp, " ***");
} else {
- fprintf(stderr, "%3.1f*",
- 1.0 * p->tsc /
- units * p->aperf /
- p->mperf / interval_float);
+ outp += sprintf(outp, "%3.1f*",
+ 1.0 * t->tsc /
+ units * t->aperf /
+ t->mperf / interval_float);
}
}
}
/* TSC */
- fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
+ outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
if (do_nhm_cstates) {
if (!skip_c1)
- fprintf(stderr, " %6.2f", 100.0 * p->c1/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
else
- fprintf(stderr, " ****");
+ outp += sprintf(outp, " ****");
}
+
+ /* print per-core data only for 1st thread in core */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ goto done;
+
if (do_nhm_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->c3/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
if (do_nhm_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->c6/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
if (do_snb_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->c7/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
+
+ /* print per-package data only for 1st core in package */
+ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ goto done;
+
if (do_snb_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->pc2/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
if (do_nhm_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->pc3/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
if (do_nhm_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->pc6/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
if (do_snb_cstates)
- fprintf(stderr, " %6.2f", 100.0 * p->pc7/p->tsc);
+ outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+done:
if (extra_msr_offset)
- fprintf(stderr, " 0x%016llx", p->extra_msr);
- putc('\n', stderr);
+ outp += sprintf(outp, " 0x%016llx", t->extra_msr);
+ outp += sprintf(outp, "\n");
+
+ return 0;
}
-void print_counters(struct counters *counters)
+void flush_stdout()
+{
+ fputs(output_buffer, stdout);
+ outp = output_buffer;
+}
+void flush_stderr()
+{
+ fputs(output_buffer, stderr);
+ outp = output_buffer;
+}
+void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
{
- struct counters *cnt;
static int printed;
-
if (!printed || !summary_only)
print_header();
- if (num_cpus > 1)
- print_cnt(cnt_average);
+ if (topo.num_cpus > 1)
+ format_counters(&average.threads, &average.cores,
+ &average.packages);
printed = 1;
if (summary_only)
return;
- for (cnt = counters; cnt != NULL; cnt = cnt->next)
- print_cnt(cnt);
-
+ for_all_cpus(format_counters, t, c, p);
}
-#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
+void
+delta_package(struct pkg_data *new, struct pkg_data *old)
+{
+ old->pc2 = new->pc2 - old->pc2;
+ old->pc3 = new->pc3 - old->pc3;
+ old->pc6 = new->pc6 - old->pc6;
+ old->pc7 = new->pc7 - old->pc7;
+}
-int compute_delta(struct counters *after,
- struct counters *before, struct counters *delta)
+void
+delta_core(struct core_data *new, struct core_data *old)
{
- int errors = 0;
- int perf_err = 0;
+ old->c3 = new->c3 - old->c3;
+ old->c6 = new->c6 - old->c6;
+ old->c7 = new->c7 - old->c7;
+}
- skip_c0 = skip_c1 = 0;
+/*
+ * old = new - old
+ */
+void
+delta_thread(struct thread_data *new, struct thread_data *old,
+ struct core_data *core_delta)
+{
+ old->tsc = new->tsc - old->tsc;
+
+ /* check for TSC < 1 Mcycles over interval */
+ if (old->tsc < (1000 * 1000)) {
+ fprintf(stderr, "Insanely slow TSC rate, TSC stops in idle?\n");
+ fprintf(stderr, "You can disable all c-states by booting with \"idle=poll\"\n");
+ fprintf(stderr, "or just the deep ones with \"processor.max_cstate=1\"\n");
+ exit(-3);
+ }
- for ( ; after && before && delta;
- after = after->next, before = before->next, delta = delta->next) {
- if (before->cpu != after->cpu) {
- printf("cpu configuration changed: %d != %d\n",
- before->cpu, after->cpu);
- return -1;
- }
+ old->c1 = new->c1 - old->c1;
- if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
- fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
- before->cpu, before->tsc, after->tsc);
- errors++;
- }
- /* check for TSC < 1 Mcycles over interval */
- if (delta->tsc < (1000 * 1000)) {
- fprintf(stderr, "Insanely slow TSC rate,"
- " TSC stops in idle?\n");
- fprintf(stderr, "You can disable all c-states"
- " by booting with \"idle=poll\"\n");
- fprintf(stderr, "or just the deep ones with"
- " \"processor.max_cstate=1\"\n");
- exit(-3);
- }
- if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
- fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
- before->cpu, before->c3, after->c3);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
- fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
- before->cpu, before->c6, after->c6);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
- fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
- before->cpu, before->c7, after->c7);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
- fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
- before->cpu, before->pc2, after->pc2);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
- fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
- before->cpu, before->pc3, after->pc3);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
- fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
- before->cpu, before->pc6, after->pc6);
- errors++;
- }
- if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
- fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
- before->cpu, before->pc7, after->pc7);
- errors++;
- }
+ if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
+ old->aperf = new->aperf - old->aperf;
+ old->mperf = new->mperf - old->mperf;
+ } else {
- perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
- if (perf_err) {
- fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
- before->cpu, before->aperf, after->aperf);
- }
- perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
- if (perf_err) {
- fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
- before->cpu, before->mperf, after->mperf);
- }
- if (perf_err) {
- if (!aperf_mperf_unstable) {
- fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
- fprintf(stderr, "* Frequency results do not cover entire interval *\n");
- fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+ if (!aperf_mperf_unstable) {
+ fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+ fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+ fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
- aperf_mperf_unstable = 1;
- }
- /*
- * mperf delta is likely a huge "positive" number
- * can not use it for calculating c0 time
- */
- skip_c0 = 1;
- skip_c1 = 1;
+ aperf_mperf_unstable = 1;
}
-
/*
- * As mperf and tsc collection are not atomic,
- * it is possible for mperf's non-halted cycles
- * to exceed TSC's all cycles: show c1 = 0% in that case.
+ * mperf delta is likely a huge "positive" number
+ * can not use it for calculating c0 time
*/
- if (delta->mperf > delta->tsc)
- delta->c1 = 0;
- else /* normal case, derive c1 */
- delta->c1 = delta->tsc - delta->mperf
- - delta->c3 - delta->c6 - delta->c7;
+ skip_c0 = 1;
+ skip_c1 = 1;
+ }
- if (delta->mperf == 0)
- delta->mperf = 1; /* divide by 0 protection */
- /*
- * for "extra msr", just copy the latest w/o subtracting
- */
- delta->extra_msr = after->extra_msr;
- if (errors) {
- fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
- dump_cnt(before);
- fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
- dump_cnt(after);
- errors = 0;
- }
+ /*
+ * As counter collection is not atomic,
+ * it is possible for mperf's non-halted cycles + idle states
+ * to exceed TSC's all cycles: show c1 = 0% in that case.
+ */
+ if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+ old->c1 = 0;
+ else {
+ /* normal case, derive c1 */
+ old->c1 = old->tsc - old->mperf - core_delta->c3
+ - core_delta->c6 - core_delta->c7;
}
+
+ if (old->mperf == 0) {
+ if (verbose > 1) fprintf(stderr, "cpu%d MPERF 0!\n", old->cpu_id);
+ old->mperf = 1; /* divide by 0 protection */
+ }
+
+ /*
+ * for "extra msr", just copy the latest w/o subtracting
+ */
+ old->extra_msr = new->extra_msr;
+}
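    (A worked example of the %c1 derivation in delta_thread() above; the per-interval deltas
    are invented and scaled so that the TSC delta is 100, i.e. values read directly as
    percentages.)

    /* Illustration only: the residual-c1 computation used by delta_thread(). */
    #include <stdio.h>

    int main(void)
    {
            /* invented deltas over one interval, scaled so tsc == 100 */
            double tsc = 100.0, mperf = 10.0, c3 = 5.0, c6 = 60.0, c7 = 0.0;
            double c1;

            if (mperf + c3 + c6 + c7 > tsc)
                    c1 = 0;                         /* counter race: report 0% */
            else
                    c1 = tsc - mperf - c3 - c6 - c7;        /* residual = 25% */

            printf("%%c0=%.0f %%c1=%.0f %%c3=%.0f %%c6=%.0f %%c7=%.0f\n",
                   mperf, c1, c3, c6, c7);
            return 0;
    }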
+
+int delta_cpu(struct thread_data *t, struct core_data *c,
+ struct pkg_data *p, struct thread_data *t2,
+ struct core_data *c2, struct pkg_data *p2)
+{
+ /* calculate core delta only for 1st thread in core */
+ if (t->flags & CPU_IS_FIRST_THREAD_IN_CORE)
+ delta_core(c, c2);
+
+ /* always calculate thread delta */
+ delta_thread(t, t2, c2); /* c2 is core delta */
+
+ /* calculate package delta only for 1st core in package */
+ if (t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE)
+ delta_package(p, p2);
+
return 0;
}
-void compute_average(struct counters *delta, struct counters *avg)
+void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ t->tsc = 0;
+ t->aperf = 0;
+ t->mperf = 0;
+ t->c1 = 0;
+
+ /* tells format_counters to dump all fields from this set */
+ t->flags = CPU_IS_FIRST_THREAD_IN_CORE | CPU_IS_FIRST_CORE_IN_PACKAGE;
+
+ c->c3 = 0;
+ c->c6 = 0;
+ c->c7 = 0;
+
+ p->pc2 = 0;
+ p->pc3 = 0;
+ p->pc6 = 0;
+ p->pc7 = 0;
+}
+int sum_counters(struct thread_data *t, struct core_data *c,
+ struct pkg_data *p)
{
- struct counters *sum;
+ average.threads.tsc += t->tsc;
+ average.threads.aperf += t->aperf;
+ average.threads.mperf += t->mperf;
+ average.threads.c1 += t->c1;
- sum = calloc(1, sizeof(struct counters));
- if (sum == NULL) {
- perror("calloc sum");
- exit(1);
- }
+ /* sum per-core values only for 1st thread in core */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ return 0;
- for (; delta; delta = delta->next) {
- sum->tsc += delta->tsc;
- sum->c1 += delta->c1;
- sum->c3 += delta->c3;
- sum->c6 += delta->c6;
- sum->c7 += delta->c7;
- sum->aperf += delta->aperf;
- sum->mperf += delta->mperf;
- sum->pc2 += delta->pc2;
- sum->pc3 += delta->pc3;
- sum->pc6 += delta->pc6;
- sum->pc7 += delta->pc7;
- }
- avg->tsc = sum->tsc/num_cpus;
- avg->c1 = sum->c1/num_cpus;
- avg->c3 = sum->c3/num_cpus;
- avg->c6 = sum->c6/num_cpus;
- avg->c7 = sum->c7/num_cpus;
- avg->aperf = sum->aperf/num_cpus;
- avg->mperf = sum->mperf/num_cpus;
- avg->pc2 = sum->pc2/num_cpus;
- avg->pc3 = sum->pc3/num_cpus;
- avg->pc6 = sum->pc6/num_cpus;
- avg->pc7 = sum->pc7/num_cpus;
-
- free(sum);
+ average.cores.c3 += c->c3;
+ average.cores.c6 += c->c6;
+ average.cores.c7 += c->c7;
+
+ /* sum per-pkg values only for 1st core in pkg */
+ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ average.packages.pc2 += p->pc2;
+ average.packages.pc3 += p->pc3;
+ average.packages.pc6 += p->pc6;
+ average.packages.pc7 += p->pc7;
+
+ return 0;
+}
+/*
+ * sum the counters for all cpus in the system
+ * compute the weighted average
+ */
+void compute_average(struct thread_data *t, struct core_data *c,
+ struct pkg_data *p)
+{
+ clear_counters(&average.threads, &average.cores, &average.packages);
+
+ for_all_cpus(sum_counters, t, c, p);
+
+ average.threads.tsc /= topo.num_cpus;
+ average.threads.aperf /= topo.num_cpus;
+ average.threads.mperf /= topo.num_cpus;
+ average.threads.c1 /= topo.num_cpus;
+
+ average.cores.c3 /= topo.num_cores;
+ average.cores.c6 /= topo.num_cores;
+ average.cores.c7 /= topo.num_cores;
+
+ average.packages.pc2 /= topo.num_packages;
+ average.packages.pc3 /= topo.num_packages;
+ average.packages.pc6 /= topo.num_packages;
+ average.packages.pc7 /= topo.num_packages;
}
-int get_counters(struct counters *cnt)
+static unsigned long long rdtsc(void)
{
- for ( ; cnt; cnt = cnt->next) {
+ unsigned int low, high;
- if (cpu_migrate(cnt->cpu))
- return -1;
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
- if (get_msr(cnt->cpu, MSR_TSC, &cnt->tsc))
- return -1;
+ return low | ((unsigned long long)high) << 32;
+}
- if (has_aperf) {
- if (get_msr(cnt->cpu, MSR_APERF, &cnt->aperf))
- return -1;
- if (get_msr(cnt->cpu, MSR_MPERF, &cnt->mperf))
- return -1;
- }
- if (do_nhm_cstates) {
- if (get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY, &cnt->c3))
- return -1;
- if (get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY, &cnt->c6))
- return -1;
- }
+/*
+ * get_counters(...)
+ * migrate to cpu
+ * acquire and record local counters for that cpu
+ */
+int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
+{
+ int cpu = t->cpu_id;
- if (do_snb_cstates)
- if (get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY, &cnt->c7))
- return -1;
+ if (cpu_migrate(cpu))
+ return -1;
- if (do_nhm_cstates) {
- if (get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY, &cnt->pc3))
- return -1;
- if (get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY, &cnt->pc6))
- return -1;
- }
- if (do_snb_cstates) {
- if (get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY, &cnt->pc2))
- return -1;
- if (get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY, &cnt->pc7))
- return -1;
- }
- if (extra_msr_offset)
- if (get_msr(cnt->cpu, extra_msr_offset, &cnt->extra_msr))
- return -1;
+ t->tsc = rdtsc(); /* we are running on local CPU of interest */
+
+ if (has_aperf) {
+ if (get_msr(cpu, MSR_APERF, &t->aperf))
+ return -3;
+ if (get_msr(cpu, MSR_MPERF, &t->mperf))
+ return -4;
+ }
+
+ if (extra_msr_offset)
+ if (get_msr(cpu, extra_msr_offset, &t->extra_msr))
+ return -5;
+
+ /* collect core counters only for 1st thread in core */
+ if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
+ return 0;
+
+ if (do_nhm_cstates) {
+ if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
+ return -6;
+ if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
+ return -7;
+ }
+
+ if (do_snb_cstates)
+ if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
+ return -8;
+
+ /* collect package counters only for 1st core in package */
+ if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
+ return 0;
+
+ if (do_nhm_cstates) {
+ if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
+ return -9;
+ if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
+ return -10;
+ }
+ if (do_snb_cstates) {
+ if (get_msr(cpu, MSR_PKG_C2_RESIDENCY, &p->pc2))
+ return -11;
+ if (get_msr(cpu, MSR_PKG_C7_RESIDENCY, &p->pc7))
+ return -12;
}
return 0;
}
-void print_nehalem_info(void)
+void print_verbose_header(void)
{
unsigned long long msr;
unsigned int ratio;
@@ -615,143 +715,82 @@ void print_nehalem_info(void)
}
-void free_counter_list(struct counters *list)
+void free_all_buffers(void)
{
- struct counters *p;
+ CPU_FREE(cpu_present_set);
+ cpu_present_set = NULL;
+	cpu_present_setsize = 0;
- for (p = list; p; ) {
- struct counters *free_me;
+ CPU_FREE(cpu_affinity_set);
+ cpu_affinity_set = NULL;
+ cpu_affinity_setsize = 0;
- free_me = p;
- p = p->next;
- free(free_me);
- }
-}
+ free(thread_even);
+ free(core_even);
+ free(package_even);
-void free_all_counters(void)
-{
- free_counter_list(cnt_even);
- cnt_even = NULL;
+ thread_even = NULL;
+ core_even = NULL;
+ package_even = NULL;
- free_counter_list(cnt_odd);
- cnt_odd = NULL;
+ free(thread_odd);
+ free(core_odd);
+ free(package_odd);
- free_counter_list(cnt_delta);
- cnt_delta = NULL;
+ thread_odd = NULL;
+ core_odd = NULL;
+ package_odd = NULL;
- free_counter_list(cnt_average);
- cnt_average = NULL;
+ free(output_buffer);
+ output_buffer = NULL;
+ outp = NULL;
}
-void insert_counters(struct counters **list,
- struct counters *new)
+/*
+ * cpu_is_first_sibling_in_core(cpu)
+ * return 1 if given CPU is 1st HT sibling in the core
+ */
+int cpu_is_first_sibling_in_core(int cpu)
{
- struct counters *prev;
-
- /*
- * list was empty
- */
- if (*list == NULL) {
- new->next = *list;
- *list = new;
- return;
- }
-
- if (!summary_only)
- show_cpu = 1; /* there is more than one CPU */
-
- /*
- * insert on front of list.
- * It is sorted by ascending package#, core#, cpu#
- */
- if (((*list)->pkg > new->pkg) ||
- (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
- (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
- new->next = *list;
- *list = new;
- return;
- }
-
- prev = *list;
-
- while (prev->next && (prev->next->pkg < new->pkg)) {
- prev = prev->next;
- if (!summary_only)
- show_pkg = 1; /* there is more than 1 package */
- }
-
- while (prev->next && (prev->next->pkg == new->pkg)
- && (prev->next->core < new->core)) {
- prev = prev->next;
- if (!summary_only)
- show_core = 1; /* there is more than 1 core */
- }
+ char path[64];
+ FILE *filep;
+ int first_cpu;
- while (prev->next && (prev->next->pkg == new->pkg)
- && (prev->next->core == new->core)
- && (prev->next->cpu < new->cpu)) {
- prev = prev->next;
+ sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+ filep = fopen(path, "r");
+ if (filep == NULL) {
+ perror(path);
+ exit(1);
}
-
- /*
- * insert after "prev"
- */
- new->next = prev->next;
- prev->next = new;
+ fscanf(filep, "%d", &first_cpu);
+ fclose(filep);
+ return (cpu == first_cpu);
}
-void alloc_new_counters(int pkg, int core, int cpu)
+/*
+ * cpu_is_first_core_in_package(cpu)
+ * return 1 if given CPU is 1st core in package
+ */
+int cpu_is_first_core_in_package(int cpu)
{
- struct counters *new;
-
- if (verbose > 1)
- printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
-
- new = (struct counters *)calloc(1, sizeof(struct counters));
- if (new == NULL) {
- perror("calloc");
- exit(1);
- }
- new->pkg = pkg;
- new->core = core;
- new->cpu = cpu;
- insert_counters(&cnt_odd, new);
-
- new = (struct counters *)calloc(1,
- sizeof(struct counters));
- if (new == NULL) {
- perror("calloc");
- exit(1);
- }
- new->pkg = pkg;
- new->core = core;
- new->cpu = cpu;
- insert_counters(&cnt_even, new);
-
- new = (struct counters *)calloc(1, sizeof(struct counters));
- if (new == NULL) {
- perror("calloc");
- exit(1);
- }
- new->pkg = pkg;
- new->core = core;
- new->cpu = cpu;
- insert_counters(&cnt_delta, new);
+ char path[64];
+ FILE *filep;
+ int first_cpu;
- new = (struct counters *)calloc(1, sizeof(struct counters));
- if (new == NULL) {
- perror("calloc");
+ sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
+ filep = fopen(path, "r");
+ if (filep == NULL) {
+ perror(path);
exit(1);
}
- new->pkg = pkg;
- new->core = core;
- new->cpu = cpu;
- cnt_average = new;
+ fscanf(filep, "%d", &first_cpu);
+ fclose(filep);
+ return (cpu == first_cpu);
}
int get_physical_package_id(int cpu)
{
- char path[64];
+ char path[80];
FILE *filep;
int pkg;
@@ -768,7 +807,7 @@ int get_physical_package_id(int cpu)
int get_core_id(int cpu)
{
- char path[64];
+ char path[80];
FILE *filep;
int core;
@@ -783,14 +822,87 @@ int get_core_id(int cpu)
return core;
}
+int get_num_ht_siblings(int cpu)
+{
+ char path[80];
+ FILE *filep;
+ int sib1, sib2;
+ int matches;
+ char character;
+
+ sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+ filep = fopen(path, "r");
+ if (filep == NULL) {
+ perror(path);
+ exit(1);
+ }
+ /*
+ * file format:
+	 * if a pair of numbers separated by a character: 2 siblings (e.g. 1-2, or 1,4)
+	 * otherwise 1 sibling (self).
+ */
+ matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
+
+ fclose(filep);
+
+ if (matches == 3)
+ return 2;
+ else
+ return 1;
+}
+
/*
- * run func(pkg, core, cpu) on every cpu in /proc/stat
+ * run func(thread, core, package) in topology order
+ * skip non-present cpus
*/
-int for_all_cpus(void (func)(int, int, int))
+int for_all_cpus_2(int (func)(struct thread_data *, struct core_data *,
+ struct pkg_data *, struct thread_data *, struct core_data *,
+ struct pkg_data *), struct thread_data *thread_base,
+ struct core_data *core_base, struct pkg_data *pkg_base,
+ struct thread_data *thread_base2, struct core_data *core_base2,
+ struct pkg_data *pkg_base2)
+{
+ int retval, pkg_no, core_no, thread_no;
+
+ for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
+ for (core_no = 0; core_no < topo.num_cores_per_pkg; ++core_no) {
+ for (thread_no = 0; thread_no <
+ topo.num_threads_per_core; ++thread_no) {
+ struct thread_data *t, *t2;
+ struct core_data *c, *c2;
+ struct pkg_data *p, *p2;
+
+ t = GET_THREAD(thread_base, thread_no, core_no, pkg_no);
+
+ if (cpu_is_not_present(t->cpu_id))
+ continue;
+
+ t2 = GET_THREAD(thread_base2, thread_no, core_no, pkg_no);
+
+ c = GET_CORE(core_base, core_no, pkg_no);
+ c2 = GET_CORE(core_base2, core_no, pkg_no);
+
+ p = GET_PKG(pkg_base, pkg_no);
+ p2 = GET_PKG(pkg_base2, pkg_no);
+
+ retval = func(t, c, p, t2, c2, p2);
+ if (retval)
+ return retval;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * run func(cpu) on every cpu in /proc/stat
+ * return 0, or the first non-zero value returned by func()
+ */
+int for_all_proc_cpus(int (func)(int))
{
FILE *fp;
- int cpu_count;
+ int cpu_num;
int retval;
fp = fopen(proc_stat, "r");
@@ -805,78 +917,88 @@ int for_all_cpus(void (func)(int, int, int))
exit(1);
}
- for (cpu_count = 0; ; cpu_count++) {
- int cpu;
-
- retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
+ while (1) {
+ retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num);
if (retval != 1)
break;
- func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
+ retval = func(cpu_num);
+ if (retval) {
+ fclose(fp);
+ return(retval);
+ }
}
fclose(fp);
- return cpu_count;
+ return 0;
}
void re_initialize(void)
{
- free_all_counters();
- num_cpus = for_all_cpus(alloc_new_counters);
- cpu_mask_uninit();
- cpu_mask_init(num_cpus);
- printf("turbostat: re-initialized with num_cpus %d\n", num_cpus);
+ free_all_buffers();
+ setup_all_buffers();
+ printf("turbostat: re-initialized with num_cpus %d\n", topo.num_cpus);
}
-void dummy(int pkg, int core, int cpu) { return; }
+
/*
- * check to see if a cpu came on-line
+ * count_cpus()
+ * remember the last one seen, it will be the max
*/
-int verify_num_cpus(void)
+int count_cpus(int cpu)
{
- int new_num_cpus;
-
- new_num_cpus = for_all_cpus(dummy);
+ if (topo.max_cpu_num < cpu)
+ topo.max_cpu_num = cpu;
- if (new_num_cpus != num_cpus) {
- if (verbose)
- printf("num_cpus was %d, is now %d\n",
- num_cpus, new_num_cpus);
- return -1;
- }
+ topo.num_cpus += 1;
+ return 0;
+}
+int mark_cpu_present(int cpu)
+{
+ CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
return 0;
}
void turbostat_loop()
{
+ int retval;
+
restart:
- get_counters(cnt_even);
+ retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+ if (retval) {
+ re_initialize();
+ goto restart;
+ }
gettimeofday(&tv_even, (struct timezone *)NULL);
while (1) {
- if (verify_num_cpus()) {
+ if (for_all_proc_cpus(cpu_is_not_present)) {
re_initialize();
goto restart;
}
sleep(interval_sec);
- if (get_counters(cnt_odd)) {
+ retval = for_all_cpus(get_counters, ODD_COUNTERS);
+ if (retval) {
re_initialize();
goto restart;
}
gettimeofday(&tv_odd, (struct timezone *)NULL);
- compute_delta(cnt_odd, cnt_even, cnt_delta);
timersub(&tv_odd, &tv_even, &tv_delta);
- compute_average(cnt_delta, cnt_average);
- print_counters(cnt_delta);
+ for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
+ compute_average(EVEN_COUNTERS);
+ format_all_counters(EVEN_COUNTERS);
+ flush_stdout();
sleep(interval_sec);
- if (get_counters(cnt_even)) {
+ retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+ if (retval) {
re_initialize();
goto restart;
}
gettimeofday(&tv_even, (struct timezone *)NULL);
- compute_delta(cnt_even, cnt_odd, cnt_delta);
timersub(&tv_even, &tv_odd, &tv_delta);
- compute_average(cnt_delta, cnt_average);
- print_counters(cnt_delta);
+ for_all_cpus_2(delta_cpu, EVEN_COUNTERS, ODD_COUNTERS);
+ compute_average(ODD_COUNTERS);
+ format_all_counters(ODD_COUNTERS);
+ flush_stdout();
}
}
@@ -1051,6 +1173,208 @@ int open_dev_cpu_msr(int dummy1)
return 0;
}
+void topology_probe()
+{
+ int i;
+ int max_core_id = 0;
+ int max_package_id = 0;
+ int max_siblings = 0;
+ struct cpu_topology {
+ int core_id;
+ int physical_package_id;
+ } *cpus;
+
+ /* Initialize num_cpus, max_cpu_num */
+ topo.num_cpus = 0;
+ topo.max_cpu_num = 0;
+ for_all_proc_cpus(count_cpus);
+ if (!summary_only && topo.num_cpus > 1)
+ show_cpu = 1;
+
+ if (verbose > 1)
+ fprintf(stderr, "num_cpus %d max_cpu_num %d\n", topo.num_cpus, topo.max_cpu_num);
+
+ cpus = calloc(1, (topo.max_cpu_num + 1) * sizeof(struct cpu_topology));
+ if (cpus == NULL) {
+ perror("calloc cpus");
+ exit(1);
+ }
+
+ /*
+ * Allocate and initialize cpu_present_set
+ */
+ cpu_present_set = CPU_ALLOC((topo.max_cpu_num + 1));
+ if (cpu_present_set == NULL) {
+ perror("CPU_ALLOC");
+ exit(3);
+ }
+ cpu_present_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+ CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
+ for_all_proc_cpus(mark_cpu_present);
+
+ /*
+ * Allocate and initialize cpu_affinity_set
+ */
+ cpu_affinity_set = CPU_ALLOC((topo.max_cpu_num + 1));
+ if (cpu_affinity_set == NULL) {
+ perror("CPU_ALLOC");
+ exit(3);
+ }
+ cpu_affinity_setsize = CPU_ALLOC_SIZE((topo.max_cpu_num + 1));
+ CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
+
+
+ /*
+ * For online cpus
+ * find max_core_id, max_package_id
+ */
+ for (i = 0; i <= topo.max_cpu_num; ++i) {
+ int siblings;
+
+ if (cpu_is_not_present(i)) {
+ if (verbose > 1)
+ fprintf(stderr, "cpu%d NOT PRESENT\n", i);
+ continue;
+ }
+ cpus[i].core_id = get_core_id(i);
+ if (cpus[i].core_id > max_core_id)
+ max_core_id = cpus[i].core_id;
+
+ cpus[i].physical_package_id = get_physical_package_id(i);
+ if (cpus[i].physical_package_id > max_package_id)
+ max_package_id = cpus[i].physical_package_id;
+
+ siblings = get_num_ht_siblings(i);
+ if (siblings > max_siblings)
+ max_siblings = siblings;
+ if (verbose > 1)
+ fprintf(stderr, "cpu %d pkg %d core %d\n",
+ i, cpus[i].physical_package_id, cpus[i].core_id);
+ }
+ topo.num_cores_per_pkg = max_core_id + 1;
+ if (verbose > 1)
+ fprintf(stderr, "max_core_id %d, sizing for %d cores per package\n",
+ max_core_id, topo.num_cores_per_pkg);
+ if (!summary_only && topo.num_cores_per_pkg > 1)
+ show_core = 1;
+
+ topo.num_packages = max_package_id + 1;
+ if (verbose > 1)
+ fprintf(stderr, "max_package_id %d, sizing for %d packages\n",
+ max_package_id, topo.num_packages);
+ if (!summary_only && topo.num_packages > 1)
+ show_pkg = 1;
+
+ topo.num_threads_per_core = max_siblings;
+ if (verbose > 1)
+ fprintf(stderr, "max_siblings %d\n", max_siblings);
+
+ free(cpus);
+}
+
+void
+allocate_counters(struct thread_data **t, struct core_data **c, struct pkg_data **p)
+{
+ int i;
+
+ *t = calloc(topo.num_threads_per_core * topo.num_cores_per_pkg *
+ topo.num_packages, sizeof(struct thread_data));
+ if (*t == NULL)
+ goto error;
+
+ for (i = 0; i < topo.num_threads_per_core *
+ topo.num_cores_per_pkg * topo.num_packages; i++)
+ (*t)[i].cpu_id = -1;
+
+ *c = calloc(topo.num_cores_per_pkg * topo.num_packages,
+ sizeof(struct core_data));
+ if (*c == NULL)
+ goto error;
+
+ for (i = 0; i < topo.num_cores_per_pkg * topo.num_packages; i++)
+ (*c)[i].core_id = -1;
+
+ *p = calloc(topo.num_packages, sizeof(struct pkg_data));
+ if (*p == NULL)
+ goto error;
+
+ for (i = 0; i < topo.num_packages; i++)
+ (*p)[i].package_id = i;
+
+ return;
+error:
+ perror("calloc counters");
+ exit(1);
+}
+/*
+ * init_counter()
+ *
+ * set cpu_id, core_num, pkg_num
+ * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
+ *
+ * increment topo.num_cores when 1st core in pkg seen
+ */
+void init_counter(struct thread_data *thread_base, struct core_data *core_base,
+ struct pkg_data *pkg_base, int thread_num, int core_num,
+ int pkg_num, int cpu_id)
+{
+ struct thread_data *t;
+ struct core_data *c;
+ struct pkg_data *p;
+
+ t = GET_THREAD(thread_base, thread_num, core_num, pkg_num);
+ c = GET_CORE(core_base, core_num, pkg_num);
+ p = GET_PKG(pkg_base, pkg_num);
+
+ t->cpu_id = cpu_id;
+ if (thread_num == 0) {
+ t->flags |= CPU_IS_FIRST_THREAD_IN_CORE;
+ if (cpu_is_first_core_in_package(cpu_id))
+ t->flags |= CPU_IS_FIRST_CORE_IN_PACKAGE;
+ }
+
+ c->core_id = core_num;
+ p->package_id = pkg_num;
+}
+
+
+int initialize_counters(int cpu_id)
+{
+ int my_thread_id, my_core_id, my_package_id;
+
+ my_package_id = get_physical_package_id(cpu_id);
+ my_core_id = get_core_id(cpu_id);
+
+ if (cpu_is_first_sibling_in_core(cpu_id)) {
+ my_thread_id = 0;
+ topo.num_cores++;
+ } else {
+ my_thread_id = 1;
+ }
+
+ init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
+ init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
+ return 0;
+}
+
+void allocate_output_buffer()
+{
+ output_buffer = calloc(1, (1 + topo.num_cpus) * 128);
+ outp = output_buffer;
+ if (outp == NULL) {
+ perror("calloc");
+ exit(-1);
+ }
+}
+
+void setup_all_buffers(void)
+{
+ topology_probe();
+ allocate_counters(&thread_even, &core_even, &package_even);
+ allocate_counters(&thread_odd, &core_odd, &package_odd);
+ allocate_output_buffer();
+ for_all_proc_cpus(initialize_counters);
+}
void turbostat_init()
{
check_cpuid();
@@ -1058,21 +1382,19 @@ void turbostat_init()
check_dev_msr();
check_super_user();
- num_cpus = for_all_cpus(alloc_new_counters);
- cpu_mask_init(num_cpus);
+ setup_all_buffers();
if (verbose)
- print_nehalem_info();
+ print_verbose_header();
}
int fork_it(char **argv)
{
- int retval;
pid_t child_pid;
- get_counters(cnt_even);
- /* clear affinity side-effect of get_counters() */
- sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
+ for_all_cpus(get_counters, EVEN_COUNTERS);
+ /* clear affinity side-effect of get_counters() */
+ sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
gettimeofday(&tv_even, (struct timezone *)NULL);
child_pid = fork();
@@ -1095,14 +1417,17 @@ int fork_it(char **argv)
exit(1);
}
}
- get_counters(cnt_odd);
+ /*
+ * n.b. fork_it() does not check for errors from for_all_cpus()
+ * because re-starting is problematic when forking
+ */
+ for_all_cpus(get_counters, ODD_COUNTERS);
gettimeofday(&tv_odd, (struct timezone *)NULL);
- retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
-
timersub(&tv_odd, &tv_even, &tv_delta);
- compute_average(cnt_delta, cnt_average);
- if (!retval)
- print_counters(cnt_delta);
+ for_all_cpus_2(delta_cpu, ODD_COUNTERS, EVEN_COUNTERS);
+ compute_average(EVEN_COUNTERS);
+ format_all_counters(EVEN_COUNTERS);
+ flush_stderr();
fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
@@ -1115,8 +1440,14 @@ void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt(argc, argv, "+svi:M:")) != -1) {
+ while ((opt = getopt(argc, argv, "+cpsvi:M:")) != -1) {
switch (opt) {
+ case 'c':
+ show_core_only++;
+ break;
+ case 'p':
+ show_pkg_only++;
+ break;
case 's':
summary_only++;
break;
@@ -1142,10 +1473,8 @@ int main(int argc, char **argv)
cmdline(argc, argv);
if (verbose > 1)
- fprintf(stderr, "turbostat Dec 6, 2010"
+ fprintf(stderr, "turbostat v2.0 May 16, 2012"
" - Len Brown <lenb@kernel.org>\n");
- if (verbose > 1)
- fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
turbostat_init();
diff --git a/tools/testing/fault-injection/failcmd.sh b/tools/testing/fault-injection/failcmd.sh
new file mode 100644
index 000000000000..78a9ed7fecdb
--- /dev/null
+++ b/tools/testing/fault-injection/failcmd.sh
@@ -0,0 +1,219 @@
+#!/bin/bash
+#
+# NAME
+#	failcmd.sh - run a command while injecting slab/page allocation failures
+#
+# SYNOPSIS
+# failcmd.sh --help
+# failcmd.sh [<options>] command [arguments]
+#
+# DESCRIPTION
+#	Run the given command while injecting slab/page allocation failures
+#	via the fault injection framework.
+#
+# NOTE: you need to run this script as root.
+#
+
+usage()
+{
+ cat >&2 <<EOF
+Usage: $0 [options] command [arguments]
+
+OPTIONS
+ -p percent
+ --probability=percent
+ likelihood of failure injection, in percent.
+ Default value is 1
+
+ -t value
+ --times=value
+ specifies how many times failures may happen at most.
+ Default value is 1
+
+ --oom-kill-allocating-task=value
+ set /proc/sys/vm/oom_kill_allocating_task to specified value
+ before running the command.
+ Default value is 1
+
+ -h, --help
+ Display a usage message and exit
+
+ --interval=value, --space=value, --verbose=value, --task-filter=value,
+ --stacktrace-depth=value, --require-start=value, --require-end=value,
+ --reject-start=value, --reject-end=value, --ignore-gfp-wait=value
+ See Documentation/fault-injection/fault-injection.txt for more
+ information
+
+ failslab options:
+ --cache-filter=value
+
+ fail_page_alloc options:
+ --ignore-gfp-highmem=value, --min-order=value
+
+ENVIRONMENT
+ FAILCMD_TYPE
+ The following values for FAILCMD_TYPE are recognized:
+
+ failslab
+ inject slab allocation failures
+ fail_page_alloc
+ inject page allocation failures
+
+ If FAILCMD_TYPE is not defined, then failslab is used.
+EOF
+}
+
+if [ $UID != 0 ]; then
+ echo must be run as root >&2
+ exit 1
+fi
+
+DEBUGFS=`mount -t debugfs | head -1 | awk '{ print $3}'`
+
+if [ ! -d "$DEBUGFS" ]; then
+ echo debugfs is not mounted >&2
+ exit 1
+fi
+
+FAILCMD_TYPE=${FAILCMD_TYPE:-failslab}
+FAULTATTR=$DEBUGFS/$FAILCMD_TYPE
+
+if [ ! -d $FAULTATTR ]; then
+ echo $FAILCMD_TYPE is not available >&2
+ exit 1
+fi
+
+LONGOPTS=probability:,interval:,times:,space:,verbose:,task-filter:
+LONGOPTS=$LONGOPTS,stacktrace-depth:,require-start:,require-end:
+LONGOPTS=$LONGOPTS,reject-start:,reject-end:,oom-kill-allocating-task:,help
+
+if [ $FAILCMD_TYPE = failslab ]; then
+ LONGOPTS=$LONGOPTS,ignore-gfp-wait:,cache-filter:
+elif [ $FAILCMD_TYPE = fail_page_alloc ]; then
+ LONGOPTS=$LONGOPTS,ignore-gfp-wait:,ignore-gfp-highmem:,min-order:
+fi
+
+TEMP=`getopt -o p:i:t:s:v:h --long $LONGOPTS -n 'failcmd.sh' -- "$@"`
+
+if [ $? != 0 ]; then
+ usage
+ exit 1
+fi
+
+eval set -- "$TEMP"
+
+fault_attr_default()
+{
+ echo N > $FAULTATTR/task-filter
+ echo 0 > $FAULTATTR/probability
+ echo 1 > $FAULTATTR/times
+}
+
+fault_attr_default
+
+oom_kill_allocating_task_saved=`cat /proc/sys/vm/oom_kill_allocating_task`
+
+restore_values()
+{
+ fault_attr_default
+ echo $oom_kill_allocating_task_saved \
+ > /proc/sys/vm/oom_kill_allocating_task
+}
+
+#
+# Default options
+#
+declare -i oom_kill_allocating_task=1
+declare task_filter=Y
+declare -i probability=1
+declare -i times=1
+
+while true; do
+ case "$1" in
+ -p|--probability)
+ probability=$2
+ shift 2
+ ;;
+ -i|--interval)
+ echo $2 > $FAULTATTR/interval
+ shift 2
+ ;;
+ -t|--times)
+ times=$2
+ shift 2
+ ;;
+ -s|--space)
+ echo $2 > $FAULTATTR/space
+ shift 2
+ ;;
+ -v|--verbose)
+ echo $2 > $FAULTATTR/verbose
+ shift 2
+ ;;
+ --task-filter)
+ task_filter=$2
+ shift 2
+ ;;
+ --stacktrace-depth)
+ echo $2 > $FAULTATTR/stacktrace-depth
+ shift 2
+ ;;
+ --require-start)
+ echo $2 > $FAULTATTR/require-start
+ shift 2
+ ;;
+ --require-end)
+ echo $2 > $FAULTATTR/require-end
+ shift 2
+ ;;
+ --reject-start)
+ echo $2 > $FAULTATTR/reject-start
+ shift 2
+ ;;
+ --reject-end)
+ echo $2 > $FAULTATTR/reject-end
+ shift 2
+ ;;
+ --oom-kill-allocating-task)
+ oom_kill_allocating_task=$2
+ shift 2
+ ;;
+ --ignore-gfp-wait)
+ echo $2 > $FAULTATTR/ignore-gfp-wait
+ shift 2
+ ;;
+ --cache-filter)
+ echo $2 > $FAULTATTR/cache_filter
+ shift 2
+ ;;
+ --ignore-gfp-highmem)
+ echo $2 > $FAULTATTR/ignore-gfp-highmem
+ shift 2
+ ;;
+ --min-order)
+ echo $2 > $FAULTATTR/min-order
+ shift 2
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ shift
+ ;;
+ --)
+ shift
+ break
+ ;;
+ esac
+done
+
+[ -z "$1" ] && exit 0
+
+echo $oom_kill_allocating_task > /proc/sys/vm/oom_kill_allocating_task
+echo $task_filter > $FAULTATTR/task-filter
+echo $probability > $FAULTATTR/probability
+echo $times > $FAULTATTR/times
+
+trap "restore_values" SIGINT SIGTERM EXIT
+
+cmd="echo 1 > /proc/self/make-it-fail && exec $@"
+bash -c "$cmd"
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index 292b13ad03f5..52b7959cd513 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -52,6 +52,7 @@ my %default = (
"STOP_AFTER_SUCCESS" => 10,
"STOP_AFTER_FAILURE" => 60,
"STOP_TEST_AFTER" => 600,
+ "MAX_MONITOR_WAIT" => 1800,
# required, and we will ask users if they don't have them but we keep the default
# value something that is common.
@@ -77,6 +78,11 @@ my $output_config;
my $test_type;
my $build_type;
my $build_options;
+my $final_post_ktest;
+my $pre_ktest;
+my $post_ktest;
+my $pre_test;
+my $post_test;
my $pre_build;
my $post_build;
my $pre_build_die;
@@ -93,6 +99,7 @@ my $reboot_on_success;
my $die_on_failure;
my $powercycle_after_reboot;
my $poweroff_after_halt;
+my $max_monitor_wait;
my $ssh_exec;
my $scp_to_target;
my $scp_to_target_install;
@@ -101,6 +108,7 @@ my $grub_menu;
my $grub_number;
my $target;
my $make;
+my $pre_install;
my $post_install;
my $no_install;
my $noclean;
@@ -167,6 +175,7 @@ my $bisect_check;
my $config_bisect;
my $config_bisect_type;
+my $config_bisect_check;
my $patchcheck_type;
my $patchcheck_start;
@@ -182,6 +191,9 @@ my $newconfig = 0;
my %entered_configs;
my %config_help;
my %variable;
+
+# force_config is the list of configs that we force enabled (or disabled)
+# in a .config file (the MIN_CONFIG and ADD_CONFIG configs).
my %force_config;
# do not force reboots on config problems
@@ -197,6 +209,10 @@ my %option_map = (
"OUTPUT_DIR" => \$outputdir,
"BUILD_DIR" => \$builddir,
"TEST_TYPE" => \$test_type,
+ "PRE_KTEST" => \$pre_ktest,
+ "POST_KTEST" => \$post_ktest,
+ "PRE_TEST" => \$pre_test,
+ "POST_TEST" => \$post_test,
"BUILD_TYPE" => \$build_type,
"BUILD_OPTIONS" => \$build_options,
"PRE_BUILD" => \$pre_build,
@@ -216,6 +232,7 @@ my %option_map = (
"ADD_CONFIG" => \$addconfig,
"REBOOT_TYPE" => \$reboot_type,
"GRUB_MENU" => \$grub_menu,
+ "PRE_INSTALL" => \$pre_install,
"POST_INSTALL" => \$post_install,
"NO_INSTALL" => \$no_install,
"REBOOT_SCRIPT" => \$reboot_script,
@@ -228,6 +245,7 @@ my %option_map = (
"POWER_OFF" => \$power_off,
"POWERCYCLE_AFTER_REBOOT" => \$powercycle_after_reboot,
"POWEROFF_AFTER_HALT" => \$poweroff_after_halt,
+ "MAX_MONITOR_WAIT" => \$max_monitor_wait,
"SLEEP_TIME" => \$sleep_time,
"BISECT_SLEEP_TIME" => \$bisect_sleep_time,
"PATCHCHECK_SLEEP_TIME" => \$patchcheck_sleep_time,
@@ -272,6 +290,7 @@ my %option_map = (
"CONFIG_BISECT" => \$config_bisect,
"CONFIG_BISECT_TYPE" => \$config_bisect_type,
+ "CONFIG_BISECT_CHECK" => \$config_bisect_check,
"PATCHCHECK_TYPE" => \$patchcheck_type,
"PATCHCHECK_START" => \$patchcheck_start,
@@ -604,6 +623,10 @@ sub process_compare {
return $lval eq $rval;
} elsif ($cmp eq "!=") {
return $lval ne $rval;
+ } elsif ($cmp eq "=~") {
+ return $lval =~ m/$rval/;
+ } elsif ($cmp eq "!~") {
+ return $lval !~ m/$rval/;
}
my $statement = "$lval $cmp $rval";
@@ -659,7 +682,7 @@ sub process_expression {
}
}
- if ($val =~ /(.*)(==|\!=|>=|<=|>|<)(.*)/) {
+ if ($val =~ /(.*)(==|\!=|>=|<=|>|<|=~|\!~)(.*)/) {
my $ret = process_compare($1, $2, $3);
if ($ret < 0) {
die "$name: $.: Unable to process comparison\n";
@@ -1117,7 +1140,11 @@ sub reboot {
}
if (defined($time)) {
- wait_for_monitor($time, $reboot_success_line);
+ if (wait_for_monitor($time, $reboot_success_line)) {
+ # reboot got stuck?
+ doprint "Reboot did not finish. Forcing power cycle\n";
+ run_command "$power_cycle";
+ }
end_monitor;
}
}
@@ -1212,6 +1239,11 @@ sub wait_for_monitor {
my $full_line = "";
my $line;
my $booted = 0;
+ my $start_time = time;
+ my $skip_call_trace = 0;
+ my $bug = 0;
+ my $bug_ignored = 0;
+ my $now;
doprint "** Wait for monitor to settle down **\n";
@@ -1227,11 +1259,39 @@ sub wait_for_monitor {
$booted = 1;
}
+ if ($full_line =~ /\[ backtrace testing \]/) {
+ $skip_call_trace = 1;
+ }
+
+ if ($full_line =~ /call trace:/i) {
+ if (!$bug && !$skip_call_trace) {
+ if ($ignore_errors) {
+ $bug_ignored = 1;
+ } else {
+ $bug = 1;
+ }
+ }
+ }
+
+ if ($full_line =~ /\[ end of backtrace testing \]/) {
+ $skip_call_trace = 0;
+ }
+
+ if ($full_line =~ /Kernel panic -/) {
+ $bug = 1;
+ }
+
if ($line =~ /\n/) {
$full_line = "";
}
+ $now = time;
+ if ($now - $start_time >= $max_monitor_wait) {
+ doprint "Exiting monitor flush due to hitting MAX_MONITOR_WAIT\n";
+ return 1;
+ }
}
print "** Monitor flushed **\n";
+ return $bug;
}
sub save_logs {
@@ -1273,6 +1333,10 @@ sub save_logs {
sub fail {
+ if (defined($post_test)) {
+ run_command $post_test;
+ }
+
if ($die_on_failure) {
dodie @_;
}
@@ -1656,6 +1720,12 @@ sub install {
return if ($no_install);
+ if (defined($pre_install)) {
+ my $cp_pre_install = eval_kernel_version $pre_install;
+ run_command "$cp_pre_install" or
+ dodie "Failed to run pre install";
+ }
+
my $cp_target = eval_kernel_version $target_image;
run_scp_install "$outputdir/$build_target", "$cp_target" or
@@ -1814,6 +1884,7 @@ sub make_oldconfig {
sub load_force_config {
my ($config) = @_;
+ doprint "Loading force configs from $config\n";
open(IN, $config) or
dodie "failed to read $config";
while (<IN>) {
@@ -1937,6 +2008,10 @@ sub halt {
sub success {
my ($i) = @_;
+ if (defined($post_test)) {
+ run_command $post_test;
+ }
+
$successes++;
my $name = "";
@@ -2003,6 +2078,7 @@ sub do_run_test {
my $line;
my $full_line;
my $bug = 0;
+ my $bug_ignored = 0;
wait_for_monitor 1;
@@ -2027,7 +2103,11 @@ sub do_run_test {
doprint $line;
if ($full_line =~ /call trace:/i) {
- $bug = 1;
+ if ($ignore_errors) {
+ $bug_ignored = 1;
+ } else {
+ $bug = 1;
+ }
}
if ($full_line =~ /Kernel panic -/) {
@@ -2040,6 +2120,10 @@ sub do_run_test {
}
} while (!$child_done && !$bug);
+ if (!$bug && $bug_ignored) {
+ doprint "WARNING: Call Trace detected but ignored due to IGNORE_ERRORS=1\n";
+ }
+
if ($bug) {
my $failure_start = time;
my $now;
@@ -2362,9 +2446,24 @@ sub bisect {
success $i;
}
+# config_ignore holds the configs that were set (or unset) for
+# a good config and we will ignore these configs for the rest
+# of a config bisect. These configs stay as they were.
my %config_ignore;
+
+# config_set holds what all configs were set as.
my %config_set;
+# config_off holds the set of configs that the bad config had disabled.
+# We need to record them and set them in the .config when running
+# oldnoconfig, because oldnoconfig does not turn off new symbols, but
+# instead just keeps the defaults.
+my %config_off;
+
+# config_off_tmp holds a set of configs to turn off for now
+my @config_off_tmp;
+
+# config_list is the set of configs that are being tested
my %config_list;
my %null_config;
@@ -2443,12 +2542,21 @@ sub create_config {
}
}
+ # turn off configs to keep off
+ foreach my $config (keys %config_off) {
+ print OUT "# $config is not set\n";
+ }
+
+ # turn off configs that should be off for now
+ foreach my $config (@config_off_tmp) {
+ print OUT "# $config is not set\n";
+ }
+
foreach my $config (keys %config_ignore) {
print OUT "$config_ignore{$config}\n";
}
close(OUT);
-# exit;
make_oldconfig;
}
@@ -2525,6 +2633,13 @@ sub run_config_bisect {
do {
my @tophalf = @start_list[0 .. $half];
+ # keep the bottom half off
+ if ($half < $#start_list) {
+ @config_off_tmp = @start_list[$half + 1 .. $#start_list];
+ } else {
+ @config_off_tmp = ();
+ }
+
create_config @tophalf;
read_current_config \%current_config;
@@ -2541,7 +2656,11 @@ sub run_config_bisect {
if (!$found) {
# try the other half
doprint "Top half produced no set configs, trying bottom half\n";
+
+ # keep the top half off
+ @config_off_tmp = @tophalf;
@tophalf = @start_list[$half + 1 .. $#start_list];
+
create_config @tophalf;
read_current_config \%current_config;
foreach my $config (@tophalf) {
@@ -2679,6 +2798,10 @@ sub config_bisect {
$added_configs{$2} = $1;
$config_list{$2} = $1;
}
+ } elsif (/^# ((CONFIG\S*).*)/) {
+ # Keep these configs disabled
+ $config_set{$2} = $1;
+ $config_off{$2} = $1;
}
}
close(IN);
@@ -2701,6 +2824,8 @@ sub config_bisect {
my %config_test;
my $once = 0;
+ @config_off_tmp = ();
+
# Sometimes kconfig does weird things. We must make sure
# that the config we autocreate has everything we need
# to test, otherwise we may miss testing configs, or
@@ -2719,6 +2844,18 @@ sub config_bisect {
}
}
my $ret;
+
+ if (defined($config_bisect_check) && $config_bisect_check) {
+ doprint " Checking to make sure bad config with min config fails\n";
+ create_config keys %config_list;
+ $ret = run_config_bisect_test $config_bisect_type;
+ if ($ret) {
+ doprint " FAILED! Bad config with min config boots fine\n";
+ return -1;
+ }
+ doprint " Bad config with min config fails as expected\n";
+ }
+
do {
$ret = run_config_bisect;
} while (!$ret);
@@ -3510,6 +3647,8 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$iteration = $i;
+ undef %force_config;
+
my $makecmd = set_test_option("MAKE_CMD", $i);
# Load all the options into their mapped variable names
@@ -3519,6 +3658,18 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
$start_minconfig_defined = 1;
+ # The first test may override the PRE_KTEST option
+ if (defined($pre_ktest) && $i == 1) {
+ doprint "\n";
+ run_command $pre_ktest;
+ }
+
+ # Any test can override the POST_KTEST option
+ # The last test takes precedence.
+ if (defined($post_ktest)) {
+ $final_post_ktest = $post_ktest;
+ }
+
if (!defined($start_minconfig)) {
$start_minconfig_defined = 0;
$start_minconfig = $minconfig;
@@ -3573,6 +3724,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
doprint "\n\n";
doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type$installme\n\n";
+ if (defined($pre_test)) {
+ run_command $pre_test;
+ }
+
unlink $dmesg;
unlink $buildlog;
unlink $testlog;
@@ -3638,6 +3793,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
success $i;
}
+if (defined($final_post_ktest)) {
+ run_command $final_post_ktest;
+}
+
if ($opt{"POWEROFF_ON_SUCCESS"}) {
halt;
} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot && $reboot_success) {
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
index cf362b3d1ec9..de28a0a3b8fc 100644
--- a/tools/testing/ktest/sample.conf
+++ b/tools/testing/ktest/sample.conf
@@ -376,6 +376,24 @@
# DEFAULTS
# DEFAULTS SKIP
+# If you want to execute some command before the first test runs,
+# you can set this option. Note, it can be set as a default option
+# or an option in the first test case. All other test cases will
+# ignore it. If both the default and first test have this option
+# set, then the first test will take precedence.
+#
+# default (undefined)
+#PRE_KTEST = ${SSH} ~/set_up_test
+
+# If you want to execute some command after all the tests have
+# completed, you can set this option. Note, it can be set as a
+# default or any test case can override it. If multiple test cases
+# set this option, then the last test case that sets it will take
+# precedence.
+#
+# default (undefined)
+#POST_KTEST = ${SSH} ~/dismantle_test
+
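As a purely hypothetical sketch of what the ~/set_up_test helper from the example above might contain (the matching ~/dismantle_test would undo the same steps):

  #!/bin/bash
  # set_up_test (hypothetical): one-time preparation on the test machine
  dmesg -c > /dev/null             # clear stale kernel messages before the run
  mkdir -p /var/tmp/ktest-results  # assumed scratch directory for test artifacts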
# The default test type (default test)
# The test types may be:
# build - only build the kernel, do nothing else
@@ -408,6 +426,14 @@
# (default "")
#BUILD_OPTIONS = -j20
+# If you need to do some special handling before installing
+# you can add a script with this option.
+# The environment variable KERNEL_VERSION will be set to the
+# kernel version that is used.
+#
+# default (undefined)
+#PRE_INSTALL = ssh user@target rm -rf '/lib/modules/*-test*'
+
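A hedged sketch of a PRE_INSTALL helper that uses the KERNEL_VERSION environment variable described above; the script name, host, and path are assumptions, and it would be referenced as PRE_INSTALL = /path/to/pre_install.sh:

  #!/bin/bash
  # pre_install.sh (hypothetical): remove stale modules for the version about to be installed
  ssh root@target "rm -rf /lib/modules/${KERNEL_VERSION}"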
# If you need an initrd, you can add a script or code here to install
# it. The environment variable KERNEL_VERSION will be set to the
# kernel version that is used. Remember to add the initrd line
@@ -426,6 +452,18 @@
# (default 0)
#NO_INSTALL = 1
+# If there is a command that you want to run before the individual test
+# case executes, then you can set this option.
+#
+# default (undefined)
+#PRE_TEST = ${SSH} reboot_to_special_kernel
+
+# If there is a command you want to run after the individual test case
+# completes, then you can set this option.
+#
+# default (undefined)
+#POST_TEST = cd ${BUILD_DIR}; git reset --hard
+
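As an illustration only, POST_TEST can also be used to collect logs from the target after each test case; the host and paths below are assumptions:

  #!/bin/bash
  # post_test.sh (hypothetical): save the target's kernel log after each test case
  mkdir -p ~/ktest-logs
  ssh root@target dmesg > ~/ktest-logs/dmesg.$(date +%s)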
# If there is a script that you require to run before the build is done
# you can specify it with PRE_BUILD.
#
@@ -657,6 +695,14 @@
# (default 60)
#BISECT_SLEEP_TIME = 60
+# The maximum time (in seconds) to wait for the console (monitor)
+# to settle down before continuing. If for some reason the console
+# keeps producing output and never settles, this option keeps ktest
+# from getting stuck waiting on it forever.
+# (default 1800)
+#MAX_MONITOR_WAIT = 1800
+
# The time in between patch checks to sleep (in seconds)
# (default 60)
#PATCHCHECK_SLEEP_TIME = 60
@@ -1039,6 +1085,12 @@
# can specify it with CONFIG_BISECT_GOOD. Otherwise
# the MIN_CONFIG is the base.
#
+# CONFIG_BISECT_CHECK (optional)
+# Set this to 1 if you want to confirm that the config ktest
+# generates (the bad config with the min config) is still bad.
+# It may be that the min config fixes what broke the bad config,
+# in which case the config bisect will never return a result.
+#
# Example:
# TEST_START
# TEST_TYPE = config_bisect
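For intuition, the check that CONFIG_BISECT_CHECK automates corresponds roughly to this manual procedure (file names are hypothetical; merge_config.sh is assumed to be present in the kernel tree being tested):

  # merge the minimum config on top of the known-bad config, then build and boot it
  scripts/kconfig/merge_config.sh -m bad.config min.config
  make oldnoconfig
  make -j"$(nproc)"
  # install and boot the result; if it no longer fails, a config bisect cannot succeed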
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index a4162e15c25f..85baf11e2acd 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,4 @@
-TARGETS = breakpoints kcmp mqueue vm
+TARGETS = breakpoints kcmp mqueue vm cpu-hotplug memory-hotplug
all:
for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
new file mode 100644
index 000000000000..7c9c20ff578a
--- /dev/null
+++ b/tools/testing/selftests/cpu-hotplug/Makefile
@@ -0,0 +1,6 @@
+all:
+
+run_tests:
+ ./on-off-test.sh
+
+clean:
diff --git a/tools/testing/selftests/cpu-hotplug/on-off-test.sh b/tools/testing/selftests/cpu-hotplug/on-off-test.sh
new file mode 100644
index 000000000000..bdde7cf428bb
--- /dev/null
+++ b/tools/testing/selftests/cpu-hotplug/on-off-test.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+
+SYSFS=
+
+prerequisite()
+{
+ msg="skip all tests:"
+
+ if [ $UID != 0 ]; then
+ echo $msg must be run as root >&2
+ exit 0
+ fi
+
+ SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
+
+ if [ ! -d "$SYSFS" ]; then
+ echo $msg sysfs is not mounted >&2
+ exit 0
+ fi
+
+ if ! ls $SYSFS/devices/system/cpu/cpu* > /dev/null 2>&1; then
+ echo $msg cpu hotplug is not supported >&2
+ exit 0
+ fi
+}
+
+#
+# list all hot-pluggable CPUs
+#
+hotpluggable_cpus()
+{
+ local state=${1:-.\*}
+
+ for cpu in $SYSFS/devices/system/cpu/cpu*; do
+ if [ -f $cpu/online ] && grep -q $state $cpu/online; then
+ echo ${cpu##/*/cpu}
+ fi
+ done
+}
+
+hotpluggable_offline_cpus()
+{
+ hotpluggable_cpus 0
+}
+
+hotpluggable_online_cpus()
+{
+ hotpluggable_cpus 1
+}
+
+cpu_is_online()
+{
+ grep -q 1 $SYSFS/devices/system/cpu/cpu$1/online
+}
+
+cpu_is_offline()
+{
+ grep -q 0 $SYSFS/devices/system/cpu/cpu$1/online
+}
+
+online_cpu()
+{
+ echo 1 > $SYSFS/devices/system/cpu/cpu$1/online
+}
+
+offline_cpu()
+{
+ echo 0 > $SYSFS/devices/system/cpu/cpu$1/online
+}
+
+online_cpu_expect_success()
+{
+ local cpu=$1
+
+ if ! online_cpu $cpu; then
+ echo $FUNCNAME $cpu: unexpected fail >&2
+ elif ! cpu_is_online $cpu; then
+ echo $FUNCNAME $cpu: unexpected offline >&2
+ fi
+}
+
+online_cpu_expect_fail()
+{
+ local cpu=$1
+
+ if online_cpu $cpu 2> /dev/null; then
+ echo $FUNCNAME $cpu: unexpected success >&2
+ elif ! cpu_is_offline $cpu; then
+ echo $FUNCNAME $cpu: unexpected online >&2
+ fi
+}
+
+offline_cpu_expect_success()
+{
+ local cpu=$1
+
+ if ! offline_cpu $cpu; then
+ echo $FUNCNAME $cpu: unexpected fail >&2
+ elif ! cpu_is_offline $cpu; then
+ echo $FUNCNAME $cpu: unexpected online >&2
+ fi
+}
+
+offline_cpu_expect_fail()
+{
+ local cpu=$1
+
+ if offline_cpu $cpu 2> /dev/null; then
+ echo $FUNCNAME $cpu: unexpected success >&2
+ elif ! cpu_is_online $cpu; then
+ echo $FUNCNAME $cpu: unexpected offline >&2
+ fi
+}
+
+error=-12
+priority=0
+
+while getopts e:hp: opt; do
+ case $opt in
+ e)
+ error=$OPTARG
+ ;;
+ h)
+ echo "Usage: $0 [ -e errno ] [ -p notifier-priority ]"
+ exit
+ ;;
+ p)
+ priority=$OPTARG
+ ;;
+ esac
+done
+
+if ! [ "$error" -ge -4095 -a "$error" -lt 0 ]; then
+ echo "error code must be -4095 <= errno < 0" >&2
+ exit 1
+fi
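For reference, a couple of possible invocations of this script (the option values are illustrative, not part of the patch):

  ./on-off-test.sh                 # defaults: inject errno -12 with notifier priority 0
  ./on-off-test.sh -e -16 -p 100   # e.g. inject -EBUSY from a high-priority notifier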
+
+prerequisite
+
+#
+# Online all hot-pluggable CPUs
+#
+for cpu in `hotpluggable_offline_cpus`; do
+ online_cpu_expect_success $cpu
+done
+
+#
+# Offline all hot-pluggable CPUs
+#
+for cpu in `hotpluggable_online_cpus`; do
+ offline_cpu_expect_success $cpu
+done
+
+#
+# Online all hot-pluggable CPUs again
+#
+for cpu in `hotpluggable_offline_cpus`; do
+ online_cpu_expect_success $cpu
+done
+
+#
+# Test with cpu notifier error injection
+#
+
+DEBUGFS=`mount -t debugfs | head -1 | awk '{ print $3 }'`
+NOTIFIER_ERR_INJECT_DIR=$DEBUGFS/notifier-error-inject/cpu
+
+prerequisite_extra()
+{
+ msg="skip extra tests:"
+
+ /sbin/modprobe -q -r cpu-notifier-error-inject
+ /sbin/modprobe -q cpu-notifier-error-inject priority=$priority
+
+ if [ ! -d "$DEBUGFS" ]; then
+ echo $msg debugfs is not mounted >&2
+ exit 0
+ fi
+
+ if [ ! -d $NOTIFIER_ERR_INJECT_DIR ]; then
+ echo $msg cpu-notifier-error-inject module is not available >&2
+ exit 0
+ fi
+}
+
+prerequisite_extra
+
+#
+# Offline all hot-pluggable CPUs
+#
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/CPU_DOWN_PREPARE/error
+for cpu in `hotpluggable_online_cpus`; do
+ offline_cpu_expect_success $cpu
+done
+
+#
+# Test CPU hot-add error handling (offline => online)
+#
+echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/CPU_UP_PREPARE/error
+for cpu in `hotpluggable_offline_cpus`; do
+ online_cpu_expect_fail $cpu
+done
+
+#
+# Online all hot-pluggable CPUs
+#
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/CPU_UP_PREPARE/error
+for cpu in `hotpluggable_offline_cpus`; do
+ online_cpu_expect_success $cpu
+done
+
+#
+# Test CPU hot-remove error handling (online => offline)
+#
+echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/CPU_DOWN_PREPARE/error
+for cpu in `hotpluggable_online_cpus`; do
+ offline_cpu_expect_fail $cpu
+done
+
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/CPU_DOWN_PREPARE/error
+/sbin/modprobe -q -r cpu-notifier-error-inject
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
new file mode 100644
index 000000000000..7c9c20ff578a
--- /dev/null
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -0,0 +1,6 @@
+all:
+
+run_tests:
+ ./on-off-test.sh
+
+clean:
diff --git a/tools/testing/selftests/memory-hotplug/on-off-test.sh b/tools/testing/selftests/memory-hotplug/on-off-test.sh
new file mode 100644
index 000000000000..a2816f631542
--- /dev/null
+++ b/tools/testing/selftests/memory-hotplug/on-off-test.sh
@@ -0,0 +1,230 @@
+#!/bin/bash
+
+SYSFS=
+
+prerequisite()
+{
+ msg="skip all tests:"
+
+ if [ $UID != 0 ]; then
+ echo $msg must be run as root >&2
+ exit 0
+ fi
+
+ SYSFS=`mount -t sysfs | head -1 | awk '{ print $3 }'`
+
+ if [ ! -d "$SYSFS" ]; then
+ echo $msg sysfs is not mounted >&2
+ exit 0
+ fi
+
+ if ! ls $SYSFS/devices/system/memory/memory* > /dev/null 2>&1; then
+ echo $msg memory hotplug is not supported >&2
+ exit 0
+ fi
+}
+
+#
+# list all hot-pluggable memory
+#
+hotpluggable_memory()
+{
+ local state=${1:-.\*}
+
+ for memory in $SYSFS/devices/system/memory/memory*; do
+ if grep -q 1 $memory/removable &&
+ grep -q $state $memory/state; then
+ echo ${memory##/*/memory}
+ fi
+ done
+}
+
+hotpluggable_offline_memory()
+{
+ hotpluggable_memory offline
+}
+
+hotpluggable_online_memory()
+{
+ hotpluggable_memory online
+}
+
+memory_is_online()
+{
+ grep -q online $SYSFS/devices/system/memory/memory$1/state
+}
+
+memory_is_offline()
+{
+ grep -q offline $SYSFS/devices/system/memory/memory$1/state
+}
+
+online_memory()
+{
+ echo online > $SYSFS/devices/system/memory/memory$1/state
+}
+
+offline_memory()
+{
+ echo offline > $SYSFS/devices/system/memory/memory$1/state
+}
+
+online_memory_expect_success()
+{
+ local memory=$1
+
+ if ! online_memory $memory; then
+ echo $FUNCNAME $memory: unexpected fail >&2
+ elif ! memory_is_online $memory; then
+ echo $FUNCNAME $memory: unexpected offline >&2
+ fi
+}
+
+online_memory_expect_fail()
+{
+ local memory=$1
+
+ if online_memory $memory 2> /dev/null; then
+ echo $FUNCNAME $memory: unexpected success >&2
+ elif ! memory_is_offline $memory; then
+ echo $FUNCNAME $memory: unexpected online >&2
+ fi
+}
+
+offline_memory_expect_success()
+{
+ local memory=$1
+
+ if ! offline_memory $memory; then
+ echo $FUNCNAME $memory: unexpected fail >&2
+ elif ! memory_is_offline $memory; then
+ echo $FUNCNAME $memory: unexpected online >&2
+ fi
+}
+
+offline_memory_expect_fail()
+{
+ local memory=$1
+
+ if offline_memory $memory 2> /dev/null; then
+ echo $FUNCNAME $memory: unexpected success >&2
+ elif ! memory_is_online $memory; then
+ echo $FUNCNAME $memory: unexpected offline >&2
+ fi
+}
+
+error=-12
+priority=0
+ratio=10
+
+while getopts e:hp:r: opt; do
+ case $opt in
+ e)
+ error=$OPTARG
+ ;;
+ h)
+ echo "Usage: $0 [ -e errno ] [ -p notifier-priority ] [ -r percent-of-memory-to-offline ]"
+ exit
+ ;;
+ p)
+ priority=$OPTARG
+ ;;
+ r)
+ ratio=$OPTARG
+ ;;
+ esac
+done
+
+if ! [ "$error" -ge -4095 -a "$error" -lt 0 ]; then
+ echo "error code must be -4095 <= errno < 0" >&2
+ exit 1
+fi
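Similarly, illustrative invocations of the memory hotplug script (values are examples only):

  ./on-off-test.sh                 # defaults: errno -12, priority 0, offline ~10% of removable blocks
  ./on-off-test.sh -e -16 -r 50    # e.g. inject -EBUSY and try to offline ~50% of removable blocks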
+
+prerequisite
+
+#
+# Online all hot-pluggable memory
+#
+for memory in `hotpluggable_offline_memory`; do
+ online_memory_expect_success $memory
+done
+
+#
+# Offline $ratio percent of hot-pluggable memory
+#
+for memory in `hotpluggable_online_memory`; do
+ if [ $((RANDOM % 100)) -lt $ratio ]; then
+ offline_memory_expect_success $memory
+ fi
+done
+
+#
+# Online all hot-pluggable memory again
+#
+for memory in `hotpluggable_offline_memory`; do
+ online_memory_expect_success $memory
+done
+
+#
+# Test with memory notifier error injection
+#
+
+DEBUGFS=`mount -t debugfs | head -1 | awk '{ print $3 }'`
+NOTIFIER_ERR_INJECT_DIR=$DEBUGFS/notifier-error-inject/memory
+
+prerequisite_extra()
+{
+ msg="skip extra tests:"
+
+ /sbin/modprobe -q -r memory-notifier-error-inject
+ /sbin/modprobe -q memory-notifier-error-inject priority=$priority
+
+ if [ ! -d "$DEBUGFS" ]; then
+ echo $msg debugfs is not mounted >&2
+ exit 0
+ fi
+
+ if [ ! -d $NOTIFIER_ERR_INJECT_DIR ]; then
+ echo $msg memory-notifier-error-inject module is not available >&2
+ exit 0
+ fi
+}
+
+prerequisite_extra
+
+#
+# Offline $ratio percent of hot-pluggable memory
+#
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
+for memory in `hotpluggable_online_memory`; do
+ if [ $((RANDOM % 100)) -lt $ratio ]; then
+ offline_memory_expect_success $memory
+ fi
+done
+
+#
+# Test memory hot-add error handling (offline => online)
+#
+echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
+for memory in `hotpluggable_offline_memory`; do
+ online_memory_expect_fail $memory
+done
+
+#
+# Online all hot-pluggable memory
+#
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
+for memory in `hotpluggable_offline_memory`; do
+ online_memory_expect_success $memory
+done
+
+#
+# Test memory hot-remove error handling (online => offline)
+#
+echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
+for memory in `hotpluggable_online_memory`; do
+ offline_memory_expect_fail $memory
+done
+
+echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
+/sbin/modprobe -q -r memory-notifier-error-inject
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 164cbcf61106..808d5a9d5dcf 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -437,34 +437,34 @@ static void slab_stats(struct slabinfo *s)
printf("Fastpath %8lu %8lu %3lu %3lu\n",
s->alloc_fastpath, s->free_fastpath,
s->alloc_fastpath * 100 / total_alloc,
- s->free_fastpath * 100 / total_free);
+ total_free ? s->free_fastpath * 100 / total_free : 0);
printf("Slowpath %8lu %8lu %3lu %3lu\n",
total_alloc - s->alloc_fastpath, s->free_slowpath,
(total_alloc - s->alloc_fastpath) * 100 / total_alloc,
- s->free_slowpath * 100 / total_free);
+ total_free ? s->free_slowpath * 100 / total_free : 0);
printf("Page Alloc %8lu %8lu %3lu %3lu\n",
s->alloc_slab, s->free_slab,
s->alloc_slab * 100 / total_alloc,
- s->free_slab * 100 / total_free);
+ total_free ? s->free_slab * 100 / total_free : 0);
printf("Add partial %8lu %8lu %3lu %3lu\n",
s->deactivate_to_head + s->deactivate_to_tail,
s->free_add_partial,
(s->deactivate_to_head + s->deactivate_to_tail) * 100 / total_alloc,
- s->free_add_partial * 100 / total_free);
+ total_free ? s->free_add_partial * 100 / total_free : 0);
printf("Remove partial %8lu %8lu %3lu %3lu\n",
s->alloc_from_partial, s->free_remove_partial,
s->alloc_from_partial * 100 / total_alloc,
- s->free_remove_partial * 100 / total_free);
+ total_free ? s->free_remove_partial * 100 / total_free : 0);
printf("Cpu partial list %8lu %8lu %3lu %3lu\n",
s->cpu_partial_alloc, s->cpu_partial_free,
s->cpu_partial_alloc * 100 / total_alloc,
- s->cpu_partial_free * 100 / total_free);
+ total_free ? s->cpu_partial_free * 100 / total_free : 0);
printf("RemoteObj/SlabFrozen %8lu %8lu %3lu %3lu\n",
s->deactivate_remote_frees, s->free_frozen,
s->deactivate_remote_frees * 100 / total_alloc,
- s->free_frozen * 100 / total_free);
+ total_free ? s->free_frozen * 100 / total_free : 0);
printf("Total %8lu %8lu\n\n", total_alloc, total_free);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
if (copy_from_user(&csigset, sigmask_arg->sigset,
sizeof csigset))
goto out;
- }
- sigset_from_compat(&sigset, &csigset);
- r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+ sigset_from_compat(&sigset, &csigset);
+ r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+ } else
+ r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
break;
}
default: